Tue Oct 7 09:48:27 2008 UTC
- Replace lwp_t::l_sched_info with a union: a scheduler-private pointer and a time-slice.
- Change the minimal time-quantum to ~20 ms.
- Thus remove the now-unneeded pool in SCHED_M2, and the unused sched_lwp_exit().
- Do not increment l_slptime twice for SCHED_4BSD (regression fix).


(rmind)
diff -r1.122 -r1.123 src/sys/kern/kern_lwp.c
diff -r1.21 -r1.22 src/sys/kern/kern_runq.c
diff -r1.23 -r1.24 src/sys/kern/sched_4bsd.c
diff -r1.25 -r1.26 src/sys/kern/sched_m2.c
diff -r1.107 -r1.108 src/sys/sys/lwp.h
diff -r1.64 -r1.65 src/sys/sys/sched.h
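
The central change, summarized before the per-file diffs: the per-LWP scheduler field becomes a union, so SCHED_M2 can keep its time-slice inline in the LWP instead of behind a separately pooled allocation. A minimal userland sketch of the idea (illustrative only; the union members follow the lwp.h diff below, everything else here is modeled):

    #include <assert.h>
    #include <stdlib.h>

    /* Old scheme: an opaque pointer, allocated and freed per LWP. */
    struct lwp_old {
        void *l_sched_info;
    };

    /* New scheme: a union; SCHED_M2 stores its time-slice inline. */
    struct lwp_new {
        union {
            void *info;           /* for schedulers that need a blob */
            unsigned timeslice;   /* SCHED_M2: time-quantum in ticks */
        } l_sched;
    };

    int
    main(void)
    {
        struct lwp_old o;
        struct lwp_new n;

        /* Old: an allocation at LWP creation and a free at exit; this
         * is what sil_pool and sched_lwp_exit() provided in SCHED_M2. */
        o.l_sched_info = malloc(sizeof(unsigned));
        assert(o.l_sched_info != NULL);
        free(o.l_sched_info);

        /* New: the member lives and dies with the LWP structure. */
        n.l_sched.timeslice = 2;    /* e.g. mstohz(20) at hz = 100 */
        return 0;
    }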

cvs diff -r1.122 -r1.123 src/sys/kern/kern_lwp.c

--- src/sys/kern/kern_lwp.c 2008/07/14 01:19:37 1.122
+++ src/sys/kern/kern_lwp.c 2008/10/07 09:48:27 1.123
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_lwp.c,v 1.122 2008/07/14 01:19:37 rmind Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.123 2008/10/07 09:48:27 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Nathan J. Williams, and Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -196,27 +196,27 @@
  *
  *	LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
  *
  * Note that an LWP is considered running or likely to run soon if in
  * one of the following states.  This affects the value of p_nrlwps:
  *
  *	LSRUN, LSONPROC, LSSLEEP
  *
  * p_lock does not need to be held when transitioning among these
  * three states.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.122 2008/07/14 01:19:37 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.123 2008/10/07 09:48:27 rmind Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
 
 #define _LWP_API_PRIVATE
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/cpu.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/syscallargs.h>
 #include <sys/syscall_stats.h>
@@ -921,27 +921,26 @@ lwp_free(struct lwp *l, bool recycle, bo
 
 	/*
 	 * Free the LWP's turnstile and the LWP structure itself unless the
 	 * caller wants to recycle them.  Also, free the scheduler specific
 	 * data.
 	 *
 	 * We can't return turnstile0 to the pool (it didn't come from it),
 	 * so if it comes up just drop it quietly and move on.
 	 *
	 * We don't recycle the VM resources at this time.
 	 */
 	if (l->l_lwpctl != NULL)
 		lwp_ctl_free(l);
-	sched_lwp_exit(l);
 
 	if (!recycle && l->l_ts != &turnstile0)
 		pool_cache_put(turnstile_cache, l->l_ts);
 	if (l->l_name != NULL)
 		kmem_free(l->l_name, MAXCOMLEN);
 #ifndef __NO_CPU_LWP_FREE
 	cpu_lwp_free2(l);
 #endif
 	KASSERT((l->l_flag & LW_INMEM) != 0);
 	uvm_lwp_exit(l);
 	KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
 	KASSERT(l->l_inheritedprio == -1);
 	if (!recycle)
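
A note on the lwp_free() hunk: the sched_lwp_exit(l) call is gone, but the comment above it still promises to "free the scheduler specific data". After this commit there is no such data to free; the time-slice lives inline in the new l_sched union (see the lwp.h diff below), and the only scheduler whose sched_lwp_exit() did real work was SCHED_M2, whose implementation, removed in the sched_m2.c diff below, returned the per-LWP sched_info_lwp_t to sil_pool.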

cvs diff -r1.21 -r1.22 src/sys/kern/kern_runq.c

--- src/sys/kern/kern_runq.c 2008/09/30 16:28:45 1.21
+++ src/sys/kern/kern_runq.c 2008/10/07 09:48:27 1.22
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_runq.c,v 1.21 2008/09/30 16:28:45 rmind Exp $	*/
+/*	$NetBSD: kern_runq.c,v 1.22 2008/10/07 09:48:27 rmind Exp $	*/
 
 /*
  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -17,27 +17,27 @@
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.21 2008/09/30 16:28:45 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.22 2008/10/07 09:48:27 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/bitops.h>
 #include <sys/cpu.h>
 #include <sys/idle.h>
 #include <sys/intr.h>
 #include <sys/kmem.h>
 #include <sys/lwp.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/sched.h>
 #include <sys/syscallargs.h>
@@ -113,28 +113,28 @@ static u_int cacheht_time;	/* Cache hot
 static u_int min_catch;		/* Minimal LWP count for catching */
 static u_int balance_period;	/* Balance period */
 static struct cpu_info *worker_ci;	/* Victim CPU */
 #ifdef MULTIPROCESSOR
 static struct callout balance_ch;	/* Callout of balancer */
 #endif
 
 void
 runq_init(void)
 {
 
 	/* Balancing */
 	worker_ci = curcpu();
 	cacheht_time = mstohz(3);	/* ~3 ms */
-	balance_period = mstohz(300);	/* ~300ms */
+	balance_period = mstohz(300);	/* ~300 ms */
 
 	/* Minimal count of LWPs for catching */
 	min_catch = 1;
 
 	/* Initialize balancing callout and run it */
 #ifdef MULTIPROCESSOR
 	callout_init(&balance_ch, CALLOUT_MPSAFE);
 	callout_setfunc(&balance_ch, sched_balance, NULL);
 	callout_schedule(&balance_ch, balance_period);
 #endif
 }
 
 void
@@ -629,26 +629,29 @@ sched_idle(void)
 {
 
 }
 #endif	/* MULTIPROCESSOR */
 
 /*
  * Scheduling statistics and balancing.
  */
 void
 sched_lwp_stats(struct lwp *l)
 {
 	int batch;
 
+	KASSERT(lwp_locked(l, NULL));
+
+	/* Update sleep time */
 	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
 	    l->l_stat == LSSUSPENDED)
 		l->l_slptime++;
 
 	/*
 	 * Set that thread is more CPU-bound, if sum of run time exceeds the
 	 * sum of sleep time.  Check if thread is CPU-bound a first time.
 	 */
 	batch = (l->l_rticksum > l->l_slpticksum);
 	if (batch != 0) {
 		if ((l->l_flag & LW_BATCH) == 0)
 			batch = 0;
 		l->l_flag |= LW_BATCH;
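
The two additions in sched_lwp_stats() are small but load-bearing for the SCHED_4BSD fix below: the KASSERT documents that callers must hold the LWP's scheduler lock, and the new comment marks this function as the place that advances l_slptime, once per second, for LWPs that are asleep, stopped, or suspended; after this commit it is the only incrementer, which is the point of the regression fix. A userland model of that contract (hypothetical names and states; only the rule itself comes from the diff):

    #include <assert.h>

    enum state { RUN, ONPROC, SLEEP, STOP, SUSPENDED };

    struct lwp_model {
        enum state stat;
        unsigned slptime;  /* seconds spent off the CPU */
        int locked;        /* stands in for lwp_locked(l, NULL) */
    };

    static void
    lwp_stats(struct lwp_model *l)
    {
        assert(l->locked);  /* the KASSERT: caller holds the LWP lock */
        if (l->stat == SLEEP || l->stat == STOP || l->stat == SUSPENDED)
            l->slptime++;   /* the single place this is incremented */
    }

    int
    main(void)
    {
        struct lwp_model l = { SLEEP, 0, 1 };

        lwp_stats(&l);      /* one simulated second */
        assert(l.slptime == 1);
        return 0;
    }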

cvs diff -r1.23 -r1.24 src/sys/kern/sched_4bsd.c

--- src/sys/kern/sched_4bsd.c 2008/05/25 22:04:50 1.23
+++ src/sys/kern/sched_4bsd.c 2008/10/07 09:48:27 1.24
@@ -1,14 +1,14 @@
-/*	$NetBSD: sched_4bsd.c,v 1.23 2008/05/25 22:04:50 ad Exp $	*/
+/*	$NetBSD: sched_4bsd.c,v 1.24 2008/10/07 09:48:27 rmind Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
  * Daniel Sieger.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -58,27 +58,27 @@
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.23 2008/05/25 22:04:50 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.24 2008/10/07 09:48:27 rmind Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
 #include "opt_perfctrs.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/callout.h>
 #include <sys/cpu.h>
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/signalvar.h>
 #include <sys/resourcevar.h>
@@ -278,43 +278,34 @@ decay_cpu_batch(fixpt_t loadfac, fixpt_t
 	}
 
 	return estcpu;
 }
 
 /*
  * sched_pstats_hook:
  *
  * Periodically called from sched_pstats(); used to recalculate priorities.
  */
 void
 sched_pstats_hook(struct lwp *l, int batch)
 {
-	fixpt_t loadfac;
-	int sleeptm;
 
 	/*
 	 * If the LWP has slept an entire second, stop recalculating
 	 * its priority until it wakes up.
 	 */
-	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
-	    l->l_stat == LSSUSPENDED) {
-		l->l_slptime++;
-		sleeptm = 1;
-	} else {
-		sleeptm = 0x7fffffff;
-	}
-
-	if (l->l_slptime <= sleeptm) {
-		loadfac = 2 * (averunnable.ldavg[0]);
+	KASSERT(lwp_locked(l, NULL));
+	if (l->l_slptime > 0) {
+		fixpt_t loadfac = 2 * (averunnable.ldavg[0]);
 		l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
 		resetpriority(l);
 	}
 }
 
 /*
  * Recalculate the priority of a process after it has slept for a while.
  */
 static void
 updatepri(struct lwp *l)
 {
 	fixpt_t loadfac;
 
@@ -462,32 +453,26 @@ void
 sched_slept(struct lwp *l)
 {
 
 }
 
 void
 sched_lwp_fork(struct lwp *l1, struct lwp *l2)
 {
 
 	l2->l_estcpu = l1->l_estcpu;
 }
 
 void
-sched_lwp_exit(struct lwp *l)
-{
-
-}
-
-void
 sched_lwp_collect(struct lwp *t)
 {
 	lwp_t *l;
 
 	/* Absorb estcpu value of collected LWP. */
 	l = curlwp;
 	lwp_lock(l);
 	l->l_estcpu += t->l_estcpu;
 	lwp_unlock(l);
 }
 
 void
 sched_oncpu(lwp_t *l)
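
What the sched_pstats_hook() rewrite fixes: sched_lwp_stats() in kern_runq.c (above) already increments l_slptime once per second for sleeping LWPs, and the old hook incremented it a second time, so sleep time accumulated at twice the real rate and sleepers were over-rewarded once updatepri() decayed their l_estcpu on wakeup. The new hook only reads l_slptime. A simplified plain-C model of the effect; the decay follows the classic 4.4BSD formula, estcpu *= 2*load / (2*load + 1), which matches decay_cpu()'s fixed-point arithmetic only up to rounding:

    #include <stdio.h>

    static double
    decay(double estcpu, double load)
    {
        double loadfac = 2.0 * load;
        return estcpu * loadfac / (loadfac + 1.0);
    }

    int
    main(void)
    {
        double e_ok = 100.0, e_bug = 100.0;
        const double load = 1.0;    /* 1-minute load average */
        int sec;

        /* updatepri() applies the decay roughly once per counted
         * second of sleep, so double-counting doubles the decay. */
        for (sec = 0; sec < 5; sec++) {
            e_ok = decay(e_ok, load);
            e_bug = decay(decay(e_bug, load), load);
        }
        printf("estcpu after 5 s asleep: correct %.1f, buggy %.1f\n",
            e_ok, e_bug);
        return 0;
    }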

cvs diff -r1.25 -r1.26 src/sys/kern/sched_m2.c

--- src/sys/kern/sched_m2.c 2008/05/19 12:48:54 1.25
+++ src/sys/kern/sched_m2.c 2008/10/07 09:48:27 1.26
@@ -1,14 +1,14 @@
-/*	$NetBSD: sched_m2.c,v 1.25 2008/05/19 12:48:54 rmind Exp $	*/
+/*	$NetBSD: sched_m2.c,v 1.26 2008/10/07 09:48:27 rmind Exp $	*/
 
 /*
  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -23,27 +23,27 @@
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 /*
  * TODO:
  *  - Implementation of fair share queue;
  *  - Support for NUMA;
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.25 2008/05/19 12:48:54 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.26 2008/10/07 09:48:27 rmind Exp $");
 
 #include <sys/param.h>
 
 #include <sys/bitops.h>
 #include <sys/cpu.h>
 #include <sys/callout.h>
 #include <sys/errno.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/lwp.h>
 #include <sys/mutex.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
@@ -65,55 +65,45 @@ __KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v
 #define	PRI_HIGHEST_TS	(MAXPRI_USER)
 
 /*
  * Time-slices and priorities.
  */
 static u_int	min_ts;			/* Minimal time-slice */
 static u_int	max_ts;			/* Maximal time-slice */
 static u_int	rt_ts;			/* Real-time time-slice */
 static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
 static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */
 
 static void	sched_precalcts(void);
 
-typedef struct {
-	u_int		sl_timeslice;	/* Time-slice of thread */
-} sched_info_lwp_t;
-
-static pool_cache_t	sil_pool;
-
 /*
  * Initialization and setup.
  */
 
 void
 sched_rqinit(void)
 {
 	struct cpu_info *ci = curcpu();
 
 	if (hz < 100) {
 		panic("sched_rqinit: value of HZ is too low\n");
 	}
 
 	/* Default timing ranges */
-	min_ts = mstohz(50);		/* ~50ms */
-	max_ts = mstohz(150);		/* ~150ms */
-	rt_ts = mstohz(100);		/* ~100ms */
+	min_ts = mstohz(20);		/* ~20 ms */
+	max_ts = mstohz(150);		/* ~150 ms */
+	rt_ts = mstohz(100);		/* ~100 ms */
 	sched_precalcts();
 
-	/* Pool of the scheduler-specific structures */
-	sil_pool = pool_cache_init(sizeof(sched_info_lwp_t), coherency_unit,
-	    0, 0, "lwpsd", NULL, IPL_NONE, NULL, NULL, NULL);
-
 	/* Attach the primary CPU here */
 	sched_cpuattach(ci);
 
 	sched_lwp_fork(NULL, &lwp0);
 	sched_newts(&lwp0);
 }
 
 /* Pre-calculate the time-slices for the priorities */
 static void
 sched_precalcts(void)
 {
 	pri_t p;
 
@@ -142,85 +132,68 @@ sched_proc_fork(struct proc *parent, str
 	struct lwp *l;
 
 	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
 		lwp_lock(l);
 		sched_newts(l);
 		lwp_unlock(l);
 	}
 }
 
 void
 sched_proc_exit(struct proc *child, struct proc *parent)
 {
 
-	/* Dummy */
 }
 
 void
 sched_lwp_fork(struct lwp *l1, struct lwp *l2)
 {
 
-	KASSERT(l2->l_sched_info == NULL);
-	l2->l_sched_info = pool_cache_get(sil_pool, PR_WAITOK);
-	memset(l2->l_sched_info, 0, sizeof(sched_info_lwp_t));
-}
-
-void
-sched_lwp_exit(struct lwp *l)
-{
-
-	KASSERT(l->l_sched_info != NULL);
-	pool_cache_put(sil_pool, l->l_sched_info);
-	l->l_sched_info = NULL;
 }
 
 void
 sched_lwp_collect(struct lwp *l)
 {
 
 }
 
 void
 sched_setrunnable(struct lwp *l)
 {
 
-	/* Dummy */
 }
 
 void
 sched_schedclock(struct lwp *l)
 {
 
-	/* Dummy */
 }
 
 /*
  * Priorities and time-slice.
  */
 
 void
 sched_nice(struct proc *p, int prio)
 {
 
-	/* TODO: implement as SCHED_IA */
 }
 
 /* Recalculate the time-slice */
 void
 sched_newts(struct lwp *l)
 {
-	sched_info_lwp_t *sil = l->l_sched_info;
 
-	sil->sl_timeslice = ts_map[lwp_eprio(l)];
+	l->l_sched.timeslice = ts_map[lwp_eprio(l)];
 }
 
 void
 sched_slept(struct lwp *l)
 {
 
 	/*
 	 * If thread is in time-sharing queue and batch flag is not marked,
 	 * increase the the priority, and run with the lower time-quantum.
 	 */
 	if (l->l_priority < PRI_HIGHEST_TS && (l->l_flag & LW_BATCH) == 0) {
 		KASSERT(l->l_class == SCHED_OTHER);
 		l->l_priority++;
@@ -235,107 +208,106 @@ sched_wakeup(struct lwp *l)
 	if (l->l_slptime >= 1)
 		l->l_priority = high_pri[l->l_priority];
 }
 
 void
 sched_pstats_hook(struct lwp *l, int batch)
 {
 	pri_t prio;
 
 	/*
 	 * Estimate threads on time-sharing queue only, however,
 	 * exclude the highest priority for performance purposes.
 	 */
+	KASSERT(lwp_locked(l, NULL));
 	if (l->l_priority >= PRI_HIGHEST_TS)
 		return;
 	KASSERT(l->l_class == SCHED_OTHER);
 
 	/* If it is CPU-bound not a first time - decrease the priority */
 	prio = l->l_priority;
 	if (batch && prio != 0)
 		prio--;
 
 	/* If thread was not ran a second or more - set a high priority */
 	if (l->l_stat == LSRUN) {
 		if (l->l_rticks && (hardclock_ticks - l->l_rticks >= hz))
 			prio = high_pri[prio];
 		/* Re-enqueue the thread if priority has changed */
 		if (prio != l->l_priority)
 			lwp_changepri(l, prio);
 	} else {
 		/* In other states, change the priority directly */
 		l->l_priority = prio;
 	}
 }
 
 void
 sched_oncpu(lwp_t *l)
 {
-	sched_info_lwp_t *sil = l->l_sched_info;
+	struct schedstate_percpu *spc = &l->l_cpu->ci_schedstate;
 
 	/* Update the counters */
-	sil = l->l_sched_info;
-	KASSERT(sil->sl_timeslice >= min_ts);
-	KASSERT(sil->sl_timeslice <= max_ts);
-	l->l_cpu->ci_schedstate.spc_ticks = sil->sl_timeslice;
+	KASSERT(l->l_sched.timeslice >= min_ts);
+	KASSERT(l->l_sched.timeslice <= max_ts);
+	spc->spc_ticks = l->l_sched.timeslice;
 }
 
 /*
  * Time-driven events.
  */
 
 /*
  * Called once per time-quantum.  This routine is CPU-local and runs at
  * IPL_SCHED, thus the locking is not needed.
  */
 void
 sched_tick(struct cpu_info *ci)
 {
 	struct schedstate_percpu *spc = &ci->ci_schedstate;
 	struct lwp *l = curlwp;
-	const sched_info_lwp_t *sil = l->l_sched_info;
 
-	if (CURCPU_IDLE_P())
+	if (__predict_false(CURCPU_IDLE_P()))
 		return;
 
 	switch (l->l_class) {
 	case SCHED_FIFO:
 		/*
 		 * Update the time-quantum, and continue running,
 		 * if thread runs on FIFO real-time policy.
 		 */
 		KASSERT(l->l_priority > PRI_HIGHEST_TS);
-		spc->spc_ticks = sil->sl_timeslice;
+		spc->spc_ticks = l->l_sched.timeslice;
 		return;
 	case SCHED_OTHER:
 		/*
 		 * If thread is in time-sharing queue, decrease the priority,
 		 * and run with a higher time-quantum.
 		 */
 		KASSERT(l->l_priority <= PRI_HIGHEST_TS);
 		if (l->l_priority != 0)
 			l->l_priority--;
 		break;
 	}
 
 	/*
 	 * If there are higher priority threads or threads in the same queue,
 	 * mark that thread should yield, otherwise, continue running.
 	 */
 	if (lwp_eprio(l) <= spc->spc_maxpriority || l->l_target_cpu) {
 		spc->spc_flags |= SPCF_SHOULDYIELD;
 		cpu_need_resched(ci, 0);
 	} else
-		spc->spc_ticks = sil->sl_timeslice;
+		spc->spc_ticks = l->l_sched.timeslice;
 }
 
 /*
  * Sysctl nodes and initialization.
  */
 
 static int
 sysctl_sched_rtts(SYSCTLFN_ARGS)
 {
 	struct sysctlnode node;
 	int rttsms = hztoms(rt_ts);
 
 	node = *rnode;
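
Two details in this file are worth calling out. The __predict_false() added in sched_tick() is NetBSD's branch-prediction annotation (a wrapper around the compiler's __builtin_expect), marking the idle-CPU early return as the cold path. And the retuned time-quanta are stored in clock ticks via mstohz(); a sketch of the resulting granularity, assuming the conventional ms * hz / 1000 scaling (the real macro lives in the kernel headers and may round differently):

    #include <stdio.h>

    static unsigned
    mstohz_sketch(unsigned ms, unsigned hz)
    {
        return ms * hz / 1000;
    }

    int
    main(void)
    {
        const unsigned hzs[] = { 100, 250, 1000 };
        size_t i;

        for (i = 0; i < sizeof(hzs) / sizeof(hzs[0]); i++)
            printf("hz = %4u: min_ts = %3u, rt_ts = %3u, max_ts = %3u ticks\n",
                hzs[i], mstohz_sketch(20, hzs[i]),
                mstohz_sketch(100, hzs[i]), mstohz_sketch(150, hzs[i]));
        return 0;
    }

At the default hz = 100 the new min_ts is only 2 ticks, which shows how coarse the tick granularity is; this is also why sched_rqinit() refuses to run with hz < 100.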

cvs diff -r1.107 -r1.108 src/sys/sys/lwp.h

--- src/sys/sys/lwp.h 2008/08/28 06:21:22 1.107
+++ src/sys/sys/lwp.h 2008/10/07 09:48:27 1.108
@@ -1,14 +1,14 @@
-/*	$NetBSD: lwp.h,v 1.107 2008/08/28 06:21:22 yamt Exp $	*/
+/*	$NetBSD: lwp.h,v 1.108 2008/10/07 09:48:27 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Nathan J. Williams and Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -60,27 +60,30 @@
  *	S:	l_selcpu->sc_lock
  *	(:	unlocked, stable
  *	!:	unlocked, may only be reliably accessed by the LWP itself
  *	?:	undecided
  *
  * Fields are clustered together by usage (to increase the likelyhood
  * of cache hits) and by size (to reduce dead space in the structure).
  */
 struct lockdebug;
 
 struct lwp {
 	/* Scheduling and overall state */
 	TAILQ_ENTRY(lwp) l_runq;	/* s: run queue */
-	void		*l_sched_info;	/* s: Scheduler-specific structure */
+	union {
+		void *	info;		/* s: scheduler-specific structure */
+		u_int	timeslice;	/* l: time-quantum for SCHED_M2 */
+	} l_sched;
 	struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
 	kmutex_t * volatile l_mutex;	/* l: ptr to mutex on sched state */
 	int		l_ctxswtch;	/* l: performing a context switch */
 	struct user	*l_addr;	/* l: KVA of u-area (PROC ONLY) */
 	struct mdlwp	l_md;		/* l: machine-dependent fields. */
 	int		l_flag;		/* l: misc flag values */
 	int		l_stat;		/* l: overall LWP status */
 	struct bintime	l_rtime;	/* l: real time */
 	struct bintime	l_stime;	/* l: start time (while ONPROC) */
 	u_int		l_swtime;	/* l: time swapped in or out */
 	u_int		l_holdcnt;	/* l: if non-zero, don't swap */
 	u_int		l_rticks;	/* l: Saved start time of run */
 	u_int		l_rticksum;	/* l: Sum of ticks spent running */
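
A note on the union itself: it occupies only as much space as its largest member, so struct lwp does not grow, and both members alias the same storage. That aliasing is safe here because the scheduler is chosen at kernel build time, so only one interpretation of l_sched is ever live. A compile-and-run check of the space claim:

    #include <stdio.h>

    union l_sched_model {
        void *info;
        unsigned timeslice;
    };

    int
    main(void)
    {
        printf("sizeof(void *) = %zu, sizeof(union) = %zu\n",
            sizeof(void *), sizeof(union l_sched_model));
        return 0;
    }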

cvs diff -r1.64 -r1.65 src/sys/sys/sched.h

--- src/sys/sys/sched.h 2008/08/10 10:19:19 1.64
+++ src/sys/sys/sched.h 2008/10/07 09:48:27 1.65
@@ -1,14 +1,14 @@
-/*	$NetBSD: sched.h,v 1.64 2008/08/10 10:19:19 martin Exp $	*/
+/*	$NetBSD: sched.h,v 1.65 2008/10/07 09:48:27 rmind Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Ross Harvey, Jason R. Thorpe, Nathan J. Williams, Andrew Doran and
  * Daniel Sieger.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -90,27 +90,26 @@ struct sched_param {
 #define	SCHED_RR	2
 
 #if defined(_NETBSD_SOURCE)
 __BEGIN_DECLS
 
 /*
  * Interface of CPU-sets.
  */
 typedef struct _cpuset cpuset_t;
 typedef struct _kcpuset kcpuset_t;	/* XXX: lwp.h included from userland */
 
 #ifdef _KERNEL
 
-
 kcpuset_t *kcpuset_create(void);
 void	kcpuset_destroy(kcpuset_t *);
 void	kcpuset_copy(kcpuset_t *, const kcpuset_t *);
 void	kcpuset_use(kcpuset_t *);
 void	kcpuset_unuse(kcpuset_t *, kcpuset_t **);
 int	kcpuset_copyin(const cpuset_t *, kcpuset_t *, size_t);
 int	kcpuset_copyout(const kcpuset_t *, cpuset_t *, size_t);
 void	kcpuset_zero(kcpuset_t *);
 int	kcpuset_isset(cpuid_t, const kcpuset_t *);
 
 #else
 
 #define	cpuset_create()		_cpuset_create()
@@ -118,26 +117,27 @@ int kcpuset_isset(cpuid_t, const kcpuset
 #define	cpuset_size(c)		_cpuset_size(c)
 #define	cpuset_zero(c)		_cpuset_zero(c)
 #define	cpuset_isset(i, c)	_cpuset_isset(i, c)
 #define	cpuset_set(i, c)	_cpuset_set(i, c)
 #define	cpuset_clr(i, c)	_cpuset_clr(i, c)
 
 cpuset_t *_cpuset_create(void);
 void	_cpuset_destroy(cpuset_t *);
 void	_cpuset_zero(cpuset_t *);
 int	_cpuset_set(cpuid_t, cpuset_t *);
 int	_cpuset_clr(cpuid_t, cpuset_t *);
 int	_cpuset_isset(cpuid_t, const cpuset_t *);
 size_t	_cpuset_size(const cpuset_t *);
+
 #endif
 
 /*
  * Internal affinity and scheduling calls.
  */
 int	_sched_getaffinity(pid_t, lwpid_t, size_t, cpuset_t *);
 int	_sched_setaffinity(pid_t, lwpid_t, size_t, const cpuset_t *);
 int	_sched_getparam(pid_t, lwpid_t, int *, struct sched_param *);
 int	_sched_setparam(pid_t, lwpid_t, int, const struct sched_param *);
 __END_DECLS
 
 /*
  * CPU states.
@@ -240,27 +240,26 @@ bool	sched_curcpu_runnable_p(void);
 void	sched_dequeue(struct lwp *);
 void	sched_enqueue(struct lwp *, bool);
 struct lwp *	sched_nextlwp(void);
 void	sched_oncpu(struct lwp *);
 void	sched_newts(struct lwp *);
 
 /* Priority adjustment */
 void	sched_nice(struct proc *, int);
 
 /* Handlers of fork and exit */
 void	sched_proc_fork(struct proc *, struct proc *);
 void	sched_proc_exit(struct proc *, struct proc *);
 void	sched_lwp_fork(struct lwp *, struct lwp *);
-void	sched_lwp_exit(struct lwp *);
 void	sched_lwp_collect(struct lwp *);
 
 void	sched_slept(struct lwp *);
 void	sched_wakeup(struct lwp *);
 
 void	setrunnable(struct lwp *);
 void	sched_setrunnable(struct lwp *);
 
 struct cpu_info *sched_takecpu(struct lwp *);
 void	sched_print_runqueue(void (*pr)(const char *, ...));
 
 /* Dispatching */
 bool	kpreempt(uintptr_t);