Fri Dec 2 12:29:36 2011 UTC
update a comment


(yamt)
diff -r1.27 -r1.28 src/sys/kern/sched_4bsd.c

cvs diff -r1.27 -r1.28 src/sys/kern/sched_4bsd.c

--- src/sys/kern/sched_4bsd.c 2011/07/27 14:35:34 1.27
+++ src/sys/kern/sched_4bsd.c 2011/12/02 12:29:35 1.28
@@ -1,14 +1,14 @@
-/*	$NetBSD: sched_4bsd.c,v 1.27 2011/07/27 14:35:34 uebayasi Exp $	*/
+/*	$NetBSD: sched_4bsd.c,v 1.28 2011/12/02 12:29:35 yamt Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
  * Daniel Sieger.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -58,27 +58,27 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.27 2011/07/27 14:35:34 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.28 2011/12/02 12:29:35 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
 #include "opt_perfctrs.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/callout.h>
 #include <sys/cpu.h>
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/signalvar.h>
 #include <sys/resourcevar.h>
@@ -364,38 +364,37 @@ resetpriority(struct lwp *l)
 	KASSERT(lwp_locked(l, NULL));
 
 	if (l->l_class != SCHED_OTHER)
 		return;
 
 	/* See comments above ESTCPU_SHIFT definition. */
 	pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
 	pri = imax(pri, 0);
 	if (pri != l->l_priority)
 		lwp_changepri(l, pri);
 }
 
 /*
- * We adjust the priority of the current process. The priority of a process
- * gets worse as it accumulates CPU time. The CPU usage estimator (l_estcpu)
- * is increased here. The formula for computing priorities (in kern_synch.c)
- * will compute a different value each time l_estcpu increases. This can
- * cause a switch, but unless the priority crosses a PPQ boundary the actual
- * queue will not change. The CPU usage estimator ramps up quite quickly
- * when the process is running (linearly), and decays away exponentially, at
- * a rate which is proportionally slower when the system is busy. The basic
- * principle is that the system will 90% forget that the process used a lot
- * of CPU time in 5 * loadav seconds. This causes the system to favor
- * processes which haven't run much recently, and to round-robin among other
- * processes.
+ * We adjust the priority of the current LWP. The priority of a LWP
+ * gets worse as it accumulates CPU time. The CPU usage estimator (l_estcpu)
+ * is increased here. The formula for computing priorities will compute a
+ * different value each time l_estcpu increases. This can cause a switch,
+ * but unless the priority crosses a PPQ boundary the actual queue will not
+ * change. The CPU usage estimator ramps up quite quickly when the process
+ * is running (linearly), and decays away exponentially, at a rate which is
+ * proportionally slower when the system is busy. The basic principle is
+ * that the system will 90% forget that the process used a lot of CPU time
+ * in 5 * loadav seconds. This causes the system to favor processes which
+ * haven't run much recently, and to round-robin among other processes.
  */
 
 void
 sched_schedclock(struct lwp *l)
 {
 
 	if (l->l_class != SCHED_OTHER)
 		return;
 
 	KASSERT(!CURCPU_IDLE_P());
 	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
 	lwp_lock(l);
 	resetpriority(l);
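
For reference, the "90% forget ... in 5 * loadav seconds" behaviour described in the updated comment follows from the classic 4BSD per-second decay, where the estimator is scaled by (2 * loadav) / (2 * loadav + 1) roughly once a second. The standalone userland sketch below is not kernel code and assumes that classic decay factor; it only evaluates how much of l_estcpu survives after 5 * loadav seconds for a few load averages.

/*
 * Sketch (userland, not part of sched_4bsd.c): check the "90% forget in
 * 5 * loadav seconds" claim, assuming the classic 4BSD decay factor of
 * (2 * loadav) / (2 * loadav + 1) applied once per second.
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double loadav;

	for (loadav = 1.0; loadav <= 8.0; loadav *= 2.0) {
		/* per-second decay factor and what remains after 5 * loadav seconds */
		double decay = (2.0 * loadav) / (2.0 * loadav + 1.0);
		double left = pow(decay, 5.0 * loadav);

		printf("loadav %.0f: %.1f%% of l_estcpu left after %.0f seconds\n",
		    loadav, 100.0 * left, 5.0 * loadav);
	}
	return 0;
}

With loadav = 1 about 13% of the estimate remains after 5 seconds; as loadav grows the remainder approaches e^-2.5 (roughly 8%), which is the sense in which the estimator "90% forgets" past CPU usage while decaying proportionally more slowly on a busy system.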