Mon Apr 13 15:54:45 2020 UTC
hardclock_ticks -> getticks()


(maxv)
diff -r1.45 -r1.46 src/sys/kern/kern_condvar.c
diff -r1.65 -r1.66 src/sys/kern/kern_runq.c
diff -r1.64 -r1.65 src/sys/kern/kern_sleepq.c
diff -r1.59 -r1.60 src/sys/kern/kern_timeout.c
diff -r1.37 -r1.38 src/sys/kern/sched_m2.c
diff -r1.76 -r1.77 src/sys/kern/vfs_mount.c
diff -r1.118 -r1.119 src/sys/kern/vfs_vnode.c
diff -r1.225 -r1.226 src/sys/netinet/tcp_usrreq.c
diff -r1.125 -r1.126 src/sys/uvm/uvm_pdaemon.c
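
The change replaces direct reads of the global `hardclock_ticks' with the
getticks() accessor. As a rough sketch of what such an accessor looks like
(assumed here for illustration; the real definition lives elsewhere in the
kernel and may differ):

	#include <sys/atomic.h>

	extern int hardclock_ticks;	/* incremented by hardclock() */

	/*
	 * getticks: return a snapshot of the tick counter.  A relaxed
	 * atomic load is sufficient; callers tolerate wraparound and
	 * only need a consistent read, not memory ordering.
	 */
	int
	getticks(void)
	{
		return atomic_load_relaxed(&hardclock_ticks);
	}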

cvs diff -r1.45 -r1.46 src/sys/kern/kern_condvar.c

--- src/sys/kern/kern_condvar.c 2020/04/10 17:16:21 1.45
+++ src/sys/kern/kern_condvar.c 2020/04/13 15:54:45 1.46
@@ -1,14 +1,14 @@
-/* $NetBSD: kern_condvar.c,v 1.45 2020/04/10 17:16:21 ad Exp $ */
+/* $NetBSD: kern_condvar.c,v 1.46 2020/04/13 15:54:45 maxv Exp $ */
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -24,27 +24,27 @@
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * Kernel condition variable implementation.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.45 2020/04/10 17:16:21 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.46 2020/04/13 15:54:45 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/lwp.h>
 #include <sys/condvar.h>
 #include <sys/sleepq.h>
 #include <sys/lockdebug.h>
 #include <sys/cpu.h>
 #include <sys/kernel.h>
 
 /*
  * Accessors for the private contents of the kcondvar_t data type.
  *
@@ -330,33 +330,33 @@ timo2bintime(unsigned timo)
  */
 int
 cv_timedwaitbt(kcondvar_t *cv, kmutex_t *mtx, struct bintime *bt,
     const struct bintime *epsilon __diagused)
 {
 	struct bintime slept;
 	unsigned start, end;
 	int error;
 
 	KASSERTMSG(bt->sec >= 0, "negative timeout");
 	KASSERTMSG(epsilon != NULL, "specify maximum requested delay");
 
 	/*
-	 * hardclock_ticks is technically int, but nothing special
+	 * getticks() is technically int, but nothing special
 	 * happens instead of overflow, so we assume two's-complement
 	 * wraparound and just treat it as unsigned.
 	 */
-	start = hardclock_ticks;
+	start = getticks();
 	error = cv_timedwait(cv, mtx, bintime2timo(bt));
-	end = hardclock_ticks;
+	end = getticks();
 
 	slept = timo2bintime(end - start);
 	/* bt := bt - slept */
 	bintime_sub(bt, &slept);
 
 	return error;
 }
 
 /*
  * cv_timedwaitbt_sig:
  *
  *	Wait on a condition variable until awoken, the specified
  *	timeout expires, or interrupted by a signal.  Returns zero if
@@ -373,33 +373,33 @@ cv_timedwaitbt(kcondvar_t *cv, kmutex_t
  */
 int
 cv_timedwaitbt_sig(kcondvar_t *cv, kmutex_t *mtx, struct bintime *bt,
     const struct bintime *epsilon __diagused)
 {
 	struct bintime slept;
 	unsigned start, end;
 	int error;
 
 	KASSERTMSG(bt->sec >= 0, "negative timeout");
 	KASSERTMSG(epsilon != NULL, "specify maximum requested delay");
 
 	/*
-	 * hardclock_ticks is technically int, but nothing special
+	 * getticks() is technically int, but nothing special
 	 * happens instead of overflow, so we assume two's-complement
 	 * wraparound and just treat it as unsigned.
 	 */
-	start = hardclock_ticks;
+	start = getticks();
 	error = cv_timedwait_sig(cv, mtx, bintime2timo(bt));
-	end = hardclock_ticks;
+	end = getticks();
 
 	slept = timo2bintime(end - start);
 	/* bt := bt - slept */
 	bintime_sub(bt, &slept);
 
 	return error;
 }
 
 /*
  * cv_signal:
  *
  *	Wake the highest priority LWP waiting on a condition variable.
  *	Must be called with the interlocking mutex held.

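The comment updated above in cv_timedwaitbt() and cv_timedwaitbt_sig()
explains why the tick samples are safe to subtract: unsigned arithmetic is
defined modulo 2^32. A small user-space illustration with made-up values
near the wrap point:

	#include <stdio.h>

	int
	main(void)
	{
		/* Hypothetical getticks() samples straddling the wrap. */
		unsigned start = 0xfffffff0u;	/* before blocking */
		unsigned end = 0x00000010u;	/* after blocking */

		/*
		 * end - start is computed modulo 2^32, so the elapsed
		 * count is correct even though end < start numerically.
		 */
		printf("slept %u ticks\n", end - start);	/* 32 */
		return 0;
	}
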
cvs diff -r1.65 -r1.66 src/sys/kern/kern_runq.c

--- src/sys/kern/kern_runq.c 2020/04/04 20:17:58 1.65
+++ src/sys/kern/kern_runq.c 2020/04/13 15:54:45 1.66
@@ -1,14 +1,14 @@
-/* $NetBSD: kern_runq.c,v 1.65 2020/04/04 20:17:58 ad Exp $ */
+/* $NetBSD: kern_runq.c,v 1.66 2020/04/13 15:54:45 maxv Exp $ */
 
 /*-
  * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -46,27 +46,27 @@
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.65 2020/04/04 20:17:58 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.66 2020/04/13 15:54:45 maxv Exp $");
 
 #include "opt_dtrace.h"
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/bitops.h>
 #include <sys/cpu.h>
 #include <sys/idle.h>
 #include <sys/intr.h>
 #include <sys/kmem.h>
 #include <sys/lwp.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
@@ -407,27 +407,27 @@ sched_resched_lwp(struct lwp *l, bool un
  * Estimate if LWP is cache-hot.
  */
 static inline bool
 lwp_cache_hot(const struct lwp *l)
 {
 
 	/* Leave new LWPs in peace, determination has already been made. */
 	if (l->l_stat == LSIDL)
 		return true;
 
 	if (__predict_false(l->l_slptime != 0 || l->l_rticks == 0))
 		return false;
 
-	return (hardclock_ticks - l->l_rticks < mstohz(cacheht_time));
+	return (getticks() - l->l_rticks < mstohz(cacheht_time));
 }
 
 /*
  * Check if LWP can migrate to the chosen CPU.
  */
 static inline bool
 sched_migratable(const struct lwp *l, struct cpu_info *ci)
 {
 	const struct schedstate_percpu *spc = &ci->ci_schedstate;
 	KASSERT(lwp_locked(__UNCONST(l), NULL));
 
 	/* Is CPU offline? */
 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
@@ -830,30 +830,30 @@ sched_idle(void)
 		    spc->spc_psid == PS_NONE) {
 			return;
 		}
 	}
 
 	/*
 	 * Find something to run, unless this CPU exceeded the rate limit.
 	 * Start looking on the current package to maximise L2/L3 cache
 	 * locality.  Then expand to looking at the rest of the system.
 	 *
 	 * XXX Should probably look at 2nd class CPUs first, but they will
 	 * shed jobs via preempt() anyway.
 	 */
-	if (spc->spc_nextskim > hardclock_ticks) {
+	if (spc->spc_nextskim > getticks()) {
 		return;
 	}
-	spc->spc_nextskim = hardclock_ticks + mstohz(skim_interval);
+	spc->spc_nextskim = getticks() + mstohz(skim_interval);
 
 	/* In the outer loop scroll through all CPU packages, starting here. */
 	first = ci->ci_package1st;
 	outer = first;
 	do {
 		/* In the inner loop scroll through all CPUs in package. */
 		inner = outer;
 		do {
 			/* Don't hit the locks unless needed. */
 			tspc = &inner->ci_schedstate;
 			if (ci == inner || ci == mci ||
 			    spc->spc_psid != tspc->spc_psid ||
 			    tspc->spc_mcount < min_catch) {
@@ -1047,45 +1047,45 @@ sched_lwp_stats(struct lwp *l)
 /*
  * Scheduler mill.
  */
 struct lwp *
 sched_nextlwp(void)
 {
 	struct cpu_info *ci = curcpu();
 	struct schedstate_percpu *spc;
 	TAILQ_HEAD(, lwp) *q_head;
 	struct lwp *l;
 
 	/* Update the last run time on switch */
 	l = curlwp;
-	l->l_rticksum += (hardclock_ticks - l->l_rticks);
+	l->l_rticksum += (getticks() - l->l_rticks);
 
 	/* Return to idle LWP if there is a migrating thread */
 	spc = &ci->ci_schedstate;
 	if (__predict_false(spc->spc_migrating != NULL))
 		return NULL;
 
 	/* Return to idle LWP if there is no runnable job */
 	if (__predict_false(spc->spc_count == 0))
 		return NULL;
 
 	/* Take the highest priority thread */
 	KASSERT(spc->spc_bitmap[spc->spc_maxpriority >> BITMAP_SHIFT]);
 	q_head = sched_getrq(spc, spc->spc_maxpriority);
 	l = TAILQ_FIRST(q_head);
 	KASSERT(l != NULL);
 
 	sched_oncpu(l);
-	l->l_rticks = hardclock_ticks;
+	l->l_rticks = getticks();
 
 	return l;
 }
 
 /*
  * sched_curcpu_runnable_p: return if curcpu() should exit the idle loop.
  */
 
 bool
 sched_curcpu_runnable_p(void)
 {
 	const struct cpu_info *ci;
 	const struct schedstate_percpu *spc;
@@ -1194,19 +1194,19 @@ sched_print_runqueue(void (*pr)(const ch
 	(*pr)(" %5s %4s %4s %10s %3s %18s %4s %4s %s\n",
 	    "LID", "PRI", "EPRI", "FL", "ST", "LWP", "CPU", "TCI", "LRTICKS");
 
 	PROCLIST_FOREACH(p, &allproc) {
 		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
 			ci = l->l_cpu;
 			tci = l->l_target_cpu;
 			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %18p %4u %4d %u\n",
 			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
 			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
 			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
 			    l, ci->ci_index, (tci ? tci->ci_index : -1),
-			    (u_int)(hardclock_ticks - l->l_rticks));
+			    (u_int)(getticks() - l->l_rticks));
 		}
 	}
 }
 
 #endif

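The spc_nextskim test in sched_idle() above is a tick-based rate limiter:
remember the next eligible tick, skip the expensive scan until it is
reached, then re-arm. The same pattern reduced to a self-contained sketch
(the names and the signed-difference comparison are illustrative, not taken
from the kernel):

	/* One rate-limit window; interval is in ticks. */
	static unsigned next_eligible;

	static int
	rate_limited(unsigned now, unsigned interval)
	{
		/*
		 * Signed difference of unsigned counters stays correct
		 * across tick wrap on two's-complement machines.
		 */
		if ((int)(next_eligible - now) > 0)
			return 1;		/* too soon, skip the work */
		next_eligible = now + interval;	/* re-arm the window */
		return 0;
	}
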
cvs diff -r1.64 -r1.65 src/sys/kern/kern_sleepq.c

--- src/sys/kern/kern_sleepq.c 2020/04/10 17:16:21 1.64
+++ src/sys/kern/kern_sleepq.c 2020/04/13 15:54:45 1.65
@@ -1,14 +1,14 @@
-/* $NetBSD: kern_sleepq.c,v 1.64 2020/04/10 17:16:21 ad Exp $ */
+/* $NetBSD: kern_sleepq.c,v 1.65 2020/04/13 15:54:45 maxv Exp $ */
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -25,27 +25,27 @@
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * Sleep queue implementation, used by turnstiles and general sleep/wakeup
  * interfaces.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.64 2020/04/10 17:16:21 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.65 2020/04/13 15:54:45 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/cpu.h>
 #include <sys/intr.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
 #include <sys/systm.h>
 #include <sys/sleepq.h>
 #include <sys/ktrace.h>
 
@@ -141,27 +141,27 @@ sleepq_remove(sleepq_t *sq, lwp_t *l)
 
 	/*
 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
 	 * about to call mi_switch(), in which case it will yield.
 	 */
 	if ((l->l_pflag & LP_RUNNING) != 0) {
 		l->l_stat = LSONPROC;
 		l->l_slptime = 0;
 		lwp_setlock(l, spc->spc_lwplock);
 		return;
 	}
 
 	/* Update sleep time delta, call the wake-up handler of scheduler */
-	l->l_slpticksum += (hardclock_ticks - l->l_slpticks);
+	l->l_slpticksum += (getticks() - l->l_slpticks);
 	sched_wakeup(l);
 
 	/* Look for a CPU to wake up */
 	l->l_cpu = sched_takecpu(l);
 	ci = l->l_cpu;
 	spc = &ci->ci_schedstate;
 
 	/*
 	 * Set it running.
 	 */
 	spc_lock(ci);
 	lwp_setlock(l, spc->spc_mutex);
 	sched_setrunnable(l);
@@ -218,27 +218,27 @@ sleepq_enqueue(sleepq_t *sq, wchan_t wch
 	KASSERT(l->l_stat == LSONPROC);
 	KASSERT(l->l_wchan == NULL && l->l_sleepq == NULL);
 
 	l->l_syncobj = sobj;
 	l->l_wchan = wchan;
 	l->l_sleepq = sq;
 	l->l_wmesg = wmesg;
 	l->l_slptime = 0;
 	l->l_stat = LSSLEEP;
 
 	sleepq_insert(sq, l, sobj);
 
 	/* Save the time when thread has slept */
-	l->l_slpticks = hardclock_ticks;
+	l->l_slpticks = getticks();
 	sched_slept(l);
 }
 
 /*
  * sleepq_block:
  *
  *	After any intermediate step such as releasing an interlock, switch.
  *	sleepq_block() may return early under exceptional conditions, for
  *	example if the LWP's containing process is exiting.
  *
  *	timo is a timeout in ticks.  timo = 0 specifies an infinite timeout.
  */
 int

cvs diff -r1.59 -r1.60 src/sys/kern/kern_timeout.c

--- src/sys/kern/kern_timeout.c 2020/03/21 02:32:37 1.59
+++ src/sys/kern/kern_timeout.c 2020/04/13 15:54:45 1.60
@@ -1,14 +1,14 @@
-/* $NetBSD: kern_timeout.c,v 1.59 2020/03/21 02:32:37 ad Exp $ */
+/* $NetBSD: kern_timeout.c,v 1.60 2020/04/13 15:54:45 maxv Exp $ */
 
 /*-
  * Copyright (c) 2003, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -49,27 +49,27 @@
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
  * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.59 2020/03/21 02:32:37 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.60 2020/04/13 15:54:45 maxv Exp $");
 
 /*
  * Timeouts are kept in a hierarchical timing wheel.  The c_time is the
  * value of c_cpu->cc_ticks when the timeout should be called.  There are
  * four levels with 256 buckets each.  See 'Scheme 7' in "Hashed and
  * Hierarchical Timing Wheels: Efficient Data Structures for Implementing
  * a Timer Facility" by George Varghese and Tony Lauck.
  *
  * Some of the "math" in here is a bit tricky.  We have to beware of
  * wrapping ints.
  *
  * We use the fact that any element added to the queue must be added with
  * a positive time.  That means that any element `to' on the queue cannot
@@ -829,27 +829,27 @@ db_show_callout_bucket(struct callout_cp
 		if (CIRCQ_LAST(&c->c_list, kbucket))
 			break;
 	}
 }
 
 void
 db_show_callout(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
 {
 	struct callout_cpu *cc, ccb;
 	struct cpu_info *ci, cib;
 	int b;
 
 #ifndef CRASH
-	db_printf("hardclock_ticks now: %d\n", hardclock_ticks);
+	db_printf("hardclock_ticks now: %d\n", getticks());
 #endif
 	db_printf(" ticks wheel arg func\n");
 
 	/*
 	 * Don't lock the callwheel; all the other CPUs are paused
 	 * anyhow, and we might be called in a circumstance where
 	 * some other CPU was paused while holding the lock.
 	 */
 	for (ci = db_cpu_first(); ci != NULL; ci = db_cpu_next(ci)) {
 		db_read_bytes((db_addr_t)ci, sizeof(cib), (char *)&cib);
 		cc = cib.ci_data.cpu_callout;
 		db_read_bytes((db_addr_t)cc, sizeof(ccb), (char *)&ccb);
 		db_show_callout_bucket(&ccb, &cc->cc_todo, &ccb.cc_todo);

cvs diff -r1.37 -r1.38 src/sys/kern/sched_m2.c

--- src/sys/kern/sched_m2.c 2019/12/06 18:33:19 1.37
+++ src/sys/kern/sched_m2.c 2020/04/13 15:54:45 1.38
@@ -1,14 +1,14 @@
-/* $NetBSD: sched_m2.c,v 1.37 2019/12/06 18:33:19 ad Exp $ */
+/* $NetBSD: sched_m2.c,v 1.38 2020/04/13 15:54:45 maxv Exp $ */
 
 /*
  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -23,27 +23,27 @@
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 /*
  * TODO:
  *  - Implementation of fair share queue;
  *  - Support for NUMA;
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.37 2019/12/06 18:33:19 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.38 2020/04/13 15:54:45 maxv Exp $");
 
 #include <sys/param.h>
 
 #include <sys/cpu.h>
 #include <sys/callout.h>
 #include <sys/errno.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/lwp.h>
 #include <sys/mutex.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/pset.h>
@@ -245,27 +245,27 @@ sched_pstats_hook(struct lwp *l, int bat
 	 */
 	KASSERT(lwp_locked(l, NULL));
 	if (l->l_priority >= PRI_HIGHEST_TS)
 		return;
 	KASSERT(l->l_class == SCHED_OTHER);
 
 	/* If it is CPU-bound not a first time - decrease the priority */
 	prio = l->l_priority;
 	if (batch && prio != 0)
 		prio--;
 
 	/* If thread was not ran a second or more - set a high priority */
 	if (l->l_stat == LSRUN) {
-		if (l->l_rticks && (hardclock_ticks - l->l_rticks >= hz))
+		if (l->l_rticks && (getticks() - l->l_rticks >= hz))
 			prio = high_pri[prio];
 		/* Re-enqueue the thread if priority has changed */
 		if (prio != l->l_priority)
 			lwp_changepri(l, prio);
 	} else {
 		/* In other states, change the priority directly */
 		l->l_priority = prio;
 	}
 }
 
 void
 sched_oncpu(lwp_t *l)
 {

cvs diff -r1.76 -r1.77 src/sys/kern/vfs_mount.c

--- src/sys/kern/vfs_mount.c 2020/04/10 22:34:36 1.76
+++ src/sys/kern/vfs_mount.c 2020/04/13 15:54:45 1.77
@@ -1,14 +1,14 @@
-/* $NetBSD: vfs_mount.c,v 1.76 2020/04/10 22:34:36 ad Exp $ */
+/* $NetBSD: vfs_mount.c,v 1.77 2020/04/13 15:54:45 maxv Exp $ */
 
 /*-
  * Copyright (c) 1997-2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -57,27 +57,27 @@
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_mount.c,v 1.76 2020/04/10 22:34:36 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_mount.c,v 1.77 2020/04/13 15:54:45 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 
 #include <sys/atomic.h>
 #include <sys/buf.h>
 #include <sys/conf.h>
 #include <sys/fcntl.h>
 #include <sys/filedesc.h>
 #include <sys/device.h>
 #include <sys/kauth.h>
 #include <sys/kmem.h>
 #include <sys/module.h>
@@ -510,29 +510,29 @@ vfs_insmntque(vnode_t *vp, struct mount
  * If WRITECLOSE is set, only flush out regular file vnodes open for
  * writing.
  *
  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
  */
 #ifdef DEBUG
 int busyprt = 0;	/* print out busy vnodes */
 struct ctldebug debug1 = { "busyprt", &busyprt };
 #endif
 
 static vnode_t *
 vflushnext(struct vnode_iterator *marker, int *when)
 {
-	if (hardclock_ticks > *when) {
+	if (getticks() > *when) {
 		yield();
-		*when = hardclock_ticks + hz / 10;
+		*when = getticks() + hz / 10;
 	}
 	return vfs_vnode_iterator_next1(marker, NULL, NULL, true);
 }
 
 /*
  * Flush one vnode.  Referenced on entry, unreferenced on return.
  */
 static int
 vflush_one(vnode_t *vp, vnode_t *skipvp, int flags)
 {
 	int error;
 	struct vattr vattr;
 

cvs diff -r1.118 -r1.119 src/sys/kern/vfs_vnode.c

--- src/sys/kern/vfs_vnode.c 2020/04/04 20:54:42 1.118
+++ src/sys/kern/vfs_vnode.c 2020/04/13 15:54:45 1.119
@@ -1,14 +1,14 @@
-/* $NetBSD: vfs_vnode.c,v 1.118 2020/04/04 20:54:42 ad Exp $ */
+/* $NetBSD: vfs_vnode.c,v 1.119 2020/04/13 15:54:45 maxv Exp $ */
 
 /*-
  * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -145,27 +145,27 @@
  *
  * Note on v_usecount and its locking
  *
  *	At nearly all points it is known that v_usecount could be zero,
  *	the vnode_t::v_interlock will be held.  To change the count away
  *	from zero, the interlock must be held.  To change from a non-zero
  *	value to zero, again the interlock must be held.
  *
  *	Changing the usecount from a non-zero value to a non-zero value can
  *	safely be done using atomic operations, without the interlock held.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.118 2020/04/04 20:54:42 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.119 2020/04/13 15:54:45 maxv Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_pax.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 
 #include <sys/atomic.h>
 #include <sys/buf.h>
 #include <sys/conf.h>
 #include <sys/device.h>
 #include <sys/hash.h>
@@ -473,38 +473,38 @@ lru_which(vnode_t *vp)
  */
 static void
 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
 {
 	vnode_impl_t *vip;
 	int d;
 
 	/*
	 * If the vnode is on the correct list, and was put there recently,
	 * then leave it be, thus avoiding huge cache and lock contention.
	 */
	vip = VNODE_TO_VIMPL(vp);
	if (listhd == vip->vi_lrulisthd &&
-	    (hardclock_ticks - vip->vi_lrulisttm) < hz) {
+	    (getticks() - vip->vi_lrulisttm) < hz) {
	    	return;
	}

	mutex_enter(&vdrain_lock);
	d = 0;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d++;
	vip->vi_lrulisthd = listhd;
-	vip->vi_lrulisttm = hardclock_ticks;
+	vip->vi_lrulisttm = getticks();
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d--;
	if (d != 0) {
		/*
		 * Looks strange?  This is not a bug.  Don't store
		 * numvnodes unless there is a change - avoid false
		 * sharing on MP.
		 */
		numvnodes += d;
	}
	if (numvnodes > desiredvnodes || listhd == &lru_list[LRU_VRELE])
@@ -530,27 +530,27 @@ vrele_flush(struct mount *mp)
	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
		    vi_lrulist);
		vp = VIMPL_TO_VNODE(vip);
		if (vnis_marker(vp))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
-		vip->vi_lrulisttm = hardclock_ticks;
+		vip->vi_lrulisttm = getticks();
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0, LK_EXCLUSIVE);

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

@@ -612,27 +612,27 @@ vdrain_vrele(vnode_t *vp)
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it on the last lru list, the last vrele()
	 * will put it back onto the right list before
	 * its v_usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
-	vip->vi_lrulisttm = hardclock_ticks;
+	vip->vi_lrulisttm = getticks();
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_EXCLUSIVE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
 }
 

cvs diff -r1.225 -r1.226 src/sys/netinet/tcp_usrreq.c

--- src/sys/netinet/tcp_usrreq.c 2019/08/06 15:48:18 1.225
+++ src/sys/netinet/tcp_usrreq.c 2020/04/13 15:54:45 1.226
@@ -1,14 +1,14 @@
-/* $NetBSD: tcp_usrreq.c,v 1.225 2019/08/06 15:48:18 riastradh Exp $ */
+/* $NetBSD: tcp_usrreq.c,v 1.226 2020/04/13 15:54:45 maxv Exp $ */
 
 /*
  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -89,27 +89,27 @@
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)tcp_usrreq.c	8.5 (Berkeley) 6/21/95
  */
 
 /*
  * TCP protocol interface to socket abstraction.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tcp_usrreq.c,v 1.225 2019/08/06 15:48:18 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tcp_usrreq.c,v 1.226 2020/04/13 15:54:45 maxv Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
 #include "opt_tcp_debug.h"
 #include "opt_mbuftrace.h"
 #include "opt_tcp_space.h"
 #include "opt_net_mpsafe.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/mbuf.h>
@@ -246,27 +246,27 @@ tcp_fill_info(struct tcpcb *tp, struct t
 		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
 	if (tp->t_flags & TF_SACK_PERMIT)
 		ti->tcpi_options |= TCPI_OPT_SACK;
 	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
 		ti->tcpi_options |= TCPI_OPT_WSCALE;
 		ti->tcpi_snd_wscale = tp->snd_scale;
 		ti->tcpi_rcv_wscale = tp->rcv_scale;
 	}
 	if (tp->t_flags & TF_ECN_PERMIT) {
 		ti->tcpi_options |= TCPI_OPT_ECN;
 	}
 
 	ti->tcpi_rto = tp->t_rxtcur * tick;
-	ti->tcpi_last_data_recv = (long)(hardclock_ticks -
+	ti->tcpi_last_data_recv = (long)(getticks() -
 	    (int)tp->t_rcvtime) * tick;
 	ti->tcpi_rtt = ((u_int64_t)tp->t_srtt * tick) >> TCP_RTT_SHIFT;
 	ti->tcpi_rttvar = ((u_int64_t)tp->t_rttvar * tick) >> TCP_RTTVAR_SHIFT;
 
 	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
 	/* Linux API wants these in # of segments, apparently */
 	ti->tcpi_snd_cwnd = tp->snd_cwnd / tp->t_segsz;
 	ti->tcpi_snd_wnd = tp->snd_wnd / tp->t_segsz;
 
 	/*
	 * FreeBSD-specific extension fields for tcp_info.
	 */
 	ti->tcpi_rcv_space = tp->rcv_wnd;

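tcpi_last_data_recv above converts an elapsed tick count to microseconds
by multiplying with the kernel's `tick' variable (the length of one
hardclock tick in microseconds, roughly 1000000 / hz). A worked example
with an assumed hz of 100:

	#include <stdio.h>

	int
	main(void)
	{
		const long hz = 100;
		const long tick = 1000000 / hz;	/* 10000 us per tick */
		unsigned t_rcvtime = 1000;	/* tick stamp of last receive */
		unsigned now = 1050;		/* current getticks() value */

		/* 50 ticks * 10000 us = 500000 us, i.e. half a second. */
		printf("last data received %ld us ago\n",
		    (long)(now - t_rcvtime) * tick);
		return 0;
	}
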
cvs diff -r1.125 -r1.126 src/sys/uvm/uvm_pdaemon.c

--- src/sys/uvm/uvm_pdaemon.c 2020/02/23 15:46:43 1.125
+++ src/sys/uvm/uvm_pdaemon.c 2020/04/13 15:54:45 1.126
@@ -1,14 +1,14 @@
-/* $NetBSD: uvm_pdaemon.c,v 1.125 2020/02/23 15:46:43 ad Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.126 2020/04/13 15:54:45 maxv Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * Copyright (c) 1991, 1993, The Regents of the University of California.
  *
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -56,27 +56,27 @@
  *	School of Computer Science
  *	Carnegie Mellon University
  *	Pittsburgh PA 15213-3890
  *
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  */
 
 /*
  * uvm_pdaemon.c: the page daemon
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.125 2020/02/23 15:46:43 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.126 2020/04/13 15:54:45 maxv Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
 
 #define __RWLOCK_PRIVATE
 
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/pool.h>
 #include <sys/buf.h>
 #include <sys/module.h>
@@ -988,29 +988,29 @@ uvmpd_pool_drain_thread(void *arg)
 {
 	struct pool *firstpool, *curpool;
 	int bufcnt, lastslept;
 	bool cycled;
 
 	firstpool = NULL;
 	cycled = true;
 	for (;;) {
 		/*
		 * sleep until awoken by the pagedaemon.
		 */
		mutex_enter(&uvmpd_lock);
		if (!uvmpd_pool_drain_run) {
-			lastslept = hardclock_ticks;
+			lastslept = getticks();
			cv_wait(&uvmpd_pool_drain_cv, &uvmpd_lock);
-			if (hardclock_ticks != lastslept) {
+			if (getticks() != lastslept) {
				cycled = false;
				firstpool = NULL;
			}
		}
		uvmpd_pool_drain_run = false;
		mutex_exit(&uvmpd_lock);

		/*
		 * rate limit draining, otherwise in desperate circumstances
		 * this can totally saturate the system with xcall activity.
		 */
		if (cycled) {
			kpause("uvmpdlmt", false, 1, NULL);