Thu Mar 26 19:42:39 2020 UTC
Leave the idle LWPs in state LSIDL even when running, so they don't mess up
output from ps/top/etc.  Correctness isn't at stake; LWPs in other states
are temporarily on the CPU at times too (e.g.  LSZOMB, LSSLEEP).


(ad)
diff -r1.32 -r1.33 src/sys/kern/kern_idle.c
diff -r1.344 -r1.345 src/sys/kern/kern_synch.c
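
As background to the ps/top remark above, here is a minimal stand-alone C sketch of how a ps(1)/top(1)-style tool buckets LWPs by their reported state. The enum values and the display_state() helper are hypothetical stand-ins (not the kernel's <sys/proc.h> definitions or the real tools' code); they only illustrate that an idle LWP left in LSONPROC is reported as running on a CPU, while one kept in LSIDL is not.

#include <stdio.h>

/*
 * Illustrative stand-ins for the kernel's LSxxx LWP states.  The names
 * mirror the kernel's, but the values and the mapping below are invented
 * for this sketch only.
 */
enum lwp_stat {
	LSIDL,		/* idle: not counted as doing real work */
	LSRUN,		/* runnable, sitting on a run queue */
	LSSLEEP,	/* blocked waiting for an event */
	LSONPROC,	/* currently executing on a CPU */
	LSZOMB		/* exited, waiting to be reaped */
};

/* Roughly how a ps/top style tool might bucket an LWP for display. */
static char
display_state(enum lwp_stat stat)
{

	switch (stat) {
	case LSONPROC:
		return 'O';	/* reported as "on CPU" */
	case LSRUN:
		return 'R';
	case LSSLEEP:
		return 'S';
	case LSZOMB:
		return 'Z';
	case LSIDL:
	default:
		return 'I';	/* idle: not charged as a running thread */
	}
}

int
main(void)
{

	/*
	 * Before this change, each CPU's idle LWP sat in LSONPROC while
	 * spinning in idle_loop(), so every CPU appeared to be running a
	 * busy thread.  Keeping it in LSIDL makes the display reflect
	 * real work only.
	 */
	printf("idle LWP reported as LSONPROC -> '%c'\n",
	    display_state(LSONPROC));
	printf("idle LWP reported as LSIDL    -> '%c'\n",
	    display_state(LSIDL));
	return 0;
}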

cvs diff -r1.32 -r1.33 src/sys/kern/kern_idle.c

--- src/sys/kern/kern_idle.c 2020/02/15 18:12:15 1.32
+++ src/sys/kern/kern_idle.c 2020/03/26 19:42:39 1.33
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_idle.c,v 1.32 2020/02/15 18:12:15 ad Exp $	*/
+/*	$NetBSD: kern_idle.c,v 1.33 2020/03/26 19:42:39 ad Exp $	*/
 
 /*-
  * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -18,27 +18,27 @@
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.32 2020/02/15 18:12:15 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.33 2020/03/26 19:42:39 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
 #include <sys/idle.h>
 #include <sys/kthread.h>
 #include <sys/lockdebug.h>
 #include <sys/kmem.h>
 #include <sys/proc.h>
 #include <sys/atomic.h>
 
 #include <uvm/uvm.h>	/* uvm_idle */
 #include <uvm/uvm_extern.h>
 
@@ -46,28 +46,28 @@ void
 idle_loop(void *dummy)
 {
 	struct cpu_info *ci = curcpu();
 	struct schedstate_percpu *spc;
 	struct lwp *l = curlwp;
 
 	lwp_lock(l);
 	spc = &ci->ci_schedstate;
 	KASSERT(lwp_locked(l, spc->spc_lwplock));
 	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
 	/* Update start time for this thread. */
 	binuptime(&l->l_stime);
 	spc->spc_flags |= SPCF_RUNNING;
-	KASSERT(l->l_stat == LSONPROC);
 	KASSERT((l->l_pflag & LP_RUNNING) != 0);
+	l->l_stat = LSIDL;
 	lwp_unlock(l);
 
 	/*
 	 * Use spl0() here to ensure that we have the correct interrupt
 	 * priority. This may be the first thread running on the CPU,
 	 * in which case we took an odd route to get here.
 	 */
 	spl0();
 	KERNEL_UNLOCK_ALL(l, NULL);
 
 	for (;;) {
 		LOCKDEBUG_BARRIER(NULL, 0);
 		KASSERT((l->l_flag & LW_IDLE) != 0);
@@ -81,50 +81,49 @@ idle_loop(void *dummy)
 			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
 				uvm_idle();
 			}
 			if (!sched_curcpu_runnable_p()) {
 				cpu_idle();
 				if (!sched_curcpu_runnable_p() &&
 				    !ci->ci_want_resched) {
 					continue;
 				}
 			}
 		}
 		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
 		lwp_lock(l);
-		l->l_stat = LSIDL;
 		spc_lock(l->l_cpu);
 		mi_switch(l);
 		KASSERT(curlwp == l);
-		KASSERT(l->l_stat == LSONPROC);
+		KASSERT(l->l_stat == LSIDL);
 	}
 }
 
 int
 create_idle_lwp(struct cpu_info *ci)
 {
 	lwp_t *l;
 	int error;
 
 	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
 	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
 	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
 	if (error != 0)
 		panic("create_idle_lwp: error %d", error);
 	lwp_lock(l);
 	l->l_flag |= LW_IDLE;
 	if (ci != lwp0.l_cpu) {
 		/*
 		 * For secondary CPUs, the idle LWP is the first to run, and
 		 * it's directly entered from MD code without a trip through
 		 * mi_switch(). Make the picture look good in case the CPU
 		 * takes an interrupt before it calls idle_loop().
 		 */
-		l->l_stat = LSONPROC;
+		l->l_stat = LSIDL;
 		l->l_pflag |= LP_RUNNING;
 		ci->ci_onproc = l;
 	}
 	lwp_unlock(l);
 	ci->ci_data.cpu_idlelwp = l;
 
 	return error;
 }

cvs diff -r1.344 -r1.345 src/sys/kern/kern_synch.c

--- src/sys/kern/kern_synch.c 2020/03/14 20:23:51 1.344
+++ src/sys/kern/kern_synch.c 2020/03/26 19:42:39 1.345
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_synch.c,v 1.344 2020/03/14 20:23:51 ad Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.345 2020/03/26 19:42:39 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020
  *    The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
  * Daniel Sieger.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -59,27 +59,27 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.344 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.345 2020/03/26 19:42:39 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
 
 #define	__MUTEX_PRIVATE
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/cpu.h>
 #include <sys/pserialize.h>
 #include <sys/resourcevar.h>
@@ -377,31 +377,27 @@ kpreempt(uintptr_t where)
 	l = curlwp;
 	failed = 0;
 	while ((dop = l->l_dopreempt) != 0) {
 		if (l->l_stat != LSONPROC) {
 			/*
 			 * About to block (or die), let it happen.
 			 * Doesn't really count as "preemption has
 			 * been blocked", since we're going to
 			 * context switch.
 			 */
 			atomic_swap_uint(&l->l_dopreempt, 0);
 			return true;
 		}
-		if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
-			/* Can't preempt idle loop, don't count as failure. */
-			atomic_swap_uint(&l->l_dopreempt, 0);
-			return true;
-		}
+		KASSERT((l->l_flag & LW_IDLE) == 0);
 		if (__predict_false(l->l_nopreempt != 0)) {
 			/* LWP holds preemption disabled, explicitly. */
 			if ((dop & DOPREEMPT_COUNTED) == 0) {
 				kpreempt_ev_crit.ev_count++;
 			}
 			failed = (uintptr_t)&kpreempt_is_disabled;
 			break;
 		}
 		if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
 			/* Can't preempt soft interrupts yet. */
 			atomic_swap_uint(&l->l_dopreempt, 0);
 			failed = (uintptr_t)&is_softint_lwp;
 			break;
@@ -537,32 +533,30 @@ nextlwp(struct cpu_info *ci, struct sche
 	 */
 	newl = sched_nextlwp();
 	if (newl != NULL) {
 		sched_dequeue(newl);
 		KASSERT(lwp_locked(newl, spc->spc_mutex));
 		KASSERT(newl->l_cpu == ci);
 		newl->l_stat = LSONPROC;
 		newl->l_pflag |= LP_RUNNING;
 		spc->spc_curpriority = lwp_eprio(newl);
 		spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
 		lwp_setlock(newl, spc->spc_lwplock);
 	} else {
 		/*
-		 * Updates to newl here are unlocked, but newl is the idle
-		 * LWP and thus sheltered from outside interference, so no
-		 * harm is going to come of it.
+		 * The idle LWP does not get set to LSONPROC, because
+		 * otherwise it screws up the output from top(1) etc.
 		 */
 		newl = ci->ci_data.cpu_idlelwp;
-		newl->l_stat = LSONPROC;
 		newl->l_pflag |= LP_RUNNING;
 		spc->spc_curpriority = PRI_IDLE;
 		spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
 		    SPCF_IDLE;
 	}
 
 	/*
 	 * Only clear want_resched if there are no pending (slow) software
 	 * interrupts.  We can do this without an atomic, because no new
 	 * LWPs can appear in the queue due to our hold on spc_mutex, and
 	 * the update to ci_want_resched will become globally visible before
 	 * the release of spc_mutex becomes globally visible.
 	 */
@@ -830,27 +824,27 @@ mi_switch(lwp_t *l)
 		 */
 		KASSERT(l->l_cpu == ci);
 		KASSERT(ci->ci_mtx_count == -1);
 		ci->ci_mtx_count = 0;
 		splx(oldspl);
 	} else {
 		/* Nothing to do - just unlock and return. */
 		mutex_spin_exit(spc->spc_mutex);
 		l->l_pflag &= ~LP_PREEMPTING;
 		lwp_unlock(l);
 	}
 
 	KASSERT(l == curlwp);
-	KASSERT(l->l_stat == LSONPROC);
+	KASSERT(l->l_stat == LSONPROC || (l->l_flag & LW_IDLE) != 0);
 
 	SYSCALL_TIME_WAKEUP(l);
 	LOCKDEBUG_BARRIER(NULL, 1);
 }
 
 /*
  * setrunnable: change LWP state to be runnable, placing it on the run queue.
  *
  * Call with the process and LWP locked.  Will return with the LWP unlocked.
  */
 void
 setrunnable(struct lwp *l)
 {