Fri Dec 6 21:36:11 2019 UTC
Make it possible to call mi_switch() and immediately switch to another CPU.
This seems to take about 3us on my Intel system.  Two changes were required:

- Have the caller of mi_switch() be responsible for calling spc_lock()
  (the new calling convention is sketched below).
- Avoid using l->l_cpu in mi_switch().
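
For reference, the new calling convention at each call site, as a minimal
sketch distilled from the diffs below (kernel-internal fragment, not
standalone; the LSSTOP assignment stands in for whatever state each call
site actually sets):

	lwp_lock(l);		/* lock the LWP (spc_lwplock) */
	l->l_stat = LSSTOP;	/* per call site: LSSTOP, LSIDL, LSSUSPENDED, ... */
	spc_lock(l->l_cpu);	/* lock this CPU's run queue (spc_mutex) */
	mi_switch(l);		/* switches away; returns with both locks released */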

While here:

- Add a couple of calls to membar_enter() (see the note after this list).
- Have the idle LWP set itself to LSIDL, to match softint_thread().
- Remove unused return value from mi_switch().
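
A note on the membar_enter() calls, as read from the kern_synch.c hunks
below (interpretation, not part of the original message): each one follows
the spin-wait on newl->l_ctxswtch and pairs with the membar_exit() that the
previous CPU issues just before clearing l_ctxswtch, so the context-switch
handoff now has release/acquire ordering:

	/* Switching CPU (release side, already present): */
	membar_exit();			/* order prior stores, then... */
	prevlwp->l_ctxswtch = 0;	/* ...publish that the switch is done */

	/* Waiting CPU (acquire side, added by this change): */
	while (newl->l_ctxswtch)
		SPINLOCK_BACKOFF(count);
	membar_enter();			/* now safe to read newl's state */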


(ad)
diff -r1.484 -r1.485 src/sys/kern/kern_exec.c
diff -r1.277 -r1.278 src/sys/kern/kern_exit.c
diff -r1.27 -r1.28 src/sys/kern/kern_idle.c
diff -r1.216 -r1.217 src/sys/kern/kern_lwp.c
diff -r1.380 -r1.381 src/sys/kern/kern_sig.c
diff -r1.53 -r1.54 src/sys/kern/kern_sleepq.c
diff -r1.54 -r1.55 src/sys/kern/kern_softint.c
diff -r1.328 -r1.329 src/sys/kern/kern_synch.c
diff -r1.79 -r1.80 src/sys/sys/sched.h

cvs diff -r1.484 -r1.485 src/sys/kern/kern_exec.c

--- src/sys/kern/kern_exec.c 2019/11/23 19:42:52 1.484
+++ src/sys/kern/kern_exec.c 2019/12/06 21:36:10 1.485
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_exec.c,v 1.484 2019/11/23 19:42:52 ad Exp $	*/
+/*	$NetBSD: kern_exec.c,v 1.485 2019/12/06 21:36:10 ad Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -52,27 +52,27 @@
  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.484 2019/11/23 19:42:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.485 2019/12/06 21:36:10 ad Exp $");
 
 #include "opt_exec.h"
 #include "opt_execfmt.h"
 #include "opt_ktrace.h"
 #include "opt_modular.h"
 #include "opt_syscall_debug.h"
 #include "veriexec.h"
 #include "opt_pax.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/filedesc.h>
 #include <sys/kernel.h>
@@ -1353,26 +1353,27 @@ execve_runproc(struct lwp *l, struct exe
 		p->p_pptr->p_nstopchild++;
 		p->p_waited = 0;
 		mutex_enter(p->p_lock);
 		ksiginfo_queue_init(&kq);
 		sigclearall(p, &contsigmask, &kq);
 		lwp_lock(l);
 		l->l_stat = LSSTOP;
 		p->p_stat = SSTOP;
 		p->p_nrlwps--;
 		lwp_unlock(l);
 		mutex_exit(p->p_lock);
 		mutex_exit(proc_lock);
 		lwp_lock(l);
+		spc_lock(l->l_cpu);
 		mi_switch(l);
 		ksiginfo_queue_drain(&kq);
 		KERNEL_LOCK(l->l_biglocks, l);
 	} else {
 		mutex_exit(proc_lock);
 	}
 
 	exec_path_free(data);
 #ifdef TRACE_EXEC
 	DPRINTF(("%s finished\n", __func__));
 #endif
 	return EJUSTRETURN;
 

cvs diff -r1.277 -r1.278 src/sys/kern/kern_exit.c

--- src/sys/kern/kern_exit.c 2019/10/03 22:48:44 1.277
+++ src/sys/kern/kern_exit.c 2019/12/06 21:36:10 1.278
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_exit.c,v 1.277 2019/10/03 22:48:44 kamil Exp $	*/
+/*	$NetBSD: kern_exit.c,v 1.278 2019/12/06 21:36:10 ad Exp $	*/
 
 /*-
  * Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -57,27 +57,27 @@
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)kern_exit.c	8.10 (Berkeley) 2/23/95
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.277 2019/10/03 22:48:44 kamil Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.278 2019/12/06 21:36:10 ad Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_dtrace.h"
 #include "opt_sysv.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/ioctl.h>
 #include <sys/tty.h>
 #include <sys/time.h>
 #include <sys/resource.h>
 #include <sys/kernel.h>
 #include <sys/proc.h>
@@ -235,26 +235,27 @@ exit1(struct lwp *l, int exitcode, int s
 			mutex_enter(proc_lock);
 			mutex_enter(p->p_lock);
 		}
 		p->p_waited = 0;
 		p->p_pptr->p_nstopchild++;
 		p->p_stat = SSTOP;
 		mutex_exit(proc_lock);
 		lwp_lock(l);
 		p->p_nrlwps--;
 		l->l_stat = LSSTOP;
 		lwp_unlock(l);
 		mutex_exit(p->p_lock);
 		lwp_lock(l);
+		spc_lock(l->l_cpu);
 		mi_switch(l);
 		KERNEL_LOCK(l->l_biglocks, l);
 		mutex_enter(p->p_lock);
 	}
 
 	/*
 	 * Bin any remaining signals and mark the process as dying so it will
 	 * not be found for, e.g. signals.
 	 */
 	sigfillset(&p->p_sigctx.ps_sigignore);
 	sigclearall(p, NULL, &kq);
 	p->p_stat = SDYING;
 	mutex_exit(p->p_lock);

cvs diff -r1.27 -r1.28 src/sys/kern/kern_idle.c

--- src/sys/kern/kern_idle.c 2019/12/01 15:34:46 1.27
+++ src/sys/kern/kern_idle.c 2019/12/06 21:36:10 1.28
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_idle.c,v 1.27 2019/12/01 15:34:46 ad Exp $	*/
+/*	$NetBSD: kern_idle.c,v 1.28 2019/12/06 21:36:10 ad Exp $	*/
 
 /*-
  * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -18,27 +18,27 @@
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.27 2019/12/01 15:34:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.28 2019/12/06 21:36:10 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
 #include <sys/idle.h>
 #include <sys/kthread.h>
 #include <sys/lockdebug.h>
 #include <sys/kmem.h>
 #include <sys/proc.h>
 #include <sys/atomic.h>
 
 #include <uvm/uvm.h>	/* uvm_pageidlezero */
 #include <uvm/uvm_extern.h>
 
@@ -83,26 +83,28 @@ idle_loop(void *dummy)
 			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
 				uvm_pageidlezero();
 			}
 			if (!sched_curcpu_runnable_p()) {
 				cpu_idle();
 				if (!sched_curcpu_runnable_p() &&
 				    !ci->ci_want_resched) {
 					continue;
 				}
 			}
 		}
 		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
 		lwp_lock(l);
+		l->l_stat = LSIDL;
+		spc_lock(l->l_cpu);
 		mi_switch(l);
 		KASSERT(curlwp == l);
 		KASSERT(l->l_stat == LSONPROC);
 	}
 }
 
 int
 create_idle_lwp(struct cpu_info *ci)
 {
 	lwp_t *l;
 	int error;
 
 	KASSERT(ci->ci_data.cpu_idlelwp == NULL);

cvs diff -r1.216 -r1.217 src/sys/kern/kern_lwp.c

--- src/sys/kern/kern_lwp.c 2019/12/03 05:07:48 1.216
+++ src/sys/kern/kern_lwp.c 2019/12/06 21:36:10 1.217
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_lwp.c,v 1.216 2019/12/03 05:07:48 riastradh Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.217 2019/12/06 21:36:10 ad Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Nathan J. Williams, and Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -199,27 +199,27 @@
  *	(But not always for kernel threads.  There are some special cases
  *	as mentioned above: soft interrupts, and the idle loops.)
  *
  *	Note that an LWP is considered running or likely to run soon if in
  *	one of the following states.  This affects the value of p_nrlwps:
  *
  *	LSRUN, LSONPROC, LSSLEEP
  *
  *	p_lock does not need to be held when transitioning among these
  *	three states, hence p_lock is rarely taken for state transitions.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.216 2019/12/03 05:07:48 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.217 2019/12/06 21:36:10 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
 #include "opt_dtrace.h"
 
 #define _LWP_API_PRIVATE
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/cpu.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/syscallargs.h>
@@ -1592,26 +1592,27 @@ lwp_userret(struct lwp *l)
 	 * p->p_lwpcv so that sigexit() will write the core file out
 	 * once all other LWPs are suspended.
 	 */
 	if ((l->l_flag & LW_WSUSPEND) != 0) {
 		pcu_save_all(l);
 		mutex_enter(p->p_lock);
 		p->p_nrlwps--;
 		cv_broadcast(&p->p_lwpcv);
 		lwp_lock(l);
 		l->l_stat = LSSUSPENDED;
 		lwp_unlock(l);
 		mutex_exit(p->p_lock);
 		lwp_lock(l);
+		spc_lock(l->l_cpu);
 		mi_switch(l);
 	}
 
 	/* Process is exiting. */
 	if ((l->l_flag & LW_WEXIT) != 0) {
 		lwp_exit(l);
 		KASSERT(0);
 		/* NOTREACHED */
 	}
 
 	/* update lwpctl processor (for vfork child_return) */
 	if (l->l_flag & LW_LWPCTL) {
 		lwp_lock(l);

cvs diff -r1.380 -r1.381 src/sys/kern/kern_sig.c

--- src/sys/kern/kern_sig.c 2019/11/21 18:17:36 1.380
+++ src/sys/kern/kern_sig.c 2019/12/06 21:36:10 1.381
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_sig.c,v 1.380 2019/11/21 18:17:36 ad Exp $	*/
+/*	$NetBSD: kern_sig.c,v 1.381 2019/12/06 21:36:10 ad Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -60,27 +60,27 @@
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)kern_sig.c	8.14 (Berkeley) 5/14/95
  */
 
 /*
  * Signal subsystem.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.380 2019/11/21 18:17:36 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.381 2019/12/06 21:36:10 ad Exp $");
 
 #include "opt_ptrace.h"
 #include "opt_dtrace.h"
 #include "opt_compat_sunos.h"
 #include "opt_compat_netbsd.h"
 #include "opt_compat_netbsd32.h"
 #include "opt_pax.h"
 
 #define	SIGPROP		/* include signal properties table */
 #include <sys/param.h>
 #include <sys/signalvar.h>
 #include <sys/proc.h>
 #include <sys/ptrace.h>
@@ -1761,26 +1761,27 @@ sigswitch_unlock_and_switch_away(struct
 	KASSERT(p->p_nrlwps > 0);
 
 	KERNEL_UNLOCK_ALL(l, &biglocks);
 	if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
 		p->p_nrlwps--;
 		lwp_lock(l);
 		KASSERT(l->l_stat == LSONPROC || l->l_stat == LSSLEEP);
 		l->l_stat = LSSTOP;
 		lwp_unlock(l);
 	}
 
 	mutex_exit(p->p_lock);
 	lwp_lock(l);
+	spc_lock(l->l_cpu);
 	mi_switch(l);
 	KERNEL_LOCK(biglocks, l);
 }
 
 /*
  * Check for a signal from the debugger.
  */
 static int
 sigchecktrace(void)
 {
 	struct lwp *l = curlwp;
 	struct proc *p = l->l_proc;
 	int signo;

cvs diff -r1.53 -r1.54 src/sys/kern/kern_sleepq.c

--- src/sys/kern/kern_sleepq.c 2019/11/23 19:42:52 1.53
+++ src/sys/kern/kern_sleepq.c 2019/12/06 21:36:10 1.54
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_sleepq.c,v 1.53 2019/11/23 19:42:52 ad Exp $	*/
+/*	$NetBSD: kern_sleepq.c,v 1.54 2019/12/06 21:36:10 ad Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -25,27 +25,27 @@
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * Sleep queue implementation, used by turnstiles and general sleep/wakeup
  * interfaces.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.53 2019/11/23 19:42:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.54 2019/12/06 21:36:10 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/cpu.h>
 #include <sys/intr.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
 #include <sys/systm.h>
 #include <sys/sleepq.h>
 #include <sys/ktrace.h>
 
@@ -250,26 +250,27 @@ sleepq_block(int timo, bool catch_p)
 			error = EINTR;
 			early = true;
 		} else if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))
 			early = true;
 	}
 
 	if (early) {
 		/* lwp_unsleep() will release the lock */
 		lwp_unsleep(l, true);
 	} else {
 		if (timo) {
 			callout_schedule(&l->l_timeout_ch, timo);
 		}
+		spc_lock(l->l_cpu);
 		mi_switch(l);
 
 		/* The LWP and sleep queue are now unlocked. */
 		if (timo) {
 			/*
 			 * Even if the callout appears to have fired, we
 			 * need to stop it in order to synchronise with
 			 * other CPUs.  It's important that we do this in
 			 * this LWP's context, and not during wakeup, in
 			 * order to keep the callout & its cache lines
 			 * co-located on the CPU with the LWP.
 			 */
 			if (callout_halt(&l->l_timeout_ch, NULL))

cvs diff -r1.54 -r1.55 src/sys/kern/kern_softint.c

--- src/sys/kern/kern_softint.c 2019/12/06 18:15:57 1.54
+++ src/sys/kern/kern_softint.c 2019/12/06 21:36:10 1.55
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_softint.c,v 1.54 2019/12/06 18:15:57 ad Exp $	*/
+/*	$NetBSD: kern_softint.c,v 1.55 2019/12/06 21:36:10 ad Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -160,27 +160,27 @@
  *		interrupt;
  *	}
  *
  * Once the soft interrupt has fired (and even if it has blocked),
  * no further soft interrupts at that level will be triggered by
  * MI code until the soft interrupt handler has ceased execution.
  * If a soft interrupt handler blocks and is resumed, it resumes
  * execution as a normal LWP (kthread) and gains VM context.  Only
  * when it has completed and is ready to fire again will it
  * interrupt other threads.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.54 2019/12/06 18:15:57 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.55 2019/12/06 21:36:10 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/intr.h>
 #include <sys/ipi.h>
 #include <sys/mutex.h>
 #include <sys/kernel.h>
 #include <sys/kthread.h>
 #include <sys/evcnt.h>
 #include <sys/cpu.h>
 #include <sys/xcall.h>
 
 #include <net/netisr.h>
@@ -719,26 +719,27 @@ softint_thread(void *cookie)
 		/*
 		 * Clear pending status and run it.  We must drop the
 		 * spl before mi_switch(), since IPL_HIGH may be higher
 		 * than IPL_SCHED (and it is not safe to switch at a
 		 * higher level).
 		 */
 		s = splhigh();
 		l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
 		softint_execute(si, l, s);
 		splx(s);
 
 		lwp_lock(l);
 		l->l_stat = LSIDL;
+		spc_lock(l->l_cpu);
 		mi_switch(l);
 	}
 }
 
 /*
  * softint_picklwp:
  *
  *	Slow path: called from mi_switch() to pick the highest priority
  *	soft interrupt LWP that needs to run.
  */
 lwp_t *
 softint_picklwp(void)
 {

cvs diff -r1.328 -r1.329 src/sys/kern/kern_synch.c

--- src/sys/kern/kern_synch.c 2019/12/03 05:07:48 1.328
+++ src/sys/kern/kern_synch.c 2019/12/06 21:36:10 1.329
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_synch.c,v 1.328 2019/12/03 05:07:48 riastradh Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.329 2019/12/06 21:36:10 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019
  *	The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
  * Daniel Sieger.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -59,27 +59,27 @@
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.328 2019/12/03 05:07:48 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.329 2019/12/06 21:36:10 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
 
 #define	__MUTEX_PRIVATE
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/cpu.h>
 #include <sys/pserialize.h>
 #include <sys/resourcevar.h>
@@ -259,50 +259,56 @@ wakeup(wchan_t ident)
 
 /*
  * General yield call.  Puts the current LWP back on its run queue and
  * performs a voluntary context switch.  Should only be called when the
  * current LWP explicitly requests it (eg sched_yield(2)).
  */
 void
 yield(void)
 {
 	struct lwp *l = curlwp;
 
 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
 	lwp_lock(l);
+
 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
 	KASSERT(l->l_stat == LSONPROC);
+
 	/* Voluntary - ditch kpriority boost. */
 	l->l_kpriority = false;
-	(void)mi_switch(l);
+	spc_lock(l->l_cpu);
+	mi_switch(l);
 	KERNEL_LOCK(l->l_biglocks, l);
 }
 
 /*
  * General preemption call.  Puts the current LWP back on its run queue
 * and performs an involuntary context switch.
  */
 void
 preempt(void)
 {
 	struct lwp *l = curlwp;
 
 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
 	lwp_lock(l);
+
 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
 	KASSERT(l->l_stat == LSONPROC);
+
 	/* Involuntary - keep kpriority boost. */
 	l->l_pflag |= LP_PREEMPTING;
-	(void)mi_switch(l);
+	spc_lock(l->l_cpu);
+	mi_switch(l);
 	KERNEL_LOCK(l->l_biglocks, l);
 }
 
 /*
  * Handle a request made by another agent to preempt the current LWP
 * in-kernel.  Usually called when l_dopreempt may be non-zero.
  *
 * Character addresses for lockstat only.
  */
 static char	kpreempt_is_disabled;
 static char	kernel_lock_held;
 static char	is_softint_lwp;
 static char	spl_is_raised;
@@ -362,27 +368,29 @@ kpreempt(uintptr_t where)
 			 * It may be that the IPL is too high.
 			 * kpreempt_enter() can schedule an
 			 * interrupt to retry later.
 			 */
 			splx(s);
 			failed = (uintptr_t)&spl_is_raised;
 			break;
 		}
 		/* Do it! */
 		if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
 			kpreempt_ev_immed.ev_count++;
 		}
 		lwp_lock(l);
+		/* Involuntary - keep kpriority boost. */
 		l->l_pflag |= LP_PREEMPTING;
+		spc_lock(l->l_cpu);
 		mi_switch(l);
 		l->l_nopreempt++;
 		splx(s);
 
 		/* Take care of any MD cleanup. */
 		cpu_kpreempt_exit(where);
 		l->l_nopreempt--;
 	}
 
 	if (__predict_true(!failed)) {
 		return false;
 	}
 
@@ -491,50 +499,52 @@ nextlwp(struct cpu_info *ci, struct sche
 	 * the update to ci_want_resched will become globally visible before
 	 * the release of spc_mutex becomes globally visible.
 	 */
 	ci->ci_want_resched = ci->ci_data.cpu_softints;
 	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
 	spc->spc_curpriority = lwp_eprio(newl);
 
 	return newl;
 }
 
 /*
  * The machine independent parts of context switch.
  *
- * Returns 1 if another LWP was actually run.
+ * NOTE: do not use l->l_cpu in this routine.  The caller may have enqueued
+ * itself onto another CPU's run queue, so l->l_cpu may point elsewhere.
  */
-int
+void
 mi_switch(lwp_t *l)
 {
 	struct cpu_info *ci;
 	struct schedstate_percpu *spc;
 	struct lwp *newl;
-	int retval, oldspl;
+	int oldspl;
 	struct bintime bt;
 	bool returning;
 
 	KASSERT(lwp_locked(l, NULL));
 	KASSERT(kpreempt_disabled());
+	KASSERT(mutex_owned(curcpu()->ci_schedstate.spc_mutex));
 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
 
 	kstack_check_magic(l);
 
 	binuptime(&bt);
 
 	KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
 	KASSERT((l->l_pflag & LP_RUNNING) != 0);
-	KASSERT(l->l_cpu == curcpu());
-	ci = l->l_cpu;
+	KASSERT(l->l_cpu == curcpu() || l->l_stat == LSRUN);
+	ci = curcpu();
 	spc = &ci->ci_schedstate;
 	returning = false;
 	newl = NULL;
 
 	/*
 	 * If we have been asked to switch to a specific LWP, then there
 	 * is no need to inspect the run queues.  If a soft interrupt is
 	 * blocking, then return to the interrupted thread without adjusting
 	 * VM context or its start time: neither have been changed in order
 	 * to take the interrupt.
 	 */
 	if (l->l_switchto != NULL) {
 		if ((l->l_pflag & LP_INTR) != 0) {
@@ -545,51 +555,44 @@ mi_switch(lwp_t *l)
 		}
 		newl = l->l_switchto;
 		l->l_switchto = NULL;
 	}
 #ifndef __HAVE_FAST_SOFTINTS
 	else if (ci->ci_data.cpu_softints != 0) {
 		/* There are pending soft interrupts, so pick one. */
 		newl = softint_picklwp();
 		newl->l_stat = LSONPROC;
 		newl->l_pflag |= LP_RUNNING;
 	}
 #endif	/* !__HAVE_FAST_SOFTINTS */
 
-	/* Lock the runqueue */
-	KASSERT(l->l_stat != LSRUN);
-	mutex_spin_enter(spc->spc_mutex);
-
 	/*
 	 * If on the CPU and we have gotten this far, then we must yield.
 	 */
 	if (l->l_stat == LSONPROC && l != newl) {
 		KASSERT(lwp_locked(l, spc->spc_lwplock));
-		if ((l->l_flag & LW_IDLE) == 0) {
-			l->l_stat = LSRUN;
-			lwp_setlock(l, spc->spc_mutex);
-			sched_enqueue(l);
-			/*
-			 * Handle migration.  Note that "migrating LWP" may
-			 * be reset here, if interrupt/preemption happens
-			 * early in idle LWP.
-			 */
-			if (l->l_target_cpu != NULL &&
-			    (l->l_pflag & LP_BOUND) == 0) {
-				KASSERT((l->l_pflag & LP_INTR) == 0);
-				spc->spc_migrating = l;
-			}
-		} else
-			l->l_stat = LSIDL;
+		KASSERT((l->l_flag & LW_IDLE) == 0);
+		l->l_stat = LSRUN;
+		lwp_setlock(l, spc->spc_mutex);
+		sched_enqueue(l);
+		/*
+		 * Handle migration.  Note that "migrating LWP" may
+		 * be reset here, if interrupt/preemption happens
+		 * early in idle LWP.
+		 */
+		if (l->l_target_cpu != NULL && (l->l_pflag & LP_BOUND) == 0) {
+			KASSERT((l->l_pflag & LP_INTR) == 0);
+			spc->spc_migrating = l;
+		}
 	}
 
 	/* Pick new LWP to run. */
 	if (newl == NULL) {
 		newl = nextlwp(ci, spc);
 	}
 
 	/* Items that must be updated with the CPU locked. */
 	if (!returning) {
 		/* Count time spent in current system call */
 		SYSCALL_TIME_SLEEP(l);
 
 		updatertime(l, &bt);
@@ -684,26 +687,27 @@ mi_switch(lwp_t *l)
 		if (!returning)
 			pmap_deactivate(l);
 
 		/*
 		 * We may need to spin-wait if 'newl' is still
 		 * context switching on another CPU.
 		 */
 		if (__predict_false(newl->l_ctxswtch != 0)) {
 			u_int count;
 			count = SPINLOCK_BACKOFF_MIN;
 			while (newl->l_ctxswtch)
 				SPINLOCK_BACKOFF(count);
 		}
+		membar_enter();
 
 		/*
 		 * If DTrace has set the active vtime enum to anything
 		 * other than INACTIVE (0), then it should have set the
 		 * function to call.
 		 */
 		if (__predict_false(dtrace_vtime_active)) {
 			(*dtrace_vtime_switch_func)(newl);
 		}
 
 		/*
 		 * We must ensure not to come here from inside a read section.
 		 */
@@ -733,48 +737,45 @@ mi_switch(lwp_t *l)
 			/* Normalize the count of the spin-mutexes */
 			ci->ci_mtx_count++;
 			/* Unmark the state of context switch */
 			membar_exit();
 			prevlwp->l_ctxswtch = 0;
 		}
 
 		/* Update status for lwpctl, if present. */
 		if (l->l_lwpctl != NULL) {
 			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
 			l->l_lwpctl->lc_pctr++;
 		}
 
-		KASSERT(l->l_cpu == ci);
-		splx(oldspl);
 		/*
-		 * note that, unless the caller disabled preemption,
-		 * we can be preempted at any time after the above splx() call.
+		 * Note that, unless the caller disabled preemption, we can
+		 * be preempted at any time after this splx().
 		 */
-		retval = 1;
+		splx(oldspl);
 	} else {
 		/* Nothing to do - just unlock and return. */
 		mutex_spin_exit(spc->spc_mutex);
 		l->l_pflag &= ~LP_PREEMPTING;
 		lwp_unlock(l);
-		retval = 0;
 	}
 
+	/* Only now is it safe to consider l_cpu again. */
 	KASSERT(l == curlwp);
+	KASSERT(l->l_cpu == ci);
 	KASSERT(l->l_stat == LSONPROC);
 
 	SYSCALL_TIME_WAKEUP(l);
 	LOCKDEBUG_BARRIER(NULL, 1);
-
-	return retval;
 }
 
 /*
  * The machine independent parts of context switch to oblivion.
  * Does not return.  Call with the LWP unlocked.
  */
 void
 lwp_exit_switchaway(lwp_t *l)
 {
 	struct cpu_info *ci;
 	struct lwp *newl;
 	struct bintime bt;
 
@@ -838,26 +839,27 @@ lwp_exit_switchaway(lwp_t *l)
 	if (l->l_lwpctl != NULL)
 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
 
 	/*
 	 * We may need to spin-wait if 'newl' is still
 	 * context switching on another CPU.
 	 */
 	if (__predict_false(newl->l_ctxswtch != 0)) {
 		u_int count;
 		count = SPINLOCK_BACKOFF_MIN;
 		while (newl->l_ctxswtch)
 			SPINLOCK_BACKOFF(count);
 	}
+	membar_enter();
 
 	/*
 	 * If DTrace has set the active vtime enum to anything
 	 * other than INACTIVE (0), then it should have set the
 	 * function to call.
 	 */
 	if (__predict_false(dtrace_vtime_active)) {
 		(*dtrace_vtime_switch_func)(newl);
 	}
 
 	/* Switch to the new LWP.. */
 	(void)cpu_switchto(NULL, newl, false);
 

cvs diff -r1.79 -r1.80 src/sys/sys/sched.h

--- src/sys/sys/sched.h 2019/12/03 22:28:41 1.79
+++ src/sys/sys/sched.h 2019/12/06 21:36:10 1.80
@@ -1,14 +1,14 @@
-/*	$NetBSD: sched.h,v 1.79 2019/12/03 22:28:41 ad Exp $	*/
+/*	$NetBSD: sched.h,v 1.80 2019/12/06 21:36:10 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2007, 2008, 2019
  *	The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Ross Harvey, Jason R. Thorpe, Nathan J. Williams, Andrew Doran and
  * Daniel Sieger.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -256,23 +256,23 @@ void sched_slept(struct lwp *);
 void		sched_wakeup(struct lwp *);
 
 void		setrunnable(struct lwp *);
 void		sched_setrunnable(struct lwp *);
 
 struct cpu_info *sched_takecpu(struct lwp *);
 void		sched_print_runqueue(void (*pr)(const char *, ...)
		    __printflike(1, 2));
 
 /* Dispatching */
 bool		kpreempt(uintptr_t);
 void		preempt(void);
 void		yield(void);
-int		mi_switch(struct lwp *);
+void		mi_switch(struct lwp *);
 void		updatertime(lwp_t *, const struct bintime *);
 void		sched_idle(void);
 void		suspendsched(void);
 
 int	do_sched_setparam(pid_t, lwpid_t, int, const struct sched_param *);
 int	do_sched_getparam(pid_t, lwpid_t, int *, struct sched_param *);
 
 #endif	/* _KERNEL */
 #endif	/* _SYS_SCHED_H_ */