Sun Dec 1 13:20:42 2019 UTC
PR port-sparc/54718 (sparc install hangs since recent scheduler changes)

- sched_tick: cpu_need_resched is no longer the correct thing to do here.
  All we need to do is OR the request into the local ci_want_resched.

- sched_resched_cpu: we need to set RESCHED_UPREEMPT even on softint LWPs,
  especially in the !__HAVE_FAST_SOFTINTS case, because the LWP with the
  LP_INTR flag could be running via softint_overlay() - i.e. it has been
  temporarily borrowed from a user process, and it needs to notice the
  resched after it has stopped running softints.


(ad)
diff -r1.50 -r1.51 src/sys/kern/kern_runq.c
diff -r1.38 -r1.39 src/sys/kern/sched_4bsd.c
diff -r1.34 -r1.35 src/sys/kern/sched_m2.c
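
For orientation before the diffs: both tick handlers now post the resched
request by atomically ORing a RESCHED_* flag into the CPU-local
ci_want_resched word instead of calling cpu_need_resched(). The stand-alone
C sketch below only illustrates that pattern; the toy_* names, the flag
values and the consumer side are invented for the example and are not the
NetBSD code (the committed change is in the diffs that follow).

/*
 * Toy model of "just OR the request into ci_want_resched".
 * Illustrative only - not NetBSD kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

#define RESCHED_UPREEMPT	0x01	/* illustrative flag values */
#define RESCHED_KPREEMPT	0x02

struct toy_cpu {
	atomic_uint want_resched;	/* stands in for ci_want_resched */
};

/* What sched_tick() now amounts to: OR the request into the local word. */
static void
toy_request_resched(struct toy_cpu *ci, unsigned int how)
{
	atomic_fetch_or_explicit(&ci->want_resched, how,
	    memory_order_relaxed);
}

/* Hypothetical consumer: fetch and clear the pending requests. */
static unsigned int
toy_consume_resched(struct toy_cpu *ci)
{
	return atomic_exchange_explicit(&ci->want_resched, 0,
	    memory_order_acq_rel);
}

int
main(void)
{
	struct toy_cpu cpu;
	unsigned int f;

	atomic_init(&cpu.want_resched, 0);
	toy_request_resched(&cpu, RESCHED_UPREEMPT);
	toy_request_resched(&cpu, RESCHED_KPREEMPT);	/* requests accumulate */
	f = toy_consume_resched(&cpu);
	printf("pending resched flags: %#x\n", f);	/* prints 0x3 */
	return 0;
}

Because the bits are ORed in atomically, concurrent requesters cannot lose
each other's flags, which is presumably what makes the plain OR in
sched_tick() sufficient now that cpu_need_resched() is not used there.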

cvs diff -r1.50 -r1.51 src/sys/kern/kern_runq.c

--- src/sys/kern/kern_runq.c 2019/11/27 20:31:13 1.50
+++ src/sys/kern/kern_runq.c 2019/12/01 13:20:42 1.51
@@ -1,14 +1,14 @@
-/* $NetBSD: kern_runq.c,v 1.50 2019/11/27 20:31:13 ad Exp $ */
+/* $NetBSD: kern_runq.c,v 1.51 2019/12/01 13:20:42 ad Exp $ */
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -46,27 +46,27 @@
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.50 2019/11/27 20:31:13 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.51 2019/12/01 13:20:42 ad Exp $");
 
 #include "opt_dtrace.h"
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/bitops.h>
 #include <sys/cpu.h>
 #include <sys/idle.h>
 #include <sys/intr.h>
 #include <sys/kmem.h>
 #include <sys/lwp.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
@@ -371,33 +371,28 @@ sched_resched_cpu(struct cpu_info *ci, p
 	if (pri <= spc->spc_curpriority || !mp_online) {
 		if (__predict_true(unlock)) {
 			spc_unlock(ci);
 		}
 		return;
 	}
 
 	/*
 	 * Figure out what kind of preemption we should do.
 	 */
 	l = ci->ci_data.cpu_onproc;
 	if ((l->l_flag & LW_IDLE) != 0) {
 		f = RESCHED_IDLE | RESCHED_UPREEMPT;
-	} else if ((l->l_pflag & LP_INTR) != 0) {
-		/* We can't currently preempt interrupt LWPs - should do. */
-		if (__predict_true(unlock)) {
-			spc_unlock(ci);
-		}
-		return;
-	} else if (pri >= sched_kpreempt_pri) {
+	} else if (pri >= sched_kpreempt_pri && (l->l_pflag & LP_INTR) == 0) {
+		/* We can't currently preempt softints - should be able to. */
 #ifdef __HAVE_PREEMPTION
 		f = RESCHED_KPREEMPT;
 #else
 		/* Leave door open for test: set kpreempt_pri with sysctl. */
 		f = RESCHED_UPREEMPT;
 #endif
 		/*
 		 * l_dopreempt must be set with the CPU locked to sync with
 		 * mi_switch(). It must also be set with an atomic to sync
 		 * with kpreempt().
 		 */
 		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
 	} else {

cvs diff -r1.38 -r1.39 src/sys/kern/sched_4bsd.c

--- src/sys/kern/sched_4bsd.c 2019/11/29 18:29:45 1.38
+++ src/sys/kern/sched_4bsd.c 2019/12/01 13:20:42 1.39
@@ -1,14 +1,14 @@
-/* $NetBSD: sched_4bsd.c,v 1.38 2019/11/29 18:29:45 ad Exp $ */
+/* $NetBSD: sched_4bsd.c,v 1.39 2019/12/01 13:20:42 ad Exp $ */
 
 /*
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2019
  *    The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
  * Daniel Sieger.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -59,27 +59,27 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.38 2019/11/29 18:29:45 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.39 2019/12/01 13:20:42 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/callout.h>
 #include <sys/cpu.h>
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
 #include <sys/sysctl.h>
@@ -120,52 +120,52 @@ sched_tick(struct cpu_info *ci)
 	/*
 	 * Can only be spc_lwplock or a turnstile lock at this point
 	 * (if we interrupted priority inheritance trylock dance).
 	 */
 	KASSERT(l->l_mutex != spc->spc_mutex);
 	switch (l->l_class) {
 	case SCHED_FIFO:
 		/* No timeslicing for FIFO jobs. */
 		break;
 	case SCHED_RR:
 		/* Force it into mi_switch() to look for other jobs to run. */
 #ifdef __HAVE_PREEMPTION
 		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
-		cpu_need_resched(ci, l, RESCHED_KPREEMPT);
+		atomic_or_uint(&ci->ci_want_resched, RESCHED_KPREEMPT);
 #else
-		cpu_need_resched(ci, l, RESCHED_UPREEMPT);
+		atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
 #endif
 		break;
 	default:
 		if (spc->spc_flags & SPCF_SHOULDYIELD) {
 			/*
 			 * Process is stuck in kernel somewhere, probably
 			 * due to buggy or inefficient code. Force a
 			 * kernel preemption.
 			 */
 #ifdef __HAVE_PREEMPTION
 			atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
-			cpu_need_resched(ci, l, RESCHED_KPREEMPT);
+			atomic_or_uint(&ci->ci_want_resched, RESCHED_KPREEMPT);
 #else
-			cpu_need_resched(ci, l, RESCHED_UPREEMPT);
+			atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
 #endif
 		} else if (spc->spc_flags & SPCF_SEENRR) {
 			/*
 			 * The process has already been through a roundrobin
 			 * without switching and may be hogging the CPU.
 			 * Indicate that the process should yield.
 			 */
 			spc->spc_flags |= SPCF_SHOULDYIELD;
-			cpu_need_resched(ci, l, RESCHED_UPREEMPT);
+			atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
 		} else {
 			spc->spc_flags |= SPCF_SEENRR;
 		}
 		break;
 	}
 }
 
 /*
  * Why PRIO_MAX - 2? From setpriority(2):
  *
  * prio is a value in the range -20 to 20. The default priority is
  * 0; lower priorities cause more favorable scheduling. A value of
  * 19 or 20 will schedule a process only when nothing at priority <=

cvs diff -r1.34 -r1.35 src/sys/kern/sched_m2.c

--- src/sys/kern/sched_m2.c 2019/11/22 20:07:53 1.34
+++ src/sys/kern/sched_m2.c 2019/12/01 13:20:42 1.35
@@ -1,14 +1,14 @@
-/* $NetBSD: sched_m2.c,v 1.34 2019/11/22 20:07:53 ad Exp $ */
+/* $NetBSD: sched_m2.c,v 1.35 2019/12/01 13:20:42 ad Exp $ */
 
 /*
  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -23,27 +23,27 @@
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 /*
  * TODO:
  *  - Implementation of fair share queue;
  *  - Support for NUMA;
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.34 2019/11/22 20:07:53 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.35 2019/12/01 13:20:42 ad Exp $");
 
 #include <sys/param.h>
 
 #include <sys/cpu.h>
 #include <sys/callout.h>
 #include <sys/errno.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/lwp.h>
 #include <sys/mutex.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/pset.h>
@@ -320,27 +320,27 @@ sched_tick(struct cpu_info *ci)
 			const int n = uimax((p->p_nice - NZERO) >> 2, 1);
 			l->l_priority = imax(l->l_priority - n, 0);
 		} else
 			l->l_priority--;
 		break;
 	}
 
 	/*
 	 * If there are higher priority threads or threads in the same queue,
 	 * mark that thread should yield, otherwise, continue running.
 	 */
 	if (lwp_eprio(l) <= spc->spc_maxpriority || l->l_target_cpu) {
 		spc->spc_flags |= SPCF_SHOULDYIELD;
-		cpu_need_resched(ci, l, RESCHED_UPREEMPT);
+		atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
 	} else
 		spc->spc_ticks = l->l_sched.timeslice;
 	lwp_unlock(l);
 }
 
 /*
  * Sysctl nodes and initialization.
  */
 
 static int
 sysctl_sched_rtts(SYSCTLFN_ARGS)
 {
 	struct sysctlnode node;