@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_synch.c,v 1.344 2020/03/14 20:23:51 ad Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.345 2020/03/26 19:42:39 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020
  *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
  * Daniel Sieger.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -59,27 +59,27 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.344 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.345 2020/03/26 19:42:39 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
 
 #define	__MUTEX_PRIVATE
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/cpu.h>
 #include <sys/pserialize.h>
 #include <sys/resourcevar.h>
@@ -377,31 +377,27 @@ kpreempt(uintptr_t where)
 	l = curlwp;
 	failed = 0;
 	while ((dop = l->l_dopreempt) != 0) {
 		if (l->l_stat != LSONPROC) {
 			/*
 			 * About to block (or die), let it happen.
 			 * Doesn't really count as "preemption has
 			 * been blocked", since we're going to
 			 * context switch.
 			 */
 			atomic_swap_uint(&l->l_dopreempt, 0);
 			return true;
 		}
-		if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
-			/* Can't preempt idle loop, don't count as failure. */
-			atomic_swap_uint(&l->l_dopreempt, 0);
-			return true;
-		}
+		KASSERT((l->l_flag & LW_IDLE) == 0);
 		if (__predict_false(l->l_nopreempt != 0)) {
 			/* LWP holds preemption disabled, explicitly. */
 			if ((dop & DOPREEMPT_COUNTED) == 0) {
 				kpreempt_ev_crit.ev_count++;
 			}
 			failed = (uintptr_t)&kpreempt_is_disabled;
 			break;
 		}
 		if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
 			/* Can't preempt soft interrupts yet. */
 			atomic_swap_uint(&l->l_dopreempt, 0);
 			failed = (uintptr_t)&is_softint_lwp;
 			break;
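
Note on the kpreempt() hunk above: it demotes a tolerated runtime case into an asserted invariant. With this revision the idle LWP is expected never to reach kpreempt(), so the early-return branch becomes a KASSERT that fires on DIAGNOSTIC kernels. A minimal standalone sketch of the same pattern, using standard assert() in place of the kernel's KASSERT (the struct and flag bit below are stand-ins, not the kernel definitions):

#include <assert.h>
#include <stdbool.h>

#define LW_IDLE	0x01			/* stand-in flag bit */

struct lwp { int l_flag; };		/* stand-in for the kernel's lwp */

/* Before: quietly tolerate the idle LWP and bail out. */
static bool
try_preempt_old(struct lwp *l)
{
	if ((l->l_flag & LW_IDLE) != 0)
		return true;		/* can't preempt the idle loop */
	/* ... perform the preemption ... */
	return true;
}

/*
 * After: callers guarantee the idle LWP never gets here, so the
 * branch becomes an assertion that documents and enforces that.
 */
static bool
try_preempt_new(struct lwp *l)
{
	assert((l->l_flag & LW_IDLE) == 0);
	/* ... perform the preemption ... */
	return true;
}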
@@ -537,32 +533,30 @@ nextlwp(struct cpu_info *ci, struct sche
 	 */
 	newl = sched_nextlwp();
 	if (newl != NULL) {
 		sched_dequeue(newl);
 		KASSERT(lwp_locked(newl, spc->spc_mutex));
 		KASSERT(newl->l_cpu == ci);
 		newl->l_stat = LSONPROC;
 		newl->l_pflag |= LP_RUNNING;
 		spc->spc_curpriority = lwp_eprio(newl);
 		spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
 		lwp_setlock(newl, spc->spc_lwplock);
 	} else {
 		/*
-		 * Updates to newl here are unlocked, but newl is the idle
-		 * LWP and thus sheltered from outside interference, so no
-		 * harm is going to come of it.
+		 * The idle LWP does not get set to LSONPROC, because
+		 * otherwise it screws up the output from top(1) etc.
 		 */
 		newl = ci->ci_data.cpu_idlelwp;
-		newl->l_stat = LSONPROC;
 		newl->l_pflag |= LP_RUNNING;
 		spc->spc_curpriority = PRI_IDLE;
 		spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
 		    SPCF_IDLE;
 	}
 
 	/*
 	 * Only clear want_resched if there are no pending (slow) software
 	 * interrupts. We can do this without an atomic, because no new
 	 * LWPs can appear in the queue due to our hold on spc_mutex, and
 	 * the update to ci_want_resched will become globally visible before
 	 * the release of spc_mutex becomes globally visible.
 	 */
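
Note on the nextlwp() hunk above, which is the core of the change: when a CPU goes idle, its idle LWP now keeps its previous l_stat instead of being marked LSONPROC, and is identifiable as running only by LW_IDLE together with LP_RUNNING. Any check that used l_stat == LSONPROC to mean "currently on a CPU" must now allow for the idle case, which is exactly what the relaxed KASSERT in the mi_switch() hunk below does. A hedged sketch of the resulting predicate (the constants and helper name are illustrative, not part of this commit):

#include <stdbool.h>

#define LSONPROC	7		/* illustrative value */
#define LW_IDLE		0x01		/* illustrative flag bit */

struct lwp { int l_stat; int l_flag; };

/*
 * After this change, an LWP running on a CPU is either LSONPROC or
 * the idle LWP, which keeps its old state while LP_RUNNING is set.
 */
static bool
lwp_is_on_cpu(const struct lwp *l)
{
	return l->l_stat == LSONPROC || (l->l_flag & LW_IDLE) != 0;
}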
@@ -830,27 +824,27 @@ mi_switch(lwp_t *l)
 		 */
 		KASSERT(l->l_cpu == ci);
 		KASSERT(ci->ci_mtx_count == -1);
 		ci->ci_mtx_count = 0;
 		splx(oldspl);
 	} else {
 		/* Nothing to do - just unlock and return. */
 		mutex_spin_exit(spc->spc_mutex);
 		l->l_pflag &= ~LP_PREEMPTING;
 		lwp_unlock(l);
 	}
 
 	KASSERT(l == curlwp);
-	KASSERT(l->l_stat == LSONPROC);
+	KASSERT(l->l_stat == LSONPROC || (l->l_flag & LW_IDLE) != 0);
 
 	SYSCALL_TIME_WAKEUP(l);
 	LOCKDEBUG_BARRIER(NULL, 1);
 }
 
 /*
  * setrunnable: change LWP state to be runnable, placing it on the run queue.
  *
  * Call with the process and LWP locked. Will return with the LWP unlocked.
  */
 void
 setrunnable(struct lwp *l)
 {
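
Note on the mi_switch() hunk above, the other half of the nextlwp() change: after switching to the idle LWP, curlwp is legitimately not in LSONPROC, so the post-switch assertion gains an LW_IDLE escape. The motivation given in the updated comment ("screws up the output from top(1) etc.") comes down to status tools deriving a display state from l_stat. A toy illustration of the effect (the mapping and values are made up for the example, not ps(1)'s actual tables):

#include <stdio.h>

/* Illustrative state values, not the kernel's actual numbering. */
enum lwp_stat { LSIDL = 1, LSRUN = 2, LSSLEEP = 3, LSONPROC = 7 };

static char
display_state(enum lwp_stat stat)
{
	switch (stat) {
	case LSONPROC:	return 'O';	/* on a CPU */
	case LSRUN:	return 'R';	/* runnable */
	case LSSLEEP:	return 'S';	/* sleeping */
	default:	return 'I';	/* idle */
	}
}

int
main(void)
{
	/*
	 * Old behaviour: the idle LWP sat in LSONPROC whenever its CPU
	 * was idle, so tools showed it as running.  New behaviour: it
	 * stays in LSIDL and shows up as idle.
	 */
	printf("idle lwp, old: %c  new: %c\n",
	    display_state(LSONPROC), display_state(LSIDL));
	return 0;
}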