| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: kern_lwp.c,v 1.221 2020/01/26 19:06:24 ad Exp $ */ | | 1 | /* $NetBSD: kern_lwp.c,v 1.222 2020/01/27 21:58:16 ad Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020 | | 4 | * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020 |
5 | * The NetBSD Foundation, Inc. | | 5 | * The NetBSD Foundation, Inc. |
6 | * All rights reserved. | | 6 | * All rights reserved. |
7 | * | | 7 | * |
8 | * This code is derived from software contributed to The NetBSD Foundation | | 8 | * This code is derived from software contributed to The NetBSD Foundation |
9 | * by Nathan J. Williams, and Andrew Doran. | | 9 | * by Nathan J. Williams, and Andrew Doran. |
10 | * | | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | | 11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions | | 12 | * modification, are permitted provided that the following conditions |
13 | * are met: | | 13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright | | 14 | * 1. Redistributions of source code must retain the above copyright |
| @@ -201,27 +201,27 @@ | | | @@ -201,27 +201,27 @@ |
201 | * (But not always for kernel threads. There are some special cases | | 201 | * (But not always for kernel threads. There are some special cases |
202 | * as mentioned above: soft interrupts, and the idle loops.) | | 202 | * as mentioned above: soft interrupts, and the idle loops.) |
203 | * | | 203 | * |
204 | * Note that an LWP is considered running or likely to run soon if in | | 204 | * Note that an LWP is considered running or likely to run soon if in |
205 | * one of the following states. This affects the value of p_nrlwps: | | 205 | * one of the following states. This affects the value of p_nrlwps: |
206 | * | | 206 | * |
207 | * LSRUN, LSONPROC, LSSLEEP | | 207 | * LSRUN, LSONPROC, LSSLEEP |
208 | * | | 208 | * |
209 | * p_lock does not need to be held when transitioning among these | | 209 | * p_lock does not need to be held when transitioning among these |
210 | * three states, hence p_lock is rarely taken for state transitions. | | 210 | * three states, hence p_lock is rarely taken for state transitions. |
211 | */ | | 211 | */ |
212 | | | 212 | |
213 | #include <sys/cdefs.h> | | 213 | #include <sys/cdefs.h> |
214 | __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.221 2020/01/26 19:06:24 ad Exp $"); | | 214 | __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.222 2020/01/27 21:58:16 ad Exp $"); |
215 | | | 215 | |
216 | #include "opt_ddb.h" | | 216 | #include "opt_ddb.h" |
217 | #include "opt_lockdebug.h" | | 217 | #include "opt_lockdebug.h" |
218 | #include "opt_dtrace.h" | | 218 | #include "opt_dtrace.h" |
219 | | | 219 | |
220 | #define _LWP_API_PRIVATE | | 220 | #define _LWP_API_PRIVATE |
221 | | | 221 | |
222 | #include <sys/param.h> | | 222 | #include <sys/param.h> |
223 | #include <sys/systm.h> | | 223 | #include <sys/systm.h> |
224 | #include <sys/cpu.h> | | 224 | #include <sys/cpu.h> |
225 | #include <sys/pool.h> | | 225 | #include <sys/pool.h> |
226 | #include <sys/proc.h> | | 226 | #include <sys/proc.h> |
227 | #include <sys/syscallargs.h> | | 227 | #include <sys/syscallargs.h> |
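The comment block above is unchanged context; this hunk only bumps the RCS id. It pins down which states make an LWP count as "running or likely to run soon" for p_nrlwps. As an illustration only (the helper name below is invented, not code from the tree), the rule can be restated as a predicate on l_stat, assuming the usual kernel headers such as <sys/lwp.h>:

    /*
     * Illustrative sketch, not part of kern_lwp.c: restates the
     * p_nrlwps rule from the comment above.  An LWP in LSRUN,
     * LSONPROC or LSSLEEP counts as running or likely to run soon,
     * and transitions among these three states need not take p_lock.
     */
    static inline bool
    lwp_counts_running(const struct lwp *l)
    {
    	return l->l_stat == LSRUN || l->l_stat == LSONPROC ||
    	    l->l_stat == LSSLEEP;
    }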
| @@ -643,38 +643,34 @@ lwp_wait(struct lwp *l, lwpid_t lid, lwp | | | @@ -643,38 +643,34 @@ lwp_wait(struct lwp *l, lwpid_t lid, lwp |
643 | if (error != 0) | | 643 | if (error != 0) |
644 | break; | | 644 | break; |
645 | if (nfound == 0) { | | 645 | if (nfound == 0) { |
646 | error = ESRCH; | | 646 | error = ESRCH; |
647 | break; | | 647 | break; |
648 | } | | 648 | } |
649 | | | 649 | |
650 | /* | | 650 | /* |
651 | * Note: since the lock will be dropped, need to restart on | | 651 | * Note: since the lock will be dropped, need to restart on |
652 | * wakeup to run all LWPs again, e.g. there may be new LWPs. | | 652 | * wakeup to run all LWPs again, e.g. there may be new LWPs. |
653 | */ | | 653 | */ |
654 | if (exiting) { | | 654 | if (exiting) { |
655 | KASSERT(p->p_nlwps > 1); | | 655 | KASSERT(p->p_nlwps > 1); |
656 | cv_wait(&p->p_lwpcv, p->p_lock); | | 656 | error = cv_timedwait(&p->p_lwpcv, p->p_lock, 1); |
657 | error = EAGAIN; | | | |
658 | break; | | 657 | break; |
659 | } | | 658 | } |
660 | | | 659 | |
661 | /* | | 660 | /* |
662 | * If all other LWPs are waiting for exits or suspends | | 661 | * If all other LWPs are waiting for exits or suspends |
663 | * and the supply of zombies and potential zombies is | | 662 | * and the supply of zombies and potential zombies is |
664 | * exhausted, then we are about to deadlock. | | 663 | * exhausted, then we are about to deadlock. |
665 | * | | | |
666 | * If the process is exiting (and this LWP is not the one | | | |
667 | * that is coordinating the exit) then bail out now. | | | |
668 | */ | | 664 | */ |
669 | if ((p->p_sflag & PS_WEXIT) != 0 || | | 665 | if ((p->p_sflag & PS_WEXIT) != 0 || |
670 | p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) { | | 666 | p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) { |
671 | error = EDEADLK; | | 667 | error = EDEADLK; |
672 | break; | | 668 | break; |
673 | } | | 669 | } |
674 | | | 670 | |
675 | /* | | 671 | /* |
676 | * Sit around and wait for something to happen. We'll be | | 672 | * Sit around and wait for something to happen. We'll be |
677 | * awoken if any of the conditions examined change: if an | | 673 | * awoken if any of the conditions examined change: if an |
678 | * LWP exits, is collected, or is detached. | | 674 | * LWP exits, is collected, or is detached. |
679 | */ | | 675 | */ |
680 | if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0) | | 676 | if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0) |
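The functional change in this hunk: on the exit path (exiting == true), lwp_wait() used to block indefinitely in cv_wait() and then return a hard-coded EAGAIN; it now does a one-tick cv_timedwait() and returns that call's result. In NetBSD's condvar API, cv_timedwait(cv, mtx, ticks) returns 0 when awoken and EWOULDBLOCK when the timeout expires, so the caller is re-entered periodically even if no wakeup ever arrives, and can re-evaluate the deadlock test above (p_nrlwps + p_nzlwps - p_ndlwps <= p_nlwpwait, i.e. no running LWP, zombie, or potential zombie remains beyond the LWPs already waiting). Below is a simplified approximation of the caller-side retry loop this enables; the real loop is exit_lwps() in kern_exit.c, which also does signalling work omitted here:

    /*
     * Simplified approximation, not the real exit_lwps().
     * lwp_wait() is entered and exited with p->p_lock held.
     */
    lwpid_t departed;
    int error;

    mutex_enter(p->p_lock);
    while (p->p_nlwps > 1) {
    	error = lwp_wait(l, 0, &departed, true);
    	if (error == EDEADLK || error == ESRCH)
    		break;		/* nothing left to collect */
    	/* 0 or EWOULDBLOCK: go around and re-check the LWP list. */
    }
    mutex_exit(p->p_lock);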
| @@ -829,27 +825,27 @@ lwp_create(lwp_t *l1, proc_t *p2, vaddr_ | | | @@ -829,27 +825,27 @@ lwp_create(lwp_t *l1, proc_t *p2, vaddr_ |
829 | l2->l_class = sclass; | | 825 | l2->l_class = sclass; |
830 | | | 826 | |
831 | /* | | 827 | /* |
832 | * If vfork(), we want the LWP to run fast and on the same CPU | | 828 | * If vfork(), we want the LWP to run fast and on the same CPU |
833 | * as its parent, so that it can reuse the VM context and cache | | 829 | * as its parent, so that it can reuse the VM context and cache |
834 | * footprint on the local CPU. | | 830 | * footprint on the local CPU. |
835 | */ | | 831 | */ |
836 | l2->l_kpriority = ((flags & LWP_VFORK) ? true : false); | | 832 | l2->l_kpriority = ((flags & LWP_VFORK) ? true : false); |
837 | l2->l_kpribase = PRI_KERNEL; | | 833 | l2->l_kpribase = PRI_KERNEL; |
838 | l2->l_priority = l1->l_priority; | | 834 | l2->l_priority = l1->l_priority; |
839 | l2->l_inheritedprio = -1; | | 835 | l2->l_inheritedprio = -1; |
840 | l2->l_protectprio = -1; | | 836 | l2->l_protectprio = -1; |
841 | l2->l_auxprio = -1; | | 837 | l2->l_auxprio = -1; |
842 | l2->l_flag = (l1->l_flag & (LW_WEXIT | LW_WREBOOT | LW_WCORE)); | | 838 | l2->l_flag = 0; |
843 | l2->l_pflag = LP_MPSAFE; | | 839 | l2->l_pflag = LP_MPSAFE; |
844 | TAILQ_INIT(&l2->l_ld_locks); | | 840 | TAILQ_INIT(&l2->l_ld_locks); |
845 | l2->l_psrefs = 0; | | 841 | l2->l_psrefs = 0; |
846 | kmsan_lwp_alloc(l2); | | 842 | kmsan_lwp_alloc(l2); |
847 | | | 843 | |
848 | /* | | 844 | /* |
849 | * For vfork, borrow parent's lwpctl context if it exists. | | 845 | * For vfork, borrow parent's lwpctl context if it exists. |
850 | * This also causes us to return via lwp_userret. | | 846 | * This also causes us to return via lwp_userret. |
851 | */ | | 847 | */ |
852 | if (flags & LWP_VFORK && l1->l_lwpctl) { | | 848 | if (flags & LWP_VFORK && l1->l_lwpctl) { |
853 | l2->l_lwpctl = l1->l_lwpctl; | | 849 | l2->l_lwpctl = l1->l_lwpctl; |
854 | l2->l_flag |= LW_LWPCTL; | | 850 | l2->l_flag |= LW_LWPCTL; |
855 | } | | 851 | } |
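Two notes on this hunk. First, l2->l_flag is now initialized to 0 instead of inheriting LW_WEXIT/LW_WREBOOT/LW_WCORE from l1 at this point; the inheritance reappears in the next hunk, performed under p2->p_lock and made conditional on the target process. Second, regarding the unchanged vfork comment: the lwpctl block being borrowed is the per-LWP communication area that userland maps with _lwp_ctl(2). A hedged userland sketch of what that block exposes, with names as given in the manual page:

    /*
     * Userland sketch per _lwp_ctl(2); the kernel keeps lc_curcpu
     * current across context switches.
     */
    #include <stdio.h>
    #include <lwp.h>

    void
    show_curcpu(void)
    {
    	static struct lwpctl *lc;

    	if (lc == NULL && _lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) != 0)
    		return;
    	printf("running on CPU %d\n", lc->lc_curcpu);
    }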
| @@ -911,26 +907,31 @@ lwp_create(lwp_t *l1, proc_t *p2, vaddr_ | | | @@ -911,26 +907,31 @@ lwp_create(lwp_t *l1, proc_t *p2, vaddr_ |
911 | p2->p_nlwpid = lid + 1; | | 907 | p2->p_nlwpid = lid + 1; |
912 | } else { | | 908 | } else { |
913 | lid = 0; | | 909 | lid = 0; |
914 | } | | 910 | } |
915 | | | 911 | |
916 | mutex_enter(p2->p_lock); | | 912 | mutex_enter(p2->p_lock); |
917 | | | 913 | |
918 | if ((flags & LWP_DETACHED) != 0) { | | 914 | if ((flags & LWP_DETACHED) != 0) { |
919 | l2->l_prflag = LPR_DETACHED; | | 915 | l2->l_prflag = LPR_DETACHED; |
920 | p2->p_ndlwps++; | | 916 | p2->p_ndlwps++; |
921 | } else | | 917 | } else |
922 | l2->l_prflag = 0; | | 918 | l2->l_prflag = 0; |
923 | | | 919 | |
| | | 920 | if (l1->l_proc == p2) |
| | | 921 | l2->l_flag |= (l1->l_flag & (LW_WEXIT | LW_WREBOOT | LW_WCORE)); |
| | | 922 | else |
| | | 923 | l2->l_flag |= (l1->l_flag & LW_WREBOOT); |
| | | 924 | |
924 | l2->l_sigstk = *sigstk; | | 925 | l2->l_sigstk = *sigstk; |
925 | l2->l_sigmask = *sigmask; | | 926 | l2->l_sigmask = *sigmask; |
926 | TAILQ_INIT(&l2->l_sigpend.sp_info); | | 927 | TAILQ_INIT(&l2->l_sigpend.sp_info); |
927 | sigemptyset(&l2->l_sigpend.sp_set); | | 928 | sigemptyset(&l2->l_sigpend.sp_set); |
928 | | | 929 | |
929 | if (__predict_true(lid == 0)) { | | 930 | if (__predict_true(lid == 0)) { |
930 | /* | | 931 | /* |
931 | * XXX: l_lid values are expected to be unique (for a process); | | 932 | * XXX: l_lid values are expected to be unique (for a process); |
932 | * if LWP_PIDLID is sometimes set this won't be true. | | 933 | * if LWP_PIDLID is sometimes set this won't be true. |
933 | * Once 2^31 threads have been allocated we have to | | 934 | * Once 2^31 threads have been allocated we have to |
934 | * scan to ensure we allocate a unique value. | | 935 | * scan to ensure we allocate a unique value. |
935 | */ | | 936 | */ |
936 | lid = ++p2->p_nlwpid; | | 937 | lid = ++p2->p_nlwpid; |
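The inserted block (new lines 920-923) is the other half of the l_flag change from the @@ -829 hunk: the inheritance now happens with p2->p_lock held, and the full set (LW_WEXIT | LW_WREBOOT | LW_WCORE) is copied only when l1 is creating an LWP within its own process; when the new LWP belongs to a different process (fork-style creation), only LW_WREBOOT carries over, plausibly because a pending exit or coredump is specific to the creating process and must not leak into the child. Restated as a hypothetical helper (invented name, for illustration only):

    /* Invented helper restating the inheritance rule added above. */
    static inline int
    lwp_inherited_flags(const lwp_t *l1, const proc_t *p2)
    {
    	if (l1->l_proc == p2)
    		/* Same process: pending exit/reboot/core all apply. */
    		return l1->l_flag & (LW_WEXIT | LW_WREBOOT | LW_WCORE);
    	/* Different process: only a pending reboot is relevant. */
    	return l1->l_flag & LW_WREBOOT;
    }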