| @@ -1,1033 +1,1033 @@ | | | @@ -1,1033 +1,1033 @@ |
1 | /* $NetBSD: kern_exit.c,v 1.214.4.2.2.1 2015/11/07 20:42:59 snj Exp $ */ | | 1 | /* $NetBSD: kern_exit.c,v 1.214.4.2.2.2 2015/11/07 20:49:19 snj Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
9 | * NASA Ames Research Center, and by Andrew Doran. | | 9 | * NASA Ames Research Center, and by Andrew Doran. |
10 | * | | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | | 11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions | | 12 | * modification, are permitted provided that the following conditions |
13 | * are met: | | 13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright | | 14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. | | 15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright | | 16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the | | 17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. | | 18 | * documentation and/or other materials provided with the distribution. |
19 | * | | 19 | * |
20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | * POSSIBILITY OF SUCH DAMAGE. | | 30 | * POSSIBILITY OF SUCH DAMAGE. |
31 | */ | | 31 | */ |
32 | | | 32 | |
33 | /* | | 33 | /* |
34 | * Copyright (c) 1982, 1986, 1989, 1991, 1993 | | 34 | * Copyright (c) 1982, 1986, 1989, 1991, 1993 |
35 | * The Regents of the University of California. All rights reserved. | | 35 | * The Regents of the University of California. All rights reserved. |
36 | * (c) UNIX System Laboratories, Inc. | | 36 | * (c) UNIX System Laboratories, Inc. |
37 | * All or some portions of this file are derived from material licensed | | 37 | * All or some portions of this file are derived from material licensed |
38 | * to the University of California by American Telephone and Telegraph | | 38 | * to the University of California by American Telephone and Telegraph |
39 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with | | 39 | * Co. or Unix System Laboratories, Inc. and are reproduced herein with |
40 | * the permission of UNIX System Laboratories, Inc. | | 40 | * the permission of UNIX System Laboratories, Inc. |
41 | * | | 41 | * |
42 | * Redistribution and use in source and binary forms, with or without | | 42 | * Redistribution and use in source and binary forms, with or without |
43 | * modification, are permitted provided that the following conditions | | 43 | * modification, are permitted provided that the following conditions |
44 | * are met: | | 44 | * are met: |
45 | * 1. Redistributions of source code must retain the above copyright | | 45 | * 1. Redistributions of source code must retain the above copyright |
46 | * notice, this list of conditions and the following disclaimer. | | 46 | * notice, this list of conditions and the following disclaimer. |
47 | * 2. Redistributions in binary form must reproduce the above copyright | | 47 | * 2. Redistributions in binary form must reproduce the above copyright |
48 | * notice, this list of conditions and the following disclaimer in the | | 48 | * notice, this list of conditions and the following disclaimer in the |
49 | * documentation and/or other materials provided with the distribution. | | 49 | * documentation and/or other materials provided with the distribution. |
50 | * 3. Neither the name of the University nor the names of its contributors | | 50 | * 3. Neither the name of the University nor the names of its contributors |
51 | * may be used to endorse or promote products derived from this software | | 51 | * may be used to endorse or promote products derived from this software |
52 | * without specific prior written permission. | | 52 | * without specific prior written permission. |
53 | * | | 53 | * |
54 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 54 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
56 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 56 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
57 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 57 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
58 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 58 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
59 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 59 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
60 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 60 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
61 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 61 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
62 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 62 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
63 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 63 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
64 | * SUCH DAMAGE. | | 64 | * SUCH DAMAGE. |
65 | * | | 65 | * |
66 | * @(#)kern_exit.c 8.10 (Berkeley) 2/23/95 | | 66 | * @(#)kern_exit.c 8.10 (Berkeley) 2/23/95 |
67 | */ | | 67 | */ |
68 | | | 68 | |
69 | #include <sys/cdefs.h> | | 69 | #include <sys/cdefs.h> |
70 | __KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.214.4.2.2.1 2015/11/07 20:42:59 snj Exp $"); | | 70 | __KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.214.4.2.2.2 2015/11/07 20:49:19 snj Exp $"); |
71 | | | 71 | |
72 | #include "opt_ktrace.h" | | 72 | #include "opt_ktrace.h" |
73 | #include "opt_perfctrs.h" | | 73 | #include "opt_perfctrs.h" |
74 | #include "opt_sa.h" | | 74 | #include "opt_sa.h" |
75 | #include "opt_sysv.h" | | 75 | #include "opt_sysv.h" |
76 | | | 76 | |
77 | #include <sys/param.h> | | 77 | #include <sys/param.h> |
78 | #include <sys/aio.h> | | 78 | #include <sys/aio.h> |
79 | #include <sys/systm.h> | | 79 | #include <sys/systm.h> |
80 | #include <sys/ioctl.h> | | 80 | #include <sys/ioctl.h> |
81 | #include <sys/tty.h> | | 81 | #include <sys/tty.h> |
82 | #include <sys/time.h> | | 82 | #include <sys/time.h> |
83 | #include <sys/resource.h> | | 83 | #include <sys/resource.h> |
84 | #include <sys/kernel.h> | | 84 | #include <sys/kernel.h> |
85 | #include <sys/proc.h> | | 85 | #include <sys/proc.h> |
86 | #include <sys/buf.h> | | 86 | #include <sys/buf.h> |
87 | #include <sys/wait.h> | | 87 | #include <sys/wait.h> |
88 | #include <sys/file.h> | | 88 | #include <sys/file.h> |
89 | #include <sys/vnode.h> | | 89 | #include <sys/vnode.h> |
90 | #include <sys/syslog.h> | | 90 | #include <sys/syslog.h> |
91 | #include <sys/malloc.h> | | 91 | #include <sys/malloc.h> |
92 | #include <sys/pool.h> | | 92 | #include <sys/pool.h> |
93 | #include <sys/uidinfo.h> | | 93 | #include <sys/uidinfo.h> |
94 | #if defined(PERFCTRS) | | 94 | #if defined(PERFCTRS) |
95 | #include <sys/pmc.h> | | 95 | #include <sys/pmc.h> |
96 | #endif | | 96 | #endif |
97 | #include <sys/ptrace.h> | | 97 | #include <sys/ptrace.h> |
98 | #include <sys/acct.h> | | 98 | #include <sys/acct.h> |
99 | #include <sys/filedesc.h> | | 99 | #include <sys/filedesc.h> |
100 | #include <sys/ras.h> | | 100 | #include <sys/ras.h> |
101 | #include <sys/signalvar.h> | | 101 | #include <sys/signalvar.h> |
102 | #include <sys/sched.h> | | 102 | #include <sys/sched.h> |
103 | #include <sys/sa.h> | | 103 | #include <sys/sa.h> |
104 | #include <sys/savar.h> | | 104 | #include <sys/savar.h> |
105 | #include <sys/mount.h> | | 105 | #include <sys/mount.h> |
106 | #include <sys/syscallargs.h> | | 106 | #include <sys/syscallargs.h> |
107 | #include <sys/kauth.h> | | 107 | #include <sys/kauth.h> |
108 | #include <sys/sleepq.h> | | 108 | #include <sys/sleepq.h> |
109 | #include <sys/lockdebug.h> | | 109 | #include <sys/lockdebug.h> |
110 | #include <sys/ktrace.h> | | 110 | #include <sys/ktrace.h> |
111 | #include <sys/cpu.h> | | 111 | #include <sys/cpu.h> |
112 | #include <sys/lwpctl.h> | | 112 | #include <sys/lwpctl.h> |
113 | #include <sys/atomic.h> | | 113 | #include <sys/atomic.h> |
114 | | | 114 | |
115 | #include <uvm/uvm_extern.h> | | 115 | #include <uvm/uvm_extern.h> |
116 | | | 116 | |
117 | #define DEBUG_EXIT | | 117 | #define DEBUG_EXIT |
118 | | | 118 | |
119 | #ifdef DEBUG_EXIT | | 119 | #ifdef DEBUG_EXIT |
120 | int debug_exit = 0; | | 120 | int debug_exit = 0; |
121 | #define DPRINTF(x) if (debug_exit) printf x | | 121 | #define DPRINTF(x) if (debug_exit) printf x |
122 | #else | | 122 | #else |
123 | #define DPRINTF(x) | | 123 | #define DPRINTF(x) |
124 | #endif | | 124 | #endif |
125 | | | 125 | |
126 | static int find_stopped_child(struct proc *, pid_t, int, struct proc **, int *); | | 126 | static int find_stopped_child(struct proc *, pid_t, int, struct proc **, int *); |
127 | static void proc_free(struct proc *, struct rusage *); | | 127 | static void proc_free(struct proc *, struct rusage *); |
128 | | | 128 | |
129 | /* | | 129 | /* |
130 | * Fill in the appropriate signal information, and signal the parent. | | 130 | * Fill in the appropriate signal information, and signal the parent. |
131 | */ | | 131 | */ |
132 | static void | | 132 | static void |
133 | exit_psignal(struct proc *p, struct proc *pp, ksiginfo_t *ksi) | | 133 | exit_psignal(struct proc *p, struct proc *pp, ksiginfo_t *ksi) |
134 | { | | 134 | { |
135 | | | 135 | |
136 | KSI_INIT(ksi); | | 136 | KSI_INIT(ksi); |
137 | if ((ksi->ksi_signo = P_EXITSIG(p)) == SIGCHLD) { | | 137 | if ((ksi->ksi_signo = P_EXITSIG(p)) == SIGCHLD) { |
138 | if (WIFSIGNALED(p->p_xstat)) { | | 138 | if (WIFSIGNALED(p->p_xstat)) { |
139 | if (WCOREDUMP(p->p_xstat)) | | 139 | if (WCOREDUMP(p->p_xstat)) |
140 | ksi->ksi_code = CLD_DUMPED; | | 140 | ksi->ksi_code = CLD_DUMPED; |
141 | else | | 141 | else |
142 | ksi->ksi_code = CLD_KILLED; | | 142 | ksi->ksi_code = CLD_KILLED; |
143 | } else { | | 143 | } else { |
144 | ksi->ksi_code = CLD_EXITED; | | 144 | ksi->ksi_code = CLD_EXITED; |
145 | } | | 145 | } |
146 | } | | 146 | } |
147 | /* | | 147 | /* |
148 | * We fill those in, even for non-SIGCHLD. | | 148 | * We fill those in, even for non-SIGCHLD. |
149 | * It's safe to access p->p_cred unlocked here. | | 149 | * It's safe to access p->p_cred unlocked here. |
150 | */ | | 150 | */ |
151 | ksi->ksi_pid = p->p_pid; | | 151 | ksi->ksi_pid = p->p_pid; |
152 | ksi->ksi_uid = kauth_cred_geteuid(p->p_cred); | | 152 | ksi->ksi_uid = kauth_cred_geteuid(p->p_cred); |
153 | ksi->ksi_status = p->p_xstat; | | 153 | ksi->ksi_status = p->p_xstat; |
154 | /* XXX: is this still valid? */ | | 154 | /* XXX: is this still valid? */ |
155 | ksi->ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec; | | 155 | ksi->ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec; |
156 | ksi->ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec; | | 156 | ksi->ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec; |
157 | } | | 157 | } |
158 | | | 158 | |
159 | /* | | 159 | /* |
160 | * exit -- | | 160 | * exit -- |
161 | * Death of process. | | 161 | * Death of process. |
162 | */ | | 162 | */ |
163 | int | | 163 | int |
164 | sys_exit(struct lwp *l, const struct sys_exit_args *uap, register_t *retval) | | 164 | sys_exit(struct lwp *l, const struct sys_exit_args *uap, register_t *retval) |
165 | { | | 165 | { |
166 | /* { | | 166 | /* { |
167 | syscallarg(int) rval; | | 167 | syscallarg(int) rval; |
168 | } */ | | 168 | } */ |
169 | struct proc *p = l->l_proc; | | 169 | struct proc *p = l->l_proc; |
170 | | | 170 | |
171 | /* Don't call exit1() multiple times in the same process. */ | | 171 | /* Don't call exit1() multiple times in the same process. */ |
172 | mutex_enter(p->p_lock); | | 172 | mutex_enter(p->p_lock); |
173 | if (p->p_sflag & PS_WEXIT) { | | 173 | if (p->p_sflag & PS_WEXIT) { |
174 | mutex_exit(p->p_lock); | | 174 | mutex_exit(p->p_lock); |
175 | lwp_exit(l); | | 175 | lwp_exit(l); |
176 | } | | 176 | } |
177 | | | 177 | |
178 | /* exit1() will release the mutex. */ | | 178 | /* exit1() will release the mutex. */ |
179 | exit1(l, W_EXITCODE(SCARG(uap, rval), 0)); | | 179 | exit1(l, W_EXITCODE(SCARG(uap, rval), 0)); |
180 | /* NOTREACHED */ | | 180 | /* NOTREACHED */ |
181 | return (0); | | 181 | return (0); |
182 | } | | 182 | } |
183 | | | 183 | |
184 | /* | | 184 | /* |
185 | * Exit: deallocate address space and other resources, change proc state | | 185 | * Exit: deallocate address space and other resources, change proc state |
186 | * to zombie, and unlink proc from allproc and parent's lists. Save exit | | 186 | * to zombie, and unlink proc from allproc and parent's lists. Save exit |
187 | * status and rusage for wait(). Check for child processes and orphan them. | | 187 | * status and rusage for wait(). Check for child processes and orphan them. |
188 | * | | 188 | * |
189 | * Must be called with p->p_lock held. Does not return. | | 189 | * Must be called with p->p_lock held. Does not return. |
190 | */ | | 190 | */ |
191 | void | | 191 | void |
192 | exit1(struct lwp *l, int rv) | | 192 | exit1(struct lwp *l, int rv) |
193 | { | | 193 | { |
194 | struct proc *p, *q, *nq; | | 194 | struct proc *p, *q, *nq; |
195 | struct pgrp *pgrp; | | 195 | struct pgrp *pgrp; |
196 | ksiginfo_t ksi; | | 196 | ksiginfo_t ksi; |
197 | ksiginfoq_t kq; | | 197 | ksiginfoq_t kq; |
198 | int wakeinit, sa; | | 198 | int wakeinit, sa; |
199 | | | 199 | |
200 | p = l->l_proc; | | 200 | p = l->l_proc; |
201 | | | 201 | |
202 | KASSERT(mutex_owned(p->p_lock)); | | 202 | KASSERT(mutex_owned(p->p_lock)); |
203 | | | 203 | |
204 | if (__predict_false(p == initproc)) | | 204 | if (__predict_false(p == initproc)) |
205 | panic("init died (signal %d, exit %d)", | | 205 | panic("init died (signal %d, exit %d)", |
206 | WTERMSIG(rv), WEXITSTATUS(rv)); | | 206 | WTERMSIG(rv), WEXITSTATUS(rv)); |
207 | | | 207 | |
208 | /* | | 208 | /* |
209 | * Disable scheduler activation upcalls. We're trying to get out of | | 209 | * Disable scheduler activation upcalls. We're trying to get out of |
210 | * here. | | 210 | * here. |
211 | */ | | 211 | */ |
212 | sa = 0; | | 212 | sa = 0; |
213 | #ifdef KERN_SA | | 213 | #ifdef KERN_SA |
214 | if ((p->p_sa != NULL)) { | | 214 | if ((p->p_sa != NULL)) { |
215 | l->l_pflag |= LP_SA_NOBLOCK; | | 215 | l->l_pflag |= LP_SA_NOBLOCK; |
216 | sa = 1; | | 216 | sa = 1; |
217 | } | | 217 | } |
218 | #endif | | 218 | #endif |
219 | | | 219 | |
220 | p->p_sflag |= PS_WEXIT; | | 220 | p->p_sflag |= PS_WEXIT; |
221 | | | 221 | |
222 | /* | | 222 | /* |
223 | * Force all other LWPs to exit before we do. Only then can we | | 223 | * Force all other LWPs to exit before we do. Only then can we |
224 | * begin to tear down the rest of the process state. | | 224 | * begin to tear down the rest of the process state. |
225 | */ | | 225 | */ |
226 | if (sa || p->p_nlwps > 1) | | 226 | if (sa || p->p_nlwps > 1) |
227 | exit_lwps(l); | | 227 | exit_lwps(l); |
228 | | | 228 | |
229 | ksiginfo_queue_init(&kq); | | 229 | ksiginfo_queue_init(&kq); |
230 | | | 230 | |
231 | /* | | 231 | /* |
232 | * If we have been asked to stop on exit, do so now. | | 232 | * If we have been asked to stop on exit, do so now. |
233 | */ | | 233 | */ |
234 | if (__predict_false(p->p_sflag & PS_STOPEXIT)) { | | 234 | if (__predict_false(p->p_sflag & PS_STOPEXIT)) { |
235 | KERNEL_UNLOCK_ALL(l, &l->l_biglocks); | | 235 | KERNEL_UNLOCK_ALL(l, &l->l_biglocks); |
236 | sigclearall(p, &contsigmask, &kq); | | 236 | sigclearall(p, &contsigmask, &kq); |
237 | | | 237 | |
238 | if (!mutex_tryenter(proc_lock)) { | | 238 | if (!mutex_tryenter(proc_lock)) { |
239 | mutex_exit(p->p_lock); | | 239 | mutex_exit(p->p_lock); |
240 | mutex_enter(proc_lock); | | 240 | mutex_enter(proc_lock); |
241 | mutex_enter(p->p_lock); | | 241 | mutex_enter(p->p_lock); |
242 | } | | 242 | } |
243 | p->p_waited = 0; | | 243 | p->p_waited = 0; |
244 | p->p_pptr->p_nstopchild++; | | 244 | p->p_pptr->p_nstopchild++; |
245 | mutex_exit(proc_lock); | | | |
246 | p->p_stat = SSTOP; | | 245 | p->p_stat = SSTOP; |
| | | 246 | mutex_exit(proc_lock); |
247 | lwp_lock(l); | | 247 | lwp_lock(l); |
248 | p->p_nrlwps--; | | 248 | p->p_nrlwps--; |
249 | l->l_stat = LSSTOP; | | 249 | l->l_stat = LSSTOP; |
250 | mutex_exit(p->p_lock); | | 250 | mutex_exit(p->p_lock); |
251 | mi_switch(l); | | 251 | mi_switch(l); |
252 | KERNEL_LOCK(l->l_biglocks, l); | | 252 | KERNEL_LOCK(l->l_biglocks, l); |
253 | mutex_enter(p->p_lock); | | 253 | mutex_enter(p->p_lock); |
254 | } | | 254 | } |
255 | | | 255 | |
256 | /* | | 256 | /* |
257 | * Bin any remaining signals and mark the process as dying so it will | | 257 | * Bin any remaining signals and mark the process as dying so it will |
258 | * not be found for, e.g. signals. | | 258 | * not be found for, e.g. signals. |
259 | */ | | 259 | */ |
260 | sigfillset(&p->p_sigctx.ps_sigignore); | | 260 | sigfillset(&p->p_sigctx.ps_sigignore); |
261 | sigclearall(p, NULL, &kq); | | 261 | sigclearall(p, NULL, &kq); |
262 | p->p_stat = SDYING; | | 262 | p->p_stat = SDYING; |
263 | mutex_exit(p->p_lock); | | 263 | mutex_exit(p->p_lock); |
264 | ksiginfo_queue_drain(&kq); | | 264 | ksiginfo_queue_drain(&kq); |
265 | | | 265 | |
266 | /* Destroy any lwpctl info. */ | | 266 | /* Destroy any lwpctl info. */ |
267 | if (p->p_lwpctl != NULL) | | 267 | if (p->p_lwpctl != NULL) |
268 | lwp_ctl_exit(); | | 268 | lwp_ctl_exit(); |
269 | | | 269 | |
270 | /* Destroy all AIO works */ | | 270 | /* Destroy all AIO works */ |
271 | aio_exit(p, p->p_aio); | | 271 | aio_exit(p, p->p_aio); |
272 | | | 272 | |
273 | /* | | 273 | /* |
274 | * Drain all remaining references that procfs, ptrace and others may | | 274 | * Drain all remaining references that procfs, ptrace and others may |
275 | * have on the process. | | 275 | * have on the process. |
276 | */ | | 276 | */ |
277 | rw_enter(&p->p_reflock, RW_WRITER); | | 277 | rw_enter(&p->p_reflock, RW_WRITER); |
278 | | | 278 | |
279 | DPRINTF(("exit1: %d.%d exiting.\n", p->p_pid, l->l_lid)); | | 279 | DPRINTF(("exit1: %d.%d exiting.\n", p->p_pid, l->l_lid)); |
280 | | | 280 | |
281 | timers_free(p, TIMERS_ALL); | | 281 | timers_free(p, TIMERS_ALL); |
282 | #if defined(__HAVE_RAS) | | 282 | #if defined(__HAVE_RAS) |
283 | ras_purgeall(); | | 283 | ras_purgeall(); |
284 | #endif | | 284 | #endif |
285 | | | 285 | |
286 | /* | | 286 | /* |
287 | * Close open files, release open-file table and free signal | | 287 | * Close open files, release open-file table and free signal |
288 | * actions. This may block! | | 288 | * actions. This may block! |
289 | */ | | 289 | */ |
290 | fd_free(); | | 290 | fd_free(); |
291 | cwdfree(p->p_cwdi); | | 291 | cwdfree(p->p_cwdi); |
292 | p->p_cwdi = NULL; | | 292 | p->p_cwdi = NULL; |
293 | doexithooks(p); | | 293 | doexithooks(p); |
294 | sigactsfree(p->p_sigacts); | | 294 | sigactsfree(p->p_sigacts); |
295 | | | 295 | |
296 | /* | | 296 | /* |
297 | * Write out accounting data. | | 297 | * Write out accounting data. |
298 | */ | | 298 | */ |
299 | (void)acct_process(l); | | 299 | (void)acct_process(l); |
300 | | | 300 | |
301 | #ifdef KTRACE | | 301 | #ifdef KTRACE |
302 | /* | | 302 | /* |
303 | * Release trace file. | | 303 | * Release trace file. |
304 | */ | | 304 | */ |
305 | if (p->p_tracep != NULL) { | | 305 | if (p->p_tracep != NULL) { |
306 | mutex_enter(&ktrace_lock); | | 306 | mutex_enter(&ktrace_lock); |
307 | ktrderef(p); | | 307 | ktrderef(p); |
308 | mutex_exit(&ktrace_lock); | | 308 | mutex_exit(&ktrace_lock); |
309 | } | | 309 | } |
310 | #endif | | 310 | #endif |
311 | | | 311 | |
312 | /* | | 312 | /* |
313 | * If emulation has process exit hook, call it now. | | 313 | * If emulation has process exit hook, call it now. |
314 | * Set the exit status now so that the exit hook has | | 314 | * Set the exit status now so that the exit hook has |
315 | * an opportunity to tweak it (COMPAT_LINUX requires | | 315 | * an opportunity to tweak it (COMPAT_LINUX requires |
316 | * this for thread group emulation) | | 316 | * this for thread group emulation) |
317 | */ | | 317 | */ |
318 | p->p_xstat = rv; | | 318 | p->p_xstat = rv; |
319 | if (p->p_emul->e_proc_exit) | | 319 | if (p->p_emul->e_proc_exit) |
320 | (*p->p_emul->e_proc_exit)(p); | | 320 | (*p->p_emul->e_proc_exit)(p); |
321 | | | 321 | |
322 | /* | | 322 | /* |
323 | * Free the VM resources we're still holding on to. | | 323 | * Free the VM resources we're still holding on to. |
324 | * We must do this from a valid thread because doing | | 324 | * We must do this from a valid thread because doing |
325 | * so may block. This frees vmspace, which we don't | | 325 | * so may block. This frees vmspace, which we don't |
326 | * need anymore. The only remaining lwp is the one | | 326 | * need anymore. The only remaining lwp is the one |
327 | * we run at this moment, nothing runs in userland | | 327 | * we run at this moment, nothing runs in userland |
328 | * anymore. | | 328 | * anymore. |
329 | */ | | 329 | */ |
330 | uvm_proc_exit(p); | | 330 | uvm_proc_exit(p); |
331 | | | 331 | |
332 | /* | | 332 | /* |
333 | * Stop profiling. | | 333 | * Stop profiling. |
334 | */ | | 334 | */ |
335 | if (__predict_false((p->p_stflag & PST_PROFIL) != 0)) { | | 335 | if (__predict_false((p->p_stflag & PST_PROFIL) != 0)) { |
336 | mutex_spin_enter(&p->p_stmutex); | | 336 | mutex_spin_enter(&p->p_stmutex); |
337 | stopprofclock(p); | | 337 | stopprofclock(p); |
338 | mutex_spin_exit(&p->p_stmutex); | | 338 | mutex_spin_exit(&p->p_stmutex); |
339 | } | | 339 | } |
340 | | | 340 | |
341 | /* | | 341 | /* |
342 | * If parent is waiting for us to exit or exec, PL_PPWAIT is set; we | | 342 | * If parent is waiting for us to exit or exec, PL_PPWAIT is set; we |
343 | * wake up the parent early to avoid deadlock. We can do this once | | 343 | * wake up the parent early to avoid deadlock. We can do this once |
344 | * the VM resources are released. | | 344 | * the VM resources are released. |
345 | */ | | 345 | */ |
346 | mutex_enter(proc_lock); | | 346 | mutex_enter(proc_lock); |
347 | if (p->p_lflag & PL_PPWAIT) { | | 347 | if (p->p_lflag & PL_PPWAIT) { |
348 | p->p_lflag &= ~PL_PPWAIT; | | 348 | p->p_lflag &= ~PL_PPWAIT; |
349 | cv_broadcast(&p->p_pptr->p_waitcv); | | 349 | cv_broadcast(&p->p_pptr->p_waitcv); |
350 | } | | 350 | } |
351 | | | 351 | |
352 | if (SESS_LEADER(p)) { | | 352 | if (SESS_LEADER(p)) { |
353 | struct vnode *vprele = NULL, *vprevoke = NULL; | | 353 | struct vnode *vprele = NULL, *vprevoke = NULL; |
354 | struct session *sp = p->p_session; | | 354 | struct session *sp = p->p_session; |
355 | struct tty *tp; | | 355 | struct tty *tp; |
356 | | | 356 | |
357 | if (sp->s_ttyvp) { | | 357 | if (sp->s_ttyvp) { |
358 | /* | | 358 | /* |
359 | * Controlling process. | | 359 | * Controlling process. |
360 | * Signal foreground pgrp, | | 360 | * Signal foreground pgrp, |
361 | * drain controlling terminal | | 361 | * drain controlling terminal |
362 | * and revoke access to controlling terminal. | | 362 | * and revoke access to controlling terminal. |
363 | */ | | 363 | */ |
364 | tp = sp->s_ttyp; | | 364 | tp = sp->s_ttyp; |
365 | mutex_spin_enter(&tty_lock); | | 365 | mutex_spin_enter(&tty_lock); |
366 | if (tp->t_session == sp) { | | 366 | if (tp->t_session == sp) { |
367 | /* we can't guarantee the revoke will do this */ | | 367 | /* we can't guarantee the revoke will do this */ |
368 | pgrp = tp->t_pgrp; | | 368 | pgrp = tp->t_pgrp; |
369 | tp->t_pgrp = NULL; | | 369 | tp->t_pgrp = NULL; |
370 | tp->t_session = NULL; | | 370 | tp->t_session = NULL; |
371 | mutex_spin_exit(&tty_lock); | | 371 | mutex_spin_exit(&tty_lock); |
372 | if (pgrp != NULL) { | | 372 | if (pgrp != NULL) { |
373 | pgsignal(pgrp, SIGHUP, 1); | | 373 | pgsignal(pgrp, SIGHUP, 1); |
374 | } | | 374 | } |
375 | mutex_exit(proc_lock); | | 375 | mutex_exit(proc_lock); |
376 | (void) ttywait(tp); | | 376 | (void) ttywait(tp); |
377 | mutex_enter(proc_lock); | | 377 | mutex_enter(proc_lock); |
378 | | | 378 | |
379 | /* The tty could have been revoked. */ | | 379 | /* The tty could have been revoked. */ |
380 | vprevoke = sp->s_ttyvp; | | 380 | vprevoke = sp->s_ttyvp; |
381 | } else | | 381 | } else |
382 | mutex_spin_exit(&tty_lock); | | 382 | mutex_spin_exit(&tty_lock); |
383 | vprele = sp->s_ttyvp; | | 383 | vprele = sp->s_ttyvp; |
384 | sp->s_ttyvp = NULL; | | 384 | sp->s_ttyvp = NULL; |
385 | /* | | 385 | /* |
386 | * s_ttyp is not zero'd; we use this to indicate | | 386 | * s_ttyp is not zero'd; we use this to indicate |
387 | * that the session once had a controlling terminal. | | 387 | * that the session once had a controlling terminal. |
388 | * (for logging and informational purposes) | | 388 | * (for logging and informational purposes) |
389 | */ | | 389 | */ |
390 | } | | 390 | } |
391 | sp->s_leader = NULL; | | 391 | sp->s_leader = NULL; |
392 | | | 392 | |
393 | if (vprevoke != NULL || vprele != NULL) { | | 393 | if (vprevoke != NULL || vprele != NULL) { |
394 | if (vprevoke != NULL) { | | 394 | if (vprevoke != NULL) { |
395 | SESSRELE(sp); | | 395 | SESSRELE(sp); |
396 | mutex_exit(proc_lock); | | 396 | mutex_exit(proc_lock); |
397 | VOP_REVOKE(vprevoke, REVOKEALL); | | 397 | VOP_REVOKE(vprevoke, REVOKEALL); |
398 | } else | | 398 | } else |
399 | mutex_exit(proc_lock); | | 399 | mutex_exit(proc_lock); |
400 | if (vprele != NULL) | | 400 | if (vprele != NULL) |
401 | vrele(vprele); | | 401 | vrele(vprele); |
402 | mutex_enter(proc_lock); | | 402 | mutex_enter(proc_lock); |
403 | } | | 403 | } |
404 | } | | 404 | } |
405 | fixjobc(p, p->p_pgrp, 0); | | 405 | fixjobc(p, p->p_pgrp, 0); |
406 | | | 406 | |
407 | /* | | 407 | /* |
408 | * Finalize the last LWP's specificdata, as well as the | | 408 | * Finalize the last LWP's specificdata, as well as the |
409 | * specificdata for the proc itself. | | 409 | * specificdata for the proc itself. |
410 | */ | | 410 | */ |
411 | lwp_finispecific(l); | | 411 | lwp_finispecific(l); |
412 | proc_finispecific(p); | | 412 | proc_finispecific(p); |
413 | | | 413 | |
414 | /* | | 414 | /* |
415 | * Notify interested parties of our demise. | | 415 | * Notify interested parties of our demise. |
416 | */ | | 416 | */ |
417 | KNOTE(&p->p_klist, NOTE_EXIT); | | 417 | KNOTE(&p->p_klist, NOTE_EXIT); |
418 | | | 418 | |
419 | #if PERFCTRS | | 419 | #if PERFCTRS |
420 | /* | | 420 | /* |
421 | * Save final PMC information in parent process & clean up. | | 421 | * Save final PMC information in parent process & clean up. |
422 | */ | | 422 | */ |
423 | if (PMC_ENABLED(p)) { | | 423 | if (PMC_ENABLED(p)) { |
424 | pmc_save_context(p); | | 424 | pmc_save_context(p); |
425 | pmc_accumulate(p->p_pptr, p); | | 425 | pmc_accumulate(p->p_pptr, p); |
426 | pmc_process_exit(p); | | 426 | pmc_process_exit(p); |
427 | } | | 427 | } |
428 | #endif | | 428 | #endif |
429 | | | 429 | |
430 | /* | | 430 | /* |
431 | * Reset p_opptr pointer of all former children which got | | 431 | * Reset p_opptr pointer of all former children which got |
432 | * traced by another process and were reparented. We reset | | 432 | * traced by another process and were reparented. We reset |
433 | * it to NULL here; the trace detach code then reparents | | 433 | * it to NULL here; the trace detach code then reparents |
434 | * the child to initproc. We only check allproc list, since | | 434 | * the child to initproc. We only check allproc list, since |
435 | * eventual former children on zombproc list won't reference | | 435 | * eventual former children on zombproc list won't reference |
436 | * p_opptr anymore. | | 436 | * p_opptr anymore. |
437 | */ | | 437 | */ |
438 | if (__predict_false(p->p_slflag & PSL_CHTRACED)) { | | 438 | if (__predict_false(p->p_slflag & PSL_CHTRACED)) { |
439 | PROCLIST_FOREACH(q, &allproc) { | | 439 | PROCLIST_FOREACH(q, &allproc) { |
440 | if ((q->p_flag & PK_MARKER) != 0) | | 440 | if ((q->p_flag & PK_MARKER) != 0) |
441 | continue; | | 441 | continue; |
442 | if (q->p_opptr == p) | | 442 | if (q->p_opptr == p) |
443 | q->p_opptr = NULL; | | 443 | q->p_opptr = NULL; |
444 | } | | 444 | } |
445 | } | | 445 | } |
446 | | | 446 | |
447 | /* | | 447 | /* |
448 | * Give orphaned children to init(8). | | 448 | * Give orphaned children to init(8). |
449 | */ | | 449 | */ |
450 | q = LIST_FIRST(&p->p_children); | | 450 | q = LIST_FIRST(&p->p_children); |
451 | wakeinit = (q != NULL); | | 451 | wakeinit = (q != NULL); |
452 | for (; q != NULL; q = nq) { | | 452 | for (; q != NULL; q = nq) { |
453 | nq = LIST_NEXT(q, p_sibling); | | 453 | nq = LIST_NEXT(q, p_sibling); |
454 | | | 454 | |
455 | /* | | 455 | /* |
456 | * Traced processes are killed since their existence | | 456 | * Traced processes are killed since their existence |
457 | * means someone is screwing up. Since we reset the | | 457 | * means someone is screwing up. Since we reset the |
458 | * trace flags, the logic in sys_wait4() would not be | | 458 | * trace flags, the logic in sys_wait4() would not be |
459 | * triggered to reparent the process to its | | 459 | * triggered to reparent the process to its |
460 | * original parent, so we must do this here. | | 460 | * original parent, so we must do this here. |
461 | */ | | 461 | */ |
462 | if (__predict_false(q->p_slflag & PSL_TRACED)) { | | 462 | if (__predict_false(q->p_slflag & PSL_TRACED)) { |
463 | mutex_enter(p->p_lock); | | 463 | mutex_enter(p->p_lock); |
464 | q->p_slflag &= ~(PSL_TRACED|PSL_FSTRACE|PSL_SYSCALL); | | 464 | q->p_slflag &= ~(PSL_TRACED|PSL_FSTRACE|PSL_SYSCALL); |
465 | mutex_exit(p->p_lock); | | 465 | mutex_exit(p->p_lock); |
466 | if (q->p_opptr != q->p_pptr) { | | 466 | if (q->p_opptr != q->p_pptr) { |
467 | struct proc *t = q->p_opptr; | | 467 | struct proc *t = q->p_opptr; |
468 | proc_reparent(q, t ? t : initproc); | | 468 | proc_reparent(q, t ? t : initproc); |
469 | q->p_opptr = NULL; | | 469 | q->p_opptr = NULL; |
470 | } else | | 470 | } else |
471 | proc_reparent(q, initproc); | | 471 | proc_reparent(q, initproc); |
472 | killproc(q, "orphaned traced process"); | | 472 | killproc(q, "orphaned traced process"); |
473 | } else | | 473 | } else |
474 | proc_reparent(q, initproc); | | 474 | proc_reparent(q, initproc); |
475 | } | | 475 | } |
476 | | | 476 | |
477 | /* | | 477 | /* |
478 | * Move proc from allproc to zombproc, it's now nearly ready to be | | 478 | * Move proc from allproc to zombproc, it's now nearly ready to be |
479 | * collected by parent. | | 479 | * collected by parent. |
480 | */ | | 480 | */ |
481 | LIST_REMOVE(l, l_list); | | 481 | LIST_REMOVE(l, l_list); |
482 | LIST_REMOVE(p, p_list); | | 482 | LIST_REMOVE(p, p_list); |
483 | LIST_INSERT_HEAD(&zombproc, p, p_list); | | 483 | LIST_INSERT_HEAD(&zombproc, p, p_list); |
484 | | | 484 | |
485 | /* | | 485 | /* |
486 | * Mark the process as dead. We must do this before we signal | | 486 | * Mark the process as dead. We must do this before we signal |
487 | * the parent. | | 487 | * the parent. |
488 | */ | | 488 | */ |
489 | p->p_stat = SDEAD; | | 489 | p->p_stat = SDEAD; |
490 | | | 490 | |
491 | /* Put in front of parent's sibling list for parent to collect it */ | | 491 | /* Put in front of parent's sibling list for parent to collect it */ |
492 | q = p->p_pptr; | | 492 | q = p->p_pptr; |
493 | q->p_nstopchild++; | | 493 | q->p_nstopchild++; |
494 | if (LIST_FIRST(&q->p_children) != p) { | | 494 | if (LIST_FIRST(&q->p_children) != p) { |
495 | /* Put child where it can be found quickly */ | | 495 | /* Put child where it can be found quickly */ |
496 | LIST_REMOVE(p, p_sibling); | | 496 | LIST_REMOVE(p, p_sibling); |
497 | LIST_INSERT_HEAD(&q->p_children, p, p_sibling); | | 497 | LIST_INSERT_HEAD(&q->p_children, p, p_sibling); |
498 | } | | 498 | } |
499 | | | 499 | |
500 | /* | | 500 | /* |
501 | * Notify parent that we're gone. If parent has the P_NOCLDWAIT | | 501 | * Notify parent that we're gone. If parent has the P_NOCLDWAIT |
502 | * flag set, notify init instead (and hope it will handle | | 502 | * flag set, notify init instead (and hope it will handle |
503 | * this situation). | | 503 | * this situation). |
504 | */ | | 504 | */ |
505 | if (q->p_flag & (PK_NOCLDWAIT|PK_CLDSIGIGN)) { | | 505 | if (q->p_flag & (PK_NOCLDWAIT|PK_CLDSIGIGN)) { |
506 | proc_reparent(p, initproc); | | 506 | proc_reparent(p, initproc); |
507 | wakeinit = 1; | | 507 | wakeinit = 1; |
508 | | | 508 | |
509 | /* | | 509 | /* |
510 | * If this was the last child of our parent, notify | | 510 | * If this was the last child of our parent, notify |
511 | * parent, so in case he was wait(2)ing, he will | | 511 | * parent, so in case he was wait(2)ing, he will |
512 | * continue. | | 512 | * continue. |
513 | */ | | 513 | */ |
514 | if (LIST_FIRST(&q->p_children) == NULL) | | 514 | if (LIST_FIRST(&q->p_children) == NULL) |
515 | cv_broadcast(&q->p_waitcv); | | 515 | cv_broadcast(&q->p_waitcv); |
516 | } | | 516 | } |
517 | | | 517 | |
518 | /* Reload parent pointer, since p may have been reparented above */ | | 518 | /* Reload parent pointer, since p may have been reparented above */ |
519 | q = p->p_pptr; | | 519 | q = p->p_pptr; |
520 | | | 520 | |
521 | if (__predict_false((p->p_slflag & PSL_FSTRACE) == 0 && p->p_exitsig != 0)) { | | 521 | if (__predict_false((p->p_slflag & PSL_FSTRACE) == 0 && p->p_exitsig != 0)) { |
522 | exit_psignal(p, q, &ksi); | | 522 | exit_psignal(p, q, &ksi); |
523 | kpsignal(q, &ksi, NULL); | | 523 | kpsignal(q, &ksi, NULL); |
524 | } | | 524 | } |
525 | | | 525 | |
526 | /* Calculate the final rusage info. */ | | 526 | /* Calculate the final rusage info. */ |
527 | calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, | | 527 | calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, |
528 | NULL, NULL); | | 528 | NULL, NULL); |
529 | | | 529 | |
530 | if (wakeinit) | | 530 | if (wakeinit) |
531 | cv_broadcast(&initproc->p_waitcv); | | 531 | cv_broadcast(&initproc->p_waitcv); |
532 | | | 532 | |
533 | callout_destroy(&l->l_timeout_ch); | | 533 | callout_destroy(&l->l_timeout_ch); |
534 | | | 534 | |
535 | /* | | 535 | /* |
536 | * Remaining lwp resources will be freed in lwp_exit2() once we've | | 536 | * Remaining lwp resources will be freed in lwp_exit2() once we've |
537 | * switch to idle context; at that point, we will be marked as a | | 537 | * switch to idle context; at that point, we will be marked as a |
538 | * full blown zombie. | | 538 | * full blown zombie. |
539 | */ | | 539 | */ |
540 | mutex_enter(p->p_lock); | | 540 | mutex_enter(p->p_lock); |
541 | lwp_drainrefs(l); | | 541 | lwp_drainrefs(l); |
542 | lwp_lock(l); | | 542 | lwp_lock(l); |
543 | l->l_prflag &= ~LPR_DETACHED; | | 543 | l->l_prflag &= ~LPR_DETACHED; |
544 | l->l_stat = LSZOMB; | | 544 | l->l_stat = LSZOMB; |
545 | lwp_unlock(l); | | 545 | lwp_unlock(l); |
546 | KASSERT(curlwp == l); | | 546 | KASSERT(curlwp == l); |
547 | KASSERT(p->p_nrlwps == 1); | | 547 | KASSERT(p->p_nrlwps == 1); |
548 | KASSERT(p->p_nlwps == 1); | | 548 | KASSERT(p->p_nlwps == 1); |
549 | p->p_stat = SZOMB; | | 549 | p->p_stat = SZOMB; |
550 | p->p_nrlwps--; | | 550 | p->p_nrlwps--; |
551 | p->p_nzlwps++; | | 551 | p->p_nzlwps++; |
552 | p->p_ndlwps = 0; | | 552 | p->p_ndlwps = 0; |
553 | mutex_exit(p->p_lock); | | 553 | mutex_exit(p->p_lock); |
554 | | | 554 | |
555 | /* | | 555 | /* |
556 | * Signal the parent to collect us, and drop the proclist lock. | | 556 | * Signal the parent to collect us, and drop the proclist lock. |
557 | * Drop debugger/procfs lock; no new references can be gained. | | 557 | * Drop debugger/procfs lock; no new references can be gained. |
558 | */ | | 558 | */ |
559 | cv_broadcast(&p->p_pptr->p_waitcv); | | 559 | cv_broadcast(&p->p_pptr->p_waitcv); |
560 | rw_exit(&p->p_reflock); | | 560 | rw_exit(&p->p_reflock); |
561 | mutex_exit(proc_lock); | | 561 | mutex_exit(proc_lock); |
562 | | | 562 | |
563 | /* Verify that we hold no locks other than the kernel lock. */ | | 563 | /* Verify that we hold no locks other than the kernel lock. */ |
564 | LOCKDEBUG_BARRIER(&kernel_lock, 0); | | 564 | LOCKDEBUG_BARRIER(&kernel_lock, 0); |
565 | | | 565 | |
566 | /* | | 566 | /* |
567 | * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP! | | 567 | * NOTE: WE ARE NO LONGER ALLOWED TO SLEEP! |
568 | */ | | 568 | */ |
569 | | | 569 | |
570 | /* | | 570 | /* |
571 | * Give machine-dependent code a chance to free any MD LWP | | 571 | * Give machine-dependent code a chance to free any MD LWP |
572 | * resources. This must be done before uvm_lwp_exit(), in | | 572 | * resources. This must be done before uvm_lwp_exit(), in |
573 | * case these resources are in the PCB. | | 573 | * case these resources are in the PCB. |
574 | */ | | 574 | */ |
575 | #ifndef __NO_CPU_LWP_FREE | | 575 | #ifndef __NO_CPU_LWP_FREE |
576 | cpu_lwp_free(l, 1); | | 576 | cpu_lwp_free(l, 1); |
577 | #endif | | 577 | #endif |
578 | pmap_deactivate(l); | | 578 | pmap_deactivate(l); |
579 | | | 579 | |
580 | /* This process no longer needs to hold the kernel lock. */ | | 580 | /* This process no longer needs to hold the kernel lock. */ |
581 | #ifdef notyet | | 581 | #ifdef notyet |
582 | /* XXXSMP hold in lwp_userret() */ | | 582 | /* XXXSMP hold in lwp_userret() */ |
583 | KERNEL_UNLOCK_LAST(l); | | 583 | KERNEL_UNLOCK_LAST(l); |
584 | #else | | 584 | #else |
585 | KERNEL_UNLOCK_ALL(l, NULL); | | 585 | KERNEL_UNLOCK_ALL(l, NULL); |
586 | #endif | | 586 | #endif |
587 | | | 587 | |
588 | lwp_exit_switchaway(l); | | 588 | lwp_exit_switchaway(l); |
589 | } | | 589 | } |
590 | | | 590 | |
/*
 * exit_lwps:
 *
 *	Make all other LWPs in the current process exit, and wait until
 *	the calling LWP is the only one left.  Called with p->p_lock held;
 *	all kernel locks are dropped while waiting and reacquired before
 *	returning.
 */
void
exit_lwps(struct lwp *l)
{
	struct proc *p;
	struct lwp *l2;
	int error;
	lwpid_t waited;
	int nlocks;

	/* Drop all kernel locks; reacquired (same count) before return. */
	KERNEL_UNLOCK_ALL(l, &nlocks);

	p = l->l_proc;
	KASSERT(mutex_owned(p->p_lock));

#ifdef KERN_SA
	if (p->p_sa != NULL) {
		struct sadata_vp *vp;
		SLIST_FOREACH(vp, &p->p_sa->sa_vps, savp_next) {
			/*
			 * Make SA-cached LWPs normal process interruptable
			 * so that the exit code can wake them. Locking
			 * savp_mutex locks all the lwps on this vp that
			 * we need to adjust.
			 */
			mutex_enter(&vp->savp_mutex);
			DPRINTF(("exit_lwps: Making cached LWPs of %d on "
			    "VP %d interruptable: ", p->p_pid, vp->savp_id));
			TAILQ_FOREACH(l2, &vp->savp_lwpcache, l_sleepchain) {
				l2->l_flag |= LW_SINTR;
				DPRINTF(("%d ", l2->l_lid));
			}
			DPRINTF(("\n"));

			DPRINTF(("exit_lwps: Making unblocking LWPs of %d on "
			    "VP %d interruptable: ", p->p_pid, vp->savp_id));
			TAILQ_FOREACH(l2, &vp->savp_woken, l_sleepchain) {
				vp->savp_woken_count--;
				l2->l_flag |= LW_SINTR;
				DPRINTF(("%d ", l2->l_lid));
			}
			DPRINTF(("\n"));
			mutex_exit(&vp->savp_mutex);
		}
	}
#endif

retry:
	/*
	 * Interrupt LWPs in interruptable sleep, unsuspend suspended
	 * LWPs and then wait for everyone else to finish.
	 */
	LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
		/* Never kick ourselves. */
		if (l2 == l)
			continue;
		lwp_lock(l2);
		/* Flag the LWP to exit on its way out of the kernel. */
		l2->l_flag &= ~LW_SA;
		l2->l_flag |= LW_WEXIT;
		if ((l2->l_stat == LSSLEEP && (l2->l_flag & LW_SINTR)) ||
		    l2->l_stat == LSSUSPENDED || l2->l_stat == LSSTOP) {
			/* setrunnable() will release the lock. */
			setrunnable(l2);
			DPRINTF(("exit_lwps: Made %d.%d runnable\n",
			    p->p_pid, l2->l_lid));
			continue;
		}
		lwp_unlock(l2);
	}
	/* Wait for the others to finish exiting; we keep p_lock held. */
	while (p->p_nlwps > 1) {
		DPRINTF(("exit_lwps: waiting for %d LWPs (%d zombies)\n",
		    p->p_nlwps, p->p_nzlwps));
		error = lwp_wait1(l, 0, &waited, LWPWAIT_EXITCONTROL);
		/* If we are the last LWP now, the error no longer matters. */
		if (p->p_nlwps == 1)
			break;
		if (error == EDEADLK) {
			/*
			 * LWPs can get suspended/slept behind us.
			 * (eg. sa_setwoken)
			 * kick them again and retry.
			 */
			goto retry;
		}
		if (error)
			panic("exit_lwps: lwp_wait1 failed with error %d",
			    error);
		DPRINTF(("exit_lwps: Got LWP %d from lwp_wait1()\n", waited));
	}

	/* Reacquire the kernel locks dropped on entry. */
	KERNEL_LOCK(nlocks, l);
	KASSERT(p->p_nlwps == 1);
}
681 | | | 681 | |
682 | int | | 682 | int |
683 | do_sys_wait(struct lwp *l, int *pid, int *status, int options, | | 683 | do_sys_wait(struct lwp *l, int *pid, int *status, int options, |
684 | struct rusage *ru, int *was_zombie) | | 684 | struct rusage *ru, int *was_zombie) |
685 | { | | 685 | { |
686 | struct proc *child; | | 686 | struct proc *child; |
687 | int error; | | 687 | int error; |
688 | | | 688 | |
689 | mutex_enter(proc_lock); | | 689 | mutex_enter(proc_lock); |
690 | error = find_stopped_child(l->l_proc, *pid, options, &child, status); | | 690 | error = find_stopped_child(l->l_proc, *pid, options, &child, status); |
691 | | | 691 | |
692 | if (child == NULL) { | | 692 | if (child == NULL) { |
693 | mutex_exit(proc_lock); | | 693 | mutex_exit(proc_lock); |
694 | *pid = 0; | | 694 | *pid = 0; |
695 | return error; | | 695 | return error; |
696 | } | | 696 | } |
697 | | | 697 | |
698 | *pid = child->p_pid; | | 698 | *pid = child->p_pid; |
699 | | | 699 | |
700 | if (child->p_stat == SZOMB) { | | 700 | if (child->p_stat == SZOMB) { |
701 | /* proc_free() will release the proc_lock. */ | | 701 | /* proc_free() will release the proc_lock. */ |
702 | *was_zombie = 1; | | 702 | *was_zombie = 1; |
703 | if (options & WNOWAIT) | | 703 | if (options & WNOWAIT) |
704 | mutex_exit(proc_lock); | | 704 | mutex_exit(proc_lock); |
705 | else { | | 705 | else { |
706 | proc_free(child, ru); | | 706 | proc_free(child, ru); |
707 | } | | 707 | } |
708 | } else { | | 708 | } else { |
709 | /* Child state must have been SSTOP. */ | | 709 | /* Child state must have been SSTOP. */ |
710 | *was_zombie = 0; | | 710 | *was_zombie = 0; |
711 | mutex_exit(proc_lock); | | 711 | mutex_exit(proc_lock); |
712 | *status = W_STOPCODE(*status); | | 712 | *status = W_STOPCODE(*status); |
713 | } | | 713 | } |
714 | | | 714 | |
715 | return 0; | | 715 | return 0; |
716 | } | | 716 | } |
717 | | | 717 | |
718 | int | | 718 | int |
719 | sys_wait4(struct lwp *l, const struct sys_wait4_args *uap, register_t *retval) | | 719 | sys_wait4(struct lwp *l, const struct sys_wait4_args *uap, register_t *retval) |
720 | { | | 720 | { |
721 | /* { | | 721 | /* { |
722 | syscallarg(int) pid; | | 722 | syscallarg(int) pid; |
723 | syscallarg(int *) status; | | 723 | syscallarg(int *) status; |
724 | syscallarg(int) options; | | 724 | syscallarg(int) options; |
725 | syscallarg(struct rusage *) rusage; | | 725 | syscallarg(struct rusage *) rusage; |
726 | } */ | | 726 | } */ |
727 | int status, error; | | 727 | int status, error; |
728 | int was_zombie; | | 728 | int was_zombie; |
729 | struct rusage ru; | | 729 | struct rusage ru; |
730 | int pid = SCARG(uap, pid); | | 730 | int pid = SCARG(uap, pid); |
731 | | | 731 | |
732 | error = do_sys_wait(l, &pid, &status, SCARG(uap, options), | | 732 | error = do_sys_wait(l, &pid, &status, SCARG(uap, options), |
733 | SCARG(uap, rusage) != NULL ? &ru : NULL, &was_zombie); | | 733 | SCARG(uap, rusage) != NULL ? &ru : NULL, &was_zombie); |
734 | | | 734 | |
735 | retval[0] = pid; | | 735 | retval[0] = pid; |
736 | if (pid == 0) | | 736 | if (pid == 0) |
737 | return error; | | 737 | return error; |
738 | | | 738 | |
739 | if (SCARG(uap, rusage)) | | 739 | if (SCARG(uap, rusage)) |
740 | error = copyout(&ru, SCARG(uap, rusage), sizeof(ru)); | | 740 | error = copyout(&ru, SCARG(uap, rusage), sizeof(ru)); |
741 | | | 741 | |
742 | if (error == 0 && SCARG(uap, status)) | | 742 | if (error == 0 && SCARG(uap, status)) |
743 | error = copyout(&status, SCARG(uap, status), sizeof(status)); | | 743 | error = copyout(&status, SCARG(uap, status), sizeof(status)); |
744 | | | 744 | |
745 | return error; | | 745 | return error; |
746 | } | | 746 | } |
747 | | | 747 | |
748 | /* | | 748 | /* |
749 | * Scan list of child processes for a child process that has stopped or | | 749 | * Scan list of child processes for a child process that has stopped or |
750 | * exited. Used by sys_wait4 and 'compat' equivalents. | | 750 | * exited. Used by sys_wait4 and 'compat' equivalents. |
751 | * | | 751 | * |
752 | * Must be called with the proc_lock held, and may release while waiting. | | 752 | * Must be called with the proc_lock held, and may release while waiting. |
753 | */ | | 753 | */ |
754 | static int | | 754 | static int |
755 | find_stopped_child(struct proc *parent, pid_t pid, int options, | | 755 | find_stopped_child(struct proc *parent, pid_t pid, int options, |
756 | struct proc **child_p, int *status_p) | | 756 | struct proc **child_p, int *status_p) |
757 | { | | 757 | { |
758 | struct proc *child, *dead; | | 758 | struct proc *child, *dead; |
759 | int error; | | 759 | int error; |
760 | | | 760 | |
761 | KASSERT(mutex_owned(proc_lock)); | | 761 | KASSERT(mutex_owned(proc_lock)); |
762 | | | 762 | |
763 | if (options & ~(WUNTRACED|WNOHANG|WALTSIG|WALLSIG) | | 763 | if (options & ~(WUNTRACED|WNOHANG|WALTSIG|WALLSIG) |
764 | && !(options & WOPTSCHECKED)) { | | 764 | && !(options & WOPTSCHECKED)) { |
765 | *child_p = NULL; | | 765 | *child_p = NULL; |
766 | return EINVAL; | | 766 | return EINVAL; |
767 | } | | 767 | } |
768 | | | 768 | |
769 | if (pid == 0 && !(options & WOPTSCHECKED)) | | 769 | if (pid == 0 && !(options & WOPTSCHECKED)) |
770 | pid = -parent->p_pgid; | | 770 | pid = -parent->p_pgid; |
771 | | | 771 | |
772 | for (;;) { | | 772 | for (;;) { |
773 | error = ECHILD; | | 773 | error = ECHILD; |
774 | dead = NULL; | | 774 | dead = NULL; |
775 | | | 775 | |
776 | LIST_FOREACH(child, &parent->p_children, p_sibling) { | | 776 | LIST_FOREACH(child, &parent->p_children, p_sibling) { |
777 | if (pid >= 0) { | | 777 | if (pid >= 0) { |
778 | if (child->p_pid != pid) { | | 778 | if (child->p_pid != pid) { |
779 | child = p_find(pid, PFIND_ZOMBIE | | | 779 | child = p_find(pid, PFIND_ZOMBIE | |
780 | PFIND_LOCKED); | | 780 | PFIND_LOCKED); |
781 | if (child == NULL || | | 781 | if (child == NULL || |
782 | child->p_pptr != parent) { | | 782 | child->p_pptr != parent) { |
783 | child = NULL; | | 783 | child = NULL; |
784 | break; | | 784 | break; |
785 | } | | 785 | } |
786 | } | | 786 | } |
787 | } else if (pid != WAIT_ANY && child->p_pgid != -pid) { | | 787 | } else if (pid != WAIT_ANY && child->p_pgid != -pid) { |
788 | /* Child not in correct pgrp */ | | 788 | /* Child not in correct pgrp */ |
789 | continue; | | 789 | continue; |
790 | } | | 790 | } |
791 | | | 791 | |
792 | /* | | 792 | /* |
793 | * Wait for processes with p_exitsig != SIGCHLD | | 793 | * Wait for processes with p_exitsig != SIGCHLD |
794 | * processes only if WALTSIG is set; wait for | | 794 | * processes only if WALTSIG is set; wait for |
795 | * processes with p_exitsig == SIGCHLD only | | 795 | * processes with p_exitsig == SIGCHLD only |
796 | * if WALTSIG is clear. | | 796 | * if WALTSIG is clear. |
797 | */ | | 797 | */ |
798 | if (((options & WALLSIG) == 0) && | | 798 | if (((options & WALLSIG) == 0) && |
799 | (options & WALTSIG ? child->p_exitsig == SIGCHLD | | 799 | (options & WALTSIG ? child->p_exitsig == SIGCHLD |
800 | : P_EXITSIG(child) != SIGCHLD)){ | | 800 | : P_EXITSIG(child) != SIGCHLD)){ |
801 | if (child->p_pid == pid) { | | 801 | if (child->p_pid == pid) { |
802 | child = NULL; | | 802 | child = NULL; |
803 | break; | | 803 | break; |
804 | } | | 804 | } |
805 | continue; | | 805 | continue; |
806 | } | | 806 | } |
807 | | | 807 | |
808 | error = 0; | | 808 | error = 0; |
809 | if ((options & WNOZOMBIE) == 0) { | | 809 | if ((options & WNOZOMBIE) == 0) { |
810 | if (child->p_stat == SZOMB) | | 810 | if (child->p_stat == SZOMB) |
811 | break; | | 811 | break; |
812 | if (child->p_stat == SDEAD) { | | 812 | if (child->p_stat == SDEAD) { |
813 | /* | | 813 | /* |
814 | * We may occasionally arrive here | | 814 | * We may occasionally arrive here |
815 | * after receiving a signal, but | | 815 | * after receiving a signal, but |
816 | * immediatley before the child | | 816 | * immediatley before the child |
817 | * process is zombified. The wait | | 817 | * process is zombified. The wait |
818 | * will be short, so avoid returning | | 818 | * will be short, so avoid returning |
819 | * to userspace. | | 819 | * to userspace. |
820 | */ | | 820 | */ |
821 | dead = child; | | 821 | dead = child; |
822 | } | | 822 | } |
823 | } | | 823 | } |
824 | | | 824 | |
825 | if (child->p_stat == SSTOP && | | 825 | if (child->p_stat == SSTOP && |
826 | child->p_waited == 0 && | | 826 | child->p_waited == 0 && |
827 | (child->p_slflag & PSL_TRACED || | | 827 | (child->p_slflag & PSL_TRACED || |
828 | options & WUNTRACED)) { | | 828 | options & WUNTRACED)) { |
829 | if ((options & WNOWAIT) == 0) { | | 829 | if ((options & WNOWAIT) == 0) { |
830 | child->p_waited = 1; | | 830 | child->p_waited = 1; |
831 | parent->p_nstopchild--; | | 831 | parent->p_nstopchild--; |
832 | } | | 832 | } |
833 | break; | | 833 | break; |
834 | } | | 834 | } |
835 | if (parent->p_nstopchild == 0 || child->p_pid == pid) { | | 835 | if (parent->p_nstopchild == 0 || child->p_pid == pid) { |
836 | child = NULL; | | 836 | child = NULL; |
837 | break; | | 837 | break; |
838 | } | | 838 | } |
839 | } | | 839 | } |
840 | | | 840 | |
841 | if (child != NULL || error != 0 || | | 841 | if (child != NULL || error != 0 || |
842 | ((options & WNOHANG) != 0 && dead == NULL)) { | | 842 | ((options & WNOHANG) != 0 && dead == NULL)) { |
843 | if (child != NULL) { | | 843 | if (child != NULL) { |
844 | *status_p = child->p_xstat; | | 844 | *status_p = child->p_xstat; |
845 | } | | 845 | } |
846 | *child_p = child; | | 846 | *child_p = child; |
847 | return error; | | 847 | return error; |
848 | } | | 848 | } |
849 | | | 849 | |
850 | /* | | 850 | /* |
851 | * Wait for another child process to stop. | | 851 | * Wait for another child process to stop. |
852 | */ | | 852 | */ |
853 | error = cv_wait_sig(&parent->p_waitcv, proc_lock); | | 853 | error = cv_wait_sig(&parent->p_waitcv, proc_lock); |
854 | | | 854 | |
855 | if (error != 0) { | | 855 | if (error != 0) { |
856 | *child_p = NULL; | | 856 | *child_p = NULL; |
857 | return error; | | 857 | return error; |
858 | } | | 858 | } |
859 | } | | 859 | } |
860 | } | | 860 | } |
861 | | | 861 | |
862 | /* | | 862 | /* |
863 | * Free a process after parent has taken all the state info. Must be called | | 863 | * Free a process after parent has taken all the state info. Must be called |
864 | * with the proclist lock held, and will release before returning. | | 864 | * with the proclist lock held, and will release before returning. |
865 | * | | 865 | * |
866 | * *ru is returned to the caller, and must be freed by the caller. | | 866 | * *ru is returned to the caller, and must be freed by the caller. |
867 | */ | | 867 | */ |
868 | static void | | 868 | static void |
869 | proc_free(struct proc *p, struct rusage *ru) | | 869 | proc_free(struct proc *p, struct rusage *ru) |
870 | { | | 870 | { |
871 | struct proc *parent; | | 871 | struct proc *parent; |
872 | struct lwp *l; | | 872 | struct lwp *l; |
873 | ksiginfo_t ksi; | | 873 | ksiginfo_t ksi; |
874 | kauth_cred_t cred1, cred2; | | 874 | kauth_cred_t cred1, cred2; |
875 | uid_t uid; | | 875 | uid_t uid; |
876 | | | 876 | |
877 | KASSERT(mutex_owned(proc_lock)); | | 877 | KASSERT(mutex_owned(proc_lock)); |
878 | KASSERT(p->p_nlwps == 1); | | 878 | KASSERT(p->p_nlwps == 1); |
879 | KASSERT(p->p_nzlwps == 1); | | 879 | KASSERT(p->p_nzlwps == 1); |
880 | KASSERT(p->p_nrlwps == 0); | | 880 | KASSERT(p->p_nrlwps == 0); |
881 | KASSERT(p->p_stat == SZOMB); | | 881 | KASSERT(p->p_stat == SZOMB); |
882 | | | 882 | |
883 | /* | | 883 | /* |
884 | * If we got the child via ptrace(2) or procfs, and | | 884 | * If we got the child via ptrace(2) or procfs, and |
885 | * the parent is different (meaning the process was | | 885 | * the parent is different (meaning the process was |
886 | * attached, rather than run as a child), then we need | | 886 | * attached, rather than run as a child), then we need |
887 | * to give it back to the old parent, and send the | | 887 | * to give it back to the old parent, and send the |
888 | * parent the exit signal. The rest of the cleanup | | 888 | * parent the exit signal. The rest of the cleanup |
889 | * will be done when the old parent waits on the child. | | 889 | * will be done when the old parent waits on the child. |
890 | */ | | 890 | */ |
891 | if ((p->p_slflag & PSL_TRACED) != 0) { | | 891 | if ((p->p_slflag & PSL_TRACED) != 0) { |
892 | parent = p->p_pptr; | | 892 | parent = p->p_pptr; |
893 | if (p->p_opptr != parent){ | | 893 | if (p->p_opptr != parent){ |
894 | mutex_enter(p->p_lock); | | 894 | mutex_enter(p->p_lock); |
895 | p->p_slflag &= ~(PSL_TRACED|PSL_FSTRACE|PSL_SYSCALL); | | 895 | p->p_slflag &= ~(PSL_TRACED|PSL_FSTRACE|PSL_SYSCALL); |
896 | mutex_exit(p->p_lock); | | 896 | mutex_exit(p->p_lock); |
897 | parent = p->p_opptr; | | 897 | parent = p->p_opptr; |
898 | if (parent == NULL) | | 898 | if (parent == NULL) |
899 | parent = initproc; | | 899 | parent = initproc; |
900 | proc_reparent(p, parent); | | 900 | proc_reparent(p, parent); |
901 | p->p_opptr = NULL; | | 901 | p->p_opptr = NULL; |
902 | if (p->p_exitsig != 0) { | | 902 | if (p->p_exitsig != 0) { |
903 | exit_psignal(p, parent, &ksi); | | 903 | exit_psignal(p, parent, &ksi); |
904 | kpsignal(parent, &ksi, NULL); | | 904 | kpsignal(parent, &ksi, NULL); |
905 | } | | 905 | } |
906 | cv_broadcast(&parent->p_waitcv); | | 906 | cv_broadcast(&parent->p_waitcv); |
907 | mutex_exit(proc_lock); | | 907 | mutex_exit(proc_lock); |
908 | return; | | 908 | return; |
909 | } | | 909 | } |
910 | } | | 910 | } |
911 | | | 911 | |
912 | /* | | 912 | /* |
913 | * Finally finished with old proc entry. Unlink it from its process | | 913 | * Finally finished with old proc entry. Unlink it from its process |
914 | * group. | | 914 | * group. |
915 | */ | | 915 | */ |
916 | leavepgrp(p); | | 916 | leavepgrp(p); |
917 | | | 917 | |
918 | parent = p->p_pptr; | | 918 | parent = p->p_pptr; |
919 | sched_proc_exit(parent, p); | | 919 | sched_proc_exit(parent, p); |
920 | | | 920 | |
921 | /* | | 921 | /* |
922 | * Add child times of exiting process onto its own times. | | 922 | * Add child times of exiting process onto its own times. |
923 | * This cannot be done any earlier else it might get done twice. | | 923 | * This cannot be done any earlier else it might get done twice. |
924 | */ | | 924 | */ |
925 | l = LIST_FIRST(&p->p_lwps); | | 925 | l = LIST_FIRST(&p->p_lwps); |
926 | p->p_stats->p_ru.ru_nvcsw += (l->l_ncsw - l->l_nivcsw); | | 926 | p->p_stats->p_ru.ru_nvcsw += (l->l_ncsw - l->l_nivcsw); |
927 | p->p_stats->p_ru.ru_nivcsw += l->l_nivcsw; | | 927 | p->p_stats->p_ru.ru_nivcsw += l->l_nivcsw; |
928 | ruadd(&p->p_stats->p_ru, &l->l_ru); | | 928 | ruadd(&p->p_stats->p_ru, &l->l_ru); |
929 | ruadd(&p->p_stats->p_ru, &p->p_stats->p_cru); | | 929 | ruadd(&p->p_stats->p_ru, &p->p_stats->p_cru); |
930 | ruadd(&parent->p_stats->p_cru, &p->p_stats->p_ru); | | 930 | ruadd(&parent->p_stats->p_cru, &p->p_stats->p_ru); |
931 | if (ru != NULL) | | 931 | if (ru != NULL) |
932 | *ru = p->p_stats->p_ru; | | 932 | *ru = p->p_stats->p_ru; |
933 | p->p_xstat = 0; | | 933 | p->p_xstat = 0; |
934 | | | 934 | |
935 | /* Release any SA state. */ | | 935 | /* Release any SA state. */ |
936 | #ifdef KERN_SA | | 936 | #ifdef KERN_SA |
937 | if (p->p_sa) | | 937 | if (p->p_sa) |
938 | sa_release(p); | | 938 | sa_release(p); |
939 | #endif | | 939 | #endif |
940 | | | 940 | |
941 | /* | | 941 | /* |
942 | * At this point we are going to start freeing the final resources. | | 942 | * At this point we are going to start freeing the final resources. |
943 | * If anyone tries to access the proc structure after here they will | | 943 | * If anyone tries to access the proc structure after here they will |
944 | * get a shock - bits are missing. Attempt to make it hard! We | | 944 | * get a shock - bits are missing. Attempt to make it hard! We |
945 | * don't bother with any further locking past this point. | | 945 | * don't bother with any further locking past this point. |
946 | */ | | 946 | */ |
947 | p->p_stat = SIDL; /* not even a zombie any more */ | | 947 | p->p_stat = SIDL; /* not even a zombie any more */ |
948 | LIST_REMOVE(p, p_list); /* off zombproc */ | | 948 | LIST_REMOVE(p, p_list); /* off zombproc */ |
949 | parent = p->p_pptr; | | 949 | parent = p->p_pptr; |
950 | p->p_pptr->p_nstopchild--; | | 950 | p->p_pptr->p_nstopchild--; |
951 | LIST_REMOVE(p, p_sibling); | | 951 | LIST_REMOVE(p, p_sibling); |
952 | | | 952 | |
953 | /* | | 953 | /* |
954 | * Let pid be reallocated. | | 954 | * Let pid be reallocated. |
955 | */ | | 955 | */ |
956 | proc_free_pid(p); | | 956 | proc_free_pid(p); |
957 | mutex_exit(proc_lock); | | 957 | mutex_exit(proc_lock); |
958 | | | 958 | |
959 | /* | | 959 | /* |
960 | * Delay release until after lwp_free. | | 960 | * Delay release until after lwp_free. |
961 | */ | | 961 | */ |
962 | cred2 = l->l_cred; | | 962 | cred2 = l->l_cred; |
963 | | | 963 | |
964 | /* | | 964 | /* |
965 | * Free the last LWP's resources. | | 965 | * Free the last LWP's resources. |
966 | * | | 966 | * |
967 | * lwp_free ensures the LWP is no longer running on another CPU. | | 967 | * lwp_free ensures the LWP is no longer running on another CPU. |
968 | */ | | 968 | */ |
969 | lwp_free(l, false, true); | | 969 | lwp_free(l, false, true); |
970 | | | 970 | |
971 | /* | | 971 | /* |
972 | * Now no one except us can reach the process p. | | 972 | * Now no one except us can reach the process p. |
973 | */ | | 973 | */ |
974 | | | 974 | |
975 | /* | | 975 | /* |
976 | * Decrement the count of procs running with this uid. | | 976 | * Decrement the count of procs running with this uid. |
977 | */ | | 977 | */ |
978 | cred1 = p->p_cred; | | 978 | cred1 = p->p_cred; |
979 | uid = kauth_cred_getuid(cred1); | | 979 | uid = kauth_cred_getuid(cred1); |
980 | (void)chgproccnt(uid, -1); | | 980 | (void)chgproccnt(uid, -1); |
981 | | | 981 | |
982 | /* | | 982 | /* |
983 | * Release substructures. | | 983 | * Release substructures. |
984 | */ | | 984 | */ |
985 | | | 985 | |
986 | limfree(p->p_limit); | | 986 | limfree(p->p_limit); |
987 | pstatsfree(p->p_stats); | | 987 | pstatsfree(p->p_stats); |
988 | kauth_cred_free(cred1); | | 988 | kauth_cred_free(cred1); |
989 | kauth_cred_free(cred2); | | 989 | kauth_cred_free(cred2); |
990 | | | 990 | |
991 | /* | | 991 | /* |
992 | * Release reference to text vnode | | 992 | * Release reference to text vnode |
993 | */ | | 993 | */ |
994 | if (p->p_textvp) | | 994 | if (p->p_textvp) |
995 | vrele(p->p_textvp); | | 995 | vrele(p->p_textvp); |
996 | | | 996 | |
997 | mutex_destroy(&p->p_auxlock); | | 997 | mutex_destroy(&p->p_auxlock); |
998 | mutex_obj_free(p->p_lock); | | 998 | mutex_obj_free(p->p_lock); |
999 | mutex_destroy(&p->p_stmutex); | | 999 | mutex_destroy(&p->p_stmutex); |
1000 | cv_destroy(&p->p_waitcv); | | 1000 | cv_destroy(&p->p_waitcv); |
1001 | cv_destroy(&p->p_lwpcv); | | 1001 | cv_destroy(&p->p_lwpcv); |
1002 | rw_destroy(&p->p_reflock); | | 1002 | rw_destroy(&p->p_reflock); |
1003 | | | 1003 | |
1004 | proc_free_mem(p); | | 1004 | proc_free_mem(p); |
1005 | } | | 1005 | } |
1006 | | | 1006 | |
1007 | /* | | 1007 | /* |
1008 | * make process 'parent' the new parent of process 'child'. | | 1008 | * make process 'parent' the new parent of process 'child'. |
1009 | * | | 1009 | * |
1010 | * Must be called with proc_lock held. | | 1010 | * Must be called with proc_lock held. |
1011 | */ | | 1011 | */ |
1012 | void | | 1012 | void |
1013 | proc_reparent(struct proc *child, struct proc *parent) | | 1013 | proc_reparent(struct proc *child, struct proc *parent) |
1014 | { | | 1014 | { |
1015 | | | 1015 | |
1016 | KASSERT(mutex_owned(proc_lock)); | | 1016 | KASSERT(mutex_owned(proc_lock)); |
1017 | | | 1017 | |
1018 | if (child->p_pptr == parent) | | 1018 | if (child->p_pptr == parent) |
1019 | return; | | 1019 | return; |
1020 | | | 1020 | |
1021 | if (child->p_stat == SZOMB || child->p_stat == SDEAD || | | 1021 | if (child->p_stat == SZOMB || child->p_stat == SDEAD || |
1022 | (child->p_stat == SSTOP && !child->p_waited)) { | | 1022 | (child->p_stat == SSTOP && !child->p_waited)) { |
1023 | child->p_pptr->p_nstopchild--; | | 1023 | child->p_pptr->p_nstopchild--; |
1024 | parent->p_nstopchild++; | | 1024 | parent->p_nstopchild++; |
1025 | } | | 1025 | } |
1026 | if (parent == initproc) | | 1026 | if (parent == initproc) |
1027 | child->p_exitsig = SIGCHLD; | | 1027 | child->p_exitsig = SIGCHLD; |
1028 | | | 1028 | |
1029 | LIST_REMOVE(child, p_sibling); | | 1029 | LIST_REMOVE(child, p_sibling); |
1030 | LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); | | 1030 | LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); |
1031 | child->p_pptr = parent; | | 1031 | child->p_pptr = parent; |
1032 | child->p_ppid = parent->p_pid; | | 1032 | child->p_ppid = parent->p_pid; |
1033 | } | | 1033 | } |