Tue Oct 13 00:27:19 2015 UTC ()
Currently, if a process is exiting and its parent has indicated no intent
of reaping the process (or any other children), the process will get
reparented to init.  Since the state of the exiting process at this point
is SDEAD, proc_reparent() will not update either the old or new parent's
p_nstopchild counters.

This change causes both old and new parents to be properly updated.
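
To make the failure mode concrete, here is a minimal, hypothetical
userland sketch of the scenario described above (assuming the usual
behaviour that ignoring SIGCHLD marks the parent with PK_CLDSIGIGN, so
exit1() hands the dying child to init while it is still SDEAD):

	#include <signal.h>
	#include <unistd.h>

	int
	main(void)
	{
		/* Parent declares no intent of reaping this (or any) child. */
		signal(SIGCHLD, SIG_IGN);

		if (fork() == 0)
			_exit(0);	/* child: exit1() reparents it to init */

		pause();		/* parent never calls wait(2) */
		return 0;
	}

Before this change, the SDEAD child reparented in that situation left the
old and new parents' p_nstopchild counts stale; the fix below adds the
SDEAD case to the state check in proc_reparent().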

Fixes PR kern/50300

Pullups will be requested for:

       NetBSD-7, -6, -6-0, -6-1, -5, -5-0, -5-1, and -5-2


(pgoyette)
diff -r1.245 -r1.246 src/sys/kern/kern_exit.c

--- src/sys/kern/kern_exit.c 2015/10/02 16:54:15 1.245
+++ src/sys/kern/kern_exit.c 2015/10/13 00:27:19 1.246
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_exit.c,v 1.245 2015/10/02 16:54:15 christos Exp $ */
+/* $NetBSD: kern_exit.c,v 1.246 2015/10/13 00:27:19 pgoyette Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.245 2015/10/02 16:54:15 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.246 2015/10/13 00:27:19 pgoyette Exp $");
 
 #include "opt_ktrace.h"
 #include "opt_dtrace.h"
@@ -948,27 +948,27 @@
 /*
  * make process 'parent' the new parent of process 'child'.
  *
  * Must be called with proc_lock held.
  */
 void
 proc_reparent(struct proc *child, struct proc *parent)
 {
 
 	KASSERT(mutex_owned(proc_lock));
 
 	if (child->p_pptr == parent)
 		return;
 
-	if (child->p_stat == SZOMB ||
+	if (child->p_stat == SZOMB || child->p_stat == SDEAD ||
 	    (child->p_stat == SSTOP && !child->p_waited)) {
 		child->p_pptr->p_nstopchild--;
 		parent->p_nstopchild++;
 	}
 	if (parent == initproc)
 		child->p_exitsig = SIGCHLD;
 
 	LIST_REMOVE(child, p_sibling);
 	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
 	child->p_pptr = parent;
 	child->p_ppid = parent->p_pid;
 }