Thu Jan 30 12:36:38 2020 UTC ()
Update comments


(ad)
diff -r1.74 -r1.75 src/sys/kern/sys_lwp.c

cvs diff -r1.74 -r1.75 src/sys/kern/sys_lwp.c (switch to unified diff)

--- src/sys/kern/sys_lwp.c 2020/01/29 15:47:52 1.74
+++ src/sys/kern/sys_lwp.c 2020/01/30 12:36:38 1.75
@@ -1,754 +1,760 @@ @@ -1,754 +1,760 @@
1/* $NetBSD: sys_lwp.c,v 1.74 2020/01/29 15:47:52 ad Exp $ */ 1/* $NetBSD: sys_lwp.c,v 1.75 2020/01/30 12:36:38 ad Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001, 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 2001, 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams, and Andrew Doran. 8 * by Nathan J. Williams, and Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Lightweight process (LWP) system calls. See kern_lwp.c for a description 33 * Lightweight process (LWP) system calls. See kern_lwp.c for a description
34 * of LWPs. 34 * of LWPs.
35 */ 35 */
36 36
37#include <sys/cdefs.h> 37#include <sys/cdefs.h>
38__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.74 2020/01/29 15:47:52 ad Exp $"); 38__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.75 2020/01/30 12:36:38 ad Exp $");
39 39
40#include <sys/param.h> 40#include <sys/param.h>
41#include <sys/systm.h> 41#include <sys/systm.h>
42#include <sys/pool.h> 42#include <sys/pool.h>
43#include <sys/proc.h> 43#include <sys/proc.h>
44#include <sys/types.h> 44#include <sys/types.h>
45#include <sys/syscallargs.h> 45#include <sys/syscallargs.h>
46#include <sys/kauth.h> 46#include <sys/kauth.h>
47#include <sys/kmem.h> 47#include <sys/kmem.h>
48#include <sys/ptrace.h> 48#include <sys/ptrace.h>
49#include <sys/sleepq.h> 49#include <sys/sleepq.h>
50#include <sys/lwpctl.h> 50#include <sys/lwpctl.h>
51#include <sys/cpu.h> 51#include <sys/cpu.h>
52 52
53#include <uvm/uvm_extern.h> 53#include <uvm/uvm_extern.h>
54 54
55#define LWP_UNPARK_MAX 1024 55#define LWP_UNPARK_MAX 1024
56 56
57static const stack_t lwp_ss_init = SS_INIT; 57static const stack_t lwp_ss_init = SS_INIT;
58 58
59syncobj_t lwp_park_syncobj = { 59syncobj_t lwp_park_syncobj = {
60 .sobj_flag = SOBJ_SLEEPQ_NULL, 60 .sobj_flag = SOBJ_SLEEPQ_NULL,
61 .sobj_unsleep = sleepq_unsleep, 61 .sobj_unsleep = sleepq_unsleep,
62 .sobj_changepri = sleepq_changepri, 62 .sobj_changepri = sleepq_changepri,
63 .sobj_lendpri = sleepq_lendpri, 63 .sobj_lendpri = sleepq_lendpri,
64 .sobj_owner = syncobj_noowner, 64 .sobj_owner = syncobj_noowner,
65}; 65};
66 66
67static void 67static void
68mi_startlwp(void *arg) 68mi_startlwp(void *arg)
69{ 69{
70 struct lwp *l = curlwp; 70 struct lwp *l = curlwp;
71 struct proc *p = l->l_proc; 71 struct proc *p = l->l_proc;
72 72
73 (p->p_emul->e_startlwp)(arg); 73 (p->p_emul->e_startlwp)(arg);
74 74
75 /* If the process is traced, report lwp creation to a debugger */ 75 /* If the process is traced, report lwp creation to a debugger */
76 if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) == 76 if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) ==
77 (PSL_TRACED|PSL_TRACELWP_CREATE)) { 77 (PSL_TRACED|PSL_TRACELWP_CREATE)) {
78 /* Paranoid check */ 78 /* Paranoid check */
79 mutex_enter(proc_lock); 79 mutex_enter(proc_lock);
80 if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) != 80 if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) !=
81 (PSL_TRACED|PSL_TRACELWP_CREATE)) {  81 (PSL_TRACED|PSL_TRACELWP_CREATE)) {
82 mutex_exit(proc_lock); 82 mutex_exit(proc_lock);
83 return; 83 return;
84 } 84 }
85 85
86 mutex_enter(p->p_lock); 86 mutex_enter(p->p_lock);
87 eventswitch(TRAP_LWP, PTRACE_LWP_CREATE, l->l_lid); 87 eventswitch(TRAP_LWP, PTRACE_LWP_CREATE, l->l_lid);
88 } 88 }
89} 89}
90 90
91int 91int
92do_lwp_create(lwp_t *l, void *arg, u_long flags, lwp_t **l2, 92do_lwp_create(lwp_t *l, void *arg, u_long flags, lwp_t **l2,
93 const sigset_t *sigmask, const stack_t *sigstk) 93 const sigset_t *sigmask, const stack_t *sigstk)
94{ 94{
95 struct proc *p = l->l_proc; 95 struct proc *p = l->l_proc;
96 vaddr_t uaddr; 96 vaddr_t uaddr;
97 int error; 97 int error;
98 98
99 /* XXX check against resource limits */ 99 /* XXX check against resource limits */
100 100
101 uaddr = uvm_uarea_alloc(); 101 uaddr = uvm_uarea_alloc();
102 if (__predict_false(uaddr == 0)) 102 if (__predict_false(uaddr == 0))
103 return ENOMEM; 103 return ENOMEM;
104 104
105 error = lwp_create(l, p, uaddr, flags & LWP_DETACHED, NULL, 0, 105 error = lwp_create(l, p, uaddr, flags & LWP_DETACHED, NULL, 0,
106 mi_startlwp, arg, l2, l->l_class, sigmask, &lwp_ss_init); 106 mi_startlwp, arg, l2, l->l_class, sigmask, &lwp_ss_init);
107 if (__predict_false(error)) { 107 if (__predict_false(error)) {
108 uvm_uarea_free(uaddr); 108 uvm_uarea_free(uaddr);
109 return error; 109 return error;
110 } 110 }
111 111
112 return 0; 112 return 0;
113} 113}
114 114
115int 115int
116sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap, 116sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
117 register_t *retval) 117 register_t *retval)
118{ 118{
119 /* { 119 /* {
120 syscallarg(const ucontext_t *) ucp; 120 syscallarg(const ucontext_t *) ucp;
121 syscallarg(u_long) flags; 121 syscallarg(u_long) flags;
122 syscallarg(lwpid_t *) new_lwp; 122 syscallarg(lwpid_t *) new_lwp;
123 } */ 123 } */
124 struct proc *p = l->l_proc; 124 struct proc *p = l->l_proc;
125 ucontext_t *newuc; 125 ucontext_t *newuc;
126 lwp_t *l2; 126 lwp_t *l2;
127 int error; 127 int error;
128 128
129 newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP); 129 newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
130 error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize); 130 error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
131 if (error) 131 if (error)
132 goto fail; 132 goto fail;
133 133
134 /* validate the ucontext */ 134 /* validate the ucontext */
135 if ((newuc->uc_flags & _UC_CPU) == 0) { 135 if ((newuc->uc_flags & _UC_CPU) == 0) {
136 error = EINVAL; 136 error = EINVAL;
137 goto fail; 137 goto fail;
138 } 138 }
139 error = cpu_mcontext_validate(l, &newuc->uc_mcontext); 139 error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
140 if (error) 140 if (error)
141 goto fail; 141 goto fail;
142 142
143 const sigset_t *sigmask = newuc->uc_flags & _UC_SIGMASK ? 143 const sigset_t *sigmask = newuc->uc_flags & _UC_SIGMASK ?
144 &newuc->uc_sigmask : &l->l_sigmask; 144 &newuc->uc_sigmask : &l->l_sigmask;
145 error = do_lwp_create(l, newuc, SCARG(uap, flags), &l2, sigmask, 145 error = do_lwp_create(l, newuc, SCARG(uap, flags), &l2, sigmask,
146 &SS_INIT); 146 &SS_INIT);
147 if (error) 147 if (error)
148 goto fail; 148 goto fail;
149 149
150 error = copyout(&l2->l_lid, SCARG(uap, new_lwp), sizeof(l2->l_lid)); 150 error = copyout(&l2->l_lid, SCARG(uap, new_lwp), sizeof(l2->l_lid));
151 if (error == 0) { 151 if (error == 0) {
152 lwp_start(l2, SCARG(uap, flags)); 152 lwp_start(l2, SCARG(uap, flags));
153 return 0; 153 return 0;
154 } 154 }
155 lwp_exit(l2); 155 lwp_exit(l2);
156 fail: 156 fail:
157 kmem_free(newuc, sizeof(ucontext_t)); 157 kmem_free(newuc, sizeof(ucontext_t));
158 return error; 158 return error;
159} 159}
160 160
161int 161int
162sys__lwp_exit(struct lwp *l, const void *v, register_t *retval) 162sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
163{ 163{
164 164
165 lwp_exit(l); 165 lwp_exit(l);
166 return 0; 166 return 0;
167} 167}
168 168
169int 169int
170sys__lwp_self(struct lwp *l, const void *v, register_t *retval) 170sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
171{ 171{
172 172
173 *retval = l->l_lid; 173 *retval = l->l_lid;
174 return 0; 174 return 0;
175} 175}
176 176
177int 177int
178sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval) 178sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
179{ 179{
180 180
181 *retval = (uintptr_t)l->l_private; 181 *retval = (uintptr_t)l->l_private;
182 return 0; 182 return 0;
183} 183}
184 184
185int 185int
186sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap, 186sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
187 register_t *retval) 187 register_t *retval)
188{ 188{
189 /* { 189 /* {
190 syscallarg(void *) ptr; 190 syscallarg(void *) ptr;
191 } */ 191 } */
192 192
193 return lwp_setprivate(l, SCARG(uap, ptr)); 193 return lwp_setprivate(l, SCARG(uap, ptr));
194} 194}
195 195
196int 196int
197sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap, 197sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
198 register_t *retval) 198 register_t *retval)
199{ 199{
200 /* { 200 /* {
201 syscallarg(lwpid_t) target; 201 syscallarg(lwpid_t) target;
202 } */ 202 } */
203 struct proc *p = l->l_proc; 203 struct proc *p = l->l_proc;
204 struct lwp *t; 204 struct lwp *t;
205 int error; 205 int error;
206 206
207 mutex_enter(p->p_lock); 207 mutex_enter(p->p_lock);
208 if ((t = lwp_find(p, SCARG(uap, target))) == NULL) { 208 if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
209 mutex_exit(p->p_lock); 209 mutex_exit(p->p_lock);
210 return ESRCH; 210 return ESRCH;
211 } 211 }
212 212
213 /* 213 /*
214 * Check for deadlock, which is only possible when we're suspending 214 * Check for deadlock, which is only possible when we're suspending
215 * ourself. XXX There is a short race here, as p_nrlwps is only 215 * ourself. XXX There is a short race here, as p_nrlwps is only
216 * incremented when an LWP suspends itself on the kernel/user 216 * incremented when an LWP suspends itself on the kernel/user
217 * boundary. It's still possible to kill -9 the process so we 217 * boundary. It's still possible to kill -9 the process so we
218 * don't bother checking further. 218 * don't bother checking further.
219 */ 219 */
220 lwp_lock(t); 220 lwp_lock(t);
221 if ((t == l && p->p_nrlwps == 1) || 221 if ((t == l && p->p_nrlwps == 1) ||
222 (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) { 222 (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
223 lwp_unlock(t); 223 lwp_unlock(t);
224 mutex_exit(p->p_lock); 224 mutex_exit(p->p_lock);
225 return EDEADLK; 225 return EDEADLK;
226 } 226 }
227 227
228 /* 228 /*
229 * Suspend the LWP. XXX If it's on a different CPU, we should wait 229 * Suspend the LWP. XXX If it's on a different CPU, we should wait
230 * for it to be preempted, where it will put itself to sleep.  230 * for it to be preempted, where it will put itself to sleep.
231 * 231 *
232 * Suspension of the current LWP will happen on return to userspace. 232 * Suspension of the current LWP will happen on return to userspace.
233 */ 233 */
234 error = lwp_suspend(l, t); 234 error = lwp_suspend(l, t);
235 if (error) { 235 if (error) {
236 mutex_exit(p->p_lock); 236 mutex_exit(p->p_lock);
237 return error; 237 return error;
238 } 238 }
239 239
240 /* 240 /*
241 * Wait for: 241 * Wait for:
242 * o process exiting 242 * o process exiting
243 * o target LWP suspended 243 * o target LWP suspended
244 * o target LWP not suspended and L_WSUSPEND clear 244 * o target LWP not suspended and L_WSUSPEND clear
245 * o target LWP exited 245 * o target LWP exited
246 */ 246 */
247 for (;;) { 247 for (;;) {
248 error = cv_wait_sig(&p->p_lwpcv, p->p_lock); 248 error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
249 if (error) { 249 if (error) {
250 error = ERESTART; 250 error = ERESTART;
251 break; 251 break;
252 } 252 }
253 if (lwp_find(p, SCARG(uap, target)) == NULL) { 253 if (lwp_find(p, SCARG(uap, target)) == NULL) {
254 error = ESRCH; 254 error = ESRCH;
255 break; 255 break;
256 } 256 }
257 if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) { 257 if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
258 error = ERESTART; 258 error = ERESTART;
259 break; 259 break;
260 } 260 }
261 if (t->l_stat == LSSUSPENDED || 261 if (t->l_stat == LSSUSPENDED ||
262 (t->l_flag & LW_WSUSPEND) == 0) 262 (t->l_flag & LW_WSUSPEND) == 0)
263 break; 263 break;
264 } 264 }
265 mutex_exit(p->p_lock); 265 mutex_exit(p->p_lock);
266 266
267 return error; 267 return error;
268} 268}
269 269
270int 270int
271sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap, 271sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
272 register_t *retval) 272 register_t *retval)
273{ 273{
274 /* { 274 /* {
275 syscallarg(lwpid_t) target; 275 syscallarg(lwpid_t) target;
276 } */ 276 } */
277 int error; 277 int error;
278 struct proc *p = l->l_proc; 278 struct proc *p = l->l_proc;
279 struct lwp *t; 279 struct lwp *t;
280 280
281 error = 0; 281 error = 0;
282 282
283 mutex_enter(p->p_lock); 283 mutex_enter(p->p_lock);
284 if ((t = lwp_find(p, SCARG(uap, target))) == NULL) { 284 if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
285 mutex_exit(p->p_lock); 285 mutex_exit(p->p_lock);
286 return ESRCH; 286 return ESRCH;
287 } 287 }
288 288
289 lwp_lock(t); 289 lwp_lock(t);
290 lwp_continue(t); 290 lwp_continue(t);
291 mutex_exit(p->p_lock); 291 mutex_exit(p->p_lock);
292 292
293 return error; 293 return error;
294} 294}
295 295
296int 296int
297sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap, 297sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
298 register_t *retval) 298 register_t *retval)
299{ 299{
300 /* { 300 /* {
301 syscallarg(lwpid_t) target; 301 syscallarg(lwpid_t) target;
302 } */ 302 } */
303 struct lwp *t; 303 struct lwp *t;
304 struct proc *p; 304 struct proc *p;
305 int error; 305 int error;
306 306
307 p = l->l_proc; 307 p = l->l_proc;
308 mutex_enter(p->p_lock); 308 mutex_enter(p->p_lock);
309 309
310 if ((t = lwp_find(p, SCARG(uap, target))) == NULL) { 310 if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
311 mutex_exit(p->p_lock); 311 mutex_exit(p->p_lock);
312 return ESRCH; 312 return ESRCH;
313 } 313 }
314 314
315 lwp_lock(t); 315 lwp_lock(t);
316 t->l_flag |= (LW_CANCELLED | LW_UNPARKED); 316 t->l_flag |= (LW_CANCELLED | LW_UNPARKED);
317 317
318 if (t->l_stat != LSSLEEP) { 318 if (t->l_stat != LSSLEEP) {
319 lwp_unlock(t); 319 lwp_unlock(t);
320 error = ENODEV; 320 error = ENODEV;
321 } else if ((t->l_flag & LW_SINTR) == 0) { 321 } else if ((t->l_flag & LW_SINTR) == 0) {
322 lwp_unlock(t); 322 lwp_unlock(t);
323 error = EBUSY; 323 error = EBUSY;
324 } else { 324 } else {
325 /* Wake it up. lwp_unsleep() will release the LWP lock. */ 325 /* Wake it up. lwp_unsleep() will release the LWP lock. */
326 lwp_unsleep(t, true); 326 lwp_unsleep(t, true);
327 error = 0; 327 error = 0;
328 } 328 }
329 329
330 mutex_exit(p->p_lock); 330 mutex_exit(p->p_lock);
331 331
332 return error; 332 return error;
333} 333}
334 334
335int 335int
336sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap, 336sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
337 register_t *retval) 337 register_t *retval)
338{ 338{
339 /* { 339 /* {
340 syscallarg(lwpid_t) wait_for; 340 syscallarg(lwpid_t) wait_for;
341 syscallarg(lwpid_t *) departed; 341 syscallarg(lwpid_t *) departed;
342 } */ 342 } */
343 struct proc *p = l->l_proc; 343 struct proc *p = l->l_proc;
344 int error; 344 int error;
345 lwpid_t dep; 345 lwpid_t dep;
346 346
347 mutex_enter(p->p_lock); 347 mutex_enter(p->p_lock);
348 error = lwp_wait(l, SCARG(uap, wait_for), &dep, false); 348 error = lwp_wait(l, SCARG(uap, wait_for), &dep, false);
349 mutex_exit(p->p_lock); 349 mutex_exit(p->p_lock);
350 350
351 if (!error && SCARG(uap, departed)) { 351 if (!error && SCARG(uap, departed)) {
352 error = copyout(&dep, SCARG(uap, departed), sizeof(dep)); 352 error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
353 } 353 }
354 354
355 return error; 355 return error;
356} 356}
357 357
358int 358int
359sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap, 359sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
360 register_t *retval) 360 register_t *retval)
361{ 361{
362 /* { 362 /* {
363 syscallarg(lwpid_t) target; 363 syscallarg(lwpid_t) target;
364 syscallarg(int) signo; 364 syscallarg(int) signo;
365 } */ 365 } */
366 struct proc *p = l->l_proc; 366 struct proc *p = l->l_proc;
367 struct lwp *t; 367 struct lwp *t;
368 ksiginfo_t ksi; 368 ksiginfo_t ksi;
369 int signo = SCARG(uap, signo); 369 int signo = SCARG(uap, signo);
370 int error = 0; 370 int error = 0;
371 371
372 if ((u_int)signo >= NSIG) 372 if ((u_int)signo >= NSIG)
373 return EINVAL; 373 return EINVAL;
374 374
375 KSI_INIT(&ksi); 375 KSI_INIT(&ksi);
376 ksi.ksi_signo = signo; 376 ksi.ksi_signo = signo;
377 ksi.ksi_code = SI_LWP; 377 ksi.ksi_code = SI_LWP;
378 ksi.ksi_pid = p->p_pid; 378 ksi.ksi_pid = p->p_pid;
379 ksi.ksi_uid = kauth_cred_geteuid(l->l_cred); 379 ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
380 ksi.ksi_lid = SCARG(uap, target); 380 ksi.ksi_lid = SCARG(uap, target);
381 381
382 mutex_enter(proc_lock); 382 mutex_enter(proc_lock);
383 mutex_enter(p->p_lock); 383 mutex_enter(p->p_lock);
384 if ((t = lwp_find(p, ksi.ksi_lid)) == NULL) 384 if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
385 error = ESRCH; 385 error = ESRCH;
386 else if (signo != 0) 386 else if (signo != 0)
387 kpsignal2(p, &ksi); 387 kpsignal2(p, &ksi);
388 mutex_exit(p->p_lock); 388 mutex_exit(p->p_lock);
389 mutex_exit(proc_lock); 389 mutex_exit(proc_lock);
390 390
391 return error; 391 return error;
392} 392}
393 393
394int 394int
395sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap, 395sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
396 register_t *retval) 396 register_t *retval)
397{ 397{
398 /* { 398 /* {
399 syscallarg(lwpid_t) target; 399 syscallarg(lwpid_t) target;
400 } */ 400 } */
401 struct proc *p; 401 struct proc *p;
402 struct lwp *t; 402 struct lwp *t;
403 lwpid_t target; 403 lwpid_t target;
404 int error; 404 int error;
405 405
406 target = SCARG(uap, target); 406 target = SCARG(uap, target);
407 p = l->l_proc; 407 p = l->l_proc;
408 408
409 mutex_enter(p->p_lock); 409 mutex_enter(p->p_lock);
410 410
411 if (l->l_lid == target) 411 if (l->l_lid == target)
412 t = l; 412 t = l;
413 else { 413 else {
414 /* 414 /*
415 * We can't use lwp_find() here because the target might 415 * We can't use lwp_find() here because the target might
416 * be a zombie. 416 * be a zombie.
417 */ 417 */
418 t = radix_tree_lookup_node(&p->p_lwptree, 418 t = radix_tree_lookup_node(&p->p_lwptree,
419 (uint64_t)(target - 1)); 419 (uint64_t)(target - 1));
420 KASSERT(t == NULL || t->l_lid == target); 420 KASSERT(t == NULL || t->l_lid == target);
421 } 421 }
422 422
423 /* 423 /*
424 * If the LWP is already detached, there's nothing to do. 424 * If the LWP is already detached, there's nothing to do.
425 * If it's a zombie, we need to clean up after it. LSZOMB 425 * If it's a zombie, we need to clean up after it. LSZOMB
426 * is visible with the proc mutex held. 426 * is visible with the proc mutex held.
427 * 427 *
428 * After we have detached or released the LWP, kick any 428 * After we have detached or released the LWP, kick any
429 * other LWPs that may be sitting in _lwp_wait(), waiting 429 * other LWPs that may be sitting in _lwp_wait(), waiting
430 * for the target LWP to exit. 430 * for the target LWP to exit.
431 */ 431 */
432 if (t != NULL && t->l_stat != LSIDL) { 432 if (t != NULL && t->l_stat != LSIDL) {
433 if ((t->l_prflag & LPR_DETACHED) == 0) { 433 if ((t->l_prflag & LPR_DETACHED) == 0) {
434 p->p_ndlwps++; 434 p->p_ndlwps++;
435 t->l_prflag |= LPR_DETACHED; 435 t->l_prflag |= LPR_DETACHED;
436 if (t->l_stat == LSZOMB) { 436 if (t->l_stat == LSZOMB) {
437 /* Releases proc mutex. */ 437 /* Releases proc mutex. */
438 lwp_free(t, false, false); 438 lwp_free(t, false, false);
439 return 0; 439 return 0;
440 } 440 }
441 error = 0; 441 error = 0;
442 442
443 /* 443 /*
444 * Have any LWPs sleeping in lwp_wait() recheck 444 * Have any LWPs sleeping in lwp_wait() recheck
445 * for deadlock. 445 * for deadlock.
446 */ 446 */
447 cv_broadcast(&p->p_lwpcv); 447 cv_broadcast(&p->p_lwpcv);
448 } else 448 } else
449 error = EINVAL; 449 error = EINVAL;
450 } else 450 } else
451 error = ESRCH; 451 error = ESRCH;
452 452
453 mutex_exit(p->p_lock); 453 mutex_exit(p->p_lock);
454 454
455 return error; 455 return error;
456} 456}
457 457
458int 458int
459lwp_unpark(const lwpid_t *tp, const u_int ntargets) 459lwp_unpark(const lwpid_t *tp, const u_int ntargets)
460{ 460{
461 uint64_t id; 461 uint64_t id;
462 u_int target; 462 u_int target;
463 int error; 463 int error;
464 proc_t *p; 464 proc_t *p;
465 lwp_t *t; 465 lwp_t *t;
466 466
467 p = curproc; 467 p = curproc;
468 error = 0; 468 error = 0;
469 469
470 rw_enter(&p->p_treelock, RW_READER); 470 rw_enter(&p->p_treelock, RW_READER);
471 for (target = 0; target < ntargets; target++) { 471 for (target = 0; target < ntargets; target++) {
472 /* 472 /*
473 * We don't bother excluding zombies or idle LWPs here, as 473 * We don't bother excluding zombies or idle LWPs here, as
474 * setting LW_UNPARKED on them won't do any harm. 474 * setting LW_UNPARKED on them won't do any harm.
475 */ 475 */
476 id = (uint64_t)(tp[target] - 1); 476 id = (uint64_t)(tp[target] - 1);
477 t = radix_tree_lookup_node(&p->p_lwptree, id); 477 t = radix_tree_lookup_node(&p->p_lwptree, id);
478 if (t == NULL) { 478 if (t == NULL) {
479 error = ESRCH; 479 error = ESRCH;
480 continue; 480 continue;
481 } 481 }
482 482
483 /* It may not have parked yet or we may have raced. */ 
484 lwp_lock(t); 483 lwp_lock(t);
485 if (t->l_syncobj == &lwp_park_syncobj) { 484 if (t->l_syncobj == &lwp_park_syncobj) {
486 /* Releases the LWP lock. */ 485 /*
 486 * As expected it's parked, so wake it up.
 487 * lwp_unsleep() will release the LWP lock.
 488 */
487 lwp_unsleep(t, true); 489 lwp_unsleep(t, true);
488 } else { 490 } else {
489 /* 491 /*
490 * Set the operation pending. The next call to 492 * It hasn't parked yet because the wakeup side won
491 * _lwp_park() will return early. 493 * the race, or something else has happened to make
 494 * the thread not park. Why doesn't really matter.
 495 * Set the operation pending, so that the next call
 496 * to _lwp_park() in the LWP returns early. If it
 497 * turns out to be a spurious wakeup, no harm done.
492 */ 498 */
493 t->l_flag |= LW_UNPARKED; 499 t->l_flag |= LW_UNPARKED;
494 lwp_unlock(t); 500 lwp_unlock(t);
495 } 501 }
496 } 502 }
497 rw_exit(&p->p_treelock); 503 rw_exit(&p->p_treelock);
498 504
499 return error; 505 return error;
500} 506}
501 507
502int 508int
503lwp_park(clockid_t clock_id, int flags, struct timespec *ts) 509lwp_park(clockid_t clock_id, int flags, struct timespec *ts)
504{ 510{
505 int timo, error; 511 int timo, error;
506 struct timespec start; 512 struct timespec start;
507 lwp_t *l; 513 lwp_t *l;
508 bool timeremain = !(flags & TIMER_ABSTIME) && ts; 514 bool timeremain = !(flags & TIMER_ABSTIME) && ts;
509 515
510 if (ts != NULL) { 516 if (ts != NULL) {
511 if ((error = ts2timo(clock_id, flags, ts, &timo,  517 if ((error = ts2timo(clock_id, flags, ts, &timo,
512 timeremain ? &start : NULL)) != 0) 518 timeremain ? &start : NULL)) != 0)
513 return error; 519 return error;
514 KASSERT(timo != 0); 520 KASSERT(timo != 0);
515 } else { 521 } else {
516 timo = 0; 522 timo = 0;
517 } 523 }
518 524
519 /* 525 /*
520 * Before going the full route and blocking, check to see if an 526 * Before going the full route and blocking, check to see if an
521 * unpark op is pending. 527 * unpark op is pending.
522 */ 528 */
523 l = curlwp; 529 l = curlwp;
524 lwp_lock(l); 530 lwp_lock(l);
525 if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) { 531 if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
526 l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED); 532 l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
527 lwp_unlock(l); 533 lwp_unlock(l);
528 return EALREADY; 534 return EALREADY;
529 } 535 }
530 l->l_biglocks = 0; 536 l->l_biglocks = 0;
531 sleepq_enqueue(NULL, l, "parked", &lwp_park_syncobj); 537 sleepq_enqueue(NULL, l, "parked", &lwp_park_syncobj);
532 error = sleepq_block(timo, true); 538 error = sleepq_block(timo, true);
533 switch (error) { 539 switch (error) {
534 case EWOULDBLOCK: 540 case EWOULDBLOCK:
535 error = ETIMEDOUT; 541 error = ETIMEDOUT;
536 if (timeremain) 542 if (timeremain)
537 memset(ts, 0, sizeof(*ts)); 543 memset(ts, 0, sizeof(*ts));
538 break; 544 break;
539 case ERESTART: 545 case ERESTART:
540 error = EINTR; 546 error = EINTR;
541 /*FALLTHROUGH*/ 547 /*FALLTHROUGH*/
542 default: 548 default:
543 if (timeremain) 549 if (timeremain)
544 clock_timeleft(clock_id, ts, &start); 550 clock_timeleft(clock_id, ts, &start);
545 break; 551 break;
546 } 552 }
547 return error; 553 return error;
548} 554}
549 555
550/* 556/*
551 * 'park' an LWP waiting on a user-level synchronisation object. The LWP 557 * 'park' an LWP waiting on a user-level synchronisation object. The LWP
552 * will remain parked until another LWP in the same process calls in and 558 * will remain parked until another LWP in the same process calls in and
553 * requests that it be unparked. 559 * requests that it be unparked.
554 */ 560 */
555int 561int
556sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap, 562sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap,
557 register_t *retval) 563 register_t *retval)
558{ 564{
559 /* { 565 /* {
560 syscallarg(clockid_t) clock_id; 566 syscallarg(clockid_t) clock_id;
561 syscallarg(int) flags; 567 syscallarg(int) flags;
562 syscallarg(struct timespec *) ts; 568 syscallarg(struct timespec *) ts;
563 syscallarg(lwpid_t) unpark; 569 syscallarg(lwpid_t) unpark;
564 syscallarg(const void *) hint; 570 syscallarg(const void *) hint;
565 syscallarg(const void *) unparkhint; 571 syscallarg(const void *) unparkhint;
566 } */ 572 } */
567 struct timespec ts, *tsp; 573 struct timespec ts, *tsp;
568 int error; 574 int error;
569 575
570 if (SCARG(uap, ts) == NULL) 576 if (SCARG(uap, ts) == NULL)
571 tsp = NULL; 577 tsp = NULL;
572 else { 578 else {
573 error = copyin(SCARG(uap, ts), &ts, sizeof(ts)); 579 error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
574 if (error != 0) 580 if (error != 0)
575 return error; 581 return error;
576 tsp = &ts; 582 tsp = &ts;
577 } 583 }
578 584
579 if (SCARG(uap, unpark) != 0) { 585 if (SCARG(uap, unpark) != 0) {
580 error = lwp_unpark(&SCARG(uap, unpark), 1); 586 error = lwp_unpark(&SCARG(uap, unpark), 1);
581 if (error != 0) 587 if (error != 0)
582 return error; 588 return error;
583 } 589 }
584 590
585 error = lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp); 591 error = lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp);
586 if (SCARG(uap, ts) != NULL && (SCARG(uap, flags) & TIMER_ABSTIME) == 0) 592 if (SCARG(uap, ts) != NULL && (SCARG(uap, flags) & TIMER_ABSTIME) == 0)
587 (void)copyout(tsp, SCARG(uap, ts), sizeof(*tsp)); 593 (void)copyout(tsp, SCARG(uap, ts), sizeof(*tsp));
588 return error; 594 return error;
589} 595}
590 596
591int 597int
592sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap, 598sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
593 register_t *retval) 599 register_t *retval)
594{ 600{
595 /* { 601 /* {
596 syscallarg(lwpid_t) target; 602 syscallarg(lwpid_t) target;
597 syscallarg(const void *) hint; 603 syscallarg(const void *) hint;
598 } */ 604 } */
599 605
600 return lwp_unpark(&SCARG(uap, target), 1); 606 return lwp_unpark(&SCARG(uap, target), 1);
601} 607}
602 608
603int 609int
604sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap, 610sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
605 register_t *retval) 611 register_t *retval)
606{ 612{
607 /* { 613 /* {
608 syscallarg(const lwpid_t *) targets; 614 syscallarg(const lwpid_t *) targets;
609 syscallarg(size_t) ntargets; 615 syscallarg(size_t) ntargets;
610 syscallarg(const void *) hint; 616 syscallarg(const void *) hint;
611 } */ 617 } */
612 lwpid_t targets[32], *tp; 618 lwpid_t targets[32], *tp;
613 int error; 619 int error;
614 u_int ntargets; 620 u_int ntargets;
615 size_t sz; 621 size_t sz;
616 622
617 ntargets = SCARG(uap, ntargets); 623 ntargets = SCARG(uap, ntargets);
618 if (SCARG(uap, targets) == NULL) { 624 if (SCARG(uap, targets) == NULL) {
619 /* 625 /*
620 * Let the caller know how much we are willing to do, and 626 * Let the caller know how much we are willing to do, and
621 * let it unpark the LWPs in blocks. 627 * let it unpark the LWPs in blocks.
622 */ 628 */
623 *retval = LWP_UNPARK_MAX; 629 *retval = LWP_UNPARK_MAX;
624 return 0; 630 return 0;
625 } 631 }
626 if (ntargets > LWP_UNPARK_MAX || ntargets == 0) 632 if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
627 return EINVAL; 633 return EINVAL;
628 634
629 /* 635 /*
630 * Copy in the target array. If it's a small number of LWPs, then 636 * Copy in the target array. If it's a small number of LWPs, then
631 * place the numbers on the stack. 637 * place the numbers on the stack.
632 */ 638 */
633 sz = sizeof(lwpid_t) * ntargets; 639 sz = sizeof(lwpid_t) * ntargets;
634 if (sz <= sizeof(targets)) 640 if (sz <= sizeof(targets))
635 tp = targets; 641 tp = targets;
636 else 642 else
637 tp = kmem_alloc(sz, KM_SLEEP); 643 tp = kmem_alloc(sz, KM_SLEEP);
638 error = copyin(SCARG(uap, targets), tp, sz); 644 error = copyin(SCARG(uap, targets), tp, sz);
639 if (error != 0) { 645 if (error != 0) {
640 if (tp != targets) { 646 if (tp != targets) {
641 kmem_free(tp, sz); 647 kmem_free(tp, sz);
642 } 648 }
643 return error; 649 return error;
644 } 650 }
645 error = lwp_unpark(tp, ntargets); 651 error = lwp_unpark(tp, ntargets);
646 if (tp != targets) 652 if (tp != targets)
647 kmem_free(tp, sz); 653 kmem_free(tp, sz);
648 return error; 654 return error;
649} 655}
650 656
/*
 * Set the name of the target LWP (0 == the calling LWP).  The name is
 * copied into a fresh MAXCOMLEN buffer; over-long names are silently
 * truncated rather than rejected.
 */
int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const char *) name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	/* Target 0 means "the calling LWP". */
	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	/* Allocate and copy in before taking any locks: both may sleep. */
	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		/* Accept truncated names; just ensure NUL termination. */
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	/* Swap in the new name under the LWP lock (held inside p_lock). */
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	/* Free the displaced name only after all locks are dropped. */
	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}
698 704
/*
 * Fetch the name of the target LWP (0 == the calling LWP) into a
 * user-supplied buffer.  An unnamed LWP yields the empty string.
 */
int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(char *) name;
		syscallarg(size_t) len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	size_t len;
	proc_t *p;
	lwp_t *t;

	/* Target 0 means "the calling LWP". */
	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	/*
	 * Snapshot the name into a local buffer under the LWP lock so
	 * the copyout below can run (and possibly fault) unlocked.
	 */
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strlcpy(name, t->l_name, sizeof(name));
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	/* Never copy out more than the kernel-side buffer holds. */
	len = uimin(SCARG(uap, len), sizeof(name));

	return copyoutstr(name, SCARG(uap, name), len, NULL);
}
735 741
736int 742int
737sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap, 743sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
738 register_t *retval) 744 register_t *retval)
739{ 745{
740 /* { 746 /* {
741 syscallarg(int) features; 747 syscallarg(int) features;
742 syscallarg(struct lwpctl **) address; 748 syscallarg(struct lwpctl **) address;
743 } */ 749 } */
744 int error, features; 750 int error, features;
745 vaddr_t vaddr; 751 vaddr_t vaddr;
746 752
747 features = SCARG(uap, features); 753 features = SCARG(uap, features);
748 features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR); 754 features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
749 if (features != 0) 755 if (features != 0)
750 return ENODEV; 756 return ENODEV;
751 if ((error = lwp_ctl_alloc(&vaddr)) != 0) 757 if ((error = lwp_ctl_alloc(&vaddr)) != 0)
752 return error; 758 return error;
753 return copyout(&vaddr, SCARG(uap, address), sizeof(void *)); 759 return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
754} 760}