Mon Oct 26 17:35:39 2020 UTC
Depend directly on EXEC_ELF{32,64} to determine which versions of the coredump
code are available.


(christos)
diff -r1.32 -r1.33 src/sys/kern/kern_core.c
diff -r1.392 -r1.393 src/sys/kern/kern_sig.c
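
The substance of the change, condensed from the kern_core.c hunks below (all identifiers exactly as they appear in the diff): the coredump module previously inferred which ELF coredump hooks to register from the kernel ABI (_LP64) and COMPAT_NETBSD32, and now tests the exec-format options EXEC_ELF32/EXEC_ELF64 directly, which is why both files gain #include "opt_execfmt.h".

	/* before: infer the available ELF coredump flavours from the ABI */
	#if !defined(_LP64) || defined(COMPAT_NETBSD32)
		MODULE_HOOK_SET(coredump_elf32_hook, real_coredump_elf32);
	#endif
	#ifdef _LP64
		MODULE_HOOK_SET(coredump_elf64_hook, real_coredump_elf64);
	#endif

	/* after: test the exec-format options from opt_execfmt.h directly */
	#ifdef EXEC_ELF32
		MODULE_HOOK_SET(coredump_elf32_hook, real_coredump_elf32);
	#endif
	#ifdef EXEC_ELF64
		MODULE_HOOK_SET(coredump_elf64_hook, real_coredump_elf64);
	#endif

The matching MODULE_HOOK_UNSET calls in the MODULE_CMD_FINI path get the same guard change.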

cvs diff -r1.32 -r1.33 src/sys/kern/kern_core.c

--- src/sys/kern/kern_core.c 2020/10/20 13:47:30 1.32
+++ src/sys/kern/kern_core.c 2020/10/26 17:35:39 1.33
@@ -1,370 +1,371 @@
1/* $NetBSD: kern_core.c,v 1.32 2020/10/20 13:47:30 christos Exp $ */ 1/* $NetBSD: kern_core.c,v 1.33 2020/10/26 17:35:39 christos Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993 4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc. 6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed 7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph 8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc. 10 * the permission of UNIX System Laboratories, Inc.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
15 * 1. Redistributions of source code must retain the above copyright 15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer. 16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright 17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the 18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution. 19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors 20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software 21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission. 22 * without specific prior written permission.
23 * 23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE. 34 * SUCH DAMAGE.
35 * 35 *
36 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95 36 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95
37 */ 37 */
38 38
39#include <sys/cdefs.h> 39#include <sys/cdefs.h>
40__KERNEL_RCSID(0, "$NetBSD: kern_core.c,v 1.32 2020/10/20 13:47:30 christos Exp $"); 40__KERNEL_RCSID(0, "$NetBSD: kern_core.c,v 1.33 2020/10/26 17:35:39 christos Exp $");
41 41
42#ifdef _KERNEL_OPT 42#ifdef _KERNEL_OPT
 43#include "opt_execfmt.h"
43#include "opt_compat_netbsd32.h" 44#include "opt_compat_netbsd32.h"
44#endif 45#endif
45 46
46#include <sys/param.h> 47#include <sys/param.h>
47#include <sys/vnode.h> 48#include <sys/vnode.h>
48#include <sys/namei.h> 49#include <sys/namei.h>
49#include <sys/acct.h> 50#include <sys/acct.h>
50#include <sys/file.h> 51#include <sys/file.h>
51#include <sys/stat.h> 52#include <sys/stat.h>
52#include <sys/proc.h> 53#include <sys/proc.h>
53#include <sys/exec.h> 54#include <sys/exec.h>
54#include <sys/filedesc.h> 55#include <sys/filedesc.h>
55#include <sys/kauth.h> 56#include <sys/kauth.h>
56#include <sys/module.h> 57#include <sys/module.h>
57#include <sys/compat_stub.h> 58#include <sys/compat_stub.h>
58#include <sys/exec_elf.h> 59#include <sys/exec_elf.h>
59 60
60#ifdef COMPAT_NETBSD32 61#ifdef COMPAT_NETBSD32
61#define COREDUMP_MODULE_DEP "compat_netbsd32_ptrace" 62#define COREDUMP_MODULE_DEP "compat_netbsd32_ptrace"
62#else 63#else
63#define COREDUMP_MODULE_DEP NULL 64#define COREDUMP_MODULE_DEP NULL
64#endif 65#endif
65 66
66MODULE(MODULE_CLASS_MISC, coredump, COREDUMP_MODULE_DEP); 67MODULE(MODULE_CLASS_MISC, coredump, COREDUMP_MODULE_DEP);
67 68
68struct coredump_iostate { 69struct coredump_iostate {
69 struct lwp *io_lwp; 70 struct lwp *io_lwp;
70 struct vnode *io_vp; 71 struct vnode *io_vp;
71 kauth_cred_t io_cred; 72 kauth_cred_t io_cred;
72 off_t io_offset; 73 off_t io_offset;
73}; 74};
74 75
75static int coredump(struct lwp *, const char *); 76static int coredump(struct lwp *, const char *);
76static int coredump_buildname(struct proc *, char *, const char *, size_t); 77static int coredump_buildname(struct proc *, char *, const char *, size_t);
77static int coredump_write(struct coredump_iostate *, enum uio_seg segflg, 78static int coredump_write(struct coredump_iostate *, enum uio_seg segflg,
78 const void *, size_t); 79 const void *, size_t);
79static off_t coredump_offset(struct coredump_iostate *); 80static off_t coredump_offset(struct coredump_iostate *);
80 81
81static int 82static int
82coredump_modcmd(modcmd_t cmd, void *arg) 83coredump_modcmd(modcmd_t cmd, void *arg)
83{ 84{
84 85
85 switch (cmd) { 86 switch (cmd) {
86 case MODULE_CMD_INIT: 87 case MODULE_CMD_INIT:
87 MODULE_HOOK_SET(coredump_hook, coredump); 88 MODULE_HOOK_SET(coredump_hook, coredump);
88 MODULE_HOOK_SET(coredump_write_hook, coredump_write); 89 MODULE_HOOK_SET(coredump_write_hook, coredump_write);
89 MODULE_HOOK_SET(coredump_offset_hook, coredump_offset); 90 MODULE_HOOK_SET(coredump_offset_hook, coredump_offset);
90 MODULE_HOOK_SET(coredump_netbsd_hook, real_coredump_netbsd); 91 MODULE_HOOK_SET(coredump_netbsd_hook, real_coredump_netbsd);
91#if !defined(_LP64) || defined(COMPAT_NETBSD32) 92#ifdef EXEC_ELF32
92 MODULE_HOOK_SET(coredump_elf32_hook, real_coredump_elf32); 93 MODULE_HOOK_SET(coredump_elf32_hook, real_coredump_elf32);
93#endif 94#endif
94#ifdef _LP64 95#ifdef EXEC_ELF64
95 MODULE_HOOK_SET(coredump_elf64_hook, real_coredump_elf64); 96 MODULE_HOOK_SET(coredump_elf64_hook, real_coredump_elf64);
96#endif 97#endif
97 MODULE_HOOK_SET(uvm_coredump_walkmap_hook, 98 MODULE_HOOK_SET(uvm_coredump_walkmap_hook,
98 uvm_coredump_walkmap); 99 uvm_coredump_walkmap);
99 MODULE_HOOK_SET(uvm_coredump_count_segs_hook, 100 MODULE_HOOK_SET(uvm_coredump_count_segs_hook,
100 uvm_coredump_count_segs); 101 uvm_coredump_count_segs);
101 return 0; 102 return 0;
102 case MODULE_CMD_FINI: 103 case MODULE_CMD_FINI:
103 MODULE_HOOK_UNSET(uvm_coredump_count_segs_hook); 104 MODULE_HOOK_UNSET(uvm_coredump_count_segs_hook);
104 MODULE_HOOK_UNSET(uvm_coredump_walkmap_hook); 105 MODULE_HOOK_UNSET(uvm_coredump_walkmap_hook);
105#ifdef _LP64 106#ifdef EXEC_ELF64
106 MODULE_HOOK_UNSET(coredump_elf64_hook); 107 MODULE_HOOK_UNSET(coredump_elf64_hook);
107#endif 108#endif
108#if !defined(_LP64) || defined(COMPAT_NETBSD32) 109#ifdef EXEC_ELF32
109 MODULE_HOOK_UNSET(coredump_elf32_hook); 110 MODULE_HOOK_UNSET(coredump_elf32_hook);
110#endif 111#endif
111 MODULE_HOOK_UNSET(coredump_netbsd_hook); 112 MODULE_HOOK_UNSET(coredump_netbsd_hook);
112 MODULE_HOOK_UNSET(coredump_offset_hook); 113 MODULE_HOOK_UNSET(coredump_offset_hook);
113 MODULE_HOOK_UNSET(coredump_write_hook); 114 MODULE_HOOK_UNSET(coredump_write_hook);
114 MODULE_HOOK_UNSET(coredump_hook); 115 MODULE_HOOK_UNSET(coredump_hook);
115 return 0; 116 return 0;
116 default: 117 default:
117 return ENOTTY; 118 return ENOTTY;
118 } 119 }
119} 120}
120 121
121/* 122/*
122 * Dump core, into a file named "progname.core" or "core" (depending on the 123 * Dump core, into a file named "progname.core" or "core" (depending on the
123 * value of shortcorename), unless the process was setuid/setgid. 124 * value of shortcorename), unless the process was setuid/setgid.
124 */ 125 */
125static int 126static int
126coredump(struct lwp *l, const char *pattern) 127coredump(struct lwp *l, const char *pattern)
127{ 128{
128 struct vnode *vp; 129 struct vnode *vp;
129 struct proc *p; 130 struct proc *p;
130 struct vmspace *vm; 131 struct vmspace *vm;
131 kauth_cred_t cred; 132 kauth_cred_t cred;
132 struct pathbuf *pb; 133 struct pathbuf *pb;
133 struct nameidata nd; 134 struct nameidata nd;
134 struct vattr vattr; 135 struct vattr vattr;
135 struct coredump_iostate io; 136 struct coredump_iostate io;
136 struct plimit *lim; 137 struct plimit *lim;
137 int error, error1; 138 int error, error1;
138 char *name, *lastslash; 139 char *name, *lastslash;
139 140
140 name = PNBUF_GET(); 141 name = PNBUF_GET();
141 142
142 p = l->l_proc; 143 p = l->l_proc;
143 vm = p->p_vmspace; 144 vm = p->p_vmspace;
144 145
145 mutex_enter(&proc_lock); /* p_session */ 146 mutex_enter(&proc_lock); /* p_session */
146 mutex_enter(p->p_lock); 147 mutex_enter(p->p_lock);
147 148
148 /* 149 /*
149 * Refuse to core if the data + stack + user size is larger than 150 * Refuse to core if the data + stack + user size is larger than
150 * the core dump limit. XXX THIS IS WRONG, because of mapped 151 * the core dump limit. XXX THIS IS WRONG, because of mapped
151 * data. 152 * data.
152 */ 153 */
153 if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >= 154 if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
154 p->p_rlimit[RLIMIT_CORE].rlim_cur) { 155 p->p_rlimit[RLIMIT_CORE].rlim_cur) {
155 error = EFBIG; /* better error code? */ 156 error = EFBIG; /* better error code? */
156 mutex_exit(p->p_lock); 157 mutex_exit(p->p_lock);
157 mutex_exit(&proc_lock); 158 mutex_exit(&proc_lock);
158 goto done; 159 goto done;
159 } 160 }
160 161
161 /* 162 /*
162 * It may well not be curproc, so grab a reference to its current 163 * It may well not be curproc, so grab a reference to its current
163 * credentials. 164 * credentials.
164 */ 165 */
165 kauth_cred_hold(p->p_cred); 166 kauth_cred_hold(p->p_cred);
166 cred = p->p_cred; 167 cred = p->p_cred;
167 168
168 /* 169 /*
169 * Make sure the process has not set-id, to prevent data leaks, 170 * Make sure the process has not set-id, to prevent data leaks,
170 * unless it was specifically requested to allow set-id coredumps. 171 * unless it was specifically requested to allow set-id coredumps.
171 */ 172 */
172 if (p->p_flag & PK_SUGID) { 173 if (p->p_flag & PK_SUGID) {
173 if (!security_setidcore_dump) { 174 if (!security_setidcore_dump) {
174 error = EPERM; 175 error = EPERM;
175 mutex_exit(p->p_lock); 176 mutex_exit(p->p_lock);
176 mutex_exit(&proc_lock); 177 mutex_exit(&proc_lock);
177 goto done; 178 goto done;
178 } 179 }
179 pattern = security_setidcore_path; 180 pattern = security_setidcore_path;
180 } 181 }
181 182
182 /* Lock, as p_limit and pl_corename might change. */ 183 /* Lock, as p_limit and pl_corename might change. */
183 lim = p->p_limit; 184 lim = p->p_limit;
184 mutex_enter(&lim->pl_lock); 185 mutex_enter(&lim->pl_lock);
185 if (pattern == NULL) { 186 if (pattern == NULL) {
186 pattern = lim->pl_corename; 187 pattern = lim->pl_corename;
187 } 188 }
188 error = coredump_buildname(p, name, pattern, MAXPATHLEN); 189 error = coredump_buildname(p, name, pattern, MAXPATHLEN);
189 mutex_exit(&lim->pl_lock); 190 mutex_exit(&lim->pl_lock);
190 191
191 if (error) { 192 if (error) {
192 mutex_exit(p->p_lock); 193 mutex_exit(p->p_lock);
193 mutex_exit(&proc_lock); 194 mutex_exit(&proc_lock);
194 goto done; 195 goto done;
195 } 196 }
196 197
197 /* 198 /*
198 * On a simple filename, see if the filesystem allow us to write 199 * On a simple filename, see if the filesystem allow us to write
199 * core dumps there. 200 * core dumps there.
200 */ 201 */
201 lastslash = strrchr(name, '/'); 202 lastslash = strrchr(name, '/');
202 if (!lastslash) { 203 if (!lastslash) {
203 vp = p->p_cwdi->cwdi_cdir; 204 vp = p->p_cwdi->cwdi_cdir;
204 if (vp->v_mount == NULL || 205 if (vp->v_mount == NULL ||
205 (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0) 206 (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
206 error = EPERM; 207 error = EPERM;
207 } 208 }
208 209
209 mutex_exit(p->p_lock); 210 mutex_exit(p->p_lock);
210 mutex_exit(&proc_lock); 211 mutex_exit(&proc_lock);
211 if (error) 212 if (error)
212 goto done; 213 goto done;
213 214
214 /* 215 /*
215 * On a complex filename, see if the filesystem allow us to write 216 * On a complex filename, see if the filesystem allow us to write
216 * core dumps there. 217 * core dumps there.
217 * 218 *
218 * XXX: We should have an API that avoids double lookups 219 * XXX: We should have an API that avoids double lookups
219 */ 220 */
220 if (lastslash) { 221 if (lastslash) {
221 char c[2]; 222 char c[2];
222 223
223 if (lastslash - name >= MAXPATHLEN - 2) { 224 if (lastslash - name >= MAXPATHLEN - 2) {
224 error = EPERM; 225 error = EPERM;
225 goto done; 226 goto done;
226 } 227 }
227 228
228 c[0] = lastslash[1]; 229 c[0] = lastslash[1];
229 c[1] = lastslash[2]; 230 c[1] = lastslash[2];
230 lastslash[1] = '.'; 231 lastslash[1] = '.';
231 lastslash[2] = '\0'; 232 lastslash[2] = '\0';
232 error = namei_simple_kernel(name, NSM_FOLLOW_NOEMULROOT, &vp); 233 error = namei_simple_kernel(name, NSM_FOLLOW_NOEMULROOT, &vp);
233 if (error) 234 if (error)
234 goto done; 235 goto done;
235 if (vp->v_mount == NULL || 236 if (vp->v_mount == NULL ||
236 (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0) 237 (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
237 error = EPERM; 238 error = EPERM;
238 vrele(vp); 239 vrele(vp);
239 if (error) 240 if (error)
240 goto done; 241 goto done;
241 lastslash[1] = c[0]; 242 lastslash[1] = c[0];
242 lastslash[2] = c[1]; 243 lastslash[2] = c[1];
243 } 244 }
244 245
245 pb = pathbuf_create(name); 246 pb = pathbuf_create(name);
246 if (pb == NULL) { 247 if (pb == NULL) {
247 error = ENOMEM; 248 error = ENOMEM;
248 goto done; 249 goto done;
249 } 250 }
250 NDINIT(&nd, LOOKUP, NOFOLLOW, pb); 251 NDINIT(&nd, LOOKUP, NOFOLLOW, pb);
251 if ((error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE, 252 if ((error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE,
252 S_IRUSR | S_IWUSR)) != 0) { 253 S_IRUSR | S_IWUSR)) != 0) {
253 pathbuf_destroy(pb); 254 pathbuf_destroy(pb);
254 goto done; 255 goto done;
255 } 256 }
256 vp = nd.ni_vp; 257 vp = nd.ni_vp;
257 pathbuf_destroy(pb); 258 pathbuf_destroy(pb);
258 259
259 /* 260 /*
260 * Don't dump to: 261 * Don't dump to:
261 * - non-regular files 262 * - non-regular files
262 * - files with links 263 * - files with links
263 * - files we don't own 264 * - files we don't own
264 */ 265 */
265 if (vp->v_type != VREG || 266 if (vp->v_type != VREG ||
266 VOP_GETATTR(vp, &vattr, cred) || vattr.va_nlink != 1 || 267 VOP_GETATTR(vp, &vattr, cred) || vattr.va_nlink != 1 ||
267 vattr.va_uid != kauth_cred_geteuid(cred)) { 268 vattr.va_uid != kauth_cred_geteuid(cred)) {
268 error = EACCES; 269 error = EACCES;
269 goto out; 270 goto out;
270 } 271 }
271 vattr_null(&vattr); 272 vattr_null(&vattr);
272 vattr.va_size = 0; 273 vattr.va_size = 0;
273 274
274 if ((p->p_flag & PK_SUGID) && security_setidcore_dump) { 275 if ((p->p_flag & PK_SUGID) && security_setidcore_dump) {
275 vattr.va_uid = security_setidcore_owner; 276 vattr.va_uid = security_setidcore_owner;
276 vattr.va_gid = security_setidcore_group; 277 vattr.va_gid = security_setidcore_group;
277 vattr.va_mode = security_setidcore_mode; 278 vattr.va_mode = security_setidcore_mode;
278 } 279 }
279 280
280 VOP_SETATTR(vp, &vattr, cred); 281 VOP_SETATTR(vp, &vattr, cred);
281 p->p_acflag |= ACORE; 282 p->p_acflag |= ACORE;
282 283
283 io.io_lwp = l; 284 io.io_lwp = l;
284 io.io_vp = vp; 285 io.io_vp = vp;
285 io.io_cred = cred; 286 io.io_cred = cred;
286 io.io_offset = 0; 287 io.io_offset = 0;
287 288
288 /* Now dump the actual core file. */ 289 /* Now dump the actual core file. */
289 error = (*p->p_execsw->es_coredump)(l, &io); 290 error = (*p->p_execsw->es_coredump)(l, &io);
290 out: 291 out:
291 VOP_UNLOCK(vp); 292 VOP_UNLOCK(vp);
292 error1 = vn_close(vp, FWRITE, cred); 293 error1 = vn_close(vp, FWRITE, cred);
293 if (error == 0) 294 if (error == 0)
294 error = error1; 295 error = error1;
295done: 296done:
296 if (name != NULL) 297 if (name != NULL)
297 PNBUF_PUT(name); 298 PNBUF_PUT(name);
298 return error; 299 return error;
299} 300}
300 301
301static int 302static int
302coredump_buildname(struct proc *p, char *dst, const char *src, size_t len) 303coredump_buildname(struct proc *p, char *dst, const char *src, size_t len)
303{ 304{
304 const char *s; 305 const char *s;
305 char *d, *end; 306 char *d, *end;
306 int i; 307 int i;
307 308
308 KASSERT(mutex_owned(&proc_lock)); 309 KASSERT(mutex_owned(&proc_lock));
309 310
310 for (s = src, d = dst, end = d + len; *s != '\0'; s++) { 311 for (s = src, d = dst, end = d + len; *s != '\0'; s++) {
311 if (*s == '%') { 312 if (*s == '%') {
312 switch (*(s + 1)) { 313 switch (*(s + 1)) {
313 case 'n': 314 case 'n':
314 i = snprintf(d, end - d, "%s", p->p_comm); 315 i = snprintf(d, end - d, "%s", p->p_comm);
315 break; 316 break;
316 case 'p': 317 case 'p':
317 i = snprintf(d, end - d, "%d", p->p_pid); 318 i = snprintf(d, end - d, "%d", p->p_pid);
318 break; 319 break;
319 case 'u': 320 case 'u':
320 i = snprintf(d, end - d, "%.*s", 321 i = snprintf(d, end - d, "%.*s",
321 (int)sizeof p->p_pgrp->pg_session->s_login, 322 (int)sizeof p->p_pgrp->pg_session->s_login,
322 p->p_pgrp->pg_session->s_login); 323 p->p_pgrp->pg_session->s_login);
323 break; 324 break;
324 case 't': 325 case 't':
325 i = snprintf(d, end - d, "%lld", 326 i = snprintf(d, end - d, "%lld",
326 (long long)p->p_stats->p_start.tv_sec); 327 (long long)p->p_stats->p_start.tv_sec);
327 break; 328 break;
328 default: 329 default:
329 goto copy; 330 goto copy;
330 } 331 }
331 d += i; 332 d += i;
332 s++; 333 s++;
333 } else { 334 } else {
334 copy: *d = *s; 335 copy: *d = *s;
335 d++; 336 d++;
336 } 337 }
337 if (d >= end) 338 if (d >= end)
338 return (ENAMETOOLONG); 339 return (ENAMETOOLONG);
339 } 340 }
340 *d = '\0'; 341 *d = '\0';
341 return 0; 342 return 0;
342} 343}
343 344
344static int 345static int
345coredump_write(struct coredump_iostate *io, enum uio_seg segflg, 346coredump_write(struct coredump_iostate *io, enum uio_seg segflg,
346 const void *data, size_t len) 347 const void *data, size_t len)
347{ 348{
348 int error; 349 int error;
349 350
350 error = vn_rdwr(UIO_WRITE, io->io_vp, __UNCONST(data), len, 351 error = vn_rdwr(UIO_WRITE, io->io_vp, __UNCONST(data), len,
351 io->io_offset, segflg, 352 io->io_offset, segflg,
352 IO_NODELOCKED|IO_UNIT, io->io_cred, NULL, 353 IO_NODELOCKED|IO_UNIT, io->io_cred, NULL,
353 segflg == UIO_USERSPACE ? io->io_lwp : NULL); 354 segflg == UIO_USERSPACE ? io->io_lwp : NULL);
354 if (error) { 355 if (error) {
355 printf("pid %d (%s): %s write of %zu@%p at %lld failed: %d\n", 356 printf("pid %d (%s): %s write of %zu@%p at %lld failed: %d\n",
356 io->io_lwp->l_proc->p_pid, io->io_lwp->l_proc->p_comm, 357 io->io_lwp->l_proc->p_pid, io->io_lwp->l_proc->p_comm,
357 segflg == UIO_USERSPACE ? "user" : "system", 358 segflg == UIO_USERSPACE ? "user" : "system",
358 len, data, (long long) io->io_offset, error); 359 len, data, (long long) io->io_offset, error);
359 return (error); 360 return (error);
360 } 361 }
361 362
362 io->io_offset += len; 363 io->io_offset += len;
363 return (0); 364 return (0);
364} 365}
365 366
366static off_t 367static off_t
367coredump_offset(struct coredump_iostate *io) 368coredump_offset(struct coredump_iostate *io)
368{ 369{
369 return io->io_offset; 370 return io->io_offset;
370} 371}

cvs diff -r1.392 -r1.393 src/sys/kern/kern_sig.c

--- src/sys/kern/kern_sig.c 2020/10/20 13:16:26 1.392
+++ src/sys/kern/kern_sig.c 2020/10/26 17:35:39 1.393
@@ -1,1073 +1,1074 @@
1/* $NetBSD: kern_sig.c,v 1.392 2020/10/20 13:16:26 christos Exp $ */ 1/* $NetBSD: kern_sig.c,v 1.393 2020/10/26 17:35:39 christos Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc. 4 * Copyright (c) 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Copyright (c) 1982, 1986, 1989, 1991, 1993 33 * Copyright (c) 1982, 1986, 1989, 1991, 1993
34 * The Regents of the University of California. All rights reserved. 34 * The Regents of the University of California. All rights reserved.
35 * (c) UNIX System Laboratories, Inc. 35 * (c) UNIX System Laboratories, Inc.
36 * All or some portions of this file are derived from material licensed 36 * All or some portions of this file are derived from material licensed
37 * to the University of California by American Telephone and Telegraph 37 * to the University of California by American Telephone and Telegraph
38 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 38 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
39 * the permission of UNIX System Laboratories, Inc. 39 * the permission of UNIX System Laboratories, Inc.
40 * 40 *
41 * Redistribution and use in source and binary forms, with or without 41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions 42 * modification, are permitted provided that the following conditions
43 * are met: 43 * are met:
44 * 1. Redistributions of source code must retain the above copyright 44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer. 45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright 46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the 47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution. 48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors 49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software 50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission. 51 * without specific prior written permission.
52 * 52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE. 63 * SUCH DAMAGE.
64 * 64 *
65 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95 65 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95
66 */ 66 */
67 67
68/* 68/*
69 * Signal subsystem. 69 * Signal subsystem.
70 */ 70 */
71 71
72#include <sys/cdefs.h> 72#include <sys/cdefs.h>
73__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.392 2020/10/20 13:16:26 christos Exp $"); 73__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.393 2020/10/26 17:35:39 christos Exp $");
74 74
 75#include "opt_execfmt.h"
75#include "opt_ptrace.h" 76#include "opt_ptrace.h"
76#include "opt_dtrace.h" 77#include "opt_dtrace.h"
77#include "opt_compat_sunos.h" 78#include "opt_compat_sunos.h"
78#include "opt_compat_netbsd.h" 79#include "opt_compat_netbsd.h"
79#include "opt_compat_netbsd32.h" 80#include "opt_compat_netbsd32.h"
80#include "opt_pax.h" 81#include "opt_pax.h"
81 82
82#define SIGPROP /* include signal properties table */ 83#define SIGPROP /* include signal properties table */
83#include <sys/param.h> 84#include <sys/param.h>
84#include <sys/signalvar.h> 85#include <sys/signalvar.h>
85#include <sys/proc.h> 86#include <sys/proc.h>
86#include <sys/ptrace.h> 87#include <sys/ptrace.h>
87#include <sys/systm.h> 88#include <sys/systm.h>
88#include <sys/wait.h> 89#include <sys/wait.h>
89#include <sys/ktrace.h> 90#include <sys/ktrace.h>
90#include <sys/syslog.h> 91#include <sys/syslog.h>
91#include <sys/filedesc.h> 92#include <sys/filedesc.h>
92#include <sys/file.h> 93#include <sys/file.h>
93#include <sys/pool.h> 94#include <sys/pool.h>
94#include <sys/ucontext.h> 95#include <sys/ucontext.h>
95#include <sys/exec.h> 96#include <sys/exec.h>
96#include <sys/kauth.h> 97#include <sys/kauth.h>
97#include <sys/acct.h> 98#include <sys/acct.h>
98#include <sys/callout.h> 99#include <sys/callout.h>
99#include <sys/atomic.h> 100#include <sys/atomic.h>
100#include <sys/cpu.h> 101#include <sys/cpu.h>
101#include <sys/module.h> 102#include <sys/module.h>
102#include <sys/sdt.h> 103#include <sys/sdt.h>
103#include <sys/exec_elf.h> 104#include <sys/exec_elf.h>
104#include <sys/compat_stub.h> 105#include <sys/compat_stub.h>
105 106
106#ifdef PAX_SEGVGUARD 107#ifdef PAX_SEGVGUARD
107#include <sys/pax.h> 108#include <sys/pax.h>
108#endif /* PAX_SEGVGUARD */ 109#endif /* PAX_SEGVGUARD */
109 110
110#include <uvm/uvm_extern.h> 111#include <uvm/uvm_extern.h>
111 112
112#define SIGQUEUE_MAX 32 113#define SIGQUEUE_MAX 32
113static pool_cache_t sigacts_cache __read_mostly; 114static pool_cache_t sigacts_cache __read_mostly;
114static pool_cache_t ksiginfo_cache __read_mostly; 115static pool_cache_t ksiginfo_cache __read_mostly;
115static callout_t proc_stop_ch __cacheline_aligned; 116static callout_t proc_stop_ch __cacheline_aligned;
116 117
117sigset_t contsigmask __cacheline_aligned; 118sigset_t contsigmask __cacheline_aligned;
118sigset_t stopsigmask __cacheline_aligned; 119sigset_t stopsigmask __cacheline_aligned;
119static sigset_t vforksigmask __cacheline_aligned; 120static sigset_t vforksigmask __cacheline_aligned;
120sigset_t sigcantmask __cacheline_aligned; 121sigset_t sigcantmask __cacheline_aligned;
121 122
122static void ksiginfo_exechook(struct proc *, void *); 123static void ksiginfo_exechook(struct proc *, void *);
123static void proc_stop(struct proc *, int); 124static void proc_stop(struct proc *, int);
124static void proc_stop_done(struct proc *, int); 125static void proc_stop_done(struct proc *, int);
125static void proc_stop_callout(void *); 126static void proc_stop_callout(void *);
126static int sigchecktrace(void); 127static int sigchecktrace(void);
127static int sigpost(struct lwp *, sig_t, int, int); 128static int sigpost(struct lwp *, sig_t, int, int);
128static int sigput(sigpend_t *, struct proc *, ksiginfo_t *); 129static int sigput(sigpend_t *, struct proc *, ksiginfo_t *);
129static int sigunwait(struct proc *, const ksiginfo_t *); 130static int sigunwait(struct proc *, const ksiginfo_t *);
130static void sigswitch(int, int, bool); 131static void sigswitch(int, int, bool);
131static void sigswitch_unlock_and_switch_away(struct lwp *); 132static void sigswitch_unlock_and_switch_away(struct lwp *);
132 133
133static void sigacts_poolpage_free(struct pool *, void *); 134static void sigacts_poolpage_free(struct pool *, void *);
134static void *sigacts_poolpage_alloc(struct pool *, int); 135static void *sigacts_poolpage_alloc(struct pool *, int);
135 136
136/* 137/*
137 * DTrace SDT provider definitions 138 * DTrace SDT provider definitions
138 */ 139 */
139SDT_PROVIDER_DECLARE(proc); 140SDT_PROVIDER_DECLARE(proc);
140SDT_PROBE_DEFINE3(proc, kernel, , signal__send, 141SDT_PROBE_DEFINE3(proc, kernel, , signal__send,
141 "struct lwp *", /* target thread */ 142 "struct lwp *", /* target thread */
142 "struct proc *", /* target process */ 143 "struct proc *", /* target process */
143 "int"); /* signal */ 144 "int"); /* signal */
144SDT_PROBE_DEFINE3(proc, kernel, , signal__discard, 145SDT_PROBE_DEFINE3(proc, kernel, , signal__discard,
145 "struct lwp *", /* target thread */ 146 "struct lwp *", /* target thread */
146 "struct proc *", /* target process */ 147 "struct proc *", /* target process */
147 "int"); /* signal */ 148 "int"); /* signal */
148SDT_PROBE_DEFINE3(proc, kernel, , signal__handle, 149SDT_PROBE_DEFINE3(proc, kernel, , signal__handle,
149 "int", /* signal */ 150 "int", /* signal */
150 "ksiginfo_t *", /* signal info */ 151 "ksiginfo_t *", /* signal info */
151 "void (*)(void)"); /* handler address */ 152 "void (*)(void)"); /* handler address */
152 153
153 154
154static struct pool_allocator sigactspool_allocator = { 155static struct pool_allocator sigactspool_allocator = {
155 .pa_alloc = sigacts_poolpage_alloc, 156 .pa_alloc = sigacts_poolpage_alloc,
156 .pa_free = sigacts_poolpage_free 157 .pa_free = sigacts_poolpage_free
157}; 158};
158 159
159#ifdef DEBUG 160#ifdef DEBUG
160int kern_logsigexit = 1; 161int kern_logsigexit = 1;
161#else 162#else
162int kern_logsigexit = 0; 163int kern_logsigexit = 0;
163#endif 164#endif
164 165
165static const char logcoredump[] = 166static const char logcoredump[] =
166 "pid %d (%s), uid %d: exited on signal %d (core dumped)\n"; 167 "pid %d (%s), uid %d: exited on signal %d (core dumped)\n";
167static const char lognocoredump[] = 168static const char lognocoredump[] =
168 "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n"; 169 "pid %d (%s), uid %d: exited on signal %d (core not dumped, err = %d)\n";
169 170
170static kauth_listener_t signal_listener; 171static kauth_listener_t signal_listener;
171 172
172static int 173static int
173signal_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie, 174signal_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
174 void *arg0, void *arg1, void *arg2, void *arg3) 175 void *arg0, void *arg1, void *arg2, void *arg3)
175{ 176{
176 struct proc *p; 177 struct proc *p;
177 int result, signum; 178 int result, signum;
178 179
179 result = KAUTH_RESULT_DEFER; 180 result = KAUTH_RESULT_DEFER;
180 p = arg0; 181 p = arg0;
181 signum = (int)(unsigned long)arg1; 182 signum = (int)(unsigned long)arg1;
182 183
183 if (action != KAUTH_PROCESS_SIGNAL) 184 if (action != KAUTH_PROCESS_SIGNAL)
184 return result; 185 return result;
185 186
186 if (kauth_cred_uidmatch(cred, p->p_cred) || 187 if (kauth_cred_uidmatch(cred, p->p_cred) ||
187 (signum == SIGCONT && (curproc->p_session == p->p_session))) 188 (signum == SIGCONT && (curproc->p_session == p->p_session)))
188 result = KAUTH_RESULT_ALLOW; 189 result = KAUTH_RESULT_ALLOW;
189 190
190 return result; 191 return result;
191} 192}
192 193
193static int 194static int
194sigacts_ctor(void *arg __unused, void *obj, int flags __unused) 195sigacts_ctor(void *arg __unused, void *obj, int flags __unused)
195{ 196{
196 memset(obj, 0, sizeof(struct sigacts)); 197 memset(obj, 0, sizeof(struct sigacts));
197 return 0; 198 return 0;
198} 199}
199 200
200/* 201/*
201 * signal_init: 202 * signal_init:
202 * 203 *
203 * Initialize global signal-related data structures. 204 * Initialize global signal-related data structures.
204 */ 205 */
205void 206void
206signal_init(void) 207signal_init(void)
207{ 208{
208 209
209 sigactspool_allocator.pa_pagesz = (PAGE_SIZE)*2; 210 sigactspool_allocator.pa_pagesz = (PAGE_SIZE)*2;
210 211
211 sigacts_cache = pool_cache_init(sizeof(struct sigacts), 0, 0, 0, 212 sigacts_cache = pool_cache_init(sizeof(struct sigacts), 0, 0, 0,
212 "sigacts", sizeof(struct sigacts) > PAGE_SIZE ? 213 "sigacts", sizeof(struct sigacts) > PAGE_SIZE ?
213 &sigactspool_allocator : NULL, IPL_NONE, sigacts_ctor, NULL, NULL); 214 &sigactspool_allocator : NULL, IPL_NONE, sigacts_ctor, NULL, NULL);
214 ksiginfo_cache = pool_cache_init(sizeof(ksiginfo_t), 0, 0, 0, 215 ksiginfo_cache = pool_cache_init(sizeof(ksiginfo_t), 0, 0, 0,
215 "ksiginfo", NULL, IPL_VM, NULL, NULL, NULL); 216 "ksiginfo", NULL, IPL_VM, NULL, NULL, NULL);
216 217
217 exechook_establish(ksiginfo_exechook, NULL); 218 exechook_establish(ksiginfo_exechook, NULL);
218 219
219 callout_init(&proc_stop_ch, CALLOUT_MPSAFE); 220 callout_init(&proc_stop_ch, CALLOUT_MPSAFE);
220 callout_setfunc(&proc_stop_ch, proc_stop_callout, NULL); 221 callout_setfunc(&proc_stop_ch, proc_stop_callout, NULL);
221 222
222 signal_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS, 223 signal_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
223 signal_listener_cb, NULL); 224 signal_listener_cb, NULL);
224} 225}
225 226
226/* 227/*
227 * sigacts_poolpage_alloc: 228 * sigacts_poolpage_alloc:
228 * 229 *
229 * Allocate a page for the sigacts memory pool. 230 * Allocate a page for the sigacts memory pool.
230 */ 231 */
231static void * 232static void *
232sigacts_poolpage_alloc(struct pool *pp, int flags) 233sigacts_poolpage_alloc(struct pool *pp, int flags)
233{ 234{
234 235
235 return (void *)uvm_km_alloc(kernel_map, 236 return (void *)uvm_km_alloc(kernel_map,
236 PAGE_SIZE * 2, PAGE_SIZE * 2, 237 PAGE_SIZE * 2, PAGE_SIZE * 2,
237 ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) 238 ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
238 | UVM_KMF_WIRED); 239 | UVM_KMF_WIRED);
239} 240}
240 241
241/* 242/*
242 * sigacts_poolpage_free: 243 * sigacts_poolpage_free:
243 * 244 *
244 * Free a page on behalf of the sigacts memory pool. 245 * Free a page on behalf of the sigacts memory pool.
245 */ 246 */
246static void 247static void
247sigacts_poolpage_free(struct pool *pp, void *v) 248sigacts_poolpage_free(struct pool *pp, void *v)
248{ 249{
249 250
250 uvm_km_free(kernel_map, (vaddr_t)v, PAGE_SIZE * 2, UVM_KMF_WIRED); 251 uvm_km_free(kernel_map, (vaddr_t)v, PAGE_SIZE * 2, UVM_KMF_WIRED);
251} 252}
252 253
253/* 254/*
254 * sigactsinit: 255 * sigactsinit:
255 * 256 *
256 * Create an initial sigacts structure, using the same signal state 257 * Create an initial sigacts structure, using the same signal state
257 * as of specified process. If 'share' is set, share the sigacts by 258 * as of specified process. If 'share' is set, share the sigacts by
258 * holding a reference, otherwise just copy it from parent. 259 * holding a reference, otherwise just copy it from parent.
259 */ 260 */
260struct sigacts * 261struct sigacts *
261sigactsinit(struct proc *pp, int share) 262sigactsinit(struct proc *pp, int share)
262{ 263{
263 struct sigacts *ps = pp->p_sigacts, *ps2; 264 struct sigacts *ps = pp->p_sigacts, *ps2;
264 265
265 if (__predict_false(share)) { 266 if (__predict_false(share)) {
266 atomic_inc_uint(&ps->sa_refcnt); 267 atomic_inc_uint(&ps->sa_refcnt);
267 return ps; 268 return ps;
268 } 269 }
269 ps2 = pool_cache_get(sigacts_cache, PR_WAITOK); 270 ps2 = pool_cache_get(sigacts_cache, PR_WAITOK);
270 mutex_init(&ps2->sa_mutex, MUTEX_DEFAULT, IPL_SCHED); 271 mutex_init(&ps2->sa_mutex, MUTEX_DEFAULT, IPL_SCHED);
271 ps2->sa_refcnt = 1; 272 ps2->sa_refcnt = 1;
272 273
273 mutex_enter(&ps->sa_mutex); 274 mutex_enter(&ps->sa_mutex);
274 memcpy(ps2->sa_sigdesc, ps->sa_sigdesc, sizeof(ps2->sa_sigdesc)); 275 memcpy(ps2->sa_sigdesc, ps->sa_sigdesc, sizeof(ps2->sa_sigdesc));
275 mutex_exit(&ps->sa_mutex); 276 mutex_exit(&ps->sa_mutex);
276 return ps2; 277 return ps2;
277} 278}
278 279
279/* 280/*
280 * sigactsunshare: 281 * sigactsunshare:
281 * 282 *
282 * Make this process not share its sigacts, maintaining all signal state. 283 * Make this process not share its sigacts, maintaining all signal state.
283 */ 284 */
284void 285void
285sigactsunshare(struct proc *p) 286sigactsunshare(struct proc *p)
286{ 287{
287 struct sigacts *ps, *oldps = p->p_sigacts; 288 struct sigacts *ps, *oldps = p->p_sigacts;
288 289
289 if (__predict_true(oldps->sa_refcnt == 1)) 290 if (__predict_true(oldps->sa_refcnt == 1))
290 return; 291 return;
291 292
292 ps = pool_cache_get(sigacts_cache, PR_WAITOK); 293 ps = pool_cache_get(sigacts_cache, PR_WAITOK);
293 mutex_init(&ps->sa_mutex, MUTEX_DEFAULT, IPL_SCHED); 294 mutex_init(&ps->sa_mutex, MUTEX_DEFAULT, IPL_SCHED);
294 memcpy(ps->sa_sigdesc, oldps->sa_sigdesc, sizeof(ps->sa_sigdesc)); 295 memcpy(ps->sa_sigdesc, oldps->sa_sigdesc, sizeof(ps->sa_sigdesc));
295 ps->sa_refcnt = 1; 296 ps->sa_refcnt = 1;
296 297
297 p->p_sigacts = ps; 298 p->p_sigacts = ps;
298 sigactsfree(oldps); 299 sigactsfree(oldps);
299} 300}
300 301
301/* 302/*
302 * sigactsfree; 303 * sigactsfree;
303 * 304 *
304 * Release a sigacts structure. 305 * Release a sigacts structure.
305 */ 306 */
306void 307void
307sigactsfree(struct sigacts *ps) 308sigactsfree(struct sigacts *ps)
308{ 309{
309 310
310 if (atomic_dec_uint_nv(&ps->sa_refcnt) == 0) { 311 if (atomic_dec_uint_nv(&ps->sa_refcnt) == 0) {
311 mutex_destroy(&ps->sa_mutex); 312 mutex_destroy(&ps->sa_mutex);
312 pool_cache_put(sigacts_cache, ps); 313 pool_cache_put(sigacts_cache, ps);
313 } 314 }
314} 315}
315 316
316/* 317/*
317 * siginit: 318 * siginit:
318 * 319 *
319 * Initialize signal state for process 0; set to ignore signals that 320 * Initialize signal state for process 0; set to ignore signals that
320 * are ignored by default and disable the signal stack. Locking not 321 * are ignored by default and disable the signal stack. Locking not
321 * required as the system is still cold. 322 * required as the system is still cold.
322 */ 323 */
323void 324void
324siginit(struct proc *p) 325siginit(struct proc *p)
325{ 326{
326 struct lwp *l; 327 struct lwp *l;
327 struct sigacts *ps; 328 struct sigacts *ps;
328 int signo, prop; 329 int signo, prop;
329 330
330 ps = p->p_sigacts; 331 ps = p->p_sigacts;
331 sigemptyset(&contsigmask); 332 sigemptyset(&contsigmask);
332 sigemptyset(&stopsigmask); 333 sigemptyset(&stopsigmask);
333 sigemptyset(&vforksigmask); 334 sigemptyset(&vforksigmask);
334 sigemptyset(&sigcantmask); 335 sigemptyset(&sigcantmask);
335 for (signo = 1; signo < NSIG; signo++) { 336 for (signo = 1; signo < NSIG; signo++) {
336 prop = sigprop[signo]; 337 prop = sigprop[signo];
337 if (prop & SA_CONT) 338 if (prop & SA_CONT)
338 sigaddset(&contsigmask, signo); 339 sigaddset(&contsigmask, signo);
339 if (prop & SA_STOP) 340 if (prop & SA_STOP)
340 sigaddset(&stopsigmask, signo); 341 sigaddset(&stopsigmask, signo);
341 if (prop & SA_STOP && signo != SIGSTOP) 342 if (prop & SA_STOP && signo != SIGSTOP)
342 sigaddset(&vforksigmask, signo); 343 sigaddset(&vforksigmask, signo);
343 if (prop & SA_CANTMASK) 344 if (prop & SA_CANTMASK)
344 sigaddset(&sigcantmask, signo); 345 sigaddset(&sigcantmask, signo);
345 if (prop & SA_IGNORE && signo != SIGCONT) 346 if (prop & SA_IGNORE && signo != SIGCONT)
346 sigaddset(&p->p_sigctx.ps_sigignore, signo); 347 sigaddset(&p->p_sigctx.ps_sigignore, signo);
347 sigemptyset(&SIGACTION_PS(ps, signo).sa_mask); 348 sigemptyset(&SIGACTION_PS(ps, signo).sa_mask);
348 SIGACTION_PS(ps, signo).sa_flags = SA_RESTART; 349 SIGACTION_PS(ps, signo).sa_flags = SA_RESTART;
349 } 350 }
350 sigemptyset(&p->p_sigctx.ps_sigcatch); 351 sigemptyset(&p->p_sigctx.ps_sigcatch);
351 p->p_sflag &= ~PS_NOCLDSTOP; 352 p->p_sflag &= ~PS_NOCLDSTOP;
352 353
353 ksiginfo_queue_init(&p->p_sigpend.sp_info); 354 ksiginfo_queue_init(&p->p_sigpend.sp_info);
354 sigemptyset(&p->p_sigpend.sp_set); 355 sigemptyset(&p->p_sigpend.sp_set);
355 356
356 /* 357 /*
357 * Reset per LWP state. 358 * Reset per LWP state.
358 */ 359 */
359 l = LIST_FIRST(&p->p_lwps); 360 l = LIST_FIRST(&p->p_lwps);
360 l->l_sigwaited = NULL; 361 l->l_sigwaited = NULL;
361 l->l_sigstk = SS_INIT; 362 l->l_sigstk = SS_INIT;
362 ksiginfo_queue_init(&l->l_sigpend.sp_info); 363 ksiginfo_queue_init(&l->l_sigpend.sp_info);
363 sigemptyset(&l->l_sigpend.sp_set); 364 sigemptyset(&l->l_sigpend.sp_set);
364 365
365 /* One reference. */ 366 /* One reference. */
366 ps->sa_refcnt = 1; 367 ps->sa_refcnt = 1;
367} 368}
368 369
369/* 370/*
370 * execsigs: 371 * execsigs:
371 * 372 *
372 * Reset signals for an exec of the specified process. 373 * Reset signals for an exec of the specified process.
373 */ 374 */
374void 375void
375execsigs(struct proc *p) 376execsigs(struct proc *p)
376{ 377{
377 struct sigacts *ps; 378 struct sigacts *ps;
378 struct lwp *l; 379 struct lwp *l;
379 int signo, prop; 380 int signo, prop;
380 sigset_t tset; 381 sigset_t tset;
381 ksiginfoq_t kq; 382 ksiginfoq_t kq;
382 383
383 KASSERT(p->p_nlwps == 1); 384 KASSERT(p->p_nlwps == 1);
384 385
385 sigactsunshare(p); 386 sigactsunshare(p);
386 ps = p->p_sigacts; 387 ps = p->p_sigacts;
387 388
388 /* 389 /*
389 * Reset caught signals. Held signals remain held through 390 * Reset caught signals. Held signals remain held through
390 * l->l_sigmask (unless they were caught, and are now ignored 391 * l->l_sigmask (unless they were caught, and are now ignored
391 * by default). 392 * by default).
392 * 393 *
393 * No need to lock yet, the process has only one LWP and 394 * No need to lock yet, the process has only one LWP and
394 * at this point the sigacts are private to the process. 395 * at this point the sigacts are private to the process.
395 */ 396 */
396 sigemptyset(&tset); 397 sigemptyset(&tset);
397 for (signo = 1; signo < NSIG; signo++) { 398 for (signo = 1; signo < NSIG; signo++) {
398 if (sigismember(&p->p_sigctx.ps_sigcatch, signo)) { 399 if (sigismember(&p->p_sigctx.ps_sigcatch, signo)) {
399 prop = sigprop[signo]; 400 prop = sigprop[signo];
400 if (prop & SA_IGNORE) { 401 if (prop & SA_IGNORE) {
401 if ((prop & SA_CONT) == 0) 402 if ((prop & SA_CONT) == 0)
402 sigaddset(&p->p_sigctx.ps_sigignore, 403 sigaddset(&p->p_sigctx.ps_sigignore,
403 signo); 404 signo);
404 sigaddset(&tset, signo); 405 sigaddset(&tset, signo);
405 } 406 }
406 SIGACTION_PS(ps, signo).sa_handler = SIG_DFL; 407 SIGACTION_PS(ps, signo).sa_handler = SIG_DFL;
407 } 408 }
408 sigemptyset(&SIGACTION_PS(ps, signo).sa_mask); 409 sigemptyset(&SIGACTION_PS(ps, signo).sa_mask);
409 SIGACTION_PS(ps, signo).sa_flags = SA_RESTART; 410 SIGACTION_PS(ps, signo).sa_flags = SA_RESTART;
410 } 411 }
411 ksiginfo_queue_init(&kq); 412 ksiginfo_queue_init(&kq);
412 413
413 mutex_enter(p->p_lock); 414 mutex_enter(p->p_lock);
414 sigclearall(p, &tset, &kq); 415 sigclearall(p, &tset, &kq);
415 sigemptyset(&p->p_sigctx.ps_sigcatch); 416 sigemptyset(&p->p_sigctx.ps_sigcatch);
416 417
417 /* 418 /*
418 * Reset no zombies if child dies flag as Solaris does. 419 * Reset no zombies if child dies flag as Solaris does.
419 */ 420 */
420 p->p_flag &= ~(PK_NOCLDWAIT | PK_CLDSIGIGN); 421 p->p_flag &= ~(PK_NOCLDWAIT | PK_CLDSIGIGN);
421 if (SIGACTION_PS(ps, SIGCHLD).sa_handler == SIG_IGN) 422 if (SIGACTION_PS(ps, SIGCHLD).sa_handler == SIG_IGN)
422 SIGACTION_PS(ps, SIGCHLD).sa_handler = SIG_DFL; 423 SIGACTION_PS(ps, SIGCHLD).sa_handler = SIG_DFL;
423 424
424 /* 425 /*
425 * Reset per-LWP state. 426 * Reset per-LWP state.
426 */ 427 */
427 l = LIST_FIRST(&p->p_lwps); 428 l = LIST_FIRST(&p->p_lwps);
428 l->l_sigwaited = NULL; 429 l->l_sigwaited = NULL;
429 l->l_sigstk = SS_INIT; 430 l->l_sigstk = SS_INIT;
430 ksiginfo_queue_init(&l->l_sigpend.sp_info); 431 ksiginfo_queue_init(&l->l_sigpend.sp_info);
431 sigemptyset(&l->l_sigpend.sp_set); 432 sigemptyset(&l->l_sigpend.sp_set);
432 mutex_exit(p->p_lock); 433 mutex_exit(p->p_lock);
433 434
434 ksiginfo_queue_drain(&kq); 435 ksiginfo_queue_drain(&kq);
435} 436}
436 437
437/* 438/*
438 * ksiginfo_exechook: 439 * ksiginfo_exechook:
439 * 440 *
440 * Free all pending ksiginfo entries from a process on exec. 441 * Free all pending ksiginfo entries from a process on exec.
441 * Additionally, drain any unused ksiginfo structures in the 442 * Additionally, drain any unused ksiginfo structures in the
442 * system back to the pool. 443 * system back to the pool.
443 * 444 *
444 * XXX This should not be a hook, every process has signals. 445 * XXX This should not be a hook, every process has signals.
445 */ 446 */
446static void 447static void
447ksiginfo_exechook(struct proc *p, void *v) 448ksiginfo_exechook(struct proc *p, void *v)
448{ 449{
449 ksiginfoq_t kq; 450 ksiginfoq_t kq;
450 451
451 ksiginfo_queue_init(&kq); 452 ksiginfo_queue_init(&kq);
452 453
453 mutex_enter(p->p_lock); 454 mutex_enter(p->p_lock);
454 sigclearall(p, NULL, &kq); 455 sigclearall(p, NULL, &kq);
455 mutex_exit(p->p_lock); 456 mutex_exit(p->p_lock);
456 457
457 ksiginfo_queue_drain(&kq); 458 ksiginfo_queue_drain(&kq);
458} 459}
459 460
460/* 461/*
461 * ksiginfo_alloc: 462 * ksiginfo_alloc:
462 * 463 *
463 * Allocate a new ksiginfo structure from the pool, and optionally copy 464 * Allocate a new ksiginfo structure from the pool, and optionally copy
464 * an existing one. If the existing ksiginfo_t is from the pool, and 465 * an existing one. If the existing ksiginfo_t is from the pool, and
465 * has not been queued somewhere, then just return it. Additionally, 466 * has not been queued somewhere, then just return it. Additionally,
466 * if the existing ksiginfo_t does not contain any information beyond 467 * if the existing ksiginfo_t does not contain any information beyond
467 * the signal number, then just return it. 468 * the signal number, then just return it.
468 */ 469 */
469ksiginfo_t * 470ksiginfo_t *
470ksiginfo_alloc(struct proc *p, ksiginfo_t *ok, int flags) 471ksiginfo_alloc(struct proc *p, ksiginfo_t *ok, int flags)
471{ 472{
472 ksiginfo_t *kp; 473 ksiginfo_t *kp;
473 474
474 if (ok != NULL) { 475 if (ok != NULL) {
475 if ((ok->ksi_flags & (KSI_QUEUED | KSI_FROMPOOL)) == 476 if ((ok->ksi_flags & (KSI_QUEUED | KSI_FROMPOOL)) ==
476 KSI_FROMPOOL) 477 KSI_FROMPOOL)
477 return ok; 478 return ok;
478 if (KSI_EMPTY_P(ok)) 479 if (KSI_EMPTY_P(ok))
479 return ok; 480 return ok;
480 } 481 }
481 482
482 kp = pool_cache_get(ksiginfo_cache, flags); 483 kp = pool_cache_get(ksiginfo_cache, flags);
483 if (kp == NULL) { 484 if (kp == NULL) {
484#ifdef DIAGNOSTIC 485#ifdef DIAGNOSTIC
485 printf("Out of memory allocating ksiginfo for pid %d\n", 486 printf("Out of memory allocating ksiginfo for pid %d\n",
486 p->p_pid); 487 p->p_pid);
487#endif 488#endif
488 return NULL; 489 return NULL;
489 } 490 }
490 491
491 if (ok != NULL) { 492 if (ok != NULL) {
492 memcpy(kp, ok, sizeof(*kp)); 493 memcpy(kp, ok, sizeof(*kp));
493 kp->ksi_flags &= ~KSI_QUEUED; 494 kp->ksi_flags &= ~KSI_QUEUED;
494 } else 495 } else
495 KSI_INIT_EMPTY(kp); 496 KSI_INIT_EMPTY(kp);
496 497
497 kp->ksi_flags |= KSI_FROMPOOL; 498 kp->ksi_flags |= KSI_FROMPOOL;
498 499
499 return kp; 500 return kp;
500} 501}
501 502
502/* 503/*
503 * ksiginfo_free: 504 * ksiginfo_free:
504 * 505 *
505 * If the given ksiginfo_t is from the pool and has not been queued, 506 * If the given ksiginfo_t is from the pool and has not been queued,
506 * then free it. 507 * then free it.
507 */ 508 */
508void 509void
509ksiginfo_free(ksiginfo_t *kp) 510ksiginfo_free(ksiginfo_t *kp)
510{ 511{
511 512
512 if ((kp->ksi_flags & (KSI_QUEUED | KSI_FROMPOOL)) != KSI_FROMPOOL) 513 if ((kp->ksi_flags & (KSI_QUEUED | KSI_FROMPOOL)) != KSI_FROMPOOL)
513 return; 514 return;
514 pool_cache_put(ksiginfo_cache, kp); 515 pool_cache_put(ksiginfo_cache, kp);
515} 516}
516 517
517/* 518/*
518 * ksiginfo_queue_drain: 519 * ksiginfo_queue_drain:
519 * 520 *
520 * Drain a non-empty ksiginfo_t queue. 521 * Drain a non-empty ksiginfo_t queue.
521 */ 522 */
522void 523void
523ksiginfo_queue_drain0(ksiginfoq_t *kq) 524ksiginfo_queue_drain0(ksiginfoq_t *kq)
524{ 525{
525 ksiginfo_t *ksi; 526 ksiginfo_t *ksi;
526 527
527 KASSERT(!TAILQ_EMPTY(kq)); 528 KASSERT(!TAILQ_EMPTY(kq));
528 529
529 while (!TAILQ_EMPTY(kq)) { 530 while (!TAILQ_EMPTY(kq)) {
530 ksi = TAILQ_FIRST(kq); 531 ksi = TAILQ_FIRST(kq);
531 TAILQ_REMOVE(kq, ksi, ksi_list); 532 TAILQ_REMOVE(kq, ksi, ksi_list);
532 pool_cache_put(ksiginfo_cache, ksi); 533 pool_cache_put(ksiginfo_cache, ksi);
533 } 534 }
534} 535}
535 536
536static int 537static int
537siggetinfo(sigpend_t *sp, ksiginfo_t *out, int signo) 538siggetinfo(sigpend_t *sp, ksiginfo_t *out, int signo)
538{ 539{
539 ksiginfo_t *ksi, *nksi; 540 ksiginfo_t *ksi, *nksi;
540 541
541 if (sp == NULL) 542 if (sp == NULL)
542 goto out; 543 goto out;
543 544
544 /* Find siginfo and copy it out. */ 545 /* Find siginfo and copy it out. */
545 int count = 0; 546 int count = 0;
546 TAILQ_FOREACH_SAFE(ksi, &sp->sp_info, ksi_list, nksi) { 547 TAILQ_FOREACH_SAFE(ksi, &sp->sp_info, ksi_list, nksi) {
547 if (ksi->ksi_signo != signo) 548 if (ksi->ksi_signo != signo)
548 continue; 549 continue;
549 if (count++ > 0) /* Only remove the first, count all of them */ 550 if (count++ > 0) /* Only remove the first, count all of them */
550 continue;  551 continue;
551 TAILQ_REMOVE(&sp->sp_info, ksi, ksi_list); 552 TAILQ_REMOVE(&sp->sp_info, ksi, ksi_list);
552 KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0); 553 KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);
553 KASSERT((ksi->ksi_flags & KSI_QUEUED) != 0); 554 KASSERT((ksi->ksi_flags & KSI_QUEUED) != 0);
554 ksi->ksi_flags &= ~KSI_QUEUED; 555 ksi->ksi_flags &= ~KSI_QUEUED;
555 if (out != NULL) { 556 if (out != NULL) {
556 memcpy(out, ksi, sizeof(*out)); 557 memcpy(out, ksi, sizeof(*out));
557 out->ksi_flags &= ~(KSI_FROMPOOL | KSI_QUEUED); 558 out->ksi_flags &= ~(KSI_FROMPOOL | KSI_QUEUED);
558 } 559 }
559 ksiginfo_free(ksi); 560 ksiginfo_free(ksi);
560 } 561 }
561 if (count) 562 if (count)
562 return count; 563 return count;
563 564
564out: 565out:
565 /* If there is no siginfo, then manufacture it. */ 566 /* If there is no siginfo, then manufacture it. */
566 if (out != NULL) { 567 if (out != NULL) {
567 KSI_INIT(out); 568 KSI_INIT(out);
568 out->ksi_info._signo = signo; 569 out->ksi_info._signo = signo;
569 out->ksi_info._code = SI_NOINFO; 570 out->ksi_info._code = SI_NOINFO;
570 } 571 }
571 return 0; 572 return 0;
572} 573}
573 574
574/* 575/*
575 * sigget: 576 * sigget:
576 * 577 *
577 * Fetch the first pending signal from a set. Optionally, also fetch 578 * Fetch the first pending signal from a set. Optionally, also fetch
578 * or manufacture a ksiginfo element. Returns the number of the first 579 * or manufacture a ksiginfo element. Returns the number of the first
579 * pending signal, or zero. 580 * pending signal, or zero.
580 */  581 */
581int 582int
582sigget(sigpend_t *sp, ksiginfo_t *out, int signo, const sigset_t *mask) 583sigget(sigpend_t *sp, ksiginfo_t *out, int signo, const sigset_t *mask)
583{ 584{
584 sigset_t tset; 585 sigset_t tset;
585 int count; 586 int count;
586 587
587 /* If there's no pending set, the signal is from the debugger. */ 588 /* If there's no pending set, the signal is from the debugger. */
588 if (sp == NULL) 589 if (sp == NULL)
589 goto out; 590 goto out;
590 591
591 /* Construct mask from signo, and 'mask'. */ 592 /* Construct mask from signo, and 'mask'. */
592 if (signo == 0) { 593 if (signo == 0) {
593 if (mask != NULL) { 594 if (mask != NULL) {
594 tset = *mask; 595 tset = *mask;
595 __sigandset(&sp->sp_set, &tset); 596 __sigandset(&sp->sp_set, &tset);
596 } else 597 } else
597 tset = sp->sp_set; 598 tset = sp->sp_set;
598 599
599 /* If there are no signals pending - return. */ 600 /* If there are no signals pending - return. */
600 if ((signo = firstsig(&tset)) == 0) 601 if ((signo = firstsig(&tset)) == 0)
601 goto out; 602 goto out;
602 } else { 603 } else {
603 KASSERT(sigismember(&sp->sp_set, signo)); 604 KASSERT(sigismember(&sp->sp_set, signo));
604 } 605 }
605 606
606 sigdelset(&sp->sp_set, signo); 607 sigdelset(&sp->sp_set, signo);
607out: 608out:
608 count = siggetinfo(sp, out, signo); 609 count = siggetinfo(sp, out, signo);
609 if (count > 1) 610 if (count > 1)
610 sigaddset(&sp->sp_set, signo); 611 sigaddset(&sp->sp_set, signo);
611 return signo; 612 return signo;
612} 613}
613 614
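For context, the userland-visible effect of sigget()/siggetinfo() is what sigwaitinfo(2) exposes: the first pending signal is dequeued together with its siginfo, or a minimal siginfo is manufactured when none was queued. A small illustrative sketch, not part of this change (standard POSIX calls, error handling trimmed):

#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t set;
	siginfo_t info;

	/* Keep SIGUSR1 pending by blocking it before raising it. */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);

	/* Dequeue the first pending signal and its siginfo. */
	if (sigwaitinfo(&set, &info) == SIGUSR1)
		printf("got SIGUSR1, si_code=%d\n", info.si_code);
	return 0;
}
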
614/* 615/*
615 * sigput: 616 * sigput:
616 * 617 *
617 * Append a new ksiginfo element to the list of pending ksiginfo's. 618 * Append a new ksiginfo element to the list of pending ksiginfo's.
618 */ 619 */
619static int 620static int
620sigput(sigpend_t *sp, struct proc *p, ksiginfo_t *ksi) 621sigput(sigpend_t *sp, struct proc *p, ksiginfo_t *ksi)
621{ 622{
622 ksiginfo_t *kp; 623 ksiginfo_t *kp;
623 624
624 KASSERT(mutex_owned(p->p_lock)); 625 KASSERT(mutex_owned(p->p_lock));
625 KASSERT((ksi->ksi_flags & KSI_QUEUED) == 0); 626 KASSERT((ksi->ksi_flags & KSI_QUEUED) == 0);
626 627
627 sigaddset(&sp->sp_set, ksi->ksi_signo); 628 sigaddset(&sp->sp_set, ksi->ksi_signo);
628 629
629 /* 630 /*
630 * If there is no siginfo, we are done. 631 * If there is no siginfo, we are done.
631 */ 632 */
632 if (KSI_EMPTY_P(ksi)) 633 if (KSI_EMPTY_P(ksi))
633 return 0; 634 return 0;
634 635
635 KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0); 636 KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);
636 637
637 size_t count = 0; 638 size_t count = 0;
638 TAILQ_FOREACH(kp, &sp->sp_info, ksi_list) { 639 TAILQ_FOREACH(kp, &sp->sp_info, ksi_list) {
639 count++; 640 count++;
640 if (ksi->ksi_signo >= SIGRTMIN && ksi->ksi_signo <= SIGRTMAX) 641 if (ksi->ksi_signo >= SIGRTMIN && ksi->ksi_signo <= SIGRTMAX)
641 continue; 642 continue;
642 if (kp->ksi_signo == ksi->ksi_signo) { 643 if (kp->ksi_signo == ksi->ksi_signo) {
643 KSI_COPY(ksi, kp); 644 KSI_COPY(ksi, kp);
644 kp->ksi_flags |= KSI_QUEUED; 645 kp->ksi_flags |= KSI_QUEUED;
645 return 0; 646 return 0;
646 } 647 }
647 } 648 }
648  649
649 if (count >= SIGQUEUE_MAX) { 650 if (count >= SIGQUEUE_MAX) {
650#ifdef DIAGNOSTIC 651#ifdef DIAGNOSTIC
651 printf("%s(%d): Signal queue is full signal=%d\n", 652 printf("%s(%d): Signal queue is full signal=%d\n",
652 p->p_comm, p->p_pid, ksi->ksi_signo); 653 p->p_comm, p->p_pid, ksi->ksi_signo);
653#endif 654#endif
654 return EAGAIN; 655 return EAGAIN;
655 } 656 }
656 ksi->ksi_flags |= KSI_QUEUED; 657 ksi->ksi_flags |= KSI_QUEUED;
657 TAILQ_INSERT_TAIL(&sp->sp_info, ksi, ksi_list); 658 TAILQ_INSERT_TAIL(&sp->sp_info, ksi, ksi_list);
658  659
659 return 0; 660 return 0;
660} 661}
661 662
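The SIGQUEUE_MAX limit enforced above is what a sigqueue(2) caller eventually observes as EAGAIN once too many entries are pending for the target. A rough userland sketch of that behaviour, illustrative only (assumes realtime signals are available):

#include <sys/types.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	sigset_t set;
	union sigval val;
	int i;

	/* Block SIGRTMIN so every queued instance stays pending. */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	for (i = 0; ; i++) {
		val.sival_int = i;
		if (sigqueue(getpid(), SIGRTMIN, val) == -1) {
			if (errno == EAGAIN)
				printf("queue filled after %d signals\n", i);
			break;
		}
	}
	return 0;
}
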
662/* 663/*
663 * sigclear: 664 * sigclear:
664 * 665 *
665 * Clear all pending signals in the specified set. 666 * Clear all pending signals in the specified set.
666 */ 667 */
667void 668void
668sigclear(sigpend_t *sp, const sigset_t *mask, ksiginfoq_t *kq) 669sigclear(sigpend_t *sp, const sigset_t *mask, ksiginfoq_t *kq)
669{ 670{
670 ksiginfo_t *ksi, *next; 671 ksiginfo_t *ksi, *next;
671 672
672 if (mask == NULL) 673 if (mask == NULL)
673 sigemptyset(&sp->sp_set); 674 sigemptyset(&sp->sp_set);
674 else 675 else
675 sigminusset(mask, &sp->sp_set); 676 sigminusset(mask, &sp->sp_set);
676 677
677 TAILQ_FOREACH_SAFE(ksi, &sp->sp_info, ksi_list, next) { 678 TAILQ_FOREACH_SAFE(ksi, &sp->sp_info, ksi_list, next) {
678 if (mask == NULL || sigismember(mask, ksi->ksi_signo)) { 679 if (mask == NULL || sigismember(mask, ksi->ksi_signo)) {
679 TAILQ_REMOVE(&sp->sp_info, ksi, ksi_list); 680 TAILQ_REMOVE(&sp->sp_info, ksi, ksi_list);
680 KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0); 681 KASSERT((ksi->ksi_flags & KSI_FROMPOOL) != 0);
681 KASSERT((ksi->ksi_flags & KSI_QUEUED) != 0); 682 KASSERT((ksi->ksi_flags & KSI_QUEUED) != 0);
682 TAILQ_INSERT_TAIL(kq, ksi, ksi_list); 683 TAILQ_INSERT_TAIL(kq, ksi, ksi_list);
683 } 684 }
684 } 685 }
685} 686}
686 687
687/* 688/*
688 * sigclearall: 689 * sigclearall:
689 * 690 *
690 * Clear all pending signals in the specified set from a process and 691 * Clear all pending signals in the specified set from a process and
691 * its LWPs. 692 * its LWPs.
692 */ 693 */
693void 694void
694sigclearall(struct proc *p, const sigset_t *mask, ksiginfoq_t *kq) 695sigclearall(struct proc *p, const sigset_t *mask, ksiginfoq_t *kq)
695{ 696{
696 struct lwp *l; 697 struct lwp *l;
697 698
698 KASSERT(mutex_owned(p->p_lock)); 699 KASSERT(mutex_owned(p->p_lock));
699 700
700 sigclear(&p->p_sigpend, mask, kq); 701 sigclear(&p->p_sigpend, mask, kq);
701 702
702 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 703 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
703 sigclear(&l->l_sigpend, mask, kq); 704 sigclear(&l->l_sigpend, mask, kq);
704 } 705 }
705} 706}
706 707
707/* 708/*
708 * sigispending: 709 * sigispending:
709 * 710 *
710 * Return the first signal number if there are pending signals for the 711 * Return the first signal number if there are pending signals for the
711 * current LWP. May be called unlocked provided that LW_PENDSIG is set, 712 * current LWP. May be called unlocked provided that LW_PENDSIG is set,
712 * and that the signal has been posted to the appropriate queue before 713 * and that the signal has been posted to the appropriate queue before
713 * LW_PENDSIG is set. 714 * LW_PENDSIG is set.
714 * 715 *
715 * This should only ever be called with (l == curlwp), unless the 716 * This should only ever be called with (l == curlwp), unless the
716 * result does not matter (procfs, sysctl). 717 * result does not matter (procfs, sysctl).
717 */  718 */
718int 719int
719sigispending(struct lwp *l, int signo) 720sigispending(struct lwp *l, int signo)
720{ 721{
721 struct proc *p = l->l_proc; 722 struct proc *p = l->l_proc;
722 sigset_t tset; 723 sigset_t tset;
723 724
724 membar_consumer(); 725 membar_consumer();
725 726
726 tset = l->l_sigpend.sp_set; 727 tset = l->l_sigpend.sp_set;
727 sigplusset(&p->p_sigpend.sp_set, &tset); 728 sigplusset(&p->p_sigpend.sp_set, &tset);
728 sigminusset(&p->p_sigctx.ps_sigignore, &tset); 729 sigminusset(&p->p_sigctx.ps_sigignore, &tset);
729 sigminusset(&l->l_sigmask, &tset); 730 sigminusset(&l->l_sigmask, &tset);
730 731
731 if (signo == 0) { 732 if (signo == 0) {
732 return firstsig(&tset); 733 return firstsig(&tset);
733 } 734 }
734 return sigismember(&tset, signo) ? signo : 0; 735 return sigismember(&tset, signo) ? signo : 0;
735} 736}
736 737
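The per-LWP and per-process pending sets combined above are the same state that sigpending(2) lets userland inspect. A minimal sketch, illustrative only:

#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t blocked, pending;

	/* Block SIGINT, raise it, then observe it in the pending set. */
	sigemptyset(&blocked);
	sigaddset(&blocked, SIGINT);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	raise(SIGINT);

	sigpending(&pending);
	printf("SIGINT pending: %d\n", sigismember(&pending, SIGINT));
	return 0;
}
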
737void 738void
738getucontext(struct lwp *l, ucontext_t *ucp) 739getucontext(struct lwp *l, ucontext_t *ucp)
739{ 740{
740 struct proc *p = l->l_proc; 741 struct proc *p = l->l_proc;
741 742
742 KASSERT(mutex_owned(p->p_lock)); 743 KASSERT(mutex_owned(p->p_lock));
743 744
744 ucp->uc_flags = 0; 745 ucp->uc_flags = 0;
745 ucp->uc_link = l->l_ctxlink; 746 ucp->uc_link = l->l_ctxlink;
746 ucp->uc_sigmask = l->l_sigmask; 747 ucp->uc_sigmask = l->l_sigmask;
747 ucp->uc_flags |= _UC_SIGMASK; 748 ucp->uc_flags |= _UC_SIGMASK;
748 749
749 /* 750 /*
750 * The (unsupplied) definition of the `current execution stack' 751 * The (unsupplied) definition of the `current execution stack'
751 * in the System V Interface Definition appears to allow returning 752 * in the System V Interface Definition appears to allow returning
752 * the main context stack. 753 * the main context stack.
753 */ 754 */
754 if ((l->l_sigstk.ss_flags & SS_ONSTACK) == 0) { 755 if ((l->l_sigstk.ss_flags & SS_ONSTACK) == 0) {
755 ucp->uc_stack.ss_sp = (void *)l->l_proc->p_stackbase; 756 ucp->uc_stack.ss_sp = (void *)l->l_proc->p_stackbase;
756 ucp->uc_stack.ss_size = ctob(l->l_proc->p_vmspace->vm_ssize); 757 ucp->uc_stack.ss_size = ctob(l->l_proc->p_vmspace->vm_ssize);
757 ucp->uc_stack.ss_flags = 0; /* XXX, def. is Very Fishy */ 758 ucp->uc_stack.ss_flags = 0; /* XXX, def. is Very Fishy */
758 } else { 759 } else {
759 /* Simply copy alternate signal execution stack. */ 760 /* Simply copy alternate signal execution stack. */
760 ucp->uc_stack = l->l_sigstk; 761 ucp->uc_stack = l->l_sigstk;
761 } 762 }
762 ucp->uc_flags |= _UC_STACK; 763 ucp->uc_flags |= _UC_STACK;
763 mutex_exit(p->p_lock); 764 mutex_exit(p->p_lock);
764 cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags); 765 cpu_getmcontext(l, &ucp->uc_mcontext, &ucp->uc_flags);
765 mutex_enter(p->p_lock); 766 mutex_enter(p->p_lock);
766} 767}
767 768
768int 769int
769setucontext(struct lwp *l, const ucontext_t *ucp) 770setucontext(struct lwp *l, const ucontext_t *ucp)
770{ 771{
771 struct proc *p = l->l_proc; 772 struct proc *p = l->l_proc;
772 int error; 773 int error;
773 774
774 KASSERT(mutex_owned(p->p_lock)); 775 KASSERT(mutex_owned(p->p_lock));
775 776
776 if ((ucp->uc_flags & _UC_SIGMASK) != 0) { 777 if ((ucp->uc_flags & _UC_SIGMASK) != 0) {
777 error = sigprocmask1(l, SIG_SETMASK, &ucp->uc_sigmask, NULL); 778 error = sigprocmask1(l, SIG_SETMASK, &ucp->uc_sigmask, NULL);
778 if (error != 0) 779 if (error != 0)
779 return error; 780 return error;
780 } 781 }
781 782
782 mutex_exit(p->p_lock); 783 mutex_exit(p->p_lock);
783 error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags); 784 error = cpu_setmcontext(l, &ucp->uc_mcontext, ucp->uc_flags);
784 mutex_enter(p->p_lock); 785 mutex_enter(p->p_lock);
785 if (error != 0) 786 if (error != 0)
786 return (error); 787 return (error);
787 788
788 l->l_ctxlink = ucp->uc_link; 789 l->l_ctxlink = ucp->uc_link;
789 790
790 /* 791 /*
791 * If there was stack information, update whether or not we are 792 * If there was stack information, update whether or not we are
792 * still running on an alternate signal stack. 793 * still running on an alternate signal stack.
793 */ 794 */
794 if ((ucp->uc_flags & _UC_STACK) != 0) { 795 if ((ucp->uc_flags & _UC_STACK) != 0) {
795 if (ucp->uc_stack.ss_flags & SS_ONSTACK) 796 if (ucp->uc_stack.ss_flags & SS_ONSTACK)
796 l->l_sigstk.ss_flags |= SS_ONSTACK; 797 l->l_sigstk.ss_flags |= SS_ONSTACK;
797 else 798 else
798 l->l_sigstk.ss_flags &= ~SS_ONSTACK; 799 l->l_sigstk.ss_flags &= ~SS_ONSTACK;
799 } 800 }
800 801
801 return 0; 802 return 0;
802} 803}
803 804
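getucontext()/setucontext() are the kernel side of the getcontext(2)/setcontext(2) family; the uc_sigmask and uc_stack handling above is what lets a restored context resume with the expected signal mask and stack. A minimal swapcontext(3) sketch, illustrative only:

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, func_ctx;
static char stack[16384];

static void
hello(void)
{
	printf("in coroutine\n");
	/* Returning resumes uc_link, i.e. main_ctx. */
}

int
main(void)
{
	getcontext(&func_ctx);
	func_ctx.uc_stack.ss_sp = stack;
	func_ctx.uc_stack.ss_size = sizeof(stack);
	func_ctx.uc_link = &main_ctx;
	makecontext(&func_ctx, hello, 0);

	swapcontext(&main_ctx, &func_ctx);	/* run hello() on its own stack */
	printf("back in main\n");
	return 0;
}
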
804/* 805/*
805 * killpg1: common code for kill process group/broadcast kill. 806 * killpg1: common code for kill process group/broadcast kill.
806 */ 807 */
807int 808int
808killpg1(struct lwp *l, ksiginfo_t *ksi, int pgid, int all) 809killpg1(struct lwp *l, ksiginfo_t *ksi, int pgid, int all)
809{ 810{
810 struct proc *p, *cp; 811 struct proc *p, *cp;
811 kauth_cred_t pc; 812 kauth_cred_t pc;
812 struct pgrp *pgrp; 813 struct pgrp *pgrp;
813 int nfound; 814 int nfound;
814 int signo = ksi->ksi_signo; 815 int signo = ksi->ksi_signo;
815 816
816 cp = l->l_proc; 817 cp = l->l_proc;
817 pc = l->l_cred; 818 pc = l->l_cred;
818 nfound = 0; 819 nfound = 0;
819 820
820 mutex_enter(&proc_lock); 821 mutex_enter(&proc_lock);
821 if (all) { 822 if (all) {
822 /* 823 /*
823 * Broadcast. 824 * Broadcast.
824 */ 825 */
825 PROCLIST_FOREACH(p, &allproc) { 826 PROCLIST_FOREACH(p, &allproc) {
826 if (p->p_pid <= 1 || p == cp || 827 if (p->p_pid <= 1 || p == cp ||
827 (p->p_flag & PK_SYSTEM) != 0) 828 (p->p_flag & PK_SYSTEM) != 0)
828 continue; 829 continue;
829 mutex_enter(p->p_lock); 830 mutex_enter(p->p_lock);
830 if (kauth_authorize_process(pc, 831 if (kauth_authorize_process(pc,
831 KAUTH_PROCESS_SIGNAL, p, KAUTH_ARG(signo), NULL, 832 KAUTH_PROCESS_SIGNAL, p, KAUTH_ARG(signo), NULL,
832 NULL) == 0) { 833 NULL) == 0) {
833 nfound++; 834 nfound++;
834 if (signo) 835 if (signo)
835 kpsignal2(p, ksi); 836 kpsignal2(p, ksi);
836 } 837 }
837 mutex_exit(p->p_lock); 838 mutex_exit(p->p_lock);
838 } 839 }
839 } else { 840 } else {
840 if (pgid == 0) 841 if (pgid == 0)
841 /* Zero pgid means send to my process group. */ 842 /* Zero pgid means send to my process group. */
842 pgrp = cp->p_pgrp; 843 pgrp = cp->p_pgrp;
843 else { 844 else {
844 pgrp = pgrp_find(pgid); 845 pgrp = pgrp_find(pgid);
845 if (pgrp == NULL) 846 if (pgrp == NULL)
846 goto out; 847 goto out;
847 } 848 }
848 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { 849 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
849 if (p->p_pid <= 1 || p->p_flag & PK_SYSTEM) 850 if (p->p_pid <= 1 || p->p_flag & PK_SYSTEM)
850 continue; 851 continue;
851 mutex_enter(p->p_lock); 852 mutex_enter(p->p_lock);
852 if (kauth_authorize_process(pc, KAUTH_PROCESS_SIGNAL, 853 if (kauth_authorize_process(pc, KAUTH_PROCESS_SIGNAL,
853 p, KAUTH_ARG(signo), NULL, NULL) == 0) { 854 p, KAUTH_ARG(signo), NULL, NULL) == 0) {
854 nfound++; 855 nfound++;
855 if (signo && P_ZOMBIE(p) == 0) 856 if (signo && P_ZOMBIE(p) == 0)
856 kpsignal2(p, ksi); 857 kpsignal2(p, ksi);
857 } 858 }
858 mutex_exit(p->p_lock); 859 mutex_exit(p->p_lock);
859 } 860 }
860 } 861 }
861out: 862out:
862 mutex_exit(&proc_lock); 863 mutex_exit(&proc_lock);
863 return nfound ? 0 : ESRCH; 864 return nfound ? 0 : ESRCH;
864} 865}
865 866
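killpg1() backs kill(2) with a zero or negative pid and killpg(3); the trailing ESRCH is what the caller sees when no authorized member matched. A small userland sketch, illustrative only (the pgid 12345 is made up and assumed not to exist):

#include <sys/types.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	/* Don't terminate on our own broadcast below. */
	signal(SIGUSR1, SIG_IGN);

	/* pid 0: signal every member of our own process group. */
	if (kill(0, SIGUSR1) == -1)
		printf("own pgrp: %s\n", strerror(errno));

	/* A negative pid names an explicit process group (-pgid). */
	if (kill(-12345, SIGTERM) == -1 && errno == ESRCH)
		printf("no such process group\n");
	return 0;
}
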
866/* 867/*
867 * Send a signal to a process group. If checkctty is set, limit to members 868 * Send a signal to a process group. If checkctty is set, limit to members
868 * which have a controlling terminal. 869 * which have a controlling terminal.
869 */ 870 */
870void 871void
871pgsignal(struct pgrp *pgrp, int sig, int checkctty) 872pgsignal(struct pgrp *pgrp, int sig, int checkctty)
872{ 873{
873 ksiginfo_t ksi; 874 ksiginfo_t ksi;
874 875
875 KASSERT(!cpu_intr_p()); 876 KASSERT(!cpu_intr_p());
876 KASSERT(mutex_owned(&proc_lock)); 877 KASSERT(mutex_owned(&proc_lock));
877 878
878 KSI_INIT_EMPTY(&ksi); 879 KSI_INIT_EMPTY(&ksi);
879 ksi.ksi_signo = sig; 880 ksi.ksi_signo = sig;
880 kpgsignal(pgrp, &ksi, NULL, checkctty); 881 kpgsignal(pgrp, &ksi, NULL, checkctty);
881} 882}
882 883
883void 884void
884kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty) 885kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
885{ 886{
886 struct proc *p; 887 struct proc *p;
887 888
888 KASSERT(!cpu_intr_p()); 889 KASSERT(!cpu_intr_p());
889 KASSERT(mutex_owned(&proc_lock)); 890 KASSERT(mutex_owned(&proc_lock));
890 KASSERT(pgrp != NULL); 891 KASSERT(pgrp != NULL);
891 892
892 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) 893 LIST_FOREACH(p, &pgrp->pg_members, p_pglist)
893 if (checkctty == 0 || p->p_lflag & PL_CONTROLT) 894 if (checkctty == 0 || p->p_lflag & PL_CONTROLT)
894 kpsignal(p, ksi, data); 895 kpsignal(p, ksi, data);
895} 896}
896 897
897/* 898/*
898 * Send a signal caused by a trap to the current LWP. If it will be caught 899 * Send a signal caused by a trap to the current LWP. If it will be caught
899 * immediately, deliver it with correct code. Otherwise, post it normally. 900 * immediately, deliver it with correct code. Otherwise, post it normally.
900 */ 901 */
901void 902void
902trapsignal(struct lwp *l, ksiginfo_t *ksi) 903trapsignal(struct lwp *l, ksiginfo_t *ksi)
903{ 904{
904 struct proc *p; 905 struct proc *p;
905 struct sigacts *ps; 906 struct sigacts *ps;
906 int signo = ksi->ksi_signo; 907 int signo = ksi->ksi_signo;
907 sigset_t *mask; 908 sigset_t *mask;
908 sig_t action; 909 sig_t action;
909 910
910 KASSERT(KSI_TRAP_P(ksi)); 911 KASSERT(KSI_TRAP_P(ksi));
911 912
912 ksi->ksi_lid = l->l_lid; 913 ksi->ksi_lid = l->l_lid;
913 p = l->l_proc; 914 p = l->l_proc;
914 915
915 KASSERT(!cpu_intr_p()); 916 KASSERT(!cpu_intr_p());
916 mutex_enter(&proc_lock); 917 mutex_enter(&proc_lock);
917 mutex_enter(p->p_lock); 918 mutex_enter(p->p_lock);
918 919
919repeat: 920repeat:
920 /* 921 /*
921 * If we are exiting, demise now. 922 * If we are exiting, demise now.
922 * 923 *
923 * This avoids notifying tracer and deadlocking. 924 * This avoids notifying tracer and deadlocking.
924 */ 925 */
925 if (__predict_false(ISSET(p->p_sflag, PS_WEXIT))) { 926 if (__predict_false(ISSET(p->p_sflag, PS_WEXIT))) {
926 mutex_exit(p->p_lock); 927 mutex_exit(p->p_lock);
927 mutex_exit(&proc_lock); 928 mutex_exit(&proc_lock);
928 lwp_exit(l); 929 lwp_exit(l);
929 panic("trapsignal"); 930 panic("trapsignal");
930 /* NOTREACHED */ 931 /* NOTREACHED */
931 } 932 }
932 933
933 /* 934 /*
934 * The process is already stopping. 935 * The process is already stopping.
935 */ 936 */
936 if ((p->p_sflag & PS_STOPPING) != 0) { 937 if ((p->p_sflag & PS_STOPPING) != 0) {
937 mutex_exit(&proc_lock); 938 mutex_exit(&proc_lock);
938 sigswitch_unlock_and_switch_away(l); 939 sigswitch_unlock_and_switch_away(l);
939 mutex_enter(&proc_lock); 940 mutex_enter(&proc_lock);
940 mutex_enter(p->p_lock); 941 mutex_enter(p->p_lock);
941 goto repeat; 942 goto repeat;
942 } 943 }
943 944
944 mask = &l->l_sigmask; 945 mask = &l->l_sigmask;
945 ps = p->p_sigacts; 946 ps = p->p_sigacts;
946 action = SIGACTION_PS(ps, signo).sa_handler; 947 action = SIGACTION_PS(ps, signo).sa_handler;
947 948
948 if (ISSET(p->p_slflag, PSL_TRACED) && 949 if (ISSET(p->p_slflag, PSL_TRACED) &&
949 !(p->p_pptr == p->p_opptr && ISSET(p->p_lflag, PL_PPWAIT)) && 950 !(p->p_pptr == p->p_opptr && ISSET(p->p_lflag, PL_PPWAIT)) &&
950 p->p_xsig != SIGKILL && 951 p->p_xsig != SIGKILL &&
951 !sigismember(&p->p_sigpend.sp_set, SIGKILL)) { 952 !sigismember(&p->p_sigpend.sp_set, SIGKILL)) {
952 p->p_xsig = signo; 953 p->p_xsig = signo;
953 p->p_sigctx.ps_faked = true; 954 p->p_sigctx.ps_faked = true;
954 p->p_sigctx.ps_lwp = ksi->ksi_lid; 955 p->p_sigctx.ps_lwp = ksi->ksi_lid;
955 p->p_sigctx.ps_info = ksi->ksi_info; 956 p->p_sigctx.ps_info = ksi->ksi_info;
956 sigswitch(0, signo, true); 957 sigswitch(0, signo, true);
957 958
958 if (ktrpoint(KTR_PSIG)) { 959 if (ktrpoint(KTR_PSIG)) {
959 if (p->p_emul->e_ktrpsig) 960 if (p->p_emul->e_ktrpsig)
960 p->p_emul->e_ktrpsig(signo, action, mask, ksi); 961 p->p_emul->e_ktrpsig(signo, action, mask, ksi);
961 else 962 else
962 ktrpsig(signo, action, mask, ksi); 963 ktrpsig(signo, action, mask, ksi);
963 } 964 }
964 return; 965 return;
965 } 966 }
966 967
967 const bool caught = sigismember(&p->p_sigctx.ps_sigcatch, signo); 968 const bool caught = sigismember(&p->p_sigctx.ps_sigcatch, signo);
968 const bool masked = sigismember(mask, signo); 969 const bool masked = sigismember(mask, signo);
969 if (caught && !masked) { 970 if (caught && !masked) {
970 mutex_exit(&proc_lock); 971 mutex_exit(&proc_lock);
971 l->l_ru.ru_nsignals++; 972 l->l_ru.ru_nsignals++;
972 kpsendsig(l, ksi, mask); 973 kpsendsig(l, ksi, mask);
973 mutex_exit(p->p_lock); 974 mutex_exit(p->p_lock);
974 975
975 if (ktrpoint(KTR_PSIG)) { 976 if (ktrpoint(KTR_PSIG)) {
976 if (p->p_emul->e_ktrpsig) 977 if (p->p_emul->e_ktrpsig)
977 p->p_emul->e_ktrpsig(signo, action, mask, ksi); 978 p->p_emul->e_ktrpsig(signo, action, mask, ksi);
978 else 979 else
979 ktrpsig(signo, action, mask, ksi); 980 ktrpsig(signo, action, mask, ksi);
980 } 981 }
981 return; 982 return;
982 } 983 }
983 984
984 /* 985 /*
985 * If the signal is masked or ignored, then unmask it and 986 * If the signal is masked or ignored, then unmask it and
986 * reset it to the default action so that the process or 987 * reset it to the default action so that the process or
987 * its tracer will be notified. 988 * its tracer will be notified.
988 */ 989 */
989 const bool ignored = action == SIG_IGN; 990 const bool ignored = action == SIG_IGN;
990 if (masked || ignored) { 991 if (masked || ignored) {
991 mutex_enter(&ps->sa_mutex); 992 mutex_enter(&ps->sa_mutex);
992 sigdelset(mask, signo);  993 sigdelset(mask, signo);
993 sigdelset(&p->p_sigctx.ps_sigcatch, signo); 994 sigdelset(&p->p_sigctx.ps_sigcatch, signo);
994 sigdelset(&p->p_sigctx.ps_sigignore, signo); 995 sigdelset(&p->p_sigctx.ps_sigignore, signo);
995 sigdelset(&SIGACTION_PS(ps, signo).sa_mask, signo); 996 sigdelset(&SIGACTION_PS(ps, signo).sa_mask, signo);
996 SIGACTION_PS(ps, signo).sa_handler = SIG_DFL; 997 SIGACTION_PS(ps, signo).sa_handler = SIG_DFL;
997 mutex_exit(&ps->sa_mutex); 998 mutex_exit(&ps->sa_mutex);
998 } 999 }
999 1000
1000 kpsignal2(p, ksi); 1001 kpsignal2(p, ksi);
1001 mutex_exit(p->p_lock); 1002 mutex_exit(p->p_lock);
1002 mutex_exit(&proc_lock); 1003 mutex_exit(&proc_lock);
1003} 1004}
1004 1005
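The ksiginfo filled in by the trap path is what a catching process receives through an SA_SIGINFO handler, e.g. the faulting address of a SIGSEGV. A hedged userland sketch, illustrative only (deliberately touches an unmapped page; printf from a handler is not async-signal-safe, but the process exits immediately):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
handler(int signo, siginfo_t *si, void *ctx)
{
	/* si_addr carries the faulting address recorded by the trap. */
	printf("signal %d, si_code %d, fault addr %p\n",
	    signo, si->si_code, si->si_addr);
	_exit(0);
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)16 = 0;	/* fault on an unmapped low page */
	return 0;
}
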
1005/* 1006/*
1006 * Fill in signal information and signal the parent for a child status change. 1007 * Fill in signal information and signal the parent for a child status change.
1007 */ 1008 */
1008void 1009void
1009child_psignal(struct proc *p, int mask) 1010child_psignal(struct proc *p, int mask)
1010{ 1011{
1011 ksiginfo_t ksi; 1012 ksiginfo_t ksi;
1012 struct proc *q; 1013 struct proc *q;
1013 int xsig; 1014 int xsig;
1014 1015
1015 KASSERT(mutex_owned(&proc_lock)); 1016 KASSERT(mutex_owned(&proc_lock));
1016 KASSERT(mutex_owned(p->p_lock)); 1017 KASSERT(mutex_owned(p->p_lock));
1017 1018
1018 xsig = p->p_xsig; 1019 xsig = p->p_xsig;
1019 1020
1020 KSI_INIT(&ksi); 1021 KSI_INIT(&ksi);
1021 ksi.ksi_signo = SIGCHLD; 1022 ksi.ksi_signo = SIGCHLD;
1022 ksi.ksi_code = (xsig == SIGCONT ? CLD_CONTINUED : CLD_STOPPED); 1023 ksi.ksi_code = (xsig == SIGCONT ? CLD_CONTINUED : CLD_STOPPED);
1023 ksi.ksi_pid = p->p_pid; 1024 ksi.ksi_pid = p->p_pid;
1024 ksi.ksi_uid = kauth_cred_geteuid(p->p_cred); 1025 ksi.ksi_uid = kauth_cred_geteuid(p->p_cred);
1025 ksi.ksi_status = xsig; 1026 ksi.ksi_status = xsig;
1026 ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec; 1027 ksi.ksi_utime = p->p_stats->p_ru.ru_utime.tv_sec;
1027 ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec; 1028 ksi.ksi_stime = p->p_stats->p_ru.ru_stime.tv_sec;
1028 1029
1029 q = p->p_pptr; 1030 q = p->p_pptr;
1030 1031
1031 mutex_exit(p->p_lock); 1032 mutex_exit(p->p_lock);
1032 mutex_enter(q->p_lock); 1033 mutex_enter(q->p_lock);
1033 1034
1034 if ((q->p_sflag & mask) == 0) 1035 if ((q->p_sflag & mask) == 0)
1035 kpsignal2(q, &ksi); 1036 kpsignal2(q, &ksi);
1036 1037
1037 mutex_exit(q->p_lock); 1038 mutex_exit(q->p_lock);
1038 mutex_enter(p->p_lock); 1039 mutex_enter(p->p_lock);
1039} 1040}
1040 1041
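The CLD_STOPPED/CLD_CONTINUED distinction made here is visible to a parent catching SIGCHLD with SA_SIGINFO. A rough sketch of the receiving side, illustrative only (printf from the handler is kept for brevity):

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void
chld(int signo, siginfo_t *si, void *ctx)
{
	const char *what =
	    si->si_code == CLD_STOPPED ? "stopped" :
	    si->si_code == CLD_CONTINUED ? "continued" : "other";

	printf("child %ld %s (status %d)\n", (long)si->si_pid, what,
	    si->si_status);
}

int
main(void)
{
	struct sigaction sa;
	pid_t pid;

	sa.sa_sigaction = chld;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);

	if ((pid = fork()) == 0) {
		pause();
		_exit(0);
	}
	kill(pid, SIGSTOP);	/* parent is told CLD_STOPPED */
	sleep(1);
	kill(pid, SIGCONT);	/* ... then CLD_CONTINUED */
	sleep(1);
	kill(pid, SIGKILL);
	wait(NULL);
	return 0;
}
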
1041void 1042void
1042psignal(struct proc *p, int signo) 1043psignal(struct proc *p, int signo)
1043{ 1044{
1044 ksiginfo_t ksi; 1045 ksiginfo_t ksi;
1045 1046
1046 KASSERT(!cpu_intr_p()); 1047 KASSERT(!cpu_intr_p());
1047 KASSERT(mutex_owned(&proc_lock)); 1048 KASSERT(mutex_owned(&proc_lock));
1048 1049
1049 KSI_INIT_EMPTY(&ksi); 1050 KSI_INIT_EMPTY(&ksi);
1050 ksi.ksi_signo = signo; 1051 ksi.ksi_signo = signo;
1051 mutex_enter(p->p_lock); 1052 mutex_enter(p->p_lock);
1052 kpsignal2(p, &ksi); 1053 kpsignal2(p, &ksi);
1053 mutex_exit(p->p_lock); 1054 mutex_exit(p->p_lock);
1054} 1055}
1055 1056
1056void 1057void
1057kpsignal(struct proc *p, ksiginfo_t *ksi, void *data) 1058kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
1058{ 1059{
1059 fdfile_t *ff; 1060 fdfile_t *ff;
1060 file_t *fp; 1061 file_t *fp;
1061 fdtab_t *dt; 1062 fdtab_t *dt;
1062 1063
1063 KASSERT(!cpu_intr_p()); 1064 KASSERT(!cpu_intr_p());
1064 KASSERT(mutex_owned(&proc_lock)); 1065 KASSERT(mutex_owned(&proc_lock));
1065 1066
1066 if ((p->p_sflag & PS_WEXIT) == 0 && data) { 1067 if ((p->p_sflag & PS_WEXIT) == 0 && data) {
1067 size_t fd; 1068 size_t fd;
1068 filedesc_t *fdp = p->p_fd; 1069 filedesc_t *fdp = p->p_fd;
1069 1070
1070 /* XXXSMP locking */ 1071 /* XXXSMP locking */
1071 ksi->ksi_fd = -1; 1072 ksi->ksi_fd = -1;
1072 dt = atomic_load_consume(&fdp->fd_dt); 1073 dt = atomic_load_consume(&fdp->fd_dt);
1073 for (fd = 0; fd < dt->dt_nfiles; fd++) { 1074 for (fd = 0; fd < dt->dt_nfiles; fd++) {
@@ -1360,1329 +1361,1329 @@ kpsignal2(struct proc *p, ksiginfo_t *ks @@ -1360,1329 +1361,1329 @@ kpsignal2(struct proc *p, ksiginfo_t *ks
1360 if (sigismember(&p->p_sigctx.ps_sigignore, signo)) 1361 if (sigismember(&p->p_sigctx.ps_sigignore, signo))
1361 goto discard; 1362 goto discard;
1362 1363
1363 else if (sigismember(&p->p_sigctx.ps_sigcatch, signo)) 1364 else if (sigismember(&p->p_sigctx.ps_sigcatch, signo))
1364 action = SIG_CATCH; 1365 action = SIG_CATCH;
1365 else { 1366 else {
1366 action = SIG_DFL; 1367 action = SIG_DFL;
1367 1368
1368 /* 1369 /*
1369 * If sending a tty stop signal to a member of an 1370 * If sending a tty stop signal to a member of an
1370 * orphaned process group, discard the signal here if 1371 * orphaned process group, discard the signal here if
1371 * the action is default; don't stop the process below 1372 * the action is default; don't stop the process below
1372 * if sleeping, and don't clear any pending SIGCONT. 1373 * if sleeping, and don't clear any pending SIGCONT.
1373 */ 1374 */
1374 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0) 1375 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0)
1375 goto discard; 1376 goto discard;
1376 1377
1377 if (prop & SA_KILL && p->p_nice > NZERO) 1378 if (prop & SA_KILL && p->p_nice > NZERO)
1378 p->p_nice = NZERO; 1379 p->p_nice = NZERO;
1379 } 1380 }
1380 } 1381 }
1381 1382
1382 /* 1383 /*
1383 * If stopping or continuing a process, discard any pending 1384 * If stopping or continuing a process, discard any pending
1384 * signals that would do the inverse. 1385 * signals that would do the inverse.
1385 */ 1386 */
1386 if ((prop & (SA_CONT | SA_STOP)) != 0) { 1387 if ((prop & (SA_CONT | SA_STOP)) != 0) {
1387 ksiginfoq_t kq; 1388 ksiginfoq_t kq;
1388 1389
1389 ksiginfo_queue_init(&kq); 1390 ksiginfo_queue_init(&kq);
1390 if ((prop & SA_CONT) != 0) 1391 if ((prop & SA_CONT) != 0)
1391 sigclear(&p->p_sigpend, &stopsigmask, &kq); 1392 sigclear(&p->p_sigpend, &stopsigmask, &kq);
1392 if ((prop & SA_STOP) != 0) 1393 if ((prop & SA_STOP) != 0)
1393 sigclear(&p->p_sigpend, &contsigmask, &kq); 1394 sigclear(&p->p_sigpend, &contsigmask, &kq);
1394 ksiginfo_queue_drain(&kq); /* XXXSMP */ 1395 ksiginfo_queue_drain(&kq); /* XXXSMP */
1395 } 1396 }
1396 1397
1397 /* 1398 /*
1398 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL, 1399 * If the signal doesn't have SA_CANTMASK (no override for SIGKILL,
1399 * please!), check if any LWPs are waiting on it. If yes, pass on 1400 * please!), check if any LWPs are waiting on it. If yes, pass on
1400 * the signal info. The signal won't be processed further here. 1401 * the signal info. The signal won't be processed further here.
1401 */ 1402 */
1402 if ((prop & SA_CANTMASK) == 0 && !LIST_EMPTY(&p->p_sigwaiters) && 1403 if ((prop & SA_CANTMASK) == 0 && !LIST_EMPTY(&p->p_sigwaiters) &&
1403 p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0 && 1404 p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0 &&
1404 sigunwait(p, ksi)) 1405 sigunwait(p, ksi))
1405 goto discard; 1406 goto discard;
1406 1407
1407 /* 1408 /*
1408 * XXXSMP Should be allocated by the caller, we're holding locks 1409 * XXXSMP Should be allocated by the caller, we're holding locks
1409 * here. 1410 * here.
1410 */ 1411 */
1411 if (kp == NULL && (kp = ksiginfo_alloc(p, ksi, PR_NOWAIT)) == NULL) 1412 if (kp == NULL && (kp = ksiginfo_alloc(p, ksi, PR_NOWAIT)) == NULL)
1412 goto discard; 1413 goto discard;
1413 1414
1414 /* 1415 /*
1415 * LWP private signals are easy - just find the LWP and post 1416 * LWP private signals are easy - just find the LWP and post
1416 * the signal to it. 1417 * the signal to it.
1417 */ 1418 */
1418 if (lid != 0) { 1419 if (lid != 0) {
1419 l = lwp_find(p, lid); 1420 l = lwp_find(p, lid);
1420 if (l != NULL) { 1421 if (l != NULL) {
1421 if ((error = sigput(&l->l_sigpend, p, kp)) != 0) 1422 if ((error = sigput(&l->l_sigpend, p, kp)) != 0)
1422 goto out; 1423 goto out;
1423 membar_producer(); 1424 membar_producer();
1424 if (sigpost(l, action, prop, kp->ksi_signo) != 0) 1425 if (sigpost(l, action, prop, kp->ksi_signo) != 0)
1425 signo = -1; 1426 signo = -1;
1426 } 1427 }
1427 goto out; 1428 goto out;
1428 } 1429 }
1429 1430
1430 /* 1431 /*
1431 * Some signals go to all LWPs, even if posted with _lwp_kill() 1432 * Some signals go to all LWPs, even if posted with _lwp_kill()
1432 * or for an SA process. 1433 * or for an SA process.
1433 */ 1434 */
1434 if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) { 1435 if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) {
1435 if (traced) 1436 if (traced)
1436 goto deliver; 1437 goto deliver;
1437 1438
1438 /* 1439 /*
1439 * If SIGCONT is default (or ignored) and process is 1440 * If SIGCONT is default (or ignored) and process is
1440 * asleep, we are finished; the process should not 1441 * asleep, we are finished; the process should not
1441 * be awakened. 1442 * be awakened.
1442 */ 1443 */
1443 if ((prop & SA_CONT) != 0 && action == SIG_DFL) 1444 if ((prop & SA_CONT) != 0 && action == SIG_DFL)
1444 goto out; 1445 goto out;
1445 } else { 1446 } else {
1446 /* 1447 /*
1447 * Process is stopped or stopping. 1448 * Process is stopped or stopping.
1448 * - If traced, then no action is needed, unless killing. 1449 * - If traced, then no action is needed, unless killing.
1449 * - Run the process only if sending SIGCONT or SIGKILL. 1450 * - Run the process only if sending SIGCONT or SIGKILL.
1450 */ 1451 */
1451 if (traced && signo != SIGKILL) { 1452 if (traced && signo != SIGKILL) {
1452 goto out; 1453 goto out;
1453 } 1454 }
1454 if ((prop & SA_CONT) != 0 || signo == SIGKILL) { 1455 if ((prop & SA_CONT) != 0 || signo == SIGKILL) {
1455 /* 1456 /*
1456 * Re-adjust p_nstopchild if the process was 1457 * Re-adjust p_nstopchild if the process was
1457 * stopped but not yet collected by its parent. 1458 * stopped but not yet collected by its parent.
1458 */ 1459 */
1459 if (p->p_stat == SSTOP && !p->p_waited) 1460 if (p->p_stat == SSTOP && !p->p_waited)
1460 p->p_pptr->p_nstopchild--; 1461 p->p_pptr->p_nstopchild--;
1461 p->p_stat = SACTIVE; 1462 p->p_stat = SACTIVE;
1462 p->p_sflag &= ~PS_STOPPING; 1463 p->p_sflag &= ~PS_STOPPING;
1463 if (traced) { 1464 if (traced) {
1464 KASSERT(signo == SIGKILL); 1465 KASSERT(signo == SIGKILL);
1465 goto deliver; 1466 goto deliver;
1466 } 1467 }
1467 /* 1468 /*
1468 * Do not make signal pending if SIGCONT is default. 1469 * Do not make signal pending if SIGCONT is default.
1469 * 1470 *
1470 * If the process catches SIGCONT, let it handle the 1471 * If the process catches SIGCONT, let it handle the
1471 * signal itself (if waiting on event - process runs, 1472 * signal itself (if waiting on event - process runs,
1472 * otherwise continues sleeping). 1473 * otherwise continues sleeping).
1473 */ 1474 */
1474 if ((prop & SA_CONT) != 0) { 1475 if ((prop & SA_CONT) != 0) {
1475 p->p_xsig = SIGCONT; 1476 p->p_xsig = SIGCONT;
1476 p->p_sflag |= PS_CONTINUED; 1477 p->p_sflag |= PS_CONTINUED;
1477 child_psignal(p, 0); 1478 child_psignal(p, 0);
1478 if (action == SIG_DFL) { 1479 if (action == SIG_DFL) {
1479 KASSERT(signo != SIGKILL); 1480 KASSERT(signo != SIGKILL);
1480 goto deliver; 1481 goto deliver;
1481 } 1482 }
1482 } 1483 }
1483 } else if ((prop & SA_STOP) != 0) { 1484 } else if ((prop & SA_STOP) != 0) {
1484 /* 1485 /*
1485 * Already stopped, don't need to stop again. 1486 * Already stopped, don't need to stop again.
1486 * (If we did, the shell could get confused.) 1487 * (If we did, the shell could get confused.)
1487 */ 1488 */
1488 goto out; 1489 goto out;
1489 } 1490 }
1490 } 1491 }
1491 /* 1492 /*
1492 * Make signal pending. 1493 * Make signal pending.
1493 */ 1494 */
1494 KASSERT(!traced); 1495 KASSERT(!traced);
1495 if ((error = sigput(&p->p_sigpend, p, kp)) != 0) 1496 if ((error = sigput(&p->p_sigpend, p, kp)) != 0)
1496 goto out; 1497 goto out;
1497deliver: 1498deliver:
1498 /* 1499 /*
1499 * Before we set LW_PENDSIG on any LWP, ensure that the signal is 1500 * Before we set LW_PENDSIG on any LWP, ensure that the signal is
1500 * visible on the per process list (for sigispending()). This 1501 * visible on the per process list (for sigispending()). This
1501 * is unlikely to be needed in practice, but... 1502 * is unlikely to be needed in practice, but...
1502 */ 1503 */
1503 membar_producer(); 1504 membar_producer();
1504 1505
1505 /* 1506 /*
1506 * Try to find an LWP that can take the signal. 1507 * Try to find an LWP that can take the signal.
1507 */ 1508 */
1508 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 1509 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1509 if (sigpost(l, action, prop, kp->ksi_signo) && !toall) 1510 if (sigpost(l, action, prop, kp->ksi_signo) && !toall)
1510 break; 1511 break;
1511 } 1512 }
1512 signo = -1; 1513 signo = -1;
1513out: 1514out:
1514 /* 1515 /*
1515 * If the ksiginfo wasn't used, then bin it. XXXSMP freeing memory 1516 * If the ksiginfo wasn't used, then bin it. XXXSMP freeing memory
1516 * with locks held. The caller should take care of this. 1517 * with locks held. The caller should take care of this.
1517 */ 1518 */
1518 ksiginfo_free(kp); 1519 ksiginfo_free(kp);
1519 if (signo == -1) 1520 if (signo == -1)
1520 return error; 1521 return error;
1521discard: 1522discard:
1522 SDT_PROBE(proc, kernel, , signal__discard, l, p, signo, 0, 0); 1523 SDT_PROBE(proc, kernel, , signal__discard, l, p, signo, 0, 0);
1523 return error; 1524 return error;
1524} 1525}
1525 1526
1526void 1527void
1527kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask) 1528kpsendsig(struct lwp *l, const ksiginfo_t *ksi, const sigset_t *mask)
1528{ 1529{
1529 struct proc *p = l->l_proc; 1530 struct proc *p = l->l_proc;
1530 1531
1531 KASSERT(mutex_owned(p->p_lock)); 1532 KASSERT(mutex_owned(p->p_lock));
1532 (*p->p_emul->e_sendsig)(ksi, mask); 1533 (*p->p_emul->e_sendsig)(ksi, mask);
1533} 1534}
1534 1535
1535/* 1536/*
1536 * Stop any LWPs sleeping interruptibly. 1537 * Stop any LWPs sleeping interruptibly.
1537 */ 1538 */
1538static void 1539static void
1539proc_stop_lwps(struct proc *p) 1540proc_stop_lwps(struct proc *p)
1540{ 1541{
1541 struct lwp *l; 1542 struct lwp *l;
1542 1543
1543 KASSERT(mutex_owned(p->p_lock)); 1544 KASSERT(mutex_owned(p->p_lock));
1544 KASSERT((p->p_sflag & PS_STOPPING) != 0); 1545 KASSERT((p->p_sflag & PS_STOPPING) != 0);
1545 1546
1546 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 1547 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1547 lwp_lock(l); 1548 lwp_lock(l);
1548 if (l->l_stat == LSSLEEP && (l->l_flag & LW_SINTR) != 0) { 1549 if (l->l_stat == LSSLEEP && (l->l_flag & LW_SINTR) != 0) {
1549 l->l_stat = LSSTOP; 1550 l->l_stat = LSSTOP;
1550 p->p_nrlwps--; 1551 p->p_nrlwps--;
1551 } 1552 }
1552 lwp_unlock(l); 1553 lwp_unlock(l);
1553 } 1554 }
1554} 1555}
1555 1556
1556/* 1557/*
1557 * Finish stopping of a process. Mark it stopped and notify the parent. 1558 * Finish stopping of a process. Mark it stopped and notify the parent.
1558 * 1559 *
1559 * Drops p_lock briefly via child_psignal(). 1560 * Drops p_lock briefly via child_psignal().
1560 */ 1561 */
1561static void 1562static void
1562proc_stop_done(struct proc *p, int ppmask) 1563proc_stop_done(struct proc *p, int ppmask)
1563{ 1564{
1564 1565
1565 KASSERT(mutex_owned(&proc_lock)); 1566 KASSERT(mutex_owned(&proc_lock));
1566 KASSERT(mutex_owned(p->p_lock)); 1567 KASSERT(mutex_owned(p->p_lock));
1567 KASSERT((p->p_sflag & PS_STOPPING) != 0); 1568 KASSERT((p->p_sflag & PS_STOPPING) != 0);
1568 KASSERT(p->p_nrlwps == 0 || (p->p_nrlwps == 1 && p == curproc)); 1569 KASSERT(p->p_nrlwps == 0 || (p->p_nrlwps == 1 && p == curproc));
1569 1570
1570 p->p_sflag &= ~PS_STOPPING; 1571 p->p_sflag &= ~PS_STOPPING;
1571 p->p_stat = SSTOP; 1572 p->p_stat = SSTOP;
1572 p->p_waited = 0; 1573 p->p_waited = 0;
1573 p->p_pptr->p_nstopchild++; 1574 p->p_pptr->p_nstopchild++;
1574 1575
1575 /* child_psignal drops p_lock briefly. */ 1576 /* child_psignal drops p_lock briefly. */
1576 child_psignal(p, ppmask); 1577 child_psignal(p, ppmask);
1577 cv_broadcast(&p->p_pptr->p_waitcv); 1578 cv_broadcast(&p->p_pptr->p_waitcv);
1578} 1579}
1579 1580
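Once proc_stop_done() marks the process SSTOP and broadcasts on the parent's p_waitcv, a parent blocked in waitpid(2) with WUNTRACED wakes up and can inspect the stop with WIFSTOPPED()/WSTOPSIG(). A small sketch of that parent-side view, illustrative only:

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid;
	int status;

	if ((pid = fork()) == 0) {
		pause();		/* child just waits to be stopped */
		_exit(0);
	}

	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}
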
1580/* 1581/*
1581 * Stop the current process and switch away to the debugger, notifying 1582 * Stop the current process and switch away to the debugger, notifying
1582 * it of an event specific to a traced process only. 1583 * it of an event specific to a traced process only.
1583 */ 1584 */
1584void 1585void
1585eventswitch(int code, int pe_report_event, int entity) 1586eventswitch(int code, int pe_report_event, int entity)
1586{ 1587{
1587 struct lwp *l = curlwp; 1588 struct lwp *l = curlwp;
1588 struct proc *p = l->l_proc; 1589 struct proc *p = l->l_proc;
1589 struct sigacts *ps; 1590 struct sigacts *ps;
1590 sigset_t *mask; 1591 sigset_t *mask;
1591 sig_t action; 1592 sig_t action;
1592 ksiginfo_t ksi; 1593 ksiginfo_t ksi;
1593 const int signo = SIGTRAP; 1594 const int signo = SIGTRAP;
1594 1595
1595 KASSERT(mutex_owned(&proc_lock)); 1596 KASSERT(mutex_owned(&proc_lock));
1596 KASSERT(mutex_owned(p->p_lock)); 1597 KASSERT(mutex_owned(p->p_lock));
1597 KASSERT(p->p_pptr != initproc); 1598 KASSERT(p->p_pptr != initproc);
1598 KASSERT(l->l_stat == LSONPROC); 1599 KASSERT(l->l_stat == LSONPROC);
1599 KASSERT(ISSET(p->p_slflag, PSL_TRACED)); 1600 KASSERT(ISSET(p->p_slflag, PSL_TRACED));
1600 KASSERT(!ISSET(l->l_flag, LW_SYSTEM)); 1601 KASSERT(!ISSET(l->l_flag, LW_SYSTEM));
1601 KASSERT(p->p_nrlwps > 0); 1602 KASSERT(p->p_nrlwps > 0);
1602 KASSERT((code == TRAP_CHLD) || (code == TRAP_LWP) || 1603 KASSERT((code == TRAP_CHLD) || (code == TRAP_LWP) ||
1603 (code == TRAP_EXEC)); 1604 (code == TRAP_EXEC));
1604 KASSERT((code != TRAP_CHLD) || (entity > 1)); /* prevent pid1 */ 1605 KASSERT((code != TRAP_CHLD) || (entity > 1)); /* prevent pid1 */
1605 KASSERT((code != TRAP_LWP) || (entity > 0)); 1606 KASSERT((code != TRAP_LWP) || (entity > 0));
1606 1607
1607repeat: 1608repeat:
1608 /* 1609 /*
1609 * If we are exiting, demise now. 1610 * If we are exiting, demise now.
1610 * 1611 *
1611 * This avoids notifying tracer and deadlocking. 1612 * This avoids notifying tracer and deadlocking.
1612 */ 1613 */
1613 if (__predict_false(ISSET(p->p_sflag, PS_WEXIT))) { 1614 if (__predict_false(ISSET(p->p_sflag, PS_WEXIT))) {
1614 mutex_exit(p->p_lock); 1615 mutex_exit(p->p_lock);
1615 mutex_exit(&proc_lock); 1616 mutex_exit(&proc_lock);
1616 1617
1617 if (pe_report_event == PTRACE_LWP_EXIT) { 1618 if (pe_report_event == PTRACE_LWP_EXIT) {
1618 /* Avoid double lwp_exit() and panic. */ 1619 /* Avoid double lwp_exit() and panic. */
1619 return; 1620 return;
1620 } 1621 }
1621 1622
1622 lwp_exit(l); 1623 lwp_exit(l);
1623 panic("eventswitch"); 1624 panic("eventswitch");
1624 /* NOTREACHED */ 1625 /* NOTREACHED */
1625 } 1626 }
1626 1627
1627 /* 1628 /*
1628 * If we are no longer traced, abandon this event signal. 1629 * If we are no longer traced, abandon this event signal.
1629 * 1630 *
1630 * This avoids killing a process after detaching the debugger. 1631 * This avoids killing a process after detaching the debugger.
1631 */ 1632 */
1632 if (__predict_false(!ISSET(p->p_slflag, PSL_TRACED))) { 1633 if (__predict_false(!ISSET(p->p_slflag, PSL_TRACED))) {
1633 mutex_exit(p->p_lock); 1634 mutex_exit(p->p_lock);
1634 mutex_exit(&proc_lock); 1635 mutex_exit(&proc_lock);
1635 return; 1636 return;
1636 } 1637 }
1637 1638
1638 /* 1639 /*
1639 * If there's a pending SIGKILL, process it immediately. 1640 * If there's a pending SIGKILL, process it immediately.
1640 */ 1641 */
1641 if (p->p_xsig == SIGKILL || 1642 if (p->p_xsig == SIGKILL ||
1642 sigismember(&p->p_sigpend.sp_set, SIGKILL)) { 1643 sigismember(&p->p_sigpend.sp_set, SIGKILL)) {
1643 mutex_exit(p->p_lock); 1644 mutex_exit(p->p_lock);
1644 mutex_exit(&proc_lock); 1645 mutex_exit(&proc_lock);
1645 return; 1646 return;
1646 } 1647 }
1647 1648
1648 /* 1649 /*
1649 * The process is already stopping. 1650 * The process is already stopping.
1650 */ 1651 */
1651 if ((p->p_sflag & PS_STOPPING) != 0) { 1652 if ((p->p_sflag & PS_STOPPING) != 0) {
1652 mutex_exit(&proc_lock); 1653 mutex_exit(&proc_lock);
1653 sigswitch_unlock_and_switch_away(l); 1654 sigswitch_unlock_and_switch_away(l);
1654 mutex_enter(&proc_lock); 1655 mutex_enter(&proc_lock);
1655 mutex_enter(p->p_lock); 1656 mutex_enter(p->p_lock);
1656 goto repeat; 1657 goto repeat;
1657 } 1658 }
1658 1659
1659 KSI_INIT_TRAP(&ksi); 1660 KSI_INIT_TRAP(&ksi);
1660 ksi.ksi_lid = l->l_lid; 1661 ksi.ksi_lid = l->l_lid;
1661 ksi.ksi_signo = signo; 1662 ksi.ksi_signo = signo;
1662 ksi.ksi_code = code; 1663 ksi.ksi_code = code;
1663 ksi.ksi_pe_report_event = pe_report_event; 1664 ksi.ksi_pe_report_event = pe_report_event;
1664 1665
1665 CTASSERT(sizeof(ksi.ksi_pe_other_pid) == sizeof(ksi.ksi_pe_lwp)); 1666 CTASSERT(sizeof(ksi.ksi_pe_other_pid) == sizeof(ksi.ksi_pe_lwp));
1666 ksi.ksi_pe_other_pid = entity; 1667 ksi.ksi_pe_other_pid = entity;
1667 1668
1668 /* Needed for ktrace */ 1669 /* Needed for ktrace */
1669 ps = p->p_sigacts; 1670 ps = p->p_sigacts;
1670 action = SIGACTION_PS(ps, signo).sa_handler; 1671 action = SIGACTION_PS(ps, signo).sa_handler;
1671 mask = &l->l_sigmask; 1672 mask = &l->l_sigmask;
1672 1673
1673 p->p_xsig = signo; 1674 p->p_xsig = signo;
1674 p->p_sigctx.ps_faked = true; 1675 p->p_sigctx.ps_faked = true;
1675 p->p_sigctx.ps_lwp = ksi.ksi_lid; 1676 p->p_sigctx.ps_lwp = ksi.ksi_lid;
1676 p->p_sigctx.ps_info = ksi.ksi_info; 1677 p->p_sigctx.ps_info = ksi.ksi_info;
1677 1678
1678 sigswitch(0, signo, true); 1679 sigswitch(0, signo, true);
1679 1680
1680 if (code == TRAP_CHLD) { 1681 if (code == TRAP_CHLD) {
1681 mutex_enter(&proc_lock); 1682 mutex_enter(&proc_lock);
1682 while (l->l_vforkwaiting) 1683 while (l->l_vforkwaiting)
1683 cv_wait(&l->l_waitcv, &proc_lock); 1684 cv_wait(&l->l_waitcv, &proc_lock);
1684 mutex_exit(&proc_lock); 1685 mutex_exit(&proc_lock);
1685 } 1686 }
1686 1687
1687 if (ktrpoint(KTR_PSIG)) { 1688 if (ktrpoint(KTR_PSIG)) {
1688 if (p->p_emul->e_ktrpsig) 1689 if (p->p_emul->e_ktrpsig)
1689 p->p_emul->e_ktrpsig(signo, action, mask, &ksi); 1690 p->p_emul->e_ktrpsig(signo, action, mask, &ksi);
1690 else 1691 else
1691 ktrpsig(signo, action, mask, &ksi); 1692 ktrpsig(signo, action, mask, &ksi);
1692 } 1693 }
1693} 1694}
1694 1695
1695void 1696void
1696eventswitchchild(struct proc *p, int code, int pe_report_event) 1697eventswitchchild(struct proc *p, int code, int pe_report_event)
1697{ 1698{
1698 mutex_enter(&proc_lock); 1699 mutex_enter(&proc_lock);
1699 mutex_enter(p->p_lock); 1700 mutex_enter(p->p_lock);
1700 if ((p->p_slflag & (PSL_TRACED|PSL_TRACEDCHILD)) != 1701 if ((p->p_slflag & (PSL_TRACED|PSL_TRACEDCHILD)) !=
1701 (PSL_TRACED|PSL_TRACEDCHILD)) { 1702 (PSL_TRACED|PSL_TRACEDCHILD)) {
1702 mutex_exit(p->p_lock); 1703 mutex_exit(p->p_lock);
1703 mutex_exit(&proc_lock); 1704 mutex_exit(&proc_lock);
1704 return; 1705 return;
1705 } 1706 }
1706 eventswitch(code, pe_report_event, p->p_oppid); 1707 eventswitch(code, pe_report_event, p->p_oppid);
1707} 1708}
1708 1709
1709/* 1710/*
1710 * Stop the current process and switch away when being stopped or traced. 1711 * Stop the current process and switch away when being stopped or traced.
1711 */ 1712 */
1712static void 1713static void
1713sigswitch(int ppmask, int signo, bool proc_lock_held) 1714sigswitch(int ppmask, int signo, bool proc_lock_held)
1714{ 1715{
1715 struct lwp *l = curlwp; 1716 struct lwp *l = curlwp;
1716 struct proc *p = l->l_proc; 1717 struct proc *p = l->l_proc;
1717 1718
1718 KASSERT(mutex_owned(p->p_lock)); 1719 KASSERT(mutex_owned(p->p_lock));
1719 KASSERT(l->l_stat == LSONPROC); 1720 KASSERT(l->l_stat == LSONPROC);
1720 KASSERT(p->p_nrlwps > 0); 1721 KASSERT(p->p_nrlwps > 0);
1721 1722
1722 if (proc_lock_held) { 1723 if (proc_lock_held) {
1723 KASSERT(mutex_owned(&proc_lock)); 1724 KASSERT(mutex_owned(&proc_lock));
1724 } else { 1725 } else {
1725 KASSERT(!mutex_owned(&proc_lock)); 1726 KASSERT(!mutex_owned(&proc_lock));
1726 } 1727 }
1727 1728
1728 /* 1729 /*
1729 * On entry we know that the process needs to stop. If it's 1730 * On entry we know that the process needs to stop. If it's
1730 * the result of a 'sideways' stop signal that has been sourced 1731 * the result of a 'sideways' stop signal that has been sourced
1731 * through issignal(), then stop other LWPs in the process too. 1732 * through issignal(), then stop other LWPs in the process too.
1732 */ 1733 */
1733 if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) { 1734 if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) {
1734 KASSERT(signo != 0); 1735 KASSERT(signo != 0);
1735 proc_stop(p, signo); 1736 proc_stop(p, signo);
1736 KASSERT(p->p_nrlwps > 0); 1737 KASSERT(p->p_nrlwps > 0);
1737 } 1738 }
1738 1739
1739 /* 1740 /*
1740 * If we are the last live LWP, and the stop was a result of 1741 * If we are the last live LWP, and the stop was a result of
1741 * a new signal, then signal the parent. 1742 * a new signal, then signal the parent.
1742 */ 1743 */
1743 if ((p->p_sflag & PS_STOPPING) != 0) { 1744 if ((p->p_sflag & PS_STOPPING) != 0) {
1744 if (!proc_lock_held && !mutex_tryenter(&proc_lock)) { 1745 if (!proc_lock_held && !mutex_tryenter(&proc_lock)) {
1745 mutex_exit(p->p_lock); 1746 mutex_exit(p->p_lock);
1746 mutex_enter(&proc_lock); 1747 mutex_enter(&proc_lock);
1747 mutex_enter(p->p_lock); 1748 mutex_enter(p->p_lock);
1748 } 1749 }
1749 1750
1750 if (p->p_nrlwps == 1 && (p->p_sflag & PS_STOPPING) != 0) { 1751 if (p->p_nrlwps == 1 && (p->p_sflag & PS_STOPPING) != 0) {
1751 /* 1752 /*
1752 * Note that proc_stop_done() can drop 1753 * Note that proc_stop_done() can drop
1753 * p->p_lock briefly. 1754 * p->p_lock briefly.
1754 */ 1755 */
1755 proc_stop_done(p, ppmask); 1756 proc_stop_done(p, ppmask);
1756 } 1757 }
1757 1758
1758 mutex_exit(&proc_lock); 1759 mutex_exit(&proc_lock);
1759 } 1760 }
1760 1761
1761 sigswitch_unlock_and_switch_away(l); 1762 sigswitch_unlock_and_switch_away(l);
1762} 1763}
1763 1764
1764/* 1765/*
1765 * Unlock and switch away. 1766 * Unlock and switch away.
1766 */ 1767 */
1767static void 1768static void
1768sigswitch_unlock_and_switch_away(struct lwp *l) 1769sigswitch_unlock_and_switch_away(struct lwp *l)
1769{ 1770{
1770 struct proc *p; 1771 struct proc *p;
1771 int biglocks; 1772 int biglocks;
1772 1773
1773 p = l->l_proc; 1774 p = l->l_proc;
1774 1775
1775 KASSERT(mutex_owned(p->p_lock)); 1776 KASSERT(mutex_owned(p->p_lock));
1776 KASSERT(!mutex_owned(&proc_lock)); 1777 KASSERT(!mutex_owned(&proc_lock));
1777 1778
1778 KASSERT(l->l_stat == LSONPROC); 1779 KASSERT(l->l_stat == LSONPROC);
1779 KASSERT(p->p_nrlwps > 0); 1780 KASSERT(p->p_nrlwps > 0);
1780 1781
1781 KERNEL_UNLOCK_ALL(l, &biglocks); 1782 KERNEL_UNLOCK_ALL(l, &biglocks);
1782 if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) { 1783 if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
1783 p->p_nrlwps--; 1784 p->p_nrlwps--;
1784 lwp_lock(l); 1785 lwp_lock(l);
1785 KASSERT(l->l_stat == LSONPROC || l->l_stat == LSSLEEP); 1786 KASSERT(l->l_stat == LSONPROC || l->l_stat == LSSLEEP);
1786 l->l_stat = LSSTOP; 1787 l->l_stat = LSSTOP;
1787 lwp_unlock(l); 1788 lwp_unlock(l);
1788 } 1789 }
1789 1790
1790 mutex_exit(p->p_lock); 1791 mutex_exit(p->p_lock);
1791 lwp_lock(l); 1792 lwp_lock(l);
1792 spc_lock(l->l_cpu); 1793 spc_lock(l->l_cpu);
1793 mi_switch(l); 1794 mi_switch(l);
1794 KERNEL_LOCK(biglocks, l); 1795 KERNEL_LOCK(biglocks, l);
1795} 1796}
1796 1797
1797/* 1798/*
1798 * Check for a signal from the debugger. 1799 * Check for a signal from the debugger.
1799 */ 1800 */
1800static int 1801static int
1801sigchecktrace(void) 1802sigchecktrace(void)
1802{ 1803{
1803 struct lwp *l = curlwp; 1804 struct lwp *l = curlwp;
1804 struct proc *p = l->l_proc; 1805 struct proc *p = l->l_proc;
1805 int signo; 1806 int signo;
1806 1807
1807 KASSERT(mutex_owned(p->p_lock)); 1808 KASSERT(mutex_owned(p->p_lock));
1808 1809
1809 /* If there's a pending SIGKILL, process it immediately. */ 1810 /* If there's a pending SIGKILL, process it immediately. */
1810 if (sigismember(&p->p_sigpend.sp_set, SIGKILL)) 1811 if (sigismember(&p->p_sigpend.sp_set, SIGKILL))
1811 return 0; 1812 return 0;
1812 1813
1813 /* 1814 /*
1814 * If we are no longer being traced, or the parent didn't 1815 * If we are no longer being traced, or the parent didn't
1815 * give us a signal, or we're stopping, look for more signals. 1816 * give us a signal, or we're stopping, look for more signals.
1816 */ 1817 */
1817 if ((p->p_slflag & PSL_TRACED) == 0 || p->p_xsig == 0 || 1818 if ((p->p_slflag & PSL_TRACED) == 0 || p->p_xsig == 0 ||
1818 (p->p_sflag & PS_STOPPING) != 0) 1819 (p->p_sflag & PS_STOPPING) != 0)
1819 return 0; 1820 return 0;
1820 1821
1821 /* 1822 /*
1822 * If the new signal is being masked, look for other signals. 1823 * If the new signal is being masked, look for other signals.
1823 * `p->p_sigctx.ps_siglist |= mask' is done in setrunnable(). 1824 * `p->p_sigctx.ps_siglist |= mask' is done in setrunnable().
1824 */ 1825 */
1825 signo = p->p_xsig; 1826 signo = p->p_xsig;
1826 p->p_xsig = 0; 1827 p->p_xsig = 0;
1827 if (sigismember(&l->l_sigmask, signo)) { 1828 if (sigismember(&l->l_sigmask, signo)) {
1828 signo = 0; 1829 signo = 0;
1829 } 1830 }
1830 return signo; 1831 return signo;
1831} 1832}
1832 1833
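The p_xsig consumed here is normally supplied by the debugger, for example through the data argument of ptrace(PT_CONTINUE, ...), which either discards the reported signal or substitutes the one the tracee should see. A hedged tracer-side sketch, illustrative only (minimal error handling):

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid;
	int status;

	if ((pid = fork()) == 0) {
		ptrace(PT_TRACE_ME, 0, NULL, 0);
		raise(SIGUSR1);		/* stops and is reported to the tracer */
		_exit(0);
	}

	waitpid(pid, &status, 0);
	if (WIFSTOPPED(status))
		printf("tracee stopped with signal %d\n", WSTOPSIG(status));

	/*
	 * Resume the tracee; the data argument (0 here) is the signal it
	 * will actually receive - 0 discards it, nonzero delivers that
	 * signal instead.
	 */
	ptrace(PT_CONTINUE, pid, (void *)1, 0);
	waitpid(pid, &status, 0);
	return 0;
}
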
1833/* 1834/*
1834 * If the current process has received a signal (should be caught or cause 1835 * If the current process has received a signal (should be caught or cause
1835 * termination, should interrupt current syscall), return the signal number. 1836 * termination, should interrupt current syscall), return the signal number.
1836 * 1837 *
1837 * Stop signals with default action are processed immediately, then cleared; 1838 * Stop signals with default action are processed immediately, then cleared;
1838 * they aren't returned. This is checked after each entry to the system for 1839 * they aren't returned. This is checked after each entry to the system for
1839 * a syscall or trap. 1840 * a syscall or trap.
1840 * 1841 *
1841 * We will also return -1 if the process is exiting and the current LWP must 1842 * We will also return -1 if the process is exiting and the current LWP must
1842 * follow suit. 1843 * follow suit.
1843 */ 1844 */
1844int 1845int
1845issignal(struct lwp *l) 1846issignal(struct lwp *l)
1846{ 1847{
1847 struct proc *p; 1848 struct proc *p;
1848 int siglwp, signo, prop; 1849 int siglwp, signo, prop;
1849 sigpend_t *sp; 1850 sigpend_t *sp;
1850 sigset_t ss; 1851 sigset_t ss;
1851 bool traced; 1852 bool traced;
1852 1853
1853 p = l->l_proc; 1854 p = l->l_proc;
1854 sp = NULL; 1855 sp = NULL;
1855 signo = 0; 1856 signo = 0;
1856 1857
1857 KASSERT(p == curproc); 1858 KASSERT(p == curproc);
1858 KASSERT(mutex_owned(p->p_lock)); 1859 KASSERT(mutex_owned(p->p_lock));
1859 1860
1860 for (;;) { 1861 for (;;) {
1861 /* Discard any signals that we have decided not to take. */ 1862 /* Discard any signals that we have decided not to take. */
1862 if (signo != 0) { 1863 if (signo != 0) {
1863 (void)sigget(sp, NULL, signo, NULL); 1864 (void)sigget(sp, NULL, signo, NULL);
1864 } 1865 }
1865 1866
1866 /* 1867 /*
1867 * If the process is stopped/stopping, then stop ourselves 1868 * If the process is stopped/stopping, then stop ourselves
1868 * now that we're on the kernel/userspace boundary. When 1869 * now that we're on the kernel/userspace boundary. When
1869 * we awaken, check for a signal from the debugger. 1870 * we awaken, check for a signal from the debugger.
1870 */ 1871 */
1871 if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) { 1872 if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
1872 sigswitch_unlock_and_switch_away(l); 1873 sigswitch_unlock_and_switch_away(l);
1873 mutex_enter(p->p_lock); 1874 mutex_enter(p->p_lock);
1874 continue; 1875 continue;
1875 } else if (p->p_stat == SACTIVE) 1876 } else if (p->p_stat == SACTIVE)
1876 signo = sigchecktrace(); 1877 signo = sigchecktrace();
1877 else 1878 else
1878 signo = 0; 1879 signo = 0;
1879 1880
1880 /* Signals from the debugger are "out of band". */ 1881 /* Signals from the debugger are "out of band". */
1881 sp = NULL; 1882 sp = NULL;
1882 1883
1883 /* 1884 /*
1884 * If the debugger didn't provide a signal, find a pending 1885 * If the debugger didn't provide a signal, find a pending
1885 * signal from our set. Check per-LWP signals first, and 1886 * signal from our set. Check per-LWP signals first, and
1886 * then per-process. 1887 * then per-process.
1887 */ 1888 */
1888 if (signo == 0) { 1889 if (signo == 0) {
1889 sp = &l->l_sigpend; 1890 sp = &l->l_sigpend;
1890 ss = sp->sp_set; 1891 ss = sp->sp_set;
1891 siglwp = l->l_lid; 1892 siglwp = l->l_lid;
1892 if ((p->p_lflag & PL_PPWAIT) != 0) 1893 if ((p->p_lflag & PL_PPWAIT) != 0)
1893 sigminusset(&vforksigmask, &ss); 1894 sigminusset(&vforksigmask, &ss);
1894 sigminusset(&l->l_sigmask, &ss); 1895 sigminusset(&l->l_sigmask, &ss);
1895 1896
1896 if ((signo = firstsig(&ss)) == 0) { 1897 if ((signo = firstsig(&ss)) == 0) {
1897 sp = &p->p_sigpend; 1898 sp = &p->p_sigpend;
1898 ss = sp->sp_set; 1899 ss = sp->sp_set;
1899 siglwp = 0; 1900 siglwp = 0;
1900 if ((p->p_lflag & PL_PPWAIT) != 0) 1901 if ((p->p_lflag & PL_PPWAIT) != 0)
1901 sigminusset(&vforksigmask, &ss); 1902 sigminusset(&vforksigmask, &ss);
1902 sigminusset(&l->l_sigmask, &ss); 1903 sigminusset(&l->l_sigmask, &ss);
1903 1904
1904 if ((signo = firstsig(&ss)) == 0) { 1905 if ((signo = firstsig(&ss)) == 0) {
1905 /* 1906 /*
1906 * No signal pending - clear the 1907 * No signal pending - clear the
1907 * indicator and bail out. 1908 * indicator and bail out.
1908 */ 1909 */
1909 lwp_lock(l); 1910 lwp_lock(l);
1910 l->l_flag &= ~LW_PENDSIG; 1911 l->l_flag &= ~LW_PENDSIG;
1911 lwp_unlock(l); 1912 lwp_unlock(l);
1912 sp = NULL; 1913 sp = NULL;
1913 break; 1914 break;
1914 } 1915 }
1915 } 1916 }
1916 } 1917 }
1917 1918
1918 traced = ISSET(p->p_slflag, PSL_TRACED) && 1919 traced = ISSET(p->p_slflag, PSL_TRACED) &&
1919 !sigismember(&p->p_sigctx.ps_sigpass, signo); 1920 !sigismember(&p->p_sigctx.ps_sigpass, signo);
1920 1921
1921 if (sp) { 1922 if (sp) {
1922 /* Overwrite process' signal context to correspond 1923 /* Overwrite process' signal context to correspond
1923 * to the currently reported LWP. This is necessary 1924 * to the currently reported LWP. This is necessary
1924 * for PT_GET_SIGINFO to report the correct signal when 1925 * for PT_GET_SIGINFO to report the correct signal when
1925 * multiple LWPs have pending signals. We do this only 1926 * multiple LWPs have pending signals. We do this only
1926 * when the signal comes from the queue, for signals 1927 * when the signal comes from the queue, for signals
1927 * created by the debugger we assume it set correct 1928 * created by the debugger we assume it set correct
1928 * siginfo. 1929 * siginfo.
1929 */ 1930 */
1930 ksiginfo_t *ksi = TAILQ_FIRST(&sp->sp_info); 1931 ksiginfo_t *ksi = TAILQ_FIRST(&sp->sp_info);
1931 if (ksi) { 1932 if (ksi) {
1932 p->p_sigctx.ps_lwp = ksi->ksi_lid; 1933 p->p_sigctx.ps_lwp = ksi->ksi_lid;
1933 p->p_sigctx.ps_info = ksi->ksi_info; 1934 p->p_sigctx.ps_info = ksi->ksi_info;
1934 } else { 1935 } else {
1935 p->p_sigctx.ps_lwp = siglwp; 1936 p->p_sigctx.ps_lwp = siglwp;
1936 memset(&p->p_sigctx.ps_info, 0, 1937 memset(&p->p_sigctx.ps_info, 0,
1937 sizeof(p->p_sigctx.ps_info)); 1938 sizeof(p->p_sigctx.ps_info));
1938 p->p_sigctx.ps_info._signo = signo; 1939 p->p_sigctx.ps_info._signo = signo;
1939 p->p_sigctx.ps_info._code = SI_NOINFO; 1940 p->p_sigctx.ps_info._code = SI_NOINFO;
1940 } 1941 }
1941 } 1942 }
1942 1943
1943 /* 1944 /*
1944 * We should see pending but ignored signals only if 1945 * We should see pending but ignored signals only if
1945 * we are being traced. 1946 * we are being traced.
1946 */ 1947 */
1947 if (sigismember(&p->p_sigctx.ps_sigignore, signo) && 1948 if (sigismember(&p->p_sigctx.ps_sigignore, signo) &&
1948 !traced) { 1949 !traced) {
1949 /* Discard the signal. */ 1950 /* Discard the signal. */
1950 continue; 1951 continue;
1951 } 1952 }
1952 1953
1953 /* 1954 /*
1954 * If traced, always stop, and stay stopped until released 1955 * If traced, always stop, and stay stopped until released
1955 * by the debugger. If our parent is our debugger waiting 1956 * by the debugger. If our parent is our debugger waiting
1956 * for us and we vforked, don't hang as we could deadlock. 1957 * for us and we vforked, don't hang as we could deadlock.
1957 */ 1958 */
1958 if (traced && signo != SIGKILL && 1959 if (traced && signo != SIGKILL &&
1959 !(ISSET(p->p_lflag, PL_PPWAIT) && 1960 !(ISSET(p->p_lflag, PL_PPWAIT) &&
1960 (p->p_pptr == p->p_opptr))) { 1961 (p->p_pptr == p->p_opptr))) {
1961 /* 1962 /*
1962 * Take the signal, but don't remove it from the 1963 * Take the signal, but don't remove it from the
1963 * siginfo queue, because the debugger can send 1964 * siginfo queue, because the debugger can send
1964 * it later. 1965 * it later.
1965 */ 1966 */
1966 if (sp) 1967 if (sp)
1967 sigdelset(&sp->sp_set, signo); 1968 sigdelset(&sp->sp_set, signo);
1968 p->p_xsig = signo; 1969 p->p_xsig = signo;
1969 1970
1970 /* Handling of signal trace */ 1971 /* Handling of signal trace */
1971 sigswitch(0, signo, false); 1972 sigswitch(0, signo, false);
1972 mutex_enter(p->p_lock); 1973 mutex_enter(p->p_lock);
1973 1974
1974 /* Check for a signal from the debugger. */ 1975 /* Check for a signal from the debugger. */
1975 if ((signo = sigchecktrace()) == 0) 1976 if ((signo = sigchecktrace()) == 0)
1976 continue; 1977 continue;
1977 1978
1978 /* Signals from the debugger are "out of band". */ 1979 /* Signals from the debugger are "out of band". */
1979 sp = NULL; 1980 sp = NULL;
1980 } 1981 }
1981 1982
1982 prop = sigprop[signo]; 1983 prop = sigprop[signo];
1983 1984
1984 /* 1985 /*
1985 * Decide whether the signal should be returned. 1986 * Decide whether the signal should be returned.
1986 */ 1987 */
1987 switch ((long)SIGACTION(p, signo).sa_handler) { 1988 switch ((long)SIGACTION(p, signo).sa_handler) {
1988 case (long)SIG_DFL: 1989 case (long)SIG_DFL:
1989 /* 1990 /*
1990 * Don't take default actions on system processes. 1991 * Don't take default actions on system processes.
1991 */ 1992 */
1992 if (p->p_pid <= 1) { 1993 if (p->p_pid <= 1) {
1993#ifdef DIAGNOSTIC 1994#ifdef DIAGNOSTIC
1994 /* 1995 /*
1995 * Are you sure you want to ignore SIGSEGV 1996 * Are you sure you want to ignore SIGSEGV
1996 * in init? XXX 1997 * in init? XXX
1997 */ 1998 */
1998 printf_nolog("Process (pid %d) got sig %d\n", 1999 printf_nolog("Process (pid %d) got sig %d\n",
1999 p->p_pid, signo); 2000 p->p_pid, signo);
2000#endif 2001#endif
2001 continue; 2002 continue;
2002 } 2003 }
2003 2004
2004 /* 2005 /*
2005 * If there is a pending stop signal to process with 2006 * If there is a pending stop signal to process with
2006 * default action, stop here, then clear the signal.  2007 * default action, stop here, then clear the signal.
2007 * However, if the process is a member of an orphaned 2008 * However, if the process is a member of an orphaned
2008 * process group, ignore tty stop signals. 2009 * process group, ignore tty stop signals.
2009 */ 2010 */
2010 if (prop & SA_STOP) { 2011 if (prop & SA_STOP) {
2011 /* 2012 /*
2012 * XXX Don't hold proc_lock for p_lflag, 2013 * XXX Don't hold proc_lock for p_lflag,
2013 * but it's not a big deal. 2014 * but it's not a big deal.
2014 */ 2015 */
2015 if ((traced && 2016 if ((traced &&
2016 !(ISSET(p->p_lflag, PL_PPWAIT) && 2017 !(ISSET(p->p_lflag, PL_PPWAIT) &&
2017 (p->p_pptr == p->p_opptr))) || 2018 (p->p_pptr == p->p_opptr))) ||
2018 ((p->p_lflag & PL_ORPHANPG) != 0 && 2019 ((p->p_lflag & PL_ORPHANPG) != 0 &&
2019 prop & SA_TTYSTOP)) { 2020 prop & SA_TTYSTOP)) {
2020 /* Ignore the signal. */ 2021 /* Ignore the signal. */
2021 continue; 2022 continue;
2022 } 2023 }
2023 /* Take the signal. */ 2024 /* Take the signal. */
2024 (void)sigget(sp, NULL, signo, NULL); 2025 (void)sigget(sp, NULL, signo, NULL);
2025 p->p_xsig = signo; 2026 p->p_xsig = signo;
2026 p->p_sflag &= ~PS_CONTINUED; 2027 p->p_sflag &= ~PS_CONTINUED;
2027 signo = 0; 2028 signo = 0;
2028 sigswitch(PS_NOCLDSTOP, p->p_xsig, false); 2029 sigswitch(PS_NOCLDSTOP, p->p_xsig, false);
2029 mutex_enter(p->p_lock); 2030 mutex_enter(p->p_lock);
2030 } else if (prop & SA_IGNORE) { 2031 } else if (prop & SA_IGNORE) {
2031 /* 2032 /*
2032 * Except for SIGCONT, shouldn't get here. 2033 * Except for SIGCONT, shouldn't get here.
2033 * Default action is to ignore; drop it. 2034 * Default action is to ignore; drop it.
2034 */ 2035 */
2035 continue; 2036 continue;
2036 } 2037 }
2037 break; 2038 break;
2038 2039
2039 case (long)SIG_IGN: 2040 case (long)SIG_IGN:
2040#ifdef DEBUG_ISSIGNAL 2041#ifdef DEBUG_ISSIGNAL
2041 /* 2042 /*
2042 * Masking above should prevent us from ever trying 2043 * Masking above should prevent us from ever trying
2043 * to take action on an ignored signal other 2044 * to take action on an ignored signal other
2044 * than SIGCONT, unless the process is traced. 2045 * than SIGCONT, unless the process is traced.
2045 */ 2046 */
2046 if ((prop & SA_CONT) == 0 && !traced) 2047 if ((prop & SA_CONT) == 0 && !traced)
2047 printf_nolog("issignal\n"); 2048 printf_nolog("issignal\n");
2048#endif 2049#endif
2049 continue; 2050 continue;
2050 2051
2051 default: 2052 default:
2052 /* 2053 /*
2053 * This signal has an action, let postsig() process 2054 * This signal has an action, let postsig() process
2054 * it. 2055 * it.
2055 */ 2056 */
2056 break; 2057 break;
2057 } 2058 }
2058 2059
2059 break; 2060 break;
2060 } 2061 }
2061 2062
2062 l->l_sigpendset = sp; 2063 l->l_sigpendset = sp;
2063 return signo; 2064 return signo;
2064} 2065}
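
The comment near the top of issignal() about PT_GET_SIGINFO is about keeping p_sigctx.ps_lwp and ps_info in sync with the signal actually being reported, so a debugger can tell which LWP the stop belongs to. A minimal debugger-side sketch of that query, assuming NetBSD's ptrace(2) PT_ATTACH/PT_GET_SIGINFO requests and struct ptrace_siginfo, with error handling pared down:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void
report_stop(pid_t pid)
{
	struct ptrace_siginfo psi;
	int status;

	if (ptrace(PT_ATTACH, pid, NULL, 0) == -1)
		return;
	waitpid(pid, &status, 0);		/* wait for the attach stop */

	/* Reads back the ps_lwp/ps_info recorded by issignal(). */
	if (ptrace(PT_GET_SIGINFO, pid, &psi, sizeof(psi)) == 0)
		printf("lwp %d stopped with signal %d (code %d)\n",
		    psi.psi_lwpid, psi.psi_siginfo.si_signo,
		    psi.psi_siginfo.si_code);

	ptrace(PT_DETACH, pid, (void *)1, 0);
}

int
main(int argc, char **argv)
{
	if (argc == 2)
		report_stop((pid_t)atoi(argv[1]));
	return 0;
}
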
2065 2066
2066/* 2067/*
2067 * Take the action for the specified signal 2068 * Take the action for the specified signal
2068 * from the current set of pending signals. 2069 * from the current set of pending signals.
2069 */ 2070 */
2070void 2071void
2071postsig(int signo) 2072postsig(int signo)
2072{ 2073{
2073 struct lwp *l; 2074 struct lwp *l;
2074 struct proc *p; 2075 struct proc *p;
2075 struct sigacts *ps; 2076 struct sigacts *ps;
2076 sig_t action; 2077 sig_t action;
2077 sigset_t *returnmask; 2078 sigset_t *returnmask;
2078 ksiginfo_t ksi; 2079 ksiginfo_t ksi;
2079 2080
2080 l = curlwp; 2081 l = curlwp;
2081 p = l->l_proc; 2082 p = l->l_proc;
2082 ps = p->p_sigacts; 2083 ps = p->p_sigacts;
2083 2084
2084 KASSERT(mutex_owned(p->p_lock)); 2085 KASSERT(mutex_owned(p->p_lock));
2085 KASSERT(signo > 0); 2086 KASSERT(signo > 0);
2086 2087
2087 /* 2088 /*
2088 * Set the new mask value and also defer further occurrences of this 2089 * Set the new mask value and also defer further occurrences of this
2089 * signal. 2090 * signal.
2090 * 2091 *
2091 * Special case: user has done a sigsuspend. Here the current mask is 2092 * Special case: user has done a sigsuspend. Here the current mask is
2092 * not of interest, but rather the mask from before the sigsuspend is 2093 * not of interest, but rather the mask from before the sigsuspend is
2093 * what we want restored after the signal processing is completed. 2094 * what we want restored after the signal processing is completed.
2094 */ 2095 */
2095 if (l->l_sigrestore) { 2096 if (l->l_sigrestore) {
2096 returnmask = &l->l_sigoldmask; 2097 returnmask = &l->l_sigoldmask;
2097 l->l_sigrestore = 0; 2098 l->l_sigrestore = 0;
2098 } else 2099 } else
2099 returnmask = &l->l_sigmask; 2100 returnmask = &l->l_sigmask;
2100 2101
2101 /* 2102 /*
2102 * Commit to taking the signal before releasing the mutex. 2103 * Commit to taking the signal before releasing the mutex.
2103 */ 2104 */
2104 action = SIGACTION_PS(ps, signo).sa_handler; 2105 action = SIGACTION_PS(ps, signo).sa_handler;
2105 l->l_ru.ru_nsignals++; 2106 l->l_ru.ru_nsignals++;
2106 if (l->l_sigpendset == NULL) { 2107 if (l->l_sigpendset == NULL) {
2107 /* From the debugger */ 2108 /* From the debugger */
2108 if (p->p_sigctx.ps_faked && 2109 if (p->p_sigctx.ps_faked &&
2109 signo == p->p_sigctx.ps_info._signo) { 2110 signo == p->p_sigctx.ps_info._signo) {
2110 KSI_INIT(&ksi); 2111 KSI_INIT(&ksi);
2111 ksi.ksi_info = p->p_sigctx.ps_info; 2112 ksi.ksi_info = p->p_sigctx.ps_info;
2112 ksi.ksi_lid = p->p_sigctx.ps_lwp; 2113 ksi.ksi_lid = p->p_sigctx.ps_lwp;
2113 p->p_sigctx.ps_faked = false; 2114 p->p_sigctx.ps_faked = false;
2114 } else { 2115 } else {
2115 if (!siggetinfo(&l->l_sigpend, &ksi, signo)) 2116 if (!siggetinfo(&l->l_sigpend, &ksi, signo))
2116 (void)siggetinfo(&p->p_sigpend, &ksi, signo); 2117 (void)siggetinfo(&p->p_sigpend, &ksi, signo);
2117 } 2118 }
2118 } else 2119 } else
2119 sigget(l->l_sigpendset, &ksi, signo, NULL); 2120 sigget(l->l_sigpendset, &ksi, signo, NULL);
2120 2121
2121 if (ktrpoint(KTR_PSIG)) { 2122 if (ktrpoint(KTR_PSIG)) {
2122 mutex_exit(p->p_lock); 2123 mutex_exit(p->p_lock);
2123 if (p->p_emul->e_ktrpsig) 2124 if (p->p_emul->e_ktrpsig)
2124 p->p_emul->e_ktrpsig(signo, action, 2125 p->p_emul->e_ktrpsig(signo, action,
2125 returnmask, &ksi); 2126 returnmask, &ksi);
2126 else 2127 else
2127 ktrpsig(signo, action, returnmask, &ksi); 2128 ktrpsig(signo, action, returnmask, &ksi);
2128 mutex_enter(p->p_lock); 2129 mutex_enter(p->p_lock);
2129 } 2130 }
2130 2131
2131 SDT_PROBE(proc, kernel, , signal__handle, signo, &ksi, action, 0, 0); 2132 SDT_PROBE(proc, kernel, , signal__handle, signo, &ksi, action, 0, 0);
2132 2133
2133 if (action == SIG_DFL) { 2134 if (action == SIG_DFL) {
2134 /* 2135 /*
2135 * Default action, where the default is to kill 2136 * Default action, where the default is to kill
2136 * the process. (Other cases were ignored above.) 2137 * the process. (Other cases were ignored above.)
2137 */ 2138 */
2138 sigexit(l, signo); 2139 sigexit(l, signo);
2139 return; 2140 return;
2140 } 2141 }
2141 2142
2142 /* 2143 /*
2143 * If we get here, the signal must be caught. 2144 * If we get here, the signal must be caught.
2144 */ 2145 */
2145#ifdef DIAGNOSTIC 2146#ifdef DIAGNOSTIC
2146 if (action == SIG_IGN || sigismember(&l->l_sigmask, signo)) 2147 if (action == SIG_IGN || sigismember(&l->l_sigmask, signo))
2147 panic("postsig action"); 2148 panic("postsig action");
2148#endif 2149#endif
2149 2150
2150 kpsendsig(l, &ksi, returnmask); 2151 kpsendsig(l, &ksi, returnmask);
2151} 2152}
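
The sigsuspend special case in postsig() above restores l_sigoldmask rather than the current l_sigmask, so the handler runs with the temporary mask but the caller's pre-sigsuspend mask is back in force once delivery completes. A small userland illustration of that contract, a sketch using only standard sigaction/sigprocmask/sigsuspend calls (the forked helper is just there to poke the parent):

#include <sys/types.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void
on_usr1(int signo)
{

	(void)signo;
	got_usr1 = 1;
}

int
main(void)
{
	struct sigaction sa;
	sigset_t block, wait_mask;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	/* Block SIGUSR1 while we get ready to wait for it. */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	if (fork() == 0) {		/* helper: poke the parent shortly */
		sleep(1);
		kill(getppid(), SIGUSR1);
		_exit(0);
	}

	/*
	 * Atomically install an empty mask and sleep.  When sigsuspend()
	 * returns, the pre-sigsuspend (blocking) mask is back in force;
	 * that restore is the l_sigoldmask path in postsig() above.
	 */
	sigemptyset(&wait_mask);
	while (!got_usr1)
		sigsuspend(&wait_mask);

	printf("got SIGUSR1, original mask restored\n");
	return 0;
}
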
2152 2153
2153/* 2154/*
2154 * sendsig: 2155 * sendsig:
2155 * 2156 *
2156 * Default signal delivery method for NetBSD. 2157 * Default signal delivery method for NetBSD.
2157 */ 2158 */
2158void 2159void
2159sendsig(const struct ksiginfo *ksi, const sigset_t *mask) 2160sendsig(const struct ksiginfo *ksi, const sigset_t *mask)
2160{ 2161{
2161 struct sigacts *sa; 2162 struct sigacts *sa;
2162 int sig; 2163 int sig;
2163 2164
2164 sig = ksi->ksi_signo; 2165 sig = ksi->ksi_signo;
2165 sa = curproc->p_sigacts; 2166 sa = curproc->p_sigacts;
2166 2167
2167 switch (sa->sa_sigdesc[sig].sd_vers) { 2168 switch (sa->sa_sigdesc[sig].sd_vers) {
2168 case 0: 2169 case 0:
2169 case 1: 2170 case 1:
2170 /* Compat for 1.6 and earlier. */ 2171 /* Compat for 1.6 and earlier. */
2171 MODULE_HOOK_CALL_VOID(sendsig_sigcontext_16_hook, (ksi, mask), 2172 MODULE_HOOK_CALL_VOID(sendsig_sigcontext_16_hook, (ksi, mask),
2172 break); 2173 break);
2173 return; 2174 return;
2174 case 2: 2175 case 2:
2175 case 3: 2176 case 3:
2176 sendsig_siginfo(ksi, mask); 2177 sendsig_siginfo(ksi, mask);
2177 return; 2178 return;
2178 default: 2179 default:
2179 break; 2180 break;
2180 } 2181 }
2181 2182
2182 printf("sendsig: bad version %d\n", sa->sa_sigdesc[sig].sd_vers); 2183 printf("sendsig: bad version %d\n", sa->sa_sigdesc[sig].sd_vers);
2183 sigexit(curlwp, SIGILL); 2184 sigexit(curlwp, SIGILL);
2184} 2185}
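
sendsig() above dispatches on the per-signal trampoline version: versions 0/1 take the pre-1.6 sigcontext compat hook, versions 2/3 go through sendsig_siginfo(). From userland the siginfo path is what you opt into with SA_SIGINFO; a minimal sketch (the deliberate null dereference exists only to trigger delivery):

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void
on_segv(int signo, siginfo_t *si, void *ctx)
{
	char msg[] = "caught SIGSEGV via the siginfo path\n";

	(void)signo;
	(void)ctx;
	/* si->si_addr, si->si_code, etc. arrive via sendsig_siginfo(). */
	(void)si;
	write(STDERR_FILENO, msg, sizeof(msg) - 1);
	_exit(1);
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO;	/* request siginfo-style delivery */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 0;		/* deliberately fault */
	return 0;
}
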
2185 2186
2186/* 2187/*
2187 * sendsig_reset: 2188 * sendsig_reset:
2188 * 2189 *
2189 * Reset the signal action. Called from emulation specific sendsig() 2190 * Reset the signal action. Called from emulation specific sendsig()
2190 * before unlocking to deliver the signal. 2191 * before unlocking to deliver the signal.
2191 */ 2192 */
2192void 2193void
2193sendsig_reset(struct lwp *l, int signo) 2194sendsig_reset(struct lwp *l, int signo)
2194{ 2195{
2195 struct proc *p = l->l_proc; 2196 struct proc *p = l->l_proc;
2196 struct sigacts *ps = p->p_sigacts; 2197 struct sigacts *ps = p->p_sigacts;
2197 2198
2198 KASSERT(mutex_owned(p->p_lock)); 2199 KASSERT(mutex_owned(p->p_lock));
2199 2200
2200 p->p_sigctx.ps_lwp = 0; 2201 p->p_sigctx.ps_lwp = 0;
2201 memset(&p->p_sigctx.ps_info, 0, sizeof(p->p_sigctx.ps_info)); 2202 memset(&p->p_sigctx.ps_info, 0, sizeof(p->p_sigctx.ps_info));
2202 2203
2203 mutex_enter(&ps->sa_mutex); 2204 mutex_enter(&ps->sa_mutex);
2204 sigplusset(&SIGACTION_PS(ps, signo).sa_mask, &l->l_sigmask); 2205 sigplusset(&SIGACTION_PS(ps, signo).sa_mask, &l->l_sigmask);
2205 if (SIGACTION_PS(ps, signo).sa_flags & SA_RESETHAND) { 2206 if (SIGACTION_PS(ps, signo).sa_flags & SA_RESETHAND) {
2206 sigdelset(&p->p_sigctx.ps_sigcatch, signo); 2207 sigdelset(&p->p_sigctx.ps_sigcatch, signo);
2207 if (signo != SIGCONT && sigprop[signo] & SA_IGNORE) 2208 if (signo != SIGCONT && sigprop[signo] & SA_IGNORE)
2208 sigaddset(&p->p_sigctx.ps_sigignore, signo); 2209 sigaddset(&p->p_sigctx.ps_sigignore, signo);
2209 SIGACTION_PS(ps, signo).sa_handler = SIG_DFL; 2210 SIGACTION_PS(ps, signo).sa_handler = SIG_DFL;
2210 } 2211 }
2211 mutex_exit(&ps->sa_mutex); 2212 mutex_exit(&ps->sa_mutex);
2212} 2213}
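
sendsig_reset() above is where SA_RESETHAND takes effect: before delivery the handler is put back to SIG_DFL and, for signals whose default is to ignore, the signal is added to ps_sigignore. The userland-visible behaviour, sketched with portable sigaction:

#include <signal.h>
#include <stdio.h>
#include <string.h>

static void
once(int signo)
{

	/* Runs only for the first SIGUSR1; see sendsig_reset() above. */
	(void)signo;
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = once;
	sa.sa_flags = SA_RESETHAND;	/* reset to SIG_DFL after delivery */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);			/* caught by once(), handler reset */
	printf("first SIGUSR1 handled\n");
	raise(SIGUSR1);			/* default action: terminates us */
	printf("not reached\n");
	return 0;
}
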
2213 2214
2214/* 2215/*
2215 * Kill the given process for the stated reason. 2216 * Kill the given process for the stated reason.
2216 */ 2217 */
2217void 2218void
2218killproc(struct proc *p, const char *why) 2219killproc(struct proc *p, const char *why)
2219{ 2220{
2220 2221
2221 KASSERT(mutex_owned(&proc_lock)); 2222 KASSERT(mutex_owned(&proc_lock));
2222 2223
2223 log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why); 2224 log(LOG_ERR, "pid %d was killed: %s\n", p->p_pid, why);
2224 uprintf_locked("sorry, pid %d was killed: %s\n", p->p_pid, why); 2225 uprintf_locked("sorry, pid %d was killed: %s\n", p->p_pid, why);
2225 psignal(p, SIGKILL); 2226 psignal(p, SIGKILL);
2226} 2227}
2227 2228
2228/* 2229/*
2229 * Force the current process to exit with the specified signal, dumping core 2230 * Force the current process to exit with the specified signal, dumping core
2230 * if appropriate. We bypass the normal tests for masked and caught 2231 * if appropriate. We bypass the normal tests for masked and caught
2231 * signals, allowing unrecoverable failures to terminate the process without 2232 * signals, allowing unrecoverable failures to terminate the process without
2232 * changing signal state. Mark the accounting record with the signal 2233 * changing signal state. Mark the accounting record with the signal
2233 * termination. If dumping core, save the signal number for the debugger.  2234 * termination. If dumping core, save the signal number for the debugger.
2234 * Calls exit and does not return. 2235 * Calls exit and does not return.
2235 */ 2236 */
2236void 2237void
2237sigexit(struct lwp *l, int signo) 2238sigexit(struct lwp *l, int signo)
2238{ 2239{
2239 int exitsig, error, docore; 2240 int exitsig, error, docore;
2240 struct proc *p; 2241 struct proc *p;
2241 struct lwp *t; 2242 struct lwp *t;
2242 2243
2243 p = l->l_proc; 2244 p = l->l_proc;
2244 2245
2245 KASSERT(mutex_owned(p->p_lock)); 2246 KASSERT(mutex_owned(p->p_lock));
2246 KERNEL_UNLOCK_ALL(l, NULL); 2247 KERNEL_UNLOCK_ALL(l, NULL);
2247 2248
2248 /* 2249 /*
2249 * Don't permit coredump() multiple times in the same process. 2250 * Don't permit coredump() multiple times in the same process.
2250 * Call back into sigexit, where we will be suspended until 2251 * Call back into sigexit, where we will be suspended until
2251 * the deed is done. Note that this is a recursive call, but 2252 * the deed is done. Note that this is a recursive call, but
2252 * LW_WCORE will prevent us from coming back this way. 2253 * LW_WCORE will prevent us from coming back this way.
2253 */ 2254 */
2254 if ((p->p_sflag & PS_WCORE) != 0) { 2255 if ((p->p_sflag & PS_WCORE) != 0) {
2255 lwp_lock(l); 2256 lwp_lock(l);
2256 l->l_flag |= (LW_WCORE | LW_WEXIT | LW_WSUSPEND); 2257 l->l_flag |= (LW_WCORE | LW_WEXIT | LW_WSUSPEND);
2257 lwp_unlock(l); 2258 lwp_unlock(l);
2258 mutex_exit(p->p_lock); 2259 mutex_exit(p->p_lock);
2259 lwp_userret(l); 2260 lwp_userret(l);
2260 panic("sigexit 1"); 2261 panic("sigexit 1");
2261 /* NOTREACHED */ 2262 /* NOTREACHED */
2262 } 2263 }
2263 2264
2264 /* If process is already on the way out, then bail now. */ 2265 /* If process is already on the way out, then bail now. */
2265 if ((p->p_sflag & PS_WEXIT) != 0) { 2266 if ((p->p_sflag & PS_WEXIT) != 0) {
2266 mutex_exit(p->p_lock); 2267 mutex_exit(p->p_lock);
2267 lwp_exit(l); 2268 lwp_exit(l);
2268 panic("sigexit 2"); 2269 panic("sigexit 2");
2269 /* NOTREACHED */ 2270 /* NOTREACHED */
2270 } 2271 }
2271 2272
2272 /* 2273 /*
2273 * Prepare all other LWPs for exit. If dumping core, suspend them 2274 * Prepare all other LWPs for exit. If dumping core, suspend them
2274 * so that their registers are available long enough to be dumped. 2275 * so that their registers are available long enough to be dumped.
2275 */ 2276 */
2276 if ((docore = (sigprop[signo] & SA_CORE)) != 0) { 2277 if ((docore = (sigprop[signo] & SA_CORE)) != 0) {
2277 p->p_sflag |= PS_WCORE; 2278 p->p_sflag |= PS_WCORE;
2278 for (;;) { 2279 for (;;) {
2279 LIST_FOREACH(t, &p->p_lwps, l_sibling) { 2280 LIST_FOREACH(t, &p->p_lwps, l_sibling) {
2280 lwp_lock(t); 2281 lwp_lock(t);
2281 if (t == l) { 2282 if (t == l) {
2282 t->l_flag &= 2283 t->l_flag &=
2283 ~(LW_WSUSPEND | LW_DBGSUSPEND); 2284 ~(LW_WSUSPEND | LW_DBGSUSPEND);
2284 lwp_unlock(t); 2285 lwp_unlock(t);
2285 continue; 2286 continue;
2286 } 2287 }
2287 t->l_flag |= (LW_WCORE | LW_WEXIT); 2288 t->l_flag |= (LW_WCORE | LW_WEXIT);
2288 lwp_suspend(l, t); 2289 lwp_suspend(l, t);
2289 } 2290 }
2290 2291
2291 if (p->p_nrlwps == 1) 2292 if (p->p_nrlwps == 1)
2292 break; 2293 break;
2293 2294
2294 /* 2295 /*
2295 * Kick any LWPs sitting in lwp_wait1(), and wait 2296 * Kick any LWPs sitting in lwp_wait1(), and wait
2296 * for everyone else to stop before proceeding. 2297 * for everyone else to stop before proceeding.
2297 */ 2298 */
2298 p->p_nlwpwait++; 2299 p->p_nlwpwait++;
2299 cv_broadcast(&p->p_lwpcv); 2300 cv_broadcast(&p->p_lwpcv);
2300 cv_wait(&p->p_lwpcv, p->p_lock); 2301 cv_wait(&p->p_lwpcv, p->p_lock);
2301 p->p_nlwpwait--; 2302 p->p_nlwpwait--;
2302 } 2303 }
2303 } 2304 }
2304 2305
2305 exitsig = signo; 2306 exitsig = signo;
2306 p->p_acflag |= AXSIG; 2307 p->p_acflag |= AXSIG;
2307 memset(&p->p_sigctx.ps_info, 0, sizeof(p->p_sigctx.ps_info)); 2308 memset(&p->p_sigctx.ps_info, 0, sizeof(p->p_sigctx.ps_info));
2308 p->p_sigctx.ps_info._signo = signo; 2309 p->p_sigctx.ps_info._signo = signo;
2309 p->p_sigctx.ps_info._code = SI_NOINFO; 2310 p->p_sigctx.ps_info._code = SI_NOINFO;
2310 2311
2311 if (docore) { 2312 if (docore) {
2312 mutex_exit(p->p_lock); 2313 mutex_exit(p->p_lock);
2313 MODULE_HOOK_CALL(coredump_hook, (l, NULL), enosys(), error); 2314 MODULE_HOOK_CALL(coredump_hook, (l, NULL), enosys(), error);
2314 2315
2315 if (kern_logsigexit) { 2316 if (kern_logsigexit) {
2316 int uid = l->l_cred ? 2317 int uid = l->l_cred ?
2317 (int)kauth_cred_geteuid(l->l_cred) : -1; 2318 (int)kauth_cred_geteuid(l->l_cred) : -1;
2318 2319
2319 if (error) 2320 if (error)
2320 log(LOG_INFO, lognocoredump, p->p_pid, 2321 log(LOG_INFO, lognocoredump, p->p_pid,
2321 p->p_comm, uid, signo, error); 2322 p->p_comm, uid, signo, error);
2322 else 2323 else
2323 log(LOG_INFO, logcoredump, p->p_pid, 2324 log(LOG_INFO, logcoredump, p->p_pid,
2324 p->p_comm, uid, signo); 2325 p->p_comm, uid, signo);
2325 } 2326 }
2326 2327
2327#ifdef PAX_SEGVGUARD 2328#ifdef PAX_SEGVGUARD
2328 rw_enter(&exec_lock, RW_WRITER); 2329 rw_enter(&exec_lock, RW_WRITER);
2329 pax_segvguard(l, p->p_textvp, p->p_comm, true); 2330 pax_segvguard(l, p->p_textvp, p->p_comm, true);
2330 rw_exit(&exec_lock); 2331 rw_exit(&exec_lock);
2331#endif /* PAX_SEGVGUARD */ 2332#endif /* PAX_SEGVGUARD */
2332 2333
2333 /* Acquire the sched state mutex. exit1() will release it. */ 2334 /* Acquire the sched state mutex. exit1() will release it. */
2334 mutex_enter(p->p_lock); 2335 mutex_enter(p->p_lock);
2335 if (error == 0) 2336 if (error == 0)
2336 p->p_sflag |= PS_COREDUMP; 2337 p->p_sflag |= PS_COREDUMP;
2337 } 2338 }
2338 2339
2339 /* No longer dumping core. */ 2340 /* No longer dumping core. */
2340 p->p_sflag &= ~PS_WCORE; 2341 p->p_sflag &= ~PS_WCORE;
2341 2342
2342 exit1(l, 0, exitsig); 2343 exit1(l, 0, exitsig);
2343 /* NOTREACHED */ 2344 /* NOTREACHED */
2344} 2345}
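
The log() calls in sigexit() are gated on kern_logsigexit. A hedged sketch of inspecting that knob from userland with sysctlbyname(3), assuming the node is exposed as "kern.logsigexit" (check sysctl(7) on your system):

#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int val = 0;
	size_t len = sizeof(val);

	/* "kern.logsigexit" is assumed to be the node backing
	 * kern_logsigexit; verify the name against sysctl(7). */
	if (sysctlbyname("kern.logsigexit", &val, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return 1;
	}
	printf("kern.logsigexit = %d\n", val);
	return 0;
}
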
2345 2346
2346/* 2347/*
2347 * Since the "real" code may (or may not) be present in a loadable module, 2348 * Since the "real" code may (or may not) be present in a loadable module,
2348 * we provide routines here which call the module hooks. 2349 * we provide routines here which call the module hooks.
2349 */ 2350 */
2350int 2351int
2351coredump_netbsd(struct lwp *l, struct coredump_iostate *iocookie) 2352coredump_netbsd(struct lwp *l, struct coredump_iostate *iocookie)
2352{ 2353{
2353 int retval; 2354 int retval;
2354 2355
2355 MODULE_HOOK_CALL(coredump_netbsd_hook, (l, iocookie), ENOSYS, retval); 2356 MODULE_HOOK_CALL(coredump_netbsd_hook, (l, iocookie), ENOSYS, retval);
2356 return retval; 2357 return retval;
2357} 2358}
2358 2359
2359#if !defined(_LP64) || defined(COMPAT_NETBSD32) 2360#ifdef EXEC_ELF32
2360int 2361int
2361coredump_elf32(struct lwp *l, struct coredump_iostate *iocookie) 2362coredump_elf32(struct lwp *l, struct coredump_iostate *iocookie)
2362{ 2363{
2363 int retval; 2364 int retval;
2364 2365
2365 MODULE_HOOK_CALL(coredump_elf32_hook, (l, iocookie), ENOSYS, retval); 2366 MODULE_HOOK_CALL(coredump_elf32_hook, (l, iocookie), ENOSYS, retval);
2366 return retval; 2367 return retval;
2367} 2368}
2368#endif 2369#endif
2369 2370
2370#ifdef _LP64 2371#ifdef EXEC_ELF64
2371int 2372int
2372coredump_elf64(struct lwp *l, struct coredump_iostate *iocookie) 2373coredump_elf64(struct lwp *l, struct coredump_iostate *iocookie)
2373{ 2374{
2374 int retval; 2375 int retval;
2375 2376
2376 MODULE_HOOK_CALL(coredump_elf64_hook, (l, iocookie), ENOSYS, retval); 2377 MODULE_HOOK_CALL(coredump_elf64_hook, (l, iocookie), ENOSYS, retval);
2377 return retval; 2378 return retval;
2378} 2379}
2379#endif 2380#endif
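
The hunks above replace the old _LP64/COMPAT_NETBSD32 guards with EXEC_ELF32/EXEC_ELF64, so the coredump_elf32()/coredump_elf64() wrappers are built exactly when the corresponding ELF exec support is configured. The MODULE_HOOK_CALL pattern they use boils down to "call the hook if a module registered one, otherwise return the default (ENOSYS here)". A stripped-down sketch of that idea follows; it is an illustration only, not NetBSD's real module-hook machinery (which also handles locking and hook lifetime), and the names are made up:

/* Simplified illustration of hook-with-fallback dispatch. */
#include <errno.h>
#include <stddef.h>

struct lwp;
struct coredump_iostate;

/* Hook slot a coredump module would fill in when it loads. */
static int (*coredump_elf32_fn)(struct lwp *, struct coredump_iostate *);

#ifdef EXEC_ELF32
static int
coredump_elf32_sketch(struct lwp *l, struct coredump_iostate *io)
{

	/* With no module registered, behave as if the facility
	 * did not exist at all. */
	if (coredump_elf32_fn == NULL)
		return ENOSYS;
	return (*coredump_elf32_fn)(l, io);
}
#endif /* EXEC_ELF32 */
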
2380 2381
2381/* 2382/*
2382 * Put process 'p' into the stopped state and optionally notify the parent. 2383 * Put process 'p' into the stopped state and optionally notify the parent.
2383 */ 2384 */
2384void 2385void
2385proc_stop(struct proc *p, int signo) 2386proc_stop(struct proc *p, int signo)
2386{ 2387{
2387 struct lwp *l; 2388 struct lwp *l;
2388 2389
2389 KASSERT(mutex_owned(p->p_lock)); 2390 KASSERT(mutex_owned(p->p_lock));
2390 2391
2391 /* 2392 /*
2392 * First off, set the stopping indicator and bring all sleeping 2393 * First off, set the stopping indicator and bring all sleeping
2393 * LWPs to a halt so they are included in p->p_nrlwps. We mustn't 2394 * LWPs to a halt so they are included in p->p_nrlwps. We mustn't
2394 * unlock between here and the p->p_nrlwps check below. 2395 * unlock between here and the p->p_nrlwps check below.
2395 */ 2396 */
2396 p->p_sflag |= PS_STOPPING; 2397 p->p_sflag |= PS_STOPPING;
2397 membar_producer(); 2398 membar_producer();
2398 2399
2399 proc_stop_lwps(p); 2400 proc_stop_lwps(p);
2400 2401
2401 /* 2402 /*
2402 * If there are no LWPs available to take the signal, then we 2403 * If there are no LWPs available to take the signal, then we
2403 * signal the parent process immediately. Otherwise, the last 2404 * signal the parent process immediately. Otherwise, the last
2404 * LWP to stop will take care of it. 2405 * LWP to stop will take care of it.
2405 */ 2406 */
2406 2407
2407 if (p->p_nrlwps == 0) { 2408 if (p->p_nrlwps == 0) {
2408 proc_stop_done(p, PS_NOCLDSTOP); 2409 proc_stop_done(p, PS_NOCLDSTOP);
2409 } else { 2410 } else {
2410 /* 2411 /*
2411 * Have the remaining LWPs come to a halt, and trigger 2412 * Have the remaining LWPs come to a halt, and trigger
2412 * proc_stop_callout() to ensure that they do. 2413 * proc_stop_callout() to ensure that they do.
2413 */ 2414 */
2414 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 2415 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
2415 sigpost(l, SIG_DFL, SA_STOP, signo); 2416 sigpost(l, SIG_DFL, SA_STOP, signo);
2416 } 2417 }
2417 callout_schedule(&proc_stop_ch, 1); 2418 callout_schedule(&proc_stop_ch, 1);
2418 } 2419 }
2419} 2420}
2420 2421
2421/* 2422/*
2422 * When stopping a process, we do not immediately set sleeping LWPs stopped, 2423 * When stopping a process, we do not immediately set sleeping LWPs stopped,
2423 * but wait for them to come to a halt at the kernel-user boundary. This is 2424 * but wait for them to come to a halt at the kernel-user boundary. This is
2424 * to allow LWPs to release any locks that they may hold before stopping. 2425 * to allow LWPs to release any locks that they may hold before stopping.
2425 * 2426 *
2426 * Non-interruptible sleeps can be long, and there is the potential for an 2427 * Non-interruptible sleeps can be long, and there is the potential for an
2427 * LWP to begin sleeping interruptibly soon after the process has been set 2428 * LWP to begin sleeping interruptibly soon after the process has been set
2428 * stopping (PS_STOPPING). These LWPs will not notice that the process is 2429 * stopping (PS_STOPPING). These LWPs will not notice that the process is
2429 * stopping, and so complete halt of the process and the return of status 2430 * stopping, and so complete halt of the process and the return of status
2430 * information to the parent could be delayed indefinitely. 2431 * information to the parent could be delayed indefinitely.
2431 * 2432 *
2432 * To handle this race, proc_stop_callout() runs once per tick while there 2433 * To handle this race, proc_stop_callout() runs once per tick while there
2433 * are stopping processes in the system. It sets LWPs that are sleeping 2434 * are stopping processes in the system. It sets LWPs that are sleeping
2434 * interruptibly into the LSSTOP state. 2435 * interruptibly into the LSSTOP state.
2435 * 2436 *
2436 * Note that we are not concerned about keeping all LWPs stopped while the 2437 * Note that we are not concerned about keeping all LWPs stopped while the
2437 * process is stopped: stopped LWPs can awaken briefly to handle signals.  2438 * process is stopped: stopped LWPs can awaken briefly to handle signals.
2438 * What we do need to ensure is that all LWPs in a stopping process have 2439 * What we do need to ensure is that all LWPs in a stopping process have
2439 * stopped at least once, so that notification can be sent to the parent 2440 * stopped at least once, so that notification can be sent to the parent
2440 * process. 2441 * process.
2441 */ 2442 */
2442static void 2443static void
2443proc_stop_callout(void *cookie) 2444proc_stop_callout(void *cookie)
2444{ 2445{
2445 bool more, restart; 2446 bool more, restart;
2446 struct proc *p; 2447 struct proc *p;
2447 2448
2448 (void)cookie; 2449 (void)cookie;
2449 2450
2450 do { 2451 do {
2451 restart = false; 2452 restart = false;
2452 more = false; 2453 more = false;
2453 2454
2454 mutex_enter(&proc_lock); 2455 mutex_enter(&proc_lock);
2455 PROCLIST_FOREACH(p, &allproc) { 2456 PROCLIST_FOREACH(p, &allproc) {
2456 mutex_enter(p->p_lock); 2457 mutex_enter(p->p_lock);
2457 2458
2458 if ((p->p_sflag & PS_STOPPING) == 0) { 2459 if ((p->p_sflag & PS_STOPPING) == 0) {
2459 mutex_exit(p->p_lock); 2460 mutex_exit(p->p_lock);
2460 continue; 2461 continue;
2461 } 2462 }
2462 2463
2463 /* Stop any LWPs sleeping interruptibly. */ 2464 /* Stop any LWPs sleeping interruptibly. */
2464 proc_stop_lwps(p); 2465 proc_stop_lwps(p);
2465 if (p->p_nrlwps == 0) { 2466 if (p->p_nrlwps == 0) {
2466 /* 2467 /*
2467 * We brought the process to a halt. 2468 * We brought the process to a halt.
2468 * Mark it as stopped and notify the 2469 * Mark it as stopped and notify the
2469 * parent. 2470 * parent.
2470 * 2471 *
2471 * Note that proc_stop_done() will 2472 * Note that proc_stop_done() will
2472 * drop p->p_lock briefly. 2473 * drop p->p_lock briefly.
2473 * Arrange to restart and check 2474 * Arrange to restart and check
2474 * all processes again. 2475 * all processes again.
2475 */ 2476 */
2476 restart = true; 2477 restart = true;
2477 proc_stop_done(p, PS_NOCLDSTOP); 2478 proc_stop_done(p, PS_NOCLDSTOP);
2478 } else 2479 } else
2479 more = true; 2480 more = true;
2480 2481
2481 mutex_exit(p->p_lock); 2482 mutex_exit(p->p_lock);
2482 if (restart) 2483 if (restart)
2483 break; 2484 break;
2484 } 2485 }
2485 mutex_exit(&proc_lock); 2486 mutex_exit(&proc_lock);
2486 } while (restart); 2487 } while (restart);
2487 2488
2488 /* 2489 /*
2489 * If we noted processes that are stopping but still have 2490 * If we noted processes that are stopping but still have
2490 * running LWPs, then arrange to check again in 1 tick. 2491 * running LWPs, then arrange to check again in 1 tick.
2491 */ 2492 */
2492 if (more) 2493 if (more)
2493 callout_schedule(&proc_stop_ch, 1); 2494 callout_schedule(&proc_stop_ch, 1);
2494} 2495}
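
proc_stop_callout() is the "poll until the work drains" half of the design described in the comment above: it re-arms itself one tick out for as long as some stopping process still has running LWPs. The callout(9) idiom in isolation, as a sketch with a made-up work predicate (work_remaining() is a placeholder, not a kernel API):

#include <sys/types.h>
#include <sys/callout.h>

static struct callout poll_ch;

static bool
work_remaining(void)
{

	return false;			/* placeholder predicate */
}

static void
poll_tick(void *arg)
{

	/* ... do one pass over the outstanding work ... */

	/* Re-arm for one tick later only while work is left, as
	 * proc_stop_callout() does with proc_stop_ch. */
	if (work_remaining())
		callout_schedule(&poll_ch, 1);
}

static void
poll_start(void)
{

	callout_init(&poll_ch, CALLOUT_MPSAFE);
	callout_setfunc(&poll_ch, poll_tick, NULL);
	callout_schedule(&poll_ch, 1);
}
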
2495 2496
2496/* 2497/*
2497 * Given a process in state SSTOP, set the state back to SACTIVE and 2498 * Given a process in state SSTOP, set the state back to SACTIVE and
2498 * move LSSTOP'd LWPs to LSSLEEP or make them runnable. 2499 * move LSSTOP'd LWPs to LSSLEEP or make them runnable.
2499 */ 2500 */
2500void 2501void
2501proc_unstop(struct proc *p) 2502proc_unstop(struct proc *p)
2502{ 2503{
2503 struct lwp *l; 2504 struct lwp *l;
2504 int sig; 2505 int sig;
2505 2506
2506 KASSERT(mutex_owned(&proc_lock)); 2507 KASSERT(mutex_owned(&proc_lock));
2507 KASSERT(mutex_owned(p->p_lock)); 2508 KASSERT(mutex_owned(p->p_lock));
2508 2509
2509 p->p_stat = SACTIVE; 2510 p->p_stat = SACTIVE;
2510 p->p_sflag &= ~PS_STOPPING; 2511 p->p_sflag &= ~PS_STOPPING;
2511 sig = p->p_xsig; 2512 sig = p->p_xsig;
2512 2513
2513 if (!p->p_waited) 2514 if (!p->p_waited)
2514 p->p_pptr->p_nstopchild--; 2515 p->p_pptr->p_nstopchild--;
2515 2516
2516 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 2517 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
2517 lwp_lock(l); 2518 lwp_lock(l);
2518 if (l->l_stat != LSSTOP || (l->l_flag & LW_DBGSUSPEND) != 0) { 2519 if (l->l_stat != LSSTOP || (l->l_flag & LW_DBGSUSPEND) != 0) {
2519 lwp_unlock(l); 2520 lwp_unlock(l);
2520 continue; 2521 continue;
2521 } 2522 }
2522 if (l->l_wchan == NULL) { 2523 if (l->l_wchan == NULL) {
2523 setrunnable(l); 2524 setrunnable(l);
2524 continue; 2525 continue;
2525 } 2526 }
2526 if (sig && (l->l_flag & LW_SINTR) != 0) { 2527 if (sig && (l->l_flag & LW_SINTR) != 0) {
2527 setrunnable(l); 2528 setrunnable(l);
2528 sig = 0; 2529 sig = 0;
2529 } else { 2530 } else {
2530 l->l_stat = LSSLEEP; 2531 l->l_stat = LSSLEEP;
2531 p->p_nrlwps++; 2532 p->p_nrlwps++;
2532 lwp_unlock(l); 2533 lwp_unlock(l);
2533 } 2534 }
2534 } 2535 }
2535} 2536}
2536 2537
2537void 2538void
2538proc_stoptrace(int trapno, int sysnum, const register_t args[], 2539proc_stoptrace(int trapno, int sysnum, const register_t args[],
2539 const register_t *ret, int error) 2540 const register_t *ret, int error)
2540{ 2541{
2541 struct lwp *l = curlwp; 2542 struct lwp *l = curlwp;
2542 struct proc *p = l->l_proc; 2543 struct proc *p = l->l_proc;
2543 struct sigacts *ps; 2544 struct sigacts *ps;
2544 sigset_t *mask; 2545 sigset_t *mask;
2545 sig_t action; 2546 sig_t action;
2546 ksiginfo_t ksi; 2547 ksiginfo_t ksi;
2547 size_t i, sy_narg; 2548 size_t i, sy_narg;
2548 const int signo = SIGTRAP; 2549 const int signo = SIGTRAP;
2549 2550
2550 KASSERT((trapno == TRAP_SCE) || (trapno == TRAP_SCX)); 2551 KASSERT((trapno == TRAP_SCE) || (trapno == TRAP_SCX));
2551 KASSERT(p->p_pptr != initproc); 2552 KASSERT(p->p_pptr != initproc);
2552 KASSERT(ISSET(p->p_slflag, PSL_TRACED)); 2553 KASSERT(ISSET(p->p_slflag, PSL_TRACED));
2553 KASSERT(ISSET(p->p_slflag, PSL_SYSCALL)); 2554 KASSERT(ISSET(p->p_slflag, PSL_SYSCALL));
2554 2555
2555 sy_narg = p->p_emul->e_sysent[sysnum].sy_narg; 2556 sy_narg = p->p_emul->e_sysent[sysnum].sy_narg;
2556 2557
2557 KSI_INIT_TRAP(&ksi); 2558 KSI_INIT_TRAP(&ksi);
2558 ksi.ksi_lid = l->l_lid; 2559 ksi.ksi_lid = l->l_lid;
2559 ksi.ksi_signo = signo; 2560 ksi.ksi_signo = signo;
2560 ksi.ksi_code = trapno; 2561 ksi.ksi_code = trapno;
2561 2562
2562 ksi.ksi_sysnum = sysnum; 2563 ksi.ksi_sysnum = sysnum;
2563 if (trapno == TRAP_SCE) { 2564 if (trapno == TRAP_SCE) {
2564 ksi.ksi_retval[0] = 0; 2565 ksi.ksi_retval[0] = 0;
2565 ksi.ksi_retval[1] = 0; 2566 ksi.ksi_retval[1] = 0;
2566 ksi.ksi_error = 0; 2567 ksi.ksi_error = 0;
2567 } else { 2568 } else {
2568 ksi.ksi_retval[0] = ret[0]; 2569 ksi.ksi_retval[0] = ret[0];
2569 ksi.ksi_retval[1] = ret[1]; 2570 ksi.ksi_retval[1] = ret[1];
2570 ksi.ksi_error = error; 2571 ksi.ksi_error = error;
2571 } 2572 }
2572 2573
2573 memset(ksi.ksi_args, 0, sizeof(ksi.ksi_args)); 2574 memset(ksi.ksi_args, 0, sizeof(ksi.ksi_args));
2574 2575
2575 for (i = 0; i < sy_narg; i++) 2576 for (i = 0; i < sy_narg; i++)
2576 ksi.ksi_args[i] = args[i]; 2577 ksi.ksi_args[i] = args[i];
2577 2578
2578 mutex_enter(p->p_lock); 2579 mutex_enter(p->p_lock);
2579 2580
2580repeat: 2581repeat:
2581 /* 2582 /*
2582 * If we are exiting, demise now. 2583 * If we are exiting, demise now.
2583 * 2584 *
2584 * This avoids notifying tracer and deadlocking. 2585 * This avoids notifying tracer and deadlocking.
2585 */ 2586 */
2586 if (__predict_false(ISSET(p->p_sflag, PS_WEXIT))) { 2587 if (__predict_false(ISSET(p->p_sflag, PS_WEXIT))) {
2587 mutex_exit(p->p_lock); 2588 mutex_exit(p->p_lock);
2588 lwp_exit(l); 2589 lwp_exit(l);
2589 panic("proc_stoptrace"); 2590 panic("proc_stoptrace");
2590 /* NOTREACHED */ 2591 /* NOTREACHED */
2591 } 2592 }
2592 2593
2593 /* 2594 /*
2594 * If there's a pending SIGKILL, process it immediately. 2595 * If there's a pending SIGKILL, process it immediately.
2595 */ 2596 */
2596 if (p->p_xsig == SIGKILL || 2597 if (p->p_xsig == SIGKILL ||
2597 sigismember(&p->p_sigpend.sp_set, SIGKILL)) { 2598 sigismember(&p->p_sigpend.sp_set, SIGKILL)) {
2598 mutex_exit(p->p_lock); 2599 mutex_exit(p->p_lock);
2599 return; 2600 return;
2600 } 2601 }
2601 2602
2602 /* 2603 /*
2603 * If we are no longer traced, abandon this event signal. 2604 * If we are no longer traced, abandon this event signal.
2604 * 2605 *
2605 * This avoids killing a process after detaching the debugger. 2606 * This avoids killing a process after detaching the debugger.
2606 */ 2607 */
2607 if (__predict_false(!ISSET(p->p_slflag, PSL_TRACED))) { 2608 if (__predict_false(!ISSET(p->p_slflag, PSL_TRACED))) {
2608 mutex_exit(p->p_lock); 2609 mutex_exit(p->p_lock);
2609 return; 2610 return;
2610 } 2611 }
2611 2612
2612 /* 2613 /*
2613 * The process is already stopping. 2614 * The process is already stopping.
2614 */ 2615 */
2615 if ((p->p_sflag & PS_STOPPING) != 0) { 2616 if ((p->p_sflag & PS_STOPPING) != 0) {
2616 sigswitch_unlock_and_switch_away(l); 2617 sigswitch_unlock_and_switch_away(l);
2617 mutex_enter(p->p_lock); 2618 mutex_enter(p->p_lock);
2618 goto repeat; 2619 goto repeat;
2619 } 2620 }
2620 2621
2621 /* Needed for ktrace */ 2622 /* Needed for ktrace */
2622 ps = p->p_sigacts; 2623 ps = p->p_sigacts;
2623 action = SIGACTION_PS(ps, signo).sa_handler; 2624 action = SIGACTION_PS(ps, signo).sa_handler;
2624 mask = &l->l_sigmask; 2625 mask = &l->l_sigmask;
2625 2626
2626 p->p_xsig = signo; 2627 p->p_xsig = signo;
2627 p->p_sigctx.ps_lwp = ksi.ksi_lid; 2628 p->p_sigctx.ps_lwp = ksi.ksi_lid;
2628 p->p_sigctx.ps_info = ksi.ksi_info; 2629 p->p_sigctx.ps_info = ksi.ksi_info;
2629 sigswitch(0, signo, false); 2630 sigswitch(0, signo, false);
2630 2631
2631 if (ktrpoint(KTR_PSIG)) { 2632 if (ktrpoint(KTR_PSIG)) {
2632 if (p->p_emul->e_ktrpsig) 2633 if (p->p_emul->e_ktrpsig)
2633 p->p_emul->e_ktrpsig(signo, action, mask, &ksi); 2634 p->p_emul->e_ktrpsig(signo, action, mask, &ksi);
2634 else 2635 else
2635 ktrpsig(signo, action, mask, &ksi); 2636 ktrpsig(signo, action, mask, &ksi);
2636 } 2637 }
2637} 2638}
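
proc_stoptrace() is what a syscall-tracing stop looks like from the kernel side: the tracee parks with SIGTRAP and a si_code of TRAP_SCE (syscall entry) or TRAP_SCX (syscall exit), with the syscall number, arguments and return values packed into the siginfo. A condensed debugger-side loop, a sketch assuming NetBSD's PT_SYSCALL/PT_GET_SIGINFO requests and the TRAP_SCE/TRAP_SCX codes; pid is assumed to be a child already being traced, and error handling is minimal:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>

static void
trace_syscalls(pid_t pid)
{
	struct ptrace_siginfo psi;
	int status;

	for (;;) {
		/* Run until the next syscall entry or exit. */
		if (ptrace(PT_SYSCALL, pid, (void *)1, 0) == -1)
			break;
		if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
			break;

		/* The kernel filled this in via proc_stoptrace(). */
		if (ptrace(PT_GET_SIGINFO, pid, &psi, sizeof(psi)) == -1)
			break;
		if (psi.psi_siginfo.si_signo == SIGTRAP &&
		    psi.psi_siginfo.si_code == TRAP_SCE)
			printf("lwp %d: syscall entry\n", psi.psi_lwpid);
		else if (psi.psi_siginfo.si_signo == SIGTRAP &&
		    psi.psi_siginfo.si_code == TRAP_SCX)
			printf("lwp %d: syscall exit\n", psi.psi_lwpid);
	}
}
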
2638 2639
2639static int 2640static int
2640filt_sigattach(struct knote *kn) 2641filt_sigattach(struct knote *kn)
2641{ 2642{
2642 struct proc *p = curproc; 2643 struct proc *p = curproc;
2643 2644
2644 kn->kn_obj = p; 2645 kn->kn_obj = p;
2645 kn->kn_flags |= EV_CLEAR; /* automatically set */ 2646 kn->kn_flags |= EV_CLEAR; /* automatically set */
2646 2647
2647 mutex_enter(p->p_lock); 2648 mutex_enter(p->p_lock);
2648 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext); 2649 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
2649 mutex_exit(p->p_lock); 2650 mutex_exit(p->p_lock);
2650 2651
2651 return 0; 2652 return 0;
2652} 2653}
2653 2654
2654static void 2655static void
2655filt_sigdetach(struct knote *kn) 2656filt_sigdetach(struct knote *kn)
2656{ 2657{
2657 struct proc *p = kn->kn_obj; 2658 struct proc *p = kn->kn_obj;
2658 2659
2659 mutex_enter(p->p_lock); 2660 mutex_enter(p->p_lock);
2660 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext); 2661 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
2661 mutex_exit(p->p_lock); 2662 mutex_exit(p->p_lock);
2662} 2663}
2663 2664
2664/* 2665/*
2665 * Signal knotes are shared with proc knotes, so we apply a mask to 2666 * Signal knotes are shared with proc knotes, so we apply a mask to
2666 * the hint in order to differentiate them from process hints. This 2667 * the hint in order to differentiate them from process hints. This
2667 * could be avoided by using a signal-specific knote list, but probably 2668 * could be avoided by using a signal-specific knote list, but probably
2668 * isn't worth the trouble. 2669 * isn't worth the trouble.
2669 */ 2670 */
2670static int 2671static int
2671filt_signal(struct knote *kn, long hint) 2672filt_signal(struct knote *kn, long hint)
2672{ 2673{
2673 2674
2674 if (hint & NOTE_SIGNAL) { 2675 if (hint & NOTE_SIGNAL) {
2675 hint &= ~NOTE_SIGNAL; 2676 hint &= ~NOTE_SIGNAL;
2676 2677
2677 if (kn->kn_id == hint) 2678 if (kn->kn_id == hint)
2678 kn->kn_data++; 2679 kn->kn_data++;
2679 } 2680 }
2680 return (kn->kn_data != 0); 2681 return (kn->kn_data != 0);
2681} 2682}
2682 2683
2683const struct filterops sig_filtops = { 2684const struct filterops sig_filtops = {
2684 .f_isfd = 0, 2685 .f_isfd = 0,
2685 .f_attach = filt_sigattach, 2686 .f_attach = filt_sigattach,
2686 .f_detach = filt_sigdetach, 2687 .f_detach = filt_sigdetach,
2687 .f_event = filt_signal, 2688 .f_event = filt_signal,
2688}; 2689};
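
sig_filtops backs EVFILT_SIGNAL: filt_signal() bumps kn_data each time the matching signal is posted, and because filt_sigattach() forces EV_CLEAR the count resets once it has been read. A small userland consumer sketch, following the usual kqueue(2) example of ignoring the signal so its default action does not fire while the filter still records the posts:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <inttypes.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int kq, n;

	/* Ignore SIGINT so its default action does not kill us; the
	 * kevent filter still counts each post. */
	signal(SIGINT, SIG_IGN);

	kq = kqueue();
	EV_SET(&kev, SIGINT, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	for (;;) {
		n = kevent(kq, NULL, 0, &kev, 1, NULL);
		if (n <= 0)
			break;
		/* kev.data is kn_data: deliveries since the last read. */
		printf("SIGINT posted %" PRId64 " time(s)\n",
		    (int64_t)kev.data);
	}
	return 0;
}
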