| @@ -1,1391 +1,1391 @@ | | | @@ -1,1391 +1,1391 @@ |
1 | /* $NetBSD: kern_exec.c,v 1.318 2011/08/25 19:14:07 reinoud Exp $ */ | | 1 | /* $NetBSD: kern_exec.c,v 1.319 2011/08/25 19:54:30 reinoud Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2008 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2008 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 16 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
17 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 17 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
18 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 18 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. | | 26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | /*- | | 29 | /*- |
30 | * Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou | | 30 | * Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou |
31 | * Copyright (C) 1992 Wolfgang Solfrank. | | 31 | * Copyright (C) 1992 Wolfgang Solfrank. |
32 | * Copyright (C) 1992 TooLs GmbH. | | 32 | * Copyright (C) 1992 TooLs GmbH. |
33 | * All rights reserved. | | 33 | * All rights reserved. |
34 | * | | 34 | * |
35 | * Redistribution and use in source and binary forms, with or without | | 35 | * Redistribution and use in source and binary forms, with or without |
36 | * modification, are permitted provided that the following conditions | | 36 | * modification, are permitted provided that the following conditions |
37 | * are met: | | 37 | * are met: |
38 | * 1. Redistributions of source code must retain the above copyright | | 38 | * 1. Redistributions of source code must retain the above copyright |
39 | * notice, this list of conditions and the following disclaimer. | | 39 | * notice, this list of conditions and the following disclaimer. |
40 | * 2. Redistributions in binary form must reproduce the above copyright | | 40 | * 2. Redistributions in binary form must reproduce the above copyright |
41 | * notice, this list of conditions and the following disclaimer in the | | 41 | * notice, this list of conditions and the following disclaimer in the |
42 | * documentation and/or other materials provided with the distribution. | | 42 | * documentation and/or other materials provided with the distribution. |
43 | * 3. All advertising materials mentioning features or use of this software | | 43 | * 3. All advertising materials mentioning features or use of this software |
44 | * must display the following acknowledgement: | | 44 | * must display the following acknowledgement: |
45 | * This product includes software developed by TooLs GmbH. | | 45 | * This product includes software developed by TooLs GmbH. |
46 | * 4. The name of TooLs GmbH may not be used to endorse or promote products | | 46 | * 4. The name of TooLs GmbH may not be used to endorse or promote products |
47 | * derived from this software without specific prior written permission. | | 47 | * derived from this software without specific prior written permission. |
48 | * | | 48 | * |
49 | * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR | | 49 | * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR |
50 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 50 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
51 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 51 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
52 | * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | | 52 | * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
53 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | | 53 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
54 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | | 54 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; |
55 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | | 55 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
56 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | | 56 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR |
57 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | | 57 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF |
58 | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 58 | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
59 | */ | | 59 | */ |
60 | | | 60 | |
61 | #include <sys/cdefs.h> | | 61 | #include <sys/cdefs.h> |
62 | __KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.318 2011/08/25 19:14:07 reinoud Exp $"); | | 62 | __KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.319 2011/08/25 19:54:30 reinoud Exp $"); |
63 | | | 63 | |
64 | #include "opt_ktrace.h" | | 64 | #include "opt_ktrace.h" |
65 | #include "opt_modular.h" | | 65 | #include "opt_modular.h" |
66 | #include "opt_syscall_debug.h" | | 66 | #include "opt_syscall_debug.h" |
67 | #include "veriexec.h" | | 67 | #include "veriexec.h" |
68 | #include "opt_pax.h" | | 68 | #include "opt_pax.h" |
69 | #include "opt_sa.h" | | 69 | #include "opt_sa.h" |
70 | | | 70 | |
71 | #include <sys/param.h> | | 71 | #include <sys/param.h> |
72 | #include <sys/systm.h> | | 72 | #include <sys/systm.h> |
73 | #include <sys/filedesc.h> | | 73 | #include <sys/filedesc.h> |
74 | #include <sys/kernel.h> | | 74 | #include <sys/kernel.h> |
75 | #include <sys/proc.h> | | 75 | #include <sys/proc.h> |
76 | #include <sys/mount.h> | | 76 | #include <sys/mount.h> |
77 | #include <sys/malloc.h> | | 77 | #include <sys/malloc.h> |
78 | #include <sys/kmem.h> | | 78 | #include <sys/kmem.h> |
79 | #include <sys/namei.h> | | 79 | #include <sys/namei.h> |
80 | #include <sys/vnode.h> | | 80 | #include <sys/vnode.h> |
81 | #include <sys/file.h> | | 81 | #include <sys/file.h> |
82 | #include <sys/acct.h> | | 82 | #include <sys/acct.h> |
83 | #include <sys/exec.h> | | 83 | #include <sys/exec.h> |
84 | #include <sys/ktrace.h> | | 84 | #include <sys/ktrace.h> |
85 | #include <sys/uidinfo.h> | | 85 | #include <sys/uidinfo.h> |
86 | #include <sys/wait.h> | | 86 | #include <sys/wait.h> |
87 | #include <sys/mman.h> | | 87 | #include <sys/mman.h> |
88 | #include <sys/ras.h> | | 88 | #include <sys/ras.h> |
89 | #include <sys/signalvar.h> | | 89 | #include <sys/signalvar.h> |
90 | #include <sys/stat.h> | | 90 | #include <sys/stat.h> |
91 | #include <sys/syscall.h> | | 91 | #include <sys/syscall.h> |
92 | #include <sys/kauth.h> | | 92 | #include <sys/kauth.h> |
93 | #include <sys/lwpctl.h> | | 93 | #include <sys/lwpctl.h> |
94 | #include <sys/pax.h> | | 94 | #include <sys/pax.h> |
95 | #include <sys/cpu.h> | | 95 | #include <sys/cpu.h> |
96 | #include <sys/module.h> | | 96 | #include <sys/module.h> |
97 | #include <sys/sa.h> | | 97 | #include <sys/sa.h> |
98 | #include <sys/savar.h> | | 98 | #include <sys/savar.h> |
99 | #include <sys/syscallvar.h> | | 99 | #include <sys/syscallvar.h> |
100 | #include <sys/syscallargs.h> | | 100 | #include <sys/syscallargs.h> |
101 | #if NVERIEXEC > 0 | | 101 | #if NVERIEXEC > 0 |
102 | #include <sys/verified_exec.h> | | 102 | #include <sys/verified_exec.h> |
103 | #endif /* NVERIEXEC > 0 */ | | 103 | #endif /* NVERIEXEC > 0 */ |
104 | #include <sys/sdt.h> | | 104 | #include <sys/sdt.h> |
105 | | | 105 | |
106 | #include <uvm/uvm_extern.h> | | 106 | #include <uvm/uvm_extern.h> |
107 | | | 107 | |
108 | #include <machine/reg.h> | | 108 | #include <machine/reg.h> |
109 | | | 109 | |
110 | #include <compat/common/compat_util.h> | | 110 | #include <compat/common/compat_util.h> |
111 | | | 111 | |
112 | static int exec_sigcode_map(struct proc *, const struct emul *); | | 112 | static int exec_sigcode_map(struct proc *, const struct emul *); |
113 | | | 113 | |
114 | #ifdef DEBUG_EXEC | | 114 | #ifdef DEBUG_EXEC |
115 | #define DPRINTF(a) printf a | | 115 | #define DPRINTF(a) printf a |
116 | #define COPYPRINTF(s, a, b) printf("%s, %d: copyout%s @%p %zu\n", __func__, \ | | 116 | #define COPYPRINTF(s, a, b) printf("%s, %d: copyout%s @%p %zu\n", __func__, \ |
117 | __LINE__, (s), (a), (b)) | | 117 | __LINE__, (s), (a), (b)) |
118 | #else | | 118 | #else |
119 | #define DPRINTF(a) | | 119 | #define DPRINTF(a) |
120 | #define COPYPRINTF(s, a, b) | | 120 | #define COPYPRINTF(s, a, b) |
121 | #endif /* DEBUG_EXEC */ | | 121 | #endif /* DEBUG_EXEC */ |
122 | | | 122 | |
123 | /* | | 123 | /* |
124 | * DTrace SDT provider definitions | | 124 | * DTrace SDT provider definitions |
125 | */ | | 125 | */ |
126 | SDT_PROBE_DEFINE(proc,,,exec, | | 126 | SDT_PROBE_DEFINE(proc,,,exec, |
127 | "char *", NULL, | | 127 | "char *", NULL, |
128 | NULL, NULL, NULL, NULL, | | 128 | NULL, NULL, NULL, NULL, |
129 | NULL, NULL, NULL, NULL); | | 129 | NULL, NULL, NULL, NULL); |
130 | SDT_PROBE_DEFINE(proc,,,exec_success, | | 130 | SDT_PROBE_DEFINE(proc,,,exec_success, |
131 | "char *", NULL, | | 131 | "char *", NULL, |
132 | NULL, NULL, NULL, NULL, | | 132 | NULL, NULL, NULL, NULL, |
133 | NULL, NULL, NULL, NULL); | | 133 | NULL, NULL, NULL, NULL); |
134 | SDT_PROBE_DEFINE(proc,,,exec_failure, | | 134 | SDT_PROBE_DEFINE(proc,,,exec_failure, |
135 | "int", NULL, | | 135 | "int", NULL, |
136 | NULL, NULL, NULL, NULL, | | 136 | NULL, NULL, NULL, NULL, |
137 | NULL, NULL, NULL, NULL); | | 137 | NULL, NULL, NULL, NULL); |
138 | | | 138 | |
139 | /* | | 139 | /* |
140 | * Exec function switch: | | 140 | * Exec function switch: |
141 | * | | 141 | * |
142 | * Note that each makecmds function is responsible for loading the | | 142 | * Note that each makecmds function is responsible for loading the |
143 | * exec package with the necessary functions for any exec-type-specific | | 143 | * exec package with the necessary functions for any exec-type-specific |
144 | * handling. | | 144 | * handling. |
145 | * | | 145 | * |
146 | * Functions for specific exec types should be defined in their own | | 146 | * Functions for specific exec types should be defined in their own |
147 | * header file. | | 147 | * header file. |
148 | */ | | 148 | */ |
149 | static const struct execsw **execsw = NULL; | | 149 | static const struct execsw **execsw = NULL; |
150 | static int nexecs; | | 150 | static int nexecs; |
151 | | | 151 | |
152 | u_int exec_maxhdrsz; /* must not be static - used by netbsd32 */ | | 152 | u_int exec_maxhdrsz; /* must not be static - used by netbsd32 */ |
153 | | | 153 | |
154 | /* list of dynamically loaded execsw entries */ | | 154 | /* list of dynamically loaded execsw entries */ |
155 | static LIST_HEAD(execlist_head, exec_entry) ex_head = | | 155 | static LIST_HEAD(execlist_head, exec_entry) ex_head = |
156 | LIST_HEAD_INITIALIZER(ex_head); | | 156 | LIST_HEAD_INITIALIZER(ex_head); |
157 | struct exec_entry { | | 157 | struct exec_entry { |
158 | LIST_ENTRY(exec_entry) ex_list; | | 158 | LIST_ENTRY(exec_entry) ex_list; |
159 | SLIST_ENTRY(exec_entry) ex_slist; | | 159 | SLIST_ENTRY(exec_entry) ex_slist; |
160 | const struct execsw *ex_sw; | | 160 | const struct execsw *ex_sw; |
161 | }; | | 161 | }; |
162 | | | 162 | |
163 | #ifndef __HAVE_SYSCALL_INTERN | | 163 | #ifndef __HAVE_SYSCALL_INTERN |
164 | void syscall(void); | | 164 | void syscall(void); |
165 | #endif | | 165 | #endif |
166 | | | 166 | |
167 | #ifdef KERN_SA | | 167 | #ifdef KERN_SA |
168 | static struct sa_emul saemul_netbsd = { | | 168 | static struct sa_emul saemul_netbsd = { |
169 | sizeof(ucontext_t), | | 169 | sizeof(ucontext_t), |
170 | sizeof(struct sa_t), | | 170 | sizeof(struct sa_t), |
171 | sizeof(struct sa_t *), | | 171 | sizeof(struct sa_t *), |
172 | NULL, | | 172 | NULL, |
173 | NULL, | | 173 | NULL, |
174 | cpu_upcall, | | 174 | cpu_upcall, |
175 | (void (*)(struct lwp *, void *))getucontext_sa, | | 175 | (void (*)(struct lwp *, void *))getucontext_sa, |
176 | sa_ucsp | | 176 | sa_ucsp |
177 | }; | | 177 | }; |
178 | #endif /* KERN_SA */ | | 178 | #endif /* KERN_SA */ |
179 | | | 179 | |
180 | /* NetBSD emul struct */ | | 180 | /* NetBSD emul struct */ |
181 | struct emul emul_netbsd = { | | 181 | struct emul emul_netbsd = { |
182 | .e_name = "netbsd", | | 182 | .e_name = "netbsd", |
183 | .e_path = NULL, | | 183 | .e_path = NULL, |
184 | #ifndef __HAVE_MINIMAL_EMUL | | 184 | #ifndef __HAVE_MINIMAL_EMUL |
185 | .e_flags = EMUL_HAS_SYS___syscall, | | 185 | .e_flags = EMUL_HAS_SYS___syscall, |
186 | .e_errno = NULL, | | 186 | .e_errno = NULL, |
187 | .e_nosys = SYS_syscall, | | 187 | .e_nosys = SYS_syscall, |
188 | .e_nsysent = SYS_NSYSENT, | | 188 | .e_nsysent = SYS_NSYSENT, |
189 | #endif | | 189 | #endif |
190 | .e_sysent = sysent, | | 190 | .e_sysent = sysent, |
191 | #ifdef SYSCALL_DEBUG | | 191 | #ifdef SYSCALL_DEBUG |
192 | .e_syscallnames = syscallnames, | | 192 | .e_syscallnames = syscallnames, |
193 | #else | | 193 | #else |
194 | .e_syscallnames = NULL, | | 194 | .e_syscallnames = NULL, |
195 | #endif | | 195 | #endif |
196 | .e_sendsig = sendsig, | | 196 | .e_sendsig = sendsig, |
197 | .e_trapsignal = trapsignal, | | 197 | .e_trapsignal = trapsignal, |
198 | .e_tracesig = NULL, | | 198 | .e_tracesig = NULL, |
199 | .e_sigcode = NULL, | | 199 | .e_sigcode = NULL, |
200 | .e_esigcode = NULL, | | 200 | .e_esigcode = NULL, |
201 | .e_sigobject = NULL, | | 201 | .e_sigobject = NULL, |
202 | .e_setregs = setregs, | | 202 | .e_setregs = setregs, |
203 | .e_proc_exec = NULL, | | 203 | .e_proc_exec = NULL, |
204 | .e_proc_fork = NULL, | | 204 | .e_proc_fork = NULL, |
205 | .e_proc_exit = NULL, | | 205 | .e_proc_exit = NULL, |
206 | .e_lwp_fork = NULL, | | 206 | .e_lwp_fork = NULL, |
207 | .e_lwp_exit = NULL, | | 207 | .e_lwp_exit = NULL, |
208 | #ifdef __HAVE_SYSCALL_INTERN | | 208 | #ifdef __HAVE_SYSCALL_INTERN |
209 | .e_syscall_intern = syscall_intern, | | 209 | .e_syscall_intern = syscall_intern, |
210 | #else | | 210 | #else |
211 | .e_syscall = syscall, | | 211 | .e_syscall = syscall, |
212 | #endif | | 212 | #endif |
213 | .e_sysctlovly = NULL, | | 213 | .e_sysctlovly = NULL, |
214 | .e_fault = NULL, | | 214 | .e_fault = NULL, |
215 | .e_vm_default_addr = uvm_default_mapaddr, | | 215 | .e_vm_default_addr = uvm_default_mapaddr, |
216 | .e_usertrap = NULL, | | 216 | .e_usertrap = NULL, |
217 | #ifdef KERN_SA | | 217 | #ifdef KERN_SA |
218 | .e_sa = &saemul_netbsd, | | 218 | .e_sa = &saemul_netbsd, |
219 | #else | | 219 | #else |
220 | .e_sa = NULL, | | 220 | .e_sa = NULL, |
221 | #endif | | 221 | #endif |
222 | .e_ucsize = sizeof(ucontext_t), | | 222 | .e_ucsize = sizeof(ucontext_t), |
223 | .e_startlwp = startlwp | | 223 | .e_startlwp = startlwp |
224 | }; | | 224 | }; |
225 | | | 225 | |
226 | /* | | 226 | /* |
227 | * Exec lock. Used to control access to execsw[] structures. | | 227 | * Exec lock. Used to control access to execsw[] structures. |
228 | * This must not be static so that netbsd32 can access it, too. | | 228 | * This must not be static so that netbsd32 can access it, too. |
229 | */ | | 229 | */ |
230 | krwlock_t exec_lock; | | 230 | krwlock_t exec_lock; |
231 | | | 231 | |
232 | static kmutex_t sigobject_lock; | | 232 | static kmutex_t sigobject_lock; |
233 | | | 233 | |
234 | static void * | | 234 | static void * |
235 | exec_pool_alloc(struct pool *pp, int flags) | | 235 | exec_pool_alloc(struct pool *pp, int flags) |
236 | { | | 236 | { |
237 | | | 237 | |
238 | return (void *)uvm_km_alloc(kernel_map, NCARGS, 0, | | 238 | return (void *)uvm_km_alloc(kernel_map, NCARGS, 0, |
239 | UVM_KMF_PAGEABLE | UVM_KMF_WAITVA); | | 239 | UVM_KMF_PAGEABLE | UVM_KMF_WAITVA); |
240 | } | | 240 | } |
241 | | | 241 | |
242 | static void | | 242 | static void |
243 | exec_pool_free(struct pool *pp, void *addr) | | 243 | exec_pool_free(struct pool *pp, void *addr) |
244 | { | | 244 | { |
245 | | | 245 | |
246 | uvm_km_free(kernel_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE); | | 246 | uvm_km_free(kernel_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE); |
247 | } | | 247 | } |
248 | | | 248 | |
249 | static struct pool exec_pool; | | 249 | static struct pool exec_pool; |
250 | | | 250 | |
251 | static struct pool_allocator exec_palloc = { | | 251 | static struct pool_allocator exec_palloc = { |
252 | .pa_alloc = exec_pool_alloc, | | 252 | .pa_alloc = exec_pool_alloc, |
253 | .pa_free = exec_pool_free, | | 253 | .pa_free = exec_pool_free, |
254 | .pa_pagesz = NCARGS | | 254 | .pa_pagesz = NCARGS |
255 | }; | | 255 | }; |
256 | | | 256 | |
257 | /* | | 257 | /* |
258 | * check exec: | | 258 | * check exec: |
259 | * given an "executable" described in the exec package's namei info, | | 259 | * given an "executable" described in the exec package's namei info, |
260 | * see what we can do with it. | | 260 | * see what we can do with it. |
261 | * | | 261 | * |
262 | * ON ENTRY: | | 262 | * ON ENTRY: |
263 | * exec package with appropriate namei info | | 263 | * exec package with appropriate namei info |
264 | * lwp pointer of exec'ing lwp | | 264 | * lwp pointer of exec'ing lwp |
265 | * NO SELF-LOCKED VNODES | | 265 | * NO SELF-LOCKED VNODES |
266 | * | | 266 | * |
267 | * ON EXIT: | | 267 | * ON EXIT: |
268 | * error: nothing held, etc. exec header still allocated. | | 268 | * error: nothing held, etc. exec header still allocated. |
269 | * ok: filled exec package, executable's vnode (unlocked). | | 269 | * ok: filled exec package, executable's vnode (unlocked). |
270 | * | | 270 | * |
271 | * EXEC SWITCH ENTRY: | | 271 | * EXEC SWITCH ENTRY: |
272 | * Locked vnode to check, exec package, proc. | | 272 | * Locked vnode to check, exec package, proc. |
273 | * | | 273 | * |
274 | * EXEC SWITCH EXIT: | | 274 | * EXEC SWITCH EXIT: |
275 | * ok: return 0, filled exec package, executable's vnode (unlocked). | | 275 | * ok: return 0, filled exec package, executable's vnode (unlocked). |
276 | * error: destructive: | | 276 | * error: destructive: |
277 | * everything deallocated execept exec header. | | 277 | * everything deallocated execept exec header. |
278 | * non-destructive: | | 278 | * non-destructive: |
279 | * error code, executable's vnode (unlocked), | | 279 | * error code, executable's vnode (unlocked), |
280 | * exec header unmodified. | | 280 | * exec header unmodified. |
281 | */ | | 281 | */ |
282 | int | | 282 | int |
283 | /*ARGSUSED*/ | | 283 | /*ARGSUSED*/ |
284 | check_exec(struct lwp *l, struct exec_package *epp, struct pathbuf *pb) | | 284 | check_exec(struct lwp *l, struct exec_package *epp, struct pathbuf *pb) |
285 | { | | 285 | { |
286 | int error, i; | | 286 | int error, i; |
287 | struct vnode *vp; | | 287 | struct vnode *vp; |
288 | struct nameidata nd; | | 288 | struct nameidata nd; |
289 | size_t resid; | | 289 | size_t resid; |
290 | | | 290 | |
291 | NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb); | | 291 | NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb); |
292 | | | 292 | |
293 | /* first get the vnode */ | | 293 | /* first get the vnode */ |
294 | if ((error = namei(&nd)) != 0) | | 294 | if ((error = namei(&nd)) != 0) |
295 | return error; | | 295 | return error; |
296 | epp->ep_vp = vp = nd.ni_vp; | | 296 | epp->ep_vp = vp = nd.ni_vp; |
297 | /* this cannot overflow as both are size PATH_MAX */ | | 297 | /* this cannot overflow as both are size PATH_MAX */ |
298 | strcpy(epp->ep_resolvedname, nd.ni_pnbuf); | | 298 | strcpy(epp->ep_resolvedname, nd.ni_pnbuf); |
299 | | | 299 | |
300 | #ifdef DIAGNOSTIC | | 300 | #ifdef DIAGNOSTIC |
301 | /* paranoia (take this out once namei stuff stabilizes) */ | | 301 | /* paranoia (take this out once namei stuff stabilizes) */ |
302 | memset(nd.ni_pnbuf, '~', PATH_MAX); | | 302 | memset(nd.ni_pnbuf, '~', PATH_MAX); |
303 | #endif | | 303 | #endif |
304 | | | 304 | |
305 | /* check access and type */ | | 305 | /* check access and type */ |
306 | if (vp->v_type != VREG) { | | 306 | if (vp->v_type != VREG) { |
307 | error = EACCES; | | 307 | error = EACCES; |
308 | goto bad1; | | 308 | goto bad1; |
309 | } | | 309 | } |
310 | if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0) | | 310 | if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0) |
311 | goto bad1; | | 311 | goto bad1; |
312 | | | 312 | |
313 | /* get attributes */ | | 313 | /* get attributes */ |
314 | if ((error = VOP_GETATTR(vp, epp->ep_vap, l->l_cred)) != 0) | | 314 | if ((error = VOP_GETATTR(vp, epp->ep_vap, l->l_cred)) != 0) |
315 | goto bad1; | | 315 | goto bad1; |
316 | | | 316 | |
317 | /* Check mount point */ | | 317 | /* Check mount point */ |
318 | if (vp->v_mount->mnt_flag & MNT_NOEXEC) { | | 318 | if (vp->v_mount->mnt_flag & MNT_NOEXEC) { |
319 | error = EACCES; | | 319 | error = EACCES; |
320 | goto bad1; | | 320 | goto bad1; |
321 | } | | 321 | } |
322 | if (vp->v_mount->mnt_flag & MNT_NOSUID) | | 322 | if (vp->v_mount->mnt_flag & MNT_NOSUID) |
323 | epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID); | | 323 | epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID); |
324 | | | 324 | |
325 | /* try to open it */ | | 325 | /* try to open it */ |
326 | if ((error = VOP_OPEN(vp, FREAD, l->l_cred)) != 0) | | 326 | if ((error = VOP_OPEN(vp, FREAD, l->l_cred)) != 0) |
327 | goto bad1; | | 327 | goto bad1; |
328 | | | 328 | |
329 | /* unlock vp, since we need it unlocked from here on out. */ | | 329 | /* unlock vp, since we need it unlocked from here on out. */ |
330 | VOP_UNLOCK(vp); | | 330 | VOP_UNLOCK(vp); |
331 | | | 331 | |
332 | #if NVERIEXEC > 0 | | 332 | #if NVERIEXEC > 0 |
333 | error = veriexec_verify(l, vp, epp->ep_resolvedname, | | 333 | error = veriexec_verify(l, vp, epp->ep_resolvedname, |
334 | epp->ep_flags & EXEC_INDIR ? VERIEXEC_INDIRECT : VERIEXEC_DIRECT, | | 334 | epp->ep_flags & EXEC_INDIR ? VERIEXEC_INDIRECT : VERIEXEC_DIRECT, |
335 | NULL); | | 335 | NULL); |
336 | if (error) | | 336 | if (error) |
337 | goto bad2; | | 337 | goto bad2; |
338 | #endif /* NVERIEXEC > 0 */ | | 338 | #endif /* NVERIEXEC > 0 */ |
339 | | | 339 | |
340 | #ifdef PAX_SEGVGUARD | | 340 | #ifdef PAX_SEGVGUARD |
341 | error = pax_segvguard(l, vp, epp->ep_resolvedname, false); | | 341 | error = pax_segvguard(l, vp, epp->ep_resolvedname, false); |
342 | if (error) | | 342 | if (error) |
343 | goto bad2; | | 343 | goto bad2; |
344 | #endif /* PAX_SEGVGUARD */ | | 344 | #endif /* PAX_SEGVGUARD */ |
345 | | | 345 | |
346 | /* now we have the file, get the exec header */ | | 346 | /* now we have the file, get the exec header */ |
347 | error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0, | | 347 | error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0, |
348 | UIO_SYSSPACE, 0, l->l_cred, &resid, NULL); | | 348 | UIO_SYSSPACE, 0, l->l_cred, &resid, NULL); |
349 | if (error) | | 349 | if (error) |
350 | goto bad2; | | 350 | goto bad2; |
351 | epp->ep_hdrvalid = epp->ep_hdrlen - resid; | | 351 | epp->ep_hdrvalid = epp->ep_hdrlen - resid; |
352 | | | 352 | |
353 | /* | | 353 | /* |
354 | * Set up default address space limits. Can be overridden | | 354 | * Set up default address space limits. Can be overridden |
355 | * by individual exec packages. | | 355 | * by individual exec packages. |
356 | * | | 356 | * |
357 | * XXX probably should be all done in the exec packages. | | 357 | * XXX probably should be all done in the exec packages. |
358 | */ | | 358 | */ |
359 | epp->ep_vm_minaddr = VM_MIN_ADDRESS; | | 359 | epp->ep_vm_minaddr = VM_MIN_ADDRESS; |
360 | epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS; | | 360 | epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS; |
361 | /* | | 361 | /* |
362 | * set up the vmcmds for creation of the process | | 362 | * set up the vmcmds for creation of the process |
363 | * address space | | 363 | * address space |
364 | */ | | 364 | */ |
365 | error = ENOEXEC; | | 365 | error = ENOEXEC; |
366 | for (i = 0; i < nexecs; i++) { | | 366 | for (i = 0; i < nexecs; i++) { |
367 | int newerror; | | 367 | int newerror; |
368 | | | 368 | |
369 | epp->ep_esch = execsw[i]; | | 369 | epp->ep_esch = execsw[i]; |
370 | newerror = (*execsw[i]->es_makecmds)(l, epp); | | 370 | newerror = (*execsw[i]->es_makecmds)(l, epp); |
371 | | | 371 | |
372 | if (!newerror) { | | 372 | if (!newerror) { |
373 | /* Seems ok: check that entry point is not too high */ | | 373 | /* Seems ok: check that entry point is not too high */ |
374 | if (epp->ep_entry > VM_MAXUSER_ADDRESS) { | | 374 | if (epp->ep_entry > VM_MAXUSER_ADDRESS) { |
375 | aprint_verbose("check_exec: rejecting due to " | | 375 | aprint_verbose("check_exec: rejecting due to " |
376 | "too high entry address\n"); | | 376 | "too high entry address\n"); |
377 | error = ENOEXEC; | | 377 | error = ENOEXEC; |
378 | break; | | 378 | break; |
379 | } | | 379 | } |
380 | /* Seems ok: check that entry point is not too low */ | | 380 | /* Seems ok: check that entry point is not too low */ |
381 | if (epp->ep_entry < VM_MIN_ADDRESS) { | | 381 | if (epp->ep_entry < VM_MIN_ADDRESS) { |
382 | aprint_verbose("check_exec: rejecting due to " | | 382 | aprint_verbose("check_exec: rejecting due to " |
383 | "too low entry address\n"); | | 383 | "too low entry address\n"); |
384 | error = ENOEXEC; | | 384 | error = ENOEXEC; |
385 | break; | | 385 | break; |
386 | } | | 386 | } |
387 | | | 387 | |
388 | /* check limits */ | | 388 | /* check limits */ |
389 | if ((epp->ep_tsize > MAXTSIZ) || | | 389 | if ((epp->ep_tsize > MAXTSIZ) || |
390 | (epp->ep_dsize > (u_quad_t)l->l_proc->p_rlimit | | 390 | (epp->ep_dsize > (u_quad_t)l->l_proc->p_rlimit |
391 | [RLIMIT_DATA].rlim_cur)) { | | 391 | [RLIMIT_DATA].rlim_cur)) { |
392 | aprint_debug("check_exec: rejecting due to " | | 392 | aprint_verbose("check_exec: rejecting due to " |
393 | "limits\n"); | | 393 | "limits\n"); |
394 | error = ENOMEM; | | 394 | error = ENOMEM; |
395 | break; | | 395 | break; |
396 | } | | 396 | } |
397 | return 0; | | 397 | return 0; |
398 | } | | 398 | } |
399 | | | 399 | |
400 | if (epp->ep_emul_root != NULL) { | | 400 | if (epp->ep_emul_root != NULL) { |
401 | vrele(epp->ep_emul_root); | | 401 | vrele(epp->ep_emul_root); |
402 | epp->ep_emul_root = NULL; | | 402 | epp->ep_emul_root = NULL; |
403 | } | | 403 | } |
404 | if (epp->ep_interp != NULL) { | | 404 | if (epp->ep_interp != NULL) { |
405 | vrele(epp->ep_interp); | | 405 | vrele(epp->ep_interp); |
406 | epp->ep_interp = NULL; | | 406 | epp->ep_interp = NULL; |
407 | } | | 407 | } |
408 | | | 408 | |
409 | /* make sure the first "interesting" error code is saved. */ | | 409 | /* make sure the first "interesting" error code is saved. */ |
410 | if (error == ENOEXEC) | | 410 | if (error == ENOEXEC) |
411 | error = newerror; | | 411 | error = newerror; |
412 | | | 412 | |
413 | if (epp->ep_flags & EXEC_DESTR) | | 413 | if (epp->ep_flags & EXEC_DESTR) |
414 | /* Error from "#!" code, tidied up by recursive call */ | | 414 | /* Error from "#!" code, tidied up by recursive call */ |
415 | return error; | | 415 | return error; |
416 | } | | 416 | } |
417 | | | 417 | |
418 | /* not found, error */ | | 418 | /* not found, error */ |
419 | | | 419 | |
420 | /* | | 420 | /* |
421 | * free any vmspace-creation commands, | | 421 | * free any vmspace-creation commands, |
422 | * and release their references | | 422 | * and release their references |
423 | */ | | 423 | */ |
424 | kill_vmcmds(&epp->ep_vmcmds); | | 424 | kill_vmcmds(&epp->ep_vmcmds); |
425 | | | 425 | |
426 | bad2: | | 426 | bad2: |
427 | /* | | 427 | /* |
428 | * close and release the vnode, restore the old one, free the | | 428 | * close and release the vnode, restore the old one, free the |
429 | * pathname buf, and punt. | | 429 | * pathname buf, and punt. |
430 | */ | | 430 | */ |
431 | vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); | | 431 | vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); |
432 | VOP_CLOSE(vp, FREAD, l->l_cred); | | 432 | VOP_CLOSE(vp, FREAD, l->l_cred); |
433 | vput(vp); | | 433 | vput(vp); |
434 | return error; | | 434 | return error; |
435 | | | 435 | |
436 | bad1: | | 436 | bad1: |
437 | /* | | 437 | /* |
438 | * free the namei pathname buffer, and put the vnode | | 438 | * free the namei pathname buffer, and put the vnode |
439 | * (which we don't yet have open). | | 439 | * (which we don't yet have open). |
440 | */ | | 440 | */ |
441 | vput(vp); /* was still locked */ | | 441 | vput(vp); /* was still locked */ |
442 | return error; | | 442 | return error; |
443 | } | | 443 | } |
444 | | | 444 | |
445 | #ifdef __MACHINE_STACK_GROWS_UP | | 445 | #ifdef __MACHINE_STACK_GROWS_UP |
446 | #define STACK_PTHREADSPACE NBPG | | 446 | #define STACK_PTHREADSPACE NBPG |
447 | #else | | 447 | #else |
448 | #define STACK_PTHREADSPACE 0 | | 448 | #define STACK_PTHREADSPACE 0 |
449 | #endif | | 449 | #endif |
450 | | | 450 | |
451 | static int | | 451 | static int |
452 | execve_fetch_element(char * const *array, size_t index, char **value) | | 452 | execve_fetch_element(char * const *array, size_t index, char **value) |
453 | { | | 453 | { |
454 | return copyin(array + index, value, sizeof(*value)); | | 454 | return copyin(array + index, value, sizeof(*value)); |
455 | } | | 455 | } |
456 | | | 456 | |
457 | /* | | 457 | /* |
458 | * exec system call | | 458 | * exec system call |
459 | */ | | 459 | */ |
460 | /* ARGSUSED */ | | 460 | /* ARGSUSED */ |
461 | int | | 461 | int |
462 | sys_execve(struct lwp *l, const struct sys_execve_args *uap, register_t *retval) | | 462 | sys_execve(struct lwp *l, const struct sys_execve_args *uap, register_t *retval) |
463 | { | | 463 | { |
464 | /* { | | 464 | /* { |
465 | syscallarg(const char *) path; | | 465 | syscallarg(const char *) path; |
466 | syscallarg(char * const *) argp; | | 466 | syscallarg(char * const *) argp; |
467 | syscallarg(char * const *) envp; | | 467 | syscallarg(char * const *) envp; |
468 | } */ | | 468 | } */ |
469 | | | 469 | |
470 | return execve1(l, SCARG(uap, path), SCARG(uap, argp), | | 470 | return execve1(l, SCARG(uap, path), SCARG(uap, argp), |
471 | SCARG(uap, envp), execve_fetch_element); | | 471 | SCARG(uap, envp), execve_fetch_element); |
472 | } | | 472 | } |
473 | | | 473 | |
/*
 * fexecve system call: execute an image referenced by an already-open
 * file descriptor rather than by pathname.  Not implemented in this
 * revision; always fails with ENOSYS.
 */
int
sys_fexecve(struct lwp *l, const struct sys_fexecve_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(char * const *) argp;
		syscallarg(char * const *) envp;
	} */

	/* XXX unimplemented stub; see sys_execve()/execve1() for the
	 * pathname-based exec path. */
	return ENOSYS;
}
486 | | | 486 | |
487 | /* | | 487 | /* |
488 | * Load modules to try and execute an image that we do not understand. | | 488 | * Load modules to try and execute an image that we do not understand. |
489 | * If no execsw entries are present, we load those likely to be needed | | 489 | * If no execsw entries are present, we load those likely to be needed |
490 | * in order to run native images only. Otherwise, we autoload all | | 490 | * in order to run native images only. Otherwise, we autoload all |
491 | * possible modules that could let us run the binary. XXX lame | | 491 | * possible modules that could let us run the binary. XXX lame |
492 | */ | | 492 | */ |
493 | static void | | 493 | static void |
494 | exec_autoload(void) | | 494 | exec_autoload(void) |
495 | { | | 495 | { |
496 | #ifdef MODULAR | | 496 | #ifdef MODULAR |
497 | static const char * const native[] = { | | 497 | static const char * const native[] = { |
498 | "exec_elf32", | | 498 | "exec_elf32", |
499 | "exec_elf64", | | 499 | "exec_elf64", |
500 | "exec_script", | | 500 | "exec_script", |
501 | NULL | | 501 | NULL |
502 | }; | | 502 | }; |
503 | static const char * const compat[] = { | | 503 | static const char * const compat[] = { |
504 | "exec_elf32", | | 504 | "exec_elf32", |
505 | "exec_elf64", | | 505 | "exec_elf64", |
506 | "exec_script", | | 506 | "exec_script", |
507 | "exec_aout", | | 507 | "exec_aout", |
508 | "exec_coff", | | 508 | "exec_coff", |
509 | "exec_ecoff", | | 509 | "exec_ecoff", |
510 | "compat_aoutm68k", | | 510 | "compat_aoutm68k", |
511 | "compat_freebsd", | | 511 | "compat_freebsd", |
512 | "compat_ibcs2", | | 512 | "compat_ibcs2", |
513 | "compat_linux", | | 513 | "compat_linux", |
514 | "compat_linux32", | | 514 | "compat_linux32", |
515 | "compat_netbsd32", | | 515 | "compat_netbsd32", |
516 | "compat_sunos", | | 516 | "compat_sunos", |
517 | "compat_sunos32", | | 517 | "compat_sunos32", |
518 | "compat_svr4", | | 518 | "compat_svr4", |
519 | "compat_svr4_32", | | 519 | "compat_svr4_32", |
520 | "compat_ultrix", | | 520 | "compat_ultrix", |
521 | NULL | | 521 | NULL |
522 | }; | | 522 | }; |
523 | char const * const *list; | | 523 | char const * const *list; |
524 | int i; | | 524 | int i; |
525 | | | 525 | |
526 | list = (nexecs == 0 ? native : compat); | | 526 | list = (nexecs == 0 ? native : compat); |
527 | for (i = 0; list[i] != NULL; i++) { | | 527 | for (i = 0; list[i] != NULL; i++) { |
528 | if (module_autoload(list[i], MODULE_CLASS_MISC) != 0) { | | 528 | if (module_autoload(list[i], MODULE_CLASS_MISC) != 0) { |
529 | continue; | | 529 | continue; |
530 | } | | 530 | } |
531 | yield(); | | 531 | yield(); |
532 | } | | 532 | } |
533 | #endif | | 533 | #endif |
534 | } | | 534 | } |
535 | | | 535 | |
536 | int | | 536 | int |
537 | execve1(struct lwp *l, const char *path, char * const *args, | | 537 | execve1(struct lwp *l, const char *path, char * const *args, |
538 | char * const *envs, execve_fetch_element_t fetch_element) | | 538 | char * const *envs, execve_fetch_element_t fetch_element) |
539 | { | | 539 | { |
540 | int error; | | 540 | int error; |
541 | struct exec_package pack; | | 541 | struct exec_package pack; |
542 | struct pathbuf *pb; | | 542 | struct pathbuf *pb; |
543 | struct vattr attr; | | 543 | struct vattr attr; |
544 | struct proc *p; | | 544 | struct proc *p; |
545 | char *argp; | | 545 | char *argp; |
546 | char *dp, *sp; | | 546 | char *dp, *sp; |
547 | long argc, envc; | | 547 | long argc, envc; |
548 | size_t i, len; | | 548 | size_t i, len; |
549 | char *stack; | | 549 | char *stack; |
550 | struct ps_strings arginfo; | | 550 | struct ps_strings arginfo; |
551 | struct ps_strings32 arginfo32; | | 551 | struct ps_strings32 arginfo32; |
552 | void *aip; | | 552 | void *aip; |
553 | struct vmspace *vm; | | 553 | struct vmspace *vm; |
554 | struct exec_fakearg *tmpfap; | | 554 | struct exec_fakearg *tmpfap; |
555 | int szsigcode; | | 555 | int szsigcode; |
556 | struct exec_vmcmd *base_vcp; | | 556 | struct exec_vmcmd *base_vcp; |
557 | int oldlwpflags; | | 557 | int oldlwpflags; |
558 | ksiginfo_t ksi; | | 558 | ksiginfo_t ksi; |
559 | ksiginfoq_t kq; | | 559 | ksiginfoq_t kq; |
560 | const char *pathstring; | | 560 | const char *pathstring; |
561 | char *resolvedpathbuf; | | 561 | char *resolvedpathbuf; |
562 | const char *commandname; | | 562 | const char *commandname; |
563 | u_int modgen; | | 563 | u_int modgen; |
564 | size_t ps_strings_sz; | | 564 | size_t ps_strings_sz; |
565 | | | 565 | |
566 | p = l->l_proc; | | 566 | p = l->l_proc; |
567 | modgen = 0; | | 567 | modgen = 0; |
568 | | | 568 | |
569 | SDT_PROBE(proc,,,exec, path, 0, 0, 0, 0); | | 569 | SDT_PROBE(proc,,,exec, path, 0, 0, 0, 0); |
570 | | | 570 | |
571 | /* | | 571 | /* |
572 | * Check if we have exceeded our number of processes limit. | | 572 | * Check if we have exceeded our number of processes limit. |
573 | * This is so that we handle the case where a root daemon | | 573 | * This is so that we handle the case where a root daemon |
574 | * forked, ran setuid to become the desired user and is trying | | 574 | * forked, ran setuid to become the desired user and is trying |
575 | * to exec. The obvious place to do the reference counting check | | 575 | * to exec. The obvious place to do the reference counting check |
576 | * is setuid(), but we don't do the reference counting check there | | 576 | * is setuid(), but we don't do the reference counting check there |
577 | * like other OS's do because then all the programs that use setuid() | | 577 | * like other OS's do because then all the programs that use setuid() |
578 | * must be modified to check the return code of setuid() and exit(). | | 578 | * must be modified to check the return code of setuid() and exit(). |
579 | * It is dangerous to make setuid() fail, because it fails open and | | 579 | * It is dangerous to make setuid() fail, because it fails open and |
580 | * the program will continue to run as root. If we make it succeed | | 580 | * the program will continue to run as root. If we make it succeed |
581 | * and return an error code, again we are not enforcing the limit. | | 581 | * and return an error code, again we are not enforcing the limit. |
582 | * The best place to enforce the limit is here, when the process tries | | 582 | * The best place to enforce the limit is here, when the process tries |
583 | * to execute a new image, because eventually the process will need | | 583 | * to execute a new image, because eventually the process will need |
584 | * to call exec in order to do something useful. | | 584 | * to call exec in order to do something useful. |
585 | */ | | 585 | */ |
586 | retry: | | 586 | retry: |
587 | if ((p->p_flag & PK_SUGID) && kauth_authorize_generic(l->l_cred, | | 587 | if ((p->p_flag & PK_SUGID) && kauth_authorize_generic(l->l_cred, |
588 | KAUTH_GENERIC_ISSUSER, NULL) != 0 && chgproccnt(kauth_cred_getuid( | | 588 | KAUTH_GENERIC_ISSUSER, NULL) != 0 && chgproccnt(kauth_cred_getuid( |
589 | l->l_cred), 0) > p->p_rlimit[RLIMIT_NPROC].rlim_cur) | | 589 | l->l_cred), 0) > p->p_rlimit[RLIMIT_NPROC].rlim_cur) |
590 | return EAGAIN; | | 590 | return EAGAIN; |
591 | | | 591 | |
592 | oldlwpflags = l->l_flag & (LW_SA | LW_SA_UPCALL); | | 592 | oldlwpflags = l->l_flag & (LW_SA | LW_SA_UPCALL); |
593 | if (l->l_flag & LW_SA) { | | 593 | if (l->l_flag & LW_SA) { |
594 | lwp_lock(l); | | 594 | lwp_lock(l); |
595 | l->l_flag &= ~(LW_SA | LW_SA_UPCALL); | | 595 | l->l_flag &= ~(LW_SA | LW_SA_UPCALL); |
596 | lwp_unlock(l); | | 596 | lwp_unlock(l); |
597 | } | | 597 | } |
598 | | | 598 | |
599 | /* | | 599 | /* |
600 | * Drain existing references and forbid new ones. The process | | 600 | * Drain existing references and forbid new ones. The process |
601 | * should be left alone until we're done here. This is necessary | | 601 | * should be left alone until we're done here. This is necessary |
602 | * to avoid race conditions - e.g. in ptrace() - that might allow | | 602 | * to avoid race conditions - e.g. in ptrace() - that might allow |
603 | * a local user to illicitly obtain elevated privileges. | | 603 | * a local user to illicitly obtain elevated privileges. |
604 | */ | | 604 | */ |
605 | rw_enter(&p->p_reflock, RW_WRITER); | | 605 | rw_enter(&p->p_reflock, RW_WRITER); |
606 | | | 606 | |
607 | base_vcp = NULL; | | 607 | base_vcp = NULL; |
608 | /* | | 608 | /* |
609 | * Init the namei data to point the file user's program name. | | 609 | * Init the namei data to point the file user's program name. |
610 | * This is done here rather than in check_exec(), so that it's | | 610 | * This is done here rather than in check_exec(), so that it's |
611 | * possible to override this settings if any of makecmd/probe | | 611 | * possible to override this settings if any of makecmd/probe |
612 | * functions call check_exec() recursively - for example, | | 612 | * functions call check_exec() recursively - for example, |
613 | * see exec_script_makecmds(). | | 613 | * see exec_script_makecmds(). |
614 | */ | | 614 | */ |
615 | error = pathbuf_copyin(path, &pb); | | 615 | error = pathbuf_copyin(path, &pb); |
616 | if (error) { | | 616 | if (error) { |
617 | DPRINTF(("%s: pathbuf_copyin path @%p %d\n", __func__, | | 617 | DPRINTF(("%s: pathbuf_copyin path @%p %d\n", __func__, |
618 | path, error)); | | 618 | path, error)); |
619 | goto clrflg; | | 619 | goto clrflg; |
620 | } | | 620 | } |
621 | pathstring = pathbuf_stringcopy_get(pb); | | 621 | pathstring = pathbuf_stringcopy_get(pb); |
622 | resolvedpathbuf = PNBUF_GET(); | | 622 | resolvedpathbuf = PNBUF_GET(); |
623 | #ifdef DIAGNOSTIC | | 623 | #ifdef DIAGNOSTIC |
624 | strcpy(resolvedpathbuf, "/wrong"); | | 624 | strcpy(resolvedpathbuf, "/wrong"); |
625 | #endif | | 625 | #endif |
626 | | | 626 | |
627 | /* | | 627 | /* |
628 | * initialize the fields of the exec package. | | 628 | * initialize the fields of the exec package. |
629 | */ | | 629 | */ |
630 | pack.ep_name = path; | | 630 | pack.ep_name = path; |
631 | pack.ep_kname = pathstring; | | 631 | pack.ep_kname = pathstring; |
632 | pack.ep_resolvedname = resolvedpathbuf; | | 632 | pack.ep_resolvedname = resolvedpathbuf; |
633 | pack.ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP); | | 633 | pack.ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP); |
634 | pack.ep_hdrlen = exec_maxhdrsz; | | 634 | pack.ep_hdrlen = exec_maxhdrsz; |
635 | pack.ep_hdrvalid = 0; | | 635 | pack.ep_hdrvalid = 0; |
636 | pack.ep_emul_arg = NULL; | | 636 | pack.ep_emul_arg = NULL; |
637 | pack.ep_vmcmds.evs_cnt = 0; | | 637 | pack.ep_vmcmds.evs_cnt = 0; |
638 | pack.ep_vmcmds.evs_used = 0; | | 638 | pack.ep_vmcmds.evs_used = 0; |
639 | pack.ep_vap = &attr; | | 639 | pack.ep_vap = &attr; |
640 | pack.ep_flags = 0; | | 640 | pack.ep_flags = 0; |
641 | pack.ep_emul_root = NULL; | | 641 | pack.ep_emul_root = NULL; |
642 | pack.ep_interp = NULL; | | 642 | pack.ep_interp = NULL; |
643 | pack.ep_esch = NULL; | | 643 | pack.ep_esch = NULL; |
644 | pack.ep_pax_flags = 0; | | 644 | pack.ep_pax_flags = 0; |
645 | | | 645 | |
646 | rw_enter(&exec_lock, RW_READER); | | 646 | rw_enter(&exec_lock, RW_READER); |
647 | | | 647 | |
648 | /* see if we can run it. */ | | 648 | /* see if we can run it. */ |
649 | if ((error = check_exec(l, &pack, pb)) != 0) { | | 649 | if ((error = check_exec(l, &pack, pb)) != 0) { |
650 | if (error != ENOENT) { | | 650 | if (error != ENOENT) { |
651 | DPRINTF(("%s: check exec failed %d\n", | | 651 | DPRINTF(("%s: check exec failed %d\n", |
652 | __func__, error)); | | 652 | __func__, error)); |
653 | } | | 653 | } |
654 | goto freehdr; | | 654 | goto freehdr; |
655 | } | | 655 | } |
656 | | | 656 | |
657 | /* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */ | | 657 | /* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */ |
658 | | | 658 | |
659 | /* allocate an argument buffer */ | | 659 | /* allocate an argument buffer */ |
660 | argp = pool_get(&exec_pool, PR_WAITOK); | | 660 | argp = pool_get(&exec_pool, PR_WAITOK); |
661 | KASSERT(argp != NULL); | | 661 | KASSERT(argp != NULL); |
662 | dp = argp; | | 662 | dp = argp; |
663 | argc = 0; | | 663 | argc = 0; |
664 | | | 664 | |
665 | /* copy the fake args list, if there's one, freeing it as we go */ | | 665 | /* copy the fake args list, if there's one, freeing it as we go */ |
666 | if (pack.ep_flags & EXEC_HASARGL) { | | 666 | if (pack.ep_flags & EXEC_HASARGL) { |
667 | tmpfap = pack.ep_fa; | | 667 | tmpfap = pack.ep_fa; |
668 | while (tmpfap->fa_arg != NULL) { | | 668 | while (tmpfap->fa_arg != NULL) { |
669 | const char *cp; | | 669 | const char *cp; |
670 | | | 670 | |
671 | cp = tmpfap->fa_arg; | | 671 | cp = tmpfap->fa_arg; |
672 | while (*cp) | | 672 | while (*cp) |
673 | *dp++ = *cp++; | | 673 | *dp++ = *cp++; |
674 | *dp++ = '\0'; | | 674 | *dp++ = '\0'; |
675 | ktrexecarg(tmpfap->fa_arg, cp - tmpfap->fa_arg); | | 675 | ktrexecarg(tmpfap->fa_arg, cp - tmpfap->fa_arg); |
676 | | | 676 | |
677 | kmem_free(tmpfap->fa_arg, tmpfap->fa_len); | | 677 | kmem_free(tmpfap->fa_arg, tmpfap->fa_len); |
678 | tmpfap++; argc++; | | 678 | tmpfap++; argc++; |
679 | } | | 679 | } |
680 | kmem_free(pack.ep_fa, pack.ep_fa_len); | | 680 | kmem_free(pack.ep_fa, pack.ep_fa_len); |
681 | pack.ep_flags &= ~EXEC_HASARGL; | | 681 | pack.ep_flags &= ~EXEC_HASARGL; |
682 | } | | 682 | } |
683 | | | 683 | |
684 | /* Now get argv & environment */ | | 684 | /* Now get argv & environment */ |
685 | if (args == NULL) { | | 685 | if (args == NULL) { |
686 | DPRINTF(("%s: null args\n", __func__)); | | 686 | DPRINTF(("%s: null args\n", __func__)); |
687 | error = EINVAL; | | 687 | error = EINVAL; |
688 | goto bad; | | 688 | goto bad; |
689 | } | | 689 | } |
690 | /* 'i' will index the argp/envp element to be retrieved */ | | 690 | /* 'i' will index the argp/envp element to be retrieved */ |
691 | i = 0; | | 691 | i = 0; |
692 | if (pack.ep_flags & EXEC_SKIPARG) | | 692 | if (pack.ep_flags & EXEC_SKIPARG) |
693 | i++; | | 693 | i++; |
694 | | | 694 | |
695 | while (1) { | | 695 | while (1) { |
696 | len = argp + ARG_MAX - dp; | | 696 | len = argp + ARG_MAX - dp; |
697 | if ((error = (*fetch_element)(args, i, &sp)) != 0) { | | 697 | if ((error = (*fetch_element)(args, i, &sp)) != 0) { |
698 | DPRINTF(("%s: fetch_element args %d\n", | | 698 | DPRINTF(("%s: fetch_element args %d\n", |
699 | __func__, error)); | | 699 | __func__, error)); |
700 | goto bad; | | 700 | goto bad; |
701 | } | | 701 | } |
702 | if (!sp) | | 702 | if (!sp) |
703 | break; | | 703 | break; |
704 | if ((error = copyinstr(sp, dp, len, &len)) != 0) { | | 704 | if ((error = copyinstr(sp, dp, len, &len)) != 0) { |
705 | DPRINTF(("%s: copyinstr args %d\n", __func__, error)); | | 705 | DPRINTF(("%s: copyinstr args %d\n", __func__, error)); |
706 | if (error == ENAMETOOLONG) | | 706 | if (error == ENAMETOOLONG) |
707 | error = E2BIG; | | 707 | error = E2BIG; |
708 | goto bad; | | 708 | goto bad; |
709 | } | | 709 | } |
710 | ktrexecarg(dp, len - 1); | | 710 | ktrexecarg(dp, len - 1); |
711 | dp += len; | | 711 | dp += len; |
712 | i++; | | 712 | i++; |
713 | argc++; | | 713 | argc++; |
714 | } | | 714 | } |
715 | | | 715 | |
716 | envc = 0; | | 716 | envc = 0; |
717 | /* environment need not be there */ | | 717 | /* environment need not be there */ |
718 | if (envs != NULL) { | | 718 | if (envs != NULL) { |
719 | i = 0; | | 719 | i = 0; |
720 | while (1) { | | 720 | while (1) { |
721 | len = argp + ARG_MAX - dp; | | 721 | len = argp + ARG_MAX - dp; |
722 | if ((error = (*fetch_element)(envs, i, &sp)) != 0) { | | 722 | if ((error = (*fetch_element)(envs, i, &sp)) != 0) { |
723 | DPRINTF(("%s: fetch_element env %d\n", | | 723 | DPRINTF(("%s: fetch_element env %d\n", |
724 | __func__, error)); | | 724 | __func__, error)); |
725 | goto bad; | | 725 | goto bad; |
726 | } | | 726 | } |
727 | if (!sp) | | 727 | if (!sp) |
728 | break; | | 728 | break; |
729 | if ((error = copyinstr(sp, dp, len, &len)) != 0) { | | 729 | if ((error = copyinstr(sp, dp, len, &len)) != 0) { |
730 | DPRINTF(("%s: copyinstr env %d\n", | | 730 | DPRINTF(("%s: copyinstr env %d\n", |
731 | __func__, error)); | | 731 | __func__, error)); |
732 | if (error == ENAMETOOLONG) | | 732 | if (error == ENAMETOOLONG) |
733 | error = E2BIG; | | 733 | error = E2BIG; |
734 | goto bad; | | 734 | goto bad; |
735 | } | | 735 | } |
736 | ktrexecenv(dp, len - 1); | | 736 | ktrexecenv(dp, len - 1); |
737 | dp += len; | | 737 | dp += len; |
738 | i++; | | 738 | i++; |
739 | envc++; | | 739 | envc++; |
740 | } | | 740 | } |
741 | } | | 741 | } |
742 | | | 742 | |
743 | dp = (char *) ALIGN(dp); | | 743 | dp = (char *) ALIGN(dp); |
744 | | | 744 | |
745 | szsigcode = pack.ep_esch->es_emul->e_esigcode - | | 745 | szsigcode = pack.ep_esch->es_emul->e_esigcode - |
746 | pack.ep_esch->es_emul->e_sigcode; | | 746 | pack.ep_esch->es_emul->e_sigcode; |
747 | | | 747 | |
748 | #ifdef __MACHINE_STACK_GROWS_UP | | 748 | #ifdef __MACHINE_STACK_GROWS_UP |
749 | /* See big comment lower down */ | | 749 | /* See big comment lower down */ |
750 | #define RTLD_GAP 32 | | 750 | #define RTLD_GAP 32 |
751 | #else | | 751 | #else |
752 | #define RTLD_GAP 0 | | 752 | #define RTLD_GAP 0 |
753 | #endif | | 753 | #endif |
754 | | | 754 | |
755 | /* Now check if args & environ fit into new stack */ | | 755 | /* Now check if args & environ fit into new stack */ |
756 | if (pack.ep_flags & EXEC_32) { | | 756 | if (pack.ep_flags & EXEC_32) { |
757 | aip = &arginfo32; | | 757 | aip = &arginfo32; |
758 | ps_strings_sz = sizeof(struct ps_strings32); | | 758 | ps_strings_sz = sizeof(struct ps_strings32); |
759 | len = ((argc + envc + 2 + pack.ep_esch->es_arglen) * | | 759 | len = ((argc + envc + 2 + pack.ep_esch->es_arglen) * |
760 | sizeof(int) + sizeof(int) + dp + RTLD_GAP + | | 760 | sizeof(int) + sizeof(int) + dp + RTLD_GAP + |
761 | szsigcode + ps_strings_sz + STACK_PTHREADSPACE) | | 761 | szsigcode + ps_strings_sz + STACK_PTHREADSPACE) |
762 | - argp; | | 762 | - argp; |
763 | } else { | | 763 | } else { |
764 | aip = &arginfo; | | 764 | aip = &arginfo; |
765 | ps_strings_sz = sizeof(struct ps_strings); | | 765 | ps_strings_sz = sizeof(struct ps_strings); |
766 | len = ((argc + envc + 2 + pack.ep_esch->es_arglen) * | | 766 | len = ((argc + envc + 2 + pack.ep_esch->es_arglen) * |
767 | sizeof(char *) + sizeof(int) + dp + RTLD_GAP + | | 767 | sizeof(char *) + sizeof(int) + dp + RTLD_GAP + |
768 | szsigcode + ps_strings_sz + STACK_PTHREADSPACE) | | 768 | szsigcode + ps_strings_sz + STACK_PTHREADSPACE) |
769 | - argp; | | 769 | - argp; |
770 | } | | 770 | } |
771 | | | 771 | |
772 | #ifdef PAX_ASLR | | 772 | #ifdef PAX_ASLR |
773 | if (pax_aslr_active(l)) | | 773 | if (pax_aslr_active(l)) |
774 | len += (arc4random() % PAGE_SIZE); | | 774 | len += (arc4random() % PAGE_SIZE); |
775 | #endif /* PAX_ASLR */ | | 775 | #endif /* PAX_ASLR */ |
776 | | | 776 | |
777 | #ifdef STACKLALIGN /* arm, etc. */ | | 777 | #ifdef STACKLALIGN /* arm, etc. */ |
778 | len = STACKALIGN(len); /* make the stack "safely" aligned */ | | 778 | len = STACKALIGN(len); /* make the stack "safely" aligned */ |
779 | #else | | 779 | #else |
780 | len = ALIGN(len); /* make the stack "safely" aligned */ | | 780 | len = ALIGN(len); /* make the stack "safely" aligned */ |
781 | #endif | | 781 | #endif |
782 | | | 782 | |
783 | if (len > pack.ep_ssize) { /* in effect, compare to initial limit */ | | 783 | if (len > pack.ep_ssize) { /* in effect, compare to initial limit */ |
784 | DPRINTF(("%s: stack limit exceeded %zu\n", __func__, len)); | | 784 | DPRINTF(("%s: stack limit exceeded %zu\n", __func__, len)); |
785 | error = ENOMEM; | | 785 | error = ENOMEM; |
786 | goto bad; | | 786 | goto bad; |
787 | } | | 787 | } |
788 | | | 788 | |
789 | /* Get rid of other LWPs. */ | | 789 | /* Get rid of other LWPs. */ |
790 | if (p->p_sa || p->p_nlwps > 1) { | | 790 | if (p->p_sa || p->p_nlwps > 1) { |
791 | mutex_enter(p->p_lock); | | 791 | mutex_enter(p->p_lock); |
792 | exit_lwps(l); | | 792 | exit_lwps(l); |
793 | mutex_exit(p->p_lock); | | 793 | mutex_exit(p->p_lock); |
794 | } | | 794 | } |
795 | KDASSERT(p->p_nlwps == 1); | | 795 | KDASSERT(p->p_nlwps == 1); |
796 | | | 796 | |
797 | /* Destroy any lwpctl info. */ | | 797 | /* Destroy any lwpctl info. */ |
798 | if (p->p_lwpctl != NULL) | | 798 | if (p->p_lwpctl != NULL) |
799 | lwp_ctl_exit(); | | 799 | lwp_ctl_exit(); |
800 | | | 800 | |
801 | #ifdef KERN_SA | | 801 | #ifdef KERN_SA |
802 | /* Release any SA state. */ | | 802 | /* Release any SA state. */ |
803 | if (p->p_sa) | | 803 | if (p->p_sa) |
804 | sa_release(p); | | 804 | sa_release(p); |
805 | #endif /* KERN_SA */ | | 805 | #endif /* KERN_SA */ |
806 | | | 806 | |
807 | /* Remove POSIX timers */ | | 807 | /* Remove POSIX timers */ |
808 | timers_free(p, TIMERS_POSIX); | | 808 | timers_free(p, TIMERS_POSIX); |
809 | | | 809 | |
810 | /* adjust "active stack depth" for process VSZ */ | | 810 | /* adjust "active stack depth" for process VSZ */ |
811 | pack.ep_ssize = len; /* maybe should go elsewhere, but... */ | | 811 | pack.ep_ssize = len; /* maybe should go elsewhere, but... */ |
812 | | | 812 | |
813 | /* | | 813 | /* |
814 | * Do whatever is necessary to prepare the address space | | 814 | * Do whatever is necessary to prepare the address space |
815 | * for remapping. Note that this might replace the current | | 815 | * for remapping. Note that this might replace the current |
816 | * vmspace with another! | | 816 | * vmspace with another! |
817 | */ | | 817 | */ |
818 | uvmspace_exec(l, pack.ep_vm_minaddr, pack.ep_vm_maxaddr); | | 818 | uvmspace_exec(l, pack.ep_vm_minaddr, pack.ep_vm_maxaddr); |
819 | | | 819 | |
820 | /* record proc's vnode, for use by procfs and others */ | | 820 | /* record proc's vnode, for use by procfs and others */ |
821 | if (p->p_textvp) | | 821 | if (p->p_textvp) |
822 | vrele(p->p_textvp); | | 822 | vrele(p->p_textvp); |
823 | vref(pack.ep_vp); | | 823 | vref(pack.ep_vp); |
824 | p->p_textvp = pack.ep_vp; | | 824 | p->p_textvp = pack.ep_vp; |
825 | | | 825 | |
826 | /* Now map address space */ | | 826 | /* Now map address space */ |
827 | vm = p->p_vmspace; | | 827 | vm = p->p_vmspace; |
828 | vm->vm_taddr = (void *)pack.ep_taddr; | | 828 | vm->vm_taddr = (void *)pack.ep_taddr; |
829 | vm->vm_tsize = btoc(pack.ep_tsize); | | 829 | vm->vm_tsize = btoc(pack.ep_tsize); |
830 | vm->vm_daddr = (void*)pack.ep_daddr; | | 830 | vm->vm_daddr = (void*)pack.ep_daddr; |
831 | vm->vm_dsize = btoc(pack.ep_dsize); | | 831 | vm->vm_dsize = btoc(pack.ep_dsize); |
832 | vm->vm_ssize = btoc(pack.ep_ssize); | | 832 | vm->vm_ssize = btoc(pack.ep_ssize); |
833 | vm->vm_issize = 0; | | 833 | vm->vm_issize = 0; |
834 | vm->vm_maxsaddr = (void *)pack.ep_maxsaddr; | | 834 | vm->vm_maxsaddr = (void *)pack.ep_maxsaddr; |
835 | vm->vm_minsaddr = (void *)pack.ep_minsaddr; | | 835 | vm->vm_minsaddr = (void *)pack.ep_minsaddr; |
836 | | | 836 | |
837 | #ifdef PAX_ASLR | | 837 | #ifdef PAX_ASLR |
838 | pax_aslr_init(l, vm); | | 838 | pax_aslr_init(l, vm); |
839 | #endif /* PAX_ASLR */ | | 839 | #endif /* PAX_ASLR */ |
840 | | | 840 | |
841 | /* create the new process's VM space by running the vmcmds */ | | 841 | /* create the new process's VM space by running the vmcmds */ |
842 | #ifdef DIAGNOSTIC | | 842 | #ifdef DIAGNOSTIC |
843 | if (pack.ep_vmcmds.evs_used == 0) | | 843 | if (pack.ep_vmcmds.evs_used == 0) |
844 | panic("%s: no vmcmds", __func__); | | 844 | panic("%s: no vmcmds", __func__); |
845 | #endif | | 845 | #endif |
846 | for (i = 0; i < pack.ep_vmcmds.evs_used && !error; i++) { | | 846 | for (i = 0; i < pack.ep_vmcmds.evs_used && !error; i++) { |
847 | struct exec_vmcmd *vcp; | | 847 | struct exec_vmcmd *vcp; |
848 | | | 848 | |
849 | vcp = &pack.ep_vmcmds.evs_cmds[i]; | | 849 | vcp = &pack.ep_vmcmds.evs_cmds[i]; |
850 | if (vcp->ev_flags & VMCMD_RELATIVE) { | | 850 | if (vcp->ev_flags & VMCMD_RELATIVE) { |
851 | #ifdef DIAGNOSTIC | | 851 | #ifdef DIAGNOSTIC |
852 | if (base_vcp == NULL) | | 852 | if (base_vcp == NULL) |
853 | panic("%s: relative vmcmd with no base", | | 853 | panic("%s: relative vmcmd with no base", |
854 | __func__); | | 854 | __func__); |
855 | if (vcp->ev_flags & VMCMD_BASE) | | 855 | if (vcp->ev_flags & VMCMD_BASE) |
856 | panic("%s: illegal base & relative vmcmd", | | 856 | panic("%s: illegal base & relative vmcmd", |
857 | __func__); | | 857 | __func__); |
858 | #endif | | 858 | #endif |
859 | vcp->ev_addr += base_vcp->ev_addr; | | 859 | vcp->ev_addr += base_vcp->ev_addr; |
860 | } | | 860 | } |
861 | error = (*vcp->ev_proc)(l, vcp); | | 861 | error = (*vcp->ev_proc)(l, vcp); |
862 | #ifdef DEBUG_EXEC | | 862 | #ifdef DEBUG_EXEC |
863 | if (error) { | | 863 | if (error) { |
864 | size_t j; | | 864 | size_t j; |
865 | struct exec_vmcmd *vp = &pack.ep_vmcmds.evs_cmds[0]; | | 865 | struct exec_vmcmd *vp = &pack.ep_vmcmds.evs_cmds[0]; |
866 | uprintf("vmcmds %zu/%u, error %d\n", i, | | 866 | uprintf("vmcmds %zu/%u, error %d\n", i, |
867 | pack.ep_vmcmds.evs_used, error); | | 867 | pack.ep_vmcmds.evs_used, error); |
868 | for (j = 0; j <= i; j++) | | 868 | for (j = 0; j <= i; j++) |
869 | uprintf("vmcmd[%zu] = vmcmd_map_%s %#" | | 869 | uprintf("vmcmd[%zu] = vmcmd_map_%s %#" |
870 | PRIxVADDR"/%#"PRIxVSIZE" fd@%#" | | 870 | PRIxVADDR"/%#"PRIxVSIZE" fd@%#" |
871 | PRIxVSIZE" prot=0%o flags=%d\n", j, | | 871 | PRIxVSIZE" prot=0%o flags=%d\n", j, |
872 | vp[j].ev_proc == vmcmd_map_pagedvn ? | | 872 | vp[j].ev_proc == vmcmd_map_pagedvn ? |
873 | "pagedvn" : | | 873 | "pagedvn" : |
874 | vp[j].ev_proc == vmcmd_map_readvn ? | | 874 | vp[j].ev_proc == vmcmd_map_readvn ? |
875 | "readvn" : | | 875 | "readvn" : |
876 | vp[j].ev_proc == vmcmd_map_zero ? | | 876 | vp[j].ev_proc == vmcmd_map_zero ? |
877 | "zero" : "*unknown*", | | 877 | "zero" : "*unknown*", |
878 | vp[j].ev_addr, vp[j].ev_len, | | 878 | vp[j].ev_addr, vp[j].ev_len, |
879 | vp[j].ev_offset, vp[j].ev_prot, | | 879 | vp[j].ev_offset, vp[j].ev_prot, |
880 | vp[j].ev_flags); | | 880 | vp[j].ev_flags); |
881 | } | | 881 | } |
882 | #endif /* DEBUG_EXEC */ | | 882 | #endif /* DEBUG_EXEC */ |
883 | if (vcp->ev_flags & VMCMD_BASE) | | 883 | if (vcp->ev_flags & VMCMD_BASE) |
884 | base_vcp = vcp; | | 884 | base_vcp = vcp; |
885 | } | | 885 | } |
886 | | | 886 | |
887 | /* free the vmspace-creation commands, and release their references */ | | 887 | /* free the vmspace-creation commands, and release their references */ |
888 | kill_vmcmds(&pack.ep_vmcmds); | | 888 | kill_vmcmds(&pack.ep_vmcmds); |
889 | | | 889 | |
890 | vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); | | 890 | vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); |
891 | VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred); | | 891 | VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred); |
892 | vput(pack.ep_vp); | | 892 | vput(pack.ep_vp); |
893 | | | 893 | |
894 | /* if an error happened, deallocate and punt */ | | 894 | /* if an error happened, deallocate and punt */ |
895 | if (error) { | | 895 | if (error) { |
896 | DPRINTF(("%s: vmcmd %zu failed: %d\n", __func__, i - 1, error)); | | 896 | DPRINTF(("%s: vmcmd %zu failed: %d\n", __func__, i - 1, error)); |
897 | goto exec_abort; | | 897 | goto exec_abort; |
898 | } | | 898 | } |
899 | | | 899 | |
900 | /* remember information about the process */ | | 900 | /* remember information about the process */ |
901 | arginfo.ps_nargvstr = argc; | | 901 | arginfo.ps_nargvstr = argc; |
902 | arginfo.ps_nenvstr = envc; | | 902 | arginfo.ps_nenvstr = envc; |
903 | | | 903 | |
904 | /* set command name & other accounting info */ | | 904 | /* set command name & other accounting info */ |
905 | commandname = strrchr(pack.ep_resolvedname, '/'); | | 905 | commandname = strrchr(pack.ep_resolvedname, '/'); |
906 | if (commandname != NULL) { | | 906 | if (commandname != NULL) { |
907 | commandname++; | | 907 | commandname++; |
908 | } else { | | 908 | } else { |
909 | commandname = pack.ep_resolvedname; | | 909 | commandname = pack.ep_resolvedname; |
910 | } | | 910 | } |
911 | i = min(strlen(commandname), MAXCOMLEN); | | 911 | i = min(strlen(commandname), MAXCOMLEN); |
912 | (void)memcpy(p->p_comm, commandname, i); | | 912 | (void)memcpy(p->p_comm, commandname, i); |
913 | p->p_comm[i] = '\0'; | | 913 | p->p_comm[i] = '\0'; |
914 | | | 914 | |
915 | dp = PNBUF_GET(); | | 915 | dp = PNBUF_GET(); |
916 | /* | | 916 | /* |
917 | * If the path starts with /, we don't need to do any work. | | 917 | * If the path starts with /, we don't need to do any work. |
918 | * This handles the majority of the cases. | | 918 | * This handles the majority of the cases. |
919 | * In the future perhaps we could canonicalize it? | | 919 | * In the future perhaps we could canonicalize it? |
920 | */ | | 920 | */ |
921 | if (pathstring[0] == '/') | | 921 | if (pathstring[0] == '/') |
922 | (void)strlcpy(pack.ep_path = dp, pathstring, MAXPATHLEN); | | 922 | (void)strlcpy(pack.ep_path = dp, pathstring, MAXPATHLEN); |
923 | #ifdef notyet | | 923 | #ifdef notyet |
924 | /* | | 924 | /* |
925 | * Although this works most of the time [since the entry was just | | 925 | * Although this works most of the time [since the entry was just |
926 | * entered in the cache] we don't use it because it theoretically | | 926 | * entered in the cache] we don't use it because it theoretically |
927 | * can fail and it is not the cleanest interface, because there | | 927 | * can fail and it is not the cleanest interface, because there |
928 | * could be races. When the namei cache is re-written, this can | | 928 | * could be races. When the namei cache is re-written, this can |
929 | * be changed to use the appropriate function. | | 929 | * be changed to use the appropriate function. |
930 | */ | | 930 | */ |
931 | else if (!(error = vnode_to_path(dp, MAXPATHLEN, p->p_textvp, l, p))) | | 931 | else if (!(error = vnode_to_path(dp, MAXPATHLEN, p->p_textvp, l, p))) |
932 | pack.ep_path = dp; | | 932 | pack.ep_path = dp; |
933 | #endif | | 933 | #endif |
934 | else { | | 934 | else { |
935 | #ifdef notyet | | 935 | #ifdef notyet |
936 | printf("Cannot get path for pid %d [%s] (error %d)", | | 936 | printf("Cannot get path for pid %d [%s] (error %d)", |
937 | (int)p->p_pid, p->p_comm, error); | | 937 | (int)p->p_pid, p->p_comm, error); |
938 | #endif | | 938 | #endif |
939 | pack.ep_path = NULL; | | 939 | pack.ep_path = NULL; |
940 | PNBUF_PUT(dp); | | 940 | PNBUF_PUT(dp); |
941 | } | | 941 | } |
942 | | | 942 | |
943 | stack = (char *)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, | | 943 | stack = (char *)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, |
944 | STACK_PTHREADSPACE + ps_strings_sz + szsigcode), | | 944 | STACK_PTHREADSPACE + ps_strings_sz + szsigcode), |
945 | len - (ps_strings_sz + szsigcode)); | | 945 | len - (ps_strings_sz + szsigcode)); |
946 | | | 946 | |
947 | #ifdef __MACHINE_STACK_GROWS_UP | | 947 | #ifdef __MACHINE_STACK_GROWS_UP |
948 | /* | | 948 | /* |
949 | * The copyargs call always copies into lower addresses | | 949 | * The copyargs call always copies into lower addresses |
950 | * first, moving towards higher addresses, starting with | | 950 | * first, moving towards higher addresses, starting with |
951 | * the stack pointer that we give. When the stack grows | | 951 | * the stack pointer that we give. When the stack grows |
952 | * down, this puts argc/argv/envp very shallow on the | | 952 | * down, this puts argc/argv/envp very shallow on the |
953 | * stack, right at the first user stack pointer. | | 953 | * stack, right at the first user stack pointer. |
954 | * When the stack grows up, the situation is reversed. | | 954 | * When the stack grows up, the situation is reversed. |
955 | * | | 955 | * |
956 | * Normally, this is no big deal. But the ld_elf.so _rtld() | | 956 | * Normally, this is no big deal. But the ld_elf.so _rtld() |
957 | * function expects to be called with a single pointer to | | 957 | * function expects to be called with a single pointer to |
958 | * a region that has a few words it can stash values into, | | 958 | * a region that has a few words it can stash values into, |
959 | * followed by argc/argv/envp. When the stack grows down, | | 959 | * followed by argc/argv/envp. When the stack grows down, |
960 | * it's easy to decrement the stack pointer a little bit to | | 960 | * it's easy to decrement the stack pointer a little bit to |
961 | * allocate the space for these few words and pass the new | | 961 | * allocate the space for these few words and pass the new |
962 | * stack pointer to _rtld. When the stack grows up, however, | | 962 | * stack pointer to _rtld. When the stack grows up, however, |
963 | * a few words before argc is part of the signal trampoline, XXX | | 963 | * a few words before argc is part of the signal trampoline, XXX |
964 | * so we have a problem. | | 964 | * so we have a problem. |
965 | * | | 965 | * |
966 | * Instead of changing how _rtld works, we take the easy way | | 966 | * Instead of changing how _rtld works, we take the easy way |
967 | * out and steal 32 bytes before we call copyargs. | | 967 | * out and steal 32 bytes before we call copyargs. |
968 | * This extra space was allowed for when 'len' was calculated. | | 968 | * This extra space was allowed for when 'len' was calculated. |
969 | */ | | 969 | */ |
970 | stack += RTLD_GAP; | | 970 | stack += RTLD_GAP; |
971 | #endif /* __MACHINE_STACK_GROWS_UP */ | | 971 | #endif /* __MACHINE_STACK_GROWS_UP */ |
972 | | | 972 | |
973 | /* Now copy argc, args & environ to new stack */ | | 973 | /* Now copy argc, args & environ to new stack */ |
974 | error = (*pack.ep_esch->es_copyargs)(l, &pack, &arginfo, &stack, argp); | | 974 | error = (*pack.ep_esch->es_copyargs)(l, &pack, &arginfo, &stack, argp); |
975 | if (pack.ep_path) { | | 975 | if (pack.ep_path) { |
976 | PNBUF_PUT(pack.ep_path); | | 976 | PNBUF_PUT(pack.ep_path); |
977 | pack.ep_path = NULL; | | 977 | pack.ep_path = NULL; |
978 | } | | 978 | } |
979 | if (error) { | | 979 | if (error) { |
980 | DPRINTF(("%s: copyargs failed %d\n", __func__, error)); | | 980 | DPRINTF(("%s: copyargs failed %d\n", __func__, error)); |
981 | goto exec_abort; | | 981 | goto exec_abort; |
982 | } | | 982 | } |
983 | /* Move the stack back to original point */ | | 983 | /* Move the stack back to original point */ |
984 | stack = (char *)STACK_GROW(vm->vm_minsaddr, len); | | 984 | stack = (char *)STACK_GROW(vm->vm_minsaddr, len); |
985 | | | 985 | |
986 | /* fill process ps_strings info */ | | 986 | /* fill process ps_strings info */ |
987 | p->p_psstrp = (vaddr_t)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, | | 987 | p->p_psstrp = (vaddr_t)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, |
988 | STACK_PTHREADSPACE), ps_strings_sz); | | 988 | STACK_PTHREADSPACE), ps_strings_sz); |
989 | | | 989 | |
990 | if (pack.ep_flags & EXEC_32) { | | 990 | if (pack.ep_flags & EXEC_32) { |
991 | arginfo32.ps_argvstr = (vaddr_t)arginfo.ps_argvstr; | | 991 | arginfo32.ps_argvstr = (vaddr_t)arginfo.ps_argvstr; |
992 | arginfo32.ps_nargvstr = arginfo.ps_nargvstr; | | 992 | arginfo32.ps_nargvstr = arginfo.ps_nargvstr; |
993 | arginfo32.ps_envstr = (vaddr_t)arginfo.ps_envstr; | | 993 | arginfo32.ps_envstr = (vaddr_t)arginfo.ps_envstr; |
994 | arginfo32.ps_nenvstr = arginfo.ps_nenvstr; | | 994 | arginfo32.ps_nenvstr = arginfo.ps_nenvstr; |
995 | } | | 995 | } |
996 | | | 996 | |
997 | /* copy out the process's ps_strings structure */ | | 997 | /* copy out the process's ps_strings structure */ |
998 | if ((error = copyout(aip, (void *)p->p_psstrp, ps_strings_sz)) != 0) { | | 998 | if ((error = copyout(aip, (void *)p->p_psstrp, ps_strings_sz)) != 0) { |
999 | DPRINTF(("%s: ps_strings copyout %p->%p size %zu failed\n", | | 999 | DPRINTF(("%s: ps_strings copyout %p->%p size %zu failed\n", |
1000 | __func__, aip, (void *)p->p_psstrp, ps_strings_sz)); | | 1000 | __func__, aip, (void *)p->p_psstrp, ps_strings_sz)); |
1001 | goto exec_abort; | | 1001 | goto exec_abort; |
1002 | } | | 1002 | } |
1003 | | | 1003 | |
1004 | cwdexec(p); | | 1004 | cwdexec(p); |
1005 | fd_closeexec(); /* handle close on exec */ | | 1005 | fd_closeexec(); /* handle close on exec */ |
1006 | | | 1006 | |
1007 | if (__predict_false(ktrace_on)) | | 1007 | if (__predict_false(ktrace_on)) |
1008 | fd_ktrexecfd(); | | 1008 | fd_ktrexecfd(); |
1009 | | | 1009 | |
1010 | execsigs(p); /* reset catched signals */ | | 1010 | execsigs(p); /* reset catched signals */ |
1011 | | | 1011 | |
1012 | l->l_ctxlink = NULL; /* reset ucontext link */ | | 1012 | l->l_ctxlink = NULL; /* reset ucontext link */ |
1013 | | | 1013 | |
1014 | | | 1014 | |
1015 | p->p_acflag &= ~AFORK; | | 1015 | p->p_acflag &= ~AFORK; |
1016 | mutex_enter(p->p_lock); | | 1016 | mutex_enter(p->p_lock); |
1017 | p->p_flag |= PK_EXEC; | | 1017 | p->p_flag |= PK_EXEC; |
1018 | mutex_exit(p->p_lock); | | 1018 | mutex_exit(p->p_lock); |
1019 | | | 1019 | |
1020 | /* | | 1020 | /* |
1021 | * Stop profiling. | | 1021 | * Stop profiling. |
1022 | */ | | 1022 | */ |
1023 | if ((p->p_stflag & PST_PROFIL) != 0) { | | 1023 | if ((p->p_stflag & PST_PROFIL) != 0) { |
1024 | mutex_spin_enter(&p->p_stmutex); | | 1024 | mutex_spin_enter(&p->p_stmutex); |
1025 | stopprofclock(p); | | 1025 | stopprofclock(p); |
1026 | mutex_spin_exit(&p->p_stmutex); | | 1026 | mutex_spin_exit(&p->p_stmutex); |
1027 | } | | 1027 | } |
1028 | | | 1028 | |
1029 | /* | | 1029 | /* |
1030 | * It's OK to test PL_PPWAIT unlocked here, as other LWPs have | | 1030 | * It's OK to test PL_PPWAIT unlocked here, as other LWPs have |
1031 | * exited and exec()/exit() are the only places it will be cleared. | | 1031 | * exited and exec()/exit() are the only places it will be cleared. |
1032 | */ | | 1032 | */ |
1033 | if ((p->p_lflag & PL_PPWAIT) != 0) { | | 1033 | if ((p->p_lflag & PL_PPWAIT) != 0) { |
1034 | mutex_enter(proc_lock); | | 1034 | mutex_enter(proc_lock); |
1035 | l->l_lwpctl = NULL; /* was on loan from blocked parent */ | | 1035 | l->l_lwpctl = NULL; /* was on loan from blocked parent */ |
1036 | p->p_lflag &= ~PL_PPWAIT; | | 1036 | p->p_lflag &= ~PL_PPWAIT; |
1037 | cv_broadcast(&p->p_pptr->p_waitcv); | | 1037 | cv_broadcast(&p->p_pptr->p_waitcv); |
1038 | mutex_exit(proc_lock); | | 1038 | mutex_exit(proc_lock); |
1039 | } | | 1039 | } |
1040 | | | 1040 | |
1041 | /* | | 1041 | /* |
1042 | * Deal with set[ug]id. MNT_NOSUID has already been used to disable | | 1042 | * Deal with set[ug]id. MNT_NOSUID has already been used to disable |
1043 | * s[ug]id. It's OK to check for PSL_TRACED here as we have blocked | | 1043 | * s[ug]id. It's OK to check for PSL_TRACED here as we have blocked |
1044 | * out additional references on the process for the moment. | | 1044 | * out additional references on the process for the moment. |
1045 | */ | | 1045 | */ |
1046 | if ((p->p_slflag & PSL_TRACED) == 0 && | | 1046 | if ((p->p_slflag & PSL_TRACED) == 0 && |
1047 | | | 1047 | |
1048 | (((attr.va_mode & S_ISUID) != 0 && | | 1048 | (((attr.va_mode & S_ISUID) != 0 && |
1049 | kauth_cred_geteuid(l->l_cred) != attr.va_uid) || | | 1049 | kauth_cred_geteuid(l->l_cred) != attr.va_uid) || |
1050 | | | 1050 | |
1051 | ((attr.va_mode & S_ISGID) != 0 && | | 1051 | ((attr.va_mode & S_ISGID) != 0 && |
1052 | kauth_cred_getegid(l->l_cred) != attr.va_gid))) { | | 1052 | kauth_cred_getegid(l->l_cred) != attr.va_gid))) { |
1053 | /* | | 1053 | /* |
1054 | * Mark the process as SUGID before we do | | 1054 | * Mark the process as SUGID before we do |
1055 | * anything that might block. | | 1055 | * anything that might block. |
1056 | */ | | 1056 | */ |
1057 | proc_crmod_enter(); | | 1057 | proc_crmod_enter(); |
1058 | proc_crmod_leave(NULL, NULL, true); | | 1058 | proc_crmod_leave(NULL, NULL, true); |
1059 | | | 1059 | |
1060 | /* Make sure file descriptors 0..2 are in use. */ | | 1060 | /* Make sure file descriptors 0..2 are in use. */ |
1061 | if ((error = fd_checkstd()) != 0) { | | 1061 | if ((error = fd_checkstd()) != 0) { |
1062 | DPRINTF(("%s: fdcheckstd failed %d\n", | | 1062 | DPRINTF(("%s: fdcheckstd failed %d\n", |
1063 | __func__, error)); | | 1063 | __func__, error)); |
1064 | goto exec_abort; | | 1064 | goto exec_abort; |
1065 | } | | 1065 | } |
1066 | | | 1066 | |
1067 | /* | | 1067 | /* |
1068 | * Copy the credential so other references don't see our | | 1068 | * Copy the credential so other references don't see our |
1069 | * changes. | | 1069 | * changes. |
1070 | */ | | 1070 | */ |
1071 | l->l_cred = kauth_cred_copy(l->l_cred); | | 1071 | l->l_cred = kauth_cred_copy(l->l_cred); |
1072 | #ifdef KTRACE | | 1072 | #ifdef KTRACE |
1073 | /* | | 1073 | /* |
1074 | * If the persistent trace flag isn't set, turn off. | | 1074 | * If the persistent trace flag isn't set, turn off. |
1075 | */ | | 1075 | */ |
1076 | if (p->p_tracep) { | | 1076 | if (p->p_tracep) { |
1077 | mutex_enter(&ktrace_lock); | | 1077 | mutex_enter(&ktrace_lock); |
1078 | if (!(p->p_traceflag & KTRFAC_PERSISTENT)) | | 1078 | if (!(p->p_traceflag & KTRFAC_PERSISTENT)) |
1079 | ktrderef(p); | | 1079 | ktrderef(p); |
1080 | mutex_exit(&ktrace_lock); | | 1080 | mutex_exit(&ktrace_lock); |
1081 | } | | 1081 | } |
1082 | #endif | | 1082 | #endif |
1083 | if (attr.va_mode & S_ISUID) | | 1083 | if (attr.va_mode & S_ISUID) |
1084 | kauth_cred_seteuid(l->l_cred, attr.va_uid); | | 1084 | kauth_cred_seteuid(l->l_cred, attr.va_uid); |
1085 | if (attr.va_mode & S_ISGID) | | 1085 | if (attr.va_mode & S_ISGID) |
1086 | kauth_cred_setegid(l->l_cred, attr.va_gid); | | 1086 | kauth_cred_setegid(l->l_cred, attr.va_gid); |
1087 | } else { | | 1087 | } else { |
1088 | if (kauth_cred_geteuid(l->l_cred) == | | 1088 | if (kauth_cred_geteuid(l->l_cred) == |
1089 | kauth_cred_getuid(l->l_cred) && | | 1089 | kauth_cred_getuid(l->l_cred) && |
1090 | kauth_cred_getegid(l->l_cred) == | | 1090 | kauth_cred_getegid(l->l_cred) == |
1091 | kauth_cred_getgid(l->l_cred)) | | 1091 | kauth_cred_getgid(l->l_cred)) |
1092 | p->p_flag &= ~PK_SUGID; | | 1092 | p->p_flag &= ~PK_SUGID; |
1093 | } | | 1093 | } |
1094 | | | 1094 | |
1095 | /* | | 1095 | /* |
1096 | * Copy the credential so other references don't see our changes. | | 1096 | * Copy the credential so other references don't see our changes. |
1097 | * Test to see if this is necessary first, since in the common case | | 1097 | * Test to see if this is necessary first, since in the common case |
1098 | * we won't need a private reference. | | 1098 | * we won't need a private reference. |
1099 | */ | | 1099 | */ |
1100 | if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) || | | 1100 | if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) || |
1101 | kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) { | | 1101 | kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) { |
1102 | l->l_cred = kauth_cred_copy(l->l_cred); | | 1102 | l->l_cred = kauth_cred_copy(l->l_cred); |
1103 | kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred)); | | 1103 | kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred)); |
1104 | kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred)); | | 1104 | kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred)); |
1105 | } | | 1105 | } |
1106 | | | 1106 | |
1107 | /* Update the master credentials. */ | | 1107 | /* Update the master credentials. */ |
1108 | if (l->l_cred != p->p_cred) { | | 1108 | if (l->l_cred != p->p_cred) { |
1109 | kauth_cred_t ocred; | | 1109 | kauth_cred_t ocred; |
1110 | | | 1110 | |
1111 | kauth_cred_hold(l->l_cred); | | 1111 | kauth_cred_hold(l->l_cred); |
1112 | mutex_enter(p->p_lock); | | 1112 | mutex_enter(p->p_lock); |
1113 | ocred = p->p_cred; | | 1113 | ocred = p->p_cred; |
1114 | p->p_cred = l->l_cred; | | 1114 | p->p_cred = l->l_cred; |
1115 | mutex_exit(p->p_lock); | | 1115 | mutex_exit(p->p_lock); |
1116 | kauth_cred_free(ocred); | | 1116 | kauth_cred_free(ocred); |
1117 | } | | 1117 | } |
1118 | | | 1118 | |
1119 | #if defined(__HAVE_RAS) | | 1119 | #if defined(__HAVE_RAS) |
1120 | /* | | 1120 | /* |
1121 | * Remove all RASs from the address space. | | 1121 | * Remove all RASs from the address space. |
1122 | */ | | 1122 | */ |
1123 | ras_purgeall(); | | 1123 | ras_purgeall(); |
1124 | #endif | | 1124 | #endif |
1125 | | | 1125 | |
1126 | doexechooks(p); | | 1126 | doexechooks(p); |
1127 | | | 1127 | |
1128 | /* setup new registers and do misc. setup. */ | | 1128 | /* setup new registers and do misc. setup. */ |
1129 | (*pack.ep_esch->es_emul->e_setregs)(l, &pack, (vaddr_t)stack); | | 1129 | (*pack.ep_esch->es_emul->e_setregs)(l, &pack, (vaddr_t)stack); |
1130 | if (pack.ep_esch->es_setregs) | | 1130 | if (pack.ep_esch->es_setregs) |
1131 | (*pack.ep_esch->es_setregs)(l, &pack, (vaddr_t)stack); | | 1131 | (*pack.ep_esch->es_setregs)(l, &pack, (vaddr_t)stack); |
1132 | | | 1132 | |
1133 | /* Provide a consistent LWP private setting */ | | 1133 | /* Provide a consistent LWP private setting */ |
1134 | (void)lwp_setprivate(l, NULL); | | 1134 | (void)lwp_setprivate(l, NULL); |
1135 | | | 1135 | |
1136 | /* Discard all PCU state; need to start fresh */ | | 1136 | /* Discard all PCU state; need to start fresh */ |
1137 | pcu_discard_all(l); | | 1137 | pcu_discard_all(l); |
1138 | | | 1138 | |
1139 | /* map the process's signal trampoline code */ | | 1139 | /* map the process's signal trampoline code */ |
1140 | if ((error = exec_sigcode_map(p, pack.ep_esch->es_emul)) != 0) { | | 1140 | if ((error = exec_sigcode_map(p, pack.ep_esch->es_emul)) != 0) { |
1141 | DPRINTF(("%s: map sigcode failed %d\n", __func__, error)); | | 1141 | DPRINTF(("%s: map sigcode failed %d\n", __func__, error)); |
1142 | goto exec_abort; | | 1142 | goto exec_abort; |
1143 | } | | 1143 | } |
1144 | | | 1144 | |
1145 | pool_put(&exec_pool, argp); | | 1145 | pool_put(&exec_pool, argp); |
1146 | | | 1146 | |
1147 | /* notify others that we exec'd */ | | 1147 | /* notify others that we exec'd */ |
1148 | KNOTE(&p->p_klist, NOTE_EXEC); | | 1148 | KNOTE(&p->p_klist, NOTE_EXEC); |
1149 | | | 1149 | |
1150 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); | | 1150 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); |
1151 | | | 1151 | |
1152 | SDT_PROBE(proc,,,exec_success, path, 0, 0, 0, 0); | | 1152 | SDT_PROBE(proc,,,exec_success, path, 0, 0, 0, 0); |
1153 | | | 1153 | |
1154 | /* The emulation root will usually have been found when we looked | | 1154 | /* The emulation root will usually have been found when we looked |
1155 | * for the elf interpreter (or similar), if not look now. */ | | 1155 | * for the elf interpreter (or similar), if not look now. */ |
1156 | if (pack.ep_esch->es_emul->e_path != NULL && pack.ep_emul_root == NULL) | | 1156 | if (pack.ep_esch->es_emul->e_path != NULL && pack.ep_emul_root == NULL) |
1157 | emul_find_root(l, &pack); | | 1157 | emul_find_root(l, &pack); |
1158 | | | 1158 | |
1159 | /* Any old emulation root got removed by fdcloseexec */ | | 1159 | /* Any old emulation root got removed by fdcloseexec */ |
1160 | rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER); | | 1160 | rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER); |
1161 | p->p_cwdi->cwdi_edir = pack.ep_emul_root; | | 1161 | p->p_cwdi->cwdi_edir = pack.ep_emul_root; |
1162 | rw_exit(&p->p_cwdi->cwdi_lock); | | 1162 | rw_exit(&p->p_cwdi->cwdi_lock); |
1163 | pack.ep_emul_root = NULL; | | 1163 | pack.ep_emul_root = NULL; |
1164 | if (pack.ep_interp != NULL) | | 1164 | if (pack.ep_interp != NULL) |
1165 | vrele(pack.ep_interp); | | 1165 | vrele(pack.ep_interp); |
1166 | | | 1166 | |
1167 | /* | | 1167 | /* |
1168 | * Call emulation specific exec hook. This can setup per-process | | 1168 | * Call emulation specific exec hook. This can setup per-process |
1169 | * p->p_emuldata or do any other per-process stuff an emulation needs. | | 1169 | * p->p_emuldata or do any other per-process stuff an emulation needs. |
1170 | * | | 1170 | * |
1171 | * If we are executing process of different emulation than the | | 1171 | * If we are executing process of different emulation than the |
1172 | * original forked process, call e_proc_exit() of the old emulation | | 1172 | * original forked process, call e_proc_exit() of the old emulation |
1173 | * first, then e_proc_exec() of new emulation. If the emulation is | | 1173 | * first, then e_proc_exec() of new emulation. If the emulation is |
1174 | * same, the exec hook code should deallocate any old emulation | | 1174 | * same, the exec hook code should deallocate any old emulation |
1175 | * resources held previously by this process. | | 1175 | * resources held previously by this process. |
1176 | */ | | 1176 | */ |
1177 | if (p->p_emul && p->p_emul->e_proc_exit | | 1177 | if (p->p_emul && p->p_emul->e_proc_exit |
1178 | && p->p_emul != pack.ep_esch->es_emul) | | 1178 | && p->p_emul != pack.ep_esch->es_emul) |
1179 | (*p->p_emul->e_proc_exit)(p); | | 1179 | (*p->p_emul->e_proc_exit)(p); |
1180 | | | 1180 | |
1181 | /* | | 1181 | /* |
1182 | * This is now LWP 1. | | 1182 | * This is now LWP 1. |
1183 | */ | | 1183 | */ |
1184 | mutex_enter(p->p_lock); | | 1184 | mutex_enter(p->p_lock); |
1185 | p->p_nlwpid = 1; | | 1185 | p->p_nlwpid = 1; |
1186 | l->l_lid = 1; | | 1186 | l->l_lid = 1; |
1187 | mutex_exit(p->p_lock); | | 1187 | mutex_exit(p->p_lock); |
1188 | | | 1188 | |
1189 | /* | | 1189 | /* |
1190 | * Call exec hook. Emulation code may NOT store reference to anything | | 1190 | * Call exec hook. Emulation code may NOT store reference to anything |
1191 | * from &pack. | | 1191 | * from &pack. |
1192 | */ | | 1192 | */ |
1193 | if (pack.ep_esch->es_emul->e_proc_exec) | | 1193 | if (pack.ep_esch->es_emul->e_proc_exec) |
1194 | (*pack.ep_esch->es_emul->e_proc_exec)(p, &pack); | | 1194 | (*pack.ep_esch->es_emul->e_proc_exec)(p, &pack); |
1195 | | | 1195 | |
1196 | /* update p_emul, the old value is no longer needed */ | | 1196 | /* update p_emul, the old value is no longer needed */ |
1197 | p->p_emul = pack.ep_esch->es_emul; | | 1197 | p->p_emul = pack.ep_esch->es_emul; |
1198 | | | 1198 | |
1199 | /* ...and the same for p_execsw */ | | 1199 | /* ...and the same for p_execsw */ |
1200 | p->p_execsw = pack.ep_esch; | | 1200 | p->p_execsw = pack.ep_esch; |
1201 | | | 1201 | |
1202 | #ifdef __HAVE_SYSCALL_INTERN | | 1202 | #ifdef __HAVE_SYSCALL_INTERN |
1203 | (*p->p_emul->e_syscall_intern)(p); | | 1203 | (*p->p_emul->e_syscall_intern)(p); |
1204 | #endif | | 1204 | #endif |
1205 | ktremul(); | | 1205 | ktremul(); |
1206 | | | 1206 | |
1207 | /* Allow new references from the debugger/procfs. */ | | 1207 | /* Allow new references from the debugger/procfs. */ |
1208 | rw_exit(&p->p_reflock); | | 1208 | rw_exit(&p->p_reflock); |
1209 | rw_exit(&exec_lock); | | 1209 | rw_exit(&exec_lock); |
1210 | | | 1210 | |
1211 | mutex_enter(proc_lock); | | 1211 | mutex_enter(proc_lock); |
1212 | | | 1212 | |
1213 | if ((p->p_slflag & (PSL_TRACED|PSL_SYSCALL)) == PSL_TRACED) { | | 1213 | if ((p->p_slflag & (PSL_TRACED|PSL_SYSCALL)) == PSL_TRACED) { |
1214 | KSI_INIT_EMPTY(&ksi); | | 1214 | KSI_INIT_EMPTY(&ksi); |
1215 | ksi.ksi_signo = SIGTRAP; | | 1215 | ksi.ksi_signo = SIGTRAP; |
1216 | ksi.ksi_lid = l->l_lid; | | 1216 | ksi.ksi_lid = l->l_lid; |
1217 | kpsignal(p, &ksi, NULL); | | 1217 | kpsignal(p, &ksi, NULL); |
1218 | } | | 1218 | } |
1219 | | | 1219 | |
1220 | if (p->p_sflag & PS_STOPEXEC) { | | 1220 | if (p->p_sflag & PS_STOPEXEC) { |
1221 | KERNEL_UNLOCK_ALL(l, &l->l_biglocks); | | 1221 | KERNEL_UNLOCK_ALL(l, &l->l_biglocks); |
1222 | p->p_pptr->p_nstopchild++; | | 1222 | p->p_pptr->p_nstopchild++; |
1223 | p->p_pptr->p_waited = 0; | | 1223 | p->p_pptr->p_waited = 0; |
1224 | mutex_enter(p->p_lock); | | 1224 | mutex_enter(p->p_lock); |
1225 | ksiginfo_queue_init(&kq); | | 1225 | ksiginfo_queue_init(&kq); |
1226 | sigclearall(p, &contsigmask, &kq); | | 1226 | sigclearall(p, &contsigmask, &kq); |
1227 | lwp_lock(l); | | 1227 | lwp_lock(l); |
1228 | l->l_stat = LSSTOP; | | 1228 | l->l_stat = LSSTOP; |
1229 | p->p_stat = SSTOP; | | 1229 | p->p_stat = SSTOP; |
1230 | p->p_nrlwps--; | | 1230 | p->p_nrlwps--; |
1231 | lwp_unlock(l); | | 1231 | lwp_unlock(l); |
1232 | mutex_exit(p->p_lock); | | 1232 | mutex_exit(p->p_lock); |
1233 | mutex_exit(proc_lock); | | 1233 | mutex_exit(proc_lock); |
1234 | lwp_lock(l); | | 1234 | lwp_lock(l); |
1235 | mi_switch(l); | | 1235 | mi_switch(l); |
1236 | ksiginfo_queue_drain(&kq); | | 1236 | ksiginfo_queue_drain(&kq); |
1237 | KERNEL_LOCK(l->l_biglocks, l); | | 1237 | KERNEL_LOCK(l->l_biglocks, l); |
1238 | } else { | | 1238 | } else { |
1239 | mutex_exit(proc_lock); | | 1239 | mutex_exit(proc_lock); |
1240 | } | | 1240 | } |
1241 | | | 1241 | |
1242 | pathbuf_stringcopy_put(pb, pathstring); | | 1242 | pathbuf_stringcopy_put(pb, pathstring); |
1243 | pathbuf_destroy(pb); | | 1243 | pathbuf_destroy(pb); |
1244 | PNBUF_PUT(resolvedpathbuf); | | 1244 | PNBUF_PUT(resolvedpathbuf); |
1245 | return (EJUSTRETURN); | | 1245 | return (EJUSTRETURN); |
1246 | | | 1246 | |
1247 | bad: | | 1247 | bad: |
1248 | /* free the vmspace-creation commands, and release their references */ | | 1248 | /* free the vmspace-creation commands, and release their references */ |
1249 | kill_vmcmds(&pack.ep_vmcmds); | | 1249 | kill_vmcmds(&pack.ep_vmcmds); |
1250 | /* kill any opened file descriptor, if necessary */ | | 1250 | /* kill any opened file descriptor, if necessary */ |
1251 | if (pack.ep_flags & EXEC_HASFD) { | | 1251 | if (pack.ep_flags & EXEC_HASFD) { |
1252 | pack.ep_flags &= ~EXEC_HASFD; | | 1252 | pack.ep_flags &= ~EXEC_HASFD; |
1253 | fd_close(pack.ep_fd); | | 1253 | fd_close(pack.ep_fd); |
1254 | } | | 1254 | } |
1255 | /* close and put the exec'd file */ | | 1255 | /* close and put the exec'd file */ |
1256 | vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); | | 1256 | vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); |
1257 | VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred); | | 1257 | VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred); |
1258 | vput(pack.ep_vp); | | 1258 | vput(pack.ep_vp); |
1259 | pool_put(&exec_pool, argp); | | 1259 | pool_put(&exec_pool, argp); |
1260 | | | 1260 | |
1261 | freehdr: | | 1261 | freehdr: |
1262 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); | | 1262 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); |
1263 | if (pack.ep_emul_root != NULL) | | 1263 | if (pack.ep_emul_root != NULL) |
1264 | vrele(pack.ep_emul_root); | | 1264 | vrele(pack.ep_emul_root); |
1265 | if (pack.ep_interp != NULL) | | 1265 | if (pack.ep_interp != NULL) |
1266 | vrele(pack.ep_interp); | | 1266 | vrele(pack.ep_interp); |
1267 | | | 1267 | |
1268 | rw_exit(&exec_lock); | | 1268 | rw_exit(&exec_lock); |
1269 | | | 1269 | |
1270 | pathbuf_stringcopy_put(pb, pathstring); | | 1270 | pathbuf_stringcopy_put(pb, pathstring); |
1271 | pathbuf_destroy(pb); | | 1271 | pathbuf_destroy(pb); |
1272 | PNBUF_PUT(resolvedpathbuf); | | 1272 | PNBUF_PUT(resolvedpathbuf); |
1273 | | | 1273 | |
1274 | clrflg: | | 1274 | clrflg: |
1275 | lwp_lock(l); | | 1275 | lwp_lock(l); |
1276 | l->l_flag |= oldlwpflags; | | 1276 | l->l_flag |= oldlwpflags; |
1277 | lwp_unlock(l); | | 1277 | lwp_unlock(l); |
1278 | rw_exit(&p->p_reflock); | | 1278 | rw_exit(&p->p_reflock); |
1279 | | | 1279 | |
1280 | if (modgen != module_gen && error == ENOEXEC) { | | 1280 | if (modgen != module_gen && error == ENOEXEC) { |
1281 | modgen = module_gen; | | 1281 | modgen = module_gen; |
1282 | exec_autoload(); | | 1282 | exec_autoload(); |
1283 | goto retry; | | 1283 | goto retry; |
1284 | } | | 1284 | } |
1285 | | | 1285 | |
1286 | SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0); | | 1286 | SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0); |
1287 | return error; | | 1287 | return error; |
1288 | | | 1288 | |
1289 | exec_abort: | | 1289 | exec_abort: |
1290 | SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0); | | 1290 | SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0); |
1291 | rw_exit(&p->p_reflock); | | 1291 | rw_exit(&p->p_reflock); |
1292 | rw_exit(&exec_lock); | | 1292 | rw_exit(&exec_lock); |
1293 | | | 1293 | |
1294 | pathbuf_stringcopy_put(pb, pathstring); | | 1294 | pathbuf_stringcopy_put(pb, pathstring); |
1295 | pathbuf_destroy(pb); | | 1295 | pathbuf_destroy(pb); |
1296 | PNBUF_PUT(resolvedpathbuf); | | 1296 | PNBUF_PUT(resolvedpathbuf); |
1297 | | | 1297 | |
1298 | /* | | 1298 | /* |
1299 | * the old process doesn't exist anymore. exit gracefully. | | 1299 | * the old process doesn't exist anymore. exit gracefully. |
1300 | * get rid of the (new) address space we have created, if any, get rid | | 1300 | * get rid of the (new) address space we have created, if any, get rid |
1301 | * of our namei data and vnode, and exit noting failure | | 1301 | * of our namei data and vnode, and exit noting failure |
1302 | */ | | 1302 | */ |
1303 | uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS, | | 1303 | uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS, |
1304 | VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS); | | 1304 | VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS); |
1305 | if (pack.ep_emul_arg) | | 1305 | if (pack.ep_emul_arg) |
1306 | free(pack.ep_emul_arg, M_TEMP); | | 1306 | free(pack.ep_emul_arg, M_TEMP); |
1307 | pool_put(&exec_pool, argp); | | 1307 | pool_put(&exec_pool, argp); |
1308 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); | | 1308 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); |
1309 | if (pack.ep_emul_root != NULL) | | 1309 | if (pack.ep_emul_root != NULL) |
1310 | vrele(pack.ep_emul_root); | | 1310 | vrele(pack.ep_emul_root); |
1311 | if (pack.ep_interp != NULL) | | 1311 | if (pack.ep_interp != NULL) |
1312 | vrele(pack.ep_interp); | | 1312 | vrele(pack.ep_interp); |
1313 | | | 1313 | |
1314 | /* Acquire the sched-state mutex (exit1() will release it). */ | | 1314 | /* Acquire the sched-state mutex (exit1() will release it). */ |
1315 | mutex_enter(p->p_lock); | | 1315 | mutex_enter(p->p_lock); |
1316 | exit1(l, W_EXITCODE(error, SIGABRT)); | | 1316 | exit1(l, W_EXITCODE(error, SIGABRT)); |
1317 | | | 1317 | |
1318 | /* NOTREACHED */ | | 1318 | /* NOTREACHED */ |
1319 | return 0; | | 1319 | return 0; |
1320 | } | | 1320 | } |
1321 | | | 1321 | |
1322 | int | | 1322 | int |
1323 | copyargs(struct lwp *l, struct exec_package *pack, struct ps_strings *arginfo, | | 1323 | copyargs(struct lwp *l, struct exec_package *pack, struct ps_strings *arginfo, |
1324 | char **stackp, void *argp) | | 1324 | char **stackp, void *argp) |
1325 | { | | 1325 | { |
1326 | char **cpp, *dp, *sp; | | 1326 | char **cpp, *dp, *sp; |
1327 | size_t len; | | 1327 | size_t len; |
1328 | void *nullp; | | 1328 | void *nullp; |
1329 | long argc, envc; | | 1329 | long argc, envc; |
1330 | int error; | | 1330 | int error; |
1331 | | | 1331 | |
1332 | cpp = (char **)*stackp; | | 1332 | cpp = (char **)*stackp; |
1333 | nullp = NULL; | | 1333 | nullp = NULL; |
1334 | argc = arginfo->ps_nargvstr; | | 1334 | argc = arginfo->ps_nargvstr; |
1335 | envc = arginfo->ps_nenvstr; | | 1335 | envc = arginfo->ps_nenvstr; |
1336 | if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0) { | | 1336 | if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0) { |
1337 | COPYPRINTF("", cpp - 1, sizeof(argc)); | | 1337 | COPYPRINTF("", cpp - 1, sizeof(argc)); |
1338 | return error; | | 1338 | return error; |
1339 | } | | 1339 | } |
1340 | | | 1340 | |
1341 | dp = (char *) (cpp + argc + envc + 2 + pack->ep_esch->es_arglen); | | 1341 | dp = (char *) (cpp + argc + envc + 2 + pack->ep_esch->es_arglen); |
1342 | sp = argp; | | 1342 | sp = argp; |
1343 | | | 1343 | |
1344 | /* XXX don't copy them out, remap them! */ | | 1344 | /* XXX don't copy them out, remap them! */ |
1345 | arginfo->ps_argvstr = cpp; /* remember location of argv for later */ | | 1345 | arginfo->ps_argvstr = cpp; /* remember location of argv for later */ |
1346 | | | 1346 | |
1347 | for (; --argc >= 0; sp += len, dp += len) { | | 1347 | for (; --argc >= 0; sp += len, dp += len) { |
1348 | if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) { | | 1348 | if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) { |
1349 | COPYPRINTF("", cpp - 1, sizeof(dp)); | | 1349 | COPYPRINTF("", cpp - 1, sizeof(dp)); |
1350 | return error; | | 1350 | return error; |
1351 | } | | 1351 | } |
1352 | if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) { | | 1352 | if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) { |
1353 | COPYPRINTF("str", dp, (size_t)ARG_MAX); | | 1353 | COPYPRINTF("str", dp, (size_t)ARG_MAX); |
1354 | return error; | | 1354 | return error; |
1355 | } | | 1355 | } |
1356 | } | | 1356 | } |
1357 | | | 1357 | |
1358 | if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) { | | 1358 | if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) { |
1359 | COPYPRINTF("", cpp - 1, sizeof(nullp)); | | 1359 | COPYPRINTF("", cpp - 1, sizeof(nullp)); |
1360 | return error; | | 1360 | return error; |
1361 | } | | 1361 | } |
1362 | | | 1362 | |
1363 | arginfo->ps_envstr = cpp; /* remember location of envp for later */ | | 1363 | arginfo->ps_envstr = cpp; /* remember location of envp for later */ |
1364 | | | 1364 | |
1365 | for (; --envc >= 0; sp += len, dp += len) { | | 1365 | for (; --envc >= 0; sp += len, dp += len) { |
1366 | if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) { | | 1366 | if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) { |
1367 | COPYPRINTF("", cpp - 1, sizeof(dp)); | | 1367 | COPYPRINTF("", cpp - 1, sizeof(dp)); |
1368 | return error; | | 1368 | return error; |
1369 | } | | 1369 | } |
1370 | if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) { | | 1370 | if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) { |
1371 | COPYPRINTF("str", dp, (size_t)ARG_MAX); | | 1371 | COPYPRINTF("str", dp, (size_t)ARG_MAX); |
1372 | return error; | | 1372 | return error; |
1373 | } | | 1373 | } |
1374 | } | | 1374 | } |
1375 | | | 1375 | |
1376 | if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) { | | 1376 | if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) { |
1377 | COPYPRINTF("", cpp - 1, sizeof(nullp)); | | 1377 | COPYPRINTF("", cpp - 1, sizeof(nullp)); |
1378 | return error; | | 1378 | return error; |
1379 | } | | 1379 | } |
1380 | | | 1380 | |
1381 | *stackp = (char *)cpp; | | 1381 | *stackp = (char *)cpp; |
1382 | return 0; | | 1382 | return 0; |
1383 | } | | 1383 | } |
1384 | | | 1384 | |
1385 | | | 1385 | |
1386 | /* | | 1386 | /* |
1387 | * Add execsw[] entries. | | 1387 | * Add execsw[] entries. |
1388 | */ | | 1388 | */ |
1389 | int | | 1389 | int |
1390 | exec_add(struct execsw *esp, int count) | | 1390 | exec_add(struct execsw *esp, int count) |
1391 | { | | 1391 | { |