| @@ -1,1394 +1,1400 @@ | | | @@ -1,1394 +1,1400 @@ |
1 | /* $NetBSD: kern_exec.c,v 1.321 2011/08/26 09:07:48 reinoud Exp $ */ | | 1 | /* $NetBSD: kern_exec.c,v 1.322 2011/08/26 09:13:08 reinoud Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2008 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2008 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 16 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
17 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 17 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
18 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 18 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. | | 26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | /*- | | 29 | /*- |
30 | * Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou | | 30 | * Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou |
31 | * Copyright (C) 1992 Wolfgang Solfrank. | | 31 | * Copyright (C) 1992 Wolfgang Solfrank. |
32 | * Copyright (C) 1992 TooLs GmbH. | | 32 | * Copyright (C) 1992 TooLs GmbH. |
33 | * All rights reserved. | | 33 | * All rights reserved. |
34 | * | | 34 | * |
35 | * Redistribution and use in source and binary forms, with or without | | 35 | * Redistribution and use in source and binary forms, with or without |
36 | * modification, are permitted provided that the following conditions | | 36 | * modification, are permitted provided that the following conditions |
37 | * are met: | | 37 | * are met: |
38 | * 1. Redistributions of source code must retain the above copyright | | 38 | * 1. Redistributions of source code must retain the above copyright |
39 | * notice, this list of conditions and the following disclaimer. | | 39 | * notice, this list of conditions and the following disclaimer. |
40 | * 2. Redistributions in binary form must reproduce the above copyright | | 40 | * 2. Redistributions in binary form must reproduce the above copyright |
41 | * notice, this list of conditions and the following disclaimer in the | | 41 | * notice, this list of conditions and the following disclaimer in the |
42 | * documentation and/or other materials provided with the distribution. | | 42 | * documentation and/or other materials provided with the distribution. |
43 | * 3. All advertising materials mentioning features or use of this software | | 43 | * 3. All advertising materials mentioning features or use of this software |
44 | * must display the following acknowledgement: | | 44 | * must display the following acknowledgement: |
45 | * This product includes software developed by TooLs GmbH. | | 45 | * This product includes software developed by TooLs GmbH. |
46 | * 4. The name of TooLs GmbH may not be used to endorse or promote products | | 46 | * 4. The name of TooLs GmbH may not be used to endorse or promote products |
47 | * derived from this software without specific prior written permission. | | 47 | * derived from this software without specific prior written permission. |
48 | * | | 48 | * |
49 | * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR | | 49 | * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR |
50 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 50 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
51 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 51 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
52 | * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | | 52 | * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
53 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | | 53 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
54 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | | 54 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; |
55 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | | 55 | * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
56 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | | 56 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR |
57 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | | 57 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF |
58 | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 58 | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
59 | */ | | 59 | */ |
60 | | | 60 | |
61 | #include <sys/cdefs.h> | | 61 | #include <sys/cdefs.h> |
62 | __KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.321 2011/08/26 09:07:48 reinoud Exp $"); | | 62 | __KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.322 2011/08/26 09:13:08 reinoud Exp $"); |
63 | | | 63 | |
64 | #include "opt_ktrace.h" | | 64 | #include "opt_ktrace.h" |
65 | #include "opt_modular.h" | | 65 | #include "opt_modular.h" |
66 | #include "opt_syscall_debug.h" | | 66 | #include "opt_syscall_debug.h" |
67 | #include "veriexec.h" | | 67 | #include "veriexec.h" |
68 | #include "opt_pax.h" | | 68 | #include "opt_pax.h" |
69 | #include "opt_sa.h" | | 69 | #include "opt_sa.h" |
70 | | | 70 | |
71 | #include <sys/param.h> | | 71 | #include <sys/param.h> |
72 | #include <sys/systm.h> | | 72 | #include <sys/systm.h> |
73 | #include <sys/filedesc.h> | | 73 | #include <sys/filedesc.h> |
74 | #include <sys/kernel.h> | | 74 | #include <sys/kernel.h> |
75 | #include <sys/proc.h> | | 75 | #include <sys/proc.h> |
76 | #include <sys/mount.h> | | 76 | #include <sys/mount.h> |
77 | #include <sys/malloc.h> | | 77 | #include <sys/malloc.h> |
78 | #include <sys/kmem.h> | | 78 | #include <sys/kmem.h> |
79 | #include <sys/namei.h> | | 79 | #include <sys/namei.h> |
80 | #include <sys/vnode.h> | | 80 | #include <sys/vnode.h> |
81 | #include <sys/file.h> | | 81 | #include <sys/file.h> |
82 | #include <sys/acct.h> | | 82 | #include <sys/acct.h> |
83 | #include <sys/exec.h> | | 83 | #include <sys/exec.h> |
84 | #include <sys/ktrace.h> | | 84 | #include <sys/ktrace.h> |
85 | #include <sys/uidinfo.h> | | 85 | #include <sys/uidinfo.h> |
86 | #include <sys/wait.h> | | 86 | #include <sys/wait.h> |
87 | #include <sys/mman.h> | | 87 | #include <sys/mman.h> |
88 | #include <sys/ras.h> | | 88 | #include <sys/ras.h> |
89 | #include <sys/signalvar.h> | | 89 | #include <sys/signalvar.h> |
90 | #include <sys/stat.h> | | 90 | #include <sys/stat.h> |
91 | #include <sys/syscall.h> | | 91 | #include <sys/syscall.h> |
92 | #include <sys/kauth.h> | | 92 | #include <sys/kauth.h> |
93 | #include <sys/lwpctl.h> | | 93 | #include <sys/lwpctl.h> |
94 | #include <sys/pax.h> | | 94 | #include <sys/pax.h> |
95 | #include <sys/cpu.h> | | 95 | #include <sys/cpu.h> |
96 | #include <sys/module.h> | | 96 | #include <sys/module.h> |
97 | #include <sys/sa.h> | | 97 | #include <sys/sa.h> |
98 | #include <sys/savar.h> | | 98 | #include <sys/savar.h> |
99 | #include <sys/syscallvar.h> | | 99 | #include <sys/syscallvar.h> |
100 | #include <sys/syscallargs.h> | | 100 | #include <sys/syscallargs.h> |
101 | #if NVERIEXEC > 0 | | 101 | #if NVERIEXEC > 0 |
102 | #include <sys/verified_exec.h> | | 102 | #include <sys/verified_exec.h> |
103 | #endif /* NVERIEXEC > 0 */ | | 103 | #endif /* NVERIEXEC > 0 */ |
104 | #include <sys/sdt.h> | | 104 | #include <sys/sdt.h> |
105 | | | 105 | |
106 | #include <uvm/uvm_extern.h> | | 106 | #include <uvm/uvm_extern.h> |
107 | | | 107 | |
108 | #include <machine/reg.h> | | 108 | #include <machine/reg.h> |
109 | | | 109 | |
110 | #include <compat/common/compat_util.h> | | 110 | #include <compat/common/compat_util.h> |
111 | | | 111 | |
112 | static int exec_sigcode_map(struct proc *, const struct emul *); | | 112 | static int exec_sigcode_map(struct proc *, const struct emul *); |
113 | | | 113 | |
114 | #ifdef DEBUG_EXEC | | 114 | #ifdef DEBUG_EXEC |
115 | #define DPRINTF(a) printf a | | 115 | #define DPRINTF(a) printf a |
116 | #define COPYPRINTF(s, a, b) printf("%s, %d: copyout%s @%p %zu\n", __func__, \ | | 116 | #define COPYPRINTF(s, a, b) printf("%s, %d: copyout%s @%p %zu\n", __func__, \ |
117 | __LINE__, (s), (a), (b)) | | 117 | __LINE__, (s), (a), (b)) |
118 | #else | | 118 | #else |
119 | #define DPRINTF(a) | | 119 | #define DPRINTF(a) |
120 | #define COPYPRINTF(s, a, b) | | 120 | #define COPYPRINTF(s, a, b) |
121 | #endif /* DEBUG_EXEC */ | | 121 | #endif /* DEBUG_EXEC */ |
122 | | | 122 | |
123 | /* | | 123 | /* |
124 | * DTrace SDT provider definitions | | 124 | * DTrace SDT provider definitions |
125 | */ | | 125 | */ |
126 | SDT_PROBE_DEFINE(proc,,,exec, | | 126 | SDT_PROBE_DEFINE(proc,,,exec, |
127 | "char *", NULL, | | 127 | "char *", NULL, |
128 | NULL, NULL, NULL, NULL, | | 128 | NULL, NULL, NULL, NULL, |
129 | NULL, NULL, NULL, NULL); | | 129 | NULL, NULL, NULL, NULL); |
130 | SDT_PROBE_DEFINE(proc,,,exec_success, | | 130 | SDT_PROBE_DEFINE(proc,,,exec_success, |
131 | "char *", NULL, | | 131 | "char *", NULL, |
132 | NULL, NULL, NULL, NULL, | | 132 | NULL, NULL, NULL, NULL, |
133 | NULL, NULL, NULL, NULL); | | 133 | NULL, NULL, NULL, NULL); |
134 | SDT_PROBE_DEFINE(proc,,,exec_failure, | | 134 | SDT_PROBE_DEFINE(proc,,,exec_failure, |
135 | "int", NULL, | | 135 | "int", NULL, |
136 | NULL, NULL, NULL, NULL, | | 136 | NULL, NULL, NULL, NULL, |
137 | NULL, NULL, NULL, NULL); | | 137 | NULL, NULL, NULL, NULL); |
138 | | | 138 | |
139 | /* | | 139 | /* |
140 | * Exec function switch: | | 140 | * Exec function switch: |
141 | * | | 141 | * |
142 | * Note that each makecmds function is responsible for loading the | | 142 | * Note that each makecmds function is responsible for loading the |
143 | * exec package with the necessary functions for any exec-type-specific | | 143 | * exec package with the necessary functions for any exec-type-specific |
144 | * handling. | | 144 | * handling. |
145 | * | | 145 | * |
146 | * Functions for specific exec types should be defined in their own | | 146 | * Functions for specific exec types should be defined in their own |
147 | * header file. | | 147 | * header file. |
148 | */ | | 148 | */ |
149 | static const struct execsw **execsw = NULL; | | 149 | static const struct execsw **execsw = NULL; |
150 | static int nexecs; | | 150 | static int nexecs; |
151 | | | 151 | |
152 | u_int exec_maxhdrsz; /* must not be static - used by netbsd32 */ | | 152 | u_int exec_maxhdrsz; /* must not be static - used by netbsd32 */ |
153 | | | 153 | |
154 | /* list of dynamically loaded execsw entries */ | | 154 | /* list of dynamically loaded execsw entries */ |
155 | static LIST_HEAD(execlist_head, exec_entry) ex_head = | | 155 | static LIST_HEAD(execlist_head, exec_entry) ex_head = |
156 | LIST_HEAD_INITIALIZER(ex_head); | | 156 | LIST_HEAD_INITIALIZER(ex_head); |
157 | struct exec_entry { | | 157 | struct exec_entry { |
158 | LIST_ENTRY(exec_entry) ex_list; | | 158 | LIST_ENTRY(exec_entry) ex_list; |
159 | SLIST_ENTRY(exec_entry) ex_slist; | | 159 | SLIST_ENTRY(exec_entry) ex_slist; |
160 | const struct execsw *ex_sw; | | 160 | const struct execsw *ex_sw; |
161 | }; | | 161 | }; |
162 | | | 162 | |
163 | #ifndef __HAVE_SYSCALL_INTERN | | 163 | #ifndef __HAVE_SYSCALL_INTERN |
164 | void syscall(void); | | 164 | void syscall(void); |
165 | #endif | | 165 | #endif |
166 | | | 166 | |
167 | #ifdef KERN_SA | | 167 | #ifdef KERN_SA |
168 | static struct sa_emul saemul_netbsd = { | | 168 | static struct sa_emul saemul_netbsd = { |
169 | sizeof(ucontext_t), | | 169 | sizeof(ucontext_t), |
170 | sizeof(struct sa_t), | | 170 | sizeof(struct sa_t), |
171 | sizeof(struct sa_t *), | | 171 | sizeof(struct sa_t *), |
172 | NULL, | | 172 | NULL, |
173 | NULL, | | 173 | NULL, |
174 | cpu_upcall, | | 174 | cpu_upcall, |
175 | (void (*)(struct lwp *, void *))getucontext_sa, | | 175 | (void (*)(struct lwp *, void *))getucontext_sa, |
176 | sa_ucsp | | 176 | sa_ucsp |
177 | }; | | 177 | }; |
178 | #endif /* KERN_SA */ | | 178 | #endif /* KERN_SA */ |
179 | | | 179 | |
180 | /* NetBSD emul struct */ | | 180 | /* NetBSD emul struct */ |
181 | struct emul emul_netbsd = { | | 181 | struct emul emul_netbsd = { |
182 | .e_name = "netbsd", | | 182 | .e_name = "netbsd", |
183 | .e_path = NULL, | | 183 | .e_path = NULL, |
184 | #ifndef __HAVE_MINIMAL_EMUL | | 184 | #ifndef __HAVE_MINIMAL_EMUL |
185 | .e_flags = EMUL_HAS_SYS___syscall, | | 185 | .e_flags = EMUL_HAS_SYS___syscall, |
186 | .e_errno = NULL, | | 186 | .e_errno = NULL, |
187 | .e_nosys = SYS_syscall, | | 187 | .e_nosys = SYS_syscall, |
188 | .e_nsysent = SYS_NSYSENT, | | 188 | .e_nsysent = SYS_NSYSENT, |
189 | #endif | | 189 | #endif |
190 | .e_sysent = sysent, | | 190 | .e_sysent = sysent, |
191 | #ifdef SYSCALL_DEBUG | | 191 | #ifdef SYSCALL_DEBUG |
192 | .e_syscallnames = syscallnames, | | 192 | .e_syscallnames = syscallnames, |
193 | #else | | 193 | #else |
194 | .e_syscallnames = NULL, | | 194 | .e_syscallnames = NULL, |
195 | #endif | | 195 | #endif |
196 | .e_sendsig = sendsig, | | 196 | .e_sendsig = sendsig, |
197 | .e_trapsignal = trapsignal, | | 197 | .e_trapsignal = trapsignal, |
198 | .e_tracesig = NULL, | | 198 | .e_tracesig = NULL, |
199 | .e_sigcode = NULL, | | 199 | .e_sigcode = NULL, |
200 | .e_esigcode = NULL, | | 200 | .e_esigcode = NULL, |
201 | .e_sigobject = NULL, | | 201 | .e_sigobject = NULL, |
202 | .e_setregs = setregs, | | 202 | .e_setregs = setregs, |
203 | .e_proc_exec = NULL, | | 203 | .e_proc_exec = NULL, |
204 | .e_proc_fork = NULL, | | 204 | .e_proc_fork = NULL, |
205 | .e_proc_exit = NULL, | | 205 | .e_proc_exit = NULL, |
206 | .e_lwp_fork = NULL, | | 206 | .e_lwp_fork = NULL, |
207 | .e_lwp_exit = NULL, | | 207 | .e_lwp_exit = NULL, |
208 | #ifdef __HAVE_SYSCALL_INTERN | | 208 | #ifdef __HAVE_SYSCALL_INTERN |
209 | .e_syscall_intern = syscall_intern, | | 209 | .e_syscall_intern = syscall_intern, |
210 | #else | | 210 | #else |
211 | .e_syscall = syscall, | | 211 | .e_syscall = syscall, |
212 | #endif | | 212 | #endif |
213 | .e_sysctlovly = NULL, | | 213 | .e_sysctlovly = NULL, |
214 | .e_fault = NULL, | | 214 | .e_fault = NULL, |
215 | .e_vm_default_addr = uvm_default_mapaddr, | | 215 | .e_vm_default_addr = uvm_default_mapaddr, |
216 | .e_usertrap = NULL, | | 216 | .e_usertrap = NULL, |
217 | #ifdef KERN_SA | | 217 | #ifdef KERN_SA |
218 | .e_sa = &saemul_netbsd, | | 218 | .e_sa = &saemul_netbsd, |
219 | #else | | 219 | #else |
220 | .e_sa = NULL, | | 220 | .e_sa = NULL, |
221 | #endif | | 221 | #endif |
222 | .e_ucsize = sizeof(ucontext_t), | | 222 | .e_ucsize = sizeof(ucontext_t), |
223 | .e_startlwp = startlwp | | 223 | .e_startlwp = startlwp |
224 | }; | | 224 | }; |
225 | | | 225 | |
226 | /* | | 226 | /* |
227 | * Exec lock. Used to control access to execsw[] structures. | | 227 | * Exec lock. Used to control access to execsw[] structures. |
228 | * This must not be static so that netbsd32 can access it, too. | | 228 | * This must not be static so that netbsd32 can access it, too. |
229 | */ | | 229 | */ |
230 | krwlock_t exec_lock; | | 230 | krwlock_t exec_lock; |
231 | | | 231 | |
232 | static kmutex_t sigobject_lock; | | 232 | static kmutex_t sigobject_lock; |
233 | | | 233 | |
234 | static void * | | 234 | static void * |
235 | exec_pool_alloc(struct pool *pp, int flags) | | 235 | exec_pool_alloc(struct pool *pp, int flags) |
236 | { | | 236 | { |
237 | | | 237 | |
238 | return (void *)uvm_km_alloc(kernel_map, NCARGS, 0, | | 238 | return (void *)uvm_km_alloc(kernel_map, NCARGS, 0, |
239 | UVM_KMF_PAGEABLE | UVM_KMF_WAITVA); | | 239 | UVM_KMF_PAGEABLE | UVM_KMF_WAITVA); |
240 | } | | 240 | } |
241 | | | 241 | |
242 | static void | | 242 | static void |
243 | exec_pool_free(struct pool *pp, void *addr) | | 243 | exec_pool_free(struct pool *pp, void *addr) |
244 | { | | 244 | { |
245 | | | 245 | |
246 | uvm_km_free(kernel_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE); | | 246 | uvm_km_free(kernel_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE); |
247 | } | | 247 | } |
248 | | | 248 | |
249 | static struct pool exec_pool; | | 249 | static struct pool exec_pool; |
250 | | | 250 | |
251 | static struct pool_allocator exec_palloc = { | | 251 | static struct pool_allocator exec_palloc = { |
252 | .pa_alloc = exec_pool_alloc, | | 252 | .pa_alloc = exec_pool_alloc, |
253 | .pa_free = exec_pool_free, | | 253 | .pa_free = exec_pool_free, |
254 | .pa_pagesz = NCARGS | | 254 | .pa_pagesz = NCARGS |
255 | }; | | 255 | }; |
256 | | | 256 | |
257 | /* | | 257 | /* |
258 | * check exec: | | 258 | * check exec: |
259 | * given an "executable" described in the exec package's namei info, | | 259 | * given an "executable" described in the exec package's namei info, |
260 | * see what we can do with it. | | 260 | * see what we can do with it. |
261 | * | | 261 | * |
262 | * ON ENTRY: | | 262 | * ON ENTRY: |
263 | * exec package with appropriate namei info | | 263 | * exec package with appropriate namei info |
264 | * lwp pointer of exec'ing lwp | | 264 | * lwp pointer of exec'ing lwp |
265 | * NO SELF-LOCKED VNODES | | 265 | * NO SELF-LOCKED VNODES |
266 | * | | 266 | * |
267 | * ON EXIT: | | 267 | * ON EXIT: |
268 | * error: nothing held, etc. exec header still allocated. | | 268 | * error: nothing held, etc. exec header still allocated. |
269 | * ok: filled exec package, executable's vnode (unlocked). | | 269 | * ok: filled exec package, executable's vnode (unlocked). |
270 | * | | 270 | * |
271 | * EXEC SWITCH ENTRY: | | 271 | * EXEC SWITCH ENTRY: |
272 | * Locked vnode to check, exec package, proc. | | 272 | * Locked vnode to check, exec package, proc. |
273 | * | | 273 | * |
274 | * EXEC SWITCH EXIT: | | 274 | * EXEC SWITCH EXIT: |
275 | * ok: return 0, filled exec package, executable's vnode (unlocked). | | 275 | * ok: return 0, filled exec package, executable's vnode (unlocked). |
276 | * error: destructive: | | 276 | * error: destructive: |
277 | * everything deallocated execept exec header. | | 277 | * everything deallocated execept exec header. |
278 | * non-destructive: | | 278 | * non-destructive: |
279 | * error code, executable's vnode (unlocked), | | 279 | * error code, executable's vnode (unlocked), |
280 | * exec header unmodified. | | 280 | * exec header unmodified. |
281 | */ | | 281 | */ |
282 | int | | 282 | int |
283 | /*ARGSUSED*/ | | 283 | /*ARGSUSED*/ |
284 | check_exec(struct lwp *l, struct exec_package *epp, struct pathbuf *pb) | | 284 | check_exec(struct lwp *l, struct exec_package *epp, struct pathbuf *pb) |
285 | { | | 285 | { |
286 | int error, i; | | 286 | int error, i; |
287 | struct vnode *vp; | | 287 | struct vnode *vp; |
288 | struct nameidata nd; | | 288 | struct nameidata nd; |
289 | size_t resid; | | 289 | size_t resid; |
290 | | | 290 | |
291 | NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb); | | 291 | NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb); |
292 | | | 292 | |
293 | /* first get the vnode */ | | 293 | /* first get the vnode */ |
294 | if ((error = namei(&nd)) != 0) | | 294 | if ((error = namei(&nd)) != 0) |
295 | return error; | | 295 | return error; |
296 | epp->ep_vp = vp = nd.ni_vp; | | 296 | epp->ep_vp = vp = nd.ni_vp; |
297 | /* this cannot overflow as both are size PATH_MAX */ | | 297 | /* this cannot overflow as both are size PATH_MAX */ |
298 | strcpy(epp->ep_resolvedname, nd.ni_pnbuf); | | 298 | strcpy(epp->ep_resolvedname, nd.ni_pnbuf); |
299 | | | 299 | |
300 | #ifdef DIAGNOSTIC | | 300 | #ifdef DIAGNOSTIC |
301 | /* paranoia (take this out once namei stuff stabilizes) */ | | 301 | /* paranoia (take this out once namei stuff stabilizes) */ |
302 | memset(nd.ni_pnbuf, '~', PATH_MAX); | | 302 | memset(nd.ni_pnbuf, '~', PATH_MAX); |
303 | #endif | | 303 | #endif |
304 | | | 304 | |
305 | /* check access and type */ | | 305 | /* check access and type */ |
306 | if (vp->v_type != VREG) { | | 306 | if (vp->v_type != VREG) { |
307 | error = EACCES; | | 307 | error = EACCES; |
308 | goto bad1; | | 308 | goto bad1; |
309 | } | | 309 | } |
310 | if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0) | | 310 | if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0) |
311 | goto bad1; | | 311 | goto bad1; |
312 | | | 312 | |
313 | /* get attributes */ | | 313 | /* get attributes */ |
314 | if ((error = VOP_GETATTR(vp, epp->ep_vap, l->l_cred)) != 0) | | 314 | if ((error = VOP_GETATTR(vp, epp->ep_vap, l->l_cred)) != 0) |
315 | goto bad1; | | 315 | goto bad1; |
316 | | | 316 | |
317 | /* Check mount point */ | | 317 | /* Check mount point */ |
318 | if (vp->v_mount->mnt_flag & MNT_NOEXEC) { | | 318 | if (vp->v_mount->mnt_flag & MNT_NOEXEC) { |
319 | error = EACCES; | | 319 | error = EACCES; |
320 | goto bad1; | | 320 | goto bad1; |
321 | } | | 321 | } |
322 | if (vp->v_mount->mnt_flag & MNT_NOSUID) | | 322 | if (vp->v_mount->mnt_flag & MNT_NOSUID) |
323 | epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID); | | 323 | epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID); |
324 | | | 324 | |
325 | /* try to open it */ | | 325 | /* try to open it */ |
326 | if ((error = VOP_OPEN(vp, FREAD, l->l_cred)) != 0) | | 326 | if ((error = VOP_OPEN(vp, FREAD, l->l_cred)) != 0) |
327 | goto bad1; | | 327 | goto bad1; |
328 | | | 328 | |
329 | /* unlock vp, since we need it unlocked from here on out. */ | | 329 | /* unlock vp, since we need it unlocked from here on out. */ |
330 | VOP_UNLOCK(vp); | | 330 | VOP_UNLOCK(vp); |
331 | | | 331 | |
332 | #if NVERIEXEC > 0 | | 332 | #if NVERIEXEC > 0 |
333 | error = veriexec_verify(l, vp, epp->ep_resolvedname, | | 333 | error = veriexec_verify(l, vp, epp->ep_resolvedname, |
334 | epp->ep_flags & EXEC_INDIR ? VERIEXEC_INDIRECT : VERIEXEC_DIRECT, | | 334 | epp->ep_flags & EXEC_INDIR ? VERIEXEC_INDIRECT : VERIEXEC_DIRECT, |
335 | NULL); | | 335 | NULL); |
336 | if (error) | | 336 | if (error) |
337 | goto bad2; | | 337 | goto bad2; |
338 | #endif /* NVERIEXEC > 0 */ | | 338 | #endif /* NVERIEXEC > 0 */ |
339 | | | 339 | |
340 | #ifdef PAX_SEGVGUARD | | 340 | #ifdef PAX_SEGVGUARD |
341 | error = pax_segvguard(l, vp, epp->ep_resolvedname, false); | | 341 | error = pax_segvguard(l, vp, epp->ep_resolvedname, false); |
342 | if (error) | | 342 | if (error) |
343 | goto bad2; | | 343 | goto bad2; |
344 | #endif /* PAX_SEGVGUARD */ | | 344 | #endif /* PAX_SEGVGUARD */ |
345 | | | 345 | |
346 | /* now we have the file, get the exec header */ | | 346 | /* now we have the file, get the exec header */ |
347 | error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0, | | 347 | error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0, |
348 | UIO_SYSSPACE, 0, l->l_cred, &resid, NULL); | | 348 | UIO_SYSSPACE, 0, l->l_cred, &resid, NULL); |
349 | if (error) | | 349 | if (error) |
350 | goto bad2; | | 350 | goto bad2; |
351 | epp->ep_hdrvalid = epp->ep_hdrlen - resid; | | 351 | epp->ep_hdrvalid = epp->ep_hdrlen - resid; |
352 | | | 352 | |
353 | /* | | 353 | /* |
354 | * Set up default address space limits. Can be overridden | | 354 | * Set up default address space limits. Can be overridden |
355 | * by individual exec packages. | | 355 | * by individual exec packages. |
356 | * | | 356 | * |
357 | * XXX probably should be all done in the exec packages. | | 357 | * XXX probably should be all done in the exec packages. |
358 | */ | | 358 | */ |
359 | epp->ep_vm_minaddr = VM_MIN_ADDRESS; | | 359 | epp->ep_vm_minaddr = VM_MIN_ADDRESS; |
360 | epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS; | | 360 | epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS; |
361 | /* | | 361 | /* |
362 | * set up the vmcmds for creation of the process | | 362 | * set up the vmcmds for creation of the process |
363 | * address space | | 363 | * address space |
364 | */ | | 364 | */ |
365 | error = ENOEXEC; | | 365 | error = ENOEXEC; |
366 | for (i = 0; i < nexecs; i++) { | | 366 | for (i = 0; i < nexecs; i++) { |
367 | int newerror; | | 367 | int newerror; |
368 | | | 368 | |
369 | epp->ep_esch = execsw[i]; | | 369 | epp->ep_esch = execsw[i]; |
370 | newerror = (*execsw[i]->es_makecmds)(l, epp); | | 370 | newerror = (*execsw[i]->es_makecmds)(l, epp); |
371 | | | 371 | |
372 | if (!newerror) { | | 372 | if (!newerror) { |
373 | /* Seems ok: check that entry point is not too high */ | | 373 | /* Seems ok: check that entry point is not too high */ |
374 | if (epp->ep_entry > VM_MAXUSER_ADDRESS) { | | 374 | if (epp->ep_entry > VM_MAXUSER_ADDRESS) { |
375 | aprint_verbose("check_exec: rejecting due to " | | 375 | #ifdef DIAGNOSTIC |
| | | 376 | printf("check_exec: rejecting due to " |
376 | "too high entry address\n"); | | 377 | "too high entry address\n"); |
| | | 378 | #endif |
377 | error = ENOEXEC; | | 379 | error = ENOEXEC; |
378 | break; | | 380 | break; |
379 | } | | 381 | } |
380 | #ifdef VM_CHECK_MIN_ADDRESS | | 382 | #ifdef VM_CHECK_MIN_ADDRESS |
381 | /* Seems ok: check that entry point is not too low */ | | 383 | /* Seems ok: check that entry point is not too low */ |
382 | if (epp->ep_entry < VM_MIN_ADDRESS) { | | 384 | if (epp->ep_entry < VM_MIN_ADDRESS) { |
383 | aprint_verbose("check_exec: rejecting due to " | | 385 | #ifdef DIAGNOSTIC |
| | | 386 | printf("check_exec: rejecting due to " |
384 | "too low entry address\n"); | | 387 | "too low entry address\n"); |
| | | 388 | #endif |
385 | error = ENOEXEC; | | 389 | error = ENOEXEC; |
386 | break; | | 390 | break; |
387 | } | | 391 | } |
388 | #endif | | 392 | #endif |
389 | | | 393 | |
390 | /* check limits */ | | 394 | /* check limits */ |
391 | if ((epp->ep_tsize > MAXTSIZ) || | | 395 | if ((epp->ep_tsize > MAXTSIZ) || |
392 | (epp->ep_dsize > (u_quad_t)l->l_proc->p_rlimit | | 396 | (epp->ep_dsize > (u_quad_t)l->l_proc->p_rlimit |
393 | [RLIMIT_DATA].rlim_cur)) { | | 397 | [RLIMIT_DATA].rlim_cur)) { |
394 | aprint_verbose("check_exec: rejecting due to " | | 398 | #ifdef DIAGNOSTIC |
| | | 399 | printf("check_exec: rejecting due to " |
395 | "limits\n"); | | 400 | "limits\n"); |
| | | 401 | #endif |
396 | error = ENOMEM; | | 402 | error = ENOMEM; |
397 | break; | | 403 | break; |
398 | } | | 404 | } |
399 | return 0; | | 405 | return 0; |
400 | } | | 406 | } |
401 | | | 407 | |
402 | if (epp->ep_emul_root != NULL) { | | 408 | if (epp->ep_emul_root != NULL) { |
403 | vrele(epp->ep_emul_root); | | 409 | vrele(epp->ep_emul_root); |
404 | epp->ep_emul_root = NULL; | | 410 | epp->ep_emul_root = NULL; |
405 | } | | 411 | } |
406 | if (epp->ep_interp != NULL) { | | 412 | if (epp->ep_interp != NULL) { |
407 | vrele(epp->ep_interp); | | 413 | vrele(epp->ep_interp); |
408 | epp->ep_interp = NULL; | | 414 | epp->ep_interp = NULL; |
409 | } | | 415 | } |
410 | | | 416 | |
411 | /* make sure the first "interesting" error code is saved. */ | | 417 | /* make sure the first "interesting" error code is saved. */ |
412 | if (error == ENOEXEC) | | 418 | if (error == ENOEXEC) |
413 | error = newerror; | | 419 | error = newerror; |
414 | | | 420 | |
415 | if (epp->ep_flags & EXEC_DESTR) | | 421 | if (epp->ep_flags & EXEC_DESTR) |
416 | /* Error from "#!" code, tidied up by recursive call */ | | 422 | /* Error from "#!" code, tidied up by recursive call */ |
417 | return error; | | 423 | return error; |
418 | } | | 424 | } |
419 | | | 425 | |
420 | /* not found, error */ | | 426 | /* not found, error */ |
421 | | | 427 | |
422 | /* | | 428 | /* |
423 | * free any vmspace-creation commands, | | 429 | * free any vmspace-creation commands, |
424 | * and release their references | | 430 | * and release their references |
425 | */ | | 431 | */ |
426 | kill_vmcmds(&epp->ep_vmcmds); | | 432 | kill_vmcmds(&epp->ep_vmcmds); |
427 | | | 433 | |
428 | bad2: | | 434 | bad2: |
429 | /* | | 435 | /* |
430 | * close and release the vnode, restore the old one, free the | | 436 | * close and release the vnode, restore the old one, free the |
431 | * pathname buf, and punt. | | 437 | * pathname buf, and punt. |
432 | */ | | 438 | */ |
433 | vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); | | 439 | vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); |
434 | VOP_CLOSE(vp, FREAD, l->l_cred); | | 440 | VOP_CLOSE(vp, FREAD, l->l_cred); |
435 | vput(vp); | | 441 | vput(vp); |
436 | return error; | | 442 | return error; |
437 | | | 443 | |
438 | bad1: | | 444 | bad1: |
439 | /* | | 445 | /* |
440 | * free the namei pathname buffer, and put the vnode | | 446 | * free the namei pathname buffer, and put the vnode |
441 | * (which we don't yet have open). | | 447 | * (which we don't yet have open). |
442 | */ | | 448 | */ |
443 | vput(vp); /* was still locked */ | | 449 | vput(vp); /* was still locked */ |
444 | return error; | | 450 | return error; |
445 | } | | 451 | } |
446 | | | 452 | |
447 | #ifdef __MACHINE_STACK_GROWS_UP | | 453 | #ifdef __MACHINE_STACK_GROWS_UP |
448 | #define STACK_PTHREADSPACE NBPG | | 454 | #define STACK_PTHREADSPACE NBPG |
449 | #else | | 455 | #else |
450 | #define STACK_PTHREADSPACE 0 | | 456 | #define STACK_PTHREADSPACE 0 |
451 | #endif | | 457 | #endif |
452 | | | 458 | |
453 | static int | | 459 | static int |
454 | execve_fetch_element(char * const *array, size_t index, char **value) | | 460 | execve_fetch_element(char * const *array, size_t index, char **value) |
455 | { | | 461 | { |
456 | return copyin(array + index, value, sizeof(*value)); | | 462 | return copyin(array + index, value, sizeof(*value)); |
457 | } | | 463 | } |
458 | | | 464 | |
459 | /* | | 465 | /* |
460 | * exec system call | | 466 | * exec system call |
461 | */ | | 467 | */ |
462 | /* ARGSUSED */ | | 468 | /* ARGSUSED */ |
463 | int | | 469 | int |
464 | sys_execve(struct lwp *l, const struct sys_execve_args *uap, register_t *retval) | | 470 | sys_execve(struct lwp *l, const struct sys_execve_args *uap, register_t *retval) |
465 | { | | 471 | { |
466 | /* { | | 472 | /* { |
467 | syscallarg(const char *) path; | | 473 | syscallarg(const char *) path; |
468 | syscallarg(char * const *) argp; | | 474 | syscallarg(char * const *) argp; |
469 | syscallarg(char * const *) envp; | | 475 | syscallarg(char * const *) envp; |
470 | } */ | | 476 | } */ |
471 | | | 477 | |
472 | return execve1(l, SCARG(uap, path), SCARG(uap, argp), | | 478 | return execve1(l, SCARG(uap, path), SCARG(uap, argp), |
473 | SCARG(uap, envp), execve_fetch_element); | | 479 | SCARG(uap, envp), execve_fetch_element); |
474 | } | | 480 | } |
475 | | | 481 | |
/*
 * fexecve system call: exec an image referenced by an open file
 * descriptor.  Not implemented yet; always fails with ENOSYS.
 */
int
sys_fexecve(struct lwp *l, const struct sys_fexecve_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(char * const *) argp;
		syscallarg(char * const *) envp;
	} */

	return ENOSYS;
}
488 | | | 494 | |
489 | /* | | 495 | /* |
490 | * Load modules to try and execute an image that we do not understand. | | 496 | * Load modules to try and execute an image that we do not understand. |
491 | * If no execsw entries are present, we load those likely to be needed | | 497 | * If no execsw entries are present, we load those likely to be needed |
492 | * in order to run native images only. Otherwise, we autoload all | | 498 | * in order to run native images only. Otherwise, we autoload all |
493 | * possible modules that could let us run the binary. XXX lame | | 499 | * possible modules that could let us run the binary. XXX lame |
494 | */ | | 500 | */ |
495 | static void | | 501 | static void |
496 | exec_autoload(void) | | 502 | exec_autoload(void) |
497 | { | | 503 | { |
498 | #ifdef MODULAR | | 504 | #ifdef MODULAR |
499 | static const char * const native[] = { | | 505 | static const char * const native[] = { |
500 | "exec_elf32", | | 506 | "exec_elf32", |
501 | "exec_elf64", | | 507 | "exec_elf64", |
502 | "exec_script", | | 508 | "exec_script", |
503 | NULL | | 509 | NULL |
504 | }; | | 510 | }; |
505 | static const char * const compat[] = { | | 511 | static const char * const compat[] = { |
506 | "exec_elf32", | | 512 | "exec_elf32", |
507 | "exec_elf64", | | 513 | "exec_elf64", |
508 | "exec_script", | | 514 | "exec_script", |
509 | "exec_aout", | | 515 | "exec_aout", |
510 | "exec_coff", | | 516 | "exec_coff", |
511 | "exec_ecoff", | | 517 | "exec_ecoff", |
512 | "compat_aoutm68k", | | 518 | "compat_aoutm68k", |
513 | "compat_freebsd", | | 519 | "compat_freebsd", |
514 | "compat_ibcs2", | | 520 | "compat_ibcs2", |
515 | "compat_linux", | | 521 | "compat_linux", |
516 | "compat_linux32", | | 522 | "compat_linux32", |
517 | "compat_netbsd32", | | 523 | "compat_netbsd32", |
518 | "compat_sunos", | | 524 | "compat_sunos", |
519 | "compat_sunos32", | | 525 | "compat_sunos32", |
520 | "compat_svr4", | | 526 | "compat_svr4", |
521 | "compat_svr4_32", | | 527 | "compat_svr4_32", |
522 | "compat_ultrix", | | 528 | "compat_ultrix", |
523 | NULL | | 529 | NULL |
524 | }; | | 530 | }; |
525 | char const * const *list; | | 531 | char const * const *list; |
526 | int i; | | 532 | int i; |
527 | | | 533 | |
528 | list = (nexecs == 0 ? native : compat); | | 534 | list = (nexecs == 0 ? native : compat); |
529 | for (i = 0; list[i] != NULL; i++) { | | 535 | for (i = 0; list[i] != NULL; i++) { |
530 | if (module_autoload(list[i], MODULE_CLASS_MISC) != 0) { | | 536 | if (module_autoload(list[i], MODULE_CLASS_MISC) != 0) { |
531 | continue; | | 537 | continue; |
532 | } | | 538 | } |
533 | yield(); | | 539 | yield(); |
534 | } | | 540 | } |
535 | #endif | | 541 | #endif |
536 | } | | 542 | } |
537 | | | 543 | |
538 | int | | 544 | int |
539 | execve1(struct lwp *l, const char *path, char * const *args, | | 545 | execve1(struct lwp *l, const char *path, char * const *args, |
540 | char * const *envs, execve_fetch_element_t fetch_element) | | 546 | char * const *envs, execve_fetch_element_t fetch_element) |
541 | { | | 547 | { |
542 | int error; | | 548 | int error; |
543 | struct exec_package pack; | | 549 | struct exec_package pack; |
544 | struct pathbuf *pb; | | 550 | struct pathbuf *pb; |
545 | struct vattr attr; | | 551 | struct vattr attr; |
546 | struct proc *p; | | 552 | struct proc *p; |
547 | char *argp; | | 553 | char *argp; |
548 | char *dp, *sp; | | 554 | char *dp, *sp; |
549 | long argc, envc; | | 555 | long argc, envc; |
550 | size_t i, len; | | 556 | size_t i, len; |
551 | char *stack; | | 557 | char *stack; |
552 | struct ps_strings arginfo; | | 558 | struct ps_strings arginfo; |
553 | struct ps_strings32 arginfo32; | | 559 | struct ps_strings32 arginfo32; |
554 | void *aip; | | 560 | void *aip; |
555 | struct vmspace *vm; | | 561 | struct vmspace *vm; |
556 | struct exec_fakearg *tmpfap; | | 562 | struct exec_fakearg *tmpfap; |
557 | int szsigcode; | | 563 | int szsigcode; |
558 | struct exec_vmcmd *base_vcp; | | 564 | struct exec_vmcmd *base_vcp; |
559 | int oldlwpflags; | | 565 | int oldlwpflags; |
560 | ksiginfo_t ksi; | | 566 | ksiginfo_t ksi; |
561 | ksiginfoq_t kq; | | 567 | ksiginfoq_t kq; |
562 | const char *pathstring; | | 568 | const char *pathstring; |
563 | char *resolvedpathbuf; | | 569 | char *resolvedpathbuf; |
564 | const char *commandname; | | 570 | const char *commandname; |
565 | u_int modgen; | | 571 | u_int modgen; |
566 | size_t ps_strings_sz; | | 572 | size_t ps_strings_sz; |
567 | | | 573 | |
568 | p = l->l_proc; | | 574 | p = l->l_proc; |
569 | modgen = 0; | | 575 | modgen = 0; |
570 | | | 576 | |
571 | SDT_PROBE(proc,,,exec, path, 0, 0, 0, 0); | | 577 | SDT_PROBE(proc,,,exec, path, 0, 0, 0, 0); |
572 | | | 578 | |
573 | /* | | 579 | /* |
574 | * Check if we have exceeded our number of processes limit. | | 580 | * Check if we have exceeded our number of processes limit. |
575 | * This is so that we handle the case where a root daemon | | 581 | * This is so that we handle the case where a root daemon |
576 | * forked, ran setuid to become the desired user and is trying | | 582 | * forked, ran setuid to become the desired user and is trying |
577 | * to exec. The obvious place to do the reference counting check | | 583 | * to exec. The obvious place to do the reference counting check |
578 | * is setuid(), but we don't do the reference counting check there | | 584 | * is setuid(), but we don't do the reference counting check there |
579 | * like other OS's do because then all the programs that use setuid() | | 585 | * like other OS's do because then all the programs that use setuid() |
580 | * must be modified to check the return code of setuid() and exit(). | | 586 | * must be modified to check the return code of setuid() and exit(). |
581 | * It is dangerous to make setuid() fail, because it fails open and | | 587 | * It is dangerous to make setuid() fail, because it fails open and |
582 | * the program will continue to run as root. If we make it succeed | | 588 | * the program will continue to run as root. If we make it succeed |
583 | * and return an error code, again we are not enforcing the limit. | | 589 | * and return an error code, again we are not enforcing the limit. |
584 | * The best place to enforce the limit is here, when the process tries | | 590 | * The best place to enforce the limit is here, when the process tries |
585 | * to execute a new image, because eventually the process will need | | 591 | * to execute a new image, because eventually the process will need |
586 | * to call exec in order to do something useful. | | 592 | * to call exec in order to do something useful. |
587 | */ | | 593 | */ |
588 | retry: | | 594 | retry: |
589 | if ((p->p_flag & PK_SUGID) && kauth_authorize_generic(l->l_cred, | | 595 | if ((p->p_flag & PK_SUGID) && kauth_authorize_generic(l->l_cred, |
590 | KAUTH_GENERIC_ISSUSER, NULL) != 0 && chgproccnt(kauth_cred_getuid( | | 596 | KAUTH_GENERIC_ISSUSER, NULL) != 0 && chgproccnt(kauth_cred_getuid( |
591 | l->l_cred), 0) > p->p_rlimit[RLIMIT_NPROC].rlim_cur) | | 597 | l->l_cred), 0) > p->p_rlimit[RLIMIT_NPROC].rlim_cur) |
592 | return EAGAIN; | | 598 | return EAGAIN; |
593 | | | 599 | |
594 | oldlwpflags = l->l_flag & (LW_SA | LW_SA_UPCALL); | | 600 | oldlwpflags = l->l_flag & (LW_SA | LW_SA_UPCALL); |
595 | if (l->l_flag & LW_SA) { | | 601 | if (l->l_flag & LW_SA) { |
596 | lwp_lock(l); | | 602 | lwp_lock(l); |
597 | l->l_flag &= ~(LW_SA | LW_SA_UPCALL); | | 603 | l->l_flag &= ~(LW_SA | LW_SA_UPCALL); |
598 | lwp_unlock(l); | | 604 | lwp_unlock(l); |
599 | } | | 605 | } |
600 | | | 606 | |
601 | /* | | 607 | /* |
602 | * Drain existing references and forbid new ones. The process | | 608 | * Drain existing references and forbid new ones. The process |
603 | * should be left alone until we're done here. This is necessary | | 609 | * should be left alone until we're done here. This is necessary |
604 | * to avoid race conditions - e.g. in ptrace() - that might allow | | 610 | * to avoid race conditions - e.g. in ptrace() - that might allow |
605 | * a local user to illicitly obtain elevated privileges. | | 611 | * a local user to illicitly obtain elevated privileges. |
606 | */ | | 612 | */ |
607 | rw_enter(&p->p_reflock, RW_WRITER); | | 613 | rw_enter(&p->p_reflock, RW_WRITER); |
608 | | | 614 | |
609 | base_vcp = NULL; | | 615 | base_vcp = NULL; |
610 | /* | | 616 | /* |
611 | * Init the namei data to point the file user's program name. | | 617 | * Init the namei data to point the file user's program name. |
612 | * This is done here rather than in check_exec(), so that it's | | 618 | * This is done here rather than in check_exec(), so that it's |
613 | * possible to override this settings if any of makecmd/probe | | 619 | * possible to override this settings if any of makecmd/probe |
614 | * functions call check_exec() recursively - for example, | | 620 | * functions call check_exec() recursively - for example, |
615 | * see exec_script_makecmds(). | | 621 | * see exec_script_makecmds(). |
616 | */ | | 622 | */ |
617 | error = pathbuf_copyin(path, &pb); | | 623 | error = pathbuf_copyin(path, &pb); |
618 | if (error) { | | 624 | if (error) { |
619 | DPRINTF(("%s: pathbuf_copyin path @%p %d\n", __func__, | | 625 | DPRINTF(("%s: pathbuf_copyin path @%p %d\n", __func__, |
620 | path, error)); | | 626 | path, error)); |
621 | goto clrflg; | | 627 | goto clrflg; |
622 | } | | 628 | } |
623 | pathstring = pathbuf_stringcopy_get(pb); | | 629 | pathstring = pathbuf_stringcopy_get(pb); |
624 | resolvedpathbuf = PNBUF_GET(); | | 630 | resolvedpathbuf = PNBUF_GET(); |
625 | #ifdef DIAGNOSTIC | | 631 | #ifdef DIAGNOSTIC |
626 | strcpy(resolvedpathbuf, "/wrong"); | | 632 | strcpy(resolvedpathbuf, "/wrong"); |
627 | #endif | | 633 | #endif |
628 | | | 634 | |
629 | /* | | 635 | /* |
630 | * initialize the fields of the exec package. | | 636 | * initialize the fields of the exec package. |
631 | */ | | 637 | */ |
632 | pack.ep_name = path; | | 638 | pack.ep_name = path; |
633 | pack.ep_kname = pathstring; | | 639 | pack.ep_kname = pathstring; |
634 | pack.ep_resolvedname = resolvedpathbuf; | | 640 | pack.ep_resolvedname = resolvedpathbuf; |
635 | pack.ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP); | | 641 | pack.ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP); |
636 | pack.ep_hdrlen = exec_maxhdrsz; | | 642 | pack.ep_hdrlen = exec_maxhdrsz; |
637 | pack.ep_hdrvalid = 0; | | 643 | pack.ep_hdrvalid = 0; |
638 | pack.ep_emul_arg = NULL; | | 644 | pack.ep_emul_arg = NULL; |
639 | pack.ep_vmcmds.evs_cnt = 0; | | 645 | pack.ep_vmcmds.evs_cnt = 0; |
640 | pack.ep_vmcmds.evs_used = 0; | | 646 | pack.ep_vmcmds.evs_used = 0; |
641 | pack.ep_vap = &attr; | | 647 | pack.ep_vap = &attr; |
642 | pack.ep_flags = 0; | | 648 | pack.ep_flags = 0; |
643 | pack.ep_emul_root = NULL; | | 649 | pack.ep_emul_root = NULL; |
644 | pack.ep_interp = NULL; | | 650 | pack.ep_interp = NULL; |
645 | pack.ep_esch = NULL; | | 651 | pack.ep_esch = NULL; |
646 | pack.ep_pax_flags = 0; | | 652 | pack.ep_pax_flags = 0; |
647 | | | 653 | |
648 | rw_enter(&exec_lock, RW_READER); | | 654 | rw_enter(&exec_lock, RW_READER); |
649 | | | 655 | |
650 | /* see if we can run it. */ | | 656 | /* see if we can run it. */ |
651 | if ((error = check_exec(l, &pack, pb)) != 0) { | | 657 | if ((error = check_exec(l, &pack, pb)) != 0) { |
652 | if (error != ENOENT) { | | 658 | if (error != ENOENT) { |
653 | DPRINTF(("%s: check exec failed %d\n", | | 659 | DPRINTF(("%s: check exec failed %d\n", |
654 | __func__, error)); | | 660 | __func__, error)); |
655 | } | | 661 | } |
656 | goto freehdr; | | 662 | goto freehdr; |
657 | } | | 663 | } |
658 | | | 664 | |
659 | /* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */ | | 665 | /* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */ |
660 | | | 666 | |
661 | /* allocate an argument buffer */ | | 667 | /* allocate an argument buffer */ |
662 | argp = pool_get(&exec_pool, PR_WAITOK); | | 668 | argp = pool_get(&exec_pool, PR_WAITOK); |
663 | KASSERT(argp != NULL); | | 669 | KASSERT(argp != NULL); |
664 | dp = argp; | | 670 | dp = argp; |
665 | argc = 0; | | 671 | argc = 0; |
666 | | | 672 | |
667 | /* copy the fake args list, if there's one, freeing it as we go */ | | 673 | /* copy the fake args list, if there's one, freeing it as we go */ |
668 | if (pack.ep_flags & EXEC_HASARGL) { | | 674 | if (pack.ep_flags & EXEC_HASARGL) { |
669 | tmpfap = pack.ep_fa; | | 675 | tmpfap = pack.ep_fa; |
670 | while (tmpfap->fa_arg != NULL) { | | 676 | while (tmpfap->fa_arg != NULL) { |
671 | const char *cp; | | 677 | const char *cp; |
672 | | | 678 | |
673 | cp = tmpfap->fa_arg; | | 679 | cp = tmpfap->fa_arg; |
674 | while (*cp) | | 680 | while (*cp) |
675 | *dp++ = *cp++; | | 681 | *dp++ = *cp++; |
676 | *dp++ = '\0'; | | 682 | *dp++ = '\0'; |
677 | ktrexecarg(tmpfap->fa_arg, cp - tmpfap->fa_arg); | | 683 | ktrexecarg(tmpfap->fa_arg, cp - tmpfap->fa_arg); |
678 | | | 684 | |
679 | kmem_free(tmpfap->fa_arg, tmpfap->fa_len); | | 685 | kmem_free(tmpfap->fa_arg, tmpfap->fa_len); |
680 | tmpfap++; argc++; | | 686 | tmpfap++; argc++; |
681 | } | | 687 | } |
682 | kmem_free(pack.ep_fa, pack.ep_fa_len); | | 688 | kmem_free(pack.ep_fa, pack.ep_fa_len); |
683 | pack.ep_flags &= ~EXEC_HASARGL; | | 689 | pack.ep_flags &= ~EXEC_HASARGL; |
684 | } | | 690 | } |
685 | | | 691 | |
686 | /* Now get argv & environment */ | | 692 | /* Now get argv & environment */ |
687 | if (args == NULL) { | | 693 | if (args == NULL) { |
688 | DPRINTF(("%s: null args\n", __func__)); | | 694 | DPRINTF(("%s: null args\n", __func__)); |
689 | error = EINVAL; | | 695 | error = EINVAL; |
690 | goto bad; | | 696 | goto bad; |
691 | } | | 697 | } |
692 | /* 'i' will index the argp/envp element to be retrieved */ | | 698 | /* 'i' will index the argp/envp element to be retrieved */ |
693 | i = 0; | | 699 | i = 0; |
694 | if (pack.ep_flags & EXEC_SKIPARG) | | 700 | if (pack.ep_flags & EXEC_SKIPARG) |
695 | i++; | | 701 | i++; |
696 | | | 702 | |
697 | while (1) { | | 703 | while (1) { |
698 | len = argp + ARG_MAX - dp; | | 704 | len = argp + ARG_MAX - dp; |
699 | if ((error = (*fetch_element)(args, i, &sp)) != 0) { | | 705 | if ((error = (*fetch_element)(args, i, &sp)) != 0) { |
700 | DPRINTF(("%s: fetch_element args %d\n", | | 706 | DPRINTF(("%s: fetch_element args %d\n", |
701 | __func__, error)); | | 707 | __func__, error)); |
702 | goto bad; | | 708 | goto bad; |
703 | } | | 709 | } |
704 | if (!sp) | | 710 | if (!sp) |
705 | break; | | 711 | break; |
706 | if ((error = copyinstr(sp, dp, len, &len)) != 0) { | | 712 | if ((error = copyinstr(sp, dp, len, &len)) != 0) { |
707 | DPRINTF(("%s: copyinstr args %d\n", __func__, error)); | | 713 | DPRINTF(("%s: copyinstr args %d\n", __func__, error)); |
708 | if (error == ENAMETOOLONG) | | 714 | if (error == ENAMETOOLONG) |
709 | error = E2BIG; | | 715 | error = E2BIG; |
710 | goto bad; | | 716 | goto bad; |
711 | } | | 717 | } |
712 | ktrexecarg(dp, len - 1); | | 718 | ktrexecarg(dp, len - 1); |
713 | dp += len; | | 719 | dp += len; |
714 | i++; | | 720 | i++; |
715 | argc++; | | 721 | argc++; |
716 | } | | 722 | } |
717 | | | 723 | |
718 | envc = 0; | | 724 | envc = 0; |
719 | /* environment need not be there */ | | 725 | /* environment need not be there */ |
720 | if (envs != NULL) { | | 726 | if (envs != NULL) { |
721 | i = 0; | | 727 | i = 0; |
722 | while (1) { | | 728 | while (1) { |
723 | len = argp + ARG_MAX - dp; | | 729 | len = argp + ARG_MAX - dp; |
724 | if ((error = (*fetch_element)(envs, i, &sp)) != 0) { | | 730 | if ((error = (*fetch_element)(envs, i, &sp)) != 0) { |
725 | DPRINTF(("%s: fetch_element env %d\n", | | 731 | DPRINTF(("%s: fetch_element env %d\n", |
726 | __func__, error)); | | 732 | __func__, error)); |
727 | goto bad; | | 733 | goto bad; |
728 | } | | 734 | } |
729 | if (!sp) | | 735 | if (!sp) |
730 | break; | | 736 | break; |
731 | if ((error = copyinstr(sp, dp, len, &len)) != 0) { | | 737 | if ((error = copyinstr(sp, dp, len, &len)) != 0) { |
732 | DPRINTF(("%s: copyinstr env %d\n", | | 738 | DPRINTF(("%s: copyinstr env %d\n", |
733 | __func__, error)); | | 739 | __func__, error)); |
734 | if (error == ENAMETOOLONG) | | 740 | if (error == ENAMETOOLONG) |
735 | error = E2BIG; | | 741 | error = E2BIG; |
736 | goto bad; | | 742 | goto bad; |
737 | } | | 743 | } |
738 | ktrexecenv(dp, len - 1); | | 744 | ktrexecenv(dp, len - 1); |
739 | dp += len; | | 745 | dp += len; |
740 | i++; | | 746 | i++; |
741 | envc++; | | 747 | envc++; |
742 | } | | 748 | } |
743 | } | | 749 | } |
744 | | | 750 | |
745 | dp = (char *) ALIGN(dp); | | 751 | dp = (char *) ALIGN(dp); |
746 | | | 752 | |
747 | szsigcode = pack.ep_esch->es_emul->e_esigcode - | | 753 | szsigcode = pack.ep_esch->es_emul->e_esigcode - |
748 | pack.ep_esch->es_emul->e_sigcode; | | 754 | pack.ep_esch->es_emul->e_sigcode; |
749 | | | 755 | |
750 | #ifdef __MACHINE_STACK_GROWS_UP | | 756 | #ifdef __MACHINE_STACK_GROWS_UP |
751 | /* See big comment lower down */ | | 757 | /* See big comment lower down */ |
752 | #define RTLD_GAP 32 | | 758 | #define RTLD_GAP 32 |
753 | #else | | 759 | #else |
754 | #define RTLD_GAP 0 | | 760 | #define RTLD_GAP 0 |
755 | #endif | | 761 | #endif |
756 | | | 762 | |
757 | /* Now check if args & environ fit into new stack */ | | 763 | /* Now check if args & environ fit into new stack */ |
758 | if (pack.ep_flags & EXEC_32) { | | 764 | if (pack.ep_flags & EXEC_32) { |
759 | aip = &arginfo32; | | 765 | aip = &arginfo32; |
760 | ps_strings_sz = sizeof(struct ps_strings32); | | 766 | ps_strings_sz = sizeof(struct ps_strings32); |
761 | len = ((argc + envc + 2 + pack.ep_esch->es_arglen) * | | 767 | len = ((argc + envc + 2 + pack.ep_esch->es_arglen) * |
762 | sizeof(int) + sizeof(int) + dp + RTLD_GAP + | | 768 | sizeof(int) + sizeof(int) + dp + RTLD_GAP + |
763 | szsigcode + ps_strings_sz + STACK_PTHREADSPACE) | | 769 | szsigcode + ps_strings_sz + STACK_PTHREADSPACE) |
764 | - argp; | | 770 | - argp; |
765 | } else { | | 771 | } else { |
766 | aip = &arginfo; | | 772 | aip = &arginfo; |
767 | ps_strings_sz = sizeof(struct ps_strings); | | 773 | ps_strings_sz = sizeof(struct ps_strings); |
768 | len = ((argc + envc + 2 + pack.ep_esch->es_arglen) * | | 774 | len = ((argc + envc + 2 + pack.ep_esch->es_arglen) * |
769 | sizeof(char *) + sizeof(int) + dp + RTLD_GAP + | | 775 | sizeof(char *) + sizeof(int) + dp + RTLD_GAP + |
770 | szsigcode + ps_strings_sz + STACK_PTHREADSPACE) | | 776 | szsigcode + ps_strings_sz + STACK_PTHREADSPACE) |
771 | - argp; | | 777 | - argp; |
772 | } | | 778 | } |
773 | | | 779 | |
774 | #ifdef PAX_ASLR | | 780 | #ifdef PAX_ASLR |
775 | if (pax_aslr_active(l)) | | 781 | if (pax_aslr_active(l)) |
776 | len += (arc4random() % PAGE_SIZE); | | 782 | len += (arc4random() % PAGE_SIZE); |
777 | #endif /* PAX_ASLR */ | | 783 | #endif /* PAX_ASLR */ |
778 | | | 784 | |
779 | #ifdef STACKLALIGN /* arm, etc. */ | | 785 | #ifdef STACKLALIGN /* arm, etc. */ |
780 | len = STACKALIGN(len); /* make the stack "safely" aligned */ | | 786 | len = STACKALIGN(len); /* make the stack "safely" aligned */ |
781 | #else | | 787 | #else |
782 | len = ALIGN(len); /* make the stack "safely" aligned */ | | 788 | len = ALIGN(len); /* make the stack "safely" aligned */ |
783 | #endif | | 789 | #endif |
784 | | | 790 | |
785 | if (len > pack.ep_ssize) { /* in effect, compare to initial limit */ | | 791 | if (len > pack.ep_ssize) { /* in effect, compare to initial limit */ |
786 | DPRINTF(("%s: stack limit exceeded %zu\n", __func__, len)); | | 792 | DPRINTF(("%s: stack limit exceeded %zu\n", __func__, len)); |
787 | error = ENOMEM; | | 793 | error = ENOMEM; |
788 | goto bad; | | 794 | goto bad; |
789 | } | | 795 | } |
790 | | | 796 | |
791 | /* Get rid of other LWPs. */ | | 797 | /* Get rid of other LWPs. */ |
792 | if (p->p_sa || p->p_nlwps > 1) { | | 798 | if (p->p_sa || p->p_nlwps > 1) { |
793 | mutex_enter(p->p_lock); | | 799 | mutex_enter(p->p_lock); |
794 | exit_lwps(l); | | 800 | exit_lwps(l); |
795 | mutex_exit(p->p_lock); | | 801 | mutex_exit(p->p_lock); |
796 | } | | 802 | } |
797 | KDASSERT(p->p_nlwps == 1); | | 803 | KDASSERT(p->p_nlwps == 1); |
798 | | | 804 | |
799 | /* Destroy any lwpctl info. */ | | 805 | /* Destroy any lwpctl info. */ |
800 | if (p->p_lwpctl != NULL) | | 806 | if (p->p_lwpctl != NULL) |
801 | lwp_ctl_exit(); | | 807 | lwp_ctl_exit(); |
802 | | | 808 | |
803 | #ifdef KERN_SA | | 809 | #ifdef KERN_SA |
804 | /* Release any SA state. */ | | 810 | /* Release any SA state. */ |
805 | if (p->p_sa) | | 811 | if (p->p_sa) |
806 | sa_release(p); | | 812 | sa_release(p); |
807 | #endif /* KERN_SA */ | | 813 | #endif /* KERN_SA */ |
808 | | | 814 | |
809 | /* Remove POSIX timers */ | | 815 | /* Remove POSIX timers */ |
810 | timers_free(p, TIMERS_POSIX); | | 816 | timers_free(p, TIMERS_POSIX); |
811 | | | 817 | |
812 | /* adjust "active stack depth" for process VSZ */ | | 818 | /* adjust "active stack depth" for process VSZ */ |
813 | pack.ep_ssize = len; /* maybe should go elsewhere, but... */ | | 819 | pack.ep_ssize = len; /* maybe should go elsewhere, but... */ |
814 | | | 820 | |
815 | /* | | 821 | /* |
816 | * Do whatever is necessary to prepare the address space | | 822 | * Do whatever is necessary to prepare the address space |
817 | * for remapping. Note that this might replace the current | | 823 | * for remapping. Note that this might replace the current |
818 | * vmspace with another! | | 824 | * vmspace with another! |
819 | */ | | 825 | */ |
820 | uvmspace_exec(l, pack.ep_vm_minaddr, pack.ep_vm_maxaddr); | | 826 | uvmspace_exec(l, pack.ep_vm_minaddr, pack.ep_vm_maxaddr); |
821 | | | 827 | |
822 | /* record proc's vnode, for use by procfs and others */ | | 828 | /* record proc's vnode, for use by procfs and others */ |
823 | if (p->p_textvp) | | 829 | if (p->p_textvp) |
824 | vrele(p->p_textvp); | | 830 | vrele(p->p_textvp); |
825 | vref(pack.ep_vp); | | 831 | vref(pack.ep_vp); |
826 | p->p_textvp = pack.ep_vp; | | 832 | p->p_textvp = pack.ep_vp; |
827 | | | 833 | |
828 | /* Now map address space */ | | 834 | /* Now map address space */ |
829 | vm = p->p_vmspace; | | 835 | vm = p->p_vmspace; |
830 | vm->vm_taddr = (void *)pack.ep_taddr; | | 836 | vm->vm_taddr = (void *)pack.ep_taddr; |
831 | vm->vm_tsize = btoc(pack.ep_tsize); | | 837 | vm->vm_tsize = btoc(pack.ep_tsize); |
832 | vm->vm_daddr = (void*)pack.ep_daddr; | | 838 | vm->vm_daddr = (void*)pack.ep_daddr; |
833 | vm->vm_dsize = btoc(pack.ep_dsize); | | 839 | vm->vm_dsize = btoc(pack.ep_dsize); |
834 | vm->vm_ssize = btoc(pack.ep_ssize); | | 840 | vm->vm_ssize = btoc(pack.ep_ssize); |
835 | vm->vm_issize = 0; | | 841 | vm->vm_issize = 0; |
836 | vm->vm_maxsaddr = (void *)pack.ep_maxsaddr; | | 842 | vm->vm_maxsaddr = (void *)pack.ep_maxsaddr; |
837 | vm->vm_minsaddr = (void *)pack.ep_minsaddr; | | 843 | vm->vm_minsaddr = (void *)pack.ep_minsaddr; |
838 | | | 844 | |
839 | #ifdef PAX_ASLR | | 845 | #ifdef PAX_ASLR |
840 | pax_aslr_init(l, vm); | | 846 | pax_aslr_init(l, vm); |
841 | #endif /* PAX_ASLR */ | | 847 | #endif /* PAX_ASLR */ |
842 | | | 848 | |
843 | /* create the new process's VM space by running the vmcmds */ | | 849 | /* create the new process's VM space by running the vmcmds */ |
844 | #ifdef DIAGNOSTIC | | 850 | #ifdef DIAGNOSTIC |
845 | if (pack.ep_vmcmds.evs_used == 0) | | 851 | if (pack.ep_vmcmds.evs_used == 0) |
846 | panic("%s: no vmcmds", __func__); | | 852 | panic("%s: no vmcmds", __func__); |
847 | #endif | | 853 | #endif |
848 | for (i = 0; i < pack.ep_vmcmds.evs_used && !error; i++) { | | 854 | for (i = 0; i < pack.ep_vmcmds.evs_used && !error; i++) { |
849 | struct exec_vmcmd *vcp; | | 855 | struct exec_vmcmd *vcp; |
850 | | | 856 | |
851 | vcp = &pack.ep_vmcmds.evs_cmds[i]; | | 857 | vcp = &pack.ep_vmcmds.evs_cmds[i]; |
852 | if (vcp->ev_flags & VMCMD_RELATIVE) { | | 858 | if (vcp->ev_flags & VMCMD_RELATIVE) { |
853 | #ifdef DIAGNOSTIC | | 859 | #ifdef DIAGNOSTIC |
854 | if (base_vcp == NULL) | | 860 | if (base_vcp == NULL) |
855 | panic("%s: relative vmcmd with no base", | | 861 | panic("%s: relative vmcmd with no base", |
856 | __func__); | | 862 | __func__); |
857 | if (vcp->ev_flags & VMCMD_BASE) | | 863 | if (vcp->ev_flags & VMCMD_BASE) |
858 | panic("%s: illegal base & relative vmcmd", | | 864 | panic("%s: illegal base & relative vmcmd", |
859 | __func__); | | 865 | __func__); |
860 | #endif | | 866 | #endif |
861 | vcp->ev_addr += base_vcp->ev_addr; | | 867 | vcp->ev_addr += base_vcp->ev_addr; |
862 | } | | 868 | } |
863 | error = (*vcp->ev_proc)(l, vcp); | | 869 | error = (*vcp->ev_proc)(l, vcp); |
864 | #ifdef DEBUG_EXEC | | 870 | #ifdef DEBUG_EXEC |
865 | if (error) { | | 871 | if (error) { |
866 | size_t j; | | 872 | size_t j; |
867 | struct exec_vmcmd *vp = &pack.ep_vmcmds.evs_cmds[0]; | | 873 | struct exec_vmcmd *vp = &pack.ep_vmcmds.evs_cmds[0]; |
868 | uprintf("vmcmds %zu/%u, error %d\n", i, | | 874 | uprintf("vmcmds %zu/%u, error %d\n", i, |
869 | pack.ep_vmcmds.evs_used, error); | | 875 | pack.ep_vmcmds.evs_used, error); |
870 | for (j = 0; j <= i; j++) | | 876 | for (j = 0; j <= i; j++) |
871 | uprintf("vmcmd[%zu] = vmcmd_map_%s %#" | | 877 | uprintf("vmcmd[%zu] = vmcmd_map_%s %#" |
872 | PRIxVADDR"/%#"PRIxVSIZE" fd@%#" | | 878 | PRIxVADDR"/%#"PRIxVSIZE" fd@%#" |
873 | PRIxVSIZE" prot=0%o flags=%d\n", j, | | 879 | PRIxVSIZE" prot=0%o flags=%d\n", j, |
874 | vp[j].ev_proc == vmcmd_map_pagedvn ? | | 880 | vp[j].ev_proc == vmcmd_map_pagedvn ? |
875 | "pagedvn" : | | 881 | "pagedvn" : |
876 | vp[j].ev_proc == vmcmd_map_readvn ? | | 882 | vp[j].ev_proc == vmcmd_map_readvn ? |
877 | "readvn" : | | 883 | "readvn" : |
878 | vp[j].ev_proc == vmcmd_map_zero ? | | 884 | vp[j].ev_proc == vmcmd_map_zero ? |
879 | "zero" : "*unknown*", | | 885 | "zero" : "*unknown*", |
880 | vp[j].ev_addr, vp[j].ev_len, | | 886 | vp[j].ev_addr, vp[j].ev_len, |
881 | vp[j].ev_offset, vp[j].ev_prot, | | 887 | vp[j].ev_offset, vp[j].ev_prot, |
882 | vp[j].ev_flags); | | 888 | vp[j].ev_flags); |
883 | } | | 889 | } |
884 | #endif /* DEBUG_EXEC */ | | 890 | #endif /* DEBUG_EXEC */ |
885 | if (vcp->ev_flags & VMCMD_BASE) | | 891 | if (vcp->ev_flags & VMCMD_BASE) |
886 | base_vcp = vcp; | | 892 | base_vcp = vcp; |
887 | } | | 893 | } |
888 | | | 894 | |
889 | /* free the vmspace-creation commands, and release their references */ | | 895 | /* free the vmspace-creation commands, and release their references */ |
890 | kill_vmcmds(&pack.ep_vmcmds); | | 896 | kill_vmcmds(&pack.ep_vmcmds); |
891 | | | 897 | |
892 | vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); | | 898 | vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); |
893 | VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred); | | 899 | VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred); |
894 | vput(pack.ep_vp); | | 900 | vput(pack.ep_vp); |
895 | | | 901 | |
896 | /* if an error happened, deallocate and punt */ | | 902 | /* if an error happened, deallocate and punt */ |
897 | if (error) { | | 903 | if (error) { |
898 | DPRINTF(("%s: vmcmd %zu failed: %d\n", __func__, i - 1, error)); | | 904 | DPRINTF(("%s: vmcmd %zu failed: %d\n", __func__, i - 1, error)); |
899 | goto exec_abort; | | 905 | goto exec_abort; |
900 | } | | 906 | } |
901 | | | 907 | |
902 | /* remember information about the process */ | | 908 | /* remember information about the process */ |
903 | arginfo.ps_nargvstr = argc; | | 909 | arginfo.ps_nargvstr = argc; |
904 | arginfo.ps_nenvstr = envc; | | 910 | arginfo.ps_nenvstr = envc; |
905 | | | 911 | |
906 | /* set command name & other accounting info */ | | 912 | /* set command name & other accounting info */ |
907 | commandname = strrchr(pack.ep_resolvedname, '/'); | | 913 | commandname = strrchr(pack.ep_resolvedname, '/'); |
908 | if (commandname != NULL) { | | 914 | if (commandname != NULL) { |
909 | commandname++; | | 915 | commandname++; |
910 | } else { | | 916 | } else { |
911 | commandname = pack.ep_resolvedname; | | 917 | commandname = pack.ep_resolvedname; |
912 | } | | 918 | } |
913 | i = min(strlen(commandname), MAXCOMLEN); | | 919 | i = min(strlen(commandname), MAXCOMLEN); |
914 | (void)memcpy(p->p_comm, commandname, i); | | 920 | (void)memcpy(p->p_comm, commandname, i); |
915 | p->p_comm[i] = '\0'; | | 921 | p->p_comm[i] = '\0'; |
916 | | | 922 | |
917 | dp = PNBUF_GET(); | | 923 | dp = PNBUF_GET(); |
918 | /* | | 924 | /* |
919 | * If the path starts with /, we don't need to do any work. | | 925 | * If the path starts with /, we don't need to do any work. |
920 | * This handles the majority of the cases. | | 926 | * This handles the majority of the cases. |
921 | * In the future perhaps we could canonicalize it? | | 927 | * In the future perhaps we could canonicalize it? |
922 | */ | | 928 | */ |
923 | if (pathstring[0] == '/') | | 929 | if (pathstring[0] == '/') |
924 | (void)strlcpy(pack.ep_path = dp, pathstring, MAXPATHLEN); | | 930 | (void)strlcpy(pack.ep_path = dp, pathstring, MAXPATHLEN); |
925 | #ifdef notyet | | 931 | #ifdef notyet |
926 | /* | | 932 | /* |
927 | * Although this works most of the time [since the entry was just | | 933 | * Although this works most of the time [since the entry was just |
928 | * entered in the cache] we don't use it because it theoretically | | 934 | * entered in the cache] we don't use it because it theoretically |
929 | * can fail and it is not the cleanest interface, because there | | 935 | * can fail and it is not the cleanest interface, because there |
930 | * could be races. When the namei cache is re-written, this can | | 936 | * could be races. When the namei cache is re-written, this can |
931 | * be changed to use the appropriate function. | | 937 | * be changed to use the appropriate function. |
932 | */ | | 938 | */ |
933 | else if (!(error = vnode_to_path(dp, MAXPATHLEN, p->p_textvp, l, p))) | | 939 | else if (!(error = vnode_to_path(dp, MAXPATHLEN, p->p_textvp, l, p))) |
934 | pack.ep_path = dp; | | 940 | pack.ep_path = dp; |
935 | #endif | | 941 | #endif |
936 | else { | | 942 | else { |
937 | #ifdef notyet | | 943 | #ifdef notyet |
938 | printf("Cannot get path for pid %d [%s] (error %d)", | | 944 | printf("Cannot get path for pid %d [%s] (error %d)", |
939 | (int)p->p_pid, p->p_comm, error); | | 945 | (int)p->p_pid, p->p_comm, error); |
940 | #endif | | 946 | #endif |
941 | pack.ep_path = NULL; | | 947 | pack.ep_path = NULL; |
942 | PNBUF_PUT(dp); | | 948 | PNBUF_PUT(dp); |
943 | } | | 949 | } |
944 | | | 950 | |
945 | stack = (char *)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, | | 951 | stack = (char *)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, |
946 | STACK_PTHREADSPACE + ps_strings_sz + szsigcode), | | 952 | STACK_PTHREADSPACE + ps_strings_sz + szsigcode), |
947 | len - (ps_strings_sz + szsigcode)); | | 953 | len - (ps_strings_sz + szsigcode)); |
948 | | | 954 | |
949 | #ifdef __MACHINE_STACK_GROWS_UP | | 955 | #ifdef __MACHINE_STACK_GROWS_UP |
950 | /* | | 956 | /* |
951 | * The copyargs call always copies into lower addresses | | 957 | * The copyargs call always copies into lower addresses |
952 | * first, moving towards higher addresses, starting with | | 958 | * first, moving towards higher addresses, starting with |
953 | * the stack pointer that we give. When the stack grows | | 959 | * the stack pointer that we give. When the stack grows |
954 | * down, this puts argc/argv/envp very shallow on the | | 960 | * down, this puts argc/argv/envp very shallow on the |
955 | * stack, right at the first user stack pointer. | | 961 | * stack, right at the first user stack pointer. |
956 | * When the stack grows up, the situation is reversed. | | 962 | * When the stack grows up, the situation is reversed. |
957 | * | | 963 | * |
958 | * Normally, this is no big deal. But the ld_elf.so _rtld() | | 964 | * Normally, this is no big deal. But the ld_elf.so _rtld() |
959 | * function expects to be called with a single pointer to | | 965 | * function expects to be called with a single pointer to |
960 | * a region that has a few words it can stash values into, | | 966 | * a region that has a few words it can stash values into, |
961 | * followed by argc/argv/envp. When the stack grows down, | | 967 | * followed by argc/argv/envp. When the stack grows down, |
962 | * it's easy to decrement the stack pointer a little bit to | | 968 | * it's easy to decrement the stack pointer a little bit to |
963 | * allocate the space for these few words and pass the new | | 969 | * allocate the space for these few words and pass the new |
964 | * stack pointer to _rtld. When the stack grows up, however, | | 970 | * stack pointer to _rtld. When the stack grows up, however, |
965 | * a few words before argc is part of the signal trampoline, XXX | | 971 | * a few words before argc is part of the signal trampoline, XXX |
966 | * so we have a problem. | | 972 | * so we have a problem. |
967 | * | | 973 | * |
968 | * Instead of changing how _rtld works, we take the easy way | | 974 | * Instead of changing how _rtld works, we take the easy way |
969 | * out and steal 32 bytes before we call copyargs. | | 975 | * out and steal 32 bytes before we call copyargs. |
970 | * This extra space was allowed for when 'len' was calculated. | | 976 | * This extra space was allowed for when 'len' was calculated. |
971 | */ | | 977 | */ |
972 | stack += RTLD_GAP; | | 978 | stack += RTLD_GAP; |
973 | #endif /* __MACHINE_STACK_GROWS_UP */ | | 979 | #endif /* __MACHINE_STACK_GROWS_UP */ |
974 | | | 980 | |
975 | /* Now copy argc, args & environ to new stack */ | | 981 | /* Now copy argc, args & environ to new stack */ |
976 | error = (*pack.ep_esch->es_copyargs)(l, &pack, &arginfo, &stack, argp); | | 982 | error = (*pack.ep_esch->es_copyargs)(l, &pack, &arginfo, &stack, argp); |
977 | if (pack.ep_path) { | | 983 | if (pack.ep_path) { |
978 | PNBUF_PUT(pack.ep_path); | | 984 | PNBUF_PUT(pack.ep_path); |
979 | pack.ep_path = NULL; | | 985 | pack.ep_path = NULL; |
980 | } | | 986 | } |
981 | if (error) { | | 987 | if (error) { |
982 | DPRINTF(("%s: copyargs failed %d\n", __func__, error)); | | 988 | DPRINTF(("%s: copyargs failed %d\n", __func__, error)); |
983 | goto exec_abort; | | 989 | goto exec_abort; |
984 | } | | 990 | } |
985 | /* Move the stack back to original point */ | | 991 | /* Move the stack back to original point */ |
986 | stack = (char *)STACK_GROW(vm->vm_minsaddr, len); | | 992 | stack = (char *)STACK_GROW(vm->vm_minsaddr, len); |
987 | | | 993 | |
988 | /* fill process ps_strings info */ | | 994 | /* fill process ps_strings info */ |
989 | p->p_psstrp = (vaddr_t)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, | | 995 | p->p_psstrp = (vaddr_t)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, |
990 | STACK_PTHREADSPACE), ps_strings_sz); | | 996 | STACK_PTHREADSPACE), ps_strings_sz); |
991 | | | 997 | |
992 | if (pack.ep_flags & EXEC_32) { | | 998 | if (pack.ep_flags & EXEC_32) { |
993 | arginfo32.ps_argvstr = (vaddr_t)arginfo.ps_argvstr; | | 999 | arginfo32.ps_argvstr = (vaddr_t)arginfo.ps_argvstr; |
994 | arginfo32.ps_nargvstr = arginfo.ps_nargvstr; | | 1000 | arginfo32.ps_nargvstr = arginfo.ps_nargvstr; |
995 | arginfo32.ps_envstr = (vaddr_t)arginfo.ps_envstr; | | 1001 | arginfo32.ps_envstr = (vaddr_t)arginfo.ps_envstr; |
996 | arginfo32.ps_nenvstr = arginfo.ps_nenvstr; | | 1002 | arginfo32.ps_nenvstr = arginfo.ps_nenvstr; |
997 | } | | 1003 | } |
998 | | | 1004 | |
999 | /* copy out the process's ps_strings structure */ | | 1005 | /* copy out the process's ps_strings structure */ |
1000 | if ((error = copyout(aip, (void *)p->p_psstrp, ps_strings_sz)) != 0) { | | 1006 | if ((error = copyout(aip, (void *)p->p_psstrp, ps_strings_sz)) != 0) { |
1001 | DPRINTF(("%s: ps_strings copyout %p->%p size %zu failed\n", | | 1007 | DPRINTF(("%s: ps_strings copyout %p->%p size %zu failed\n", |
1002 | __func__, aip, (void *)p->p_psstrp, ps_strings_sz)); | | 1008 | __func__, aip, (void *)p->p_psstrp, ps_strings_sz)); |
1003 | goto exec_abort; | | 1009 | goto exec_abort; |
1004 | } | | 1010 | } |
1005 | | | 1011 | |
1006 | cwdexec(p); | | 1012 | cwdexec(p); |
1007 | fd_closeexec(); /* handle close on exec */ | | 1013 | fd_closeexec(); /* handle close on exec */ |
1008 | | | 1014 | |
1009 | if (__predict_false(ktrace_on)) | | 1015 | if (__predict_false(ktrace_on)) |
1010 | fd_ktrexecfd(); | | 1016 | fd_ktrexecfd(); |
1011 | | | 1017 | |
1012 | execsigs(p); /* reset catched signals */ | | 1018 | execsigs(p); /* reset catched signals */ |
1013 | | | 1019 | |
1014 | l->l_ctxlink = NULL; /* reset ucontext link */ | | 1020 | l->l_ctxlink = NULL; /* reset ucontext link */ |
1015 | | | 1021 | |
1016 | | | 1022 | |
1017 | p->p_acflag &= ~AFORK; | | 1023 | p->p_acflag &= ~AFORK; |
1018 | mutex_enter(p->p_lock); | | 1024 | mutex_enter(p->p_lock); |
1019 | p->p_flag |= PK_EXEC; | | 1025 | p->p_flag |= PK_EXEC; |
1020 | mutex_exit(p->p_lock); | | 1026 | mutex_exit(p->p_lock); |
1021 | | | 1027 | |
1022 | /* | | 1028 | /* |
1023 | * Stop profiling. | | 1029 | * Stop profiling. |
1024 | */ | | 1030 | */ |
1025 | if ((p->p_stflag & PST_PROFIL) != 0) { | | 1031 | if ((p->p_stflag & PST_PROFIL) != 0) { |
1026 | mutex_spin_enter(&p->p_stmutex); | | 1032 | mutex_spin_enter(&p->p_stmutex); |
1027 | stopprofclock(p); | | 1033 | stopprofclock(p); |
1028 | mutex_spin_exit(&p->p_stmutex); | | 1034 | mutex_spin_exit(&p->p_stmutex); |
1029 | } | | 1035 | } |
1030 | | | 1036 | |
1031 | /* | | 1037 | /* |
1032 | * It's OK to test PL_PPWAIT unlocked here, as other LWPs have | | 1038 | * It's OK to test PL_PPWAIT unlocked here, as other LWPs have |
1033 | * exited and exec()/exit() are the only places it will be cleared. | | 1039 | * exited and exec()/exit() are the only places it will be cleared. |
1034 | */ | | 1040 | */ |
1035 | if ((p->p_lflag & PL_PPWAIT) != 0) { | | 1041 | if ((p->p_lflag & PL_PPWAIT) != 0) { |
1036 | mutex_enter(proc_lock); | | 1042 | mutex_enter(proc_lock); |
1037 | l->l_lwpctl = NULL; /* was on loan from blocked parent */ | | 1043 | l->l_lwpctl = NULL; /* was on loan from blocked parent */ |
1038 | p->p_lflag &= ~PL_PPWAIT; | | 1044 | p->p_lflag &= ~PL_PPWAIT; |
1039 | cv_broadcast(&p->p_pptr->p_waitcv); | | 1045 | cv_broadcast(&p->p_pptr->p_waitcv); |
1040 | mutex_exit(proc_lock); | | 1046 | mutex_exit(proc_lock); |
1041 | } | | 1047 | } |
1042 | | | 1048 | |
1043 | /* | | 1049 | /* |
1044 | * Deal with set[ug]id. MNT_NOSUID has already been used to disable | | 1050 | * Deal with set[ug]id. MNT_NOSUID has already been used to disable |
1045 | * s[ug]id. It's OK to check for PSL_TRACED here as we have blocked | | 1051 | * s[ug]id. It's OK to check for PSL_TRACED here as we have blocked |
1046 | * out additional references on the process for the moment. | | 1052 | * out additional references on the process for the moment. |
1047 | */ | | 1053 | */ |
1048 | if ((p->p_slflag & PSL_TRACED) == 0 && | | 1054 | if ((p->p_slflag & PSL_TRACED) == 0 && |
1049 | | | 1055 | |
1050 | (((attr.va_mode & S_ISUID) != 0 && | | 1056 | (((attr.va_mode & S_ISUID) != 0 && |
1051 | kauth_cred_geteuid(l->l_cred) != attr.va_uid) || | | 1057 | kauth_cred_geteuid(l->l_cred) != attr.va_uid) || |
1052 | | | 1058 | |
1053 | ((attr.va_mode & S_ISGID) != 0 && | | 1059 | ((attr.va_mode & S_ISGID) != 0 && |
1054 | kauth_cred_getegid(l->l_cred) != attr.va_gid))) { | | 1060 | kauth_cred_getegid(l->l_cred) != attr.va_gid))) { |
1055 | /* | | 1061 | /* |
1056 | * Mark the process as SUGID before we do | | 1062 | * Mark the process as SUGID before we do |
1057 | * anything that might block. | | 1063 | * anything that might block. |
1058 | */ | | 1064 | */ |
1059 | proc_crmod_enter(); | | 1065 | proc_crmod_enter(); |
1060 | proc_crmod_leave(NULL, NULL, true); | | 1066 | proc_crmod_leave(NULL, NULL, true); |
1061 | | | 1067 | |
1062 | /* Make sure file descriptors 0..2 are in use. */ | | 1068 | /* Make sure file descriptors 0..2 are in use. */ |
1063 | if ((error = fd_checkstd()) != 0) { | | 1069 | if ((error = fd_checkstd()) != 0) { |
1064 | DPRINTF(("%s: fdcheckstd failed %d\n", | | 1070 | DPRINTF(("%s: fdcheckstd failed %d\n", |
1065 | __func__, error)); | | 1071 | __func__, error)); |
1066 | goto exec_abort; | | 1072 | goto exec_abort; |
1067 | } | | 1073 | } |
1068 | | | 1074 | |
1069 | /* | | 1075 | /* |
1070 | * Copy the credential so other references don't see our | | 1076 | * Copy the credential so other references don't see our |
1071 | * changes. | | 1077 | * changes. |
1072 | */ | | 1078 | */ |
1073 | l->l_cred = kauth_cred_copy(l->l_cred); | | 1079 | l->l_cred = kauth_cred_copy(l->l_cred); |
1074 | #ifdef KTRACE | | 1080 | #ifdef KTRACE |
1075 | /* | | 1081 | /* |
1076 | * If the persistent trace flag isn't set, turn off. | | 1082 | * If the persistent trace flag isn't set, turn off. |
1077 | */ | | 1083 | */ |
1078 | if (p->p_tracep) { | | 1084 | if (p->p_tracep) { |
1079 | mutex_enter(&ktrace_lock); | | 1085 | mutex_enter(&ktrace_lock); |
1080 | if (!(p->p_traceflag & KTRFAC_PERSISTENT)) | | 1086 | if (!(p->p_traceflag & KTRFAC_PERSISTENT)) |
1081 | ktrderef(p); | | 1087 | ktrderef(p); |
1082 | mutex_exit(&ktrace_lock); | | 1088 | mutex_exit(&ktrace_lock); |
1083 | } | | 1089 | } |
1084 | #endif | | 1090 | #endif |
1085 | if (attr.va_mode & S_ISUID) | | 1091 | if (attr.va_mode & S_ISUID) |
1086 | kauth_cred_seteuid(l->l_cred, attr.va_uid); | | 1092 | kauth_cred_seteuid(l->l_cred, attr.va_uid); |
1087 | if (attr.va_mode & S_ISGID) | | 1093 | if (attr.va_mode & S_ISGID) |
1088 | kauth_cred_setegid(l->l_cred, attr.va_gid); | | 1094 | kauth_cred_setegid(l->l_cred, attr.va_gid); |
1089 | } else { | | 1095 | } else { |
1090 | if (kauth_cred_geteuid(l->l_cred) == | | 1096 | if (kauth_cred_geteuid(l->l_cred) == |
1091 | kauth_cred_getuid(l->l_cred) && | | 1097 | kauth_cred_getuid(l->l_cred) && |
1092 | kauth_cred_getegid(l->l_cred) == | | 1098 | kauth_cred_getegid(l->l_cred) == |
1093 | kauth_cred_getgid(l->l_cred)) | | 1099 | kauth_cred_getgid(l->l_cred)) |
1094 | p->p_flag &= ~PK_SUGID; | | 1100 | p->p_flag &= ~PK_SUGID; |
1095 | } | | 1101 | } |
1096 | | | 1102 | |
1097 | /* | | 1103 | /* |
1098 | * Copy the credential so other references don't see our changes. | | 1104 | * Copy the credential so other references don't see our changes. |
1099 | * Test to see if this is necessary first, since in the common case | | 1105 | * Test to see if this is necessary first, since in the common case |
1100 | * we won't need a private reference. | | 1106 | * we won't need a private reference. |
1101 | */ | | 1107 | */ |
1102 | if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) || | | 1108 | if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) || |
1103 | kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) { | | 1109 | kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) { |
1104 | l->l_cred = kauth_cred_copy(l->l_cred); | | 1110 | l->l_cred = kauth_cred_copy(l->l_cred); |
1105 | kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred)); | | 1111 | kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred)); |
1106 | kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred)); | | 1112 | kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred)); |
1107 | } | | 1113 | } |
1108 | | | 1114 | |
1109 | /* Update the master credentials. */ | | 1115 | /* Update the master credentials. */ |
1110 | if (l->l_cred != p->p_cred) { | | 1116 | if (l->l_cred != p->p_cred) { |
1111 | kauth_cred_t ocred; | | 1117 | kauth_cred_t ocred; |
1112 | | | 1118 | |
1113 | kauth_cred_hold(l->l_cred); | | 1119 | kauth_cred_hold(l->l_cred); |
1114 | mutex_enter(p->p_lock); | | 1120 | mutex_enter(p->p_lock); |
1115 | ocred = p->p_cred; | | 1121 | ocred = p->p_cred; |
1116 | p->p_cred = l->l_cred; | | 1122 | p->p_cred = l->l_cred; |
1117 | mutex_exit(p->p_lock); | | 1123 | mutex_exit(p->p_lock); |
1118 | kauth_cred_free(ocred); | | 1124 | kauth_cred_free(ocred); |
1119 | } | | 1125 | } |
1120 | | | 1126 | |
1121 | #if defined(__HAVE_RAS) | | 1127 | #if defined(__HAVE_RAS) |
1122 | /* | | 1128 | /* |
1123 | * Remove all RASs from the address space. | | 1129 | * Remove all RASs from the address space. |
1124 | */ | | 1130 | */ |
1125 | ras_purgeall(); | | 1131 | ras_purgeall(); |
1126 | #endif | | 1132 | #endif |
1127 | | | 1133 | |
1128 | doexechooks(p); | | 1134 | doexechooks(p); |
1129 | | | 1135 | |
1130 | /* setup new registers and do misc. setup. */ | | 1136 | /* setup new registers and do misc. setup. */ |
1131 | (*pack.ep_esch->es_emul->e_setregs)(l, &pack, (vaddr_t)stack); | | 1137 | (*pack.ep_esch->es_emul->e_setregs)(l, &pack, (vaddr_t)stack); |
1132 | if (pack.ep_esch->es_setregs) | | 1138 | if (pack.ep_esch->es_setregs) |
1133 | (*pack.ep_esch->es_setregs)(l, &pack, (vaddr_t)stack); | | 1139 | (*pack.ep_esch->es_setregs)(l, &pack, (vaddr_t)stack); |
1134 | | | 1140 | |
1135 | /* Provide a consistent LWP private setting */ | | 1141 | /* Provide a consistent LWP private setting */ |
1136 | (void)lwp_setprivate(l, NULL); | | 1142 | (void)lwp_setprivate(l, NULL); |
1137 | | | 1143 | |
1138 | /* Discard all PCU state; need to start fresh */ | | 1144 | /* Discard all PCU state; need to start fresh */ |
1139 | pcu_discard_all(l); | | 1145 | pcu_discard_all(l); |
1140 | | | 1146 | |
1141 | /* map the process's signal trampoline code */ | | 1147 | /* map the process's signal trampoline code */ |
1142 | if ((error = exec_sigcode_map(p, pack.ep_esch->es_emul)) != 0) { | | 1148 | if ((error = exec_sigcode_map(p, pack.ep_esch->es_emul)) != 0) { |
1143 | DPRINTF(("%s: map sigcode failed %d\n", __func__, error)); | | 1149 | DPRINTF(("%s: map sigcode failed %d\n", __func__, error)); |
1144 | goto exec_abort; | | 1150 | goto exec_abort; |
1145 | } | | 1151 | } |
1146 | | | 1152 | |
1147 | pool_put(&exec_pool, argp); | | 1153 | pool_put(&exec_pool, argp); |
1148 | | | 1154 | |
1149 | /* notify others that we exec'd */ | | 1155 | /* notify others that we exec'd */ |
1150 | KNOTE(&p->p_klist, NOTE_EXEC); | | 1156 | KNOTE(&p->p_klist, NOTE_EXEC); |
1151 | | | 1157 | |
1152 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); | | 1158 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); |
1153 | | | 1159 | |
1154 | SDT_PROBE(proc,,,exec_success, path, 0, 0, 0, 0); | | 1160 | SDT_PROBE(proc,,,exec_success, path, 0, 0, 0, 0); |
1155 | | | 1161 | |
1156 | /* The emulation root will usually have been found when we looked | | 1162 | /* The emulation root will usually have been found when we looked |
1157 | * for the elf interpreter (or similar), if not look now. */ | | 1163 | * for the elf interpreter (or similar), if not look now. */ |
1158 | if (pack.ep_esch->es_emul->e_path != NULL && pack.ep_emul_root == NULL) | | 1164 | if (pack.ep_esch->es_emul->e_path != NULL && pack.ep_emul_root == NULL) |
1159 | emul_find_root(l, &pack); | | 1165 | emul_find_root(l, &pack); |
1160 | | | 1166 | |
1161 | /* Any old emulation root got removed by fdcloseexec */ | | 1167 | /* Any old emulation root got removed by fdcloseexec */ |
1162 | rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER); | | 1168 | rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER); |
1163 | p->p_cwdi->cwdi_edir = pack.ep_emul_root; | | 1169 | p->p_cwdi->cwdi_edir = pack.ep_emul_root; |
1164 | rw_exit(&p->p_cwdi->cwdi_lock); | | 1170 | rw_exit(&p->p_cwdi->cwdi_lock); |
1165 | pack.ep_emul_root = NULL; | | 1171 | pack.ep_emul_root = NULL; |
1166 | if (pack.ep_interp != NULL) | | 1172 | if (pack.ep_interp != NULL) |
1167 | vrele(pack.ep_interp); | | 1173 | vrele(pack.ep_interp); |
1168 | | | 1174 | |
1169 | /* | | 1175 | /* |
1170 | * Call emulation specific exec hook. This can setup per-process | | 1176 | * Call emulation specific exec hook. This can setup per-process |
1171 | * p->p_emuldata or do any other per-process stuff an emulation needs. | | 1177 | * p->p_emuldata or do any other per-process stuff an emulation needs. |
1172 | * | | 1178 | * |
1173 | * If we are executing process of different emulation than the | | 1179 | * If we are executing process of different emulation than the |
1174 | * original forked process, call e_proc_exit() of the old emulation | | 1180 | * original forked process, call e_proc_exit() of the old emulation |
1175 | * first, then e_proc_exec() of new emulation. If the emulation is | | 1181 | * first, then e_proc_exec() of new emulation. If the emulation is |
1176 | * same, the exec hook code should deallocate any old emulation | | 1182 | * same, the exec hook code should deallocate any old emulation |
1177 | * resources held previously by this process. | | 1183 | * resources held previously by this process. |
1178 | */ | | 1184 | */ |
1179 | if (p->p_emul && p->p_emul->e_proc_exit | | 1185 | if (p->p_emul && p->p_emul->e_proc_exit |
1180 | && p->p_emul != pack.ep_esch->es_emul) | | 1186 | && p->p_emul != pack.ep_esch->es_emul) |
1181 | (*p->p_emul->e_proc_exit)(p); | | 1187 | (*p->p_emul->e_proc_exit)(p); |
1182 | | | 1188 | |
1183 | /* | | 1189 | /* |
1184 | * This is now LWP 1. | | 1190 | * This is now LWP 1. |
1185 | */ | | 1191 | */ |
1186 | mutex_enter(p->p_lock); | | 1192 | mutex_enter(p->p_lock); |
1187 | p->p_nlwpid = 1; | | 1193 | p->p_nlwpid = 1; |
1188 | l->l_lid = 1; | | 1194 | l->l_lid = 1; |
1189 | mutex_exit(p->p_lock); | | 1195 | mutex_exit(p->p_lock); |
1190 | | | 1196 | |
1191 | /* | | 1197 | /* |
1192 | * Call exec hook. Emulation code may NOT store reference to anything | | 1198 | * Call exec hook. Emulation code may NOT store reference to anything |
1193 | * from &pack. | | 1199 | * from &pack. |
1194 | */ | | 1200 | */ |
1195 | if (pack.ep_esch->es_emul->e_proc_exec) | | 1201 | if (pack.ep_esch->es_emul->e_proc_exec) |
1196 | (*pack.ep_esch->es_emul->e_proc_exec)(p, &pack); | | 1202 | (*pack.ep_esch->es_emul->e_proc_exec)(p, &pack); |
1197 | | | 1203 | |
1198 | /* update p_emul, the old value is no longer needed */ | | 1204 | /* update p_emul, the old value is no longer needed */ |
1199 | p->p_emul = pack.ep_esch->es_emul; | | 1205 | p->p_emul = pack.ep_esch->es_emul; |
1200 | | | 1206 | |
1201 | /* ...and the same for p_execsw */ | | 1207 | /* ...and the same for p_execsw */ |
1202 | p->p_execsw = pack.ep_esch; | | 1208 | p->p_execsw = pack.ep_esch; |
1203 | | | 1209 | |
1204 | #ifdef __HAVE_SYSCALL_INTERN | | 1210 | #ifdef __HAVE_SYSCALL_INTERN |
1205 | (*p->p_emul->e_syscall_intern)(p); | | 1211 | (*p->p_emul->e_syscall_intern)(p); |
1206 | #endif | | 1212 | #endif |
1207 | ktremul(); | | 1213 | ktremul(); |
1208 | | | 1214 | |
1209 | /* Allow new references from the debugger/procfs. */ | | 1215 | /* Allow new references from the debugger/procfs. */ |
1210 | rw_exit(&p->p_reflock); | | 1216 | rw_exit(&p->p_reflock); |
1211 | rw_exit(&exec_lock); | | 1217 | rw_exit(&exec_lock); |
1212 | | | 1218 | |
1213 | mutex_enter(proc_lock); | | 1219 | mutex_enter(proc_lock); |
1214 | | | 1220 | |
1215 | if ((p->p_slflag & (PSL_TRACED|PSL_SYSCALL)) == PSL_TRACED) { | | 1221 | if ((p->p_slflag & (PSL_TRACED|PSL_SYSCALL)) == PSL_TRACED) { |
1216 | KSI_INIT_EMPTY(&ksi); | | 1222 | KSI_INIT_EMPTY(&ksi); |
1217 | ksi.ksi_signo = SIGTRAP; | | 1223 | ksi.ksi_signo = SIGTRAP; |
1218 | ksi.ksi_lid = l->l_lid; | | 1224 | ksi.ksi_lid = l->l_lid; |
1219 | kpsignal(p, &ksi, NULL); | | 1225 | kpsignal(p, &ksi, NULL); |
1220 | } | | 1226 | } |
1221 | | | 1227 | |
1222 | if (p->p_sflag & PS_STOPEXEC) { | | 1228 | if (p->p_sflag & PS_STOPEXEC) { |
1223 | KERNEL_UNLOCK_ALL(l, &l->l_biglocks); | | 1229 | KERNEL_UNLOCK_ALL(l, &l->l_biglocks); |
1224 | p->p_pptr->p_nstopchild++; | | 1230 | p->p_pptr->p_nstopchild++; |
1225 | p->p_pptr->p_waited = 0; | | 1231 | p->p_pptr->p_waited = 0; |
1226 | mutex_enter(p->p_lock); | | 1232 | mutex_enter(p->p_lock); |
1227 | ksiginfo_queue_init(&kq); | | 1233 | ksiginfo_queue_init(&kq); |
1228 | sigclearall(p, &contsigmask, &kq); | | 1234 | sigclearall(p, &contsigmask, &kq); |
1229 | lwp_lock(l); | | 1235 | lwp_lock(l); |
1230 | l->l_stat = LSSTOP; | | 1236 | l->l_stat = LSSTOP; |
1231 | p->p_stat = SSTOP; | | 1237 | p->p_stat = SSTOP; |
1232 | p->p_nrlwps--; | | 1238 | p->p_nrlwps--; |
1233 | lwp_unlock(l); | | 1239 | lwp_unlock(l); |
1234 | mutex_exit(p->p_lock); | | 1240 | mutex_exit(p->p_lock); |
1235 | mutex_exit(proc_lock); | | 1241 | mutex_exit(proc_lock); |
1236 | lwp_lock(l); | | 1242 | lwp_lock(l); |
1237 | mi_switch(l); | | 1243 | mi_switch(l); |
1238 | ksiginfo_queue_drain(&kq); | | 1244 | ksiginfo_queue_drain(&kq); |
1239 | KERNEL_LOCK(l->l_biglocks, l); | | 1245 | KERNEL_LOCK(l->l_biglocks, l); |
1240 | } else { | | 1246 | } else { |
1241 | mutex_exit(proc_lock); | | 1247 | mutex_exit(proc_lock); |
1242 | } | | 1248 | } |
1243 | | | 1249 | |
1244 | pathbuf_stringcopy_put(pb, pathstring); | | 1250 | pathbuf_stringcopy_put(pb, pathstring); |
1245 | pathbuf_destroy(pb); | | 1251 | pathbuf_destroy(pb); |
1246 | PNBUF_PUT(resolvedpathbuf); | | 1252 | PNBUF_PUT(resolvedpathbuf); |
1247 | return (EJUSTRETURN); | | 1253 | return (EJUSTRETURN); |
1248 | | | 1254 | |
1249 | bad: | | 1255 | bad: |
1250 | /* free the vmspace-creation commands, and release their references */ | | 1256 | /* free the vmspace-creation commands, and release their references */ |
1251 | kill_vmcmds(&pack.ep_vmcmds); | | 1257 | kill_vmcmds(&pack.ep_vmcmds); |
1252 | /* kill any opened file descriptor, if necessary */ | | 1258 | /* kill any opened file descriptor, if necessary */ |
1253 | if (pack.ep_flags & EXEC_HASFD) { | | 1259 | if (pack.ep_flags & EXEC_HASFD) { |
1254 | pack.ep_flags &= ~EXEC_HASFD; | | 1260 | pack.ep_flags &= ~EXEC_HASFD; |
1255 | fd_close(pack.ep_fd); | | 1261 | fd_close(pack.ep_fd); |
1256 | } | | 1262 | } |
1257 | /* close and put the exec'd file */ | | 1263 | /* close and put the exec'd file */ |
1258 | vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); | | 1264 | vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY); |
1259 | VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred); | | 1265 | VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred); |
1260 | vput(pack.ep_vp); | | 1266 | vput(pack.ep_vp); |
1261 | pool_put(&exec_pool, argp); | | 1267 | pool_put(&exec_pool, argp); |
1262 | | | 1268 | |
1263 | freehdr: | | 1269 | freehdr: |
1264 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); | | 1270 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); |
1265 | if (pack.ep_emul_root != NULL) | | 1271 | if (pack.ep_emul_root != NULL) |
1266 | vrele(pack.ep_emul_root); | | 1272 | vrele(pack.ep_emul_root); |
1267 | if (pack.ep_interp != NULL) | | 1273 | if (pack.ep_interp != NULL) |
1268 | vrele(pack.ep_interp); | | 1274 | vrele(pack.ep_interp); |
1269 | | | 1275 | |
1270 | rw_exit(&exec_lock); | | 1276 | rw_exit(&exec_lock); |
1271 | | | 1277 | |
1272 | pathbuf_stringcopy_put(pb, pathstring); | | 1278 | pathbuf_stringcopy_put(pb, pathstring); |
1273 | pathbuf_destroy(pb); | | 1279 | pathbuf_destroy(pb); |
1274 | PNBUF_PUT(resolvedpathbuf); | | 1280 | PNBUF_PUT(resolvedpathbuf); |
1275 | | | 1281 | |
1276 | clrflg: | | 1282 | clrflg: |
1277 | lwp_lock(l); | | 1283 | lwp_lock(l); |
1278 | l->l_flag |= oldlwpflags; | | 1284 | l->l_flag |= oldlwpflags; |
1279 | lwp_unlock(l); | | 1285 | lwp_unlock(l); |
1280 | rw_exit(&p->p_reflock); | | 1286 | rw_exit(&p->p_reflock); |
1281 | | | 1287 | |
1282 | if (modgen != module_gen && error == ENOEXEC) { | | 1288 | if (modgen != module_gen && error == ENOEXEC) { |
1283 | modgen = module_gen; | | 1289 | modgen = module_gen; |
1284 | exec_autoload(); | | 1290 | exec_autoload(); |
1285 | goto retry; | | 1291 | goto retry; |
1286 | } | | 1292 | } |
1287 | | | 1293 | |
1288 | SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0); | | 1294 | SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0); |
1289 | return error; | | 1295 | return error; |
1290 | | | 1296 | |
1291 | exec_abort: | | 1297 | exec_abort: |
1292 | SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0); | | 1298 | SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0); |
1293 | rw_exit(&p->p_reflock); | | 1299 | rw_exit(&p->p_reflock); |
1294 | rw_exit(&exec_lock); | | 1300 | rw_exit(&exec_lock); |
1295 | | | 1301 | |
1296 | pathbuf_stringcopy_put(pb, pathstring); | | 1302 | pathbuf_stringcopy_put(pb, pathstring); |
1297 | pathbuf_destroy(pb); | | 1303 | pathbuf_destroy(pb); |
1298 | PNBUF_PUT(resolvedpathbuf); | | 1304 | PNBUF_PUT(resolvedpathbuf); |
1299 | | | 1305 | |
1300 | /* | | 1306 | /* |
1301 | * the old process doesn't exist anymore. exit gracefully. | | 1307 | * the old process doesn't exist anymore. exit gracefully. |
1302 | * get rid of the (new) address space we have created, if any, get rid | | 1308 | * get rid of the (new) address space we have created, if any, get rid |
1303 | * of our namei data and vnode, and exit noting failure | | 1309 | * of our namei data and vnode, and exit noting failure |
1304 | */ | | 1310 | */ |
1305 | uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS, | | 1311 | uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS, |
1306 | VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS); | | 1312 | VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS); |
1307 | if (pack.ep_emul_arg) | | 1313 | if (pack.ep_emul_arg) |
1308 | free(pack.ep_emul_arg, M_TEMP); | | 1314 | free(pack.ep_emul_arg, M_TEMP); |
1309 | pool_put(&exec_pool, argp); | | 1315 | pool_put(&exec_pool, argp); |
1310 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); | | 1316 | kmem_free(pack.ep_hdr, pack.ep_hdrlen); |
1311 | if (pack.ep_emul_root != NULL) | | 1317 | if (pack.ep_emul_root != NULL) |
1312 | vrele(pack.ep_emul_root); | | 1318 | vrele(pack.ep_emul_root); |
1313 | if (pack.ep_interp != NULL) | | 1319 | if (pack.ep_interp != NULL) |
1314 | vrele(pack.ep_interp); | | 1320 | vrele(pack.ep_interp); |
1315 | | | 1321 | |
1316 | /* Acquire the sched-state mutex (exit1() will release it). */ | | 1322 | /* Acquire the sched-state mutex (exit1() will release it). */ |
1317 | mutex_enter(p->p_lock); | | 1323 | mutex_enter(p->p_lock); |
1318 | exit1(l, W_EXITCODE(error, SIGABRT)); | | 1324 | exit1(l, W_EXITCODE(error, SIGABRT)); |
1319 | | | 1325 | |
1320 | /* NOTREACHED */ | | 1326 | /* NOTREACHED */ |
1321 | return 0; | | 1327 | return 0; |
1322 | } | | 1328 | } |
1323 | | | 1329 | |
1324 | int | | 1330 | int |
1325 | copyargs(struct lwp *l, struct exec_package *pack, struct ps_strings *arginfo, | | 1331 | copyargs(struct lwp *l, struct exec_package *pack, struct ps_strings *arginfo, |
1326 | char **stackp, void *argp) | | 1332 | char **stackp, void *argp) |
1327 | { | | 1333 | { |
1328 | char **cpp, *dp, *sp; | | 1334 | char **cpp, *dp, *sp; |
1329 | size_t len; | | 1335 | size_t len; |
1330 | void *nullp; | | 1336 | void *nullp; |
1331 | long argc, envc; | | 1337 | long argc, envc; |
1332 | int error; | | 1338 | int error; |
1333 | | | 1339 | |
1334 | cpp = (char **)*stackp; | | 1340 | cpp = (char **)*stackp; |
1335 | nullp = NULL; | | 1341 | nullp = NULL; |
1336 | argc = arginfo->ps_nargvstr; | | 1342 | argc = arginfo->ps_nargvstr; |
1337 | envc = arginfo->ps_nenvstr; | | 1343 | envc = arginfo->ps_nenvstr; |
1338 | if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0) { | | 1344 | if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0) { |
1339 | COPYPRINTF("", cpp - 1, sizeof(argc)); | | 1345 | COPYPRINTF("", cpp - 1, sizeof(argc)); |
1340 | return error; | | 1346 | return error; |
1341 | } | | 1347 | } |
1342 | | | 1348 | |
1343 | dp = (char *) (cpp + argc + envc + 2 + pack->ep_esch->es_arglen); | | 1349 | dp = (char *) (cpp + argc + envc + 2 + pack->ep_esch->es_arglen); |
1344 | sp = argp; | | 1350 | sp = argp; |
1345 | | | 1351 | |
1346 | /* XXX don't copy them out, remap them! */ | | 1352 | /* XXX don't copy them out, remap them! */ |
1347 | arginfo->ps_argvstr = cpp; /* remember location of argv for later */ | | 1353 | arginfo->ps_argvstr = cpp; /* remember location of argv for later */ |
1348 | | | 1354 | |
1349 | for (; --argc >= 0; sp += len, dp += len) { | | 1355 | for (; --argc >= 0; sp += len, dp += len) { |
1350 | if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) { | | 1356 | if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) { |
1351 | COPYPRINTF("", cpp - 1, sizeof(dp)); | | 1357 | COPYPRINTF("", cpp - 1, sizeof(dp)); |
1352 | return error; | | 1358 | return error; |
1353 | } | | 1359 | } |
1354 | if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) { | | 1360 | if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) { |
1355 | COPYPRINTF("str", dp, (size_t)ARG_MAX); | | 1361 | COPYPRINTF("str", dp, (size_t)ARG_MAX); |
1356 | return error; | | 1362 | return error; |
1357 | } | | 1363 | } |
1358 | } | | 1364 | } |
1359 | | | 1365 | |
1360 | if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) { | | 1366 | if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) { |
1361 | COPYPRINTF("", cpp - 1, sizeof(nullp)); | | 1367 | COPYPRINTF("", cpp - 1, sizeof(nullp)); |
1362 | return error; | | 1368 | return error; |
1363 | } | | 1369 | } |
1364 | | | 1370 | |
1365 | arginfo->ps_envstr = cpp; /* remember location of envp for later */ | | 1371 | arginfo->ps_envstr = cpp; /* remember location of envp for later */ |
1366 | | | 1372 | |
1367 | for (; --envc >= 0; sp += len, dp += len) { | | 1373 | for (; --envc >= 0; sp += len, dp += len) { |
1368 | if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) { | | 1374 | if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) { |
1369 | COPYPRINTF("", cpp - 1, sizeof(dp)); | | 1375 | COPYPRINTF("", cpp - 1, sizeof(dp)); |
1370 | return error; | | 1376 | return error; |
1371 | } | | 1377 | } |
1372 | if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) { | | 1378 | if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) { |
1373 | COPYPRINTF("str", dp, (size_t)ARG_MAX); | | 1379 | COPYPRINTF("str", dp, (size_t)ARG_MAX); |
1374 | return error; | | 1380 | return error; |
1375 | } | | 1381 | } |
1376 | } | | 1382 | } |
1377 | | | 1383 | |
1378 | if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) { | | 1384 | if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) { |
1379 | COPYPRINTF("", cpp - 1, sizeof(nullp)); | | 1385 | COPYPRINTF("", cpp - 1, sizeof(nullp)); |
1380 | return error; | | 1386 | return error; |
1381 | } | | 1387 | } |
1382 | | | 1388 | |
1383 | *stackp = (char *)cpp; | | 1389 | *stackp = (char *)cpp; |
1384 | return 0; | | 1390 | return 0; |
1385 | } | | 1391 | } |
1386 | | | 1392 | |
1387 | | | 1393 | |
1388 | /* | | 1394 | /* |
1389 | * Add execsw[] entries. | | 1395 | * Add execsw[] entries. |
1390 | */ | | 1396 | */ |
1391 | int | | 1397 | int |
1392 | exec_add(struct execsw *esp, int count) | | 1398 | exec_add(struct execsw *esp, int count) |
1393 | { | | 1399 | { |
1394 | struct exec_entry *it; | | 1400 | struct exec_entry *it; |