| @@ -1,951 +1,953 @@ | | | @@ -1,951 +1,953 @@ |
1 | /* $NetBSD: exec_elf.c,v 1.31 2011/08/02 16:44:01 christos Exp $ */ | | 1 | /* $NetBSD: exec_elf.c,v 1.32 2011/08/27 17:53:21 reinoud Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1994, 2000, 2005 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1994, 2000, 2005 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Christos Zoulas. | | 8 | * by Christos Zoulas. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* | | 32 | /* |
33 | * Copyright (c) 1996 Christopher G. Demetriou | | 33 | * Copyright (c) 1996 Christopher G. Demetriou |
34 | * All rights reserved. | | 34 | * All rights reserved. |
35 | * | | 35 | * |
36 | * Redistribution and use in source and binary forms, with or without | | 36 | * Redistribution and use in source and binary forms, with or without |
37 | * modification, are permitted provided that the following conditions | | 37 | * modification, are permitted provided that the following conditions |
38 | * are met: | | 38 | * are met: |
39 | * 1. Redistributions of source code must retain the above copyright | | 39 | * 1. Redistributions of source code must retain the above copyright |
40 | * notice, this list of conditions and the following disclaimer. | | 40 | * notice, this list of conditions and the following disclaimer. |
41 | * 2. Redistributions in binary form must reproduce the above copyright | | 41 | * 2. Redistributions in binary form must reproduce the above copyright |
42 | * notice, this list of conditions and the following disclaimer in the | | 42 | * notice, this list of conditions and the following disclaimer in the |
43 | * documentation and/or other materials provided with the distribution. | | 43 | * documentation and/or other materials provided with the distribution. |
44 | * 3. The name of the author may not be used to endorse or promote products | | 44 | * 3. The name of the author may not be used to endorse or promote products |
45 | * derived from this software without specific prior written permission | | 45 | * derived from this software without specific prior written permission |
46 | * | | 46 | * |
47 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 47 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
48 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 48 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
49 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 49 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
50 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 50 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
51 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 51 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
52 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 52 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
53 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 53 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
54 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 54 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
55 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 55 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
56 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 56 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
57 | */ | | 57 | */ |
58 | | | 58 | |
59 | #include <sys/cdefs.h> | | 59 | #include <sys/cdefs.h> |
60 | __KERNEL_RCSID(1, "$NetBSD: exec_elf.c,v 1.31 2011/08/02 16:44:01 christos Exp $"); | | 60 | __KERNEL_RCSID(1, "$NetBSD: exec_elf.c,v 1.32 2011/08/27 17:53:21 reinoud Exp $"); |
61 | | | 61 | |
62 | #ifdef _KERNEL_OPT | | 62 | #ifdef _KERNEL_OPT |
63 | #include "opt_pax.h" | | 63 | #include "opt_pax.h" |
64 | #endif /* _KERNEL_OPT */ | | 64 | #endif /* _KERNEL_OPT */ |
65 | | | 65 | |
66 | #include <sys/param.h> | | 66 | #include <sys/param.h> |
67 | #include <sys/proc.h> | | 67 | #include <sys/proc.h> |
68 | #include <sys/malloc.h> | | 68 | #include <sys/malloc.h> |
69 | #include <sys/kmem.h> | | 69 | #include <sys/kmem.h> |
70 | #include <sys/namei.h> | | 70 | #include <sys/namei.h> |
71 | #include <sys/vnode.h> | | 71 | #include <sys/vnode.h> |
72 | #include <sys/exec.h> | | 72 | #include <sys/exec.h> |
73 | #include <sys/exec_elf.h> | | 73 | #include <sys/exec_elf.h> |
74 | #include <sys/syscall.h> | | 74 | #include <sys/syscall.h> |
75 | #include <sys/signalvar.h> | | 75 | #include <sys/signalvar.h> |
76 | #include <sys/mount.h> | | 76 | #include <sys/mount.h> |
77 | #include <sys/stat.h> | | 77 | #include <sys/stat.h> |
78 | #include <sys/kauth.h> | | 78 | #include <sys/kauth.h> |
79 | #include <sys/bitops.h> | | 79 | #include <sys/bitops.h> |
80 | | | 80 | |
81 | #include <sys/cpu.h> | | 81 | #include <sys/cpu.h> |
82 | #include <machine/reg.h> | | 82 | #include <machine/reg.h> |
83 | | | 83 | |
84 | #include <compat/common/compat_util.h> | | 84 | #include <compat/common/compat_util.h> |
85 | | | 85 | |
86 | #include <sys/pax.h> | | 86 | #include <sys/pax.h> |
87 | | | 87 | |
88 | extern struct emul emul_netbsd; | | 88 | extern struct emul emul_netbsd; |
89 | | | 89 | |
90 | #define elf_check_header ELFNAME(check_header) | | 90 | #define elf_check_header ELFNAME(check_header) |
91 | #define elf_copyargs ELFNAME(copyargs) | | 91 | #define elf_copyargs ELFNAME(copyargs) |
92 | #define elf_load_file ELFNAME(load_file) | | 92 | #define elf_load_file ELFNAME(load_file) |
93 | #define elf_load_psection ELFNAME(load_psection) | | 93 | #define elf_load_psection ELFNAME(load_psection) |
94 | #define exec_elf_makecmds ELFNAME2(exec,makecmds) | | 94 | #define exec_elf_makecmds ELFNAME2(exec,makecmds) |
95 | #define netbsd_elf_signature ELFNAME2(netbsd,signature) | | 95 | #define netbsd_elf_signature ELFNAME2(netbsd,signature) |
96 | #define netbsd_elf_probe ELFNAME2(netbsd,probe) | | 96 | #define netbsd_elf_probe ELFNAME2(netbsd,probe) |
97 | #define coredump ELFNAMEEND(coredump) | | 97 | #define coredump ELFNAMEEND(coredump) |
98 | | | 98 | |
99 | int elf_load_file(struct lwp *, struct exec_package *, char *, | | 99 | int elf_load_file(struct lwp *, struct exec_package *, char *, |
100 | struct exec_vmcmd_set *, u_long *, struct elf_args *, Elf_Addr *); | | 100 | struct exec_vmcmd_set *, u_long *, struct elf_args *, Elf_Addr *); |
101 | void elf_load_psection(struct exec_vmcmd_set *, struct vnode *, | | 101 | void elf_load_psection(struct exec_vmcmd_set *, struct vnode *, |
102 | const Elf_Phdr *, Elf_Addr *, u_long *, int *, int); | | 102 | const Elf_Phdr *, Elf_Addr *, u_long *, int *, int); |
103 | | | 103 | |
104 | int netbsd_elf_signature(struct lwp *, struct exec_package *, Elf_Ehdr *); | | 104 | int netbsd_elf_signature(struct lwp *, struct exec_package *, Elf_Ehdr *); |
105 | int netbsd_elf_probe(struct lwp *, struct exec_package *, void *, char *, | | 105 | int netbsd_elf_probe(struct lwp *, struct exec_package *, void *, char *, |
106 | vaddr_t *); | | 106 | vaddr_t *); |
107 | | | 107 | |
108 | /* round up and down to page boundaries. */ | | 108 | /* round up and down to page boundaries. */ |
109 | #define ELF_ROUND(a, b) (((a) + (b) - 1) & ~((b) - 1)) | | 109 | #define ELF_ROUND(a, b) (((a) + (b) - 1) & ~((b) - 1)) |
110 | #define ELF_TRUNC(a, b) ((a) & ~((b) - 1)) | | 110 | #define ELF_TRUNC(a, b) ((a) & ~((b) - 1)) |
111 | | | 111 | |
112 | /* | | 112 | /* |
113 | * Arbitrary limits to avoid DoS for excessive memory allocation. | | 113 | * Arbitrary limits to avoid DoS for excessive memory allocation. |
114 | */ | | 114 | */ |
115 | #define MAXPHNUM 128 | | 115 | #define MAXPHNUM 128 |
116 | #define MAXSHNUM 32768 | | 116 | #define MAXSHNUM 32768 |
117 | #define MAXNOTESIZE 1024 | | 117 | #define MAXNOTESIZE 1024 |
118 | | | 118 | |
119 | static void | | 119 | static void |
120 | elf_placedynexec(struct lwp *l, struct exec_package *epp, Elf_Ehdr *eh, | | 120 | elf_placedynexec(struct lwp *l, struct exec_package *epp, Elf_Ehdr *eh, |
121 | Elf_Phdr *ph) | | 121 | Elf_Phdr *ph) |
122 | { | | 122 | { |
123 | Elf_Addr align, offset; | | 123 | Elf_Addr align, offset; |
124 | int i; | | 124 | int i; |
125 | | | 125 | |
126 | for (align = i = 0; i < eh->e_phnum; i++) | | 126 | for (align = i = 0; i < eh->e_phnum; i++) |
127 | if (ph[i].p_type == PT_LOAD && ph[i].p_align > align) | | 127 | if (ph[i].p_type == PT_LOAD && ph[i].p_align > align) |
128 | align = ph[i].p_align; | | 128 | align = ph[i].p_align; |
129 | | | 129 | |
130 | #ifdef PAX_ASLR | | 130 | #ifdef PAX_ASLR |
131 | if (pax_aslr_active(l)) { | | 131 | if (pax_aslr_active(l)) { |
132 | size_t pax_align, l2, delta; | | 132 | size_t pax_align, l2, delta; |
133 | uint32_t r; | | 133 | uint32_t r; |
134 | | | 134 | |
135 | pax_align = align; | | 135 | pax_align = align; |
136 | | | 136 | |
137 | r = arc4random(); | | 137 | r = arc4random(); |
138 | | | 138 | |
139 | if (pax_align == 0) | | 139 | if (pax_align == 0) |
140 | pax_align = PGSHIFT; | | 140 | pax_align = PGSHIFT; |
141 | l2 = ilog2(pax_align); | | 141 | l2 = ilog2(pax_align); |
142 | delta = PAX_ASLR_DELTA(r, l2, PAX_ASLR_DELTA_EXEC_LEN); | | 142 | delta = PAX_ASLR_DELTA(r, l2, PAX_ASLR_DELTA_EXEC_LEN); |
143 | offset = ELF_TRUNC(delta, pax_align) + PAGE_SIZE; | | 143 | offset = ELF_TRUNC(delta, pax_align) + PAGE_SIZE; |
144 | #ifdef PAX_ASLR_DEBUG | | 144 | #ifdef PAX_ASLR_DEBUG |
145 | uprintf("r=0x%x l2=0x%zx PGSHIFT=0x%x Delta=0x%zx\n", r, l2, | | 145 | uprintf("r=0x%x l2=0x%zx PGSHIFT=0x%x Delta=0x%zx\n", r, l2, |
146 | PGSHIFT, delta); | | 146 | PGSHIFT, delta); |
147 | uprintf("pax offset=0x%llx entry=0x%llx\n", | | 147 | uprintf("pax offset=0x%llx entry=0x%llx\n", |
148 | (unsigned long long)offset, | | 148 | (unsigned long long)offset, |
149 | (unsigned long long)eh->e_entry); | | 149 | (unsigned long long)eh->e_entry); |
150 | #endif /* PAX_ASLR_DEBUG */ | | 150 | #endif /* PAX_ASLR_DEBUG */ |
151 | } else | | 151 | } else |
152 | #endif /* PAX_ASLR */ | | 152 | #endif /* PAX_ASLR */ |
153 | offset = MAX(align, PAGE_SIZE); | | 153 | offset = MAX(align, PAGE_SIZE); |
154 | | | 154 | |
| | | 155 | offset += epp->ep_vm_minaddr; |
| | | 156 | |
155 | for (i = 0; i < eh->e_phnum; i++) | | 157 | for (i = 0; i < eh->e_phnum; i++) |
156 | ph[i].p_vaddr += offset; | | 158 | ph[i].p_vaddr += offset; |
157 | eh->e_entry += offset; | | 159 | eh->e_entry += offset; |
158 | } | | 160 | } |
159 | | | 161 | |
160 | /* | | 162 | /* |
161 | * Copy arguments onto the stack in the normal way, but add some | | 163 | * Copy arguments onto the stack in the normal way, but add some |
162 | * extra information in case of dynamic binding. | | 164 | * extra information in case of dynamic binding. |
163 | */ | | 165 | */ |
164 | int | | 166 | int |
165 | elf_copyargs(struct lwp *l, struct exec_package *pack, | | 167 | elf_copyargs(struct lwp *l, struct exec_package *pack, |
166 | struct ps_strings *arginfo, char **stackp, void *argp) | | 168 | struct ps_strings *arginfo, char **stackp, void *argp) |
167 | { | | 169 | { |
168 | size_t len, vlen; | | 170 | size_t len, vlen; |
169 | AuxInfo ai[ELF_AUX_ENTRIES], *a, *execname; | | 171 | AuxInfo ai[ELF_AUX_ENTRIES], *a, *execname; |
170 | struct elf_args *ap; | | 172 | struct elf_args *ap; |
171 | int error; | | 173 | int error; |
172 | | | 174 | |
173 | if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0) | | 175 | if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0) |
174 | return error; | | 176 | return error; |
175 | | | 177 | |
176 | a = ai; | | 178 | a = ai; |
177 | execname = NULL; | | 179 | execname = NULL; |
178 | | | 180 | |
179 | /* | | 181 | /* |
180 | * Push extra arguments on the stack needed by dynamically | | 182 | * Push extra arguments on the stack needed by dynamically |
181 | * linked binaries | | 183 | * linked binaries |
182 | */ | | 184 | */ |
183 | if ((ap = (struct elf_args *)pack->ep_emul_arg)) { | | 185 | if ((ap = (struct elf_args *)pack->ep_emul_arg)) { |
184 | struct vattr *vap = pack->ep_vap; | | 186 | struct vattr *vap = pack->ep_vap; |
185 | | | 187 | |
186 | a->a_type = AT_PHDR; | | 188 | a->a_type = AT_PHDR; |
187 | a->a_v = ap->arg_phaddr; | | 189 | a->a_v = ap->arg_phaddr; |
188 | a++; | | 190 | a++; |
189 | | | 191 | |
190 | a->a_type = AT_PHENT; | | 192 | a->a_type = AT_PHENT; |
191 | a->a_v = ap->arg_phentsize; | | 193 | a->a_v = ap->arg_phentsize; |
192 | a++; | | 194 | a++; |
193 | | | 195 | |
194 | a->a_type = AT_PHNUM; | | 196 | a->a_type = AT_PHNUM; |
195 | a->a_v = ap->arg_phnum; | | 197 | a->a_v = ap->arg_phnum; |
196 | a++; | | 198 | a++; |
197 | | | 199 | |
198 | a->a_type = AT_PAGESZ; | | 200 | a->a_type = AT_PAGESZ; |
199 | a->a_v = PAGE_SIZE; | | 201 | a->a_v = PAGE_SIZE; |
200 | a++; | | 202 | a++; |
201 | | | 203 | |
202 | a->a_type = AT_BASE; | | 204 | a->a_type = AT_BASE; |
203 | a->a_v = ap->arg_interp; | | 205 | a->a_v = ap->arg_interp; |
204 | a++; | | 206 | a++; |
205 | | | 207 | |
206 | a->a_type = AT_FLAGS; | | 208 | a->a_type = AT_FLAGS; |
207 | a->a_v = 0; | | 209 | a->a_v = 0; |
208 | a++; | | 210 | a++; |
209 | | | 211 | |
210 | a->a_type = AT_ENTRY; | | 212 | a->a_type = AT_ENTRY; |
211 | a->a_v = ap->arg_entry; | | 213 | a->a_v = ap->arg_entry; |
212 | a++; | | 214 | a++; |
213 | | | 215 | |
214 | a->a_type = AT_EUID; | | 216 | a->a_type = AT_EUID; |
215 | if (vap->va_mode & S_ISUID) | | 217 | if (vap->va_mode & S_ISUID) |
216 | a->a_v = vap->va_uid; | | 218 | a->a_v = vap->va_uid; |
217 | else | | 219 | else |
218 | a->a_v = kauth_cred_geteuid(l->l_cred); | | 220 | a->a_v = kauth_cred_geteuid(l->l_cred); |
219 | a++; | | 221 | a++; |
220 | | | 222 | |
221 | a->a_type = AT_RUID; | | 223 | a->a_type = AT_RUID; |
222 | a->a_v = kauth_cred_getuid(l->l_cred); | | 224 | a->a_v = kauth_cred_getuid(l->l_cred); |
223 | a++; | | 225 | a++; |
224 | | | 226 | |
225 | a->a_type = AT_EGID; | | 227 | a->a_type = AT_EGID; |
226 | if (vap->va_mode & S_ISGID) | | 228 | if (vap->va_mode & S_ISGID) |
227 | a->a_v = vap->va_gid; | | 229 | a->a_v = vap->va_gid; |
228 | else | | 230 | else |
229 | a->a_v = kauth_cred_getegid(l->l_cred); | | 231 | a->a_v = kauth_cred_getegid(l->l_cred); |
230 | a++; | | 232 | a++; |
231 | | | 233 | |
232 | a->a_type = AT_RGID; | | 234 | a->a_type = AT_RGID; |
233 | a->a_v = kauth_cred_getgid(l->l_cred); | | 235 | a->a_v = kauth_cred_getgid(l->l_cred); |
234 | a++; | | 236 | a++; |
235 | | | 237 | |
236 | if (pack->ep_path) { | | 238 | if (pack->ep_path) { |
237 | execname = a; | | 239 | execname = a; |
238 | a->a_type = AT_SUN_EXECNAME; | | 240 | a->a_type = AT_SUN_EXECNAME; |
239 | a++; | | 241 | a++; |
240 | } | | 242 | } |
241 | | | 243 | |
242 | free(ap, M_TEMP); | | 244 | free(ap, M_TEMP); |
243 | pack->ep_emul_arg = NULL; | | 245 | pack->ep_emul_arg = NULL; |
244 | } | | 246 | } |
245 | | | 247 | |
246 | a->a_type = AT_NULL; | | 248 | a->a_type = AT_NULL; |
247 | a->a_v = 0; | | 249 | a->a_v = 0; |
248 | a++; | | 250 | a++; |
249 | | | 251 | |
250 | vlen = (a - ai) * sizeof(AuxInfo); | | 252 | vlen = (a - ai) * sizeof(AuxInfo); |
251 | | | 253 | |
252 | if (execname) { | | 254 | if (execname) { |
253 | char *path = pack->ep_path; | | 255 | char *path = pack->ep_path; |
254 | execname->a_v = (uintptr_t)(*stackp + vlen); | | 256 | execname->a_v = (uintptr_t)(*stackp + vlen); |
255 | len = strlen(path) + 1; | | 257 | len = strlen(path) + 1; |
256 | if ((error = copyout(path, (*stackp + vlen), len)) != 0) | | 258 | if ((error = copyout(path, (*stackp + vlen), len)) != 0) |
257 | return error; | | 259 | return error; |
258 | len = ALIGN(len); | | 260 | len = ALIGN(len); |
259 | } else | | 261 | } else |
260 | len = 0; | | 262 | len = 0; |
261 | | | 263 | |
262 | if ((error = copyout(ai, *stackp, vlen)) != 0) | | 264 | if ((error = copyout(ai, *stackp, vlen)) != 0) |
263 | return error; | | 265 | return error; |
264 | *stackp += vlen + len; | | 266 | *stackp += vlen + len; |
265 | | | 267 | |
266 | return 0; | | 268 | return 0; |
267 | } | | 269 | } |
268 | | | 270 | |
269 | /* | | 271 | /* |
270 | * elf_check_header(): | | 272 | * elf_check_header(): |
271 | * | | 273 | * |
272 | * Check header for validity; return 0 of ok ENOEXEC if error | | 274 | * Check header for validity; return 0 of ok ENOEXEC if error |
273 | */ | | 275 | */ |
274 | int | | 276 | int |
275 | elf_check_header(Elf_Ehdr *eh, int type) | | 277 | elf_check_header(Elf_Ehdr *eh, int type) |
276 | { | | 278 | { |
277 | | | 279 | |
278 | if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 || | | 280 | if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 || |
279 | eh->e_ident[EI_CLASS] != ELFCLASS) | | 281 | eh->e_ident[EI_CLASS] != ELFCLASS) |
280 | return ENOEXEC; | | 282 | return ENOEXEC; |
281 | | | 283 | |
282 | switch (eh->e_machine) { | | 284 | switch (eh->e_machine) { |
283 | | | 285 | |
284 | ELFDEFNNAME(MACHDEP_ID_CASES) | | 286 | ELFDEFNNAME(MACHDEP_ID_CASES) |
285 | | | 287 | |
286 | default: | | 288 | default: |
287 | return ENOEXEC; | | 289 | return ENOEXEC; |
288 | } | | 290 | } |
289 | | | 291 | |
290 | if (ELF_EHDR_FLAGS_OK(eh) == 0) | | 292 | if (ELF_EHDR_FLAGS_OK(eh) == 0) |
291 | return ENOEXEC; | | 293 | return ENOEXEC; |
292 | | | 294 | |
293 | if (eh->e_type != type) | | 295 | if (eh->e_type != type) |
294 | return ENOEXEC; | | 296 | return ENOEXEC; |
295 | | | 297 | |
296 | if (eh->e_shnum > MAXSHNUM || eh->e_phnum > MAXPHNUM) | | 298 | if (eh->e_shnum > MAXSHNUM || eh->e_phnum > MAXPHNUM) |
297 | return ENOEXEC; | | 299 | return ENOEXEC; |
298 | | | 300 | |
299 | return 0; | | 301 | return 0; |
300 | } | | 302 | } |
301 | | | 303 | |
302 | /* | | 304 | /* |
303 | * elf_load_psection(): | | 305 | * elf_load_psection(): |
304 | * | | 306 | * |
305 | * Load a psection at the appropriate address | | 307 | * Load a psection at the appropriate address |
306 | */ | | 308 | */ |
307 | void | | 309 | void |
308 | elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp, | | 310 | elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp, |
309 | const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int *prot, int flags) | | 311 | const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int *prot, int flags) |
310 | { | | 312 | { |
311 | u_long msize, psize, rm, rf; | | 313 | u_long msize, psize, rm, rf; |
312 | long diff, offset; | | 314 | long diff, offset; |
313 | | | 315 | |
314 | /* | | 316 | /* |
315 | * If the user specified an address, then we load there. | | 317 | * If the user specified an address, then we load there. |
316 | */ | | 318 | */ |
317 | if (*addr == ELFDEFNNAME(NO_ADDR)) | | 319 | if (*addr == ELFDEFNNAME(NO_ADDR)) |
318 | *addr = ph->p_vaddr; | | 320 | *addr = ph->p_vaddr; |
319 | | | 321 | |
320 | if (ph->p_align > 1) { | | 322 | if (ph->p_align > 1) { |
321 | /* | | 323 | /* |
322 | * Make sure we are virtually aligned as we are supposed to be. | | 324 | * Make sure we are virtually aligned as we are supposed to be. |
323 | */ | | 325 | */ |
324 | diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align); | | 326 | diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align); |
325 | KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align)); | | 327 | KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align)); |
326 | /* | | 328 | /* |
327 | * But make sure to not map any pages before the start of the | | 329 | * But make sure to not map any pages before the start of the |
328 | * psection by limiting the difference to within a page. | | 330 | * psection by limiting the difference to within a page. |
329 | */ | | 331 | */ |
330 | diff &= PAGE_MASK; | | 332 | diff &= PAGE_MASK; |
331 | } else | | 333 | } else |
332 | diff = 0; | | 334 | diff = 0; |
333 | | | 335 | |
334 | *prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0; | | 336 | *prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0; |
335 | *prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0; | | 337 | *prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0; |
336 | *prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0; | | 338 | *prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0; |
337 | | | 339 | |
338 | /* | | 340 | /* |
339 | * Adjust everything so it all starts on a page boundary. | | 341 | * Adjust everything so it all starts on a page boundary. |
340 | */ | | 342 | */ |
341 | *addr -= diff; | | 343 | *addr -= diff; |
342 | offset = ph->p_offset - diff; | | 344 | offset = ph->p_offset - diff; |
343 | *size = ph->p_filesz + diff; | | 345 | *size = ph->p_filesz + diff; |
344 | msize = ph->p_memsz + diff; | | 346 | msize = ph->p_memsz + diff; |
345 | | | 347 | |
346 | if (ph->p_align >= PAGE_SIZE) { | | 348 | if (ph->p_align >= PAGE_SIZE) { |
347 | if ((ph->p_flags & PF_W) != 0) { | | 349 | if ((ph->p_flags & PF_W) != 0) { |
348 | /* | | 350 | /* |
349 | * Because the pagedvn pager can't handle zero fill | | 351 | * Because the pagedvn pager can't handle zero fill |
350 | * of the last data page if it's not page aligned we | | 352 | * of the last data page if it's not page aligned we |
351 | * map the last page readvn. | | 353 | * map the last page readvn. |
352 | */ | | 354 | */ |
353 | psize = trunc_page(*size); | | 355 | psize = trunc_page(*size); |
354 | } else { | | 356 | } else { |
355 | psize = round_page(*size); | | 357 | psize = round_page(*size); |
356 | } | | 358 | } |
357 | } else { | | 359 | } else { |
358 | psize = *size; | | 360 | psize = *size; |
359 | } | | 361 | } |
360 | | | 362 | |
361 | if (psize > 0) { | | 363 | if (psize > 0) { |
362 | NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ? | | 364 | NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ? |
363 | vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp, | | 365 | vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp, |
364 | offset, *prot, flags); | | 366 | offset, *prot, flags); |
365 | flags &= VMCMD_RELATIVE; | | 367 | flags &= VMCMD_RELATIVE; |
366 | } | | 368 | } |
367 | if (psize < *size) { | | 369 | if (psize < *size) { |
368 | NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize, | | 370 | NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize, |
369 | *addr + psize, vp, offset + psize, *prot, flags); | | 371 | *addr + psize, vp, offset + psize, *prot, flags); |
370 | } | | 372 | } |
371 | | | 373 | |
372 | /* | | 374 | /* |
373 | * Check if we need to extend the size of the segment (does | | 375 | * Check if we need to extend the size of the segment (does |
374 | * bss extend page the next page boundary)? | | 376 | * bss extend page the next page boundary)? |
375 | */ | | 377 | */ |
376 | rm = round_page(*addr + msize); | | 378 | rm = round_page(*addr + msize); |
377 | rf = round_page(*addr + *size); | | 379 | rf = round_page(*addr + *size); |
378 | | | 380 | |
379 | if (rm != rf) { | | 381 | if (rm != rf) { |
380 | NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, | | 382 | NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, |
381 | 0, *prot, flags & VMCMD_RELATIVE); | | 383 | 0, *prot, flags & VMCMD_RELATIVE); |
382 | *size = msize; | | 384 | *size = msize; |
383 | } | | 385 | } |
384 | } | | 386 | } |
385 | | | 387 | |
386 | /* | | 388 | /* |
387 | * elf_load_file(): | | 389 | * elf_load_file(): |
388 | * | | 390 | * |
389 | * Load a file (interpreter/library) pointed to by path | | 391 | * Load a file (interpreter/library) pointed to by path |
390 | * [stolen from coff_load_shlib()]. Made slightly generic | | 392 | * [stolen from coff_load_shlib()]. Made slightly generic |
391 | * so it might be used externally. | | 393 | * so it might be used externally. |
392 | */ | | 394 | */ |
393 | int | | 395 | int |
394 | elf_load_file(struct lwp *l, struct exec_package *epp, char *path, | | 396 | elf_load_file(struct lwp *l, struct exec_package *epp, char *path, |
395 | struct exec_vmcmd_set *vcset, u_long *entryoff, struct elf_args *ap, | | 397 | struct exec_vmcmd_set *vcset, u_long *entryoff, struct elf_args *ap, |
396 | Elf_Addr *last) | | 398 | Elf_Addr *last) |
397 | { | | 399 | { |
398 | int error, i; | | 400 | int error, i; |
399 | struct vnode *vp; | | 401 | struct vnode *vp; |
400 | struct vattr attr; | | 402 | struct vattr attr; |
401 | Elf_Ehdr eh; | | 403 | Elf_Ehdr eh; |
402 | Elf_Phdr *ph = NULL; | | 404 | Elf_Phdr *ph = NULL; |
403 | const Elf_Phdr *ph0; | | 405 | const Elf_Phdr *ph0; |
404 | const Elf_Phdr *base_ph; | | 406 | const Elf_Phdr *base_ph; |
405 | const Elf_Phdr *last_ph; | | 407 | const Elf_Phdr *last_ph; |
406 | u_long phsize; | | 408 | u_long phsize; |
407 | Elf_Addr addr = *last; | | 409 | Elf_Addr addr = *last; |
408 | struct proc *p; | | 410 | struct proc *p; |
409 | | | 411 | |
410 | p = l->l_proc; | | 412 | p = l->l_proc; |
411 | | | 413 | |
412 | /* | | 414 | /* |
413 | * 1. open file | | 415 | * 1. open file |
414 | * 2. read filehdr | | 416 | * 2. read filehdr |
415 | * 3. map text, data, and bss out of it using VM_* | | 417 | * 3. map text, data, and bss out of it using VM_* |
416 | */ | | 418 | */ |
417 | vp = epp->ep_interp; | | 419 | vp = epp->ep_interp; |
418 | if (vp == NULL) { | | 420 | if (vp == NULL) { |
419 | error = emul_find_interp(l, epp, path); | | 421 | error = emul_find_interp(l, epp, path); |
420 | if (error != 0) | | 422 | if (error != 0) |
421 | return error; | | 423 | return error; |
422 | vp = epp->ep_interp; | | 424 | vp = epp->ep_interp; |
423 | } | | 425 | } |
424 | /* We'll tidy this ourselves - otherwise we have locking issues */ | | 426 | /* We'll tidy this ourselves - otherwise we have locking issues */ |
425 | epp->ep_interp = NULL; | | 427 | epp->ep_interp = NULL; |
426 | vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); | | 428 | vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); |
427 | | | 429 | |
428 | /* | | 430 | /* |
429 | * Similarly, if it's not marked as executable, or it's not a regular | | 431 | * Similarly, if it's not marked as executable, or it's not a regular |
430 | * file, we don't allow it to be used. | | 432 | * file, we don't allow it to be used. |
431 | */ | | 433 | */ |
432 | if (vp->v_type != VREG) { | | 434 | if (vp->v_type != VREG) { |
433 | error = EACCES; | | 435 | error = EACCES; |
434 | goto badunlock; | | 436 | goto badunlock; |
435 | } | | 437 | } |
436 | if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0) | | 438 | if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0) |
437 | goto badunlock; | | 439 | goto badunlock; |
438 | | | 440 | |
439 | /* get attributes */ | | 441 | /* get attributes */ |
440 | if ((error = VOP_GETATTR(vp, &attr, l->l_cred)) != 0) | | 442 | if ((error = VOP_GETATTR(vp, &attr, l->l_cred)) != 0) |
441 | goto badunlock; | | 443 | goto badunlock; |
442 | | | 444 | |
443 | /* | | 445 | /* |
444 | * Check mount point. Though we're not trying to exec this binary, | | 446 | * Check mount point. Though we're not trying to exec this binary, |
445 | * we will be executing code from it, so if the mount point | | 447 | * we will be executing code from it, so if the mount point |
446 | * disallows execution or set-id-ness, we punt or kill the set-id. | | 448 | * disallows execution or set-id-ness, we punt or kill the set-id. |
447 | */ | | 449 | */ |
448 | if (vp->v_mount->mnt_flag & MNT_NOEXEC) { | | 450 | if (vp->v_mount->mnt_flag & MNT_NOEXEC) { |
449 | error = EACCES; | | 451 | error = EACCES; |
450 | goto badunlock; | | 452 | goto badunlock; |
451 | } | | 453 | } |
452 | if (vp->v_mount->mnt_flag & MNT_NOSUID) | | 454 | if (vp->v_mount->mnt_flag & MNT_NOSUID) |
453 | epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID); | | 455 | epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID); |
454 | | | 456 | |
455 | #ifdef notyet /* XXX cgd 960926 */ | | 457 | #ifdef notyet /* XXX cgd 960926 */ |
456 | XXX cgd 960926: (maybe) VOP_OPEN it (and VOP_CLOSE in copyargs?) | | 458 | XXX cgd 960926: (maybe) VOP_OPEN it (and VOP_CLOSE in copyargs?) |
457 | | | 459 | |
458 | XXXps: this problem will make it impossible to use an interpreter | | 460 | XXXps: this problem will make it impossible to use an interpreter |
459 | from a file system which actually does something in VOP_OPEN | | 461 | from a file system which actually does something in VOP_OPEN |
460 | #endif | | 462 | #endif |
461 | | | 463 | |
462 | error = vn_marktext(vp); | | 464 | error = vn_marktext(vp); |
463 | if (error) | | 465 | if (error) |
464 | goto badunlock; | | 466 | goto badunlock; |
465 | | | 467 | |
466 | VOP_UNLOCK(vp); | | 468 | VOP_UNLOCK(vp); |
467 | | | 469 | |
468 | if ((error = exec_read_from(l, vp, 0, &eh, sizeof(eh))) != 0) | | 470 | if ((error = exec_read_from(l, vp, 0, &eh, sizeof(eh))) != 0) |
469 | goto bad; | | 471 | goto bad; |
470 | | | 472 | |
471 | if ((error = elf_check_header(&eh, ET_DYN)) != 0) | | 473 | if ((error = elf_check_header(&eh, ET_DYN)) != 0) |
472 | goto bad; | | 474 | goto bad; |
473 | | | 475 | |
474 | if (eh.e_phnum > MAXPHNUM || eh.e_phnum == 0) { | | 476 | if (eh.e_phnum > MAXPHNUM || eh.e_phnum == 0) { |
475 | error = ENOEXEC; | | 477 | error = ENOEXEC; |
476 | goto bad; | | 478 | goto bad; |
477 | } | | 479 | } |
478 | | | 480 | |
479 | phsize = eh.e_phnum * sizeof(Elf_Phdr); | | 481 | phsize = eh.e_phnum * sizeof(Elf_Phdr); |
480 | ph = kmem_alloc(phsize, KM_SLEEP); | | 482 | ph = kmem_alloc(phsize, KM_SLEEP); |
481 | | | 483 | |
482 | if ((error = exec_read_from(l, vp, eh.e_phoff, ph, phsize)) != 0) | | 484 | if ((error = exec_read_from(l, vp, eh.e_phoff, ph, phsize)) != 0) |
483 | goto bad; | | 485 | goto bad; |
484 | | | 486 | |
485 | #ifdef ELF_INTERP_NON_RELOCATABLE | | 487 | #ifdef ELF_INTERP_NON_RELOCATABLE |
486 | /* | | 488 | /* |
487 | * Evil hack: Only MIPS should be non-relocatable, and the | | 489 | * Evil hack: Only MIPS should be non-relocatable, and the |
488 | * psections should have a high address (typically 0x5ffe0000). | | 490 | * psections should have a high address (typically 0x5ffe0000). |
489 | * If it's now relocatable, it should be linked at 0 and the | | 491 | * If it's now relocatable, it should be linked at 0 and the |
490 | * psections should have zeros in the upper part of the address. | | 492 | * psections should have zeros in the upper part of the address. |
491 | * Otherwise, force the load at the linked address. | | 493 | * Otherwise, force the load at the linked address. |
492 | */ | | 494 | */ |
493 | if (*last == ELF_LINK_ADDR && (ph->p_vaddr & 0xffff0000) == 0) | | 495 | if (*last == ELF_LINK_ADDR && (ph->p_vaddr & 0xffff0000) == 0) |
494 | *last = ELFDEFNNAME(NO_ADDR); | | 496 | *last = ELFDEFNNAME(NO_ADDR); |
495 | #endif | | 497 | #endif |
496 | | | 498 | |
497 | /* | | 499 | /* |
498 | * If no position to load the interpreter was set by a probe | | 500 | * If no position to load the interpreter was set by a probe |
499 | * function, pick the same address that a non-fixed mmap(0, ..) | | 501 | * function, pick the same address that a non-fixed mmap(0, ..) |
500 | * would (i.e. something safely out of the way). | | 502 | * would (i.e. something safely out of the way). |
501 | */ | | 503 | */ |
502 | if (*last == ELFDEFNNAME(NO_ADDR)) { | | 504 | if (*last == ELFDEFNNAME(NO_ADDR)) { |
503 | u_long limit = 0; | | 505 | u_long limit = 0; |
504 | /* | | 506 | /* |
505 | * Find the start and ending addresses of the psections to | | 507 | * Find the start and ending addresses of the psections to |
506 | * be loaded. This will give us the size. | | 508 | * be loaded. This will give us the size. |
507 | */ | | 509 | */ |
508 | for (i = 0, ph0 = ph, base_ph = NULL; i < eh.e_phnum; | | 510 | for (i = 0, ph0 = ph, base_ph = NULL; i < eh.e_phnum; |
509 | i++, ph0++) { | | 511 | i++, ph0++) { |
510 | if (ph0->p_type == PT_LOAD) { | | 512 | if (ph0->p_type == PT_LOAD) { |
511 | u_long psize = ph0->p_vaddr + ph0->p_memsz; | | 513 | u_long psize = ph0->p_vaddr + ph0->p_memsz; |
512 | if (base_ph == NULL) | | 514 | if (base_ph == NULL) |
513 | base_ph = ph0; | | 515 | base_ph = ph0; |
514 | if (psize > limit) | | 516 | if (psize > limit) |
515 | limit = psize; | | 517 | limit = psize; |
516 | } | | 518 | } |
517 | } | | 519 | } |
518 | | | 520 | |
519 | if (base_ph == NULL) { | | 521 | if (base_ph == NULL) { |
520 | error = ENOEXEC; | | 522 | error = ENOEXEC; |
521 | goto bad; | | 523 | goto bad; |
522 | } | | 524 | } |
523 | | | 525 | |
524 | /* | | 526 | /* |
525 | * Now compute the size and load address. | | 527 | * Now compute the size and load address. |
526 | */ | | 528 | */ |
527 | addr = (*epp->ep_esch->es_emul->e_vm_default_addr)(p, | | 529 | addr = (*epp->ep_esch->es_emul->e_vm_default_addr)(p, |
528 | epp->ep_daddr, | | 530 | epp->ep_daddr, |
529 | round_page(limit) - trunc_page(base_ph->p_vaddr)); | | 531 | round_page(limit) - trunc_page(base_ph->p_vaddr)); |
530 | } else | | 532 | } else |
531 | addr = *last; /* may be ELF_LINK_ADDR */ | | 533 | addr = *last; /* may be ELF_LINK_ADDR */ |
532 | | | 534 | |
533 | /* | | 535 | /* |
534 | * Load all the necessary sections | | 536 | * Load all the necessary sections |
535 | */ | | 537 | */ |
536 | for (i = 0, ph0 = ph, base_ph = NULL, last_ph = NULL; | | 538 | for (i = 0, ph0 = ph, base_ph = NULL, last_ph = NULL; |
537 | i < eh.e_phnum; i++, ph0++) { | | 539 | i < eh.e_phnum; i++, ph0++) { |
538 | switch (ph0->p_type) { | | 540 | switch (ph0->p_type) { |
539 | case PT_LOAD: { | | 541 | case PT_LOAD: { |
540 | u_long size; | | 542 | u_long size; |
541 | int prot = 0; | | 543 | int prot = 0; |
542 | int flags; | | 544 | int flags; |
543 | | | 545 | |
544 | if (base_ph == NULL) { | | 546 | if (base_ph == NULL) { |
545 | /* | | 547 | /* |
546 | * First encountered psection is always the | | 548 | * First encountered psection is always the |
547 | * base psection. Make sure it's aligned | | 549 | * base psection. Make sure it's aligned |
548 | * properly (align down for topdown and align | | 550 | * properly (align down for topdown and align |
549 | * upwards for not topdown). | | 551 | * upwards for not topdown). |
550 | */ | | 552 | */ |
551 | base_ph = ph0; | | 553 | base_ph = ph0; |
552 | flags = VMCMD_BASE; | | 554 | flags = VMCMD_BASE; |
553 | if (addr == ELF_LINK_ADDR) | | 555 | if (addr == ELF_LINK_ADDR) |
554 | addr = ph0->p_vaddr; | | 556 | addr = ph0->p_vaddr; |
555 | if (p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN) | | 557 | if (p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN) |
556 | addr = ELF_TRUNC(addr, ph0->p_align); | | 558 | addr = ELF_TRUNC(addr, ph0->p_align); |
557 | else | | 559 | else |
558 | addr = ELF_ROUND(addr, ph0->p_align); | | 560 | addr = ELF_ROUND(addr, ph0->p_align); |
559 | } else { | | 561 | } else { |
560 | u_long limit = round_page(last_ph->p_vaddr | | 562 | u_long limit = round_page(last_ph->p_vaddr |
561 | + last_ph->p_memsz); | | 563 | + last_ph->p_memsz); |
562 | u_long base = trunc_page(ph0->p_vaddr); | | 564 | u_long base = trunc_page(ph0->p_vaddr); |
563 | | | 565 | |
564 | /* | | 566 | /* |
565 | * If there is a gap in between the psections, | | 567 | * If there is a gap in between the psections, |
566 | * map it as inaccessible so nothing else | | 568 | * map it as inaccessible so nothing else |
567 | * mmap'ed will be placed there. | | 569 | * mmap'ed will be placed there. |
568 | */ | | 570 | */ |
569 | if (limit != base) { | | 571 | if (limit != base) { |
570 | NEW_VMCMD2(vcset, vmcmd_map_zero, | | 572 | NEW_VMCMD2(vcset, vmcmd_map_zero, |
571 | base - limit, | | 573 | base - limit, |
572 | limit - base_ph->p_vaddr, NULLVP, | | 574 | limit - base_ph->p_vaddr, NULLVP, |
573 | 0, VM_PROT_NONE, VMCMD_RELATIVE); | | 575 | 0, VM_PROT_NONE, VMCMD_RELATIVE); |
574 | } | | 576 | } |
575 | | | 577 | |
576 | addr = ph0->p_vaddr - base_ph->p_vaddr; | | 578 | addr = ph0->p_vaddr - base_ph->p_vaddr; |
577 | flags = VMCMD_RELATIVE; | | 579 | flags = VMCMD_RELATIVE; |
578 | } | | 580 | } |
579 | last_ph = ph0; | | 581 | last_ph = ph0; |
580 | elf_load_psection(vcset, vp, &ph[i], &addr, | | 582 | elf_load_psection(vcset, vp, &ph[i], &addr, |
581 | &size, &prot, flags); | | 583 | &size, &prot, flags); |
582 | /* | | 584 | /* |
583 | * If entry is within this psection then this | | 585 | * If entry is within this psection then this |
584 | * must contain the .text section. *entryoff is | | 586 | * must contain the .text section. *entryoff is |
585 | * relative to the base psection. | | 587 | * relative to the base psection. |
586 | */ | | 588 | */ |
587 | if (eh.e_entry >= ph0->p_vaddr && | | 589 | if (eh.e_entry >= ph0->p_vaddr && |
588 | eh.e_entry < (ph0->p_vaddr + size)) { | | 590 | eh.e_entry < (ph0->p_vaddr + size)) { |
589 | *entryoff = eh.e_entry - base_ph->p_vaddr; | | 591 | *entryoff = eh.e_entry - base_ph->p_vaddr; |
590 | } | | 592 | } |
591 | addr += size; | | 593 | addr += size; |
592 | break; | | 594 | break; |
593 | } | | 595 | } |
594 | | | 596 | |
595 | case PT_DYNAMIC: | | 597 | case PT_DYNAMIC: |
596 | case PT_PHDR: | | 598 | case PT_PHDR: |
597 | break; | | 599 | break; |
598 | | | 600 | |
599 | case PT_NOTE: | | 601 | case PT_NOTE: |
600 | break; | | 602 | break; |
601 | | | 603 | |
602 | default: | | 604 | default: |
603 | break; | | 605 | break; |
604 | } | | 606 | } |
605 | } | | 607 | } |
606 | | | 608 | |
607 | kmem_free(ph, phsize); | | 609 | kmem_free(ph, phsize); |
608 | /* | | 610 | /* |
609 | * This value is ignored if TOPDOWN. | | 611 | * This value is ignored if TOPDOWN. |
610 | */ | | 612 | */ |
611 | *last = addr; | | 613 | *last = addr; |
612 | vrele(vp); | | 614 | vrele(vp); |
613 | return 0; | | 615 | return 0; |
614 | | | 616 | |
615 | badunlock: | | 617 | badunlock: |
616 | VOP_UNLOCK(vp); | | 618 | VOP_UNLOCK(vp); |
617 | | | 619 | |
618 | bad: | | 620 | bad: |
619 | if (ph != NULL) | | 621 | if (ph != NULL) |
620 | kmem_free(ph, phsize); | | 622 | kmem_free(ph, phsize); |
621 | #ifdef notyet /* XXX cgd 960926 */ | | 623 | #ifdef notyet /* XXX cgd 960926 */ |
622 | (maybe) VOP_CLOSE it | | 624 | (maybe) VOP_CLOSE it |
623 | #endif | | 625 | #endif |
624 | vrele(vp); | | 626 | vrele(vp); |
625 | return error; | | 627 | return error; |
626 | } | | 628 | } |
627 | | | 629 | |
628 | /* | | 630 | /* |
629 | * exec_elf_makecmds(): Prepare an Elf binary's exec package | | 631 | * exec_elf_makecmds(): Prepare an Elf binary's exec package |
630 | * | | 632 | * |
631 | * First, set of the various offsets/lengths in the exec package. | | 633 | * First, set of the various offsets/lengths in the exec package. |
632 | * | | 634 | * |
633 | * Then, mark the text image busy (so it can be demand paged) or error | | 635 | * Then, mark the text image busy (so it can be demand paged) or error |
634 | * out if this is not possible. Finally, set up vmcmds for the | | 636 | * out if this is not possible. Finally, set up vmcmds for the |
635 | * text, data, bss, and stack segments. | | 637 | * text, data, bss, and stack segments. |
636 | */ | | 638 | */ |
637 | int | | 639 | int |
638 | exec_elf_makecmds(struct lwp *l, struct exec_package *epp) | | 640 | exec_elf_makecmds(struct lwp *l, struct exec_package *epp) |
639 | { | | 641 | { |
640 | Elf_Ehdr *eh = epp->ep_hdr; | | 642 | Elf_Ehdr *eh = epp->ep_hdr; |
641 | Elf_Phdr *ph, *pp; | | 643 | Elf_Phdr *ph, *pp; |
642 | Elf_Addr phdr = 0, computed_phdr = 0, pos = 0, end_text = 0; | | 644 | Elf_Addr phdr = 0, computed_phdr = 0, pos = 0, end_text = 0; |
643 | int error, i, nload; | | 645 | int error, i, nload; |
644 | char *interp = NULL; | | 646 | char *interp = NULL; |
645 | u_long phsize; | | 647 | u_long phsize; |
646 | struct proc *p; | | 648 | struct proc *p; |
647 | struct elf_args *ap = NULL; | | 649 | struct elf_args *ap = NULL; |
648 | bool is_dyn; | | 650 | bool is_dyn; |
649 | | | 651 | |
650 | if (epp->ep_hdrvalid < sizeof(Elf_Ehdr)) | | 652 | if (epp->ep_hdrvalid < sizeof(Elf_Ehdr)) |
651 | return ENOEXEC; | | 653 | return ENOEXEC; |
652 | | | 654 | |
653 | is_dyn = elf_check_header(eh, ET_DYN) == 0; | | 655 | is_dyn = elf_check_header(eh, ET_DYN) == 0; |
654 | /* | | 656 | /* |
655 | * XXX allow for executing shared objects. It seems silly | | 657 | * XXX allow for executing shared objects. It seems silly |
656 | * but other ELF-based systems allow it as well. | | 658 | * but other ELF-based systems allow it as well. |
657 | */ | | 659 | */ |
658 | if (elf_check_header(eh, ET_EXEC) != 0 && !is_dyn) | | 660 | if (elf_check_header(eh, ET_EXEC) != 0 && !is_dyn) |
659 | return ENOEXEC; | | 661 | return ENOEXEC; |
660 | | | 662 | |
661 | if (eh->e_phnum > MAXPHNUM || eh->e_phnum == 0) | | 663 | if (eh->e_phnum > MAXPHNUM || eh->e_phnum == 0) |
662 | return ENOEXEC; | | 664 | return ENOEXEC; |
663 | | | 665 | |
664 | error = vn_marktext(epp->ep_vp); | | 666 | error = vn_marktext(epp->ep_vp); |
665 | if (error) | | 667 | if (error) |
666 | return error; | | 668 | return error; |
667 | | | 669 | |
668 | /* | | 670 | /* |
669 | * Allocate space to hold all the program headers, and read them | | 671 | * Allocate space to hold all the program headers, and read them |
670 | * from the file | | 672 | * from the file |
671 | */ | | 673 | */ |
672 | p = l->l_proc; | | 674 | p = l->l_proc; |
673 | phsize = eh->e_phnum * sizeof(Elf_Phdr); | | 675 | phsize = eh->e_phnum * sizeof(Elf_Phdr); |
674 | ph = kmem_alloc(phsize, KM_SLEEP); | | 676 | ph = kmem_alloc(phsize, KM_SLEEP); |
675 | | | 677 | |
676 | if ((error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize)) != | | 678 | if ((error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize)) != |
677 | 0) | | 679 | 0) |
678 | goto bad; | | 680 | goto bad; |
679 | | | 681 | |
680 | epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR); | | 682 | epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR); |
681 | epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR); | | 683 | epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR); |
682 | | | 684 | |
683 | for (i = 0; i < eh->e_phnum; i++) { | | 685 | for (i = 0; i < eh->e_phnum; i++) { |
684 | pp = &ph[i]; | | 686 | pp = &ph[i]; |
685 | if (pp->p_type == PT_INTERP) { | | 687 | if (pp->p_type == PT_INTERP) { |
686 | if (pp->p_filesz >= MAXPATHLEN) { | | 688 | if (pp->p_filesz >= MAXPATHLEN) { |
687 | error = ENOEXEC; | | 689 | error = ENOEXEC; |
688 | goto bad; | | 690 | goto bad; |
689 | } | | 691 | } |
690 | interp = PNBUF_GET(); | | 692 | interp = PNBUF_GET(); |
691 | interp[0] = '\0'; | | 693 | interp[0] = '\0'; |
692 | if ((error = exec_read_from(l, epp->ep_vp, | | 694 | if ((error = exec_read_from(l, epp->ep_vp, |
693 | pp->p_offset, interp, pp->p_filesz)) != 0) | | 695 | pp->p_offset, interp, pp->p_filesz)) != 0) |
694 | goto bad; | | 696 | goto bad; |
695 | break; | | 697 | break; |
696 | } | | 698 | } |
697 | } | | 699 | } |
698 | | | 700 | |
699 | /* | | 701 | /* |
700 | * On the same architecture, we may be emulating different systems. | | 702 | * On the same architecture, we may be emulating different systems. |
701 | * See which one will accept this executable. | | 703 | * See which one will accept this executable. |
702 | * | | 704 | * |
703 | * Probe functions would normally see if the interpreter (if any) | | 705 | * Probe functions would normally see if the interpreter (if any) |
704 | * exists. Emulation packages may possibly replace the interpreter in | | 706 | * exists. Emulation packages may possibly replace the interpreter in |
705 | * interp[] with a changed path (/emul/xxx/<path>). | | 707 | * interp[] with a changed path (/emul/xxx/<path>). |
706 | */ | | 708 | */ |
707 | pos = ELFDEFNNAME(NO_ADDR); | | 709 | pos = ELFDEFNNAME(NO_ADDR); |
708 | if (epp->ep_esch->u.elf_probe_func) { | | 710 | if (epp->ep_esch->u.elf_probe_func) { |
709 | vaddr_t startp = (vaddr_t)pos; | | 711 | vaddr_t startp = (vaddr_t)pos; |
710 | | | 712 | |
711 | error = (*epp->ep_esch->u.elf_probe_func)(l, epp, eh, interp, | | 713 | error = (*epp->ep_esch->u.elf_probe_func)(l, epp, eh, interp, |
712 | &startp); | | 714 | &startp); |
713 | if (error) | | 715 | if (error) |
714 | goto bad; | | 716 | goto bad; |
715 | pos = (Elf_Addr)startp; | | 717 | pos = (Elf_Addr)startp; |
716 | } | | 718 | } |
717 | | | 719 | |
718 | #if defined(PAX_MPROTECT) || defined(PAX_SEGVGUARD) || defined(PAX_ASLR) | | 720 | #if defined(PAX_MPROTECT) || defined(PAX_SEGVGUARD) || defined(PAX_ASLR) |
719 | p->p_pax = epp->ep_pax_flags; | | 721 | p->p_pax = epp->ep_pax_flags; |
720 | #endif /* PAX_MPROTECT || PAX_SEGVGUARD || PAX_ASLR */ | | 722 | #endif /* PAX_MPROTECT || PAX_SEGVGUARD || PAX_ASLR */ |
721 | | | 723 | |
722 | if (is_dyn) | | 724 | if (is_dyn) |
723 | elf_placedynexec(l, epp, eh, ph); | | 725 | elf_placedynexec(l, epp, eh, ph); |
724 | | | 726 | |
725 | /* | | 727 | /* |
726 | * Load all the necessary sections | | 728 | * Load all the necessary sections |
727 | */ | | 729 | */ |
728 | for (i = nload = 0; i < eh->e_phnum; i++) { | | 730 | for (i = nload = 0; i < eh->e_phnum; i++) { |
729 | Elf_Addr addr = ELFDEFNNAME(NO_ADDR); | | 731 | Elf_Addr addr = ELFDEFNNAME(NO_ADDR); |
730 | u_long size = 0; | | 732 | u_long size = 0; |
731 | int prot = 0; | | 733 | int prot = 0; |
732 | | | 734 | |
733 | pp = &ph[i]; | | 735 | pp = &ph[i]; |
734 | | | 736 | |
735 | switch (ph[i].p_type) { | | 737 | switch (ph[i].p_type) { |
736 | case PT_LOAD: | | 738 | case PT_LOAD: |
737 | elf_load_psection(&epp->ep_vmcmds, epp->ep_vp, | | 739 | elf_load_psection(&epp->ep_vmcmds, epp->ep_vp, |
738 | &ph[i], &addr, &size, &prot, VMCMD_FIXED); | | 740 | &ph[i], &addr, &size, &prot, VMCMD_FIXED); |
739 | | | 741 | |
740 | /* | | 742 | /* |
741 | * Consider this as text segment, if it is executable. | | 743 | * Consider this as text segment, if it is executable. |
742 | * If there is more than one text segment, pick the | | 744 | * If there is more than one text segment, pick the |
743 | * largest. | | 745 | * largest. |
744 | */ | | 746 | */ |
745 | if (ph[i].p_flags & PF_X) { | | 747 | if (ph[i].p_flags & PF_X) { |
746 | if (epp->ep_taddr == ELFDEFNNAME(NO_ADDR) || | | 748 | if (epp->ep_taddr == ELFDEFNNAME(NO_ADDR) || |
747 | size > epp->ep_tsize) { | | 749 | size > epp->ep_tsize) { |
748 | epp->ep_taddr = addr; | | 750 | epp->ep_taddr = addr; |
749 | epp->ep_tsize = size; | | 751 | epp->ep_tsize = size; |
750 | } | | 752 | } |
751 | end_text = addr + size; | | 753 | end_text = addr + size; |
752 | } else { | | 754 | } else { |
753 | epp->ep_daddr = addr; | | 755 | epp->ep_daddr = addr; |
754 | epp->ep_dsize = size; | | 756 | epp->ep_dsize = size; |
755 | } | | 757 | } |
756 | if (ph[i].p_offset == 0) { | | 758 | if (ph[i].p_offset == 0) { |
757 | computed_phdr = ph[i].p_vaddr + eh->e_phoff; | | 759 | computed_phdr = ph[i].p_vaddr + eh->e_phoff; |
758 | } | | 760 | } |
759 | break; | | 761 | break; |
760 | | | 762 | |
761 | case PT_SHLIB: | | 763 | case PT_SHLIB: |
762 | /* SCO has these sections. */ | | 764 | /* SCO has these sections. */ |
763 | case PT_INTERP: | | 765 | case PT_INTERP: |
764 | /* Already did this one. */ | | 766 | /* Already did this one. */ |
765 | case PT_DYNAMIC: | | 767 | case PT_DYNAMIC: |
766 | break; | | 768 | break; |
767 | case PT_NOTE: | | 769 | case PT_NOTE: |
768 | break; | | 770 | break; |
769 | case PT_PHDR: | | 771 | case PT_PHDR: |
770 | /* Note address of program headers (in text segment) */ | | 772 | /* Note address of program headers (in text segment) */ |
771 | phdr = pp->p_vaddr; | | 773 | phdr = pp->p_vaddr; |
772 | break; | | 774 | break; |
773 | | | 775 | |
774 | default: | | 776 | default: |
775 | /* | | 777 | /* |
776 | * Not fatal; we don't need to understand everything. | | 778 | * Not fatal; we don't need to understand everything. |
777 | */ | | 779 | */ |
778 | break; | | 780 | break; |
779 | } | | 781 | } |
780 | } | | 782 | } |
781 | if (interp || (epp->ep_flags & EXEC_FORCEAUX) != 0) { | | 783 | if (interp || (epp->ep_flags & EXEC_FORCEAUX) != 0) { |
782 | ap = malloc(sizeof(struct elf_args), M_TEMP, M_WAITOK); | | 784 | ap = malloc(sizeof(struct elf_args), M_TEMP, M_WAITOK); |
783 | ap->arg_interp = (vaddr_t)NULL; | | 785 | ap->arg_interp = (vaddr_t)NULL; |
784 | } | | 786 | } |
785 | | | 787 | |
786 | if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) { | | 788 | if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) { |
787 | epp->ep_daddr = round_page(end_text); | | 789 | epp->ep_daddr = round_page(end_text); |
788 | epp->ep_dsize = 0; | | 790 | epp->ep_dsize = 0; |
789 | } | | 791 | } |
790 | | | 792 | |
791 | /* | | 793 | /* |
792 | * Check if we found a dynamically linked binary and arrange to load | | 794 | * Check if we found a dynamically linked binary and arrange to load |
793 | * its interpreter | | 795 | * its interpreter |
794 | */ | | 796 | */ |
795 | if (interp) { | | 797 | if (interp) { |
796 | int j = epp->ep_vmcmds.evs_used; | | 798 | int j = epp->ep_vmcmds.evs_used; |
797 | u_long interp_offset; | | 799 | u_long interp_offset; |
798 | | | 800 | |
799 | if ((error = elf_load_file(l, epp, interp, | | 801 | if ((error = elf_load_file(l, epp, interp, |
800 | &epp->ep_vmcmds, &interp_offset, ap, &pos)) != 0) { | | 802 | &epp->ep_vmcmds, &interp_offset, ap, &pos)) != 0) { |
801 | goto bad; | | 803 | goto bad; |
802 | } | | 804 | } |
803 | ap->arg_interp = epp->ep_vmcmds.evs_cmds[j].ev_addr; | | 805 | ap->arg_interp = epp->ep_vmcmds.evs_cmds[j].ev_addr; |
804 | epp->ep_entry = ap->arg_interp + interp_offset; | | 806 | epp->ep_entry = ap->arg_interp + interp_offset; |
805 | PNBUF_PUT(interp); | | 807 | PNBUF_PUT(interp); |
806 | } else | | 808 | } else |
807 | epp->ep_entry = eh->e_entry; | | 809 | epp->ep_entry = eh->e_entry; |
808 | | | 810 | |
809 | if (ap) { | | 811 | if (ap) { |
810 | ap->arg_phaddr = phdr ? phdr : computed_phdr; | | 812 | ap->arg_phaddr = phdr ? phdr : computed_phdr; |
811 | ap->arg_phentsize = eh->e_phentsize; | | 813 | ap->arg_phentsize = eh->e_phentsize; |
812 | ap->arg_phnum = eh->e_phnum; | | 814 | ap->arg_phnum = eh->e_phnum; |
813 | ap->arg_entry = eh->e_entry; | | 815 | ap->arg_entry = eh->e_entry; |
814 | epp->ep_emul_arg = ap; | | 816 | epp->ep_emul_arg = ap; |
815 | } | | 817 | } |
816 | | | 818 | |
817 | #ifdef ELF_MAP_PAGE_ZERO | | 819 | #ifdef ELF_MAP_PAGE_ZERO |
818 | /* Dell SVR4 maps page zero, yeuch! */ | | 820 | /* Dell SVR4 maps page zero, yeuch! */ |
819 | NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0, | | 821 | NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0, |
820 | epp->ep_vp, 0, VM_PROT_READ); | | 822 | epp->ep_vp, 0, VM_PROT_READ); |
821 | #endif | | 823 | #endif |
822 | kmem_free(ph, phsize); | | 824 | kmem_free(ph, phsize); |
823 | return (*epp->ep_esch->es_setup_stack)(l, epp); | | 825 | return (*epp->ep_esch->es_setup_stack)(l, epp); |
824 | | | 826 | |
825 | bad: | | 827 | bad: |
826 | if (interp) | | 828 | if (interp) |
827 | PNBUF_PUT(interp); | | 829 | PNBUF_PUT(interp); |
828 | if (ap) | | 830 | if (ap) |
829 | free(ap, M_TEMP); | | 831 | free(ap, M_TEMP); |
830 | kmem_free(ph, phsize); | | 832 | kmem_free(ph, phsize); |
831 | kill_vmcmds(&epp->ep_vmcmds); | | 833 | kill_vmcmds(&epp->ep_vmcmds); |
832 | return error; | | 834 | return error; |
833 | } | | 835 | } |
834 | | | 836 | |
835 | int | | 837 | int |
836 | netbsd_elf_signature(struct lwp *l, struct exec_package *epp, | | 838 | netbsd_elf_signature(struct lwp *l, struct exec_package *epp, |
837 | Elf_Ehdr *eh) | | 839 | Elf_Ehdr *eh) |
838 | { | | 840 | { |
839 | size_t i; | | 841 | size_t i; |
840 | Elf_Shdr *sh; | | 842 | Elf_Shdr *sh; |
841 | Elf_Nhdr *np; | | 843 | Elf_Nhdr *np; |
842 | size_t shsize; | | 844 | size_t shsize; |
843 | int error; | | 845 | int error; |
844 | int isnetbsd = 0; | | 846 | int isnetbsd = 0; |
845 | char *ndata; | | 847 | char *ndata; |
846 | | | 848 | |
847 | epp->ep_pax_flags = 0; | | 849 | epp->ep_pax_flags = 0; |
848 | if (eh->e_shnum > MAXSHNUM || eh->e_shnum == 0) | | 850 | if (eh->e_shnum > MAXSHNUM || eh->e_shnum == 0) |
849 | return ENOEXEC; | | 851 | return ENOEXEC; |
850 | | | 852 | |
851 | shsize = eh->e_shnum * sizeof(Elf_Shdr); | | 853 | shsize = eh->e_shnum * sizeof(Elf_Shdr); |
852 | sh = kmem_alloc(shsize, KM_SLEEP); | | 854 | sh = kmem_alloc(shsize, KM_SLEEP); |
853 | error = exec_read_from(l, epp->ep_vp, eh->e_shoff, sh, shsize); | | 855 | error = exec_read_from(l, epp->ep_vp, eh->e_shoff, sh, shsize); |
854 | if (error) | | 856 | if (error) |
855 | goto out; | | 857 | goto out; |
856 | | | 858 | |
857 | np = kmem_alloc(MAXNOTESIZE, KM_SLEEP); | | 859 | np = kmem_alloc(MAXNOTESIZE, KM_SLEEP); |
858 | for (i = 0; i < eh->e_shnum; i++) { | | 860 | for (i = 0; i < eh->e_shnum; i++) { |
859 | Elf_Shdr *shp = &sh[i]; | | 861 | Elf_Shdr *shp = &sh[i]; |
860 | | | 862 | |
861 | if (shp->sh_type != SHT_NOTE || | | 863 | if (shp->sh_type != SHT_NOTE || |
862 | shp->sh_size > MAXNOTESIZE || | | 864 | shp->sh_size > MAXNOTESIZE || |
863 | shp->sh_size < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ) | | 865 | shp->sh_size < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ) |
864 | continue; | | 866 | continue; |
865 | | | 867 | |
866 | error = exec_read_from(l, epp->ep_vp, shp->sh_offset, np, | | 868 | error = exec_read_from(l, epp->ep_vp, shp->sh_offset, np, |
867 | shp->sh_size); | | 869 | shp->sh_size); |
868 | if (error) | | 870 | if (error) |
869 | continue; | | 871 | continue; |
870 | | | 872 | |
871 | ndata = (char *)(np + 1); | | 873 | ndata = (char *)(np + 1); |
872 | switch (np->n_type) { | | 874 | switch (np->n_type) { |
873 | case ELF_NOTE_TYPE_NETBSD_TAG: | | 875 | case ELF_NOTE_TYPE_NETBSD_TAG: |
874 | if (np->n_namesz != ELF_NOTE_NETBSD_NAMESZ || | | 876 | if (np->n_namesz != ELF_NOTE_NETBSD_NAMESZ || |
875 | np->n_descsz != ELF_NOTE_NETBSD_DESCSZ || | | 877 | np->n_descsz != ELF_NOTE_NETBSD_DESCSZ || |
876 | memcmp(ndata, ELF_NOTE_NETBSD_NAME, | | 878 | memcmp(ndata, ELF_NOTE_NETBSD_NAME, |
877 | ELF_NOTE_NETBSD_NAMESZ)) | | 879 | ELF_NOTE_NETBSD_NAMESZ)) |
878 | goto bad; | | 880 | goto bad; |
879 | isnetbsd = 1; | | 881 | isnetbsd = 1; |
880 | break; | | 882 | break; |
881 | | | 883 | |
882 | case ELF_NOTE_TYPE_PAX_TAG: | | 884 | case ELF_NOTE_TYPE_PAX_TAG: |
883 | if (np->n_namesz != ELF_NOTE_PAX_NAMESZ || | | 885 | if (np->n_namesz != ELF_NOTE_PAX_NAMESZ || |
884 | np->n_descsz != ELF_NOTE_PAX_DESCSZ || | | 886 | np->n_descsz != ELF_NOTE_PAX_DESCSZ || |
885 | memcmp(ndata, ELF_NOTE_PAX_NAME, | | 887 | memcmp(ndata, ELF_NOTE_PAX_NAME, |
886 | ELF_NOTE_PAX_NAMESZ)) { | | 888 | ELF_NOTE_PAX_NAMESZ)) { |
887 | bad: | | 889 | bad: |
888 | /* | | 890 | /* |
889 | * Ignore GNU tags | | 891 | * Ignore GNU tags |
890 | */ | | 892 | */ |
891 | if (np->n_namesz == ELF_NOTE_GNU_NAMESZ && | | 893 | if (np->n_namesz == ELF_NOTE_GNU_NAMESZ && |
892 | memcmp(ndata, ELF_NOTE_GNU_NAME, | | 894 | memcmp(ndata, ELF_NOTE_GNU_NAME, |
893 | ELF_NOTE_GNU_NAMESZ) == 0) | | 895 | ELF_NOTE_GNU_NAMESZ) == 0) |
894 | break; | | 896 | break; |
895 | #ifdef DIAGNOSTIC | | 897 | #ifdef DIAGNOSTIC |
896 | printf("%s: bad tag %d: " | | 898 | printf("%s: bad tag %d: " |
897 | "[%d %d, %d %d, %*.*s %*.*s]\n", | | 899 | "[%d %d, %d %d, %*.*s %*.*s]\n", |
898 | epp->ep_kname, | | 900 | epp->ep_kname, |
899 | np->n_type, | | 901 | np->n_type, |
900 | np->n_namesz, ELF_NOTE_PAX_NAMESZ, | | 902 | np->n_namesz, ELF_NOTE_PAX_NAMESZ, |
901 | np->n_descsz, ELF_NOTE_PAX_DESCSZ, | | 903 | np->n_descsz, ELF_NOTE_PAX_DESCSZ, |
902 | ELF_NOTE_PAX_NAMESZ, | | 904 | ELF_NOTE_PAX_NAMESZ, |
903 | ELF_NOTE_PAX_NAMESZ, | | 905 | ELF_NOTE_PAX_NAMESZ, |
904 | ndata, | | 906 | ndata, |
905 | ELF_NOTE_PAX_NAMESZ, | | 907 | ELF_NOTE_PAX_NAMESZ, |
906 | ELF_NOTE_PAX_NAMESZ, | | 908 | ELF_NOTE_PAX_NAMESZ, |
907 | ELF_NOTE_PAX_NAME); | | 909 | ELF_NOTE_PAX_NAME); |
908 | #endif | | 910 | #endif |
909 | continue; | | 911 | continue; |
910 | } | | 912 | } |
911 | (void)memcpy(&epp->ep_pax_flags, | | 913 | (void)memcpy(&epp->ep_pax_flags, |
912 | ndata + ELF_NOTE_PAX_NAMESZ, | | 914 | ndata + ELF_NOTE_PAX_NAMESZ, |
913 | sizeof(epp->ep_pax_flags)); | | 915 | sizeof(epp->ep_pax_flags)); |
914 | break; | | 916 | break; |
915 | | | 917 | |
916 | case ELF_NOTE_TYPE_SUSE_TAG: | | 918 | case ELF_NOTE_TYPE_SUSE_TAG: |
917 | break; | | 919 | break; |
918 | | | 920 | |
919 | default: | | 921 | default: |
920 | #ifdef DIAGNOSTIC | | 922 | #ifdef DIAGNOSTIC |
921 | printf("%s: unknown note type %d\n", epp->ep_kname, | | 923 | printf("%s: unknown note type %d\n", epp->ep_kname, |
922 | np->n_type); | | 924 | np->n_type); |
923 | #endif | | 925 | #endif |
924 | break; | | 926 | break; |
925 | } | | 927 | } |
926 | } | | 928 | } |
927 | kmem_free(np, MAXNOTESIZE); | | 929 | kmem_free(np, MAXNOTESIZE); |
928 | | | 930 | |
929 | error = isnetbsd ? 0 : ENOEXEC; | | 931 | error = isnetbsd ? 0 : ENOEXEC; |
930 | out: | | 932 | out: |
931 | kmem_free(sh, shsize); | | 933 | kmem_free(sh, shsize); |
932 | return error; | | 934 | return error; |
933 | } | | 935 | } |
934 | | | 936 | |
935 | int | | 937 | int |
936 | netbsd_elf_probe(struct lwp *l, struct exec_package *epp, void *eh, char *itp, | | 938 | netbsd_elf_probe(struct lwp *l, struct exec_package *epp, void *eh, char *itp, |
937 | vaddr_t *pos) | | 939 | vaddr_t *pos) |
938 | { | | 940 | { |
939 | int error; | | 941 | int error; |
940 | | | 942 | |
941 | if ((error = netbsd_elf_signature(l, epp, eh)) != 0) | | 943 | if ((error = netbsd_elf_signature(l, epp, eh)) != 0) |
942 | return error; | | 944 | return error; |
943 | #ifdef ELF_MD_PROBE_FUNC | | 945 | #ifdef ELF_MD_PROBE_FUNC |
944 | if ((error = ELF_MD_PROBE_FUNC(l, epp, eh, itp, pos)) != 0) | | 946 | if ((error = ELF_MD_PROBE_FUNC(l, epp, eh, itp, pos)) != 0) |
945 | return error; | | 947 | return error; |
946 | #elif defined(ELF_INTERP_NON_RELOCATABLE) | | 948 | #elif defined(ELF_INTERP_NON_RELOCATABLE) |
947 | *pos = ELF_LINK_ADDR; | | 949 | *pos = ELF_LINK_ADDR; |
948 | #endif | | 950 | #endif |
949 | epp->ep_flags |= EXEC_FORCEAUX; | | 951 | epp->ep_flags |= EXEC_FORCEAUX; |
950 | return 0; | | 952 | return 0; |
951 | } | | 953 | } |