| @@ -1,555 +1,553 @@ | | | @@ -1,555 +1,553 @@ |
1 | /*- | | 1 | /*- |
2 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | | 2 | * Copyright (c) 2014 The NetBSD Foundation, Inc. |
3 | * All rights reserved. | | 3 | * All rights reserved. |
4 | * | | 4 | * |
5 | * This code is derived from software contributed to The NetBSD Foundation | | 5 | * This code is derived from software contributed to The NetBSD Foundation |
6 | * by Matt Thomas of 3am Software Foundry. | | 6 | * by Matt Thomas of 3am Software Foundry. |
7 | * | | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | | 8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions | | 9 | * modification, are permitted provided that the following conditions |
10 | * are met: | | 10 | * are met: |
11 | * 1. Redistributions of source code must retain the above copyright | | 11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. | | 12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright | | 13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the | | 14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. | | 15 | * documentation and/or other materials provided with the distribution. |
16 | * | | 16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 17 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
18 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 18 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
19 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 19 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
20 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 20 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
21 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 21 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
22 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 22 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
25 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 25 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
27 | * POSSIBILITY OF SUCH DAMAGE. | | 27 | * POSSIBILITY OF SUCH DAMAGE. |
28 | */ | | 28 | */ |
29 | | | 29 | |
30 | #include <sys/cdefs.h> | | 30 | #include <sys/cdefs.h> |
31 | | | 31 | |
32 | #define __PMAP_PRIVATE | | 32 | #define __PMAP_PRIVATE |
33 | #define __UFETCHSTORE_PRIVATE | | 33 | #define __UFETCHSTORE_PRIVATE |
34 | | | 34 | |
35 | __RCSID("$NetBSD: trap.c,v 1.9 2020/11/01 21:06:22 skrll Exp $"); | | 35 | __RCSID("$NetBSD: trap.c,v 1.10 2020/11/01 21:09:48 skrll Exp $"); |
36 | | | 36 | |
37 | #include <sys/param.h> | | 37 | #include <sys/param.h> |
38 | #include <sys/systm.h> | | 38 | #include <sys/systm.h> |
39 | #include <sys/atomic.h> | | 39 | #include <sys/atomic.h> |
40 | | | 40 | |
41 | #include <sys/signal.h> | | 41 | #include <sys/signal.h> |
42 | #include <sys/signalvar.h> | | 42 | #include <sys/signalvar.h> |
43 | #include <sys/siginfo.h> | | 43 | #include <sys/siginfo.h> |
44 | | | 44 | |
45 | #include <uvm/uvm.h> | | 45 | #include <uvm/uvm.h> |
46 | | | 46 | |
47 | #include <riscv/locore.h> | | 47 | #include <riscv/locore.h> |
48 | | | 48 | |
49 | #define INSTRUCTION_TRAP_MASK (__BIT(CAUSE_PRIVILEGED_INSTRUCTION) \ | | 49 | #define INSTRUCTION_TRAP_MASK (__BIT(CAUSE_ILLEGAL_INSTRUCTION)) |
50 | |__BIT(CAUSE_ILLEGAL_INSTRUCTION)) | | | |
51 | | | 50 | |
52 | #define FAULT_TRAP_MASK (__BIT(CAUSE_FAULT_FETCH) \ | | 51 | #define FAULT_TRAP_MASK (__BIT(CAUSE_FETCH_ACCESS) \ |
53 | |__BIT(CAUSE_FAULT_LOAD) \ | | 52 | |__BIT(CAUSE_LOAD_ACCESS) \ |
54 | |__BIT(CAUSE_FAULT_STORE)) | | 53 | |__BIT(CAUSE_STORE_ACCESS)) |
55 | | | 54 | |
56 | #define MISALIGNED_TRAP_MASK (__BIT(CAUSE_MISALIGNED_FETCH) \ | | 55 | #define MISALIGNED_TRAP_MASK (__BIT(CAUSE_FETCH_MISALIGNED) \ |
57 | |__BIT(CAUSE_MISALIGNED_LOAD) \ | | 56 | |__BIT(CAUSE_LOAD_MISALIGNED) \ |
58 | |__BIT(CAUSE_MISALIGNED_STORE)) | | 57 | |__BIT(CAUSE_STORE_MISALIGNED)) |
59 | | | 58 | |
60 | static const char * const causenames[] = { | | 59 | static const char * const causenames[] = { |
61 | [CAUSE_MISALIGNED_FETCH] = "misaligned fetch", | | 60 | [CAUSE_FETCH_MISALIGNED] = "misaligned fetch", |
62 | [CAUSE_MISALIGNED_LOAD] = "misaligned load", | | 61 | [CAUSE_LOAD_MISALIGNED] = "misaligned load", |
63 | [CAUSE_MISALIGNED_STORE] = "misaligned store", | | 62 | [CAUSE_STORE_MISALIGNED] = "misaligned store", |
64 | [CAUSE_FAULT_FETCH] = "fetch", | | 63 | [CAUSE_FETCH_ACCESS] = "fetch", |
65 | [CAUSE_FAULT_LOAD] = "load", | | 64 | [CAUSE_LOAD_ACCESS] = "load", |
66 | [CAUSE_FAULT_STORE] = "store", | | 65 | [CAUSE_STORE_ACCESS] = "store", |
67 | [CAUSE_FP_DISABLED] = "fp disabled", | | | |
68 | [CAUSE_ILLEGAL_INSTRUCTION] = "illegal instruction", | | 66 | [CAUSE_ILLEGAL_INSTRUCTION] = "illegal instruction", |
69 | [CAUSE_PRIVILEGED_INSTRUCTION] = "privileged instruction", | | | |
70 | [CAUSE_BREAKPOINT] = "breakpoint", | | 67 | [CAUSE_BREAKPOINT] = "breakpoint", |
71 | }; | | 68 | }; |
72 | | | 69 | |
73 | void | | 70 | void |
74 | cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb) | | 71 | cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb) |
75 | { | | 72 | { |
76 | tf->tf_a0 = fb->fb_reg[FB_A0]; | | 73 | tf->tf_a0 = fb->fb_reg[FB_A0]; |
77 | tf->tf_ra = fb->fb_reg[FB_RA]; | | 74 | tf->tf_ra = fb->fb_reg[FB_RA]; |
78 | tf->tf_s0 = fb->fb_reg[FB_S0]; | | 75 | tf->tf_s0 = fb->fb_reg[FB_S0]; |
79 | tf->tf_s1 = fb->fb_reg[FB_S1]; | | 76 | tf->tf_s1 = fb->fb_reg[FB_S1]; |
80 | tf->tf_s2 = fb->fb_reg[FB_S2]; | | 77 | tf->tf_s2 = fb->fb_reg[FB_S2]; |
81 | tf->tf_s3 = fb->fb_reg[FB_S3]; | | 78 | tf->tf_s3 = fb->fb_reg[FB_S3]; |
82 | tf->tf_s4 = fb->fb_reg[FB_S4]; | | 79 | tf->tf_s4 = fb->fb_reg[FB_S4]; |
83 | tf->tf_s5 = fb->fb_reg[FB_S5]; | | 80 | tf->tf_s5 = fb->fb_reg[FB_S5]; |
84 | tf->tf_s6 = fb->fb_reg[FB_S6]; | | 81 | tf->tf_s6 = fb->fb_reg[FB_S6]; |
85 | tf->tf_s7 = fb->fb_reg[FB_S7]; | | 82 | tf->tf_s7 = fb->fb_reg[FB_S7]; |
86 | tf->tf_s8 = fb->fb_reg[FB_S8]; | | 83 | tf->tf_s8 = fb->fb_reg[FB_S8]; |
87 | tf->tf_s9 = fb->fb_reg[FB_S9]; | | 84 | tf->tf_s9 = fb->fb_reg[FB_S9]; |
88 | tf->tf_s10 = fb->fb_reg[FB_S10]; | | 85 | tf->tf_s10 = fb->fb_reg[FB_S10]; |
89 | tf->tf_s11 = fb->fb_reg[FB_S11]; | | 86 | tf->tf_s11 = fb->fb_reg[FB_S11]; |
90 | } | | 87 | } |
91 | | | 88 | |
92 | int | | 89 | int |
93 | copyin(const void *uaddr, void *kaddr, size_t len) | | 90 | copyin(const void *uaddr, void *kaddr, size_t len) |
94 | { | | 91 | { |
95 | struct faultbuf fb; | | 92 | struct faultbuf fb; |
96 | int error; | | 93 | int error; |
97 | | | 94 | |
98 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | | 95 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { |
99 | memcpy(kaddr, uaddr, len); | | 96 | memcpy(kaddr, uaddr, len); |
100 | cpu_unset_onfault(); | | 97 | cpu_unset_onfault(); |
101 | } | | 98 | } |
102 | return error; | | 99 | return error; |
103 | } | | 100 | } |
104 | | | 101 | |
105 | int | | 102 | int |
106 | copyout(const void *kaddr, void *uaddr, size_t len) | | 103 | copyout(const void *kaddr, void *uaddr, size_t len) |
107 | { | | 104 | { |
108 | struct faultbuf fb; | | 105 | struct faultbuf fb; |
109 | int error; | | 106 | int error; |
110 | | | 107 | |
111 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | | 108 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { |
112 | memcpy(uaddr, kaddr, len); | | 109 | memcpy(uaddr, kaddr, len); |
113 | cpu_unset_onfault(); | | 110 | cpu_unset_onfault(); |
114 | } | | 111 | } |
115 | return error; | | 112 | return error; |
116 | } | | 113 | } |
117 | | | 114 | |
118 | int | | 115 | int |
119 | kcopy(const void *kfaddr, void *kdaddr, size_t len) | | 116 | kcopy(const void *kfaddr, void *kdaddr, size_t len) |
120 | { | | 117 | { |
121 | struct faultbuf fb; | | 118 | struct faultbuf fb; |
122 | int error; | | 119 | int error; |
123 | | | 120 | |
124 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | | 121 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { |
125 | memcpy(kdaddr, kfaddr, len); | | 122 | memcpy(kdaddr, kfaddr, len); |
126 | cpu_unset_onfault(); | | 123 | cpu_unset_onfault(); |
127 | } | | 124 | } |
128 | return error; | | 125 | return error; |
129 | } | | 126 | } |
130 | | | 127 | |
131 | int | | 128 | int |
132 | copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) | | 129 | copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) |
133 | { | | 130 | { |
134 | struct faultbuf fb; | | 131 | struct faultbuf fb; |
135 | int error; | | 132 | int error; |
136 | | | 133 | |
137 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | | 134 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { |
138 | len = strlcpy(kaddr, uaddr, len); | | 135 | len = strlcpy(kaddr, uaddr, len); |
139 | cpu_unset_onfault(); | | 136 | cpu_unset_onfault(); |
140 | if (done != NULL) { | | 137 | if (done != NULL) { |
141 | *done = len; | | 138 | *done = len; |
142 | } | | 139 | } |
143 | } | | 140 | } |
144 | return error; | | 141 | return error; |
145 | } | | 142 | } |
146 | | | 143 | |
147 | int | | 144 | int |
148 | copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done) | | 145 | copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done) |
149 | { | | 146 | { |
150 | struct faultbuf fb; | | 147 | struct faultbuf fb; |
151 | int error; | | 148 | int error; |
152 | | | 149 | |
153 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | | 150 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { |
154 | len = strlcpy(uaddr, kaddr, len); | | 151 | len = strlcpy(uaddr, kaddr, len); |
155 | cpu_unset_onfault(); | | 152 | cpu_unset_onfault(); |
156 | if (done != NULL) { | | 153 | if (done != NULL) { |
157 | *done = len; | | 154 | *done = len; |
158 | } | | 155 | } |
159 | } | | 156 | } |
160 | return error; | | 157 | return error; |
161 | } | | 158 | } |
162 | | | 159 | |
163 | static void | | 160 | static void |
164 | dump_trapframe(const struct trapframe *tf, void (*pr)(const char *, ...)) | | 161 | dump_trapframe(const struct trapframe *tf, void (*pr)(const char *, ...)) |
165 | { | | 162 | { |
166 | const char *causestr = "?"; | | 163 | const char *causestr = "?"; |
167 | if (tf->tf_cause < __arraycount(causenames) | | 164 | if (tf->tf_cause < __arraycount(causenames) |
168 | && causenames[tf->tf_cause] != NULL) | | 165 | && causenames[tf->tf_cause] != NULL) |
169 | causestr = causenames[tf->tf_cause]; | | 166 | causestr = causenames[tf->tf_cause]; |
170 | (*pr)("Trapframe @ %p " | | 167 | (*pr)("Trapframe @ %p " |
171 | "(cause=%d (%s), status=%#x, pc=%#16"PRIxREGISTER | | 168 | "(cause=%d (%s), status=%#x, pc=%#16"PRIxREGISTER |
172 | ", va=%#"PRIxREGISTER"):\n", | | 169 | ", va=%#"PRIxREGISTER"):\n", |
173 | tf, tf->tf_cause, causestr, tf->tf_sr, tf->tf_pc, tf->tf_badaddr); | | 170 | tf, tf->tf_cause, causestr, tf->tf_sr, tf->tf_pc, tf->tf_badaddr); |
174 | (*pr)("ra=%#16"PRIxREGISTER", sp=%#16"PRIxREGISTER | | 171 | (*pr)("ra=%#16"PRIxREGISTER", sp=%#16"PRIxREGISTER |
175 | ", gp=%#16"PRIxREGISTER", tp=%#16"PRIxREGISTER"\n", | | 172 | ", gp=%#16"PRIxREGISTER", tp=%#16"PRIxREGISTER"\n", |
176 | tf->tf_ra, tf->tf_sp, tf->tf_gp, tf->tf_tp); | | 173 | tf->tf_ra, tf->tf_sp, tf->tf_gp, tf->tf_tp); |
177 | (*pr)("s0=%#16"PRIxREGISTER", s1=%#16"PRIxREGISTER | | 174 | (*pr)("s0=%#16"PRIxREGISTER", s1=%#16"PRIxREGISTER |
178 | ", s2=%#16"PRIxREGISTER", s3=%#16"PRIxREGISTER"\n", | | 175 | ", s2=%#16"PRIxREGISTER", s3=%#16"PRIxREGISTER"\n", |
179 | tf->tf_s0, tf->tf_s1, tf->tf_s2, tf->tf_s3); | | 176 | tf->tf_s0, tf->tf_s1, tf->tf_s2, tf->tf_s3); |
180 | (*pr)("s4=%#16"PRIxREGISTER", s5=%#16"PRIxREGISTER | | 177 | (*pr)("s4=%#16"PRIxREGISTER", s5=%#16"PRIxREGISTER |
181 | ", s5=%#16"PRIxREGISTER", s3=%#16"PRIxREGISTER"\n", | | 178 | ", s5=%#16"PRIxREGISTER", s3=%#16"PRIxREGISTER"\n", |
182 | tf->tf_s4, tf->tf_s5, tf->tf_s2, tf->tf_s3); | | 179 | tf->tf_s4, tf->tf_s5, tf->tf_s2, tf->tf_s3); |
183 | (*pr)("s8=%#16"PRIxREGISTER", s9=%#16"PRIxREGISTER | | 180 | (*pr)("s8=%#16"PRIxREGISTER", s9=%#16"PRIxREGISTER |
184 | ", s10=%#16"PRIxREGISTER", s11=%#16"PRIxREGISTER"\n", | | 181 | ", s10=%#16"PRIxREGISTER", s11=%#16"PRIxREGISTER"\n", |
185 | tf->tf_s8, tf->tf_s9, tf->tf_s10, tf->tf_s11); | | 182 | tf->tf_s8, tf->tf_s9, tf->tf_s10, tf->tf_s11); |
186 | (*pr)("a0=%#16"PRIxREGISTER", a1=%#16"PRIxREGISTER | | 183 | (*pr)("a0=%#16"PRIxREGISTER", a1=%#16"PRIxREGISTER |
187 | ", a2=%#16"PRIxREGISTER", a3=%#16"PRIxREGISTER"\n", | | 184 | ", a2=%#16"PRIxREGISTER", a3=%#16"PRIxREGISTER"\n", |
188 | tf->tf_a0, tf->tf_a1, tf->tf_a2, tf->tf_a3); | | 185 | tf->tf_a0, tf->tf_a1, tf->tf_a2, tf->tf_a3); |
189 | (*pr)("a4=%#16"PRIxREGISTER", a5=%#16"PRIxREGISTER | | 186 | (*pr)("a4=%#16"PRIxREGISTER", a5=%#16"PRIxREGISTER |
190 | ", a5=%#16"PRIxREGISTER", a7=%#16"PRIxREGISTER"\n", | | 187 | ", a5=%#16"PRIxREGISTER", a7=%#16"PRIxREGISTER"\n", |
191 | tf->tf_a4, tf->tf_a5, tf->tf_a6, tf->tf_a7); | | 188 | tf->tf_a4, tf->tf_a5, tf->tf_a6, tf->tf_a7); |
192 | (*pr)("t0=%#16"PRIxREGISTER", t1=%#16"PRIxREGISTER | | 189 | (*pr)("t0=%#16"PRIxREGISTER", t1=%#16"PRIxREGISTER |
193 | ", t2=%#16"PRIxREGISTER", t3=%#16"PRIxREGISTER"\n", | | 190 | ", t2=%#16"PRIxREGISTER", t3=%#16"PRIxREGISTER"\n", |
194 | tf->tf_t0, tf->tf_t1, tf->tf_t2, tf->tf_t3); | | 191 | tf->tf_t0, tf->tf_t1, tf->tf_t2, tf->tf_t3); |
195 | (*pr)("t4=%#16"PRIxREGISTER", t5=%#16"PRIxREGISTER | | 192 | (*pr)("t4=%#16"PRIxREGISTER", t5=%#16"PRIxREGISTER |
196 | ", t6=%#16"PRIxREGISTER"\n", | | 193 | ", t6=%#16"PRIxREGISTER"\n", |
197 | tf->tf_t4, tf->tf_t5, tf->tf_t6); | | 194 | tf->tf_t4, tf->tf_t5, tf->tf_t6); |
198 | } | | 195 | } |
199 | | | 196 | |
200 | static inline void | | 197 | static inline void |
201 | trap_ksi_init(ksiginfo_t *ksi, int signo, int code, vaddr_t addr, | | 198 | trap_ksi_init(ksiginfo_t *ksi, int signo, int code, vaddr_t addr, |
202 | register_t cause) | | 199 | register_t cause) |
203 | { | | 200 | { |
204 | KSI_INIT_TRAP(ksi); | | 201 | KSI_INIT_TRAP(ksi); |
205 | ksi->ksi_signo = signo; | | 202 | ksi->ksi_signo = signo; |
206 | ksi->ksi_code = code; | | 203 | ksi->ksi_code = code; |
207 | ksi->ksi_addr = (void *)addr; | | 204 | ksi->ksi_addr = (void *)addr; |
208 | ksi->ksi_trap = cause; | | 205 | ksi->ksi_trap = cause; |
209 | } | | 206 | } |
210 | | | 207 | |
211 | static void | | 208 | static void |
212 | cpu_trapsignal(struct trapframe *tf, ksiginfo_t *ksi) | | 209 | cpu_trapsignal(struct trapframe *tf, ksiginfo_t *ksi) |
213 | { | | 210 | { |
214 | if (cpu_printfataltraps) { | | 211 | if (cpu_printfataltraps) { |
215 | dump_trapframe(tf, printf); | | 212 | dump_trapframe(tf, printf); |
216 | } | | 213 | } |
217 | (*curlwp->l_proc->p_emul->e_trapsignal)(curlwp, ksi); | | 214 | (*curlwp->l_proc->p_emul->e_trapsignal)(curlwp, ksi); |
218 | } | | 215 | } |
219 | | | 216 | |
220 | static inline vm_prot_t | | 217 | static inline vm_prot_t |
221 | get_faulttype(register_t cause) | | 218 | get_faulttype(register_t cause) |
222 | { | | 219 | { |
223 | if (cause == CAUSE_FAULT_LOAD) | | 220 | if (cause == CAUSE_LOAD_ACCESS) |
224 | return VM_PROT_READ; | | 221 | return VM_PROT_READ; |
225 | if (cause == CAUSE_FAULT_STORE) | | 222 | if (cause == CAUSE_STORE_ACCESS) |
226 | return VM_PROT_READ | VM_PROT_WRITE; | | 223 | return VM_PROT_READ | VM_PROT_WRITE; |
227 | KASSERT(cause == CAUSE_FAULT_FETCH); | | 224 | KASSERT(cause == CAUSE_FETCH_ACCESS); |
228 | return VM_PROT_READ | VM_PROT_EXECUTE; | | 225 | return VM_PROT_READ | VM_PROT_EXECUTE; |
229 | } | | 226 | } |
230 | | | 227 | |
231 | static bool | | 228 | static bool |
232 | trap_pagefault_fixup(struct trapframe *tf, struct pmap *pmap, register_t cause, | | 229 | trap_pagefault_fixup(struct trapframe *tf, struct pmap *pmap, register_t cause, |
233 | intptr_t addr) | | 230 | intptr_t addr) |
234 | { | | 231 | { |
235 | pt_entry_t * const ptep = pmap_pte_lookup(pmap, addr); | | 232 | pt_entry_t * const ptep = pmap_pte_lookup(pmap, addr); |
236 | struct vm_page *pg; | | 233 | struct vm_page *pg; |
237 | | | 234 | |
238 | if (ptep == NULL) | | 235 | if (ptep == NULL) |
239 | return false; | | 236 | return false; |
240 | | | 237 | |
241 | pt_entry_t opte = *ptep; | | 238 | pt_entry_t opte = *ptep; |
242 | pt_entry_t npte; | | 239 | pt_entry_t npte; |
243 | u_int attr; | | 240 | u_int attr; |
244 | do { | | 241 | do { |
245 | if ((opte & ~PTE_G) == 0) | | 242 | if ((opte & ~PTE_G) == 0) |
246 | return false; | | 243 | return false; |
247 | | | 244 | |
248 | pg = PHYS_TO_VM_PAGE(pte_to_paddr(opte)); | | 245 | pg = PHYS_TO_VM_PAGE(pte_to_paddr(opte)); |
249 | if (pg == NULL) | | 246 | if (pg == NULL) |
250 | return false; | | 247 | return false; |
251 | | | 248 | |
252 | attr = 0; | | 249 | attr = 0; |
253 | npte = opte; | | 250 | npte = opte; |
254 | if ((npte & PTE_V) == 0) { | | 251 | if ((npte & PTE_V) == 0) { |
255 | npte |= PTE_V; | | 252 | npte |= PTE_V; |
256 | attr |= VM_PAGEMD_REFERENCED; | | 253 | attr |= VM_PAGEMD_REFERENCED; |
257 | } | | 254 | } |
258 | #if 0 /* XXX Outdated */ | | 255 | #if 0 /* XXX Outdated */ |
259 | if (cause == CAUSE_FAULT_STORE) { | | 256 | if (cause == CAUSE_STORE_ACCESS) { |
260 | if ((npte & PTE_NW) != 0) { | | 257 | if ((npte & PTE_NW) != 0) { |
261 | npte &= ~PTE_NW; | | 258 | npte &= ~PTE_NW; |
262 | attr |= VM_PAGEMD_MODIFIED; | | 259 | attr |= VM_PAGEMD_MODIFIED; |
263 | } | | 260 | } |
264 | } else if (cause == CAUSE_FAULT_FETCH) { | | 261 | } else if (cause == CAUSE_FETCH_ACCESS) { |
265 | if ((npte & PTE_NX) != 0) { | | 262 | if ((npte & PTE_NX) != 0) { |
266 | npte &= ~PTE_NX; | | 263 | npte &= ~PTE_NX; |
267 | attr |= VM_PAGEMD_EXECPAGE; | | 264 | attr |= VM_PAGEMD_EXECPAGE; |
268 | } | | 265 | } |
269 | } | | 266 | } |
270 | #endif | | 267 | #endif |
271 | if (attr == 0) | | 268 | if (attr == 0) |
272 | return false; | | 269 | return false; |
273 | | | 270 | |
274 | } while (opte != atomic_cas_pte(ptep, opte, npte)); | | 271 | } while (opte != atomic_cas_pte(ptep, opte, npte)); |
275 | | | 272 | |
276 | pmap_page_set_attributes(VM_PAGE_TO_MD(pg), attr); | | 273 | pmap_page_set_attributes(VM_PAGE_TO_MD(pg), attr); |
277 | pmap_tlb_update_addr(pmap, addr, npte, 0); | | 274 | pmap_tlb_update_addr(pmap, addr, npte, 0); |
278 | | | 275 | |
279 | if (attr & VM_PAGEMD_EXECPAGE) | | 276 | if (attr & VM_PAGEMD_EXECPAGE) |
280 | pmap_md_page_syncicache(pg, curcpu()->ci_data.cpu_kcpuset); | | 277 | pmap_md_page_syncicache(pg, curcpu()->ci_data.cpu_kcpuset); |
281 | | | 278 | |
282 | return true; | | 279 | return true; |
283 | } | | 280 | } |
284 | | | 281 | |
285 | static bool | | 282 | static bool |
286 | trap_pagefault(struct trapframe *tf, register_t epc, register_t status, | | 283 | trap_pagefault(struct trapframe *tf, register_t epc, register_t status, |
287 | register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi) | | 284 | register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi) |
288 | { | | 285 | { |
289 | struct proc * const p = curlwp->l_proc; | | 286 | struct proc * const p = curlwp->l_proc; |
290 | const intptr_t addr = trunc_page(badaddr); | | 287 | const intptr_t addr = trunc_page(badaddr); |
291 | | | 288 | |
292 | if (__predict_false(usertrap_p | | 289 | if (__predict_false(usertrap_p |
293 | && (false | | 290 | && (false |
294 | 	    // Make sure this address is not trying to access kernel space. | | 291 | 	    // Make sure this address is not trying to access kernel space. |
295 | || addr < 0 | | 292 | || addr < 0 |
296 | #ifdef _LP64 | | 293 | #ifdef _LP64 |
297 | // If this is a process using a 32-bit address space, make | | 294 | // If this is a process using a 32-bit address space, make |
298 | // sure the address is a signed 32-bit number. | | 295 | // sure the address is a signed 32-bit number. |
299 | || ((p->p_flag & PK_32) && (int32_t) addr != addr) | | 296 | || ((p->p_flag & PK_32) && (int32_t) addr != addr) |
300 | #endif | | 297 | #endif |
301 | || false))) { | | 298 | || false))) { |
302 | trap_ksi_init(ksi, SIGSEGV, SEGV_MAPERR, addr, cause); | | 299 | trap_ksi_init(ksi, SIGSEGV, SEGV_MAPERR, addr, cause); |
303 | return false; | | 300 | return false; |
304 | } | | 301 | } |
305 | | | 302 | |
306 | struct vm_map * const map = (addr >= 0 ? &p->p_vmspace->vm_map : kernel_map); | | 303 | struct vm_map * const map = (addr >= 0 ? &p->p_vmspace->vm_map : kernel_map); |
307 | | | 304 | |
308 | // See if this fault is for reference/modified/execpage tracking | | 305 | // See if this fault is for reference/modified/execpage tracking |
309 | if (trap_pagefault_fixup(tf, map->pmap, cause, addr)) | | 306 | if (trap_pagefault_fixup(tf, map->pmap, cause, addr)) |
310 | return true; | | 307 | return true; |
311 | | | 308 | |
312 | const vm_prot_t ftype = get_faulttype(cause); | | 309 | const vm_prot_t ftype = get_faulttype(cause); |
313 | | | 310 | |
314 | if (usertrap_p) { | | 311 | if (usertrap_p) { |
315 | int error = uvm_fault(&p->p_vmspace->vm_map, addr, ftype); | | 312 | int error = uvm_fault(&p->p_vmspace->vm_map, addr, ftype); |
316 | if (error) { | | 313 | if (error) { |
317 | trap_ksi_init(ksi, SIGSEGV, | | 314 | trap_ksi_init(ksi, SIGSEGV, |
318 | error == EACCES ? SEGV_ACCERR : SEGV_MAPERR, | | 315 | error == EACCES ? SEGV_ACCERR : SEGV_MAPERR, |
319 | (intptr_t)badaddr, cause); | | 316 | (intptr_t)badaddr, cause); |
320 | return false; | | 317 | return false; |
321 | } | | 318 | } |
322 | uvm_grow(p, addr); | | 319 | uvm_grow(p, addr); |
323 | return true; | | 320 | return true; |
324 | } | | 321 | } |
325 | | | 322 | |
326 | // Page fault are not allowed while dealing with interrupts | | 323 | // Page fault are not allowed while dealing with interrupts |
327 | if (cpu_intr_p()) | | 324 | if (cpu_intr_p()) |
328 | return false; | | 325 | return false; |
329 | | | 326 | |
330 | struct faultbuf * const fb = cpu_disable_onfault(); | | 327 | struct faultbuf * const fb = cpu_disable_onfault(); |
331 | int error = uvm_fault(map, addr, ftype); | | 328 | int error = uvm_fault(map, addr, ftype); |
332 | cpu_enable_onfault(fb); | | 329 | cpu_enable_onfault(fb); |
333 | if (error == 0) { | | 330 | if (error == 0) { |
334 | if (map != kernel_map) { | | 331 | if (map != kernel_map) { |
335 | uvm_grow(p, addr); | | 332 | uvm_grow(p, addr); |
336 | } | | 333 | } |
337 | return true; | | 334 | return true; |
338 | } | | 335 | } |
339 | | | 336 | |
340 | if (fb == NULL) { | | 337 | if (fb == NULL) { |
341 | return false; | | 338 | return false; |
342 | } | | 339 | } |
343 | | | 340 | |
344 | cpu_jump_onfault(tf, fb); | | 341 | cpu_jump_onfault(tf, fb); |
345 | return true; | | 342 | return true; |
346 | } | | 343 | } |
347 | | | 344 | |
348 | static bool | | 345 | static bool |
349 | trap_instruction(struct trapframe *tf, register_t epc, register_t status, | | 346 | trap_instruction(struct trapframe *tf, register_t epc, register_t status, |
350 | register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi) | | 347 | register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi) |
351 | { | | 348 | { |
352 | const bool prvopc_p = (cause == CAUSE_PRIVILEGED_INSTRUCTION); | | | |
353 | if (usertrap_p) { | | 349 | if (usertrap_p) { |
354 | trap_ksi_init(ksi, SIGILL, prvopc_p ? ILL_PRVOPC : ILL_ILLOPC, | | 350 | trap_ksi_init(ksi, SIGILL, ILL_ILLOPC, |
355 | (intptr_t)badaddr, cause); | | 351 | (intptr_t)badaddr, cause); |
356 | } | | 352 | } |
357 | return false; | | 353 | return false; |
358 | } | | 354 | } |
359 | | | 355 | |
360 | static bool | | 356 | static bool |
361 | trap_misalignment(struct trapframe *tf, register_t epc, register_t status, | | 357 | trap_misalignment(struct trapframe *tf, register_t epc, register_t status, |
362 | register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi) | | 358 | register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi) |
363 | { | | 359 | { |
364 | if (usertrap_p) { | | 360 | if (usertrap_p) { |
365 | trap_ksi_init(ksi, SIGBUS, BUS_ADRALN, | | 361 | trap_ksi_init(ksi, SIGBUS, BUS_ADRALN, |
366 | (intptr_t)badaddr, cause); | | 362 | (intptr_t)badaddr, cause); |
367 | } | | 363 | } |
368 | return false; | | 364 | return false; |
369 | } | | 365 | } |
370 | | | 366 | |
371 | void | | 367 | void |
372 | cpu_trap(struct trapframe *tf, register_t epc, register_t status, | | 368 | cpu_trap(struct trapframe *tf, register_t epc, register_t status, |
373 | register_t cause, register_t badaddr) | | 369 | register_t cause, register_t badaddr) |
374 | { | | 370 | { |
375 | const u_int fault_mask = 1U << cause; | | 371 | const u_int fault_mask = 1U << cause; |
376 | const intptr_t addr = badaddr; | | 372 | const intptr_t addr = badaddr; |
377 | const bool usertrap_p = (status & SR_PS) == 0; | | 373 | const bool usertrap_p = (status & SR_PS) == 0; |
378 | bool ok = true; | | 374 | bool ok = true; |
379 | ksiginfo_t ksi; | | 375 | ksiginfo_t ksi; |
380 | | | 376 | |
381 | if (__predict_true(fault_mask & FAULT_TRAP_MASK)) { | | 377 | if (__predict_true(fault_mask & FAULT_TRAP_MASK)) { |
382 | #ifndef _LP64 | | 378 | #ifndef _LP64 |
383 | 		// This fault may be because the kernel's page table got a new | | 379 | 		// This fault may be because the kernel's page table got a new |
384 | // page table page and this pmap's page table doesn't know | | 380 | // page table page and this pmap's page table doesn't know |
385 | // about it. See | | 381 | // about it. See |
386 | struct pmap * const pmap = curlwp->l_proc->p_vmspace->vm_map.pmap; | | 382 | struct pmap * const pmap = curlwp->l_proc->p_vmspace->vm_map.pmap; |
387 | if ((intptr_t) addr < 0 | | 383 | if ((intptr_t) addr < 0 |
388 | && pmap != pmap_kernel() | | 384 | && pmap != pmap_kernel() |
389 | && pmap_pdetab_fixup(pmap, addr)) { | | 385 | && pmap_pdetab_fixup(pmap, addr)) { |
390 | return; | | 386 | return; |
391 | } | | 387 | } |
392 | #endif | | 388 | #endif |
393 | ok = trap_pagefault(tf, epc, status, cause, addr, | | 389 | ok = trap_pagefault(tf, epc, status, cause, addr, |
394 | usertrap_p, &ksi); | | 390 | usertrap_p, &ksi); |
395 | } else if (fault_mask & INSTRUCTION_TRAP_MASK) { | | 391 | } else if (fault_mask & INSTRUCTION_TRAP_MASK) { |
396 | ok = trap_instruction(tf, epc, status, cause, addr, | | 392 | ok = trap_instruction(tf, epc, status, cause, addr, |
397 | usertrap_p, &ksi); | | 393 | usertrap_p, &ksi); |
| | | 394 | #if 0 |
398 | 	} else if (fault_mask & __BIT(CAUSE_FP_DISABLED)) { | | 395 | 	} else if (fault_mask & __BIT(CAUSE_FP_DISABLED)) { |
399 | if (!usertrap_p) { | | 396 | if (!usertrap_p) { |
400 | panic("%s: fp used @ %#"PRIxREGISTER" in kernel!", | | 397 | panic("%s: fp used @ %#"PRIxREGISTER" in kernel!", |
401 | __func__, tf->tf_pc); | | 398 | __func__, tf->tf_pc); |
402 | } | | 399 | } |
403 | fpu_load(); | | 400 | fpu_load(); |
| | | 401 | #endif |
404 | } else if (fault_mask & MISALIGNED_TRAP_MASK) { | | 402 | } else if (fault_mask & MISALIGNED_TRAP_MASK) { |
405 | ok = trap_misalignment(tf, epc, status, cause, addr, | | 403 | ok = trap_misalignment(tf, epc, status, cause, addr, |
406 | usertrap_p, &ksi); | | 404 | usertrap_p, &ksi); |
407 | } else { | | 405 | } else { |
408 | dump_trapframe(tf, printf); | | 406 | dump_trapframe(tf, printf); |
409 | panic("%s: unknown kernel trap", __func__); | | 407 | panic("%s: unknown kernel trap", __func__); |
410 | } | | 408 | } |
411 | | | 409 | |
412 | if (usertrap_p) { | | 410 | if (usertrap_p) { |
413 | if (!ok) | | 411 | if (!ok) |
414 | cpu_trapsignal(tf, &ksi); | | 412 | cpu_trapsignal(tf, &ksi); |
415 | userret(curlwp); | | 413 | userret(curlwp); |
416 | } else if (!ok) { | | 414 | } else if (!ok) { |
417 | dump_trapframe(tf, printf); | | 415 | dump_trapframe(tf, printf); |
418 | panic("%s: fatal kernel trap", __func__); | | 416 | panic("%s: fatal kernel trap", __func__); |
419 | } | | 417 | } |
420 | } | | 418 | } |
421 | | | 419 | |
422 | void | | 420 | void |
423 | cpu_ast(struct trapframe *tf) | | 421 | cpu_ast(struct trapframe *tf) |
424 | { | | 422 | { |
425 | | | 423 | |
426 | atomic_swap_uint(&curlwp->l_md.md_astpending, 0); | | 424 | atomic_swap_uint(&curlwp->l_md.md_astpending, 0); |
427 | | | 425 | |
428 | if (curlwp->l_pflag & LP_OWEUPC) { | | 426 | if (curlwp->l_pflag & LP_OWEUPC) { |
429 | curlwp->l_pflag &= ~LP_OWEUPC; | | 427 | curlwp->l_pflag &= ~LP_OWEUPC; |
430 | ADDUPROF(curlwp); | | 428 | ADDUPROF(curlwp); |
431 | } | | 429 | } |
432 | } | | 430 | } |
433 | | | 431 | |
434 | void | | 432 | void |
435 | cpu_intr(struct trapframe *tf, register_t epc, register_t status, | | 433 | cpu_intr(struct trapframe *tf, register_t epc, register_t status, |
436 | register_t cause) | | 434 | register_t cause) |
437 | { | | 435 | { |
438 | /* XXX */ | | 436 | /* XXX */ |
439 | } | | 437 | } |
440 | | | 438 | |
441 | static int | | 439 | static int |
442 | fetch_user_data(const void *uaddr, void *valp, size_t size) | | 440 | fetch_user_data(const void *uaddr, void *valp, size_t size) |
443 | { | | 441 | { |
444 | struct faultbuf fb; | | 442 | struct faultbuf fb; |
445 | int error; | | 443 | int error; |
446 | | | 444 | |
447 | if ((error = cpu_set_onfault(&fb, 1)) != 0) | | 445 | if ((error = cpu_set_onfault(&fb, 1)) != 0) |
448 | return error; | | 446 | return error; |
449 | | | 447 | |
450 | switch (size) { | | 448 | switch (size) { |
451 | case 1: | | 449 | case 1: |
452 | *(uint8_t *)valp = *(volatile const uint8_t *)uaddr; | | 450 | *(uint8_t *)valp = *(volatile const uint8_t *)uaddr; |
453 | break; | | 451 | break; |
454 | case 2: | | 452 | case 2: |
455 | *(uint16_t *)valp = *(volatile const uint16_t *)uaddr; | | 453 | *(uint16_t *)valp = *(volatile const uint16_t *)uaddr; |
456 | break; | | 454 | break; |
457 | case 4: | | 455 | case 4: |
458 | *(uint32_t *)valp = *(volatile const uint32_t *)uaddr; | | 456 | *(uint32_t *)valp = *(volatile const uint32_t *)uaddr; |
459 | break; | | 457 | break; |
460 | #ifdef _LP64 | | 458 | #ifdef _LP64 |
461 | case 8: | | 459 | case 8: |
462 | *(uint64_t *)valp = *(volatile const uint64_t *)uaddr; | | 460 | *(uint64_t *)valp = *(volatile const uint64_t *)uaddr; |
463 | break; | | 461 | break; |
464 | #endif /* _LP64 */ | | 462 | #endif /* _LP64 */ |
465 | default: | | 463 | default: |
466 | error = EINVAL; | | 464 | error = EINVAL; |
467 | } | | 465 | } |
468 | | | 466 | |
469 | cpu_unset_onfault(); | | 467 | cpu_unset_onfault(); |
470 | return error; | | 468 | return error; |
471 | } | | 469 | } |
472 | | | 470 | |
473 | int | | 471 | int |
474 | _ufetch_8(const uint8_t *uaddr, uint8_t *valp) | | 472 | _ufetch_8(const uint8_t *uaddr, uint8_t *valp) |
475 | { | | 473 | { |
476 | return fetch_user_data(uaddr, valp, sizeof(*valp)); | | 474 | return fetch_user_data(uaddr, valp, sizeof(*valp)); |
477 | } | | 475 | } |
478 | | | 476 | |
/* Fetch a 16-bit value from user space via the common helper. */
int
_ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{
	return fetch_user_data(uaddr, valp, sizeof(uint16_t));
}
484 | | | 482 | |
/* Fetch a 32-bit value from user space via the common helper. */
int
_ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{
	return fetch_user_data(uaddr, valp, sizeof(uint32_t));
}
490 | | | 488 | |
#ifdef _LP64
/* Fetch a 64-bit value from user space; only on LP64 kernels. */
int
_ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{
	return fetch_user_data(uaddr, valp, sizeof(uint64_t));
}
#endif /* _LP64 */
498 | | | 496 | |
499 | static int | | 497 | static int |
500 | store_user_data(void *uaddr, const void *valp, size_t size) | | 498 | store_user_data(void *uaddr, const void *valp, size_t size) |
501 | { | | 499 | { |
502 | struct faultbuf fb; | | 500 | struct faultbuf fb; |
503 | int error; | | 501 | int error; |
504 | | | 502 | |
505 | if ((error = cpu_set_onfault(&fb, 1)) != 0) | | 503 | if ((error = cpu_set_onfault(&fb, 1)) != 0) |
506 | return error; | | 504 | return error; |
507 | | | 505 | |
508 | switch (size) { | | 506 | switch (size) { |
509 | case 1: | | 507 | case 1: |
510 | *(volatile uint8_t *)uaddr = *(const uint8_t *)valp; | | 508 | *(volatile uint8_t *)uaddr = *(const uint8_t *)valp; |
511 | break; | | 509 | break; |
512 | case 2: | | 510 | case 2: |
513 | *(volatile uint16_t *)uaddr = *(const uint8_t *)valp; | | 511 | *(volatile uint16_t *)uaddr = *(const uint8_t *)valp; |
514 | break; | | 512 | break; |
515 | case 4: | | 513 | case 4: |
516 | *(volatile uint32_t *)uaddr = *(const uint32_t *)valp; | | 514 | *(volatile uint32_t *)uaddr = *(const uint32_t *)valp; |
517 | break; | | 515 | break; |
518 | #ifdef _LP64 | | 516 | #ifdef _LP64 |
519 | case 8: | | 517 | case 8: |
520 | *(volatile uint64_t *)uaddr = *(const uint64_t *)valp; | | 518 | *(volatile uint64_t *)uaddr = *(const uint64_t *)valp; |
521 | break; | | 519 | break; |
522 | #endif /* _LP64 */ | | 520 | #endif /* _LP64 */ |
523 | default: | | 521 | default: |
524 | error = EINVAL; | | 522 | error = EINVAL; |
525 | } | | 523 | } |
526 | | | 524 | |
527 | cpu_unset_onfault(); | | 525 | cpu_unset_onfault(); |
528 | return error; | | 526 | return error; |
529 | } | | 527 | } |
530 | | | 528 | |
/* Store an 8-bit value to user space via the common helper. */
int
_ustore_8(uint8_t *uaddr, uint8_t val)
{
	return store_user_data(uaddr, &val, sizeof(uint8_t));
}
536 | | | 534 | |
/* Store a 16-bit value to user space via the common helper. */
int
_ustore_16(uint16_t *uaddr, uint16_t val)
{
	return store_user_data(uaddr, &val, sizeof(uint16_t));
}
542 | | | 540 | |
/* Store a 32-bit value to user space via the common helper. */
int
_ustore_32(uint32_t *uaddr, uint32_t val)
{
	return store_user_data(uaddr, &val, sizeof(uint32_t));
}
548 | | | 546 | |
#ifdef _LP64
/* Store a 64-bit value to user space; only on LP64 kernels. */
int
_ustore_64(uint64_t *uaddr, uint64_t val)
{
	return store_user_data(uaddr, &val, sizeof(uint64_t));
}
#endif /* _LP64 */