Mon Jul 6 11:23:59 2020 UTC ()
This file is not used for ibm4xx.


(rin)
diff -r1.159 -r1.160 src/sys/arch/powerpc/powerpc/trap.c

cvs diff -r1.159 -r1.160 src/sys/arch/powerpc/powerpc/trap.c (switch to unified diff)

--- src/sys/arch/powerpc/powerpc/trap.c 2020/07/06 11:08:21 1.159
+++ src/sys/arch/powerpc/powerpc/trap.c 2020/07/06 11:23:59 1.160
@@ -1,1162 +1,1162 @@ @@ -1,1162 +1,1162 @@
1/* $NetBSD: trap.c,v 1.159 2020/07/06 11:08:21 rin Exp $ */ 1/* $NetBSD: trap.c,v 1.160 2020/07/06 11:23:59 rin Exp $ */
2 2
3/* 3/*
4 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 4 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5 * Copyright (C) 1995, 1996 TooLs GmbH. 5 * Copyright (C) 1995, 1996 TooLs GmbH.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software 16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement: 17 * must display the following acknowledgement:
18 * This product includes software developed by TooLs GmbH. 18 * This product includes software developed by TooLs GmbH.
19 * 4. The name of TooLs GmbH may not be used to endorse or promote products 19 * 4. The name of TooLs GmbH may not be used to endorse or promote products
20 * derived from this software without specific prior written permission. 20 * derived from this software without specific prior written permission.
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 22 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#define __UFETCHSTORE_PRIVATE 34#define __UFETCHSTORE_PRIVATE
35#define __UCAS_PRIVATE 35#define __UCAS_PRIVATE
36 36
37#include <sys/cdefs.h> 37#include <sys/cdefs.h>
38__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.159 2020/07/06 11:08:21 rin Exp $"); 38__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.160 2020/07/06 11:23:59 rin Exp $");
39 39
40#ifdef _KERNEL_OPT 40#ifdef _KERNEL_OPT
41#include "opt_altivec.h" 41#include "opt_altivec.h"
42#include "opt_ddb.h" 42#include "opt_ddb.h"
43#include "opt_ppcarch.h" 43#include "opt_ppcarch.h"
44#endif 44#endif
45 45
46#include <sys/param.h> 46#include <sys/param.h>
47 47
48#include <sys/proc.h> 48#include <sys/proc.h>
49#include <sys/ras.h> 49#include <sys/ras.h>
50#include <sys/reboot.h> 50#include <sys/reboot.h>
51#include <sys/systm.h> 51#include <sys/systm.h>
52#include <sys/kauth.h> 52#include <sys/kauth.h>
53#include <sys/cpu.h> 53#include <sys/cpu.h>
54 54
55#include <uvm/uvm_extern.h> 55#include <uvm/uvm_extern.h>
56 56
57#include <dev/cons.h> 57#include <dev/cons.h>
58 58
59#include <powerpc/altivec.h> 59#include <powerpc/altivec.h>
60#include <powerpc/db_machdep.h> 60#include <powerpc/db_machdep.h>
61#include <powerpc/fpu.h> 61#include <powerpc/fpu.h>
62#include <powerpc/frame.h> 62#include <powerpc/frame.h>
63#include <powerpc/instr.h> 63#include <powerpc/instr.h>
64#include <powerpc/pcb.h> 64#include <powerpc/pcb.h>
65#include <powerpc/pmap.h> 65#include <powerpc/pmap.h>
66#include <powerpc/trap.h> 66#include <powerpc/trap.h>
67#include <powerpc/userret.h> 67#include <powerpc/userret.h>
68 68
69#include <powerpc/spr.h> 69#include <powerpc/spr.h>
70#include <powerpc/oea/spr.h> 70#include <powerpc/oea/spr.h>
71 71
72static int emulated_opcode(struct lwp *, struct trapframe *); 72static int emulated_opcode(struct lwp *, struct trapframe *);
73static int fix_unaligned(struct lwp *, struct trapframe *); 73static int fix_unaligned(struct lwp *, struct trapframe *);
74static inline vaddr_t setusr(vaddr_t, size_t *); 74static inline vaddr_t setusr(vaddr_t, size_t *);
75static inline void unsetusr(void); 75static inline void unsetusr(void);
76 76
77void trap(struct trapframe *); /* Called from locore / trap_subr */ 77void trap(struct trapframe *); /* Called from locore / trap_subr */
78/* Why are these not defined in a header? */ 78/* Why are these not defined in a header? */
79int badaddr(void *, size_t); 79int badaddr(void *, size_t);
80int badaddr_read(void *, size_t, int *); 80int badaddr_read(void *, size_t, int *);
81 81
82struct dsi_info { 82struct dsi_info {
83 uint16_t indicator; 83 uint16_t indicator;
84 uint16_t flags; 84 uint16_t flags;
85}; 85};
86 86
87static const struct dsi_info* get_dsi_info(register_t); 87static const struct dsi_info* get_dsi_info(register_t);
88 88
89void 89void
90trap(struct trapframe *tf) 90trap(struct trapframe *tf)
91{ 91{
92 struct cpu_info * const ci = curcpu(); 92 struct cpu_info * const ci = curcpu();
93 struct lwp * const l = curlwp; 93 struct lwp * const l = curlwp;
94 struct proc * const p = l->l_proc; 94 struct proc * const p = l->l_proc;
95 struct pcb * const pcb = curpcb; 95 struct pcb * const pcb = curpcb;
96 struct vm_map *map; 96 struct vm_map *map;
97 ksiginfo_t ksi; 97 ksiginfo_t ksi;
98 const bool usertrap = (tf->tf_srr1 & PSL_PR); 98 const bool usertrap = (tf->tf_srr1 & PSL_PR);
99 int type = tf->tf_exc; 99 int type = tf->tf_exc;
100 int ftype, rv; 100 int ftype, rv;
101 101
102 ci->ci_ev_traps.ev_count++; 102 ci->ci_ev_traps.ev_count++;
103 103
104 KASSERTMSG(!usertrap || tf == l->l_md.md_utf, 104 KASSERTMSG(!usertrap || tf == l->l_md.md_utf,
105 "trap: tf=%p is invalid: trapframe(%p)=%p", tf, l, l->l_md.md_utf); 105 "trap: tf=%p is invalid: trapframe(%p)=%p", tf, l, l->l_md.md_utf);
106 106
107 if (usertrap) { 107 if (usertrap) {
108 type |= EXC_USER; 108 type |= EXC_USER;
109#ifdef DIAGNOSTIC 109#ifdef DIAGNOSTIC
110 if (l == NULL || p == NULL) 110 if (l == NULL || p == NULL)
111 panic("trap: user trap %d with lwp = %p, proc = %p", 111 panic("trap: user trap %d with lwp = %p, proc = %p",
112 type, l, p); 112 type, l, p);
113#endif 113#endif
114 LWP_CACHE_CREDS(l, p); 114 LWP_CACHE_CREDS(l, p);
115 } 115 }
116 116
117 ci->ci_data.cpu_ntrap++; 117 ci->ci_data.cpu_ntrap++;
118 118
119 switch (type) { 119 switch (type) {
120 case EXC_RUNMODETRC|EXC_USER: 120 case EXC_RUNMODETRC|EXC_USER:
121 /* FALLTHROUGH */ 121 /* FALLTHROUGH */
122 case EXC_TRC|EXC_USER: 122 case EXC_TRC|EXC_USER:
123 tf->tf_srr1 &= ~PSL_SE; 123 tf->tf_srr1 &= ~PSL_SE;
124 if (p->p_raslist == NULL || 124 if (p->p_raslist == NULL ||
125 ras_lookup(p, (void *)tf->tf_srr0) == (void *) -1) { 125 ras_lookup(p, (void *)tf->tf_srr0) == (void *) -1) {
126 KSI_INIT_TRAP(&ksi); 126 KSI_INIT_TRAP(&ksi);
127 ksi.ksi_signo = SIGTRAP; 127 ksi.ksi_signo = SIGTRAP;
128 ksi.ksi_trap = EXC_TRC; 128 ksi.ksi_trap = EXC_TRC;
129 ksi.ksi_addr = (void *)tf->tf_srr0; 129 ksi.ksi_addr = (void *)tf->tf_srr0;
130 ksi.ksi_code = TRAP_TRACE; 130 ksi.ksi_code = TRAP_TRACE;
131 (*p->p_emul->e_trapsignal)(l, &ksi); 131 (*p->p_emul->e_trapsignal)(l, &ksi);
132 } 132 }
133 break; 133 break;
134 case EXC_DSI: { 134 case EXC_DSI: {
135 struct faultbuf * const fb = pcb->pcb_onfault; 135 struct faultbuf * const fb = pcb->pcb_onfault;
136 vaddr_t va = tf->tf_dar; 136 vaddr_t va = tf->tf_dar;
137 137
138 ci->ci_ev_kdsi.ev_count++; 138 ci->ci_ev_kdsi.ev_count++;
139 139
140 /* 140 /*
141 * Only query UVM if no interrupts are active. 141 * Only query UVM if no interrupts are active.
142 */ 142 */
143 if (ci->ci_idepth < 0) { 143 if (ci->ci_idepth < 0) {
144 if ((va >> ADDR_SR_SHFT) == pcb->pcb_kmapsr) { 144 if ((va >> ADDR_SR_SHFT) == pcb->pcb_kmapsr) {
145 va &= ADDR_PIDX | ADDR_POFF; 145 va &= ADDR_PIDX | ADDR_POFF;
146 va |= pcb->pcb_umapsr << ADDR_SR_SHFT; 146 va |= pcb->pcb_umapsr << ADDR_SR_SHFT;
147 map = &p->p_vmspace->vm_map; 147 map = &p->p_vmspace->vm_map;
148#ifdef PPC_OEA64 148#ifdef PPC_OEA64
149 if ((tf->tf_dsisr & DSISR_NOTFOUND) && 149 if ((tf->tf_dsisr & DSISR_NOTFOUND) &&
150 vm_map_pmap(map)->pm_ste_evictions > 0 && 150 vm_map_pmap(map)->pm_ste_evictions > 0 &&
151 pmap_ste_spill(vm_map_pmap(map), 151 pmap_ste_spill(vm_map_pmap(map),
152 trunc_page(va), false)) { 152 trunc_page(va), false)) {
153 return; 153 return;
154 } 154 }
155#endif 155#endif
156 156
157 if ((tf->tf_dsisr & DSISR_NOTFOUND) && 157 if ((tf->tf_dsisr & DSISR_NOTFOUND) &&
158 vm_map_pmap(map)->pm_evictions > 0 && 158 vm_map_pmap(map)->pm_evictions > 0 &&
159 pmap_pte_spill(vm_map_pmap(map), 159 pmap_pte_spill(vm_map_pmap(map),
160 trunc_page(va), false)) { 160 trunc_page(va), false)) {
161 return; 161 return;
162 } 162 }
163#if defined(DIAGNOSTIC) && !defined(PPC_OEA64) && !defined (PPC_IBM4XX) 163#if defined(DIAGNOSTIC) && !defined(PPC_OEA64)
164 } else if ((va >> ADDR_SR_SHFT) == USER_SR) { 164 } else if ((va >> ADDR_SR_SHFT) == USER_SR) {
165 printf("trap: kernel %s DSI trap @ %#lx by %#lx" 165 printf("trap: kernel %s DSI trap @ %#lx by %#lx"
166 " (DSISR %#x): USER_SR unset\n", 166 " (DSISR %#x): USER_SR unset\n",
167 (tf->tf_dsisr & DSISR_STORE) 167 (tf->tf_dsisr & DSISR_STORE)
168 ? "write" : "read", 168 ? "write" : "read",
169 va, tf->tf_srr0, tf->tf_dsisr); 169 va, tf->tf_srr0, tf->tf_dsisr);
170 goto brain_damage2; 170 goto brain_damage2;
171#endif 171#endif
172 } else { 172 } else {
173 map = kernel_map; 173 map = kernel_map;
174 } 174 }
175 175
176 if (tf->tf_dsisr & DSISR_STORE) 176 if (tf->tf_dsisr & DSISR_STORE)
177 ftype = VM_PROT_WRITE; 177 ftype = VM_PROT_WRITE;
178 else 178 else
179 ftype = VM_PROT_READ; 179 ftype = VM_PROT_READ;
180 180
181 pcb->pcb_onfault = NULL; 181 pcb->pcb_onfault = NULL;
182 rv = uvm_fault(map, trunc_page(va), ftype); 182 rv = uvm_fault(map, trunc_page(va), ftype);
183 pcb->pcb_onfault = fb; 183 pcb->pcb_onfault = fb;
184 184
185 if (map != kernel_map) { 185 if (map != kernel_map) {
186 /* 186 /*
187 * Record any stack growth... 187 * Record any stack growth...
188 */ 188 */
189 if (rv == 0) 189 if (rv == 0)
190 uvm_grow(p, trunc_page(va)); 190 uvm_grow(p, trunc_page(va));
191 } 191 }
192 if (rv == 0) 192 if (rv == 0)
193 return; 193 return;
194 if (rv == EACCES) 194 if (rv == EACCES)
195 rv = EFAULT; 195 rv = EFAULT;
196 } else { 196 } else {
197 /* 197 /*
198 * Note that this implies that access to the USER 198 * Note that this implies that access to the USER
199 * segment is not allowed in interrupt context. 199 * segment is not allowed in interrupt context.
200 */ 200 */
201 rv = EFAULT; 201 rv = EFAULT;
202 } 202 }
203 if (fb != NULL) { 203 if (fb != NULL) {
204 tf->tf_srr0 = fb->fb_pc; 204 tf->tf_srr0 = fb->fb_pc;
205 tf->tf_cr = fb->fb_cr; 205 tf->tf_cr = fb->fb_cr;
206 tf->tf_fixreg[1] = fb->fb_sp; 206 tf->tf_fixreg[1] = fb->fb_sp;
207 tf->tf_fixreg[2] = fb->fb_r2; 207 tf->tf_fixreg[2] = fb->fb_r2;
208 tf->tf_fixreg[3] = rv; 208 tf->tf_fixreg[3] = rv;
209 memcpy(&tf->tf_fixreg[13], fb->fb_fixreg, 209 memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
210 sizeof(fb->fb_fixreg)); 210 sizeof(fb->fb_fixreg));
211 return; 211 return;
212 } 212 }
213 printf("trap: kernel %s DSI trap @ %#lx by %#lx (DSISR %#x, err" 213 printf("trap: kernel %s DSI trap @ %#lx by %#lx (DSISR %#x, err"
214 "=%d), lr %#lx\n", (tf->tf_dsisr & DSISR_STORE) ? "write" : "read", 214 "=%d), lr %#lx\n", (tf->tf_dsisr & DSISR_STORE) ? "write" : "read",
215 va, tf->tf_srr0, tf->tf_dsisr, rv, tf->tf_lr); 215 va, tf->tf_srr0, tf->tf_dsisr, rv, tf->tf_lr);
216 goto brain_damage2; 216 goto brain_damage2;
217 } 217 }
218 case EXC_DSI|EXC_USER: 218 case EXC_DSI|EXC_USER:
219 ci->ci_ev_udsi.ev_count++; 219 ci->ci_ev_udsi.ev_count++;
220 if (tf->tf_dsisr & DSISR_STORE) 220 if (tf->tf_dsisr & DSISR_STORE)
221 ftype = VM_PROT_WRITE; 221 ftype = VM_PROT_WRITE;
222 else 222 else
223 ftype = VM_PROT_READ; 223 ftype = VM_PROT_READ;
224 224
225 /* 225 /*
226 * Try to spill an evicted pte into the page table 226 * Try to spill an evicted pte into the page table
227 * if this wasn't a protection fault and the pmap 227 * if this wasn't a protection fault and the pmap
228 * has some evicted pte's. 228 * has some evicted pte's.
229 */ 229 */
230 map = &p->p_vmspace->vm_map; 230 map = &p->p_vmspace->vm_map;
231#ifdef PPC_OEA64 231#ifdef PPC_OEA64
232 if ((tf->tf_dsisr & DSISR_NOTFOUND) && 232 if ((tf->tf_dsisr & DSISR_NOTFOUND) &&
233 vm_map_pmap(map)->pm_ste_evictions > 0 && 233 vm_map_pmap(map)->pm_ste_evictions > 0 &&
234 pmap_ste_spill(vm_map_pmap(map), trunc_page(tf->tf_dar), 234 pmap_ste_spill(vm_map_pmap(map), trunc_page(tf->tf_dar),
235 false)) { 235 false)) {
236 break; 236 break;
237 } 237 }
238#endif 238#endif
239 239
240 if ((tf->tf_dsisr & DSISR_NOTFOUND) && 240 if ((tf->tf_dsisr & DSISR_NOTFOUND) &&
241 vm_map_pmap(map)->pm_evictions > 0 && 241 vm_map_pmap(map)->pm_evictions > 0 &&
242 pmap_pte_spill(vm_map_pmap(map), trunc_page(tf->tf_dar), 242 pmap_pte_spill(vm_map_pmap(map), trunc_page(tf->tf_dar),
243 false)) { 243 false)) {
244 break; 244 break;
245 } 245 }
246 246
247 KASSERT(pcb->pcb_onfault == NULL); 247 KASSERT(pcb->pcb_onfault == NULL);
248 rv = uvm_fault(map, trunc_page(tf->tf_dar), ftype); 248 rv = uvm_fault(map, trunc_page(tf->tf_dar), ftype);
249 if (rv == 0) { 249 if (rv == 0) {
250 /* 250 /*
251 * Record any stack growth... 251 * Record any stack growth...
252 */ 252 */
253 uvm_grow(p, trunc_page(tf->tf_dar)); 253 uvm_grow(p, trunc_page(tf->tf_dar));
254 break; 254 break;
255 } 255 }
256 ci->ci_ev_udsi_fatal.ev_count++; 256 ci->ci_ev_udsi_fatal.ev_count++;
257 if (cpu_printfataltraps 257 if (cpu_printfataltraps
258 && (p->p_slflag & PSL_TRACED) == 0 258 && (p->p_slflag & PSL_TRACED) == 0
259 && !sigismember(&p->p_sigctx.ps_sigcatch, SIGSEGV)) { 259 && !sigismember(&p->p_sigctx.ps_sigcatch, SIGSEGV)) {
260 printf("trap: pid %d.%d (%s): user %s DSI trap @ %#lx " 260 printf("trap: pid %d.%d (%s): user %s DSI trap @ %#lx "
261 "by %#lx (DSISR %#x, err=%d)\n", 261 "by %#lx (DSISR %#x, err=%d)\n",
262 p->p_pid, l->l_lid, p->p_comm, 262 p->p_pid, l->l_lid, p->p_comm,
263 (tf->tf_dsisr & DSISR_STORE) ? "write" : "read", 263 (tf->tf_dsisr & DSISR_STORE) ? "write" : "read",
264 tf->tf_dar, tf->tf_srr0, tf->tf_dsisr, rv); 264 tf->tf_dar, tf->tf_srr0, tf->tf_dsisr, rv);
265 } 265 }
266 KSI_INIT_TRAP(&ksi); 266 KSI_INIT_TRAP(&ksi);
267 ksi.ksi_trap = EXC_DSI; 267 ksi.ksi_trap = EXC_DSI;
268 ksi.ksi_addr = (void *)tf->tf_dar; 268 ksi.ksi_addr = (void *)tf->tf_dar;
269vm_signal: 269vm_signal:
270 switch (rv) { 270 switch (rv) {
271 case EINVAL: 271 case EINVAL:
272 ksi.ksi_signo = SIGBUS; 272 ksi.ksi_signo = SIGBUS;
273 ksi.ksi_code = BUS_ADRERR; 273 ksi.ksi_code = BUS_ADRERR;
274 break; 274 break;
275 case EACCES: 275 case EACCES:
276 ksi.ksi_signo = SIGSEGV; 276 ksi.ksi_signo = SIGSEGV;
277 ksi.ksi_code = SEGV_ACCERR; 277 ksi.ksi_code = SEGV_ACCERR;
278 break; 278 break;
279 case ENOMEM: 279 case ENOMEM:
280 ksi.ksi_signo = SIGKILL; 280 ksi.ksi_signo = SIGKILL;
281 printf("UVM: pid %d.%d (%s), uid %d killed: " 281 printf("UVM: pid %d.%d (%s), uid %d killed: "
282 "out of swap\n", p->p_pid, l->l_lid, p->p_comm, 282 "out of swap\n", p->p_pid, l->l_lid, p->p_comm,
283 l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1); 283 l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
284 break; 284 break;
285 default: 285 default:
286 ksi.ksi_signo = SIGSEGV; 286 ksi.ksi_signo = SIGSEGV;
287 ksi.ksi_code = SEGV_MAPERR; 287 ksi.ksi_code = SEGV_MAPERR;
288 break; 288 break;
289 } 289 }
290 (*p->p_emul->e_trapsignal)(l, &ksi); 290 (*p->p_emul->e_trapsignal)(l, &ksi);
291 break; 291 break;
292 292
293 case EXC_ISI: 293 case EXC_ISI:
294 ci->ci_ev_kisi.ev_count++; 294 ci->ci_ev_kisi.ev_count++;
295 295
296 printf("trap: kernel ISI by %#lx (SRR1 %#lx), lr: %#lx\n", 296 printf("trap: kernel ISI by %#lx (SRR1 %#lx), lr: %#lx\n",
297 tf->tf_srr0, tf->tf_srr1, tf->tf_lr); 297 tf->tf_srr0, tf->tf_srr1, tf->tf_lr);
298 goto brain_damage2; 298 goto brain_damage2;
299 299
300 case EXC_ISI|EXC_USER: 300 case EXC_ISI|EXC_USER:
301 ci->ci_ev_isi.ev_count++; 301 ci->ci_ev_isi.ev_count++;
302 302
303 /* 303 /*
304 * Try to spill an evicted pte into the page table 304 * Try to spill an evicted pte into the page table
305 * if this wasn't a protection fault and the pmap 305 * if this wasn't a protection fault and the pmap
306 * has some evicted pte's. 306 * has some evicted pte's.
307 */ 307 */
308 map = &p->p_vmspace->vm_map; 308 map = &p->p_vmspace->vm_map;
309#ifdef PPC_OEA64 309#ifdef PPC_OEA64
310 if (vm_map_pmap(map)->pm_ste_evictions > 0 && 310 if (vm_map_pmap(map)->pm_ste_evictions > 0 &&
311 pmap_ste_spill(vm_map_pmap(map), trunc_page(tf->tf_srr0), 311 pmap_ste_spill(vm_map_pmap(map), trunc_page(tf->tf_srr0),
312 true)) { 312 true)) {
313 break; 313 break;
314 } 314 }
315#endif 315#endif
316 316
317 if (vm_map_pmap(map)->pm_evictions > 0 && 317 if (vm_map_pmap(map)->pm_evictions > 0 &&
318 pmap_pte_spill(vm_map_pmap(map), trunc_page(tf->tf_srr0), 318 pmap_pte_spill(vm_map_pmap(map), trunc_page(tf->tf_srr0),
319 true)) { 319 true)) {
320 break; 320 break;
321 } 321 }
322 322
323 ftype = VM_PROT_EXECUTE; 323 ftype = VM_PROT_EXECUTE;
324 KASSERT(pcb->pcb_onfault == NULL); 324 KASSERT(pcb->pcb_onfault == NULL);
325 rv = uvm_fault(map, trunc_page(tf->tf_srr0), ftype); 325 rv = uvm_fault(map, trunc_page(tf->tf_srr0), ftype);
326 if (rv == 0) { 326 if (rv == 0) {
327 break; 327 break;
328 } 328 }
329 ci->ci_ev_isi_fatal.ev_count++; 329 ci->ci_ev_isi_fatal.ev_count++;
330 if (cpu_printfataltraps 330 if (cpu_printfataltraps
331 && (p->p_slflag & PSL_TRACED) == 0 331 && (p->p_slflag & PSL_TRACED) == 0
332 && !sigismember(&p->p_sigctx.ps_sigcatch, SIGSEGV)) { 332 && !sigismember(&p->p_sigctx.ps_sigcatch, SIGSEGV)) {
333 printf("trap: pid %d.%d (%s): user ISI trap @ %#lx " 333 printf("trap: pid %d.%d (%s): user ISI trap @ %#lx "
334 "(SRR1=%#lx)\n", p->p_pid, l->l_lid, p->p_comm, 334 "(SRR1=%#lx)\n", p->p_pid, l->l_lid, p->p_comm,
335 tf->tf_srr0, tf->tf_srr1); 335 tf->tf_srr0, tf->tf_srr1);
336 } 336 }
337 KSI_INIT_TRAP(&ksi); 337 KSI_INIT_TRAP(&ksi);
338 ksi.ksi_trap = EXC_ISI; 338 ksi.ksi_trap = EXC_ISI;
339 ksi.ksi_addr = (void *)tf->tf_srr0; 339 ksi.ksi_addr = (void *)tf->tf_srr0;
340 goto vm_signal; 340 goto vm_signal;
341 341
342 case EXC_FPU|EXC_USER: 342 case EXC_FPU|EXC_USER:
343 ci->ci_ev_fpu.ev_count++; 343 ci->ci_ev_fpu.ev_count++;
344 fpu_load(); 344 fpu_load();
345 break; 345 break;
346 346
347 case EXC_AST|EXC_USER: 347 case EXC_AST|EXC_USER:
348 cpu_ast(l, ci); 348 cpu_ast(l, ci);
349 break; 349 break;
350 350
351 case EXC_ALI|EXC_USER: 351 case EXC_ALI|EXC_USER:
352 ci->ci_ev_ali.ev_count++; 352 ci->ci_ev_ali.ev_count++;
353 if (fix_unaligned(l, tf) != 0) { 353 if (fix_unaligned(l, tf) != 0) {
354 ci->ci_ev_ali_fatal.ev_count++; 354 ci->ci_ev_ali_fatal.ev_count++;
355 if (cpu_printfataltraps 355 if (cpu_printfataltraps
356 && (p->p_slflag & PSL_TRACED) == 0 356 && (p->p_slflag & PSL_TRACED) == 0
357 && !sigismember(&p->p_sigctx.ps_sigcatch, SIGBUS)) { 357 && !sigismember(&p->p_sigctx.ps_sigcatch, SIGBUS)) {
358 printf("trap: pid %d.%d (%s): user ALI trap @ " 358 printf("trap: pid %d.%d (%s): user ALI trap @ "
359 "%#lx by %#lx (DSISR %#x)\n", 359 "%#lx by %#lx (DSISR %#x)\n",
360 p->p_pid, l->l_lid, p->p_comm, 360 p->p_pid, l->l_lid, p->p_comm,
361 tf->tf_dar, tf->tf_srr0, tf->tf_dsisr); 361 tf->tf_dar, tf->tf_srr0, tf->tf_dsisr);
362 } 362 }
363 KSI_INIT_TRAP(&ksi); 363 KSI_INIT_TRAP(&ksi);
364 ksi.ksi_signo = SIGBUS; 364 ksi.ksi_signo = SIGBUS;
365 ksi.ksi_trap = EXC_ALI; 365 ksi.ksi_trap = EXC_ALI;
366 ksi.ksi_addr = (void *)tf->tf_dar; 366 ksi.ksi_addr = (void *)tf->tf_dar;
367 ksi.ksi_code = BUS_ADRALN; 367 ksi.ksi_code = BUS_ADRALN;
368 (*p->p_emul->e_trapsignal)(l, &ksi); 368 (*p->p_emul->e_trapsignal)(l, &ksi);
369 } else 369 } else
370 tf->tf_srr0 += 4; 370 tf->tf_srr0 += 4;
371 break; 371 break;
372 372
373 case EXC_PERF|EXC_USER: 373 case EXC_PERF|EXC_USER:
374 /* Not really, but needed due to how trap_subr.S works */ 374 /* Not really, but needed due to how trap_subr.S works */
375 case EXC_VEC|EXC_USER: 375 case EXC_VEC|EXC_USER:
376 ci->ci_ev_vec.ev_count++; 376 ci->ci_ev_vec.ev_count++;
377#ifdef ALTIVEC 377#ifdef ALTIVEC
378 vec_load(); 378 vec_load();
379 break; 379 break;
380#else 380#else
381 if (cpu_printfataltraps 381 if (cpu_printfataltraps
382 && (p->p_slflag & PSL_TRACED) == 0 382 && (p->p_slflag & PSL_TRACED) == 0
383 && !sigismember(&p->p_sigctx.ps_sigcatch, SIGILL)) { 383 && !sigismember(&p->p_sigctx.ps_sigcatch, SIGILL)) {
384 printf("trap: pid %d.%d (%s): user VEC trap @ %#lx " 384 printf("trap: pid %d.%d (%s): user VEC trap @ %#lx "
385 "(SRR1=%#lx)\n", 385 "(SRR1=%#lx)\n",
386 p->p_pid, l->l_lid, p->p_comm, 386 p->p_pid, l->l_lid, p->p_comm,
387 tf->tf_srr0, tf->tf_srr1); 387 tf->tf_srr0, tf->tf_srr1);
388 } 388 }
389 KSI_INIT_TRAP(&ksi); 389 KSI_INIT_TRAP(&ksi);
390 ksi.ksi_signo = SIGILL; 390 ksi.ksi_signo = SIGILL;
391 ksi.ksi_trap = EXC_PGM; 391 ksi.ksi_trap = EXC_PGM;
392 ksi.ksi_addr = (void *)tf->tf_srr0; 392 ksi.ksi_addr = (void *)tf->tf_srr0;
393 ksi.ksi_code = ILL_ILLOPC; 393 ksi.ksi_code = ILL_ILLOPC;
394 (*p->p_emul->e_trapsignal)(l, &ksi); 394 (*p->p_emul->e_trapsignal)(l, &ksi);
395 break; 395 break;
396#endif 396#endif
397 case EXC_MCHK|EXC_USER: 397 case EXC_MCHK|EXC_USER:
398 ci->ci_ev_umchk.ev_count++; 398 ci->ci_ev_umchk.ev_count++;
399 if (cpu_printfataltraps 399 if (cpu_printfataltraps
400 && (p->p_slflag & PSL_TRACED) == 0 400 && (p->p_slflag & PSL_TRACED) == 0
401 && !sigismember(&p->p_sigctx.ps_sigcatch, SIGBUS)) { 401 && !sigismember(&p->p_sigctx.ps_sigcatch, SIGBUS)) {
402 printf("trap: pid %d (%s): user MCHK trap @ %#lx " 402 printf("trap: pid %d (%s): user MCHK trap @ %#lx "
403 "(SRR1=%#lx)\n", 403 "(SRR1=%#lx)\n",
404 p->p_pid, p->p_comm, tf->tf_srr0, tf->tf_srr1); 404 p->p_pid, p->p_comm, tf->tf_srr0, tf->tf_srr1);
405 } 405 }
406 KSI_INIT_TRAP(&ksi); 406 KSI_INIT_TRAP(&ksi);
407 ksi.ksi_signo = SIGBUS; 407 ksi.ksi_signo = SIGBUS;
408 ksi.ksi_trap = EXC_MCHK; 408 ksi.ksi_trap = EXC_MCHK;
409 ksi.ksi_addr = (void *)tf->tf_srr0; 409 ksi.ksi_addr = (void *)tf->tf_srr0;
410 ksi.ksi_code = BUS_OBJERR; 410 ksi.ksi_code = BUS_OBJERR;
411 (*p->p_emul->e_trapsignal)(l, &ksi); 411 (*p->p_emul->e_trapsignal)(l, &ksi);
412 break; 412 break;
413 413
414 case EXC_PGM|EXC_USER: 414 case EXC_PGM|EXC_USER:
415 ci->ci_ev_pgm.ev_count++; 415 ci->ci_ev_pgm.ev_count++;
416 if (tf->tf_srr1 & 0x00020000) { /* Bit 14 is set if trap */ 416 if (tf->tf_srr1 & 0x00020000) { /* Bit 14 is set if trap */
417 if (p->p_raslist == NULL || 417 if (p->p_raslist == NULL ||
418 ras_lookup(p, (void *)tf->tf_srr0) == (void *) -1) { 418 ras_lookup(p, (void *)tf->tf_srr0) == (void *) -1) {
419 KSI_INIT_TRAP(&ksi); 419 KSI_INIT_TRAP(&ksi);
420 ksi.ksi_signo = SIGTRAP; 420 ksi.ksi_signo = SIGTRAP;
421 ksi.ksi_trap = EXC_PGM; 421 ksi.ksi_trap = EXC_PGM;
422 ksi.ksi_addr = (void *)tf->tf_srr0; 422 ksi.ksi_addr = (void *)tf->tf_srr0;
423 ksi.ksi_code = TRAP_BRKPT; 423 ksi.ksi_code = TRAP_BRKPT;
424 (*p->p_emul->e_trapsignal)(l, &ksi); 424 (*p->p_emul->e_trapsignal)(l, &ksi);
425 } else { 425 } else {
426 /* skip the trap instruction */ 426 /* skip the trap instruction */
427 tf->tf_srr0 += 4; 427 tf->tf_srr0 += 4;
428 } 428 }
429 } else { 429 } else {
430 KSI_INIT_TRAP(&ksi); 430 KSI_INIT_TRAP(&ksi);
431 ksi.ksi_signo = SIGILL; 431 ksi.ksi_signo = SIGILL;
432 ksi.ksi_trap = EXC_PGM; 432 ksi.ksi_trap = EXC_PGM;
433 ksi.ksi_addr = (void *)tf->tf_srr0; 433 ksi.ksi_addr = (void *)tf->tf_srr0;
434 if (tf->tf_srr1 & 0x100000) { 434 if (tf->tf_srr1 & 0x100000) {
435 ksi.ksi_signo = SIGFPE; 435 ksi.ksi_signo = SIGFPE;
436 ksi.ksi_code = fpu_get_fault_code(); 436 ksi.ksi_code = fpu_get_fault_code();
437 } else if (tf->tf_srr1 & 0x40000) { 437 } else if (tf->tf_srr1 & 0x40000) {
438 if (emulated_opcode(l, tf)) { 438 if (emulated_opcode(l, tf)) {
439 tf->tf_srr0 += 4; 439 tf->tf_srr0 += 4;
440 break; 440 break;
441 } 441 }
442 ksi.ksi_code = ILL_PRVOPC; 442 ksi.ksi_code = ILL_PRVOPC;
443 } else 443 } else
444 ksi.ksi_code = ILL_ILLOPC; 444 ksi.ksi_code = ILL_ILLOPC;
445 if (cpu_printfataltraps 445 if (cpu_printfataltraps
446 && (p->p_slflag & PSL_TRACED) == 0 446 && (p->p_slflag & PSL_TRACED) == 0
447 && !sigismember(&p->p_sigctx.ps_sigcatch, 447 && !sigismember(&p->p_sigctx.ps_sigcatch,
448 ksi.ksi_signo)) { 448 ksi.ksi_signo)) {
449 printf("trap: pid %d.%d (%s): user PGM trap @" 449 printf("trap: pid %d.%d (%s): user PGM trap @"
450 " %#lx (SRR1=%#lx)\n", p->p_pid, l->l_lid, 450 " %#lx (SRR1=%#lx)\n", p->p_pid, l->l_lid,
451 p->p_comm, tf->tf_srr0, tf->tf_srr1); 451 p->p_comm, tf->tf_srr0, tf->tf_srr1);
452 } 452 }
453 (*p->p_emul->e_trapsignal)(l, &ksi); 453 (*p->p_emul->e_trapsignal)(l, &ksi);
454 } 454 }
455 break; 455 break;
456 456
457 case EXC_MCHK: { 457 case EXC_MCHK: {
458 struct faultbuf *fb; 458 struct faultbuf *fb;
459 459
460 if ((fb = pcb->pcb_onfault) != NULL) { 460 if ((fb = pcb->pcb_onfault) != NULL) {
461 tf->tf_srr0 = fb->fb_pc; 461 tf->tf_srr0 = fb->fb_pc;
462 tf->tf_fixreg[1] = fb->fb_sp; 462 tf->tf_fixreg[1] = fb->fb_sp;
463 tf->tf_fixreg[2] = fb->fb_r2; 463 tf->tf_fixreg[2] = fb->fb_r2;
464 tf->tf_fixreg[3] = EFAULT; 464 tf->tf_fixreg[3] = EFAULT;
465 tf->tf_cr = fb->fb_cr; 465 tf->tf_cr = fb->fb_cr;
466 memcpy(&tf->tf_fixreg[13], fb->fb_fixreg, 466 memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
467 sizeof(fb->fb_fixreg)); 467 sizeof(fb->fb_fixreg));
468 return; 468 return;
469 } 469 }
470 printf("trap: pid %d.%d (%s): kernel MCHK trap @" 470 printf("trap: pid %d.%d (%s): kernel MCHK trap @"
471 " %#lx (SRR1=%#lx)\n", p->p_pid, l->l_lid, 471 " %#lx (SRR1=%#lx)\n", p->p_pid, l->l_lid,
472 p->p_comm, tf->tf_srr0, tf->tf_srr1); 472 p->p_comm, tf->tf_srr0, tf->tf_srr1);
473 goto brain_damage2; 473 goto brain_damage2;
474 } 474 }
475 case EXC_ALI: 475 case EXC_ALI:
476 printf("trap: pid %d.%d (%s): kernel ALI trap @ %#lx by %#lx " 476 printf("trap: pid %d.%d (%s): kernel ALI trap @ %#lx by %#lx "
477 "(DSISR %#x)\n", p->p_pid, l->l_lid, p->p_comm, 477 "(DSISR %#x)\n", p->p_pid, l->l_lid, p->p_comm,
478 tf->tf_dar, tf->tf_srr0, tf->tf_dsisr); 478 tf->tf_dar, tf->tf_srr0, tf->tf_dsisr);
479 goto brain_damage2; 479 goto brain_damage2;
480 case EXC_PGM: 480 case EXC_PGM:
481 printf("trap: pid %d.%d (%s): kernel PGM trap @" 481 printf("trap: pid %d.%d (%s): kernel PGM trap @"
482 " %#lx (SRR1=%#lx)\n", p->p_pid, l->l_lid, 482 " %#lx (SRR1=%#lx)\n", p->p_pid, l->l_lid,
483 p->p_comm, tf->tf_srr0, tf->tf_srr1); 483 p->p_comm, tf->tf_srr0, tf->tf_srr1);
484 goto brain_damage2; 484 goto brain_damage2;
485 485
486 default: 486 default:
487 printf("trap type %x at %lx\n", type, tf->tf_srr0); 487 printf("trap type %x at %lx\n", type, tf->tf_srr0);
488brain_damage2: 488brain_damage2:
489#ifdef DDBX 489#ifdef DDBX
490 if (kdb_trap(type, tf)) 490 if (kdb_trap(type, tf))
491 return; 491 return;
492#endif 492#endif
493#ifdef TRAP_PANICWAIT 493#ifdef TRAP_PANICWAIT
494 printf("Press a key to panic.\n"); 494 printf("Press a key to panic.\n");
495 cnpollc(1); 495 cnpollc(1);
496 cngetc(); 496 cngetc();
497 cnpollc(0); 497 cnpollc(0);
498#endif 498#endif
499 panic("trap"); 499 panic("trap");
500 } 500 }
501 userret(l, tf); 501 userret(l, tf);
502} 502}
503 503
504#ifdef _LP64 504#ifdef _LP64
505static inline vaddr_t 505static inline vaddr_t
506setusr(vaddr_t uva, size_t *len_p) 506setusr(vaddr_t uva, size_t *len_p)
507{ 507{
508 *len_p = SEGMENT_LENGTH - (uva & ~SEGMENT_MASK); 508 *len_p = SEGMENT_LENGTH - (uva & ~SEGMENT_MASK);
509 return pmap_setusr(uva) + (uva & ~SEGMENT_MASK); 509 return pmap_setusr(uva) + (uva & ~SEGMENT_MASK);
510} 510}
511static void 511static void
512unsetusr(void) 512unsetusr(void)
513{ 513{
514 pmap_unsetusr(); 514 pmap_unsetusr();
515} 515}
516#else 516#else
517static inline vaddr_t 517static inline vaddr_t
518setusr(vaddr_t uva, size_t *len_p) 518setusr(vaddr_t uva, size_t *len_p)
519{ 519{
520 struct pcb *pcb = curpcb; 520 struct pcb *pcb = curpcb;
521 vaddr_t p; 521 vaddr_t p;
522 KASSERT(pcb != NULL); 522 KASSERT(pcb != NULL);
523 KASSERT(pcb->pcb_kmapsr == 0); 523 KASSERT(pcb->pcb_kmapsr == 0);
524 pcb->pcb_kmapsr = USER_SR; 524 pcb->pcb_kmapsr = USER_SR;
525 pcb->pcb_umapsr = uva >> ADDR_SR_SHFT; 525 pcb->pcb_umapsr = uva >> ADDR_SR_SHFT;
526 *len_p = SEGMENT_LENGTH - (uva & ~SEGMENT_MASK); 526 *len_p = SEGMENT_LENGTH - (uva & ~SEGMENT_MASK);
527 p = (USER_SR << ADDR_SR_SHFT) + (uva & ~SEGMENT_MASK); 527 p = (USER_SR << ADDR_SR_SHFT) + (uva & ~SEGMENT_MASK);
528 __asm volatile ("isync; mtsr %0,%1; isync" 528 __asm volatile ("isync; mtsr %0,%1; isync"
529 :: "n"(USER_SR), "r"(pcb->pcb_pm->pm_sr[pcb->pcb_umapsr])); 529 :: "n"(USER_SR), "r"(pcb->pcb_pm->pm_sr[pcb->pcb_umapsr]));
530 return p; 530 return p;
531} 531}
532 532
533static void 533static void
534unsetusr(void) 534unsetusr(void)
535{ 535{
536 curpcb->pcb_kmapsr = 0; 536 curpcb->pcb_kmapsr = 0;
537 __asm volatile ("isync; mtsr %0,%1; isync" 537 __asm volatile ("isync; mtsr %0,%1; isync"
538 :: "n"(USER_SR), "r"(EMPTY_SEGMENT)); 538 :: "n"(USER_SR), "r"(EMPTY_SEGMENT));
539} 539}
540#endif 540#endif
541 541
/*
 * Generate _ufetch_<sz>(), the MD backend of the ufetch_<sz>(9) API:
 * fetch one <sz>-bit value from user address uaddr into *valp.
 *
 * The user address is made kernel-accessible through the USER_SR
 * segment window (setusr()); setfault() arranges for any fault taken
 * during the access to resume here with rv set to an error number.
 * Returns 0 on success, an error number on fault.
 *
 * NOTE(review): on the fault path unsetusr() runs even if the fault
 * hit before setusr() completed -- presumably harmless, it just
 * re-loads the empty segment; confirm.
 */
#define UFETCH(sz) \
int \
_ufetch_ ## sz(const uint ## sz ## _t *uaddr, uint ## sz ## _t *valp) \
{ \
	struct faultbuf env; \
	vaddr_t p; \
	size_t seglen; \
	int rv; \
 \
	if ((rv = setfault(&env)) != 0) { \
		goto out; \
	} \
	p = setusr((vaddr_t)uaddr, &seglen); \
	*valp = *(const volatile uint ## sz ## _t *)p; \
 out: \
	unsetusr(); \
	curpcb->pcb_onfault = 0; \
	return rv; \
}

/* Instantiate the user-fetch routines for each supported width. */
UFETCH(8)
UFETCH(16)
UFETCH(32)
#ifdef _LP64
UFETCH(64)
#endif

#undef UFETCH
570 570
/*
 * Generate _ustore_<sz>(), the MD backend of the ustore_<sz>(9) API:
 * store one <sz>-bit value (val) at user address uaddr.
 *
 * The user address is made kernel-accessible through the USER_SR
 * segment window (setusr()); setfault() arranges for any fault taken
 * during the store to resume here with rv set to an error number.
 * Returns 0 on success, an error number on fault.
 */
#define USTORE(sz) \
int \
_ustore_ ## sz(uint ## sz ## _t *uaddr, uint ## sz ## _t val) \
{ \
	struct faultbuf env; \
	vaddr_t p; \
	size_t seglen; \
	int rv; \
 \
	if ((rv = setfault(&env)) != 0) { \
		goto out; \
	} \
	p = setusr((vaddr_t)uaddr, &seglen); \
	*(volatile uint ## sz ## _t *)p = val; \
 out: \
	unsetusr(); \
	curpcb->pcb_onfault = 0; \
	return rv; \
}

/* Instantiate the user-store routines for each supported width. */
USTORE(8)
USTORE(16)
USTORE(32)
#ifdef _LP64
USTORE(64)
#endif

#undef USTORE
599 599
/*
 * copyin(udaddr, kaddr, len):
 *
 * Copy len bytes from user space (udaddr) into the kernel (kaddr).
 * The user range is mapped one segment's worth at a time through the
 * USER_SR window via setusr(); setfault() catches any fault taken
 * during the memcpy and returns the error here.
 * Returns 0 on success, an error number on fault.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	vaddr_t uva = (vaddr_t) udaddr;
	char *kp = kaddr;
	struct faultbuf env;
	int rv;

	if ((rv = setfault(&env)) != 0) {
		/* Fault taken mid-copy: drop the user-segment mapping. */
		unsetusr();
		goto out;
	}

	while (len > 0) {
		size_t seglen;
		vaddr_t p = setusr(uva, &seglen);
		/* seglen is the room left in this segment; clamp to len. */
		if (seglen > len)
			seglen = len;
		memcpy(kp, (const char *) p, seglen);
		uva += seglen;
		kp += seglen;
		len -= seglen;
		unsetusr();
	}

 out:
	curpcb->pcb_onfault = 0;
	return rv;
}
629 629
/*
 * copyout(kaddr, udaddr, len):
 *
 * Copy len bytes from the kernel (kaddr) out to user space (udaddr).
 * Mirror image of copyin(): the user range is mapped one segment's
 * worth at a time through the USER_SR window via setusr(), with
 * setfault() catching any fault taken during the memcpy.
 * Returns 0 on success, an error number on fault.
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	const char *kp = kaddr;
	vaddr_t uva = (vaddr_t) udaddr;
	struct faultbuf env;
	int rv;

	if ((rv = setfault(&env)) != 0) {
		/* Fault taken mid-copy: drop the user-segment mapping. */
		unsetusr();
		goto out;
	}

	while (len > 0) {
		size_t seglen;
		vaddr_t p = setusr(uva, &seglen);
		/* seglen is the room left in this segment; clamp to len. */
		if (seglen > len)
			seglen = len;
		memcpy((char *)p, kp, seglen);
		uva += seglen;
		kp += seglen;
		len -= seglen;
		unsetusr();
	}

 out:
	curpcb->pcb_onfault = 0;
	return rv;
}
659 659
/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.  Returns 0 on success, an error number if the copy
 * faulted.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct faultbuf env, *oldfault;
	int rv;

	/* Preserve any fault handler already in effect (see above). */
	oldfault = curpcb->pcb_onfault;

	if ((rv = setfault(&env)) == 0)
		memcpy(dst, src, len);

	curpcb->pcb_onfault = oldfault;
	return rv;
}
684 684
#if 0 /* XXX CPU configuration spaghetti */
/*
 * Compare-and-swap a 32-bit word in user space: map the user address
 * through the USER_SR window and hand it to do_ucas_32() (presumably
 * an atomic lwarx/stwcx. sequence -- defined elsewhere; confirm),
 * with setfault() catching any access fault.
 * Currently compiled out; see the XXX above.
 */
int
_ucas_32(volatile uint32_t *uptr, uint32_t old, uint32_t new, uint32_t *ret)
{
	extern int do_ucas_32(volatile int32_t *, int32_t, int32_t, int32_t *);
	vaddr_t uva = (vaddr_t)uptr;
	vaddr_t p;
	struct faultbuf env;
	size_t seglen;
	int rv;

	if ((rv = setfault(&env)) != 0) {
		unsetusr();
		goto out;
	}
	p = setusr(uva, &seglen);
	/* The mapped window must cover the whole word. */
	KASSERT(seglen >= sizeof(*uptr));
	do_ucas_32((void *)p, old, new, ret);
	unsetusr();

out:
	curpcb->pcb_onfault = 0;
	return rv;
}
#endif
710 710
/*
 * badaddr(addr, size):
 *
 * Probe addr with one read of the given size; returns nonzero if the
 * access faults (the address is "bad"), zero otherwise.  Convenience
 * wrapper around badaddr_read() that discards the value read.
 */
int
badaddr(void *addr, size_t size)
{
	return badaddr_read(addr, size, NULL);
}
716 716
/*
 * badaddr_read(addr, size, rptr):
 *
 * Probe addr by performing one read of the given size (1, 2 or 4
 * bytes), catching the machine check raised if the address does not
 * respond.  On success returns 0 and, if rptr is non-NULL, stores the
 * value read there; returns 1 if the access faulted.
 * Panics on any other size.
 */
int
badaddr_read(void *addr, size_t size, int *rptr)
{
	struct faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting. */
	__asm volatile ("sync; isync");

	if (setfault(&env)) {
		/* The probe faulted: clean up and report a bad address. */
		curpcb->pcb_onfault = 0;
		__asm volatile ("sync");
		return 1;
	}

	__asm volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%lu)", (u_long) size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm volatile ("sync; isync");

	curpcb->pcb_onfault = 0;
	__asm volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return 0;
}
760 760
761/* 761/*
762 * For now, this only deals with the particular unaligned access case 762 * For now, this only deals with the particular unaligned access case
763 * that gcc tends to generate. Eventually it should handle all of the 763 * that gcc tends to generate. Eventually it should handle all of the
764 * possibilities that can happen on a 32-bit PowerPC in big-endian mode. 764 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
765 */ 765 */
766 766
767static int 767static int
768fix_unaligned(struct lwp *l, struct trapframe *tf) 768fix_unaligned(struct lwp *l, struct trapframe *tf)
769{ 769{
770 const struct dsi_info* dsi = get_dsi_info(tf->tf_dsisr); 770 const struct dsi_info* dsi = get_dsi_info(tf->tf_dsisr);
771 771
772 if ( !dsi ) 772 if ( !dsi )
773 return -1; 773 return -1;
774 774
775 switch (dsi->indicator) { 775 switch (dsi->indicator) {
776 case EXC_ALI_DCBZ: 776 case EXC_ALI_DCBZ:
777 { 777 {
778 /* 778 /*
779 * The DCBZ (Data Cache Block Zero) instruction 779 * The DCBZ (Data Cache Block Zero) instruction
780 * gives an alignment fault if used on non-cacheable 780 * gives an alignment fault if used on non-cacheable
781 * memory. We handle the fault mainly for the 781 * memory. We handle the fault mainly for the
782 * case when we are running with the cache disabled 782 * case when we are running with the cache disabled
783 * for debugging. 783 * for debugging.
784 */ 784 */
785 static char zeroes[MAXCACHELINESIZE]; 785 static char zeroes[MAXCACHELINESIZE];
786 int error; 786 int error;
787 error = copyout(zeroes, 787 error = copyout(zeroes,
788 (void *)(tf->tf_dar & -curcpu()->ci_ci.dcache_line_size), 788 (void *)(tf->tf_dar & -curcpu()->ci_ci.dcache_line_size),
789 curcpu()->ci_ci.dcache_line_size); 789 curcpu()->ci_ci.dcache_line_size);
790 if (error) 790 if (error)
791 return -1; 791 return -1;
792 return 0; 792 return 0;
793 } 793 }
794 break; 794 break;
795 795
796 case EXC_ALI_LFD: 796 case EXC_ALI_LFD:
797 case EXC_ALI_LFDU: 797 case EXC_ALI_LFDU:
798 case EXC_ALI_LDFX: 798 case EXC_ALI_LDFX:
799 case EXC_ALI_LFDUX: 799 case EXC_ALI_LFDUX:
800 { 800 {
801 struct pcb * const pcb = lwp_getpcb(l); 801 struct pcb * const pcb = lwp_getpcb(l);
802 const int reg = EXC_ALI_RST(tf->tf_dsisr); 802 const int reg = EXC_ALI_RST(tf->tf_dsisr);
803 const int a_reg = EXC_ALI_RA(tf->tf_dsisr); 803 const int a_reg = EXC_ALI_RA(tf->tf_dsisr);
804 uint64_t * const fpreg = &pcb->pcb_fpu.fpreg[reg]; 804 uint64_t * const fpreg = &pcb->pcb_fpu.fpreg[reg];
805 register_t* a_reg_addr = &tf->tf_fixreg[a_reg]; 805 register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
806 806
807 /* 807 /*
808 * Juggle the FPU to ensure that we've initialized 808 * Juggle the FPU to ensure that we've initialized
809 * the FPRs, and that their current state is in 809 * the FPRs, and that their current state is in
810 * the PCB. 810 * the PCB.
811 */ 811 */
812 812
813 KASSERT(l == curlwp); 813 KASSERT(l == curlwp);
814 if (!fpu_used_p(l)) { 814 if (!fpu_used_p(l)) {
815 memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu)); 815 memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
816 fpu_mark_used(l); 816 fpu_mark_used(l);
817 } else { 817 } else {
818 fpu_save(l); 818 fpu_save(l);
819 } 819 }
820 820
821 if (copyin((void *)tf->tf_dar, fpreg, 821 if (copyin((void *)tf->tf_dar, fpreg,
822 sizeof(double)) != 0) 822 sizeof(double)) != 0)
823 return -1; 823 return -1;
824 824
825 if (dsi->flags & DSI_OP_INDEXED) { 825 if (dsi->flags & DSI_OP_INDEXED) {
826 /* do nothing */ 826 /* do nothing */
827 } 827 }
828 828
829 if (dsi->flags & DSI_OP_UPDATE) { 829 if (dsi->flags & DSI_OP_UPDATE) {
830 /* this is valid for 601, but to simplify logic don't pass for any */ 830 /* this is valid for 601, but to simplify logic don't pass for any */
831 if (a_reg == 0) 831 if (a_reg == 0)
832 return -1; 832 return -1;
833 else 833 else
834 *a_reg_addr = tf->tf_dar; 834 *a_reg_addr = tf->tf_dar;
835 } 835 }
836 836
837 fpu_load(); 837 fpu_load();
838 return 0; 838 return 0;
839 } 839 }
840 break; 840 break;
841 841
842 case EXC_ALI_STFD: 842 case EXC_ALI_STFD:
843 case EXC_ALI_STFDU: 843 case EXC_ALI_STFDU:
844 case EXC_ALI_STFDX: 844 case EXC_ALI_STFDX:
845 case EXC_ALI_STFDUX: 845 case EXC_ALI_STFDUX:
846 { 846 {
847 struct pcb * const pcb = lwp_getpcb(l); 847 struct pcb * const pcb = lwp_getpcb(l);
848 const int reg = EXC_ALI_RST(tf->tf_dsisr); 848 const int reg = EXC_ALI_RST(tf->tf_dsisr);
849 const int a_reg = EXC_ALI_RA(tf->tf_dsisr); 849 const int a_reg = EXC_ALI_RA(tf->tf_dsisr);
850 uint64_t * const fpreg = &pcb->pcb_fpu.fpreg[reg]; 850 uint64_t * const fpreg = &pcb->pcb_fpu.fpreg[reg];
851 register_t* a_reg_addr = &tf->tf_fixreg[a_reg]; 851 register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
852 852
853 /* 853 /*
854 * Juggle the FPU to ensure that we've initialized 854 * Juggle the FPU to ensure that we've initialized
855 * the FPRs, and that their current state is in 855 * the FPRs, and that their current state is in
856 * the PCB. 856 * the PCB.
857 */ 857 */
858 858
859 KASSERT(l == curlwp); 859 KASSERT(l == curlwp);
860 if (!fpu_used_p(l)) { 860 if (!fpu_used_p(l)) {
861 memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu)); 861 memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
862 fpu_mark_used(l); 862 fpu_mark_used(l);
863 } else { 863 } else {
864 fpu_save(l); 864 fpu_save(l);
865 } 865 }
866 866
867 if (copyout(fpreg, (void *)tf->tf_dar, 867 if (copyout(fpreg, (void *)tf->tf_dar,
868 sizeof(double)) != 0) 868 sizeof(double)) != 0)
869 return -1; 869 return -1;
870 870
871 if (dsi->flags & DSI_OP_INDEXED) { 871 if (dsi->flags & DSI_OP_INDEXED) {
872 /* do nothing */ 872 /* do nothing */
873 } 873 }
874 874
875 if (dsi->flags & DSI_OP_UPDATE) { 875 if (dsi->flags & DSI_OP_UPDATE) {
876 /* this is valid for 601, but to simplify logic don't pass for any */ 876 /* this is valid for 601, but to simplify logic don't pass for any */
877 if (a_reg == 0) 877 if (a_reg == 0)
878 return -1; 878 return -1;
879 else 879 else
880 *a_reg_addr = tf->tf_dar; 880 *a_reg_addr = tf->tf_dar;
881 } 881 }
882 882
883 fpu_load(); 883 fpu_load();
884 return 0; 884 return 0;
885 } 885 }
886 break; 886 break;
887 887
888 case EXC_ALI_LHZ: 888 case EXC_ALI_LHZ:
889 case EXC_ALI_LHZU: 889 case EXC_ALI_LHZU:
890 case EXC_ALI_LHZX: 890 case EXC_ALI_LHZX:
891 case EXC_ALI_LHZUX: 891 case EXC_ALI_LHZUX:
892 case EXC_ALI_LHA: 892 case EXC_ALI_LHA:
893 case EXC_ALI_LHAU: 893 case EXC_ALI_LHAU:
894 case EXC_ALI_LHAX: 894 case EXC_ALI_LHAX:
895 case EXC_ALI_LHAUX: 895 case EXC_ALI_LHAUX:
896 case EXC_ALI_LHBRX: 896 case EXC_ALI_LHBRX:
897 { 897 {
898 const register_t ea_addr = tf->tf_dar; 898 const register_t ea_addr = tf->tf_dar;
899 const unsigned int t_reg = EXC_ALI_RST(tf->tf_dsisr); 899 const unsigned int t_reg = EXC_ALI_RST(tf->tf_dsisr);
900 const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr); 900 const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr);
901 register_t* t_reg_addr = &tf->tf_fixreg[t_reg]; 901 register_t* t_reg_addr = &tf->tf_fixreg[t_reg];
902 register_t* a_reg_addr = &tf->tf_fixreg[a_reg]; 902 register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
903 903
904 /* load into lower 2 bytes of reg */ 904 /* load into lower 2 bytes of reg */
905 if (copyin((void *)ea_addr, 905 if (copyin((void *)ea_addr,
906 t_reg_addr+2, 906 t_reg_addr+2,
907 sizeof(uint16_t)) != 0) 907 sizeof(uint16_t)) != 0)
908 return -1; 908 return -1;
909 909
910 if (dsi->flags & DSI_OP_UPDATE) { 910 if (dsi->flags & DSI_OP_UPDATE) {
911 /* this is valid for 601, but to simplify logic don't pass for any */ 911 /* this is valid for 601, but to simplify logic don't pass for any */
912 if (a_reg == 0) 912 if (a_reg == 0)
913 return -1; 913 return -1;
914 else 914 else
915 *a_reg_addr = ea_addr; 915 *a_reg_addr = ea_addr;
916 } 916 }
917 917
918 if (dsi->flags & DSI_OP_INDEXED) { 918 if (dsi->flags & DSI_OP_INDEXED) {
919 /* do nothing , indexed address already in ea */ 919 /* do nothing , indexed address already in ea */
920 } 920 }
921 921
922 if (dsi->flags & DSI_OP_ZERO) { 922 if (dsi->flags & DSI_OP_ZERO) {
923 /* clear upper 2 bytes */ 923 /* clear upper 2 bytes */
924 *t_reg_addr &= 0x0000ffff; 924 *t_reg_addr &= 0x0000ffff;
925 } else if (dsi->flags & DSI_OP_ALGEBRAIC) { 925 } else if (dsi->flags & DSI_OP_ALGEBRAIC) {
926 /* sign extend upper 2 bytes */ 926 /* sign extend upper 2 bytes */
927 if (*t_reg_addr & 0x00008000) 927 if (*t_reg_addr & 0x00008000)
928 *t_reg_addr |= 0xffff0000; 928 *t_reg_addr |= 0xffff0000;
929 else 929 else
930 *t_reg_addr &= 0x0000ffff; 930 *t_reg_addr &= 0x0000ffff;
931 } 931 }
932 932
933 if (dsi->flags & DSI_OP_REVERSED) { 933 if (dsi->flags & DSI_OP_REVERSED) {
934 /* reverse lower 2 bytes */ 934 /* reverse lower 2 bytes */
935 uint32_t temp = *t_reg_addr; 935 uint32_t temp = *t_reg_addr;
936 936
937 *t_reg_addr = ((temp & 0x000000ff) << 8 ) | 937 *t_reg_addr = ((temp & 0x000000ff) << 8 ) |
938 ((temp & 0x0000ff00) >> 8 ); 938 ((temp & 0x0000ff00) >> 8 );
939 } 939 }
940 return 0; 940 return 0;
941 } 941 }
942 break; 942 break;
943 943
944 case EXC_ALI_STH: 944 case EXC_ALI_STH:
945 case EXC_ALI_STHU: 945 case EXC_ALI_STHU:
946 case EXC_ALI_STHX: 946 case EXC_ALI_STHX:
947 case EXC_ALI_STHUX: 947 case EXC_ALI_STHUX:
948 case EXC_ALI_STHBRX: 948 case EXC_ALI_STHBRX:
949 { 949 {
950 const register_t ea_addr = tf->tf_dar; 950 const register_t ea_addr = tf->tf_dar;
951 const unsigned int s_reg = EXC_ALI_RST(tf->tf_dsisr); 951 const unsigned int s_reg = EXC_ALI_RST(tf->tf_dsisr);
952 const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr); 952 const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr);
953 register_t* s_reg_addr = &tf->tf_fixreg[s_reg]; 953 register_t* s_reg_addr = &tf->tf_fixreg[s_reg];
954 register_t* a_reg_addr = &tf->tf_fixreg[a_reg]; 954 register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
955 955
956 /* byte-reversed write out of lower 2 bytes */ 956 /* byte-reversed write out of lower 2 bytes */
957 if (dsi->flags & DSI_OP_REVERSED) { 957 if (dsi->flags & DSI_OP_REVERSED) {
958 uint16_t tmp = *s_reg_addr & 0xffff; 958 uint16_t tmp = *s_reg_addr & 0xffff;
959 tmp = bswap16(tmp); 959 tmp = bswap16(tmp);
960 960
961 if (copyout(&tmp, 961 if (copyout(&tmp,
962 (void *)ea_addr, 962 (void *)ea_addr,
963 sizeof(uint16_t)) != 0) 963 sizeof(uint16_t)) != 0)
964 return -1; 964 return -1;
965 } 965 }
966 /* write out lower 2 bytes */ 966 /* write out lower 2 bytes */
967 else if (copyout(s_reg_addr+2, 967 else if (copyout(s_reg_addr+2,
968 (void *)ea_addr, 968 (void *)ea_addr,
969 sizeof(uint16_t)) != 0) { 969 sizeof(uint16_t)) != 0) {
970 return -1; 970 return -1;
971 } 971 }
972 972
973 if (dsi->flags & DSI_OP_INDEXED) { 973 if (dsi->flags & DSI_OP_INDEXED) {
974 /* do nothing, indexed address already in ea */ 974 /* do nothing, indexed address already in ea */
975 } 975 }
976 976
977 if (dsi->flags & DSI_OP_UPDATE) { 977 if (dsi->flags & DSI_OP_UPDATE) {
978 /* this is valid for 601, but to simplify logic don't pass for any */ 978 /* this is valid for 601, but to simplify logic don't pass for any */
979 if (a_reg == 0) 979 if (a_reg == 0)
980 return -1; 980 return -1;
981 else 981 else
982 *a_reg_addr = ea_addr; 982 *a_reg_addr = ea_addr;
983 } 983 }
984 984
985 return 0; 985 return 0;
986 } 986 }
987 break; 987 break;
988 988
989 case EXC_ALI_LWARX_LWZ: 989 case EXC_ALI_LWARX_LWZ:
990 case EXC_ALI_LWZU: 990 case EXC_ALI_LWZU:
991 case EXC_ALI_LWZX: 991 case EXC_ALI_LWZX:
992 case EXC_ALI_LWZUX: 992 case EXC_ALI_LWZUX:
993 case EXC_ALI_LWBRX: 993 case EXC_ALI_LWBRX:
994 { 994 {
995 const register_t ea_addr = tf->tf_dar; 995 const register_t ea_addr = tf->tf_dar;
996 const unsigned int t_reg = EXC_ALI_RST(tf->tf_dsisr); 996 const unsigned int t_reg = EXC_ALI_RST(tf->tf_dsisr);
997 const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr); 997 const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr);
998 register_t* t_reg_addr = &tf->tf_fixreg[t_reg]; 998 register_t* t_reg_addr = &tf->tf_fixreg[t_reg];
999 register_t* a_reg_addr = &tf->tf_fixreg[a_reg]; 999 register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
1000 1000
1001 if (copyin((void *)ea_addr, 1001 if (copyin((void *)ea_addr,
1002 t_reg_addr, 1002 t_reg_addr,
1003 sizeof(uint32_t)) != 0) 1003 sizeof(uint32_t)) != 0)
1004 return -1; 1004 return -1;
1005 1005
1006 if (dsi->flags & DSI_OP_UPDATE) { 1006 if (dsi->flags & DSI_OP_UPDATE) {
1007 /* this is valid for 601, but to simplify logic don't pass for any */ 1007 /* this is valid for 601, but to simplify logic don't pass for any */
1008 if (a_reg == 0) 1008 if (a_reg == 0)
1009 return -1; 1009 return -1;
1010 else 1010 else
1011 *a_reg_addr = ea_addr; 1011 *a_reg_addr = ea_addr;
1012 } 1012 }
1013 1013
1014 if (dsi->flags & DSI_OP_INDEXED) { 1014 if (dsi->flags & DSI_OP_INDEXED) {
1015 /* do nothing , indexed address already in ea */ 1015 /* do nothing , indexed address already in ea */
1016 } 1016 }
1017 1017
1018 if (dsi->flags & DSI_OP_ZERO) { 1018 if (dsi->flags & DSI_OP_ZERO) {
1019 /* XXX - 64bit clear upper word */ 1019 /* XXX - 64bit clear upper word */
1020 } 1020 }
1021 1021
1022 if (dsi->flags & DSI_OP_REVERSED) { 1022 if (dsi->flags & DSI_OP_REVERSED) {
1023 /* reverse bytes */ 1023 /* reverse bytes */
1024 register_t temp = bswap32(*t_reg_addr); 1024 register_t temp = bswap32(*t_reg_addr);
1025 *t_reg_addr = temp; 1025 *t_reg_addr = temp;
1026 } 1026 }
1027 1027
1028 return 0; 1028 return 0;
1029 } 1029 }
1030 break; 1030 break;
1031 1031
1032 case EXC_ALI_STW: 1032 case EXC_ALI_STW:
1033 case EXC_ALI_STWU: 1033 case EXC_ALI_STWU:
1034 case EXC_ALI_STWX: 1034 case EXC_ALI_STWX:
1035 case EXC_ALI_STWUX: 1035 case EXC_ALI_STWUX:
1036 case EXC_ALI_STWBRX: 1036 case EXC_ALI_STWBRX:
1037 { 1037 {
1038 const register_t ea_addr = tf->tf_dar; 1038 const register_t ea_addr = tf->tf_dar;
1039 const unsigned int s_reg = EXC_ALI_RST(tf->tf_dsisr); 1039 const unsigned int s_reg = EXC_ALI_RST(tf->tf_dsisr);
1040 const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr); 1040 const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr);
1041 register_t* s_reg_addr = &tf->tf_fixreg[s_reg]; 1041 register_t* s_reg_addr = &tf->tf_fixreg[s_reg];
1042 register_t* a_reg_addr = &tf->tf_fixreg[a_reg]; 1042 register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
1043 1043
1044 if (dsi->flags & DSI_OP_REVERSED) { 1044 if (dsi->flags & DSI_OP_REVERSED) {
1045 /* byte-reversed write out */ 1045 /* byte-reversed write out */
1046 register_t temp = bswap32(*s_reg_addr); 1046 register_t temp = bswap32(*s_reg_addr);
1047 1047
1048 if (copyout(&temp, 1048 if (copyout(&temp,
1049 (void *)ea_addr, 1049 (void *)ea_addr,
1050 sizeof(uint32_t)) != 0) 1050 sizeof(uint32_t)) != 0)
1051 return -1; 1051 return -1;
1052 } 1052 }
1053 /* write out word */ 1053 /* write out word */
1054 else if (copyout(s_reg_addr, 1054 else if (copyout(s_reg_addr,
1055 (void *)ea_addr, 1055 (void *)ea_addr,
1056 sizeof(uint32_t)) != 0) 1056 sizeof(uint32_t)) != 0)
1057 return -1; 1057 return -1;
1058 1058
1059 if (dsi->flags & DSI_OP_INDEXED) { 1059 if (dsi->flags & DSI_OP_INDEXED) {
1060 /* do nothing, indexed address already in ea */ 1060 /* do nothing, indexed address already in ea */
1061 } 1061 }
1062 1062
1063 if (dsi->flags & DSI_OP_UPDATE) { 1063 if (dsi->flags & DSI_OP_UPDATE) {
1064 /* this is valid for 601, but to simplify logic don't pass for any */ 1064 /* this is valid for 601, but to simplify logic don't pass for any */
1065 if (a_reg == 0) 1065 if (a_reg == 0)
1066 return -1; 1066 return -1;
1067 else 1067 else
1068 *a_reg_addr = ea_addr; 1068 *a_reg_addr = ea_addr;
1069 } 1069 }
1070 1070
1071 return 0; 1071 return 0;
1072 } 1072 }
1073 break; 1073 break;
1074 } 1074 }
1075 1075
1076 return -1; 1076 return -1;
1077} 1077}
1078 1078
/*
 * Emulate the handful of privileged instructions user programs are
 * allowed to get away with: mfspr of the PVR, mfmsr, and mtmsr (the
 * last restricted to the FP exception-mode bits).
 *
 * Returns 1 if the instruction at tf->tf_srr0 was emulated, 0 if it
 * was not handled (or its operand could not be fetched).
 */
int
emulated_opcode(struct lwp *l, struct trapframe *tf)
{
	uint32_t opcode;

	if (copyin((void *)tf->tf_srr0, &opcode, sizeof(opcode)) != 0)
		return 0;

	if (OPC_MFSPR_P(opcode, SPR_PVR)) {
		/* mfspr rN, PVR: hand out the real processor version. */
		__asm ("mfpvr %0" : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)]));
		return 1;
	}

	if (OPC_MFMSR_P(opcode)) {
		/* mfmsr rN: synthesize the user-visible MSR. */
		struct pcb * const pcb = lwp_getpcb(l);
		register_t msr = tf->tf_srr1 & PSL_USERSRR1;

		if (fpu_used_p(l))
			msr |= PSL_FP;
		msr |= (pcb->pcb_flags & (PCB_FE0|PCB_FE1));
#ifdef ALTIVEC
		if (vec_used_p(l))
			msr |= PSL_VEC;
#endif
		tf->tf_fixreg[OPC_MFMSR_REG(opcode)] = msr;
		return 1;
	}

#define OPC_MTMSR_CODE 0x7c000124
#define OPC_MTMSR_MASK 0xfc1fffff
#define OPC_MTMSR OPC_MTMSR_CODE
#define OPC_MTMSR_REG(o) (((o) >> 21) & 0x1f)
#define OPC_MTMSR_P(o) (((o) & OPC_MTMSR_MASK) == OPC_MTMSR_CODE)

	if (OPC_MTMSR_P(opcode)) {
		struct pcb * const pcb = lwp_getpcb(l);
		register_t msr = tf->tf_fixreg[OPC_MTMSR_REG(opcode)];

		/*
		 * Ignore the FP enable bit in the requested MSR.
		 * It might be set in the thread's actual MSR but the
		 * user code isn't allowed to change it.
		 */
		msr &= ~PSL_FP;
#ifdef ALTIVEC
		msr &= ~PSL_VEC;
#endif

		/*
		 * Don't let the user muck with bits he's not allowed to.
		 */
		if (!PSL_USEROK_P(msr))
			return 0;

		/*
		 * For now, only update the FP exception mode.
		 * NOTE(review): the mask here is PSL_FE0|PSL_FE1 while
		 * the mfmsr path above uses PCB_FE0|PCB_FE1 -- presumably
		 * those pairs have identical values; confirm against the
		 * pcb/psl headers.
		 */
		pcb->pcb_flags &= ~(PSL_FE0|PSL_FE1);
		pcb->pcb_flags |= msr & (PSL_FE0|PSL_FE1);

		/*
		 * If we think we have the FPU, update SRR1 too.  If we're
		 * wrong userret() will take care of it.
		 */
		if (tf->tf_srr1 & PSL_FP) {
			tf->tf_srr1 &= ~(PSL_FE0|PSL_FE1);
			tf->tf_srr1 |= msr & (PSL_FE0|PSL_FE1);
		}
		return 1;
	}

	return 0;
}
1152 1152
1153int 1153int
1154copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done) 1154copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done)
1155{ 1155{
1156 vaddr_t uva = (vaddr_t) udaddr; 1156 vaddr_t uva = (vaddr_t) udaddr;
1157 char *kp = kaddr; 1157 char *kp = kaddr;
1158 struct faultbuf env; 1158 struct faultbuf env;
1159 int rv; 1159 int rv;
1160 1160
1161 if ((rv = setfault(&env)) != 0) { 1161 if ((rv = setfault(&env)) != 0) {
1162 unsetusr(); 1162 unsetusr();