Mon Mar 29 03:22:18 2021 UTC ()
(Very) minimal kernel support for dtrace on MIPS; enough for system call
tracing to work, for example.


(simonb)
diff -r1.259 -r1.260 src/sys/arch/mips/mips/trap.c

cvs diff -r1.259 -r1.260 src/sys/arch/mips/mips/trap.c (switch to unified diff)

--- src/sys/arch/mips/mips/trap.c 2021/03/17 11:05:37 1.259
+++ src/sys/arch/mips/mips/trap.c 2021/03/29 03:22:17 1.260
@@ -1,866 +1,877 @@ @@ -1,866 +1,877 @@
1/* $NetBSD: trap.c,v 1.259 2021/03/17 11:05:37 simonb Exp $ */ 1/* $NetBSD: trap.c,v 1.260 2021/03/29 03:22:17 simonb Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1988 University of Utah. 4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1992, 1993 5 * Copyright (c) 1992, 1993
6 * The Regents of the University of California. All rights reserved. 6 * The Regents of the University of California. All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to Berkeley by 8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer 9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department and Ralph Campbell. 10 * Science Department and Ralph Campbell.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
15 * 1. Redistributions of source code must retain the above copyright 15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer. 16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright 17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the 18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution. 19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors 20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software 21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission. 22 * without specific prior written permission.
23 * 23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE. 34 * SUCH DAMAGE.
35 * 35 *
36 * from: Utah Hdr: trap.c 1.32 91/04/06 36 * from: Utah Hdr: trap.c 1.32 91/04/06
37 * 37 *
38 * @(#)trap.c 8.5 (Berkeley) 1/11/94 38 * @(#)trap.c 8.5 (Berkeley) 1/11/94
39 */ 39 */
40 40
41#include <sys/cdefs.h> 41#include <sys/cdefs.h>
42__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.259 2021/03/17 11:05:37 simonb Exp $"); 42__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.260 2021/03/29 03:22:17 simonb Exp $");
43 43
44#include "opt_cputype.h" /* which mips CPU levels do we support? */ 44#include "opt_cputype.h" /* which mips CPU levels do we support? */
45#include "opt_ddb.h" 45#include "opt_ddb.h"
 46#include "opt_dtrace.h"
46#include "opt_kgdb.h" 47#include "opt_kgdb.h"
47#include "opt_multiprocessor.h" 48#include "opt_multiprocessor.h"
48 49
49#include <sys/param.h> 50#include <sys/param.h>
50#include <sys/systm.h> 51#include <sys/systm.h>
51#include <sys/kernel.h> 52#include <sys/kernel.h>
52#include <sys/cpu.h> 53#include <sys/cpu.h>
53#include <sys/proc.h> 54#include <sys/proc.h>
54#include <sys/ras.h> 55#include <sys/ras.h>
55#include <sys/signalvar.h> 56#include <sys/signalvar.h>
56#include <sys/syscall.h> 57#include <sys/syscall.h>
57#include <sys/buf.h> 58#include <sys/buf.h>
58#include <sys/ktrace.h> 59#include <sys/ktrace.h>
59#include <sys/kauth.h> 60#include <sys/kauth.h>
60#include <sys/atomic.h> 61#include <sys/atomic.h>
61 62
62#include <mips/cache.h> 63#include <mips/cache.h>
63#include <mips/locore.h> 64#include <mips/locore.h>
64#include <mips/mips_opcode.h> 65#include <mips/mips_opcode.h>
65 66
66#include <uvm/uvm.h> 67#include <uvm/uvm.h>
67 68
68#include <mips/trap.h> 69#include <mips/trap.h>
69#include <mips/reg.h> 70#include <mips/reg.h>
70#include <mips/regnum.h> /* symbolic register indices */ 71#include <mips/regnum.h> /* symbolic register indices */
71#include <mips/pcb.h> 72#include <mips/pcb.h>
72#include <mips/pte.h> 73#include <mips/pte.h>
73#include <mips/psl.h> 74#include <mips/psl.h>
74#include <mips/userret.h> 75#include <mips/userret.h>
75 76
76#ifdef DDB 77#ifdef DDB
77#include <machine/db_machdep.h> 78#include <machine/db_machdep.h>
78#include <ddb/db_sym.h> 79#include <ddb/db_sym.h>
79#endif 80#endif
80 81
81#ifdef KGDB 82#ifdef KGDB
82#include <sys/kgdb.h> 83#include <sys/kgdb.h>
83#endif 84#endif
84 85
85const char * const trap_names[] = { 86const char * const trap_names[] = {
86 "external interrupt", 87 "external interrupt",
87 "TLB modification", 88 "TLB modification",
88 "TLB miss (load or instr. fetch)", 89 "TLB miss (load or instr. fetch)",
89 "TLB miss (store)", 90 "TLB miss (store)",
90 "address error (load or I-fetch)", 91 "address error (load or I-fetch)",
91 "address error (store)", 92 "address error (store)",
92 "bus error (I-fetch)", 93 "bus error (I-fetch)",
93 "bus error (load or store)", 94 "bus error (load or store)",
94 "system call", 95 "system call",
95 "breakpoint", 96 "breakpoint",
96 "reserved instruction", 97 "reserved instruction",
97 "coprocessor unusable", 98 "coprocessor unusable",
98 "arithmetic overflow", 99 "arithmetic overflow",
99 "r4k trap/r3k reserved 13", 100 "r4k trap/r3k reserved 13",
100 "r4k virtual coherency instruction/r3k reserved 14", 101 "r4k virtual coherency instruction/r3k reserved 14",
101 "r4k floating point/ r3k reserved 15", 102 "r4k floating point/ r3k reserved 15",
102 "mips NMI", 103 "mips NMI",
103 "reserved 17", 104 "reserved 17",
104 "mipsNN cp2 exception", 105 "mipsNN cp2 exception",
105 "mipsNN TLBRI", 106 "mipsNN TLBRI",
106 "mipsNN TLBXI", 107 "mipsNN TLBXI",
107 "reserved 21", 108 "reserved 21",
108 "mips64 MDMX", 109 "mips64 MDMX",
109 "r4k watch", 110 "r4k watch",
110 "mipsNN machine check", 111 "mipsNN machine check",
111 "mipsNN thread", 112 "mipsNN thread",
112 "DSP exception", 113 "DSP exception",
113 "reserved 27", 114 "reserved 27",
114 "reserved 28", 115 "reserved 28",
115 "reserved 29", 116 "reserved 29",
116 "mipsNN cache error", 117 "mipsNN cache error",
117 "r4000 virtual coherency data", 118 "r4000 virtual coherency data",
118}; 119};
119 120
120void trap(uint32_t, uint32_t, vaddr_t, vaddr_t, struct trapframe *); 121void trap(uint32_t, uint32_t, vaddr_t, vaddr_t, struct trapframe *);
121void ast(void); 122void ast(void);
122 123
123#ifdef TRAP_SIGDEBUG 124#ifdef TRAP_SIGDEBUG
124static void sigdebug(const struct trapframe *, const ksiginfo_t *, int, 125static void sigdebug(const struct trapframe *, const ksiginfo_t *, int,
125 vaddr_t); 126 vaddr_t);
126#define SIGDEBUG(a, b, c, d) sigdebug(a, b, c, d) 127#define SIGDEBUG(a, b, c, d) sigdebug(a, b, c, d)
127#else 128#else
128#define SIGDEBUG(a, b, c, d) 129#define SIGDEBUG(a, b, c, d)
129#endif 130#endif
130 131
131/* 132/*
132 * fork syscall returns directly to user process via lwp_trampoline(), 133 * fork syscall returns directly to user process via lwp_trampoline(),
133 * which will be called the very first time when child gets running. 134 * which will be called the very first time when child gets running.
134 */ 135 */
135void 136void
136md_child_return(struct lwp *l) 137md_child_return(struct lwp *l)
137{ 138{
138 struct trapframe *utf = l->l_md.md_utf; 139 struct trapframe *utf = l->l_md.md_utf;
139 140
140 utf->tf_regs[_R_V0] = 0; 141 utf->tf_regs[_R_V0] = 0;
141 utf->tf_regs[_R_V1] = 1; 142 utf->tf_regs[_R_V1] = 1;
142 utf->tf_regs[_R_A3] = 0; 143 utf->tf_regs[_R_A3] = 0;
143 userret(l); 144 userret(l);
144} 145}
145 146
146#ifdef MIPS3_PLUS 147#ifdef MIPS3_PLUS
147#define TRAPTYPE(x) (((x) & MIPS3_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT) 148#define TRAPTYPE(x) (((x) & MIPS3_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT)
148#else 149#else
149#define TRAPTYPE(x) (((x) & MIPS1_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT) 150#define TRAPTYPE(x) (((x) & MIPS1_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT)
150#endif 151#endif
151#define KERNLAND_P(x) ((intptr_t)(x) < 0) 152#define KERNLAND_P(x) ((intptr_t)(x) < 0)
152 153
153/* 154/*
154 * Trap is called from locore to handle most types of processor traps. 155 * Trap is called from locore to handle most types of processor traps.
155 * System calls are broken out for efficiency. MIPS can handle software 156 * System calls are broken out for efficiency. MIPS can handle software
156 * interrupts as a part of real interrupt processing. 157 * interrupts as a part of real interrupt processing.
157 */ 158 */
158void 159void
159trap(uint32_t status, uint32_t cause, vaddr_t vaddr, vaddr_t pc, 160trap(uint32_t status, uint32_t cause, vaddr_t vaddr, vaddr_t pc,
160 struct trapframe *tf) 161 struct trapframe *tf)
161{ 162{
162 struct lwp * const l = curlwp; 163 struct lwp * const l = curlwp;
163 struct proc * const p = curproc; 164 struct proc * const p = curproc;
164 struct trapframe * const utf = l->l_md.md_utf; 165 struct trapframe * const utf = l->l_md.md_utf;
165 struct pcb * const pcb = lwp_getpcb(l); 166 struct pcb * const pcb = lwp_getpcb(l);
166 vm_prot_t ftype; 167 vm_prot_t ftype;
167 ksiginfo_t ksi; 168 ksiginfo_t ksi;
168 extern void fswintrberr(void); 169 extern void fswintrberr(void);
169 void *onfault; 170 void *onfault;
170 InstFmt insn; 171 InstFmt insn;
171 uint32_t instr; 172 uint32_t instr;
172 int type; 173 int type;
173 int rv = 0; 174 int rv = 0;
174 175
175 KSI_INIT_TRAP(&ksi); 176 KSI_INIT_TRAP(&ksi);
176 177
177 curcpu()->ci_data.cpu_ntrap++; 178 curcpu()->ci_data.cpu_ntrap++;
178 if (CPUISMIPS3 && (status & MIPS3_SR_NMI)) { 179 if (CPUISMIPS3 && (status & MIPS3_SR_NMI)) {
179 type = T_NMI; 180 type = T_NMI;
180 } else { 181 } else {
181 type = TRAPTYPE(cause); 182 type = TRAPTYPE(cause);
182 } 183 }
183 if (USERMODE(status)) { 184 if (USERMODE(status)) {
184 tf = utf; 185 tf = utf;
185 type |= T_USER; 186 type |= T_USER;
186 LWP_CACHE_CREDS(l, p); 187 LWP_CACHE_CREDS(l, p);
187 } 188 }
188 189
189 switch (type) { 190 switch (type) {
190 default: 191 default:
191 dopanic: 192 dopanic:
192 (void)splhigh(); 193 (void)splhigh();
193 194
194 /* 195 /*
195 * use snprintf to allow a single, idempotent, readable printf 196 * use snprintf to allow a single, idempotent, readable printf
196 */ 197 */
197 char strbuf[256], *str = strbuf; 198 char strbuf[256], *str = strbuf;
198 int n, sz = sizeof(strbuf); 199 int n, sz = sizeof(strbuf);
199 200
200 n = snprintf(str, sz, "pid %d(%s): ", p->p_pid, p->p_comm); 201 n = snprintf(str, sz, "pid %d(%s): ", p->p_pid, p->p_comm);
201 sz -= n; 202 sz -= n;
202 str += n; 203 str += n;
203 n = snprintf(str, sz, "trap: cpu%d, %s in %s mode\n", 204 n = snprintf(str, sz, "trap: cpu%d, %s in %s mode\n",
204 cpu_number(), trap_names[TRAPTYPE(cause)], 205 cpu_number(), trap_names[TRAPTYPE(cause)],
205 USERMODE(status) ? "user" : "kernel"); 206 USERMODE(status) ? "user" : "kernel");
206 sz -= n; 207 sz -= n;
207 str += n; 208 str += n;
208 n = snprintf(str, sz, "status=%#x, cause=%#x, epc=%#" 209 n = snprintf(str, sz, "status=%#x, cause=%#x, epc=%#"
209 PRIxVADDR ", vaddr=%#" PRIxVADDR "\n", 210 PRIxVADDR ", vaddr=%#" PRIxVADDR "\n",
210 status, cause, pc, vaddr); 211 status, cause, pc, vaddr);
211 sz -= n; 212 sz -= n;
212 str += n; 213 str += n;
213 if (USERMODE(status)) { 214 if (USERMODE(status)) {
214 KASSERT(tf == utf); 215 KASSERT(tf == utf);
215 n = snprintf(str, sz, "frame=%p usp=%#" PRIxREGISTER 216 n = snprintf(str, sz, "frame=%p usp=%#" PRIxREGISTER
216 " ra=%#" PRIxREGISTER "\n", 217 " ra=%#" PRIxREGISTER "\n",
217 tf, tf->tf_regs[_R_SP], tf->tf_regs[_R_RA]); 218 tf, tf->tf_regs[_R_SP], tf->tf_regs[_R_RA]);
218 sz -= n; 219 sz -= n;
219 str += n; 220 str += n;
220 } else { 221 } else {
221 n = snprintf(str, sz, "tf=%p ksp=%p ra=%#" 222 n = snprintf(str, sz, "tf=%p ksp=%p ra=%#"
222 PRIxREGISTER " ppl=%#x\n", tf, 223 PRIxREGISTER " ppl=%#x\n", tf,
223 type == T_NMI 224 type == T_NMI
224 ? (void*)(uintptr_t)tf->tf_regs[_R_SP] 225 ? (void*)(uintptr_t)tf->tf_regs[_R_SP]
225 : tf+1, 226 : tf+1,
226 tf->tf_regs[_R_RA], tf->tf_ppl); 227 tf->tf_regs[_R_RA], tf->tf_ppl);
227 sz -= n; 228 sz -= n;
228 str += n; 229 str += n;
229 } 230 }
230 printf("%s", strbuf); 231 printf("%s", strbuf);
231 232
232 if (type == T_BUS_ERR_IFETCH || type == T_BUS_ERR_LD_ST) 233 if (type == T_BUS_ERR_IFETCH || type == T_BUS_ERR_LD_ST)
233 (void)(*mips_locoresw.lsw_bus_error)(cause); 234 (void)(*mips_locoresw.lsw_bus_error)(cause);
234 235
235#if defined(DDB) 236#if defined(DDB)
236 kdb_trap(type, &tf->tf_registers); 237 kdb_trap(type, &tf->tf_registers);
237 /* XXX force halt XXX */ 238 /* XXX force halt XXX */
238#elif defined(KGDB) 239#elif defined(KGDB)
239 { 240 {
240 extern mips_reg_t kgdb_cause, kgdb_vaddr; 241 extern mips_reg_t kgdb_cause, kgdb_vaddr;
241 struct reg *regs = &ddb_regs; 242 struct reg *regs = &ddb_regs;
242 kgdb_cause = cause; 243 kgdb_cause = cause;
243 kgdb_vaddr = vaddr; 244 kgdb_vaddr = vaddr;
244 245
245 /* 246 /*
246 * init global ddb_regs, used in db_interface.c routines 247 * init global ddb_regs, used in db_interface.c routines
247 * shared between ddb and gdb. Send ddb_regs to gdb so 248 * shared between ddb and gdb. Send ddb_regs to gdb so
248 * that db_machdep.h macros will work with it, and 249 * that db_machdep.h macros will work with it, and
249 * allow gdb to alter the PC. 250 * allow gdb to alter the PC.
250 */ 251 */
251 db_set_ddb_regs(type, tf); 252 db_set_ddb_regs(type, tf);
252 PC_BREAK_ADVANCE(regs); 253 PC_BREAK_ADVANCE(regs);
253 if (kgdb_trap(type, regs)) { 254 if (kgdb_trap(type, regs)) {
254 tf->tf_regs[TF_EPC] = regs->r_regs[_R_PC]; 255 tf->tf_regs[TF_EPC] = regs->r_regs[_R_PC];
255 return; 256 return;
256 } 257 }
257 } 258 }
258#else 259#else
259 panic("trap"); 260 panic("trap");
260#endif 261#endif
261 /*NOTREACHED*/ 262 /*NOTREACHED*/
262 case T_TLB_MOD: 263 case T_TLB_MOD:
263 case T_TLB_MOD+T_USER: { 264 case T_TLB_MOD+T_USER: {
264 const bool user_p = (type & T_USER) || !KERNLAND_P(vaddr); 265 const bool user_p = (type & T_USER) || !KERNLAND_P(vaddr);
265 pmap_t pmap = user_p 266 pmap_t pmap = user_p
266 ? p->p_vmspace->vm_map.pmap 267 ? p->p_vmspace->vm_map.pmap
267 : pmap_kernel(); 268 : pmap_kernel();
268 269
269 kpreempt_disable(); 270 kpreempt_disable();
270 271
271 pt_entry_t * const ptep = pmap_pte_lookup(pmap, vaddr); 272 pt_entry_t * const ptep = pmap_pte_lookup(pmap, vaddr);
272 if (!ptep) 273 if (!ptep)
273 panic("%ctlbmod: %#"PRIxVADDR": no pte", 274 panic("%ctlbmod: %#"PRIxVADDR": no pte",
274 user_p ? 'u' : 'k', vaddr); 275 user_p ? 'u' : 'k', vaddr);
275 pt_entry_t pte = *ptep; 276 pt_entry_t pte = *ptep;
276 if (!pte_valid_p(pte)) { 277 if (!pte_valid_p(pte)) {
277 panic("%ctlbmod: %#"PRIxVADDR": invalid pte %#"PRIx32 278 panic("%ctlbmod: %#"PRIxVADDR": invalid pte %#"PRIx32
278 " @ ptep %p", user_p ? 'u' : 'k', vaddr, 279 " @ ptep %p", user_p ? 'u' : 'k', vaddr,
279 pte_value(pte), ptep); 280 pte_value(pte), ptep);
280 } 281 }
281 if (pte_readonly_p(pte)) { 282 if (pte_readonly_p(pte)) {
282 /* write to read only page */ 283 /* write to read only page */
283 ftype = VM_PROT_WRITE; 284 ftype = VM_PROT_WRITE;
284 kpreempt_enable(); 285 kpreempt_enable();
285 if (user_p) { 286 if (user_p) {
286 goto pagefault; 287 goto pagefault;
287 } else { 288 } else {
288 goto kernelfault; 289 goto kernelfault;
289 } 290 }
290 } 291 }
291 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 292 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
292 UVMHIST_LOG(maphist, "%ctlbmod(va=%#lx, pc=%#lx, tf=%#jx)", 293 UVMHIST_LOG(maphist, "%ctlbmod(va=%#lx, pc=%#lx, tf=%#jx)",
293 user_p ? 'u' : 'k', vaddr, pc, (uintptr_t)tf); 294 user_p ? 'u' : 'k', vaddr, pc, (uintptr_t)tf);
294 if (!pte_modified_p(pte)) { 295 if (!pte_modified_p(pte)) {
295 pte |= mips_pg_m_bit(); 296 pte |= mips_pg_m_bit();
296#ifdef MULTIPROCESSOR 297#ifdef MULTIPROCESSOR
297 atomic_or_32(ptep, mips_pg_m_bit()); 298 atomic_or_32(ptep, mips_pg_m_bit());
298#else 299#else
299 *ptep = pte; 300 *ptep = pte;
300#endif 301#endif
301 } 302 }
302 // We got a TLB MOD exception so we must have a valid ASID 303 // We got a TLB MOD exception so we must have a valid ASID
303 // and there must be a matching entry in the TLB. So when 304 // and there must be a matching entry in the TLB. So when
304 // we try to update it, we better have done it. 305 // we try to update it, we better have done it.
305 KASSERTMSG(pte_valid_p(pte), "%#"PRIx32, pte_value(pte)); 306 KASSERTMSG(pte_valid_p(pte), "%#"PRIx32, pte_value(pte));
306 vaddr = trunc_page(vaddr); 307 vaddr = trunc_page(vaddr);
307 int ok = pmap_tlb_update_addr(pmap, vaddr, pte, 0); 308 int ok = pmap_tlb_update_addr(pmap, vaddr, pte, 0);
308 kpreempt_enable(); 309 kpreempt_enable();
309 if (ok != 1) { 310 if (ok != 1) {
310#if 0 /* PMAP_FAULTINFO? */ 311#if 0 /* PMAP_FAULTINFO? */
311 /* 312 /*
312 * Since we don't block interrupts here, 313 * Since we don't block interrupts here,
313 * this can legitimately happen if we get 314 * this can legitimately happen if we get
314 * a TLB miss that's serviced in an interrupt 315 * a TLB miss that's serviced in an interrupt
315 * hander that happens to randomly evict the 316 * hander that happens to randomly evict the
316 * TLB entry we're concerned about. 317 * TLB entry we're concerned about.
317 */ 318 */
318 printf("pmap_tlb_update_addr(%p,%#" 319 printf("pmap_tlb_update_addr(%p,%#"
319 PRIxVADDR",%#"PRIxPTE", 0) returned %d\n", 320 PRIxVADDR",%#"PRIxPTE", 0) returned %d\n",
320 pmap, vaddr, pte_value(pte), ok); 321 pmap, vaddr, pte_value(pte), ok);
321#endif 322#endif
322 } 323 }
323 paddr_t pa = pte_to_paddr(pte); 324 paddr_t pa = pte_to_paddr(pte);
324 KASSERTMSG(uvm_pageismanaged(pa), 325 KASSERTMSG(uvm_pageismanaged(pa),
325 "%#"PRIxVADDR" pa %#"PRIxPADDR, vaddr, pa); 326 "%#"PRIxVADDR" pa %#"PRIxPADDR, vaddr, pa);
326 pmap_set_modified(pa); 327 pmap_set_modified(pa);
327 if (type & T_USER) 328 if (type & T_USER)
328 userret(l); 329 userret(l);
329 UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 330 UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
330 return; /* GEN */ 331 return; /* GEN */
331 } 332 }
332 case T_TLB_LD_MISS: 333 case T_TLB_LD_MISS:
333 case T_TLB_ST_MISS: 334 case T_TLB_ST_MISS:
334 ftype = (type == T_TLB_LD_MISS) ? VM_PROT_READ : VM_PROT_WRITE; 335 ftype = (type == T_TLB_LD_MISS) ? VM_PROT_READ : VM_PROT_WRITE;
335 if (KERNLAND_P(vaddr)) 336 if (KERNLAND_P(vaddr))
336 goto kernelfault; 337 goto kernelfault;
337 /* 338 /*
338 * It is an error for the kernel to access user space except 339 * It is an error for the kernel to access user space except
339 * through the copyin/copyout routines. 340 * through the copyin/copyout routines.
340 */ 341 */
341 if (pcb->pcb_onfault == NULL) { 342 if (pcb->pcb_onfault == NULL) {
342 goto dopanic; 343 goto dopanic;
343 } 344 }
344 goto pagefault; 345 goto pagefault;
345 case T_TLB_LD_MISS+T_USER: 346 case T_TLB_LD_MISS+T_USER:
346 ftype = VM_PROT_READ; 347 ftype = VM_PROT_READ;
347 goto pagefault; 348 goto pagefault;
348 case T_TLB_ST_MISS+T_USER: 349 case T_TLB_ST_MISS+T_USER:
349 ftype = VM_PROT_WRITE; 350 ftype = VM_PROT_WRITE;
350 pagefault: { 351 pagefault: {
351 const vaddr_t va = trunc_page(vaddr); 352 const vaddr_t va = trunc_page(vaddr);
352 struct vmspace * const vm = p->p_vmspace; 353 struct vmspace * const vm = p->p_vmspace;
353 struct vm_map * const map = &vm->vm_map; 354 struct vm_map * const map = &vm->vm_map;
354#ifdef PMAP_FAULTINFO 355#ifdef PMAP_FAULTINFO
355 struct pcb_faultinfo * const pfi = &pcb->pcb_faultinfo; 356 struct pcb_faultinfo * const pfi = &pcb->pcb_faultinfo;
356#endif 357#endif
357 358
358 kpreempt_disable(); 359 kpreempt_disable();
359#ifdef _LP64 360#ifdef _LP64
360 /* 361 /*
361 * If the pmap has been activated and we allocated the segtab 362 * If the pmap has been activated and we allocated the segtab
362 * for the low 4GB, seg0tab may still be NULL. We can't 363 * for the low 4GB, seg0tab may still be NULL. We can't
363 * really fix this in pmap_enter (we can only update the local 364 * really fix this in pmap_enter (we can only update the local
364 * cpu's cpu_info but not other cpu's) so we need to detect 365 * cpu's cpu_info but not other cpu's) so we need to detect
365 * and fix this here. 366 * and fix this here.
366 */ 367 */
367 struct cpu_info * const ci = curcpu(); 368 struct cpu_info * const ci = curcpu();
368 if ((va >> XSEGSHIFT) == 0 && 369 if ((va >> XSEGSHIFT) == 0 &&
369 __predict_false(ci->ci_pmap_user_seg0tab == NULL 370 __predict_false(ci->ci_pmap_user_seg0tab == NULL
370 && ci->ci_pmap_user_segtab->seg_seg[0] != NULL)) { 371 && ci->ci_pmap_user_segtab->seg_seg[0] != NULL)) {
371 ci->ci_pmap_user_seg0tab = 372 ci->ci_pmap_user_seg0tab =
372 ci->ci_pmap_user_segtab->seg_seg[0]; 373 ci->ci_pmap_user_segtab->seg_seg[0];
373 kpreempt_enable(); 374 kpreempt_enable();
374 if (type & T_USER) { 375 if (type & T_USER) {
375 userret(l); 376 userret(l);
376 } 377 }
377 return; /* GEN */ 378 return; /* GEN */
378 } 379 }
379#endif 380#endif
380 KASSERT(KERNLAND_P(va) || curcpu()->ci_pmap_asid_cur != 0); 381 KASSERT(KERNLAND_P(va) || curcpu()->ci_pmap_asid_cur != 0);
381 pmap_tlb_asid_check(); 382 pmap_tlb_asid_check();
382 kpreempt_enable(); 383 kpreempt_enable();
383 384
384#ifdef PMAP_FAULTINFO 385#ifdef PMAP_FAULTINFO
385 if (p->p_pid == pfi->pfi_lastpid && va == pfi->pfi_faultaddr) { 386 if (p->p_pid == pfi->pfi_lastpid && va == pfi->pfi_faultaddr) {
386 if (++pfi->pfi_repeats > 4) { 387 if (++pfi->pfi_repeats > 4) {
387 tlb_asid_t asid = tlb_get_asid(); 388 tlb_asid_t asid = tlb_get_asid();
388 pt_entry_t *ptep = pfi->pfi_faultptep; 389 pt_entry_t *ptep = pfi->pfi_faultptep;
389 printf("trap: fault #%u (%s/%s) for %#" 390 printf("trap: fault #%u (%s/%s) for %#"
390 PRIxVADDR" (%#"PRIxVADDR") at pc %#" 391 PRIxVADDR" (%#"PRIxVADDR") at pc %#"
391 PRIxVADDR" curpid=%u/%u ptep@%p=%#" 392 PRIxVADDR" curpid=%u/%u ptep@%p=%#"
392 PRIxPTE")\n", pfi->pfi_repeats, 393 PRIxPTE")\n", pfi->pfi_repeats,
393 trap_names[TRAPTYPE(cause)], 394 trap_names[TRAPTYPE(cause)],
394 trap_names[pfi->pfi_faulttype], va, 395 trap_names[pfi->pfi_faulttype], va,
395 vaddr, pc, map->pmap->pm_pai[0].pai_asid, 396 vaddr, pc, map->pmap->pm_pai[0].pai_asid,
396 asid, ptep, ptep ? pte_value(*ptep) : 0); 397 asid, ptep, ptep ? pte_value(*ptep) : 0);
397 if (pfi->pfi_repeats >= 4) { 398 if (pfi->pfi_repeats >= 4) {
398 cpu_Debugger(); 399 cpu_Debugger();
399 } else { 400 } else {
400 pfi->pfi_faulttype = TRAPTYPE(cause); 401 pfi->pfi_faulttype = TRAPTYPE(cause);
401 } 402 }
402 } 403 }
403 } else { 404 } else {
404 pfi->pfi_lastpid = p->p_pid; 405 pfi->pfi_lastpid = p->p_pid;
405 pfi->pfi_faultaddr = va; 406 pfi->pfi_faultaddr = va;
406 pfi->pfi_repeats = 0; 407 pfi->pfi_repeats = 0;
407 pfi->pfi_faultptep = NULL; 408 pfi->pfi_faultptep = NULL;
408 pfi->pfi_faulttype = TRAPTYPE(cause); 409 pfi->pfi_faulttype = TRAPTYPE(cause);
409 } 410 }
410#endif /* PMAP_FAULTINFO */ 411#endif /* PMAP_FAULTINFO */
411 412
412 onfault = pcb->pcb_onfault; 413 onfault = pcb->pcb_onfault;
413 pcb->pcb_onfault = NULL; 414 pcb->pcb_onfault = NULL;
414 rv = uvm_fault(map, va, ftype); 415 rv = uvm_fault(map, va, ftype);
415 pcb->pcb_onfault = onfault; 416 pcb->pcb_onfault = onfault;
416 417
417#if defined(VMFAULT_TRACE) 418#if defined(VMFAULT_TRACE)
418 if (!KERNLAND_P(va)) 419 if (!KERNLAND_P(va))
419 printf( 420 printf(
420 "uvm_fault(%p (pmap %p), %#"PRIxVADDR 421 "uvm_fault(%p (pmap %p), %#"PRIxVADDR
421 " (%"PRIxVADDR"), %d) -> %d at pc %#"PRIxVADDR"\n", 422 " (%"PRIxVADDR"), %d) -> %d at pc %#"PRIxVADDR"\n",
422 map, vm->vm_map.pmap, va, vaddr, ftype, rv, pc); 423 map, vm->vm_map.pmap, va, vaddr, ftype, rv, pc);
423#endif 424#endif
424 /* 425 /*
425 * If this was a stack access we keep track of the maximum 426 * If this was a stack access we keep track of the maximum
426 * accessed stack size. Also, if vm_fault gets a protection 427 * accessed stack size. Also, if vm_fault gets a protection
427 * failure it is due to accessing the stack region outside 428 * failure it is due to accessing the stack region outside
428 * the current limit and we need to reflect that as an access 429 * the current limit and we need to reflect that as an access
429 * error. 430 * error.
430 */ 431 */
431 if ((void *)va >= vm->vm_maxsaddr) { 432 if ((void *)va >= vm->vm_maxsaddr) {
432 if (rv == 0) 433 if (rv == 0)
433 uvm_grow(p, va); 434 uvm_grow(p, va);
434 else if (rv == EACCES) 435 else if (rv == EACCES)
435 rv = EFAULT; 436 rv = EFAULT;
436 } 437 }
437 if (rv == 0) { 438 if (rv == 0) {
438#ifdef PMAP_FAULTINFO 439#ifdef PMAP_FAULTINFO
439 if (pfi->pfi_repeats == 0) { 440 if (pfi->pfi_repeats == 0) {
440 pfi->pfi_faultptep = 441 pfi->pfi_faultptep =
441 pmap_pte_lookup(map->pmap, va); 442 pmap_pte_lookup(map->pmap, va);
442 } 443 }
443 KASSERT(*(pt_entry_t *)pfi->pfi_faultptep); 444 KASSERT(*(pt_entry_t *)pfi->pfi_faultptep);
444#endif 445#endif
445 if (type & T_USER) { 446 if (type & T_USER) {
446 userret(l); 447 userret(l);
447 } 448 }
448 return; /* GEN */ 449 return; /* GEN */
449 } 450 }
450 if ((type & T_USER) == 0) 451 if ((type & T_USER) == 0)
451 goto copyfault; 452 goto copyfault;
452 453
453 KSI_INIT_TRAP(&ksi); 454 KSI_INIT_TRAP(&ksi);
454 switch (rv) { 455 switch (rv) {
455 case EINVAL: 456 case EINVAL:
456 ksi.ksi_signo = SIGBUS; 457 ksi.ksi_signo = SIGBUS;
457 ksi.ksi_code = BUS_ADRERR; 458 ksi.ksi_code = BUS_ADRERR;
458 break; 459 break;
459 case EACCES: 460 case EACCES:
460 ksi.ksi_signo = SIGSEGV; 461 ksi.ksi_signo = SIGSEGV;
461 ksi.ksi_code = SEGV_ACCERR; 462 ksi.ksi_code = SEGV_ACCERR;
462 break; 463 break;
463 case ENOMEM: 464 case ENOMEM:
464 ksi.ksi_signo = SIGKILL; 465 ksi.ksi_signo = SIGKILL;
465 printf("UVM: pid %d.%d (%s), uid %d killed: " 466 printf("UVM: pid %d.%d (%s), uid %d killed: "
466 "out of swap\n", p->p_pid, l->l_lid, p->p_comm, 467 "out of swap\n", p->p_pid, l->l_lid, p->p_comm,
467 l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1); 468 l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
468 break; 469 break;
469 default: 470 default:
470 ksi.ksi_signo = SIGSEGV; 471 ksi.ksi_signo = SIGSEGV;
471 ksi.ksi_code = SEGV_MAPERR; 472 ksi.ksi_code = SEGV_MAPERR;
472 break; 473 break;
473 } 474 }
474 ksi.ksi_trap = type & ~T_USER; 475 ksi.ksi_trap = type & ~T_USER;
475 ksi.ksi_addr = (void *)vaddr; 476 ksi.ksi_addr = (void *)vaddr;
476 break; /* SIGNAL */ 477 break; /* SIGNAL */
477 } 478 }
478 kernelfault: { 479 kernelfault: {
479 onfault = pcb->pcb_onfault; 480 onfault = pcb->pcb_onfault;
480 481
481 pcb->pcb_onfault = NULL; 482 pcb->pcb_onfault = NULL;
482 rv = uvm_fault(kernel_map, trunc_page(vaddr), ftype); 483 rv = uvm_fault(kernel_map, trunc_page(vaddr), ftype);
483 pcb->pcb_onfault = onfault; 484 pcb->pcb_onfault = onfault;
484 if (rv == 0) 485 if (rv == 0)
485 return; /* KERN */ 486 return; /* KERN */
486 goto copyfault; 487 goto copyfault;
487 } 488 }
488 case T_ADDR_ERR_LD: /* misaligned access */ 489 case T_ADDR_ERR_LD: /* misaligned access */
489 case T_ADDR_ERR_ST: /* misaligned access */ 490 case T_ADDR_ERR_ST: /* misaligned access */
490 case T_BUS_ERR_LD_ST: /* BERR asserted to CPU */ 491 case T_BUS_ERR_LD_ST: /* BERR asserted to CPU */
491 onfault = pcb->pcb_onfault; 492 onfault = pcb->pcb_onfault;
492 rv = EFAULT; 493 rv = EFAULT;
493 copyfault: 494 copyfault:
494 if (onfault == NULL) { 495 if (onfault == NULL) {
495 goto dopanic; 496 goto dopanic;
496 } 497 }
497 tf->tf_regs[_R_PC] = (intptr_t)onfault; 498 tf->tf_regs[_R_PC] = (intptr_t)onfault;
498 tf->tf_regs[_R_V0] = rv; 499 tf->tf_regs[_R_V0] = rv;
499 return; /* KERN */ 500 return; /* KERN */
500 501
501 case T_ADDR_ERR_LD+T_USER: /* misaligned or kseg access */ 502 case T_ADDR_ERR_LD+T_USER: /* misaligned or kseg access */
502 case T_ADDR_ERR_ST+T_USER: /* misaligned or kseg access */ 503 case T_ADDR_ERR_ST+T_USER: /* misaligned or kseg access */
503 case T_BUS_ERR_IFETCH+T_USER: /* BERR asserted to CPU */ 504 case T_BUS_ERR_IFETCH+T_USER: /* BERR asserted to CPU */
504 case T_BUS_ERR_LD_ST+T_USER: /* BERR asserted to CPU */ 505 case T_BUS_ERR_LD_ST+T_USER: /* BERR asserted to CPU */
505 ksi.ksi_trap = type & ~T_USER; 506 ksi.ksi_trap = type & ~T_USER;
506 ksi.ksi_addr = (void *)vaddr; 507 ksi.ksi_addr = (void *)vaddr;
507 if (KERNLAND_P(vaddr)) { 508 if (KERNLAND_P(vaddr)) {
508 ksi.ksi_signo = SIGSEGV; 509 ksi.ksi_signo = SIGSEGV;
509 ksi.ksi_code = SEGV_MAPERR; 510 ksi.ksi_code = SEGV_MAPERR;
510 } else { 511 } else {
511 ksi.ksi_signo = SIGBUS; 512 ksi.ksi_signo = SIGBUS;
512 if (type == T_BUS_ERR_IFETCH+T_USER 513 if (type == T_BUS_ERR_IFETCH+T_USER
513 || type == T_BUS_ERR_LD_ST+T_USER) 514 || type == T_BUS_ERR_LD_ST+T_USER)
514 ksi.ksi_code = BUS_OBJERR; 515 ksi.ksi_code = BUS_OBJERR;
515 else 516 else
516 ksi.ksi_code = BUS_ADRALN; 517 ksi.ksi_code = BUS_ADRALN;
517 } 518 }
518 break; /* SIGNAL */ 519 break; /* SIGNAL */
519 520
520 case T_WATCH: 521 case T_WATCH:
521 case T_BREAK: 522 case T_BREAK:
522#if defined(DDB) 523#if defined(DDB)
523 kdb_trap(type, &tf->tf_registers); 524 kdb_trap(type, &tf->tf_registers);
524 return; /* KERN */ 525 return; /* KERN */
525#elif defined(KGDB) 526#elif defined(KGDB)
526 { 527 {
527 extern mips_reg_t kgdb_cause, kgdb_vaddr; 528 extern mips_reg_t kgdb_cause, kgdb_vaddr;
528 struct reg *regs = &ddb_regs; 529 struct reg *regs = &ddb_regs;
529 kgdb_cause = cause; 530 kgdb_cause = cause;
530 kgdb_vaddr = vaddr; 531 kgdb_vaddr = vaddr;
531 532
532 /* 533 /*
533 * init global ddb_regs, used in db_interface.c routines 534 * init global ddb_regs, used in db_interface.c routines
534 * shared between ddb and gdb. Send ddb_regs to gdb so 535 * shared between ddb and gdb. Send ddb_regs to gdb so
535 * that db_machdep.h macros will work with it, and 536 * that db_machdep.h macros will work with it, and
536 * allow gdb to alter the PC. 537 * allow gdb to alter the PC.
537 */ 538 */
538 db_set_ddb_regs(type, &tf->tf_registers); 539 db_set_ddb_regs(type, &tf->tf_registers);
539 PC_BREAK_ADVANCE(regs); 540 PC_BREAK_ADVANCE(regs);
540 if (!kgdb_trap(type, regs)) 541 if (!kgdb_trap(type, regs))
541 printf("kgdb: ignored %s\n", 542 printf("kgdb: ignored %s\n",
542 trap_names[TRAPTYPE(cause)]); 543 trap_names[TRAPTYPE(cause)]);
543 else 544 else
544 tf->tf_regs[_R_PC] = regs->r_regs[_R_PC]; 545 tf->tf_regs[_R_PC] = regs->r_regs[_R_PC];
545 546
546 return; 547 return;
547 } 548 }
548#else 549#else
549 goto dopanic; 550 goto dopanic;
550#endif 551#endif
551 case T_BREAK+T_USER: { 552 case T_BREAK+T_USER: {
552 /* compute address of break instruction */ 553 /* compute address of break instruction */
553 vaddr_t va = pc + (cause & MIPS_CR_BR_DELAY ? sizeof(int) : 0); 554 vaddr_t va = pc + (cause & MIPS_CR_BR_DELAY ? sizeof(int) : 0);
554 555
555 /* read break instruction */ 556 /* read break instruction */
556 instr = mips_ufetch32((void *)va); 557 instr = mips_ufetch32((void *)va);
557 insn.word = instr; 558 insn.word = instr;
558 559
559 if (l->l_md.md_ss_addr != va || instr != MIPS_BREAK_SSTEP) { 560 if (l->l_md.md_ss_addr != va || instr != MIPS_BREAK_SSTEP) {
560 bool advance_pc = false; 561 bool advance_pc = false;
561 562
562 ksi.ksi_trap = type & ~T_USER; 563 ksi.ksi_trap = type & ~T_USER;
563 ksi.ksi_signo = SIGTRAP; 564 ksi.ksi_signo = SIGTRAP;
564 ksi.ksi_addr = (void *)va; 565 ksi.ksi_addr = (void *)va;
565 ksi.ksi_code = TRAP_TRACE; 566 ksi.ksi_code = TRAP_TRACE;
566 567
567 if ((insn.JType.op == OP_SPECIAL) && 568 if ((insn.JType.op == OP_SPECIAL) &&
568 (insn.RType.func == OP_BREAK)) { 569 (insn.RType.func == OP_BREAK)) {
569 int code = (insn.RType.rs << 5) | insn.RType.rt; 570 int code = (insn.RType.rs << 5) | insn.RType.rt;
570 switch (code) { 571 switch (code) {
571 case 0: 572 case 0:
572 /* we broke, skip it to avoid infinite loop */ 573 /* we broke, skip it to avoid infinite loop */
573 advance_pc = true; 574 advance_pc = true;
574 break; 575 break;
575 case MIPS_BREAK_INTOVERFLOW: 576 case MIPS_BREAK_INTOVERFLOW:
576 ksi.ksi_signo = SIGFPE; 577 ksi.ksi_signo = SIGFPE;
577 ksi.ksi_code = FPE_INTOVF; 578 ksi.ksi_code = FPE_INTOVF;
578 advance_pc = true; 579 advance_pc = true;
579 break; 580 break;
580 case MIPS_BREAK_INTDIVZERO: 581 case MIPS_BREAK_INTDIVZERO:
581 ksi.ksi_signo = SIGFPE; 582 ksi.ksi_signo = SIGFPE;
582 ksi.ksi_code = FPE_INTDIV; 583 ksi.ksi_code = FPE_INTDIV;
583 advance_pc = true; 584 advance_pc = true;
584 break; 585 break;
585 default: 586 default:
586 /* do nothing */ 587 /* do nothing */
587 break; 588 break;
588 } 589 }
589 } 590 }
590 591
591 if (advance_pc) 592 if (advance_pc)
592 tf->tf_regs[_R_PC] += 4; 593 tf->tf_regs[_R_PC] += 4;
593 break; 594 break;
594 } 595 }
595 /* 596 /*
596 * Restore original instruction and clear BP 597 * Restore original instruction and clear BP
597 */ 598 */
598 rv = mips_ustore32_isync((void *)va, l->l_md.md_ss_instr); 599 rv = mips_ustore32_isync((void *)va, l->l_md.md_ss_instr);
599 if (rv != 0) { 600 if (rv != 0) {
600 vaddr_t sa, ea; 601 vaddr_t sa, ea;
601 sa = trunc_page(va); 602 sa = trunc_page(va);
602 ea = round_page(va + sizeof(int) - 1); 603 ea = round_page(va + sizeof(int) - 1);
603 rv = uvm_map_protect(&p->p_vmspace->vm_map, 604 rv = uvm_map_protect(&p->p_vmspace->vm_map,
604 sa, ea, VM_PROT_ALL, false); 605 sa, ea, VM_PROT_ALL, false);
605 if (rv == 0) { 606 if (rv == 0) {
606 rv = mips_ustore32_isync((void *)va, 607 rv = mips_ustore32_isync((void *)va,
607 l->l_md.md_ss_instr); 608 l->l_md.md_ss_instr);
608 (void)uvm_map_protect(&p->p_vmspace->vm_map, 609 (void)uvm_map_protect(&p->p_vmspace->vm_map,
609 sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false); 610 sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false);
610 } 611 }
611 } 612 }
612 mips_icache_sync_all(); /* XXXJRT -- necessary? */ 613 mips_icache_sync_all(); /* XXXJRT -- necessary? */
613 mips_dcache_wbinv_all(); /* XXXJRT -- necessary? */ 614 mips_dcache_wbinv_all(); /* XXXJRT -- necessary? */
614 615
615 if (rv != 0) 616 if (rv != 0)
616 printf("Warning: can't restore instruction" 617 printf("Warning: can't restore instruction"
617 " at %#"PRIxVADDR": 0x%x\n", 618 " at %#"PRIxVADDR": 0x%x\n",
618 l->l_md.md_ss_addr, l->l_md.md_ss_instr); 619 l->l_md.md_ss_addr, l->l_md.md_ss_instr);
619 l->l_md.md_ss_addr = 0; 620 l->l_md.md_ss_addr = 0;
620 ksi.ksi_trap = type & ~T_USER; 621 ksi.ksi_trap = type & ~T_USER;
621 ksi.ksi_signo = SIGTRAP; 622 ksi.ksi_signo = SIGTRAP;
622 ksi.ksi_addr = (void *)va; 623 ksi.ksi_addr = (void *)va;
623 ksi.ksi_code = TRAP_BRKPT; 624 ksi.ksi_code = TRAP_BRKPT;
624 break; /* SIGNAL */ 625 break; /* SIGNAL */
625 } 626 }
626 case T_DSP+T_USER: 627 case T_DSP+T_USER:
627#if (MIPS32R2 + MIPS64R2) > 0 628#if (MIPS32R2 + MIPS64R2) > 0
628 if (MIPS_HAS_DSP) { 629 if (MIPS_HAS_DSP) {
629 dsp_load(); 630 dsp_load();
630 userret(l); 631 userret(l);
631 return; /* GEN */ 632 return; /* GEN */
632 } 633 }
633#endif /* (MIPS32R3 + MIPS64R2) > 0 */ 634#endif /* (MIPS32R3 + MIPS64R2) > 0 */
634 /* FALLTHROUGH */ 635 /* FALLTHROUGH */
635 case T_RES_INST+T_USER: 636 case T_RES_INST+T_USER:
636 case T_COP_UNUSABLE+T_USER: 637 case T_COP_UNUSABLE+T_USER:
637#if !defined(FPEMUL) && !defined(NOFPU) 638#if !defined(FPEMUL) && !defined(NOFPU)
638 if (__SHIFTOUT(cause, MIPS_CR_COP_ERR) == MIPS_CR_COP_ERR_CU1) { 639 if (__SHIFTOUT(cause, MIPS_CR_COP_ERR) == MIPS_CR_COP_ERR_CU1) {
639 fpu_load(); /* load FPA */ 640 fpu_load(); /* load FPA */
640 } else 641 } else
641#endif 642#endif
642 { 643 {
643 mips_emul_inst(status, cause, pc, utf); 644 mips_emul_inst(status, cause, pc, utf);
644 } 645 }
645 userret(l); 646 userret(l);
646 return; /* GEN */ 647 return; /* GEN */
647 case T_FPE+T_USER: 648 case T_FPE+T_USER:
648#if defined(FPEMUL) 649#if defined(FPEMUL)
649 mips_emul_inst(status, cause, pc, utf); 650 mips_emul_inst(status, cause, pc, utf);
650#elif !defined(NOFPU) 651#elif !defined(NOFPU)
651 utf->tf_regs[_R_CAUSE] = cause; 652 utf->tf_regs[_R_CAUSE] = cause;
652 mips_fpu_trap(pc, utf); 653 mips_fpu_trap(pc, utf);
653#endif 654#endif
654 userret(l); 655 userret(l);
655 return; /* GEN */ 656 return; /* GEN */
656 case T_OVFLOW+T_USER: 657 case T_OVFLOW+T_USER:
657 case T_TRAP+T_USER: { 658 case T_TRAP+T_USER: {
658 /* compute address of trap/faulting instruction */ 659 /* compute address of trap/faulting instruction */
659 vaddr_t va = pc + (cause & MIPS_CR_BR_DELAY ? sizeof(int) : 0); 660 vaddr_t va = pc + (cause & MIPS_CR_BR_DELAY ? sizeof(int) : 0);
660 bool advance_pc = false; 661 bool advance_pc = false;
661 662
662 /* read break instruction */ 663 /* read break instruction */
663 instr = mips_ufetch32((void *)va); 664 instr = mips_ufetch32((void *)va);
664 insn.word = instr; 665 insn.word = instr;
665 666
666 ksi.ksi_trap = type & ~T_USER; 667 ksi.ksi_trap = type & ~T_USER;
667 ksi.ksi_signo = SIGFPE; 668 ksi.ksi_signo = SIGFPE;
668 ksi.ksi_addr = (void *)(intptr_t)pc /*utf->tf_regs[_R_PC]*/; 669 ksi.ksi_addr = (void *)(intptr_t)pc /*utf->tf_regs[_R_PC]*/;
669 ksi.ksi_code = FPE_FLTOVF; /* XXX */ 670 ksi.ksi_code = FPE_FLTOVF; /* XXX */
670 671
671 if ((insn.JType.op == OP_SPECIAL) && 672 if ((insn.JType.op == OP_SPECIAL) &&
672 (insn.RType.func == OP_TEQ)) { 673 (insn.RType.func == OP_TEQ)) {
673 int code = (insn.RType.rd << 5) | insn.RType.shamt; 674 int code = (insn.RType.rd << 5) | insn.RType.shamt;
674 switch (code) { 675 switch (code) {
675 case MIPS_BREAK_INTOVERFLOW: 676 case MIPS_BREAK_INTOVERFLOW:
676 ksi.ksi_code = FPE_INTOVF; 677 ksi.ksi_code = FPE_INTOVF;
677 advance_pc = true; 678 advance_pc = true;
678 break; 679 break;
679 case MIPS_BREAK_INTDIVZERO: 680 case MIPS_BREAK_INTDIVZERO:
680 ksi.ksi_code = FPE_INTDIV; 681 ksi.ksi_code = FPE_INTDIV;
681 advance_pc = true; 682 advance_pc = true;
682 break; 683 break;
683 } 684 }
684 } 685 }
685 686
686 /* XXX when else do we advance the PC? */ 687 /* XXX when else do we advance the PC? */
687 if (advance_pc) 688 if (advance_pc)
688 tf->tf_regs[_R_PC] += 4; 689 tf->tf_regs[_R_PC] += 4;
689 break; /* SIGNAL */ 690 break; /* SIGNAL */
690 } 691 }
691 } 692 }
692 utf->tf_regs[_R_CAUSE] = cause; 693 utf->tf_regs[_R_CAUSE] = cause;
693 utf->tf_regs[_R_BADVADDR] = vaddr; 694 utf->tf_regs[_R_BADVADDR] = vaddr;
694 SIGDEBUG(utf, &ksi, rv, pc); 695 SIGDEBUG(utf, &ksi, rv, pc);
695 (*p->p_emul->e_trapsignal)(l, &ksi); 696 (*p->p_emul->e_trapsignal)(l, &ksi);
696 if ((type & T_USER) == 0) { 697 if ((type & T_USER) == 0) {
697#ifdef DDB 698#ifdef DDB
698 Debugger(); 699 Debugger();
699#endif 700#endif
700 panic("trapsignal"); 701 panic("trapsignal");
701 } 702 }
702 userret(l); 703 userret(l);
703 return; 704 return;
704} 705}
705 706
706/* 707/*
707 * Handle asynchronous software traps. 708 * Handle asynchronous software traps.
708 * This is called from MachUserIntr() either to deliver signals or 709 * This is called from MachUserIntr() either to deliver signals or
709 * to make involuntary context switch (preemption). 710 * to make involuntary context switch (preemption).
710 */ 711 */
711void 712void
712ast(void) 713ast(void)
713{ 714{
714 struct lwp * const l = curlwp; 715 struct lwp * const l = curlwp;
715 u_int astpending; 716 u_int astpending;
716 717
717 while ((astpending = l->l_md.md_astpending) != 0) { 718 while ((astpending = l->l_md.md_astpending) != 0) {
718 //curcpu()->ci_data.cpu_nast++; 719 //curcpu()->ci_data.cpu_nast++;
719 l->l_md.md_astpending = 0; 720 l->l_md.md_astpending = 0;
720 721
721#ifdef MULTIPROCESSOR 722#ifdef MULTIPROCESSOR
722 { 723 {
723 kpreempt_disable(); 724 kpreempt_disable();
724 struct cpu_info * const ci = l->l_cpu; 725 struct cpu_info * const ci = l->l_cpu;
725 if (ci->ci_tlb_info->ti_synci_page_bitmap != 0) 726 if (ci->ci_tlb_info->ti_synci_page_bitmap != 0)
726 pmap_tlb_syncicache_ast(ci); 727 pmap_tlb_syncicache_ast(ci);
727 kpreempt_enable(); 728 kpreempt_enable();
728 } 729 }
729#endif 730#endif
730 731
731 if (l->l_pflag & LP_OWEUPC) { 732 if (l->l_pflag & LP_OWEUPC) {
732 l->l_pflag &= ~LP_OWEUPC; 733 l->l_pflag &= ~LP_OWEUPC;
733 ADDUPROF(l); 734 ADDUPROF(l);
734 } 735 }
735 736
736 userret(l); 737 userret(l);
737 738
738 if (l->l_cpu->ci_want_resched) { 739 if (l->l_cpu->ci_want_resched) {
739 /* 740 /*
740 * We are being preempted. 741 * We are being preempted.
741 */ 742 */
742 preempt(); 743 preempt();
743 } 744 }
744 } 745 }
745} 746}
746 747
747 748
748/* XXX need to rewrite ancient comment XXX 749/* XXX need to rewrite ancient comment XXX
749 * This routine is called by procxmt() to single step one instruction. 750 * This routine is called by procxmt() to single step one instruction.
750 * We do this by storing a break instruction after the current instruction, 751 * We do this by storing a break instruction after the current instruction,
751 * resuming execution, and then restoring the old instruction. 752 * resuming execution, and then restoring the old instruction.
752 */ 753 */
753int 754int
754mips_singlestep(struct lwp *l) 755mips_singlestep(struct lwp *l)
755{ 756{
756 struct trapframe * const tf = l->l_md.md_utf; 757 struct trapframe * const tf = l->l_md.md_utf;
757 struct proc * const p = l->l_proc; 758 struct proc * const p = l->l_proc;
758 vaddr_t pc, va; 759 vaddr_t pc, va;
759 int rv; 760 int rv;
760 761
761 if (l->l_md.md_ss_addr) { 762 if (l->l_md.md_ss_addr) {
762 printf("SS %s (%d): breakpoint already set at %#"PRIxVADDR"\n", 763 printf("SS %s (%d): breakpoint already set at %#"PRIxVADDR"\n",
763 p->p_comm, p->p_pid, l->l_md.md_ss_addr); 764 p->p_comm, p->p_pid, l->l_md.md_ss_addr);
764 return EFAULT; 765 return EFAULT;
765 } 766 }
766 pc = (vaddr_t)tf->tf_regs[_R_PC]; 767 pc = (vaddr_t)tf->tf_regs[_R_PC];
767 if (mips_ufetch32((void *)pc) != 0) { /* not a NOP instruction */ 768 if (mips_ufetch32((void *)pc) != 0) { /* not a NOP instruction */
768 struct pcb * const pcb = lwp_getpcb(l); 769 struct pcb * const pcb = lwp_getpcb(l);
769 va = mips_emul_branch(tf, pc, PCB_FSR(pcb), true); 770 va = mips_emul_branch(tf, pc, PCB_FSR(pcb), true);
770 } else { 771 } else {
771 va = pc + sizeof(int); 772 va = pc + sizeof(int);
772 } 773 }
773 774
774 /* 775 /*
775 * We can't single-step into a RAS. Check if we're in 776 * We can't single-step into a RAS. Check if we're in
776 * a RAS, and set the breakpoint just past it. 777 * a RAS, and set the breakpoint just past it.
777 */ 778 */
778 if (p->p_raslist != NULL) { 779 if (p->p_raslist != NULL) {
779 while (ras_lookup(p, (void *)va) != (void *)-1) 780 while (ras_lookup(p, (void *)va) != (void *)-1)
780 va += sizeof(int); 781 va += sizeof(int);
781 } 782 }
782 783
783 l->l_md.md_ss_addr = va; 784 l->l_md.md_ss_addr = va;
784 l->l_md.md_ss_instr = mips_ufetch32((void *)va); 785 l->l_md.md_ss_instr = mips_ufetch32((void *)va);
785 rv = mips_ustore32_isync((void *)va, MIPS_BREAK_SSTEP); 786 rv = mips_ustore32_isync((void *)va, MIPS_BREAK_SSTEP);
786 if (rv != 0) { 787 if (rv != 0) {
787 vaddr_t sa, ea; 788 vaddr_t sa, ea;
788 sa = trunc_page(va); 789 sa = trunc_page(va);
789 ea = round_page(va + sizeof(int) - 1); 790 ea = round_page(va + sizeof(int) - 1);
790 rv = uvm_map_protect(&p->p_vmspace->vm_map, 791 rv = uvm_map_protect(&p->p_vmspace->vm_map,
791 sa, ea, VM_PROT_ALL, false); 792 sa, ea, VM_PROT_ALL, false);
792 if (rv == 0) { 793 if (rv == 0) {
793 rv = mips_ustore32_isync((void *)va, 794 rv = mips_ustore32_isync((void *)va,
794 MIPS_BREAK_SSTEP); 795 MIPS_BREAK_SSTEP);
795 (void)uvm_map_protect(&p->p_vmspace->vm_map, 796 (void)uvm_map_protect(&p->p_vmspace->vm_map,
796 sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false); 797 sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false);
797 } 798 }
798 } 799 }
799#if 0 800#if 0
800 printf("SS %s (%d): breakpoint set at %x: %x (pc %x) br %x\n", 801 printf("SS %s (%d): breakpoint set at %x: %x (pc %x) br %x\n",
801 p->p_comm, p->p_pid, p->p_md.md_ss_addr, 802 p->p_comm, p->p_pid, p->p_md.md_ss_addr,
802 p->p_md.md_ss_instr, pc, mips_ufetch32((void *)va)); /* XXX */ 803 p->p_md.md_ss_instr, pc, mips_ufetch32((void *)va)); /* XXX */
803#endif 804#endif
804 return 0; 805 return 0;
805} 806}
806 807
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

/*
 * Minimal dtrace support: these hook pointers are not called from this
 * file yet, but must be defined here so the dtrace/fbt kernel modules
 * can resolve them and install their handlers at module load time.
 */
/* Not used for now, but needed for dtrace/fbt modules */
dtrace_doubletrap_func_t dtrace_doubletrap_func = NULL;
dtrace_trap_func_t dtrace_trap_func = NULL;

/* Presumably set by the fbt module to intercept break traps -- not
 * referenced elsewhere in this file; zero-initialized in BSS. */
int (* dtrace_invop_jump_addr)(struct trapframe *);
#endif /* KDTRACE_HOOKS */
 817
807#ifdef TRAP_SIGDEBUG 818#ifdef TRAP_SIGDEBUG
/*
 * Dump the full register contents of a trapframe to the console,
 * followed by a hexdump of the frame itself.  Debug aid for
 * TRAP_SIGDEBUG kernels; called from sigdebug() below.
 *
 * The n32/n64 ABIs name registers a4-a7 where o32 has t4-t7, hence
 * the conditional middle section.
 *
 * NOTE(review): the pcb argument is currently unused -- presumably
 * intended for dumping FP state; confirm or drop it.
 * NOTE(review): %#018lx assumes the tf_regs elements promote to
 * unsigned long (64-bit); verify this matches mips_reg_t on o32.
 */
static void
frame_dump(const struct trapframe *tf, struct pcb *pcb)
{

	printf("trapframe %p\n", tf);
	printf("ast %#018lx v0 %#018lx v1 %#018lx\n",
	    tf->tf_regs[_R_AST], tf->tf_regs[_R_V0], tf->tf_regs[_R_V1]);
	printf(" a0 %#018lx a1 %#018lx a2 %#018lx\n",
	    tf->tf_regs[_R_A0], tf->tf_regs[_R_A1], tf->tf_regs[_R_A2]);
#if defined(__mips_n32) || defined(__mips_n64)
	printf(" a3 %#018lx a4 %#018lx a5 %#018lx\n",
	    tf->tf_regs[_R_A3], tf->tf_regs[_R_A4], tf->tf_regs[_R_A5]);
	printf(" a6 %#018lx a7 %#018lx t0 %#018lx\n",
	    tf->tf_regs[_R_A6], tf->tf_regs[_R_A7], tf->tf_regs[_R_T0]);
	printf(" t1 %#018lx t2 %#018lx t3 %#018lx\n",
	    tf->tf_regs[_R_T1], tf->tf_regs[_R_T2], tf->tf_regs[_R_T3]);
#else
	printf(" a3 %#018lx t0 %#018lx t1 %#018lx\n",
	    tf->tf_regs[_R_A3], tf->tf_regs[_R_T0], tf->tf_regs[_R_T1]);
	printf(" t2 %#018lx t3 %#018lx t4 %#018lx\n",
	    tf->tf_regs[_R_T2], tf->tf_regs[_R_T3], tf->tf_regs[_R_T4]);
	printf(" t5 %#018lx t6 %#018lx t7 %#018lx\n",
	    tf->tf_regs[_R_T5], tf->tf_regs[_R_T6], tf->tf_regs[_R_T7]);
#endif
	printf(" s0 %#018lx s1 %#018lx s2 %#018lx\n",
	    tf->tf_regs[_R_S0], tf->tf_regs[_R_S1], tf->tf_regs[_R_S2]);
	printf(" s3 %#018lx s4 %#018lx s5 %#018lx\n",
	    tf->tf_regs[_R_S3], tf->tf_regs[_R_S4], tf->tf_regs[_R_S5]);
	printf(" s6 %#018lx s7 %#018lx t8 %#018lx\n",
	    tf->tf_regs[_R_S6], tf->tf_regs[_R_S7], tf->tf_regs[_R_T8]);
	printf(" t9 %#018lx k0 %#018lx k1 %#018lx\n",
	    tf->tf_regs[_R_T9], tf->tf_regs[_R_K0], tf->tf_regs[_R_K1]);
	printf(" gp %#018lx sp %#018lx s8 %#018lx\n",
	    tf->tf_regs[_R_GP], tf->tf_regs[_R_SP], tf->tf_regs[_R_S8]);
	printf(" ra %#018lx sr %#018lx pc %#018lx\n",
	    tf->tf_regs[_R_RA], tf->tf_regs[_R_SR], tf->tf_regs[_R_PC]);
	printf(" mullo %#018lx mulhi %#018lx\n",
	    tf->tf_regs[_R_MULLO], tf->tf_regs[_R_MULHI]);
	printf(" badvaddr %#018lx cause %#018lx\n",
	    tf->tf_regs[_R_BADVADDR], tf->tf_regs[_R_CAUSE]);
	printf("\n");
	/* NOTE(review): despite the label, this dumps 256 bytes starting
	 * at the trapframe itself (which sits at the top of the kstack). */
	hexdump(printf, "Stack dump", tf, 256);
}
851 862
852static void 863static void
853sigdebug(const struct trapframe *tf, const ksiginfo_t *ksi, int e, 864sigdebug(const struct trapframe *tf, const ksiginfo_t *ksi, int e,
854 vaddr_t pc) 865 vaddr_t pc)
855{ 866{
856 struct lwp *l = curlwp; 867 struct lwp *l = curlwp;
857 struct proc *p = l->l_proc; 868 struct proc *p = l->l_proc;
858 869
859 printf("pid %d.%d (%s): signal %d code=%d (trap %#lx) " 870 printf("pid %d.%d (%s): signal %d code=%d (trap %#lx) "
860 "@pc %#lx addr %#lx error=%d\n", 871 "@pc %#lx addr %#lx error=%d\n",
861 p->p_pid, l->l_lid, p->p_comm, ksi->ksi_signo, ksi->ksi_code, 872 p->p_pid, l->l_lid, p->p_comm, ksi->ksi_signo, ksi->ksi_code,
862 tf->tf_regs[_R_CAUSE], (unsigned long)pc, tf->tf_regs[_R_BADVADDR], 873 tf->tf_regs[_R_CAUSE], (unsigned long)pc, tf->tf_regs[_R_BADVADDR],
863 e); 874 e);
864 frame_dump(tf, lwp_getpcb(l)); 875 frame_dump(tf, lwp_getpcb(l));
865} 876}
866#endif 877#endif /* TRAP_SIGDEBUG */