Sun Nov 1 21:09:48 2020 UTC
Update CAUSE_* defines to reflect riscv-privileged-20190608.pdf


(skrll)
diff -r1.5 -r1.6 src/sys/arch/riscv/include/sysreg.h
diff -r1.9 -r1.10 src/sys/arch/riscv/riscv/trap.c
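
For orientation: scause holds an interrupt flag in its most-significant
bit and an exception code (the CAUSE_* values changed below) in the
remaining bits, per riscv-privileged-20190608.pdf. A minimal sketch of
that split (illustration only, not code from this commit):

        /*
         * Hedged sketch: decide whether an scause value is an interrupt.
         * Assumes register_t is the port's signed XLEN-sized type, the
         * same assumption the trap code makes with its "addr < 0" tests.
         */
        static inline bool
        scause_is_interrupt(register_t cause)
        {
                /* MSB set => interrupt; otherwise an exception code. */
                return cause < 0;
        }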

cvs diff -u -r1.5 -r1.6 src/sys/arch/riscv/include/sysreg.h

--- src/sys/arch/riscv/include/sysreg.h 2020/03/14 16:12:16 1.5
+++ src/sys/arch/riscv/include/sysreg.h 2020/11/01 21:09:48 1.6
@@ -1,4 +1,4 @@
-/* $NetBSD: sysreg.h,v 1.5 2020/03/14 16:12:16 skrll Exp $ */
+/* $NetBSD: sysreg.h,v 1.6 2020/11/01 21:09:48 skrll Exp $ */
 
 /*
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -155,18 +155,24 @@
 }
 
 // Cause register
-#define CAUSE_MISALIGNED_FETCH		0
-#define CAUSE_FAULT_FETCH		1
+#define CAUSE_FETCH_MISALIGNED		0
+#define CAUSE_FETCH_ACCESS		1
 #define CAUSE_ILLEGAL_INSTRUCTION	2
-#define CAUSE_PRIVILEGED_INSTRUCTION	3
-#define CAUSE_MISALIGNED_LOAD		4
-#define CAUSE_FAULT_LOAD		5
-#define CAUSE_MISALIGNED_STORE		6
-#define CAUSE_FAULT_STORE		7
+#define CAUSE_BREAKPOINT		3
+#define CAUSE_LOAD_MISALIGNED		4
+#define CAUSE_LOAD_ACCESS		5
+#define CAUSE_STORE_MISALIGNED		6
+#define CAUSE_STORE_ACCESS		7
 #define CAUSE_SYSCALL			8
-#define CAUSE_BREAKPOINT		9
-#define CAUSE_FP_DISABLED		10
-#define CAUSE_ACCELERATOR_DISABLED	12
+#define CAUSE_USER_ECALL		8
+#define CAUSE_SUPERVISOR_ECALL		9
+/* 10 is reserved */
+#define CAUSE_MACHINE_ECALL		11
+#define CAUSE_FETCH_PAGE_FAULT		12
+#define CAUSE_LOAD_PAGE_FAULT		13
+/* 14 is Reserved */
+#define CAUSE_STORE_PAGE_FAULT		15
+/* >= 16 is reserved */
 
 static inline uint64_t
 riscvreg_cycle_read(void)
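
As a usage illustration of the renamed codes (a hypothetical helper,
not part of this commit; note CAUSE_SYSCALL and CAUSE_USER_ECALL are
both 8, so only one of them can appear as a case label):

        /* Hypothetical sketch: name an scause exception code. */
        static const char *
        cause_name(register_t cause)
        {
                switch (cause) {
                case CAUSE_FETCH_MISALIGNED:    return "misaligned fetch";
                case CAUSE_FETCH_ACCESS:        return "fetch access fault";
                case CAUSE_ILLEGAL_INSTRUCTION: return "illegal instruction";
                case CAUSE_BREAKPOINT:          return "breakpoint";
                case CAUSE_LOAD_MISALIGNED:     return "misaligned load";
                case CAUSE_LOAD_ACCESS:         return "load access fault";
                case CAUSE_STORE_MISALIGNED:    return "misaligned store";
                case CAUSE_STORE_ACCESS:        return "store access fault";
                case CAUSE_USER_ECALL:          return "ecall from U-mode";
                case CAUSE_SUPERVISOR_ECALL:    return "ecall from S-mode";
                case CAUSE_MACHINE_ECALL:       return "ecall from M-mode";
                case CAUSE_FETCH_PAGE_FAULT:    return "instruction page fault";
                case CAUSE_LOAD_PAGE_FAULT:     return "load page fault";
                case CAUSE_STORE_PAGE_FAULT:    return "store/AMO page fault";
                default:                        return "reserved";
                }
        }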

cvs diff -u -r1.9 -r1.10 src/sys/arch/riscv/riscv/trap.c

--- src/sys/arch/riscv/riscv/trap.c 2020/11/01 21:06:22 1.9
+++ src/sys/arch/riscv/riscv/trap.c 2020/11/01 21:09:48 1.10
@@ -32,7 +32,7 @@
 #define __PMAP_PRIVATE
 #define __UFETCHSTORE_PRIVATE
 
-__RCSID("$NetBSD: trap.c,v 1.9 2020/11/01 21:06:22 skrll Exp $");
+__RCSID("$NetBSD: trap.c,v 1.10 2020/11/01 21:09:48 skrll Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -46,29 +46,26 @@
 
 #include <riscv/locore.h>
 
-#define INSTRUCTION_TRAP_MASK	(__BIT(CAUSE_PRIVILEGED_INSTRUCTION) \
-				|__BIT(CAUSE_ILLEGAL_INSTRUCTION))
+#define INSTRUCTION_TRAP_MASK	(__BIT(CAUSE_ILLEGAL_INSTRUCTION))
 
-#define FAULT_TRAP_MASK		(__BIT(CAUSE_FAULT_FETCH) \
-				|__BIT(CAUSE_FAULT_LOAD) \
-				|__BIT(CAUSE_FAULT_STORE))
+#define FAULT_TRAP_MASK		(__BIT(CAUSE_FETCH_ACCESS) \
+				|__BIT(CAUSE_LOAD_ACCESS) \
+				|__BIT(CAUSE_STORE_ACCESS))
 
-#define MISALIGNED_TRAP_MASK	(__BIT(CAUSE_MISALIGNED_FETCH) \
-				|__BIT(CAUSE_MISALIGNED_LOAD) \
-				|__BIT(CAUSE_MISALIGNED_STORE))
+#define MISALIGNED_TRAP_MASK	(__BIT(CAUSE_FETCH_MISALIGNED) \
+				|__BIT(CAUSE_LOAD_MISALIGNED) \
+				|__BIT(CAUSE_STORE_MISALIGNED))
 
 static const char * const causenames[] = {
-	[CAUSE_MISALIGNED_FETCH] = "misaligned fetch",
-	[CAUSE_MISALIGNED_LOAD] = "misaligned load",
-	[CAUSE_MISALIGNED_STORE] = "misaligned store",
-	[CAUSE_FAULT_FETCH] = "fetch",
-	[CAUSE_FAULT_LOAD] = "load",
-	[CAUSE_FAULT_STORE] = "store",
-	[CAUSE_FP_DISABLED] = "fp disabled",
+	[CAUSE_FETCH_MISALIGNED] = "misaligned fetch",
+	[CAUSE_LOAD_MISALIGNED] = "misaligned load",
+	[CAUSE_STORE_MISALIGNED] = "misaligned store",
+	[CAUSE_FETCH_ACCESS] = "fetch",
+	[CAUSE_LOAD_ACCESS] = "load",
+	[CAUSE_STORE_ACCESS] = "store",
 	[CAUSE_ILLEGAL_INSTRUCTION] = "illegal instruction",
-	[CAUSE_PRIVILEGED_INSTRUCTION] = "privileged instruction",
 	[CAUSE_BREAKPOINT] = "breakpoint",
 };
 
 void
 cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb)
@@ -220,11 +217,11 @@
 static inline vm_prot_t
 get_faulttype(register_t cause)
 {
-	if (cause == CAUSE_FAULT_LOAD)
+	if (cause == CAUSE_LOAD_ACCESS)
 		return VM_PROT_READ;
-	if (cause == CAUSE_FAULT_STORE)
+	if (cause == CAUSE_STORE_ACCESS)
 		return VM_PROT_READ | VM_PROT_WRITE;
-	KASSERT(cause == CAUSE_FAULT_FETCH);
+	KASSERT(cause == CAUSE_FETCH_ACCESS);
 	return VM_PROT_READ | VM_PROT_EXECUTE;
 }
 
@@ -256,12 +253,12 @@
 			attr |= VM_PAGEMD_REFERENCED;
 		}
 #if 0 /* XXX Outdated */
-		if (cause == CAUSE_FAULT_STORE) {
+		if (cause == CAUSE_STORE_ACCESS) {
 			if ((npte & PTE_NW) != 0) {
 				npte &= ~PTE_NW;
 				attr |= VM_PAGEMD_MODIFIED;
 			}
-		} else if (cause == CAUSE_FAULT_FETCH) {
+		} else if (cause == CAUSE_FETCH_ACCESS) {
 			if ((npte & PTE_NX) != 0) {
 				npte &= ~PTE_NX;
 				attr |= VM_PAGEMD_EXECPAGE;
@@ -349,9 +346,8 @@
 trap_instruction(struct trapframe *tf, register_t epc, register_t status,
     register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi)
 {
-	const bool prvopc_p = (cause == CAUSE_PRIVILEGED_INSTRUCTION);
 	if (usertrap_p) {
-		trap_ksi_init(ksi, SIGILL, prvopc_p ? ILL_PRVOPC : ILL_ILLOPC,
+		trap_ksi_init(ksi, SIGILL, ILL_ILLOPC,
 		    (intptr_t)badaddr, cause);
 	}
 	return false;
@@ -395,12 +391,14 @@
 	} else if (fault_mask & INSTRUCTION_TRAP_MASK) {
 		ok = trap_instruction(tf, epc, status, cause, addr,
 		    usertrap_p, &ksi);
+#if 0
 	} else if (fault_mask && __BIT(CAUSE_FP_DISABLED)) {
 		if (!usertrap_p) {
 			panic("%s: fp used @ %#"PRIxREGISTER" in kernel!",
 			    __func__, tf->tf_pc);
 		}
 		fpu_load();
+#endif
 	} else if (fault_mask & MISALIGNED_TRAP_MASK) {
 		ok = trap_misalignment(tf, epc, status, cause, addr,
 		    usertrap_p, &ksi);
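
For reference, cpu_trap() consumes these masks by turning the cause
into a one-hot bit. A simplified sketch of the technique (not the
kernel's exact code):

        /*
         * One-hot dispatch: valid because every exception code defined
         * in sysreg.h is < 16, so "1U << cause" fits a 32-bit mask.
         * The masks are built with __BIT() and must be tested with the
         * bitwise '&' operator, never the logical '&&'.
         */
        const u_int fault_mask = 1U << cause;

        if (fault_mask & FAULT_TRAP_MASK) {
                /* access fault: try the pmap fixup, then uvm_fault() */
        } else if (fault_mask & INSTRUCTION_TRAP_MASK) {
                /* illegal instruction: SIGILL for user traps */
        } else if (fault_mask & MISALIGNED_TRAP_MASK) {
                /* misaligned access: SIGBUS for user traps */
        }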