Miscellaneous updates to reflect riscv-privileged-20190608.pdf. Some from zmcgrew@

diff -r1.10 -r1.11 src/sys/arch/riscv/conf/GENERIC
(skrll)
--- src/sys/arch/riscv/conf/GENERIC 2019/06/01 12:42:27 1.10
+++ src/sys/arch/riscv/conf/GENERIC 2020/11/04 06:56:56 1.11
@@ -1,172 +1,172 @@ | @@ -1,172 +1,172 @@ | |||
1 | # $NetBSD: GENERIC,v 1.10 2019/06/01 12:42:27 maxv Exp $ | 1 | # $NetBSD: GENERIC,v 1.11 2020/11/04 06:56:56 skrll Exp $ | |
2 | # | 2 | # | |
3 | # GENERIC machine description file | 3 | # GENERIC machine description file | |
4 | # | 4 | # | |
5 | # This machine description file is used to generate the default NetBSD | 5 | # This machine description file is used to generate the default NetBSD | |
6 | # kernel. The generic kernel does not include all options, subsystems | 6 | # kernel. The generic kernel does not include all options, subsystems | |
7 | # and device drivers, but should be useful for most applications. | 7 | # and device drivers, but should be useful for most applications. | |
8 | # | 8 | # | |
9 | # The machine description file can be customised for your specific | 9 | # The machine description file can be customised for your specific | |
10 | # machine to reduce the kernel size and improve its performance. | 10 | # machine to reduce the kernel size and improve its performance. | |
11 | # | 11 | # | |
12 | # For further information on compiling NetBSD kernels, see the config(8) | 12 | # For further information on compiling NetBSD kernels, see the config(8) | |
13 | # man page. | 13 | # man page. | |
14 | # | 14 | # | |
15 | # For further information on hardware support for this architecture, see | 15 | # For further information on hardware support for this architecture, see | |
16 | # the intro(4) man page. For further information about kernel options | 16 | # the intro(4) man page. For further information about kernel options | |
17 | # for this architecture, see the options(4) man page. For an explanation | 17 | # for this architecture, see the options(4) man page. For an explanation | |
18 | # of each device driver in this file see the section 4 man page for the | 18 | # of each device driver in this file see the section 4 man page for the | |
19 | # device. | 19 | # device. | |
20 | 20 | |||
21 | include "arch/riscv/conf/std.riscv64" | 21 | include "arch/riscv/conf/std.riscv64" | |
22 | 22 | |||
23 | options INCLUDE_CONFIG_FILE # embed config file in kernel binary | 23 | options INCLUDE_CONFIG_FILE # embed config file in kernel binary | |
24 | 24 | |||
25 | #ident "GENERIC-$Revision: 1.10 $" | 25 | #ident "GENERIC-$Revision: 1.11 $" | |
26 | 26 | |||
27 | maxusers 64 # estimated number of users | 27 | maxusers 64 # estimated number of users | |
28 | 28 | |||
29 | # Standard system options | 29 | # Standard system options | |
30 | 30 | #options FPE # Floating-point extension support | ||
31 | options NTP # NTP phase/frequency locked loop | 31 | options NTP # NTP phase/frequency locked loop | |
32 | 32 | |||
33 | options KTRACE # system call tracing via ktrace(1) | 33 | options KTRACE # system call tracing via ktrace(1) | |
34 | 34 | |||
35 | # Note: SysV IPC parameters could be changed dynamically, see sysctl(8). | 35 | # Note: SysV IPC parameters could be changed dynamically, see sysctl(8). | |
36 | options SYSVMSG # System V-like message queues | 36 | options SYSVMSG # System V-like message queues | |
37 | options SYSVSEM # System V-like semaphores | 37 | options SYSVSEM # System V-like semaphores | |
38 | options SYSVSHM # System V-like memory sharing | 38 | options SYSVSHM # System V-like memory sharing | |
39 | 39 | |||
40 | options MODULAR # new style module(7) framework | 40 | options MODULAR # new style module(7) framework | |
41 | options MODULAR_DEFAULT_AUTOLOAD | 41 | options MODULAR_DEFAULT_AUTOLOAD | |
42 | options USERCONF # userconf(4) support | 42 | options USERCONF # userconf(4) support | |
43 | #options PIPE_SOCKETPAIR # smaller, but slower pipe(2) | 43 | #options PIPE_SOCKETPAIR # smaller, but slower pipe(2) | |
44 | options SYSCTL_INCLUDE_DESCR # Include sysctl descriptions in kernel | 44 | options SYSCTL_INCLUDE_DESCR # Include sysctl descriptions in kernel | |
45 | 45 | |||
46 | # Alternate buffer queue strategies for better responsiveness under high | 46 | # Alternate buffer queue strategies for better responsiveness under high | |
47 | # disk I/O load. | 47 | # disk I/O load. | |
48 | #options BUFQ_READPRIO | 48 | #options BUFQ_READPRIO | |
49 | options BUFQ_PRIOCSCAN | 49 | options BUFQ_PRIOCSCAN | |
50 | 50 | |||
51 | # Diagnostic/debugging support options | 51 | # Diagnostic/debugging support options | |
52 | options DIAGNOSTIC # inexpensive kernel consistency checks | 52 | options DIAGNOSTIC # inexpensive kernel consistency checks | |
53 | # XXX to be commented out on release branch | 53 | # XXX to be commented out on release branch | |
54 | #options DEBUG # expensive debugging checks/support | 54 | #options DEBUG # expensive debugging checks/support | |
55 | #options LOCKDEBUG # expensive locking checks/support | 55 | #options LOCKDEBUG # expensive locking checks/support | |
56 | 56 | |||
57 | # | 57 | # | |
58 | # Because gcc omits the frame pointer for any -O level, the line below | 58 | # Because gcc omits the frame pointer for any -O level, the line below | |
59 | # is needed to make backtraces in DDB work. | 59 | # is needed to make backtraces in DDB work. | |
60 | # | 60 | # | |
61 | #makeoptions COPTS="-O2 -fno-omit-frame-pointer" | 61 | #makeoptions COPTS="-O2 -fno-omit-frame-pointer" | |
62 | makeoptions COPY_SYMTAB=1 | 62 | makeoptions COPY_SYMTAB=1 | |
63 | options DDB # in-kernel debugger | 63 | options DDB # in-kernel debugger | |
64 | #options DDB_COMMANDONENTER="bt" # execute command when ddb is entered | 64 | #options DDB_COMMANDONENTER="bt" # execute command when ddb is entered | |
65 | #options DDB_ONPANIC=1 # see also sysctl(7): `ddb.onpanic' | 65 | #options DDB_ONPANIC=1 # see also sysctl(7): `ddb.onpanic' | |
66 | options DDB_HISTORY_SIZE=512 # enable history editing in DDB | 66 | options DDB_HISTORY_SIZE=512 # enable history editing in DDB | |
67 | #options KGDB # remote debugger | 67 | #options KGDB # remote debugger | |
68 | #options KGDB_DEVNAME="\"com\"",KGDB_DEVADDR=0x3f8,KGDB_DEVRATE=9600 | 68 | #options KGDB_DEVNAME="\"com\"",KGDB_DEVADDR=0x3f8,KGDB_DEVRATE=9600 | |
69 | #options SYSCALL_STATS # per syscall counts | 69 | #options SYSCALL_STATS # per syscall counts | |
70 | #options SYSCALL_TIMES # per syscall times | 70 | #options SYSCALL_TIMES # per syscall times | |
71 | #options SYSCALL_TIMES_HASCOUNTER # use 'broken' rdtsc (soekris) | 71 | #options SYSCALL_TIMES_HASCOUNTER # use 'broken' rdtsc (soekris) | |
72 | 72 | |||
73 | # Compatibility options | 73 | # Compatibility options | |
74 | include "conf/compat_netbsd70.config" | 74 | include "conf/compat_netbsd70.config" | |
75 | 75 | |||
76 | options COMPAT_OSSAUDIO | 76 | options COMPAT_OSSAUDIO | |
77 | #options COMPAT_NETBSD32 | 77 | #options COMPAT_NETBSD32 | |
78 | #options EXEC_ELF32 | 78 | #options EXEC_ELF32 | |
79 | 79 | |||
80 | # Wedge support | 80 | # Wedge support | |
81 | options DKWEDGE_AUTODISCOVER # Automatically add dk(4) instances | 81 | options DKWEDGE_AUTODISCOVER # Automatically add dk(4) instances | |
82 | options DKWEDGE_METHOD_GPT # Supports GPT partitions as wedges | 82 | options DKWEDGE_METHOD_GPT # Supports GPT partitions as wedges | |
83 | options DKWEDGE_METHOD_BSDLABEL # Support disklabel entries as wedges | 83 | options DKWEDGE_METHOD_BSDLABEL # Support disklabel entries as wedges | |
84 | options DKWEDGE_METHOD_MBR # Support MBR partitions as wedges | 84 | options DKWEDGE_METHOD_MBR # Support MBR partitions as wedges | |
85 | options DKWEDGE_METHOD_APPLE # Support Apple partitions as wedges | 85 | options DKWEDGE_METHOD_APPLE # Support Apple partitions as wedges | |
86 | #options DKWEDGE_METHOD_RDB # Support RDB partitions as wedges | 86 | #options DKWEDGE_METHOD_RDB # Support RDB partitions as wedges | |
87 | 87 | |||
88 | include "conf/filesystems.config" | 88 | include "conf/filesystems.config" | |
89 | 89 | |||
90 | # File system options | 90 | # File system options | |
91 | options QUOTA # legacy UFS quotas | 91 | options QUOTA # legacy UFS quotas | |
92 | options QUOTA2 # new, in-filesystem UFS quotas | 92 | options QUOTA2 # new, in-filesystem UFS quotas | |
93 | #options DISKLABEL_EI # disklabel Endian Independent support | 93 | #options DISKLABEL_EI # disklabel Endian Independent support | |
94 | options FFS_EI # FFS Endian Independent support | 94 | options FFS_EI # FFS Endian Independent support | |
95 | options WAPBL # File system journaling support | 95 | options WAPBL # File system journaling support | |
96 | # Note that UFS_DIRHASH is suspected of causing kernel memory corruption. | 96 | # Note that UFS_DIRHASH is suspected of causing kernel memory corruption. | |
97 | # It is not recommended for general use. | 97 | # It is not recommended for general use. | |
98 | #options UFS_DIRHASH # UFS Large Directory Hashing - Experimental | 98 | #options UFS_DIRHASH # UFS Large Directory Hashing - Experimental | |
99 | options NFSSERVER # Network File System server | 99 | options NFSSERVER # Network File System server | |
100 | #options EXT2FS_SYSTEM_FLAGS # makes ext2fs file flags (append and | 100 | #options EXT2FS_SYSTEM_FLAGS # makes ext2fs file flags (append and | |
101 | # immutable) behave as system flags. | 101 | # immutable) behave as system flags. | |
102 | #options FFS_NO_SNAPSHOT # No FFS snapshot support | 102 | #options FFS_NO_SNAPSHOT # No FFS snapshot support | |
103 | 103 | |||
104 | # Networking options | 104 | # Networking options | |
105 | #options GATEWAY # packet forwarding | 105 | #options GATEWAY # packet forwarding | |
106 | options INET # IP + ICMP + TCP + UDP | 106 | options INET # IP + ICMP + TCP + UDP | |
107 | options INET6 # IPV6 | 107 | options INET6 # IPV6 | |
108 | options IPSEC # IP security | 108 | options IPSEC # IP security | |
109 | #options IPSEC_DEBUG # debug for IP security | 109 | #options IPSEC_DEBUG # debug for IP security | |
110 | #options MPLS # MultiProtocol Label Switching (needs mpls) | 110 | #options MPLS # MultiProtocol Label Switching (needs mpls) | |
111 | #options MROUTING # IP multicast routing | 111 | #options MROUTING # IP multicast routing | |
112 | #options PIM # Protocol Independent Multicast | 112 | #options PIM # Protocol Independent Multicast | |
113 | #options NETATALK # AppleTalk networking protocols | 113 | #options NETATALK # AppleTalk networking protocols | |
114 | #options PPP_BSDCOMP # BSD-Compress compression support for PPP | 114 | #options PPP_BSDCOMP # BSD-Compress compression support for PPP | |
115 | #options PPP_DEFLATE # Deflate compression support for PPP | 115 | #options PPP_DEFLATE # Deflate compression support for PPP | |
116 | #options PPP_FILTER # Active filter support for PPP (requires bpf) | 116 | #options PPP_FILTER # Active filter support for PPP (requires bpf) | |
117 | #options TCP_DEBUG # Record last TCP_NDEBUG packets with SO_DEBUG | 117 | #options TCP_DEBUG # Record last TCP_NDEBUG packets with SO_DEBUG | |
118 | 118 | |||
119 | #options ALTQ # Manipulate network interfaces' output queues | 119 | #options ALTQ # Manipulate network interfaces' output queues | |
120 | #options ALTQ_BLUE # Stochastic Fair Blue | 120 | #options ALTQ_BLUE # Stochastic Fair Blue | |
121 | #options ALTQ_CBQ # Class-Based Queueing | 121 | #options ALTQ_CBQ # Class-Based Queueing | |
122 | #options ALTQ_CDNR # Diffserv Traffic Conditioner | 122 | #options ALTQ_CDNR # Diffserv Traffic Conditioner | |
123 | #options ALTQ_FIFOQ # First-In First-Out Queue | 123 | #options ALTQ_FIFOQ # First-In First-Out Queue | |
124 | #options ALTQ_FLOWVALVE # RED/flow-valve (red-penalty-box) | 124 | #options ALTQ_FLOWVALVE # RED/flow-valve (red-penalty-box) | |
125 | #options ALTQ_HFSC # Hierarchical Fair Service Curve | 125 | #options ALTQ_HFSC # Hierarchical Fair Service Curve | |
126 | #options ALTQ_LOCALQ # Local queueing discipline | 126 | #options ALTQ_LOCALQ # Local queueing discipline | |
127 | #options ALTQ_PRIQ # Priority Queueing | 127 | #options ALTQ_PRIQ # Priority Queueing | |
128 | #options ALTQ_RED # Random Early Detection | 128 | #options ALTQ_RED # Random Early Detection | |
129 | #options ALTQ_RIO # RED with IN/OUT | 129 | #options ALTQ_RIO # RED with IN/OUT | |
130 | #options ALTQ_WFQ # Weighted Fair Queueing | 130 | #options ALTQ_WFQ # Weighted Fair Queueing | |
131 | 131 | |||
132 | # These options enable verbose messages for several subsystems. | 132 | # These options enable verbose messages for several subsystems. | |
133 | # Warning, these may compile large string tables into the kernel! | 133 | # Warning, these may compile large string tables into the kernel! | |
134 | 134 | |||
135 | # Kernel root file system and dump configuration. | 135 | # Kernel root file system and dump configuration. | |
136 | config netbsd root on ? type ? | 136 | config netbsd root on ? type ? | |
137 | 137 | |||
138 | # | 138 | # | |
139 | # Device configuration | 139 | # Device configuration | |
140 | # | 140 | # | |
141 | mainbus0 at root | 141 | mainbus0 at root | |
142 | cpu0 at mainbus0 | 142 | cpu0 at mainbus0 | |
143 | htif0 at mainbus0 | 143 | htif0 at mainbus0 | |
144 | htifcons0 at htif0 | 144 | htifcons0 at htif0 | |
145 | htifdisk0 at htif0 | 145 | htifdisk0 at htif0 | |
146 | ld0 at htifdisk0 | 146 | ld0 at htifdisk0 | |
147 | 147 | |||
148 | 148 | |||
149 | # Pseudo-Devices | 149 | # Pseudo-Devices | |
150 | 150 | |||
151 | pseudo-device crypto # /dev/crypto device | 151 | pseudo-device crypto # /dev/crypto device | |
152 | pseudo-device swcrypto # software crypto implementation | 152 | pseudo-device swcrypto # software crypto implementation | |
153 | 153 | |||
154 | # disk/mass storage pseudo-devices | 154 | # disk/mass storage pseudo-devices | |
155 | #pseudo-device md # memory disk device (ramdisk) | 155 | #pseudo-device md # memory disk device (ramdisk) | |
156 | #options MEMORY_DISK_HOOKS # enable root ramdisk | 156 | #options MEMORY_DISK_HOOKS # enable root ramdisk | |
157 | #options MEMORY_DISK_DYNAMIC # loaded via kernel module(7) | 157 | #options MEMORY_DISK_DYNAMIC # loaded via kernel module(7) | |
158 | 158 | |||
159 | pseudo-device vnd # disk-like interface to files | 159 | pseudo-device vnd # disk-like interface to files | |
160 | options VND_COMPRESSION # compressed vnd(4) | 160 | options VND_COMPRESSION # compressed vnd(4) | |
161 | 161 | |||
162 | pseudo-device loop # network loopback | 162 | pseudo-device loop # network loopback | |
163 | pseudo-device pty # pseudo-terminals | 163 | pseudo-device pty # pseudo-terminals | |
164 | pseudo-device clockctl # user control of clock subsystem | 164 | pseudo-device clockctl # user control of clock subsystem | |
165 | pseudo-device ksyms # /dev/ksyms | 165 | pseudo-device ksyms # /dev/ksyms | |
166 | pseudo-device lockstat # lock profiling | 166 | pseudo-device lockstat # lock profiling | |
167 | 167 | |||
168 | # userland interface to drivers, including autoconf and properties retrieval | 168 | # userland interface to drivers, including autoconf and properties retrieval | |
169 | pseudo-device drvctl | 169 | pseudo-device drvctl | |
170 | 170 | |||
171 | options PAX_MPROTECT=0 # PaX mprotect(2) restrictions | 171 | options PAX_MPROTECT=0 # PaX mprotect(2) restrictions | |
172 | options PAX_ASLR=0 # PaX Address Space Layout Randomization | 172 | options PAX_ASLR=0 # PaX Address Space Layout Randomization |
--- src/sys/arch/riscv/conf/files.riscv 2020/10/21 13:31:51 1.6
+++ src/sys/arch/riscv/conf/files.riscv 2020/11/04 06:56:56 1.7
@@ -1,74 +1,74 @@ | @@ -1,74 +1,74 @@ | |||
1 | # $NetBSD: files.riscv,v 1.6 2020/10/21 13:31:51 christos Exp $ | 1 | # $NetBSD: files.riscv,v 1.7 2020/11/04 06:56:56 skrll Exp $ | |
2 | # | 2 | # | |
3 | 3 | |||
4 | maxpartitions 16 | 4 | maxpartitions 16 | |
5 | maxusers 8 32 64 | 5 | maxusers 8 32 64 | |
6 | 6 | |||
7 | defflag opt_ddb.h DDB_TRACE | 7 | defflag opt_ddb.h DDB_TRACE | |
8 | 8 | |||
9 | #file arch/riscv/riscv/locore.S | 9 | #file arch/riscv/riscv/locore.S | |
10 | file arch/riscv/riscv/spl.S | 10 | file arch/riscv/riscv/spl.S | |
11 | 11 | |||
12 | file arch/riscv/riscv/autoconf.c | 12 | file arch/riscv/riscv/autoconf.c | |
13 | file arch/riscv/riscv/cpu_subr.c | 13 | file arch/riscv/riscv/cpu_subr.c | |
14 | file arch/riscv/riscv/db_disasm.c ddb | 14 | file arch/riscv/riscv/db_disasm.c ddb | |
15 | file arch/riscv/riscv/db_trace.c ddb | 15 | file arch/riscv/riscv/db_trace.c ddb | |
16 | file arch/riscv/riscv/fixup.c | 16 | file arch/riscv/riscv/fixup.c | |
17 | file arch/riscv/riscv/fpu.c | 17 | file arch/riscv/riscv/fpu.c fpe | |
18 | file arch/riscv/riscv/ipifuncs.c multiprocessor | 18 | file arch/riscv/riscv/ipifuncs.c multiprocessor | |
19 | file arch/riscv/riscv/stubs.c | 19 | file arch/riscv/riscv/stubs.c | |
20 | file arch/riscv/riscv/syscall.c # syscall handler | 20 | file arch/riscv/riscv/syscall.c # syscall handler | |
21 | file arch/riscv/riscv/trap.c # trap handlers | 21 | file arch/riscv/riscv/trap.c # trap handlers | |
22 | 22 | |||
23 | file arch/riscv/riscv/core_machdep.c coredump | 23 | file arch/riscv/riscv/core_machdep.c coredump | |
24 | file arch/riscv/riscv/clock_machdep.c | 24 | file arch/riscv/riscv/clock_machdep.c | |
25 | file arch/riscv/riscv/db_machdep.c ddb | kgdb | 25 | file arch/riscv/riscv/db_machdep.c ddb | kgdb | |
26 | file arch/riscv/riscv/exec_machdep.c | 26 | file arch/riscv/riscv/exec_machdep.c | |
27 | file arch/riscv/riscv/kgdb_machdep.c kgdb | 27 | file arch/riscv/riscv/kgdb_machdep.c kgdb | |
28 | file arch/riscv/riscv/kobj_machdep.c modular | 28 | file arch/riscv/riscv/kobj_machdep.c modular | |
29 | file arch/riscv/riscv/pmap_machdep.c | 29 | file arch/riscv/riscv/pmap_machdep.c | |
30 | file arch/riscv/riscv/process_machdep.c | 30 | file arch/riscv/riscv/process_machdep.c | |
31 | file arch/riscv/riscv/procfs_machdep.c procfs | 31 | file arch/riscv/riscv/procfs_machdep.c procfs | |
32 | file arch/riscv/riscv/riscv_machdep.c | 32 | file arch/riscv/riscv/riscv_machdep.c | |
33 | file arch/riscv/riscv/sig_machdep.c # signal delivery | 33 | file arch/riscv/riscv/sig_machdep.c # signal delivery | |
34 | file arch/riscv/riscv/softint_machdep.c | 34 | file arch/riscv/riscv/softint_machdep.c | |
35 | file arch/riscv/riscv/sys_machdep.c | 35 | file arch/riscv/riscv/sys_machdep.c | |
36 | file arch/riscv/riscv/vm_machdep.c | 36 | file arch/riscv/riscv/vm_machdep.c | |
37 | 37 | |||
38 | file dev/cons.c | 38 | file dev/cons.c | |
39 | file dev/md_root.c memory_disk_hooks | 39 | file dev/md_root.c memory_disk_hooks | |
40 | 40 | |||
41 | file kern/subr_disk_mbr.c disk | 41 | file kern/subr_disk_mbr.c disk | |
42 | 42 | |||
43 | file uvm/pmap/pmap.c | 43 | file uvm/pmap/pmap.c | |
44 | file uvm/pmap/pmap_segtab.c | 44 | file uvm/pmap/pmap_segtab.c | |
45 | file uvm/pmap/pmap_tlb.c | 45 | file uvm/pmap/pmap_tlb.c | |
46 | 46 | |||
47 | # Binary compatibility with 32bit NetBSD (COMPAT_NETBSD32) | 47 | # Binary compatibility with 32bit NetBSD (COMPAT_NETBSD32) | |
48 | file arch/riscv/riscv/core32_machdep.c compat_netbsd32 & coredump | 48 | file arch/riscv/riscv/core32_machdep.c compat_netbsd32 & coredump | |
49 | file arch/riscv/riscv/netbsd32_machdep.c compat_netbsd32 | 49 | file arch/riscv/riscv/netbsd32_machdep.c compat_netbsd32 | |
50 | file arch/riscv/riscv/sig32_machdep.c compat_netbsd32 | 50 | file arch/riscv/riscv/sig32_machdep.c compat_netbsd32 | |
51 | include "compat/netbsd32/files.netbsd32" | 51 | include "compat/netbsd32/files.netbsd32" | |
52 | 52 | |||
53 | device mainbus { [instance=-1] } | 53 | device mainbus { [instance=-1] } | |
54 | attach mainbus at root | 54 | attach mainbus at root | |
55 | file arch/riscv/riscv/mainbus.c mainbus | 55 | file arch/riscv/riscv/mainbus.c mainbus | |
56 | 56 | |||
57 | device cpu | 57 | device cpu | |
58 | attach cpu at mainbus with cpu_mainbus | 58 | attach cpu at mainbus with cpu_mainbus | |
59 | file arch/riscv/riscv/cpu_mainbus.c cpu_mainbus | 59 | file arch/riscv/riscv/cpu_mainbus.c cpu_mainbus | |
60 | 60 | |||
61 | device htif { } | 61 | device htif { } | |
62 | attach htif at mainbus with htif_mainbus | 62 | attach htif at mainbus with htif_mainbus | |
63 | file arch/riscv/htif/htif.c htif_mainbus | 63 | file arch/riscv/htif/htif.c htif_mainbus | |
64 | 64 | |||
65 | device htifcons { } : tty | 65 | device htifcons { } : tty | |
66 | attach htifcons at htif with htif_cons | 66 | attach htifcons at htif with htif_cons | |
67 | file arch/riscv/htif/htif_cons.c htif_cons | 67 | file arch/riscv/htif/htif_cons.c htif_cons | |
68 | 68 | |||
69 | device htifdisk { } : disk | 69 | device htifdisk { } : disk | |
70 | attach htifdisk at htif with htif_disk | 70 | attach htifdisk at htif with htif_disk | |
71 | attach ld at htifdisk with ld_htifdisk | 71 | attach ld at htifdisk with ld_htifdisk | |
72 | file arch/riscv/htif/htif_disk.c htif_disk | 72 | file arch/riscv/htif/htif_disk.c htif_disk | |
73 | 73 | |||
74 | include "arch/riscv/conf/majors.riscv" | 74 | include "arch/riscv/conf/majors.riscv" |
--- src/sys/arch/riscv/include/frame.h 2020/03/14 16:12:16 1.2
+++ src/sys/arch/riscv/include/frame.h 2020/11/04 06:56:56 1.3
@@ -1,52 +1,52 @@ | @@ -1,52 +1,52 @@ | |||
1 | /* $NetBSD: frame.h,v 1.2 2020/03/14 16:12:16 skrll Exp $ */ | 1 | /* $NetBSD: frame.h,v 1.3 2020/11/04 06:56:56 skrll Exp $ */ | |
2 | 2 | |||
3 | /*- | 3 | /*- | |
4 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Matt Thomas of 3am Software Foundry. | 8 | * by Matt Thomas of 3am Software Foundry. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | 15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | 16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | 17 | * documentation and/or other materials provided with the distribution. | |
18 | * | 18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | #ifndef _RISCV_FRAME_H_ | 32 | #ifndef _RISCV_FRAME_H_ | |
33 | #define _RISCV_FRAME_H_ | 33 | #define _RISCV_FRAME_H_ | |
34 | 34 | |||
35 | #include <riscv/reg.h> | 35 | #include <riscv/reg.h> | |
36 | 36 | |||
37 | struct trapframe { | 37 | struct trapframe { | |
38 | struct reg tf_regs __aligned(8); | 38 | struct reg tf_regs __aligned(8); | |
39 | register_t tf_badvaddr; | 39 | register_t tf_stval; // supervisor trap value | |
40 | register_t tf_pc; | 40 | register_t tf_sepc; // supervisor exception program counter | |
41 | uint32_t tf_cause; // 32-bit register | 41 | register_t tf_scause; // supervisor cause register | |
42 | uint32_t tf_sr; // 32-bit register | 42 | register_t tf_sr; // supervisor status register | |
43 | #define tf_reg tf_regs.r_reg | 43 | #define tf_reg tf_regs.r_reg | |
44 | #define tf_a0 tf_reg[_X_A0] | 44 | #define tf_a0 tf_reg[_X_A0] | |
45 | #define tf_t0 tf_reg[_X_T0] | 45 | #define tf_t0 tf_reg[_X_T0] | |
46 | #define tf_v0 tf_reg[_X_V0] | 46 | #define tf_v0 tf_reg[_X_V0] | |
47 | #define tf_v1 tf_reg[_X_V1] | 47 | #define tf_v1 tf_reg[_X_V1] | |
48 | #define tf_ra tf_reg[_X_RA] | 48 | #define tf_ra tf_reg[_X_RA] | |
49 | #define tf_sp tf_reg[_X_SP] | 49 | #define tf_sp tf_reg[_X_SP] | |
50 | }; | 50 | }; | |
51 | 51 | |||
52 | #endif /* _RISCV_FRAME_H_ */ | 52 | #endif /* _RISCV_FRAME_H_ */ |
--- src/sys/arch/riscv/include/locore.h 2020/03/14 16:12:16 1.5
+++ src/sys/arch/riscv/include/locore.h 2020/11/04 06:56:56 1.6
@@ -1,204 +1,204 @@ | @@ -1,204 +1,204 @@ | |||
1 | /* $NetBSD: locore.h,v 1.5 2020/03/14 16:12:16 skrll Exp $ */ | 1 | /* $NetBSD: locore.h,v 1.6 2020/11/04 06:56:56 skrll Exp $ */ | |
2 | /*- | 2 | /*- | |
3 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | 3 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | |
4 | * All rights reserved. | 4 | * All rights reserved. | |
5 | * | 5 | * | |
6 | * This code is derived from software contributed to The NetBSD Foundation | 6 | * This code is derived from software contributed to The NetBSD Foundation | |
7 | * by Matt Thomas of 3am Software Foundry. | 7 | * by Matt Thomas of 3am Software Foundry. | |
8 | * | 8 | * | |
9 | * Redistribution and use in source and binary forms, with or without | 9 | * Redistribution and use in source and binary forms, with or without | |
10 | * modification, are permitted provided that the following conditions | 10 | * modification, are permitted provided that the following conditions | |
11 | * are met: | 11 | * are met: | |
12 | * 1. Redistributions of source code must retain the above copyright | 12 | * 1. Redistributions of source code must retain the above copyright | |
13 | * notice, this list of conditions and the following disclaimer. | 13 | * notice, this list of conditions and the following disclaimer. | |
14 | * 2. Redistributions in binary form must reproduce the above copyright | 14 | * 2. Redistributions in binary form must reproduce the above copyright | |
15 | * notice, this list of conditions and the following disclaimer in the | 15 | * notice, this list of conditions and the following disclaimer in the | |
16 | * documentation and/or other materials provided with the distribution. | 16 | * documentation and/or other materials provided with the distribution. | |
17 | * | 17 | * | |
18 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 18 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
19 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 19 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
20 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 20 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
21 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 21 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
22 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 22 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
28 | * POSSIBILITY OF SUCH DAMAGE. | 28 | * POSSIBILITY OF SUCH DAMAGE. | |
29 | */ | 29 | */ | |
30 | 30 | |||
31 | #ifndef _RISCV_LOCORE_H_ | 31 | #ifndef _RISCV_LOCORE_H_ | |
32 | #define _RISCV_LOCORE_H_ | 32 | #define _RISCV_LOCORE_H_ | |
33 | 33 | |||
34 | #include <sys/lwp.h> | 34 | #include <sys/lwp.h> | |
35 | #include <sys/userret.h> | 35 | #include <sys/userret.h> | |
36 | 36 | |||
37 | #include <riscv/reg.h> | 37 | #include <riscv/reg.h> | |
38 | #include <riscv/sysreg.h> | 38 | #include <riscv/sysreg.h> | |
39 | 39 | |||
40 | struct trapframe { | 40 | struct trapframe { | |
41 | struct reg tf_regs; | 41 | struct reg tf_regs; | |
42 | register_t tf_badaddr; | 42 | register_t tf_tval; | |
43 | uint32_t tf_cause; // 32-bit register | 43 | register_t tf_cause; | |
44 | uint32_t tf_sr; // 32-bit register | 44 | register_t tf_sr; | |
45 | #define tf_reg tf_regs.r_reg | 45 | #define tf_reg tf_regs.r_reg | |
46 | #define tf_pc tf_regs.r_pc | 46 | #define tf_pc tf_regs.r_pc | |
47 | #define tf_ra tf_reg[_X_RA] | 47 | #define tf_ra tf_reg[_X_RA] | |
48 | #define tf_sp tf_reg[_X_SP] | 48 | #define tf_sp tf_reg[_X_SP] | |
49 | #define tf_gp tf_reg[_X_GP] | 49 | #define tf_gp tf_reg[_X_GP] | |
50 | #define tf_tp tf_reg[_X_TP] | 50 | #define tf_tp tf_reg[_X_TP] | |
51 | #define tf_t0 tf_reg[_X_T0] | 51 | #define tf_t0 tf_reg[_X_T0] | |
52 | #define tf_t1 tf_reg[_X_T1] | 52 | #define tf_t1 tf_reg[_X_T1] | |
53 | #define tf_t2 tf_reg[_X_T2] | 53 | #define tf_t2 tf_reg[_X_T2] | |
54 | #define tf_s0 tf_reg[_X_S0] | 54 | #define tf_s0 tf_reg[_X_S0] | |
55 | #define tf_s1 tf_reg[_X_S1] | 55 | #define tf_s1 tf_reg[_X_S1] | |
56 | #define tf_a0 tf_reg[_X_A0] | 56 | #define tf_a0 tf_reg[_X_A0] | |
57 | #define tf_a1 tf_reg[_X_A1] | 57 | #define tf_a1 tf_reg[_X_A1] | |
58 | #define tf_a2 tf_reg[_X_A2] | 58 | #define tf_a2 tf_reg[_X_A2] | |
59 | #define tf_a3 tf_reg[_X_A3] | 59 | #define tf_a3 tf_reg[_X_A3] | |
60 | #define tf_a4 tf_reg[_X_A4] | 60 | #define tf_a4 tf_reg[_X_A4] | |
61 | #define tf_a5 tf_reg[_X_A5] | 61 | #define tf_a5 tf_reg[_X_A5] | |
62 | #define tf_a6 tf_reg[_X_A6] | 62 | #define tf_a6 tf_reg[_X_A6] | |
63 | #define tf_a7 tf_reg[_X_A7] | 63 | #define tf_a7 tf_reg[_X_A7] | |
64 | #define tf_s2 tf_reg[_X_S2] | 64 | #define tf_s2 tf_reg[_X_S2] | |
65 | #define tf_s3 tf_reg[_X_S3] | 65 | #define tf_s3 tf_reg[_X_S3] | |
66 | #define tf_s4 tf_reg[_X_S4] | 66 | #define tf_s4 tf_reg[_X_S4] | |
67 | #define tf_s5 tf_reg[_X_S5] | 67 | #define tf_s5 tf_reg[_X_S5] | |
68 | #define tf_s6 tf_reg[_X_S6] | 68 | #define tf_s6 tf_reg[_X_S6] | |
69 | #define tf_s7 tf_reg[_X_S7] | 69 | #define tf_s7 tf_reg[_X_S7] | |
70 | #define tf_s8 tf_reg[_X_S8] | 70 | #define tf_s8 tf_reg[_X_S8] | |
71 | #define tf_s9 tf_reg[_X_S9] | 71 | #define tf_s9 tf_reg[_X_S9] | |
72 | #define tf_s10 tf_reg[_X_S10] | 72 | #define tf_s10 tf_reg[_X_S10] | |
73 | #define tf_s11 tf_reg[_X_S11] | 73 | #define tf_s11 tf_reg[_X_S11] | |
74 | #define tf_t3 tf_reg[_X_T3] | 74 | #define tf_t3 tf_reg[_X_T3] | |
75 | #define tf_t4 tf_reg[_X_T4] | 75 | #define tf_t4 tf_reg[_X_T4] | |
76 | #define tf_t5 tf_reg[_X_T5] | 76 | #define tf_t5 tf_reg[_X_T5] | |
77 | #define tf_t6 tf_reg[_X_T6] | 77 | #define tf_t6 tf_reg[_X_T6] | |
78 | }; | 78 | }; | |
79 | 79 | |||
80 | // For COMPAT_NETBSD32 coredumps | 80 | // For COMPAT_NETBSD32 coredumps | |
81 | struct trapframe32 { | 81 | struct trapframe32 { | |
82 | struct reg32 tf_regs; | 82 | struct reg32 tf_regs; | |
83 | register32_t tf_badaddr; | 83 | register32_t tf_tval; | |
84 | uint32_t tf_cause; // 32-bit register | 84 | register32_t tf_cause; | |
85 | uint32_t tf_sr; // 32-bit register | 85 | register32_t tf_sr; | |
86 | }; | 86 | }; | |
87 | 87 | |||
88 | #define FB_A0 0 | 88 | #define FB_A0 0 | |
89 | #define FB_RA 1 | 89 | #define FB_RA 1 | |
90 | #define FB_SP 2 | 90 | #define FB_SP 2 | |
91 | #define FB_GP 3 | 91 | #define FB_GP 3 | |
92 | #define FB_S0 4 | 92 | #define FB_S0 4 | |
93 | #define FB_S1 5 | 93 | #define FB_S1 5 | |
94 | #define FB_S2 6 | 94 | #define FB_S2 6 | |
95 | #define FB_S3 7 | 95 | #define FB_S3 7 | |
96 | #define FB_S4 8 | 96 | #define FB_S4 8 | |
97 | #define FB_S5 9 | 97 | #define FB_S5 9 | |
98 | #define FB_S6 10 | 98 | #define FB_S6 10 | |
99 | #define FB_S7 11 | 99 | #define FB_S7 11 | |
100 | #define FB_S8 12 | 100 | #define FB_S8 12 | |
101 | #define FB_S9 13 | 101 | #define FB_S9 13 | |
102 | #define FB_S10 14 | 102 | #define FB_S10 14 | |
103 | #define FB_S11 15 | 103 | #define FB_S11 15 | |
104 | #define FB_MAX 16 | 104 | #define FB_MAX 16 | |
105 | 105 | |||
106 | struct faultbuf { | 106 | struct faultbuf { | |
107 | register_t fb_reg[FB_MAX]; | 107 | register_t fb_reg[FB_MAX]; | |
108 | uint32_t fb_sr; | 108 | register_t fb_sr; | |
109 | }; | 109 | }; | |
110 | 110 | |||
111 | CTASSERT(sizeof(label_t) == sizeof(struct faultbuf)); | 111 | CTASSERT(sizeof(label_t) == sizeof(struct faultbuf)); | |
112 | 112 | |||
113 | struct mainbus_attach_args { | 113 | struct mainbus_attach_args { | |
114 | const char *maa_name; | 114 | const char *maa_name; | |
115 | u_int maa_instance; | 115 | u_int maa_instance; | |
116 | }; | 116 | }; | |
117 | 117 | |||
118 | #ifdef _KERNEL | 118 | #ifdef _KERNEL | |
119 | extern int cpu_printfataltraps; | 119 | extern int cpu_printfataltraps; | |
120 | extern const pcu_ops_t pcu_fpu_ops; | 120 | extern const pcu_ops_t pcu_fpu_ops; | |
121 | 121 | |||
122 | static inline vaddr_t | 122 | static inline vaddr_t | |
123 | stack_align(vaddr_t sp) | 123 | stack_align(vaddr_t sp) | |
124 | { | 124 | { | |
125 | return sp & ~STACK_ALIGNBYTES; | 125 | return sp & ~STACK_ALIGNBYTES; | |
126 | } | 126 | } | |
127 | 127 | |||
128 | static inline void | 128 | static inline void | |
129 | userret(struct lwp *l) | 129 | userret(struct lwp *l) | |
130 | { | 130 | { | |
131 | mi_userret(l); | 131 | mi_userret(l); | |
132 | } | 132 | } | |
133 | 133 | |||
134 | static inline void | 134 | static inline void | |
135 | fpu_load(void) | 135 | fpu_load(void) | |
136 | { | 136 | { | |
137 | pcu_load(&pcu_fpu_ops); | 137 | pcu_load(&pcu_fpu_ops); | |
138 | } | 138 | } | |
139 | 139 | |||
140 | static inline void | 140 | static inline void | |
141 | fpu_save(lwp_t *l) | 141 | fpu_save(lwp_t *l) | |
142 | { | 142 | { | |
143 | pcu_save(&pcu_fpu_ops, l); | 143 | pcu_save(&pcu_fpu_ops, l); | |
144 | } | 144 | } | |
145 | 145 | |||
146 | static inline void | 146 | static inline void | |
147 | fpu_discard(lwp_t *l) | 147 | fpu_discard(lwp_t *l) | |
148 | { | 148 | { | |
149 | pcu_discard(&pcu_fpu_ops, l, false); | 149 | pcu_discard(&pcu_fpu_ops, l, false); | |
150 | } | 150 | } | |
151 | 151 | |||
152 | static inline void | 152 | static inline void | |
153 | fpu_replace(lwp_t *l) | 153 | fpu_replace(lwp_t *l) | |
154 | { | 154 | { | |
155 | pcu_discard(&pcu_fpu_ops, l, true); | 155 | pcu_discard(&pcu_fpu_ops, l, true); | |
156 | } | 156 | } | |
157 | 157 | |||
158 | static inline bool | 158 | static inline bool | |
159 | fpu_valid_p(lwp_t *l) | 159 | fpu_valid_p(lwp_t *l) | |
160 | { | 160 | { | |
161 | return pcu_valid_p(&pcu_fpu_ops, l); | 161 | return pcu_valid_p(&pcu_fpu_ops, l); | |
162 | } | 162 | } | |
163 | 163 | |||
164 | void __syncicache(const void *, size_t); | 164 | void __syncicache(const void *, size_t); | |
165 | 165 | |||
166 | int cpu_set_onfault(struct faultbuf *, register_t) __returns_twice; | 166 | int cpu_set_onfault(struct faultbuf *, register_t) __returns_twice; | |
167 | void cpu_jump_onfault(struct trapframe *, const struct faultbuf *); | 167 | void cpu_jump_onfault(struct trapframe *, const struct faultbuf *); | |
168 | 168 | |||
169 | static inline void | 169 | static inline void | |
170 | cpu_unset_onfault(void) | 170 | cpu_unset_onfault(void) | |
171 | { | 171 | { | |
172 | curlwp->l_md.md_onfault = NULL; | 172 | curlwp->l_md.md_onfault = NULL; | |
173 | } | 173 | } | |
174 | 174 | |||
175 | static inline struct faultbuf * | 175 | static inline struct faultbuf * | |
176 | cpu_disable_onfault(void) | 176 | cpu_disable_onfault(void) | |
177 | { | 177 | { | |
178 | struct faultbuf * const fb = curlwp->l_md.md_onfault; | 178 | struct faultbuf * const fb = curlwp->l_md.md_onfault; | |
179 | curlwp->l_md.md_onfault = NULL; | 179 | curlwp->l_md.md_onfault = NULL; | |
180 | return fb; | 180 | return fb; | |
181 | } | 181 | } | |
182 | 182 | |||
183 | static inline void | 183 | static inline void | |
184 | cpu_enable_onfault(struct faultbuf *fb) | 184 | cpu_enable_onfault(struct faultbuf *fb) | |
185 | { | 185 | { | |
186 | curlwp->l_md.md_onfault = fb; | 186 | curlwp->l_md.md_onfault = fb; | |
187 | } | 187 | } | |
188 | 188 | |||
189 | void cpu_intr(struct trapframe */*tf*/, register_t /*epc*/, | 189 | void cpu_intr(struct trapframe */*tf*/, register_t /*epc*/, | |
190 | register_t /*status*/, register_t /*cause*/); | 190 | register_t /*status*/, register_t /*cause*/); | |
191 | void cpu_trap(struct trapframe */*tf*/, register_t /*epc*/, | 191 | void cpu_trap(struct trapframe */*tf*/, register_t /*epc*/, | |
192 | register_t /*status*/, register_t /*cause*/, | 192 | register_t /*status*/, register_t /*cause*/, | |
193 | register_t /*badvaddr*/); | 193 | register_t /*badvaddr*/); | |
194 | void cpu_ast(struct trapframe *); | 194 | void cpu_ast(struct trapframe *); | |
195 | void cpu_fast_switchto(struct lwp *, int); | 195 | void cpu_fast_switchto(struct lwp *, int); | |
196 | 196 | |||
197 | void cpu_lwp_trampoline(void); | 197 | void cpu_lwp_trampoline(void); | |
198 | 198 | |||
199 | void * cpu_sendsig_getframe(struct lwp *, int, bool *); | 199 | void * cpu_sendsig_getframe(struct lwp *, int, bool *); | |
200 | 200 | |||
201 | void init_riscv(vaddr_t, vaddr_t); | 201 | void init_riscv(vaddr_t, vaddr_t); | |
202 | #endif | 202 | #endif | |
203 | 203 | |||
204 | #endif /* _RISCV_LOCORE_H_ */ | 204 | #endif /* _RISCV_LOCORE_H_ */ |
--- src/sys/arch/riscv/include/sysreg.h 2020/11/02 08:37:59 1.8
+++ src/sys/arch/riscv/include/sysreg.h 2020/11/04 06:56:56 1.9
@@ -1,230 +1,283 @@ | @@ -1,230 +1,283 @@ | |||
1 | /* $NetBSD: sysreg.h,v 1.8 2020/11/02 08:37:59 skrll Exp $ */ | 1 | /* $NetBSD: sysreg.h,v 1.9 2020/11/04 06:56:56 skrll Exp $ */ | |
2 | 2 | |||
3 | /* | 3 | /* | |
4 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Matt Thomas of 3am Software Foundry. | 8 | * by Matt Thomas of 3am Software Foundry. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | 15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | 16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | 17 | * documentation and/or other materials provided with the distribution. | |
18 | * | 18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | #ifndef _RISCV_SYSREG_H_ | 32 | #ifndef _RISCV_SYSREG_H_ | |
33 | #define _RISCV_SYSREG_H_ | 33 | #define _RISCV_SYSREG_H_ | |
34 | 34 | |||
35 | #ifndef _KERNEL | 35 | #ifndef _KERNEL | |
36 | #include <sys/param.h> | 36 | #include <sys/param.h> | |
37 | #endif | 37 | #endif | |
38 | 38 | |||
39 | #define FCSR_FMASK 0 // no exception bits | 39 | #define FCSR_FMASK 0 // no exception bits | |
40 | #define FCSR_FRM __BITS(7,5) | 40 | #define FCSR_FRM __BITS(7,5) | |
41 | #define FCSR_FRM_RNE 0b000 // Round Nearest, ties to Even | 41 | #define FCSR_FRM_RNE 0b000 // Round Nearest, ties to Even | |
42 | #define FCSR_FRM_RTZ 0b001 // Round Towards Zero | 42 | #define FCSR_FRM_RTZ 0b001 // Round Towards Zero | |
43 | #define FCSR_FRM_RDN 0b010 // Round DowN (-infinity) | 43 | #define FCSR_FRM_RDN 0b010 // Round DowN (-infinity) | |
44 | #define FCSR_FRM_RUP 0b011 // Round UP (+infinity) | 44 | #define FCSR_FRM_RUP 0b011 // Round UP (+infinity) | |
45 | #define FCSR_FRM_RMM 0b100 // Round to nearest, ties to Max Magnitude | 45 | #define FCSR_FRM_RMM 0b100 // Round to nearest, ties to Max Magnitude | |
46 | #define FCSR_FRM_DYN 0b111 // Dynamic rounding | |||
46 | #define FCSR_FFLAGS __BITS(4,0) // Sticky bits | 47 | #define FCSR_FFLAGS __BITS(4,0) // Sticky bits | |
47 | #define FCSR_NV __BIT(4) // iNValid operation | 48 | #define FCSR_NV __BIT(4) // iNValid operation | |
48 | #define FCSR_DZ __BIT(3) // Divide by Zero | 49 | #define FCSR_DZ __BIT(3) // Divide by Zero | |
49 | #define FCSR_OF __BIT(2) // OverFlow | 50 | #define FCSR_OF __BIT(2) // OverFlow | |
50 | #define FCSR_UF __BIT(1) // UnderFlow | 51 | #define FCSR_UF __BIT(1) // UnderFlow | |
51 | #define FCSR_NX __BIT(0) // iNeXact | 52 | #define FCSR_NX __BIT(0) // iNeXact | |
52 | 53 | |||
53 | static inline uint32_t | 54 | static inline uint32_t | |
54 | riscvreg_fcsr_read(void) | 55 | riscvreg_fcsr_read(void) | |
55 | { | 56 | { | |
56 | uint32_t __fcsr; | 57 | uint32_t __fcsr; | |
57 | __asm("frcsr %0" : "=r"(__fcsr)); | 58 | __asm("frcsr %0" : "=r"(__fcsr)); | |
58 | return __fcsr; | 59 | return __fcsr; | |
59 | } | 60 | } | |
60 | 61 | |||
61 | 62 | |||
62 | static inline uint32_t | 63 | static inline uint32_t | |
63 | riscvreg_fcsr_write(uint32_t __new) | 64 | riscvreg_fcsr_write(uint32_t __new) | |
64 | { | 65 | { | |
65 | uint32_t __old; | 66 | uint32_t __old; | |
66 | __asm("fscsr %0, %1" : "=r"(__old) : "r"(__new)); | 67 | __asm("fscsr %0, %1" : "=r"(__old) : "r"(__new)); | |
67 | return __old; | 68 | return __old; | |
68 | } | 69 | } | |
69 | 70 | |||
70 | static inline uint32_t | 71 | static inline uint32_t | |
71 | riscvreg_fcsr_read_fflags(void) | 72 | riscvreg_fcsr_read_fflags(void) | |
72 | { | 73 | { | |
73 | uint32_t __old; | 74 | uint32_t __old; | |
74 | __asm("frflags %0" : "=r"(__old)); | 75 | __asm("frflags %0" : "=r"(__old)); | |
75 | return __SHIFTOUT(__old, FCSR_FFLAGS); | 76 | return __SHIFTOUT(__old, FCSR_FFLAGS); | |
76 | } | 77 | } | |
77 | 78 | |||
78 | static inline uint32_t | 79 | static inline uint32_t | |
79 | riscvreg_fcsr_write_fflags(uint32_t __new) | 80 | riscvreg_fcsr_write_fflags(uint32_t __new) | |
80 | { | 81 | { | |
81 | uint32_t __old; | 82 | uint32_t __old; | |
82 | __new = __SHIFTIN(__new, FCSR_FFLAGS); | 83 | __new = __SHIFTIN(__new, FCSR_FFLAGS); | |
83 | __asm("fsflags %0, %1" : "=r"(__old) : "r"(__new)); | 84 | __asm("fsflags %0, %1" : "=r"(__old) : "r"(__new)); | |
84 | return __SHIFTOUT(__old, FCSR_FFLAGS); | 85 | return __SHIFTOUT(__old, FCSR_FFLAGS); | |
85 | } | 86 | } | |
86 | 87 | |||
87 | static inline uint32_t | 88 | static inline uint32_t | |
88 | riscvreg_fcsr_read_frm(void) | 89 | riscvreg_fcsr_read_frm(void) | |
89 | { | 90 | { | |
90 | uint32_t __old; | 91 | uint32_t __old; | |
91 | __asm("frrm\t%0" : "=r"(__old)); | 92 | __asm("frrm\t%0" : "=r"(__old)); | |
92 | return __SHIFTOUT(__old, FCSR_FRM); | 93 | return __SHIFTOUT(__old, FCSR_FRM); | |
93 | } | 94 | } | |
94 | 95 | |||
95 | static inline uint32_t | 96 | static inline uint32_t | |
96 | riscvreg_fcsr_write_frm(uint32_t __new) | 97 | riscvreg_fcsr_write_frm(uint32_t __new) | |
97 | { | 98 | { | |
98 | uint32_t __old; | 99 | uint32_t __old; | |
99 | __new = __SHIFTIN(__new, FCSR_FRM); | 100 | __new = __SHIFTIN(__new, FCSR_FRM); | |
100 | __asm volatile("fsrm\t%0, %1" : "=r"(__old) : "r"(__new)); | 101 | __asm volatile("fsrm\t%0, %1" : "=r"(__old) : "r"(__new)); | |
101 | return __SHIFTOUT(__old, FCSR_FRM); | 102 | return __SHIFTOUT(__old, FCSR_FRM); | |
102 | } | 103 | } | |
103 | 104 | |||
104 | // Status Register | 105 | /* Supervisor Status Register */ | |
105 | #define SR_IP __BITS(31,24) // Pending interrupts | 106 | #ifdef _LP64 | |
106 | #define SR_IM __BITS(23,16) // Interrupt Mask | 107 | #define SR_WPRI __BITS(62, 34) | __BITS(31,20) | __BIT(17) | \ | |
107 | #define SR_VM __BIT(7) // MMU On | 108 | __BITS(12,9) | __BITS(7,6) | __BITS(3,2) | |
108 | #define SR_S64 __BIT(6) // RV64 supervisor mode | 109 | #define SR_SD __BIT(63) | |
109 | #define SR_U64 __BIT(5) // RV64 user mode | 110 | /* Bits 62-34 are WPRI */ | |
110 | #define SR_EF __BIT(4) // Enable Floating Point | 111 | #define SR_UXL __BITS(33,32) | |
111 | #define SR_PEI __BIT(3) // Previous EI setting | 112 | #define SR_UXL_32 1 | |
112 | #define SR_EI __BIT(2) // Enable interrupts | 113 | #define SR_UXL_64 2 | |
113 | #define SR_PS __BIT(1) // Previous (S) supervisor setting | 114 | #define SR_UXL_128 3 | |
114 | #define SR_S __BIT(0) // Supervisor | 115 | /* Bits 31-20 are WPRI*/ | |
116 | #else | |||
117 | #define SR_WPRI __BITS(30,20) | __BIT(17) | __BITS(12,9) | \ | |||
118 | __BITS(7,6) | __BITS(3,2) | |||
119 | #define SR_SD __BIT(31) | |||
120 | /* Bits 30-20 are WPRI*/ | |||
121 | #endif /* _LP64 */ | |||
122 | ||||
123 | /* Both RV32 and RV64 have the bottom 20 bits shared */ | |||
124 | #define SR_MXR __BIT(19) | |||
125 | #define SR_SUM __BIT(18) | |||
126 | /* Bit 17 is WPRI */ | |||
127 | #define SR_XS __BITS(16,15) | |||
128 | #define SR_FS __BITS(14,13) | |||
129 | #define SR_FS_OFF 0 | |||
130 | #define SR_FS_INITIAL 1 | |||
131 | #define SR_FS_CLEAN 2 | |||
132 | #define SR_FS_DIRTY 3 | |||
133 | ||||
134 | /* Bits 12-9 are WPRI */ | |||
135 | #define SR_SPP __BIT(8) | |||
136 | /* Bits 7-6 are WPRI */ | |||
137 | #define SR_SPIE __BIT(5) | |||
138 | #define SR_UPIE __BIT(4) | |||
139 | /* Bits 3-2 are WPRI */ | |||
140 | #define SR_SIE __BIT(1) | |||
141 | #define SR_UIE __BIT(0) | |||
142 | ||||
143 | /* Supervisor interrupt registers */ | |||
144 | /* ... interrupt pending register (sip) */ | |||
145 | /* Bit (XLEN-1)-10 is WIRI */ | |||
146 | #define SIP_SEIP __BIT(9) | |||
147 | #define SIP_UEIP __BIT(8) | |||
148 | /* Bit 7-6 is WIRI */ | |||
149 | #define SIP_STIP __BIT(5) | |||
150 | #define SIP_UTIP __BIT(4) | |||
151 | /* Bit 3-2 is WIRI */ | |||
152 | #define SIP_SSIP __BIT(1) | |||
153 | #define SIP_USIP __BIT(0) | |||
154 | ||||
155 | /* ... interrupt-enable register (sie) */ | |||
156 | /* Bit (XLEN-1) - 10 is WIRI */ | |||
157 | #define SIE_SEIE __BIT(9) | |||
158 | #define SIE_UEIE __BIT(8) | |||
159 | /* Bit 7-6 is WIRI */ | |||
160 | #define SIE_STIE __BIT(5) | |||
161 | #define SIE_UTIE __BIT(4) | |||
162 | /* Bit 3-2 is WIRI */ | |||
163 | #define SIE_SSIE __BIT(1) | |||
164 | #define SIE_USIE __BIT(0) | |||
165 | ||||
166 | /* Mask for all interrupts */ | |||
167 | #define SIE_IM (SIE_SEIE|SIE_UEIE|SIE_STIE|SIE_UTIE|SIE_SSIE|SIE_USIE) | |||
115 | 168 | |||
116 | #ifdef _LP64 | 169 | #ifdef _LP64 | |
117 | #define SR_USER (SR_EI|SR_U64|SR_S64|SR_VM|SR_IM) | 170 | #define SR_USER (SR_UIE | SR_U64 | SR_S64 | SR_IM) | |
118 | #define SR_USER32 (SR_USER & ~SR_U64) | 171 | #define SR_USER32 (SR_USER & ~SR_U64) | |
119 | #define SR_KERNEL (SR_S|SR_EI|SR_U64|SR_S64|SR_VM) | 172 | #define SR_KERNEL (SR_S | SR_UIE | SR_U64 | SR_S64) | |
120 | #else | 173 | #else | |
121 | #define SR_USER (SR_EI|SR_VM|SR_IM) | 174 | #define SR_USER (SR_UIE|SR_IM) | |
122 | #define SR_KERNEL (SR_S|SR_EI|SR_VM) | 175 | #define SR_KERNEL (SR_S|SR_UIE) | |
123 | #endif | 176 | #endif | |
124 | 177 | |||
125 | static inline uint32_t | 178 | static inline uint32_t | |
126 | riscvreg_status_read(void) | 179 | riscvreg_status_read(void) | |
127 | { | 180 | { | |
128 | uint32_t __sr; | 181 | uint32_t __sr; | |
129 | __asm("csrr\t%0, sstatus" : "=r"(__sr)); | 182 | __asm("csrr\t%0, sstatus" : "=r"(__sr)); | |
130 | return __sr; | 183 | return __sr; | |
131 | } | 184 | } | |
132 | 185 | |||
133 | static inline uint32_t | 186 | static inline uint32_t | |
134 | riscvreg_status_clear(uint32_t __mask) | 187 | riscvreg_status_clear(uint32_t __mask) | |
135 | { | 188 | { | |
136 | uint32_t __sr; | 189 | uint32_t __sr; | |
137 | if (__builtin_constant_p(__mask) && __mask < 0x20) { | 190 | if (__builtin_constant_p(__mask) && __mask < 0x20) { | |
138 | __asm("csrrci\t%0, sstatus, %1" : "=r"(__sr) : "i"(__mask)); | 191 | __asm("csrrci\t%0, sstatus, %1" : "=r"(__sr) : "i"(__mask)); | |
139 | } else { | 192 | } else { | |
140 | __asm("csrrc\t%0, sstatus, %1" : "=r"(__sr) : "r"(__mask)); | 193 | __asm("csrrc\t%0, sstatus, %1" : "=r"(__sr) : "r"(__mask)); | |
141 | } | 194 | } | |
142 | return __sr; | 195 | return __sr; | |
143 | } | 196 | } | |
144 | 197 | |||
145 | static inline uint32_t | 198 | static inline uint32_t | |
146 | riscvreg_status_set(uint32_t __mask) | 199 | riscvreg_status_set(uint32_t __mask) | |
147 | { | 200 | { | |
148 | uint32_t __sr; | 201 | uint32_t __sr; | |
149 | if (__builtin_constant_p(__mask) && __mask < 0x20) { | 202 | if (__builtin_constant_p(__mask) && __mask < 0x20) { | |
150 | __asm("csrrsi\t%0, sstatus, %1" : "=r"(__sr) : "i"(__mask)); | 203 | __asm("csrrsi\t%0, sstatus, %1" : "=r"(__sr) : "i"(__mask)); | |
151 | } else { | 204 | } else { | |
152 | __asm("csrrs\t%0, sstatus, %1" : "=r"(__sr) : "r"(__mask)); | 205 | __asm("csrrs\t%0, sstatus, %1" : "=r"(__sr) : "r"(__mask)); | |
153 | } | 206 | } | |
154 | return __sr; | 207 | return __sr; | |
155 | } | 208 | } | |
156 | 209 | |||
157 | // Cause register | 210 | // Cause register | |
158 | #define CAUSE_FETCH_MISALIGNED 0 | 211 | #define CAUSE_FETCH_MISALIGNED 0 | |
159 | #define CAUSE_FETCH_ACCESS 1 | 212 | #define CAUSE_FETCH_ACCESS 1 | |
160 | #define CAUSE_ILLEGAL_INSTRUCTION 2 | 213 | #define CAUSE_ILLEGAL_INSTRUCTION 2 | |
161 | #define CAUSE_BREAKPOINT 3 | 214 | #define CAUSE_BREAKPOINT 3 | |
162 | #define CAUSE_LOAD_MISALIGNED 4 | 215 | #define CAUSE_LOAD_MISALIGNED 4 | |
163 | #define CAUSE_LOAD_ACCESS 5 | 216 | #define CAUSE_LOAD_ACCESS 5 | |
164 | #define CAUSE_STORE_MISALIGNED 6 | 217 | #define CAUSE_STORE_MISALIGNED 6 | |
165 | #define CAUSE_STORE_ACCESS 7 | 218 | #define CAUSE_STORE_ACCESS 7 | |
166 | #define CAUSE_SYSCALL 8 | 219 | #define CAUSE_SYSCALL 8 | |
167 | #define CAUSE_USER_ECALL 8 | 220 | #define CAUSE_USER_ECALL 8 | |
168 | #define CAUSE_SUPERVISOR_ECALL 9 | 221 | #define CAUSE_SUPERVISOR_ECALL 9 | |
169 | /* 10 is reserved */ | 222 | /* 10 is reserved */ | |
170 | #define CAUSE_MACHINE_ECALL 11 | 223 | #define CAUSE_MACHINE_ECALL 11 | |
171 | #define CAUSE_FETCH_PAGE_FAULT 12 | 224 | #define CAUSE_FETCH_PAGE_FAULT 12 | |
172 | #define CAUSE_LOAD_PAGE_FAULT 13 | 225 | #define CAUSE_LOAD_PAGE_FAULT 13 | |
173 | /* 14 is Reserved */ | 226 | /* 14 is Reserved */ | |
174 | #define CAUSE_STORE_PAGE_FAULT 15 | 227 | #define CAUSE_STORE_PAGE_FAULT 15 | |
175 | /* >= 16 is reserved */ | 228 | /* >= 16 is reserved */ | |
176 | 229 | |||
177 | static inline uint64_t | 230 | static inline uint64_t | |
178 | riscvreg_cycle_read(void) | 231 | riscvreg_cycle_read(void) | |
179 | { | 232 | { | |
180 | #ifdef _LP64 | 233 | #ifdef _LP64 | |
181 | uint64_t __lo; | 234 | uint64_t __lo; | |
182 | __asm __volatile("csrr\t%0, cycle" : "=r"(__lo)); | 235 | __asm __volatile("csrr\t%0, cycle" : "=r"(__lo)); | |
183 | return __lo; | 236 | return __lo; | |
184 | #else | 237 | #else | |
185 | uint32_t __hi0, __hi1, __lo0; | 238 | uint32_t __hi0, __hi1, __lo0; | |
186 | do { | 239 | do { | |
187 | __asm __volatile( | 240 | __asm __volatile( | |
188 | "csrr\t%[__hi0], cycleh" | 241 | "csrr\t%[__hi0], cycleh" | |
189 | "\n\t" "csrr\t%[__lo0], cycle" | 242 | "\n\t" "csrr\t%[__lo0], cycle" | |
190 | "\n\t" "csrr\t%[__hi1], cycleh" | 243 | "\n\t" "csrr\t%[__hi1], cycleh" | |
191 | : [__hi0] "=r"(__hi0), | 244 | : [__hi0] "=r"(__hi0), | |
192 | [__lo0] "=r"(__lo0), | 245 | [__lo0] "=r"(__lo0), | |
193 | [__hi1] "=r"(__hi1)); | 246 | [__hi1] "=r"(__hi1)); | |
194 | } while (__hi0 != __hi1); | 247 | } while (__hi0 != __hi1); | |
195 | return ((uint64_t)__hi0 << 32) | (uint64_t)__lo0; | 248 | return ((uint64_t)__hi0 << 32) | (uint64_t)__lo0; | |
196 | #endif | 249 | #endif | |
197 | } | 250 | } | |
198 | 251 | |||
199 | #ifdef _LP64 | 252 | #ifdef _LP64 | |
200 | #define SATP_MODE __BITS(63,60) | 253 | #define SATP_MODE __BITS(63,60) | |
201 | #define SATP_MODE_SV39 8 | 254 | #define SATP_MODE_SV39 8 | |
202 | #define SATP_MODE_SV48 9 | 255 | #define SATP_MODE_SV48 9 | |
203 | #define SATP_ASID __BITS(59,44) | 256 | #define SATP_ASID __BITS(59,44) | |
204 | #define SATP_PPN __BITS(43,0) | 257 | #define SATP_PPN __BITS(43,0) | |
205 | #else | 258 | #else | |
206 | #define SATP_MODE __BIT(31) | 259 | #define SATP_MODE __BIT(31) | |
207 | #define SATP_MODE_SV32 1 | 260 | #define SATP_MODE_SV32 1 | |
208 | #define SATP_ASID __BITS(30,22) | 261 | #define SATP_ASID __BITS(30,22) | |
209 | #define SATP_PPN __BITS(21,0) | 262 | #define SATP_PPN __BITS(21,0) | |
210 | #endif | 263 | #endif | |
211 | 264 | |||
212 | static inline uint32_t | 265 | static inline uint32_t | |
213 | riscvreg_asid_read(void) | 266 | riscvreg_asid_read(void) | |
214 | { | 267 | { | |
215 | uintptr_t satp; | 268 | uintptr_t satp; | |
216 | __asm __volatile("csrr %0, satp" : "=r" (satp)); | 269 | __asm __volatile("csrr %0, satp" : "=r" (satp)); | |
217 | return __SHIFTOUT(satp, SATP_ASID); | 270 | return __SHIFTOUT(satp, SATP_ASID); | |
218 | } | 271 | } | |
219 | 272 | |||
220 | static inline void | 273 | static inline void | |
221 | riscvreg_asid_write(uint32_t asid) | 274 | riscvreg_asid_write(uint32_t asid) | |
222 | { | 275 | { | |
223 | uintptr_t satp; | 276 | uintptr_t satp; | |
224 | __asm __volatile("csrr %0, satp" : "=r" (satp)); | 277 | __asm __volatile("csrr %0, satp" : "=r" (satp)); | |
225 | satp &= ~SATP_ASID; | 278 | satp &= ~SATP_ASID; | |
226 | satp |= __SHIFTIN((uintptr_t)asid, SATP_ASID); | 279 | satp |= __SHIFTIN((uintptr_t)asid, SATP_ASID); | |
227 | __asm __volatile("csrw satp, %0" :: "r" (satp)); | 280 | __asm __volatile("csrw satp, %0" :: "r" (satp)); | |
228 | } | 281 | } | |
229 | 282 | |||
230 | #endif /* _RISCV_SYSREG_H_ */ | 283 | #endif /* _RISCV_SYSREG_H_ */ |
--- src/sys/arch/riscv/riscv/core_machdep.c 2019/11/20 19:37:52 1.3
+++ src/sys/arch/riscv/riscv/core_machdep.c 2020/11/04 06:56:56 1.4
@@ -1,105 +1,105 @@ | @@ -1,105 +1,105 @@ | |||
1 | /*- | 1 | /*- | |
2 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | 2 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | |
3 | * All rights reserved. | 3 | * All rights reserved. | |
4 | * | 4 | * | |
5 | * This code is derived from software contributed to The NetBSD Foundation | 5 | * This code is derived from software contributed to The NetBSD Foundation | |
6 | * by Matt Thomas of 3am Software Foundry. | 6 | * by Matt Thomas of 3am Software Foundry. | |
7 | * | 7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions | 9 | * modification, are permitted provided that the following conditions | |
10 | * are met: | 10 | * are met: | |
11 | * 1. Redistributions of source code must retain the above copyright | 11 | * 1. Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | 12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | 13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | 14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | 15 | * documentation and/or other materials provided with the distribution. | |
16 | * | 16 | * | |
17 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 17 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
18 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 18 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
19 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 19 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
20 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 20 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
21 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 21 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
22 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 22 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
25 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 25 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
27 | * POSSIBILITY OF SUCH DAMAGE. | 27 | * POSSIBILITY OF SUCH DAMAGE. | |
28 | */ | 28 | */ | |
29 | 29 | |||
30 | #include <sys/cdefs.h> | 30 | #include <sys/cdefs.h> | |
31 | 31 | |||
32 | #ifndef CORENAME | 32 | #ifndef CORENAME | |
33 | __RCSID("$NetBSD: core_machdep.c,v 1.3 2019/11/20 19:37:52 pgoyette Exp $"); | 33 | __RCSID("$NetBSD: core_machdep.c,v 1.4 2020/11/04 06:56:56 skrll Exp $"); | |
34 | #endif | 34 | #endif | |
35 | 35 | |||
36 | #include <sys/param.h> | 36 | #include <sys/param.h> | |
37 | #include <sys/systm.h> | 37 | #include <sys/systm.h> | |
38 | #include <sys/proc.h> | 38 | #include <sys/proc.h> | |
39 | #include <sys/core.h> | 39 | #include <sys/core.h> | |
40 | #include <sys/exec.h> | 40 | #include <sys/exec.h> | |
41 | #include <sys/cpu.h> | 41 | #include <sys/cpu.h> | |
42 | #include <sys/compat_stub.h> | 42 | #include <sys/compat_stub.h> | |
43 | 43 | |||
44 | #include <riscv/locore.h> | 44 | #include <riscv/locore.h> | |
45 | 45 | |||
46 | #ifndef CORENAME | 46 | #ifndef CORENAME | |
47 | #define CORENAME(n) n | 47 | #define CORENAME(n) n | |
48 | #endif | 48 | #endif | |
49 | #ifdef COREINC | 49 | #ifdef COREINC | |
50 | #include COREINC | 50 | #include COREINC | |
51 | #endif | 51 | #endif | |
52 | 52 | |||
53 | /* | 53 | /* | |
54 | * Dump the machine specific segment at the start of a core dump. | 54 | * Dump the machine specific segment at the start of a core dump. | |
55 | */ | 55 | */ | |
56 | int | 56 | int | |
57 | CORENAME(cpu_coredump)(struct lwp *l, struct coredump_iostate *iocookie, | 57 | CORENAME(cpu_coredump)(struct lwp *l, struct coredump_iostate *iocookie, | |
58 | struct CORENAME(core) *chdr) | 58 | struct CORENAME(core) *chdr) | |
59 | { | 59 | { | |
60 | int error; | 60 | int error; | |
61 | struct CORENAME(coreseg) cseg; | 61 | struct CORENAME(coreseg) cseg; | |
62 | struct cpustate { | 62 | struct cpustate { | |
63 | struct CORENAME(trapframe) tf; | 63 | struct CORENAME(trapframe) tf; | |
64 | struct fpreg fpregs; | 64 | struct fpreg fpregs; | |
65 | } cpustate; | 65 | } cpustate; | |
66 | 66 | |||
67 | if (iocookie == NULL) { | 67 | if (iocookie == NULL) { | |
68 | CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0); | 68 | CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0); | |
69 | chdr->c_hdrsize = ALIGN(sizeof(*chdr)); | 69 | chdr->c_hdrsize = ALIGN(sizeof(*chdr)); | |
70 | chdr->c_seghdrsize = ALIGN(sizeof(cseg)); | 70 | chdr->c_seghdrsize = ALIGN(sizeof(cseg)); | |
71 | chdr->c_cpusize = sizeof(struct cpustate); | 71 | chdr->c_cpusize = sizeof(struct cpustate); | |
72 | chdr->c_nseg++; | 72 | chdr->c_nseg++; | |
73 | return 0; | 73 | return 0; | |
74 | } | 74 | } | |
75 | 75 | |||
76 | pcu_save_all(l); | 76 | pcu_save_all(l); | |
77 | 77 | |||
78 | // Can't use structure assignment if this is doing COMPAT_NETBSD32 | 78 | // Can't use structure assignment if this is doing COMPAT_NETBSD32 | |
79 | const struct trapframe * const tf = l->l_md.md_utf; | 79 | const struct trapframe * const tf = l->l_md.md_utf; | |
80 | for (size_t i = _X_RA; i <= _X_GP; i++) { | 80 | for (size_t i = _X_RA; i <= _X_GP; i++) { | |
81 | cpustate.tf.tf_reg[i] = tf->tf_reg[i]; | 81 | cpustate.tf.tf_reg[i] = tf->tf_reg[i]; | |
82 | } | 82 | } | |
83 | cpustate.tf.tf_pc = tf->tf_pc; | 83 | cpustate.tf.tf_pc = tf->tf_pc; | |
84 | cpustate.tf.tf_badaddr = tf->tf_badaddr; | 84 | cpustate.tf.tf_tval = tf->tf_tval; | |
85 | cpustate.tf.tf_cause = tf->tf_cause; | 85 | cpustate.tf.tf_cause = tf->tf_cause; | |
86 | cpustate.tf.tf_sr = tf->tf_sr; | 86 | cpustate.tf.tf_sr = tf->tf_sr; | |
87 | if (fpu_valid_p(l)) { | 87 | if (fpu_valid_p(l)) { | |
88 | cpustate.fpregs = ((struct pcb *)lwp_getpcb(l))->pcb_fpregs; | 88 | cpustate.fpregs = ((struct pcb *)lwp_getpcb(l))->pcb_fpregs; | |
89 | } else { | 89 | } else { | |
90 | memset(&cpustate.fpregs, 0, sizeof(cpustate.fpregs)); | 90 | memset(&cpustate.fpregs, 0, sizeof(cpustate.fpregs)); | |
91 | } | 91 | } | |
92 | CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU); | 92 | CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU); | |
93 | cseg.c_addr = 0; | 93 | cseg.c_addr = 0; | |
94 | cseg.c_size = chdr->c_cpusize; | 94 | cseg.c_size = chdr->c_cpusize; | |
95 | 95 | |||
96 | MODULE_HOOK_CALL(coredump_write_hook, (iocookie, UIO_SYSSPACE, &cseg, | 96 | MODULE_HOOK_CALL(coredump_write_hook, (iocookie, UIO_SYSSPACE, &cseg, | |
97 | chdr->c_seghdrsize), ENOSYS, error); | 97 | chdr->c_seghdrsize), ENOSYS, error); | |
98 | if (error) | 98 | if (error) | |
99 | return error; | 99 | return error; | |
100 | 100 | |||
101 | MODULE_HOOK_CALL(coredump_write_hook, (iocookie, UIO_SYSSPACE, | 101 | MODULE_HOOK_CALL(coredump_write_hook, (iocookie, UIO_SYSSPACE, | |
102 | &cpustate, chdr->c_cpusize), ENOSYS, error); | 102 | &cpustate, chdr->c_cpusize), ENOSYS, error); | |
103 | 103 | |||
104 | return error; | 104 | return error; | |
105 | } | 105 | } |
--- src/sys/arch/riscv/riscv/db_machdep.c 2020/03/14 16:12:16 1.4
+++ src/sys/arch/riscv/riscv/db_machdep.c 2020/11/04 06:56:56 1.5
@@ -1,246 +1,246 @@ | @@ -1,246 +1,246 @@ | |||
1 | /*- | 1 | /*- | |
2 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | 2 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | |
3 | * All rights reserved. | 3 | * All rights reserved. | |
4 | * | 4 | * | |
5 | * This code is derived from software contributed to The NetBSD Foundation | 5 | * This code is derived from software contributed to The NetBSD Foundation | |
6 | * by Matt Thomas of 3am Software Foundry. | 6 | * by Matt Thomas of 3am Software Foundry. | |
7 | * | 7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions | 9 | * modification, are permitted provided that the following conditions | |
10 | * are met: | 10 | * are met: | |
11 | * 1. Redistributions of source code must retain the above copyright | 11 | * 1. Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | 12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | 13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | 14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | 15 | * documentation and/or other materials provided with the distribution. | |
16 | * | 16 | * | |
17 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 17 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
18 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 18 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
19 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 19 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
20 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 20 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
21 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 21 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
22 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 22 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
25 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 25 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
27 | * POSSIBILITY OF SUCH DAMAGE. | 27 | * POSSIBILITY OF SUCH DAMAGE. | |
28 | */ | 28 | */ | |
29 | 29 | |||
30 | #include <sys/cdefs.h> | 30 | #include <sys/cdefs.h> | |
31 | 31 | |||
32 | __RCSID("$NetBSD: db_machdep.c,v 1.4 2020/03/14 16:12:16 skrll Exp $"); | 32 | __RCSID("$NetBSD: db_machdep.c,v 1.5 2020/11/04 06:56:56 skrll Exp $"); | |
33 | 33 | |||
34 | #include <sys/param.h> | 34 | #include <sys/param.h> | |
35 | 35 | |||
36 | #include <riscv/insn.h> | 36 | #include <riscv/insn.h> | |
37 | #include <riscv/db_machdep.h> | 37 | #include <riscv/db_machdep.h> | |
38 | 38 | |||
39 | #include <ddb/db_access.h> | 39 | #include <ddb/db_access.h> | |
40 | #include <ddb/db_interface.h> | 40 | #include <ddb/db_interface.h> | |
41 | #include <ddb/db_extern.h> | 41 | #include <ddb/db_extern.h> | |
42 | #include <ddb/db_variables.h> | 42 | #include <ddb/db_variables.h> | |
43 | 43 | |||
44 | int db_active = 0; | 44 | int db_active = 0; | |
45 | 45 | |||
46 | static int db_rw_ddbreg(const struct db_variable *, db_expr_t *, int); | 46 | static int db_rw_ddbreg(const struct db_variable *, db_expr_t *, int); | |
47 | 47 | |||
48 | const struct db_variable db_regs[] = { | 48 | const struct db_variable db_regs[] = { | |
49 | { "ra", (void *)offsetof(struct trapframe, tf_ra), db_rw_ddbreg, NULL }, | 49 | { "ra", (void *)offsetof(struct trapframe, tf_ra), db_rw_ddbreg, NULL }, | |
50 | { "sp", (void *)offsetof(struct trapframe, tf_sp), db_rw_ddbreg, NULL }, | 50 | { "sp", (void *)offsetof(struct trapframe, tf_sp), db_rw_ddbreg, NULL }, | |
51 | { "gp", (void *)offsetof(struct trapframe, tf_gp), db_rw_ddbreg, NULL }, | 51 | { "gp", (void *)offsetof(struct trapframe, tf_gp), db_rw_ddbreg, NULL }, | |
52 | { "tp", (void *)offsetof(struct trapframe, tf_tp), db_rw_ddbreg, NULL }, | 52 | { "tp", (void *)offsetof(struct trapframe, tf_tp), db_rw_ddbreg, NULL }, | |
53 | { "s0", (void *)offsetof(struct trapframe, tf_s0), db_rw_ddbreg, NULL }, | 53 | { "s0", (void *)offsetof(struct trapframe, tf_s0), db_rw_ddbreg, NULL }, | |
54 | { "s1", (void *)offsetof(struct trapframe, tf_s1), db_rw_ddbreg, NULL }, | 54 | { "s1", (void *)offsetof(struct trapframe, tf_s1), db_rw_ddbreg, NULL }, | |
55 | { "s2", (void *)offsetof(struct trapframe, tf_s2), db_rw_ddbreg, NULL }, | 55 | { "s2", (void *)offsetof(struct trapframe, tf_s2), db_rw_ddbreg, NULL }, | |
56 | { "s3", (void *)offsetof(struct trapframe, tf_s3), db_rw_ddbreg, NULL }, | 56 | { "s3", (void *)offsetof(struct trapframe, tf_s3), db_rw_ddbreg, NULL }, | |
57 | { "s4", (void *)offsetof(struct trapframe, tf_s4), db_rw_ddbreg, NULL }, | 57 | { "s4", (void *)offsetof(struct trapframe, tf_s4), db_rw_ddbreg, NULL }, | |
58 | { "s5", (void *)offsetof(struct trapframe, tf_s5), db_rw_ddbreg, NULL }, | 58 | { "s5", (void *)offsetof(struct trapframe, tf_s5), db_rw_ddbreg, NULL }, | |
59 | { "s6", (void *)offsetof(struct trapframe, tf_s6), db_rw_ddbreg, NULL }, | 59 | { "s6", (void *)offsetof(struct trapframe, tf_s6), db_rw_ddbreg, NULL }, | |
60 | { "s7", (void *)offsetof(struct trapframe, tf_s7), db_rw_ddbreg, NULL }, | 60 | { "s7", (void *)offsetof(struct trapframe, tf_s7), db_rw_ddbreg, NULL }, | |
61 | { "s8", (void *)offsetof(struct trapframe, tf_s8), db_rw_ddbreg, NULL }, | 61 | { "s8", (void *)offsetof(struct trapframe, tf_s8), db_rw_ddbreg, NULL }, | |
62 | { "s9", (void *)offsetof(struct trapframe, tf_s9), db_rw_ddbreg, NULL }, | 62 | { "s9", (void *)offsetof(struct trapframe, tf_s9), db_rw_ddbreg, NULL }, | |
63 | { "s10", (void *)offsetof(struct trapframe, tf_s10), db_rw_ddbreg, NULL }, | 63 | { "s10", (void *)offsetof(struct trapframe, tf_s10), db_rw_ddbreg, NULL }, | |
64 | { "s11", (void *)offsetof(struct trapframe, tf_s11), db_rw_ddbreg, NULL }, | 64 | { "s11", (void *)offsetof(struct trapframe, tf_s11), db_rw_ddbreg, NULL }, | |
65 | { "a0", (void *)offsetof(struct trapframe, tf_a0), db_rw_ddbreg, NULL }, | 65 | { "a0", (void *)offsetof(struct trapframe, tf_a0), db_rw_ddbreg, NULL }, | |
66 | { "a1", (void *)offsetof(struct trapframe, tf_a1), db_rw_ddbreg, NULL }, | 66 | { "a1", (void *)offsetof(struct trapframe, tf_a1), db_rw_ddbreg, NULL }, | |
67 | { "a2", (void *)offsetof(struct trapframe, tf_a2), db_rw_ddbreg, NULL }, | 67 | { "a2", (void *)offsetof(struct trapframe, tf_a2), db_rw_ddbreg, NULL }, | |
68 | { "a3", (void *)offsetof(struct trapframe, tf_a3), db_rw_ddbreg, NULL }, | 68 | { "a3", (void *)offsetof(struct trapframe, tf_a3), db_rw_ddbreg, NULL }, | |
69 | { "a4", (void *)offsetof(struct trapframe, tf_a4), db_rw_ddbreg, NULL }, | 69 | { "a4", (void *)offsetof(struct trapframe, tf_a4), db_rw_ddbreg, NULL }, | |
70 | { "a5", (void *)offsetof(struct trapframe, tf_a5), db_rw_ddbreg, NULL }, | 70 | { "a5", (void *)offsetof(struct trapframe, tf_a5), db_rw_ddbreg, NULL }, | |
71 | { "a6", (void *)offsetof(struct trapframe, tf_a6), db_rw_ddbreg, NULL }, | 71 | { "a6", (void *)offsetof(struct trapframe, tf_a6), db_rw_ddbreg, NULL }, | |
72 | { "a7", (void *)offsetof(struct trapframe, tf_a7), db_rw_ddbreg, NULL }, | 72 | { "a7", (void *)offsetof(struct trapframe, tf_a7), db_rw_ddbreg, NULL }, | |
73 | { "t0", (void *)offsetof(struct trapframe, tf_t0), db_rw_ddbreg, NULL }, | 73 | { "t0", (void *)offsetof(struct trapframe, tf_t0), db_rw_ddbreg, NULL }, | |
74 | { "t1", (void *)offsetof(struct trapframe, tf_t1), db_rw_ddbreg, NULL }, | 74 | { "t1", (void *)offsetof(struct trapframe, tf_t1), db_rw_ddbreg, NULL }, | |
75 | { "t2", (void *)offsetof(struct trapframe, tf_t2), db_rw_ddbreg, NULL }, | 75 | { "t2", (void *)offsetof(struct trapframe, tf_t2), db_rw_ddbreg, NULL }, | |
76 | { "t3", (void *)offsetof(struct trapframe, tf_t3), db_rw_ddbreg, NULL }, | 76 | { "t3", (void *)offsetof(struct trapframe, tf_t3), db_rw_ddbreg, NULL }, | |
77 | { "t4", (void *)offsetof(struct trapframe, tf_t4), db_rw_ddbreg, NULL }, | 77 | { "t4", (void *)offsetof(struct trapframe, tf_t4), db_rw_ddbreg, NULL }, | |
78 | { "t5", (void *)offsetof(struct trapframe, tf_t5), db_rw_ddbreg, NULL }, | 78 | { "t5", (void *)offsetof(struct trapframe, tf_t5), db_rw_ddbreg, NULL }, | |
79 | { "t6", (void *)offsetof(struct trapframe, tf_t6), db_rw_ddbreg, NULL }, | 79 | { "t6", (void *)offsetof(struct trapframe, tf_t6), db_rw_ddbreg, NULL }, | |
80 | { "pc", (void *)offsetof(struct trapframe, tf_pc), db_rw_ddbreg, NULL }, | 80 | { "pc", (void *)offsetof(struct trapframe, tf_pc), db_rw_ddbreg, NULL }, | |
81 | { "status", (void *)offsetof(struct trapframe, tf_sr), db_rw_ddbreg, "i" }, | 81 | { "status", (void *)offsetof(struct trapframe, tf_sr), db_rw_ddbreg, "i" }, | |
82 | { "cause", (void *)offsetof(struct trapframe, tf_cause), db_rw_ddbreg, "i" }, | 82 | { "cause", (void *)offsetof(struct trapframe, tf_cause), db_rw_ddbreg, "i" }, | |
83 | { "badaddr", (void *)offsetof(struct trapframe, tf_badaddr), db_rw_ddbreg, NULL }, | 83 | { "tval", (void *)offsetof(struct trapframe, tf_tval), db_rw_ddbreg, NULL }, | |
84 | }; | 84 | }; | |
85 | const struct db_variable * const db_eregs = db_regs + __arraycount(db_regs); | 85 | const struct db_variable * const db_eregs = db_regs + __arraycount(db_regs); | |
86 | 86 | |||
87 | int | 87 | int | |
88 | db_rw_ddbreg(const struct db_variable *vp, db_expr_t *valp, int rw) | 88 | db_rw_ddbreg(const struct db_variable *vp, db_expr_t *valp, int rw) | |
89 | { | 89 | { | |
90 | struct trapframe * const tf = curcpu()->ci_ddb_regs; | 90 | struct trapframe * const tf = curcpu()->ci_ddb_regs; | |
91 | KASSERT(db_regs <= vp && vp < db_regs + __arraycount(db_regs)); | 91 | KASSERT(db_regs <= vp && vp < db_regs + __arraycount(db_regs)); | |
92 | const uintptr_t addr = (uintptr_t)tf + (uintptr_t)vp->valuep; | 92 | const uintptr_t addr = (uintptr_t)tf + (uintptr_t)vp->valuep; | |
93 | if (vp->modif != NULL && vp->modif[0] == 'i') { | 93 | if (vp->modif != NULL && vp->modif[0] == 'i') { | |
94 | if (rw == DB_VAR_GET) { | 94 | if (rw == DB_VAR_GET) { | |
95 | *valp = *(const uint32_t *)addr; | 95 | *valp = *(const uint32_t *)addr; | |
96 | } else { | 96 | } else { | |
97 | *(uint32_t *)addr = *valp; | 97 | *(uint32_t *)addr = *valp; | |
98 | } | 98 | } | |
99 | } else { | 99 | } else { | |
100 | if (rw == DB_VAR_GET) { | 100 | if (rw == DB_VAR_GET) { | |
101 | *valp = *(const register_t *)addr; | 101 | *valp = *(const register_t *)addr; | |
102 | } else { | 102 | } else { | |
103 | *(register_t *)addr = *valp; | 103 | *(register_t *)addr = *valp; | |
104 | } | 104 | } | |
105 | } | 105 | } | |
106 | return 0; | 106 | return 0; | |
107 | } | 107 | } | |
108 | 108 | |||
109 | // These are for the software implementation of single-stepping. | 109 | // These are for the software implementation of single-stepping. | |
110 | // | 110 | // | |
111 | // returns true if the instruction might branch | 111 | // returns true if the instruction might branch | |
112 | bool | 112 | bool | |
113 | inst_branch(uint32_t insn) | 113 | inst_branch(uint32_t insn) | |
114 | { | 114 | { | |
115 | return OPCODE_P(insn, BRANCH); | 115 | return OPCODE_P(insn, BRANCH); | |
116 | } | 116 | } | |
117 | 117 | |||
118 | // returns true if the instruction might branch | 118 | // returns true if the instruction might branch | |
119 | bool | 119 | bool | |
120 | inst_call(uint32_t insn) | 120 | inst_call(uint32_t insn) | |
121 | { | 121 | { | |
122 | const union riscv_insn ri = { .val = insn }; | 122 | const union riscv_insn ri = { .val = insn }; | |
123 | return (OPCODE_P(insn, JAL) && ri.type_uj.uj_rd == 1) | 123 | return (OPCODE_P(insn, JAL) && ri.type_uj.uj_rd == 1) | |
124 | || (OPCODE_P(insn, JALR) && ri.type_i.i_rd == 1); | 124 | || (OPCODE_P(insn, JALR) && ri.type_i.i_rd == 1); | |
125 | } | 125 | } | |
126 | 126 | |||
127 | // return true if the instruction is an unconditional branch/jump. | 127 | // return true if the instruction is an unconditional branch/jump. | |
128 | bool | 128 | bool | |
129 | inst_unconditional_flow_transfer(uint32_t insn) | 129 | inst_unconditional_flow_transfer(uint32_t insn) | |
130 | { | 130 | { | |
131 | // we should check for beq xN,xN but why use that instead of jal x0,... | 131 | // we should check for beq xN,xN but why use that instead of jal x0,... | |
132 | return OPCODE_P(insn, JAL) || OPCODE_P(insn, JALR); | 132 | return OPCODE_P(insn, JAL) || OPCODE_P(insn, JALR); | |
133 | } | 133 | } | |
134 | 134 | |||
135 | bool | 135 | bool | |
136 | inst_return(uint32_t insn) | 136 | inst_return(uint32_t insn) | |
137 | { | 137 | { | |
138 | const union riscv_insn ri = { .val = insn }; | 138 | const union riscv_insn ri = { .val = insn }; | |
139 | return OPCODE_P(insn, JALR) && ri.type_i.i_rs1 == 1; | 139 | return OPCODE_P(insn, JALR) && ri.type_i.i_rs1 == 1; | |
140 | } | 140 | } | |
141 | 141 | |||
142 | bool | 142 | bool | |
143 | inst_load(uint32_t insn) | 143 | inst_load(uint32_t insn) | |
144 | { | 144 | { | |
145 | return OPCODE_P(insn, LOAD) || OPCODE_P(insn, LOADFP); | 145 | return OPCODE_P(insn, LOAD) || OPCODE_P(insn, LOADFP); | |
146 | } | 146 | } | |
147 | 147 | |||
148 | bool | 148 | bool | |
149 | inst_store(uint32_t insn) | 149 | inst_store(uint32_t insn) | |
150 | { | 150 | { | |
151 | return OPCODE_P(insn, STORE) || OPCODE_P(insn, STOREFP); | 151 | return OPCODE_P(insn, STORE) || OPCODE_P(insn, STOREFP); | |
152 | } | 152 | } | |
153 | 153 | |||
154 | static inline register_t | 154 | static inline register_t | |
155 | get_reg_value(const db_regs_t *tf, u_int regno) | 155 | get_reg_value(const db_regs_t *tf, u_int regno) | |
156 | { | 156 | { | |
157 | return (regno == 0 ? 0 : tf->tf_reg[regno - 1]); | 157 | return (regno == 0 ? 0 : tf->tf_reg[regno - 1]); | |
158 | } | 158 | } | |
159 | 159 | |||
160 | db_addr_t | 160 | db_addr_t | |
161 | branch_taken(uint32_t insn, db_addr_t pc, db_regs_t *tf) | 161 | branch_taken(uint32_t insn, db_addr_t pc, db_regs_t *tf) | |
162 | { | 162 | { | |
163 | const union riscv_insn i = { .val = insn }; | 163 | const union riscv_insn i = { .val = insn }; | |
164 | intptr_t displacement; | 164 | intptr_t displacement; | |
165 | 165 | |||
166 | if (OPCODE_P(insn, JALR)) { | 166 | if (OPCODE_P(insn, JALR)) { | |
167 | return i.type_i.i_imm11to0 + get_reg_value(tf, i.type_i.i_rs1); | 167 | return i.type_i.i_imm11to0 + get_reg_value(tf, i.type_i.i_rs1); | |
168 | } | 168 | } | |
169 | if (OPCODE_P(insn, JAL)) { | 169 | if (OPCODE_P(insn, JAL)) { | |
170 | displacement = i.type_uj.uj_imm20 << 20; | 170 | displacement = i.type_uj.uj_imm20 << 20; | |
171 | displacement |= i.type_uj.uj_imm19to12 << 12; | 171 | displacement |= i.type_uj.uj_imm19to12 << 12; | |
172 | displacement |= i.type_uj.uj_imm11 << 11; | 172 | displacement |= i.type_uj.uj_imm11 << 11; | |
173 | displacement |= i.type_uj.uj_imm10to1 << 1; | 173 | displacement |= i.type_uj.uj_imm10to1 << 1; | |
174 | } else { | 174 | } else { | |
175 | KASSERT(OPCODE_P(insn, BRANCH)); | 175 | KASSERT(OPCODE_P(insn, BRANCH)); | |
176 | register_t rs1 = get_reg_value(tf, i.type_sb.sb_rs1); | 176 | register_t rs1 = get_reg_value(tf, i.type_sb.sb_rs1); | |
177 | register_t rs2 = get_reg_value(tf, i.type_sb.sb_rs2); | 177 | register_t rs2 = get_reg_value(tf, i.type_sb.sb_rs2); | |
178 | bool branch_p; // = false; | 178 | bool branch_p; // = false; | |
179 | switch (i.type_sb.sb_funct3 & 0b110U) { | 179 | switch (i.type_sb.sb_funct3 & 0b110U) { | |
180 | case 0b000U: | 180 | case 0b000U: | |
181 | branch_p = (rs1 == rs2); | 181 | branch_p = (rs1 == rs2); | |
182 | break; | 182 | break; | |
183 | case 0b010U: | 183 | case 0b010U: | |
184 | branch_p = ((rs1 & (1 << (i.type_sb.sb_rs2))) != 0); | 184 | branch_p = ((rs1 & (1 << (i.type_sb.sb_rs2))) != 0); | |
185 | break; | 185 | break; | |
186 | case 0b100U: | 186 | case 0b100U: | |
187 | branch_p = (rs1 < rs2); | 187 | branch_p = (rs1 < rs2); | |
188 | break; | 188 | break; | |
189 | default: // stupid gcc | 189 | default: // stupid gcc | |
190 | case 0b110U: | 190 | case 0b110U: | |
191 | branch_p = ((uregister_t)rs1 < (uregister_t)rs2); | 191 | branch_p = ((uregister_t)rs1 < (uregister_t)rs2); | |
192 | break; | 192 | break; | |
193 | } | 193 | } | |
194 | 194 | |||
195 | if (i.type_sb.sb_funct3 & 1) | 195 | if (i.type_sb.sb_funct3 & 1) | |
196 | branch_p = !branch_p; | 196 | branch_p = !branch_p; | |
197 | 197 | |||
198 | if (!branch_p) { | 198 | if (!branch_p) { | |
199 | displacement = 4; | 199 | displacement = 4; | |
200 | } else { | 200 | } else { | |
201 | displacement = i.type_sb.sb_imm12 << 12; | 201 | displacement = i.type_sb.sb_imm12 << 12; | |
202 | displacement |= i.type_sb.sb_imm11 << 11; | 202 | displacement |= i.type_sb.sb_imm11 << 11; | |
203 | displacement |= i.type_sb.sb_imm10to5 << 5; | 203 | displacement |= i.type_sb.sb_imm10to5 << 5; | |
204 | displacement |= i.type_sb.sb_imm4to1 << 1; | 204 | displacement |= i.type_sb.sb_imm4to1 << 1; | |
205 | } | 205 | } | |
206 | } | 206 | } | |
207 | 207 | |||
208 | return pc + displacement; | 208 | return pc + displacement; | |
209 | } | 209 | } | |
210 | 210 | |||
211 | db_addr_t | 211 | db_addr_t | |
212 | next_instr_address(db_addr_t pc, bool bdslot_p) | 212 | next_instr_address(db_addr_t pc, bool bdslot_p) | |
213 | { | 213 | { | |
214 | return pc + (bdslot_p ? 0 : 4); | 214 | return pc + (bdslot_p ? 0 : 4); | |
215 | } | 215 | } | |
216 | 216 | |||
217 | void | 217 | void | |
218 | db_read_bytes(db_addr_t addr, size_t len, char *data) | 218 | db_read_bytes(db_addr_t addr, size_t len, char *data) | |
219 | { | 219 | { | |
220 | const char *src = (char *)addr; | 220 | const char *src = (char *)addr; | |
221 | 221 | |||
222 | while (len--) { | 222 | while (len--) { | |
223 | *data++ = *src++; | 223 | *data++ = *src++; | |
224 | } | 224 | } | |
225 | } | 225 | } | |
226 | 226 | |||
227 | /* | 227 | /* | |
228 | * Write bytes to kernel address space for debugger. | 228 | * Write bytes to kernel address space for debugger. | |
229 | */ | 229 | */ | |
230 | void | 230 | void | |
231 | db_write_bytes(vaddr_t addr, size_t len, const char *data) | 231 | db_write_bytes(vaddr_t addr, size_t len, const char *data) | |
232 | { | 232 | { | |
233 | if (len == 8) { | 233 | if (len == 8) { | |
234 | *(uint64_t *)addr = *(const uint64_t *) data; | 234 | *(uint64_t *)addr = *(const uint64_t *) data; | |
235 | } else if (len == 4) { | 235 | } else if (len == 4) { | |
236 | *(uint32_t *)addr = *(const uint32_t *) data; | 236 | *(uint32_t *)addr = *(const uint32_t *) data; | |
237 | } else if (len == 2) { | 237 | } else if (len == 2) { | |
238 | *(uint16_t *)addr = *(const uint16_t *) data; | 238 | *(uint16_t *)addr = *(const uint16_t *) data; | |
239 | } else { | 239 | } else { | |
240 | *(uint8_t *)addr = *(const uint8_t *) data; | 240 | *(uint8_t *)addr = *(const uint8_t *) data; | |
241 | } | 241 | } | |
242 | __asm("fence rw,rw; fence.i"); | 242 | __asm("fence rw,rw; fence.i"); | |
243 | } | 243 | } | |
244 | 244 | |||
245 | 245 | |||
246 | 246 |
--- src/sys/arch/riscv/riscv/genassym.cf 2020/01/08 17:38:42 1.7
+++ src/sys/arch/riscv/riscv/genassym.cf 2020/11/04 06:56:56 1.8
@@ -1,198 +1,199 @@ | @@ -1,198 +1,199 @@ | |||
1 | # $NetBSD: genassym.cf,v 1.7 2020/01/08 17:38:42 ad Exp $ | 1 | # $NetBSD: genassym.cf,v 1.8 2020/11/04 06:56:56 skrll Exp $ | |
2 | #- | 2 | #- | |
3 | # Copyright (c) 2014 The NetBSD Foundation, Inc. | 3 | # Copyright (c) 2014 The NetBSD Foundation, Inc. | |
4 | # All rights reserved. | 4 | # All rights reserved. | |
5 | # | 5 | # | |
6 | # This code is derived from software contributed to The NetBSD Foundation | 6 | # This code is derived from software contributed to The NetBSD Foundation | |
7 | # by Matt Thomas of 3am Software Foundry. | 7 | # by Matt Thomas of 3am Software Foundry. | |
8 | # | 8 | # | |
9 | # Redistribution and use in source and binary forms, with or without | 9 | # Redistribution and use in source and binary forms, with or without | |
10 | # modification, are permitted provided that the following conditions | 10 | # modification, are permitted provided that the following conditions | |
11 | # are met: | 11 | # are met: | |
12 | # 1. Redistributions of source code must retain the above copyright | 12 | # 1. Redistributions of source code must retain the above copyright | |
13 | # notice, this list of conditions and the following disclaimer. | 13 | # notice, this list of conditions and the following disclaimer. | |
14 | # 2. Redistributions in binary form must reproduce the above copyright | 14 | # 2. Redistributions in binary form must reproduce the above copyright | |
15 | # notice, this list of conditions and the following disclaimer in the | 15 | # notice, this list of conditions and the following disclaimer in the | |
16 | # documentation and/or other materials provided with the distribution. | 16 | # documentation and/or other materials provided with the distribution. | |
17 | # | 17 | # | |
18 | # THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 18 | # THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
19 | # ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 19 | # ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
20 | # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 20 | # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
21 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 21 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
22 | # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 22 | # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
23 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 23 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
24 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 24 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
25 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 25 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
26 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 26 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
27 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 27 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
28 | # POSSIBILITY OF SUCH DAMAGE. | 28 | # POSSIBILITY OF SUCH DAMAGE. | |
29 | #+ | 29 | #+ | |
30 | 30 | |||
31 | quote #define __MUTEX_PRIVATE | 31 | quote #define __MUTEX_PRIVATE | |
32 | quote #define __RWLOCK_PRIVATE | 32 | quote #define __RWLOCK_PRIVATE | |
33 | quote #define __INTR_PRIVATE | 33 | quote #define __INTR_PRIVATE | |
34 | quote #define __PMAP_PRIVATE | 34 | quote #define __PMAP_PRIVATE | |
35 | 35 | |||
36 | include <sys/param.h> | 36 | include <sys/param.h> | |
37 | include <sys/bitops.h> | 37 | include <sys/bitops.h> | |
38 | include <sys/cpu.h> | 38 | include <sys/cpu.h> | |
39 | include <sys/intr.h> | 39 | include <sys/intr.h> | |
40 | include <sys/lwp.h> | 40 | include <sys/lwp.h> | |
41 | include <sys/mutex.h> | 41 | include <sys/mutex.h> | |
42 | include <sys/proc.h> | 42 | include <sys/proc.h> | |
43 | include <sys/rwlock.h> | 43 | include <sys/rwlock.h> | |
44 | 44 | |||
45 | include <uvm/uvm_extern.h> | 45 | include <uvm/uvm_extern.h> | |
46 | 46 | |||
47 | include <riscv/locore.h> | 47 | include <riscv/locore.h> | |
48 | include <riscv/sysreg.h> | 48 | include <riscv/sysreg.h> | |
49 | 49 | |||
50 | define SR_IM SR_IM | 50 | #define SR_IM SR_IM | |
51 | define SR_IM_LSHIFT __SIZEOF_LONG__ * 8 - (ilog2(SR_IM) + 1) | 51 | #define SR_IM_LSHIFT __SIZEOF_LONG__ * 8 - (ilog2(SR_IM) + 1) | |
52 | define SR_IM_RSHIFT ilog2(__LOWEST_SET_BIT(SR_IM)) | 52 | #define SR_IM_RSHIFT ilog2(__LOWEST_SET_BIT(SR_IM)) | |
53 | define SR_VM SR_VM | 53 | #define SR_VM SR_VM | |
54 | define SR_U64 SR_U64 | 54 | #define SR_U64 SR_U64 | |
55 | define SR_S64 SR_S64 | 55 | #define SR_S64 SR_S64 | |
56 | define SR_EF SR_EF | 56 | #define SR_EF SR_EF | |
57 | define SR_PEI SR_PEI | 57 | #define SR_PEI SR_PEI | |
58 | define SR_EI SR_EI | 58 | #define SR_EI SR_EI | |
59 | define SR_PS SR_PS | 59 | #define SR_PS SR_PS | |
60 | define SR_S SR_S | 60 | #define SR_S SR_S | |
61 | define SR_SIE SR_SIE | |||
61 | 62 | |||
62 | define CAUSE_SYSCALL CAUSE_SYSCALL | 63 | define CAUSE_SYSCALL CAUSE_SYSCALL | |
63 | 64 | |||
64 | define IPL_HIGH IPL_HIGH | 65 | define IPL_HIGH IPL_HIGH | |
65 | define IPL_DDB IPL_DDB | 66 | define IPL_DDB IPL_DDB | |
66 | define IPL_SCHED IPL_SCHED | 67 | define IPL_SCHED IPL_SCHED | |
67 | define IPL_VM IPL_VM | 68 | define IPL_VM IPL_VM | |
68 | define IPL_SOFTSERIAL IPL_SOFTSERIAL | 69 | define IPL_SOFTSERIAL IPL_SOFTSERIAL | |
69 | define IPL_SOFTNET IPL_SOFTNET | 70 | define IPL_SOFTNET IPL_SOFTNET | |
70 | define IPL_SOFTBIO IPL_SOFTBIO | 71 | define IPL_SOFTBIO IPL_SOFTBIO | |
71 | define IPL_SOFTCLOCK IPL_SOFTCLOCK | 72 | define IPL_SOFTCLOCK IPL_SOFTCLOCK | |
72 | define IPL_NONE IPL_NONE | 73 | define IPL_NONE IPL_NONE | |
73 | 74 | |||
74 | #define CPU_MAXNUM CPU_MAXNUM | 75 | #define CPU_MAXNUM CPU_MAXNUM | |
75 | 76 | |||
76 | define TF_LEN roundup(sizeof(struct trapframe), STACK_ALIGNBYTES+1) | 77 | define TF_LEN roundup(sizeof(struct trapframe), STACK_ALIGNBYTES+1) | |
77 | define TF_RA offsetof(struct trapframe, tf_reg[_X_RA]) | 78 | define TF_RA offsetof(struct trapframe, tf_reg[_X_RA]) | |
78 | define TF_S0 offsetof(struct trapframe, tf_reg[_X_S0]) | 79 | define TF_S0 offsetof(struct trapframe, tf_reg[_X_S0]) | |
79 | define TF_S1 offsetof(struct trapframe, tf_reg[_X_S1]) | 80 | define TF_S1 offsetof(struct trapframe, tf_reg[_X_S1]) | |
80 | define TF_S2 offsetof(struct trapframe, tf_reg[_X_S2]) | 81 | define TF_S2 offsetof(struct trapframe, tf_reg[_X_S2]) | |
81 | define TF_S3 offsetof(struct trapframe, tf_reg[_X_S3]) | 82 | define TF_S3 offsetof(struct trapframe, tf_reg[_X_S3]) | |
82 | define TF_S4 offsetof(struct trapframe, tf_reg[_X_S4]) | 83 | define TF_S4 offsetof(struct trapframe, tf_reg[_X_S4]) | |
83 | define TF_S5 offsetof(struct trapframe, tf_reg[_X_S5]) | 84 | define TF_S5 offsetof(struct trapframe, tf_reg[_X_S5]) | |
84 | define TF_S6 offsetof(struct trapframe, tf_reg[_X_S6]) | 85 | define TF_S6 offsetof(struct trapframe, tf_reg[_X_S6]) | |
85 | define TF_S7 offsetof(struct trapframe, tf_reg[_X_S7]) | 86 | define TF_S7 offsetof(struct trapframe, tf_reg[_X_S7]) | |
86 | define TF_S8 offsetof(struct trapframe, tf_reg[_X_S8]) | 87 | define TF_S8 offsetof(struct trapframe, tf_reg[_X_S8]) | |
87 | define TF_S9 offsetof(struct trapframe, tf_reg[_X_S9]) | 88 | define TF_S9 offsetof(struct trapframe, tf_reg[_X_S9]) | |
88 | define TF_S10 offsetof(struct trapframe, tf_reg[_X_S10]) | 89 | define TF_S10 offsetof(struct trapframe, tf_reg[_X_S10]) | |
89 | define TF_S11 offsetof(struct trapframe, tf_reg[_X_S11]) | 90 | define TF_S11 offsetof(struct trapframe, tf_reg[_X_S11]) | |
90 | define TF_SP offsetof(struct trapframe, tf_reg[_X_SP]) | 91 | define TF_SP offsetof(struct trapframe, tf_reg[_X_SP]) | |
91 | define TF_TP offsetof(struct trapframe, tf_reg[_X_TP]) | 92 | define TF_TP offsetof(struct trapframe, tf_reg[_X_TP]) | |
92 | define TF_A0 offsetof(struct trapframe, tf_reg[_X_A0]) | 93 | define TF_A0 offsetof(struct trapframe, tf_reg[_X_A0]) | |
93 | define TF_A1 offsetof(struct trapframe, tf_reg[_X_A1]) | 94 | define TF_A1 offsetof(struct trapframe, tf_reg[_X_A1]) | |
94 | define TF_A2 offsetof(struct trapframe, tf_reg[_X_A2]) | 95 | define TF_A2 offsetof(struct trapframe, tf_reg[_X_A2]) | |
95 | define TF_A3 offsetof(struct trapframe, tf_reg[_X_A3]) | 96 | define TF_A3 offsetof(struct trapframe, tf_reg[_X_A3]) | |
96 | define TF_A4 offsetof(struct trapframe, tf_reg[_X_A4]) | 97 | define TF_A4 offsetof(struct trapframe, tf_reg[_X_A4]) | |
97 | define TF_A5 offsetof(struct trapframe, tf_reg[_X_A5]) | 98 | define TF_A5 offsetof(struct trapframe, tf_reg[_X_A5]) | |
98 | define TF_A6 offsetof(struct trapframe, tf_reg[_X_A6]) | 99 | define TF_A6 offsetof(struct trapframe, tf_reg[_X_A6]) | |
99 | define TF_A7 offsetof(struct trapframe, tf_reg[_X_A7]) | 100 | define TF_A7 offsetof(struct trapframe, tf_reg[_X_A7]) | |
100 | define TF_T0 offsetof(struct trapframe, tf_reg[_X_T0]) | 101 | define TF_T0 offsetof(struct trapframe, tf_reg[_X_T0]) | |
101 | define TF_T1 offsetof(struct trapframe, tf_reg[_X_T1]) | 102 | define TF_T1 offsetof(struct trapframe, tf_reg[_X_T1]) | |
102 | define TF_T2 offsetof(struct trapframe, tf_reg[_X_T2]) | 103 | define TF_T2 offsetof(struct trapframe, tf_reg[_X_T2]) | |
103 | define TF_T3 offsetof(struct trapframe, tf_reg[_X_T3]) | 104 | define TF_T3 offsetof(struct trapframe, tf_reg[_X_T3]) | |
104 | define TF_T4 offsetof(struct trapframe, tf_reg[_X_T4]) | 105 | define TF_T4 offsetof(struct trapframe, tf_reg[_X_T4]) | |
105 | define TF_T5 offsetof(struct trapframe, tf_reg[_X_T5]) | 106 | define TF_T5 offsetof(struct trapframe, tf_reg[_X_T5]) | |
106 | define TF_T6 offsetof(struct trapframe, tf_reg[_X_T6]) | 107 | define TF_T6 offsetof(struct trapframe, tf_reg[_X_T6]) | |
107 | define TF_GP offsetof(struct trapframe, tf_reg[_X_GP]) | 108 | define TF_GP offsetof(struct trapframe, tf_reg[_X_GP]) | |
108 | define TF_PC offsetof(struct trapframe, tf_pc) | 109 | define TF_PC offsetof(struct trapframe, tf_pc) | |
109 | define TF_CAUSE offsetof(struct trapframe, tf_cause) | 110 | define TF_SCAUSE offsetof(struct trapframe, tf_scause) | |
110 | define TF_BADADDR offsetof(struct trapframe, tf_badaddr) | 111 | define TF_STVAL offsetof(struct trapframe, tf_stval) | |
111 | define TF_SR offsetof(struct trapframe, tf_sr) | 112 | define TF_SR offsetof(struct trapframe, tf_sr) | |
112 | 113 | |||
113 | define L_CPU offsetof(struct lwp, l_cpu) | 114 | define L_CPU offsetof(struct lwp, l_cpu) | |
114 | define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending) | 115 | define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending) | |
115 | define L_MD_ONFAULT offsetof(struct lwp, l_md.md_onfault) | 116 | define L_MD_ONFAULT offsetof(struct lwp, l_md.md_onfault) | |
116 | define L_MD_USP offsetof(struct lwp, l_md.md_usp) | 117 | define L_MD_USP offsetof(struct lwp, l_md.md_usp) | |
117 | define L_MD_UTF offsetof(struct lwp, l_md.md_utf) | 118 | define L_MD_UTF offsetof(struct lwp, l_md.md_utf) | |
118 | define L_MD_KTF offsetof(struct lwp, l_md.md_ktf) | 119 | define L_MD_KTF offsetof(struct lwp, l_md.md_ktf) | |
119 | define L_PCB offsetof(struct lwp, l_addr) | 120 | define L_PCB offsetof(struct lwp, l_addr) | |
120 | define L_PROC offsetof(struct lwp, l_proc) | 121 | define L_PROC offsetof(struct lwp, l_proc) | |
121 | 122 | |||
122 | define P_MD_SYSCALL offsetof(struct proc, p_md.md_syscall) | 123 | define P_MD_SYSCALL offsetof(struct proc, p_md.md_syscall) | |
123 | 124 | |||
124 | define CI_SIZE sizeof(struct cpu_info) | 125 | define CI_SIZE sizeof(struct cpu_info) | |
125 | define CI_CPL offsetof(struct cpu_info, ci_cpl) | 126 | define CI_CPL offsetof(struct cpu_info, ci_cpl) | |
126 | define CI_CURLWP offsetof(struct cpu_info, ci_curlwp) | 127 | define CI_CURLWP offsetof(struct cpu_info, ci_curlwp) | |
127 | define CI_INTR_DEPTH offsetof(struct cpu_info, ci_intr_depth) | 128 | define CI_INTR_DEPTH offsetof(struct cpu_info, ci_intr_depth) | |
128 | define CI_IDLELWP offsetof(struct cpu_info, ci_data.cpu_idlelwp) | 129 | define CI_IDLELWP offsetof(struct cpu_info, ci_data.cpu_idlelwp) | |
129 | define CI_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count) | 130 | define CI_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count) | |
130 | define CI_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl) | 131 | define CI_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl) | |
131 | define CI_SOFTINTS offsetof(struct cpu_info, ci_softints) | 132 | define CI_SOFTINTS offsetof(struct cpu_info, ci_softints) | |
132 | 133 | |||
133 | define FB_A0 offsetof(struct faultbuf, fb_reg[FB_A0]) | 134 | define FB_A0 offsetof(struct faultbuf, fb_reg[FB_A0]) | |
134 | define FB_RA offsetof(struct faultbuf, fb_reg[FB_RA]) | 135 | define FB_RA offsetof(struct faultbuf, fb_reg[FB_RA]) | |
135 | define FB_S0 offsetof(struct faultbuf, fb_reg[FB_S0]) | 136 | define FB_S0 offsetof(struct faultbuf, fb_reg[FB_S0]) | |
136 | define FB_S1 offsetof(struct faultbuf, fb_reg[FB_S1]) | 137 | define FB_S1 offsetof(struct faultbuf, fb_reg[FB_S1]) | |
137 | define FB_S2 offsetof(struct faultbuf, fb_reg[FB_S2]) | 138 | define FB_S2 offsetof(struct faultbuf, fb_reg[FB_S2]) | |
138 | define FB_S3 offsetof(struct faultbuf, fb_reg[FB_S3]) | 139 | define FB_S3 offsetof(struct faultbuf, fb_reg[FB_S3]) | |
139 | define FB_S4 offsetof(struct faultbuf, fb_reg[FB_S4]) | 140 | define FB_S4 offsetof(struct faultbuf, fb_reg[FB_S4]) | |
140 | define FB_S5 offsetof(struct faultbuf, fb_reg[FB_S5]) | 141 | define FB_S5 offsetof(struct faultbuf, fb_reg[FB_S5]) | |
141 | define FB_S6 offsetof(struct faultbuf, fb_reg[FB_S6]) | 142 | define FB_S6 offsetof(struct faultbuf, fb_reg[FB_S6]) | |
142 | define FB_S7 offsetof(struct faultbuf, fb_reg[FB_S7]) | 143 | define FB_S7 offsetof(struct faultbuf, fb_reg[FB_S7]) | |
143 | define FB_S8 offsetof(struct faultbuf, fb_reg[FB_S8]) | 144 | define FB_S8 offsetof(struct faultbuf, fb_reg[FB_S8]) | |
144 | define FB_S9 offsetof(struct faultbuf, fb_reg[FB_S9]) | 145 | define FB_S9 offsetof(struct faultbuf, fb_reg[FB_S9]) | |
145 | define FB_S10 offsetof(struct faultbuf, fb_reg[FB_S10]) | 146 | define FB_S10 offsetof(struct faultbuf, fb_reg[FB_S10]) | |
146 | define FB_S11 offsetof(struct faultbuf, fb_reg[FB_S11]) | 147 | define FB_S11 offsetof(struct faultbuf, fb_reg[FB_S11]) | |
147 | define FB_SP offsetof(struct faultbuf, fb_reg[FB_SP]) | 148 | define FB_SP offsetof(struct faultbuf, fb_reg[FB_SP]) | |
148 | define FB_SR offsetof(struct faultbuf, fb_sr) | 149 | define FB_SR offsetof(struct faultbuf, fb_sr) | |
149 | 150 | |||
150 | define PAGE_SIZE PAGE_SIZE | 151 | define PAGE_SIZE PAGE_SIZE | |
151 | define PAGE_MASK PAGE_MASK | 152 | define PAGE_MASK PAGE_MASK | |
152 | define PAGE_SHIFT PAGE_SHIFT | 153 | define PAGE_SHIFT PAGE_SHIFT | |
153 | define USRSTACK USRSTACK | 154 | define USRSTACK USRSTACK | |
154 | 155 | |||
155 | ifdef __HAVE_FAST_SOFTINTS | 156 | ifdef __HAVE_FAST_SOFTINTS | |
156 | define __HAVE_FAST_SOFTINTS 1 | 157 | define __HAVE_FAST_SOFTINTS 1 | |
157 | endif | 158 | endif | |
158 | 159 | |||
159 | ifdef __HAVE_MUTEX_STUBS | 160 | ifdef __HAVE_MUTEX_STUBS | |
160 | define __HAVE_MUTEX_STUBS 1 | 161 | define __HAVE_MUTEX_STUBS 1 | |
161 | endif | 162 | endif | |
162 | 163 | |||
163 | ifdef __HAVE_MUTEX_SPIN_STUBS | 164 | ifdef __HAVE_MUTEX_SPIN_STUBS | |
164 | define __HAVE_MUTEX_SPIN_STUBS 1 | 165 | define __HAVE_MUTEX_SPIN_STUBS 1 | |
165 | endif | 166 | endif | |
166 | 167 | |||
167 | ifdef __HAVE_RW_STUBS | 168 | ifdef __HAVE_RW_STUBS | |
168 | define __HAVE_RW_STUBS 1 | 169 | define __HAVE_RW_STUBS 1 | |
169 | endif | 170 | endif | |
170 | 171 | |||
171 | define RW_OWNER offsetof(struct krwlock, rw_owner) | 172 | define RW_OWNER offsetof(struct krwlock, rw_owner) | |
172 | define RW_WRITE_LOCKED RW_WRITE_LOCKED | 173 | define RW_WRITE_LOCKED RW_WRITE_LOCKED | |
173 | define RW_READ_INCR RW_READ_INCR | 174 | define RW_READ_INCR RW_READ_INCR | |
174 | define RW_READER RW_READER | 175 | define RW_READER RW_READER | |
175 | 176 | |||
176 | define VM_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS | 177 | define VM_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS | |
177 | define VM_MAX_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS | 178 | define VM_MAX_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS | |
178 | 179 | |||
179 | define USPACE USPACE | 180 | define USPACE USPACE | |
180 | ifdef XSEGSHIFT | 181 | ifdef XSEGSHIFT | |
181 | define XSEGSHIFT XSEGSHIFT | 182 | define XSEGSHIFT XSEGSHIFT | |
182 | endif | 183 | endif | |
183 | define SEGSHIFT SEGSHIFT | 184 | define SEGSHIFT SEGSHIFT | |
184 | define PGSHIFT PGSHIFT | 185 | define PGSHIFT PGSHIFT | |
185 | define NPDEPG NPDEPG | 186 | define NPDEPG NPDEPG | |
186 | define NBSEG NBSEG | 187 | define NBSEG NBSEG | |
187 | 188 | |||
188 | define PTE_D PTE_D | 189 | define PTE_D PTE_D | |
189 | define PTE_A PTE_A | 190 | define PTE_A PTE_A | |
190 | define PTE_G PTE_G | 191 | define PTE_G PTE_G | |
191 | define PTE_U PTE_U | 192 | define PTE_U PTE_U | |
192 | define PTE_X PTE_X | 193 | define PTE_X PTE_X | |
193 | define PTE_W PTE_W | 194 | define PTE_W PTE_W | |
194 | define PTE_R PTE_R | 195 | define PTE_R PTE_R | |
195 | define PTE_V PTE_V | 196 | define PTE_V PTE_V | |
196 | 197 | |||
197 | define PM_MD_PDETAB offsetof(struct pmap, pm_md.md_pdetab) | 198 | define PM_MD_PDETAB offsetof(struct pmap, pm_md.md_pdetab) | |
198 | define PM_MD_PTBR offsetof(struct pmap, pm_md.md_ptbr) | 199 | define PM_MD_PTBR offsetof(struct pmap, pm_md.md_ptbr) |
--- src/sys/arch/riscv/riscv/locore.S 2020/10/31 15:18:09 1.13
+++ src/sys/arch/riscv/riscv/locore.S 2020/11/04 06:56:56 1.14
@@ -1,566 +1,554 @@ | @@ -1,566 +1,554 @@ | |||
1 | /* $NetBSD: locore.S,v 1.13 2020/10/31 15:18:09 skrll Exp $ */ | 1 | /* $NetBSD: locore.S,v 1.14 2020/11/04 06:56:56 skrll Exp $ */ | |
2 | /*- | 2 | /*- | |
3 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | 3 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | |
4 | * All rights reserved. | 4 | * All rights reserved. | |
5 | * | 5 | * | |
6 | * This code is derived from software contributed to The NetBSD Foundation | 6 | * This code is derived from software contributed to The NetBSD Foundation | |
7 | * by Matt Thomas of 3am Software Foundry. | 7 | * by Matt Thomas of 3am Software Foundry. | |
8 | * | 8 | * | |
9 | * Redistribution and use in source and binary forms, with or without | 9 | * Redistribution and use in source and binary forms, with or without | |
10 | * modification, are permitted provided that the following conditions | 10 | * modification, are permitted provided that the following conditions | |
11 | * are met: | 11 | * are met: | |
12 | * 1. Redistributions of source code must retain the above copyright | 12 | * 1. Redistributions of source code must retain the above copyright | |
13 | * notice, this list of conditions and the following disclaimer. | 13 | * notice, this list of conditions and the following disclaimer. | |
14 | * 2. Redistributions in binary form must reproduce the above copyright | 14 | * 2. Redistributions in binary form must reproduce the above copyright | |
15 | * notice, this list of conditions and the following disclaimer in the | 15 | * notice, this list of conditions and the following disclaimer in the | |
16 | * documentation and/or other materials provided with the distribution. | 16 | * documentation and/or other materials provided with the distribution. | |
17 | * | 17 | * | |
18 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 18 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
19 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 19 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
20 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 20 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
21 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 21 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
22 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 22 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 23 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 24 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 25 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 26 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 27 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
28 | * POSSIBILITY OF SUCH DAMAGE. | 28 | * POSSIBILITY OF SUCH DAMAGE. | |
29 | */ | 29 | */ | |
30 | 30 | |||
31 | #include <machine/asm.h> | 31 | #include <machine/asm.h> | |
32 | #include "assym.h" | 32 | #include "assym.h" | |
33 | 33 | |||
34 | .globl _C_LABEL(exception_userexit) | 34 | .globl _C_LABEL(exception_userexit) | |
35 | .globl _C_LABEL(cpu_Debugger_insn) | 35 | .globl _C_LABEL(cpu_Debugger_insn) | |
36 | 36 | |||
37 | ENTRY_NP(start) | 37 | ENTRY_NP(start) | |
38 | // We get loaded and starting running at or near 0, not where we | 38 | // We get loaded and starting running at or near 0, not where we | |
39 | // should be. We need to construct an initial PDETAB | 39 | // should be. We need to construct an initial PDETAB | |
40 | 40 | |||
41 | #ifdef _LP64 | 41 | #ifdef _LP64 | |
42 | li t0, SR_U64|SR_S64 | 42 | li t0, SR_U64|SR_S64 | |
43 | li t1, SR_IM|SR_VM|SR_EI | 43 | li t1, SR_IM|SR_SIE | |
44 | csrs sstatus, t0 | 44 | csrs sstatus, t0 | |
45 | #else | 45 | #else | |
46 | li t1, SR_IM|SR_VM|SR_U64|SR_S64|R_EI | 46 | li t1, SR_IM|SR_U64|SR_S64|SR_EI | |
47 | #endif | 47 | #endif | |
48 | csrc sstatus, t1 | 48 | csrc sstatus, t1 | |
49 | 49 | |||
50 | li s11, VM_MAX_KERNEL_ADDRESS | 50 | li s11, VM_MAX_KERNEL_ADDRESS | |
51 | li s10, PAGE_SIZE | 51 | li s10, PAGE_SIZE | |
52 | li s9, USPACE | 52 | li s9, USPACE | |
53 | 53 | |||
54 | /* | 54 | /* | |
55 | * XXX XXX XXX: This is completely broken and wrong, we should map only | 55 | * XXX XXX XXX: This is completely broken and wrong, we should map only | |
56 | * the kernel sections, and the direct map should be mapped later in C. | 56 | * the kernel sections, and the direct map should be mapped later in C. | |
57 | */ | 57 | */ | |
58 | #if 0 | 58 | #if 0 | |
59 | #if 0 | 59 | #if 0 | |
60 | // The kernel doesn't use gp/_gp since we'd have to reload it on | 60 | // The kernel doesn't use gp/_gp since we'd have to reload it on | |
61 | // each exception. | 61 | // each exception. | |
62 | PTR_LA gp, _C_LABEL(_gp) | 62 | PTR_LA gp, _C_LABEL(_gp) | |
63 | #endif | 63 | #endif | |
64 | 64 | |||
65 | PTR_LA a0, _C_LABEL(__bss_start) | 65 | PTR_LA a0, _C_LABEL(__bss_start) | |
66 | PTR_LA s1, _C_LABEL(_end) | 66 | PTR_LA s1, _C_LABEL(_end) | |
67 | li a1, 0 | 67 | li a1, 0 | |
68 | 68 | |||
69 | add s1, s1, s10 // PAGE_SIZE | 69 | add s1, s1, s10 // PAGE_SIZE | |
70 | addi s1, s1, -1 // -1 == PAGE_MASK | 70 | addi s1, s1, -1 // -1 == PAGE_MASK | |
71 | neg a1, a0 // -PAGE_SIZE | 71 | neg a1, a0 // -PAGE_SIZE | |
72 | and s1, s1, a1 // s1 is page aligned end of kernel | 72 | and s1, s1, a1 // s1 is page aligned end of kernel | |
73 | // s1 = uarea | 73 | // s1 = uarea | |
74 | add s2, s1, s9 // s2 = first PDE page | 74 | add s2, s1, s9 // s2 = first PDE page | |
75 | #ifdef _LP64 | 75 | #ifdef _LP64 | |
76 | add s3, s2, s10 // s3 = second PDE page (RV64 only) | 76 | add s3, s2, s10 // s3 = second PDE page (RV64 only) | |
77 | #else | 77 | #else | |
78 | mv s3, 22 | 78 | mv s3, 22 | |
79 | #endif | 79 | #endif | |
80 | add s4, s3, s10 // s4 = first kernel PTE page | 80 | add s4, s3, s10 // s4 = first kernel PTE page | |
81 | add s5, s1, s9 // s5 = kernel_end | 81 | add s5, s1, s9 // s5 = kernel_end | |
82 | sub a2, s5, a0 | 82 | sub a2, s5, a0 | |
83 | call memset // zero through kernel_end | 83 | call memset // zero through kernel_end | |
84 | 84 | |||
85 | // As a temporary hack, word 0 contains the amount of memory in MB | 85 | // As a temporary hack, word 0 contains the amount of memory in MB | |
86 | INT_L a7, (zero) // load memory size | 86 | INT_L a7, (zero) // load memory size | |
87 | slli a7, a7, (20-PGSHIFT) // convert MB to pages | 87 | slli a7, a7, (20-PGSHIFT) // convert MB to pages | |
88 | .L01: PTR_LA t0, physmem | 88 | .L01: PTR_LA t0, physmem | |
89 | INT_S a7, (t0) // store it in physmem | 89 | INT_S a7, (t0) // store it in physmem | |
90 | 90 | |||
91 | li t4, PTE_V | PTE_SX | PTE_SW | PTE_SR | PTE_G | 91 | li t4, PTE_V | PTE_SX | PTE_SW | PTE_SR | PTE_G | |
92 | #ifdef _LP64 | 92 | #ifdef _LP64 | |
93 | REG_S t4, 0(s2) // keep a mapping for the first 8GB. | 93 | REG_S t4, 0(s2) // keep a mapping for the first 8GB. | |
94 | or t0, s3, t4 // point to next page | 94 | or t0, s3, t4 // point to next page | |
95 | or t0, t0, PTE_T // tranverse it. | 95 | or t0, t0, PTE_T // tranverse it. | |
96 | REG_S t0, -SZREG(s3) // store in highest first level PDE | 96 | REG_S t0, -SZREG(s3) // store in highest first level PDE | |
97 | #endif | 97 | #endif | |
98 | 98 | |||
99 | #if (VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) != (VM_MAX_KERNEL_ADDRESS >> XSEGSHIFT) | 99 | #if (VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) != (VM_MAX_KERNEL_ADDRESS >> XSEGSHIFT) | |
100 | #error VM_MIN_KERNEL_ADDRESS not in same first level PDE as VM_MAX_KERNEL_ADDRESS | 100 | #error VM_MIN_KERNEL_ADDRESS not in same first level PDE as VM_MAX_KERNEL_ADDRESS | |
101 | #endif | 101 | #endif | |
102 | // We allocated the kernel first PTE page so let's insert in the | 102 | // We allocated the kernel first PTE page so let's insert in the | |
103 | // page table. For now, we assume it's in the same PDE page as the | 103 | // page table. For now, we assume it's in the same PDE page as the | |
104 | // direct-mapped memory. | 104 | // direct-mapped memory. | |
105 | or t0, s4, t4 | 105 | or t0, s4, t4 | |
106 | or t0, t0, PTE_T | 106 | or t0, t0, PTE_T | |
107 | #if ((VM_MIN_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG-1)) * SZREG | 107 | #if ((VM_MIN_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG-1)) * SZREG | |
108 | li t1, ((VM_MIN_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG-1)) * SZREG | 108 | li t1, ((VM_MIN_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG-1)) * SZREG | |
109 | add t1, t1, s3 | 109 | add t1, t1, s3 | |
110 | REG_S t0, 0(t1) | 110 | REG_S t0, 0(t1) | |
111 | #else | 111 | #else | |
112 | REG_S t0, 0(s3) | 112 | REG_S t0, 0(s3) | |
113 | #endif | 113 | #endif | |
114 | 114 | |||
115 | li t0, ((VM_MAX_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG-1)) * SZREG | 115 | li t0, ((VM_MAX_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG-1)) * SZREG | |
116 | add s3, s3, t0 | 116 | add s3, s3, t0 | |
117 | srli a7, a7, (SEGSHIFT-PGSHIFT) // pages to segments | 117 | srli a7, a7, (SEGSHIFT-PGSHIFT) // pages to segments | |
118 | li t3, NBSEG // load for ease | 118 | li t3, NBSEG // load for ease | |
119 | 119 | |||
120 | // | 120 | // | |
121 | // Fill in the PDEs to direct map memory. | 121 | // Fill in the PDEs to direct map memory. | |
122 | // | 122 | // | |
123 | .Lfill: REG_S t4, 0(s3) // store PDE | 123 | .Lfill: REG_S t4, 0(s3) // store PDE | |
124 | add t4, t4, t3 // advance PA in PDE to next segment | 124 | add t4, t4, t3 // advance PA in PDE to next segment | |
125 | add s3, s3, SZREG // advance to next PDE slot | 125 | add s3, s3, SZREG // advance to next PDE slot | |
126 | addi a7, a7, -1 // count down segment | 126 | addi a7, a7, -1 // count down segment | |
127 | bgtz a6, .Lfill // loop if more | 127 | bgtz a6, .Lfill // loop if more | |
128 | #endif | 128 | #endif | |
129 | 129 | |||
130 | csrw sptbr, s1 // set the page table base | |||
131 | li t0, SR_VM | |||
132 | csrs sstatus, t0 // Enable VM | |||
133 | ||||
134 | // We should have a VM so let's start using our real addresses | 130 | // We should have a VM so let's start using our real addresses | |
135 | lui t0, %hi(.Lmmu_on) // load hi part of absolute address | 131 | lui t0, %hi(.Lmmu_on) // load hi part of absolute address | |
136 | jr t0, %lo(.Lmmu_on) // jump to absolute address | 132 | jr t0, %lo(.Lmmu_on) // jump to absolute address | |
137 | 133 | |||
138 | .Lmmu_on: | 134 | .Lmmu_on: | |
139 | // MMU is on! | 135 | // MMU is on! | |
140 | csrw sscratch, zero // zero in sscratch to mark kernel | 136 | csrw sscratch, zero // zero in sscratch to mark kernel | |
141 | 137 | |||
142 | PTR_LA tp, _C_LABEL(lwp0) // put curlwp in tp | 138 | PTR_LA tp, _C_LABEL(lwp0) // put curlwp in tp | |
143 | 139 | |||
144 | PTR_LA a0, _C_LABEL(cpu_exception_handler) | 140 | PTR_LA a0, _C_LABEL(cpu_exception_handler) | |
145 | csrw stvec, a0 | 141 | csrw stvec, a0 | |
146 | 142 | |||
147 | PTR_S s1, L_PCB(tp) // set uarea of lwp (already zeroed) | 143 | PTR_S s1, L_PCB(tp) // set uarea of lwp (already zeroed) | |
148 | addi sp, s2, -TF_LEN // switch to new stack | 144 | addi sp, s2, -TF_LEN // switch to new stack | |
149 | PTR_S sp, L_MD_UTF(tp) // store pointer to empty trapframe | 145 | PTR_S sp, L_MD_UTF(tp) // store pointer to empty trapframe | |
150 | 146 | |||
151 | PTR_LA t1, _C_LABEL(kernel_pmap_store) | 147 | PTR_LA t1, _C_LABEL(kernel_pmap_store) | |
152 | add t2, s2, s11 // PA -> VA | 148 | add t2, s2, s11 // PA -> VA | |
153 | PTR_S t2, PM_MD_PDETAB(t1) // VA of kernel PDETAB | 149 | PTR_S t2, PM_MD_PDETAB(t1) // VA of kernel PDETAB | |
154 | PTR_S s2, PM_MD_PTBR(t1) // PA of kernel PDETAB | 150 | PTR_S s2, PM_MD_PTBR(t1) // PA of kernel PDETAB | |
155 | 151 | |||
156 | // Now we should ready to start initializing the kernel. | 152 | // Now we should ready to start initializing the kernel. | |
157 | PTR_LA a0, _C_LABEL(start) // kernel_start | 153 | PTR_LA a0, _C_LABEL(start) // kernel_start | |
158 | add a1, s5, s11 // kernel_end | 154 | add a1, s5, s11 // kernel_end | |
159 | call _C_LABEL(init_riscv) // do MD startup | 155 | call _C_LABEL(init_riscv) // do MD startup | |
160 | tail _C_LABEL(main) // and transfer to main | 156 | tail _C_LABEL(main) // and transfer to main | |
161 | // not reached | 157 | // not reached | |
162 | END(start) | 158 | END(start) | |
163 | 159 | |||
164 | // | 160 | // | |
165 | // struct lwp *cpu_switchto(struct lwp *oldl, struct lwp *newl, bool returning); | 161 | // struct lwp *cpu_switchto(struct lwp *oldl, struct lwp *newl, bool returning); | |
166 | // | 162 | // | |
167 | ENTRY_NP(cpu_switchto) | 163 | ENTRY_NP(cpu_switchto) | |
168 | addi sp, sp, -TF_LEN // allocate trapframe | 164 | addi sp, sp, -TF_LEN // allocate trapframe | |
169 | 165 | |||
170 | REG_S ra, TF_RA(sp) // save return address | 166 | REG_S ra, TF_RA(sp) // save return address | |
171 | REG_S s0, TF_S0(sp) // save callee saved address | 167 | REG_S s0, TF_S0(sp) // save callee saved address | |
172 | REG_S s1, TF_S1(sp) // save callee saved address | 168 | REG_S s1, TF_S1(sp) // save callee saved address | |
173 | REG_S s2, TF_S2(sp) // save callee saved address | 169 | REG_S s2, TF_S2(sp) // save callee saved address | |
174 | REG_S s3, TF_S3(sp) // save callee saved address | 170 | REG_S s3, TF_S3(sp) // save callee saved address | |
175 | REG_S s4, TF_S4(sp) // save callee saved address | 171 | REG_S s4, TF_S4(sp) // save callee saved address | |
176 | REG_S s5, TF_S5(sp) // save callee saved address | 172 | REG_S s5, TF_S5(sp) // save callee saved address | |
177 | REG_S s6, TF_S6(sp) // save callee saved address | 173 | REG_S s6, TF_S6(sp) // save callee saved address | |
178 | REG_S s7, TF_S7(sp) // save callee saved address | 174 | REG_S s7, TF_S7(sp) // save callee saved address | |
179 | REG_S s8, TF_S8(sp) // save callee saved address | 175 | REG_S s8, TF_S8(sp) // save callee saved address | |
180 | REG_S s9, TF_S9(sp) // save callee saved address | 176 | REG_S s9, TF_S9(sp) // save callee saved address | |
181 | REG_S s10, TF_S10(sp) // save callee saved address | 177 | REG_S s10, TF_S10(sp) // save callee saved address | |
182 | REG_S s11, TF_S11(sp) // save callee saved address | 178 | REG_S s11, TF_S11(sp) // save callee saved address | |
183 | csrr t4, sstatus // get status for intr state | 179 | csrr t4, sstatus // get status for intr state | |
184 | REG_S t4, TF_SR(sp) // save it | 180 | REG_S t4, TF_SR(sp) // save it | |
185 | 181 | |||
186 | REG_S sp, L_MD_KTF(a0) // record trapframe pointer | 182 | REG_S sp, L_MD_KTF(a0) // record trapframe pointer | |
187 | 183 | |||
188 | csrrci t0, sstatus, SR_EI // # disable interrupts | 184 | csrrci t0, sstatus, SR_SIE // # disable interrupts | |
189 | 185 | |||
190 | mv tp, a1 // # put the new lwp in thread pointer | 186 | mv tp, a1 // # put the new lwp in thread pointer | |
191 | 187 | |||
192 | PTR_L t1, L_CPU(tp) // # get curcpu | 188 | PTR_L t1, L_CPU(tp) // # get curcpu | |
193 | PTR_S tp, CI_CURLWP(t1) // # update curcpu with the new curlwp | 189 | PTR_S tp, CI_CURLWP(t1) // # update curcpu with the new curlwp | |
194 | 190 | |||
195 | REG_L sp, L_MD_KTF(tp) // # load its kernel stack pointer | 191 | REG_L sp, L_MD_KTF(tp) // # load its kernel stack pointer | |
196 | REG_L t4, TF_SR(sp) // # fetch status register | 192 | REG_L t4, TF_SR(sp) // # fetch status register | |
197 | csrw sstatus, t4 // # restore it (and interrupts?) | 193 | csrw sstatus, t4 // # restore it (and interrupts?) | |
198 | 194 | |||
199 | REG_L s0, TF_S0(sp) // restore callee saved | 195 | REG_L s0, TF_S0(sp) // restore callee saved | |
200 | REG_L s1, TF_S1(sp) // restore callee saved | 196 | REG_L s1, TF_S1(sp) // restore callee saved | |
201 | REG_L s2, TF_S2(sp) // restore callee saved | 197 | REG_L s2, TF_S2(sp) // restore callee saved | |
202 | REG_L s3, TF_S3(sp) // restore callee saved | 198 | REG_L s3, TF_S3(sp) // restore callee saved | |
203 | REG_L s4, TF_S4(sp) // restore callee saved | 199 | REG_L s4, TF_S4(sp) // restore callee saved | |
204 | REG_L s5, TF_S5(sp) // restore callee saved | 200 | REG_L s5, TF_S5(sp) // restore callee saved | |
205 | REG_L s6, TF_S6(sp) // restore callee saved | 201 | REG_L s6, TF_S6(sp) // restore callee saved | |
206 | REG_L s7, TF_S7(sp) // restore callee saved | 202 | REG_L s7, TF_S7(sp) // restore callee saved | |
207 | REG_L s8, TF_S8(sp) // restore callee saved | 203 | REG_L s8, TF_S8(sp) // restore callee saved | |
208 | REG_L s9, TF_S9(sp) // restore callee saved | 204 | REG_L s9, TF_S9(sp) // restore callee saved | |
209 | REG_L s10, TF_S10(sp) // restore callee saved | 205 | REG_L s10, TF_S10(sp) // restore callee saved | |
210 | REG_L s11, TF_S11(sp) // restore callee saved | 206 | REG_L s11, TF_S11(sp) // restore callee saved | |
211 | 207 | |||
212 | REG_L ra, TF_RA(sp) // restore return address | 208 | REG_L ra, TF_RA(sp) // restore return address | |
213 | 209 | |||
214 | addi sp, sp, TF_LEN // remove trapframe | 210 | addi sp, sp, TF_LEN // remove trapframe | |
215 | 211 | |||
216 | // a0 = oldl | 212 | // a0 = oldl | |
217 | // a1 = curcpu() | 213 | // a1 = curcpu() | |
218 | // tp = newl | 214 | // tp = newl | |
219 | 215 | |||
220 | ret | 216 | ret | |
221 | END(cpu_switchto) | 217 | END(cpu_switchto) | |
222 | 218 | |||
223 | ENTRY_NP(cpu_lwp_trampoline) | 219 | ENTRY_NP(cpu_lwp_trampoline) | |
224 | mv a1, tp // get new lwp | 220 | mv a1, tp // get new lwp | |
225 | call _C_LABEL(lwp_startup) // call lwp startup | 221 | call _C_LABEL(lwp_startup) // call lwp startup | |
226 | 222 | |||
227 | mv a0, s1 // get saved arg | 223 | mv a0, s1 // get saved arg | |
228 | jalr s0 // call saved func | 224 | jalr s0 // call saved func | |
229 | 225 | |||
230 | // If the saved func returns, we are returning to user land. | 226 | // If the saved func returns, we are returning to user land. | |
231 | j _C_LABEL(exception_userexit) | 227 | j _C_LABEL(exception_userexit) | |
232 | END(cpu_lwp_trampoline) | 228 | END(cpu_lwp_trampoline) | |
233 | 229 | |||
234 | ENTRY_NP(cpu_fast_switchto_cleanup) | 230 | ENTRY_NP(cpu_fast_switchto_cleanup) | |
235 | INT_L t0, CI_MTX_COUNT(a1) // get mutex count | 231 | INT_L t0, CI_MTX_COUNT(a1) // get mutex count | |
236 | REG_L ra, CALLFRAME_RA(sp) // get return address | 232 | REG_L ra, CALLFRAME_RA(sp) // get return address | |
237 | REG_L a0, CALLFRAME_S0(sp) // get pinned LWP | 233 | REG_L a0, CALLFRAME_S0(sp) // get pinned LWP | |
238 | addi t0, t0, 1 // increment mutex count | 234 | addi t0, t0, 1 // increment mutex count | |
239 | INT_S t0, CI_MTX_COUNT(a1) // save it | 235 | INT_S t0, CI_MTX_COUNT(a1) // save it | |
240 | addi sp, sp, CALLFRAME_SIZ // remove callframe | 236 | addi sp, sp, CALLFRAME_SIZ // remove callframe | |
241 | #if IPL_SCHED != IPL_HIGH | 237 | #if IPL_SCHED != IPL_HIGH | |
242 | tail _C_LABEL(splhigh) // go back to IPL HIGH | 238 | tail _C_LABEL(splhigh) // go back to IPL HIGH | |
243 | #else | 239 | #else | |
244 | ret // just return | 240 | ret // just return | |
245 | #endif | 241 | #endif | |
246 | END(cpu_fast_switchto_cleanup) | 242 | END(cpu_fast_switchto_cleanup) | |
247 | 243 | |||
248 | // | 244 | // | |
249 | // void cpu_fast_switchto(struct lwp *, int s); | 245 | // void cpu_fast_switchto(struct lwp *, int s); | |
250 | // | 246 | // | |
251 | ENTRY_NP(cpu_fast_switchto) | 247 | ENTRY_NP(cpu_fast_switchto) | |
252 | addi sp, sp, -(TF_LEN + CALLFRAME_SIZ) | 248 | addi sp, sp, -(TF_LEN + CALLFRAME_SIZ) | |
253 | REG_S a0, (TF_LEN + CALLFRAME_S0)(sp) | 249 | REG_S a0, (TF_LEN + CALLFRAME_S0)(sp) | |
254 | REG_S ra, (TF_LEN + CALLFRAME_RA)(sp) | 250 | REG_S ra, (TF_LEN + CALLFRAME_RA)(sp) | |
255 | 251 | |||
256 | PTR_LA t2, _C_LABEL(cpu_fast_switchto_cleanup) | 252 | PTR_LA t2, _C_LABEL(cpu_fast_switchto_cleanup) | |
257 | 253 | |||
258 | REG_S t2, TF_RA(sp) // return to someplace else | 254 | REG_S t2, TF_RA(sp) // return to someplace else | |
259 | REG_S s0, TF_S0(sp) // save callee saved register | 255 | REG_S s0, TF_S0(sp) // save callee saved register | |
260 | REG_S s1, TF_S1(sp) // save callee saved register | 256 | REG_S s1, TF_S1(sp) // save callee saved register | |
261 | REG_S s2, TF_S2(sp) // save callee saved register | 257 | REG_S s2, TF_S2(sp) // save callee saved register | |
262 | REG_S s3, TF_S3(sp) // save callee saved register | 258 | REG_S s3, TF_S3(sp) // save callee saved register | |
263 | REG_S s4, TF_S4(sp) // save callee saved register | 259 | REG_S s4, TF_S4(sp) // save callee saved register | |
264 | REG_S s5, TF_S5(sp) // save callee saved register | 260 | REG_S s5, TF_S5(sp) // save callee saved register | |
265 | REG_S s6, TF_S6(sp) // save callee saved register | 261 | REG_S s6, TF_S6(sp) // save callee saved register | |
266 | REG_S s7, TF_S7(sp) // save callee saved register | 262 | REG_S s7, TF_S7(sp) // save callee saved register | |
267 | REG_S s8, TF_S8(sp) // save callee saved register | 263 | REG_S s8, TF_S8(sp) // save callee saved register | |
268 | REG_S s9, TF_S9(sp) // save callee saved register | 264 | REG_S s9, TF_S9(sp) // save callee saved register | |
269 | REG_S s10, TF_S10(sp) // save callee saved register | 265 | REG_S s10, TF_S10(sp) // save callee saved register | |
270 | REG_S s11, TF_S11(sp) // save callee saved register | 266 | REG_S s11, TF_S11(sp) // save callee saved register | |
271 | csrr t4, sstatus // get status register (for intr state) | 267 | csrr t4, sstatus // get status register (for intr state) | |
272 | REG_S t4, TF_SR(sp) // save it | 268 | REG_S t4, TF_SR(sp) // save it | |
273 | 269 | |||
274 | mv s0, tp // remember curlwp | 270 | mv s0, tp // remember curlwp | |
275 | mv s1, sp // remember kernel stack | 271 | mv s1, sp // remember kernel stack | |
276 | 272 | |||
277 | #if 0 | 273 | csrrci t0, sstatus, SR_SIE // disable interrupts | |
278 | csrrci t0, sstatus, SR_EI // disable interrupts | |||
279 | #endif | |||
280 | PTR_L t1, L_CPU(tp) // get curcpu() | 274 | PTR_L t1, L_CPU(tp) // get curcpu() | |
281 | 275 | |||
282 | PTR_S sp, L_MD_KTF(tp) // save trapframe ptr in oldlwp | 276 | PTR_S sp, L_MD_KTF(tp) // save trapframe ptr in oldlwp | |
283 | mv tp, a0 // set thread pointer to newlwp | 277 | mv tp, a0 // set thread pointer to newlwp | |
284 | PTR_S tp, CI_CURLWP(t1) // update curlwp | 278 | PTR_S tp, CI_CURLWP(t1) // update curlwp | |
285 | PTR_L sp, L_MD_KTF(tp) // switch to its stack | 279 | PTR_L sp, L_MD_KTF(tp) // switch to its stack | |
286 | #if 0 | |||
287 | csrw sstatus, t0 // reenable interrupts | 280 | csrw sstatus, t0 // reenable interrupts | |
288 | #endif | |||
289 | call _C_LABEL(softint_dispatch) | 281 | call _C_LABEL(softint_dispatch) | |
290 | #if 0 | 282 | csrrci t0, sstatus, SR_SIE // disable interrupts | |
291 | csrrci t0, sstatus, SR_EI // disable interrupts | |||
292 | #endif | |||
293 | PTR_L t1, L_CPU(tp) // get curcpu() again | 283 | PTR_L t1, L_CPU(tp) // get curcpu() again | |
294 | mv tp, s0 // return to pinned lwp | 284 | mv tp, s0 // return to pinned lwp | |
295 | PTR_S tp, CI_CURLWP(t1) // restore curlwp | 285 | PTR_S tp, CI_CURLWP(t1) // restore curlwp | |
296 | #if 0 | |||
297 | csrw sstatus, t0 // reeanble interrupts | 286 | csrw sstatus, t0 // reeanble interrupts | |
298 | #endif | |||
299 | mv sp, s1 // restore stack pointer | 287 | mv sp, s1 // restore stack pointer | |
300 | 288 | |||
301 | REG_L ra, (TF_RA + CALLFRAME_RA)(sp) // get return address | 289 | REG_L ra, (TF_RA + CALLFRAME_RA)(sp) // get return address | |
302 | REG_L s0, TF_S0(sp) // restore register we used | 290 | REG_L s0, TF_S0(sp) // restore register we used | |
303 | REG_L s1, TF_S1(sp) // restore register we used | 291 | REG_L s1, TF_S1(sp) // restore register we used | |
304 | 292 | |||
305 | addi sp, sp, TF_LEN+CALLFRAME_SIZ // drop trapframe/callframe | 293 | addi sp, sp, TF_LEN+CALLFRAME_SIZ // drop trapframe/callframe | |
306 | ret // return | 294 | ret // return | |
307 | END(cpu_fast_switchto) | 295 | END(cpu_fast_switchto) | |
308 | 296 | |||
309 | // RISCV only has a simple exception handler handles both synchronous traps | 297 | // RISCV only has a simple exception handler handles both synchronous traps | |
310 | // and interrupts. | 298 | // and interrupts. | |
311 | ENTRY_NP(cpu_exception_handler) | 299 | ENTRY_NP(cpu_exception_handler) | |
312 | csrrw tp, sscratch, tp // swap scratch and thread pointer | 300 | csrrw tp, sscratch, tp // swap scratch and thread pointer | |
313 | beqz tp, .Lexception_kernel // tp == 0, already on kernel stack | 301 | beqz tp, .Lexception_kernel // tp == 0, already on kernel stack | |
314 | // | 302 | // | |
315 | // The execption happened while user code was executing. We need to | 303 | // The execption happened while user code was executing. We need to | |
316 | // get the pointer to the user trapframe from the LWP md area. Then we | 304 | // get the pointer to the user trapframe from the LWP md area. Then we | |
317 | // save t1 and tp so we have a register to work with and to get curlwp | 305 | // save t1 and tp so we have a register to work with and to get curlwp | |
318 | // into tp. We also save the saved SP into the trapframe. | 306 | // into tp. We also save the saved SP into the trapframe. | |
319 | // Upon entry on an exception from user, sscratch will contain curlwp. | 307 | // Upon entry on an exception from user, sscratch will contain curlwp. | |
320 | // | 308 | // | |
321 | REG_S sp, L_MD_USP(tp) // save user stack pointer temporarily | 309 | REG_S sp, L_MD_USP(tp) // save user stack pointer temporarily | |
322 | PTR_L sp, L_MD_UTF(sp) // trapframe pointer loaded | 310 | PTR_L sp, L_MD_UTF(sp) // trapframe pointer loaded | |
323 | REG_S t1, TF_T1(sp) // save t1 | 311 | REG_S t1, TF_T1(sp) // save t1 | |
324 | REG_L t1, L_MD_USP(tp) // get user stack pointer | 312 | REG_L t1, L_MD_USP(tp) // get user stack pointer | |
325 | REG_S t1, TF_SP(sp) // save thread pointer in trapframe | 313 | REG_S t1, TF_SP(sp) // save thread pointer in trapframe | |
326 | csrrw t1, sscratch, zero // swap saved thread pointer with 0 | 314 | csrrw t1, sscratch, zero // swap saved thread pointer with 0 | |
327 | REG_L t1, TF_TP(sp) // save thread pointer in trapframe | 315 | REG_L t1, TF_TP(sp) // save thread pointer in trapframe | |
328 | li t1, 0 // indicate user exception | 316 | li t1, 0 // indicate user exception | |
329 | j .Lexception_common | 317 | j .Lexception_common | |
330 | 318 | |||
331 | // | 319 | // | |
332 | // The exception happened while we were already in the kernel. That | 320 | // The exception happened while we were already in the kernel. That | |
333 | // means tp already has curlwp and sp has the kernel stack pointer so | 321 | // means tp already has curlwp and sp has the kernel stack pointer so | |
334 | // just need to restore it and then adjust it down for space for the | 322 | // just need to restore it and then adjust it down for space for the | |
335 | // trap frame. We save t1 so we can use it the original sp into the | 323 | // trap frame. We save t1 so we can use it the original sp into the | |
336 | // trapframe for use by the exception exiting code. | 324 | // trapframe for use by the exception exiting code. | |
337 | // | 325 | // | |
338 | .Lexception_kernel: | 326 | .Lexception_kernel: | |
339 | csrrw tp, sscratch, zero // get back our thread pointer | 327 | csrrw tp, sscratch, zero // get back our thread pointer | |
340 | addi sp, sp, -TF_LEN // allocate stack frame | 328 | addi sp, sp, -TF_LEN // allocate stack frame | |
341 | REG_S t1, TF_T1(sp) // save t1 | 329 | REG_S t1, TF_T1(sp) // save t1 | |
342 | addi t1, sp, TF_LEN | 330 | addi t1, sp, TF_LEN | |
343 | REG_S t1, TF_SP(sp) // save SP | 331 | REG_S t1, TF_SP(sp) // save SP | |
344 | li t1, 1 // indicate kernel exception | 332 | li t1, 1 // indicate kernel exception | |
345 | 333 | |||
346 | .Lexception_common: | 334 | .Lexception_common: | |
347 | // Now we save all the temporary registers into the trapframe since | 335 | // Now we save all the temporary registers into the trapframe since | |
348 | // they will most certainly be changed. | 336 | // they will most certainly be changed. | |
349 | REG_S ra, TF_RA(sp) // save return address | 337 | REG_S ra, TF_RA(sp) // save return address | |
350 | REG_S gp, TF_GP(sp) // save gp | 338 | REG_S gp, TF_GP(sp) // save gp | |
351 | REG_S a0, TF_A0(sp) // save a0 | 339 | REG_S a0, TF_A0(sp) // save a0 | |
352 | REG_S a1, TF_A1(sp) // save a1 | 340 | REG_S a1, TF_A1(sp) // save a1 | |
353 | REG_S a2, TF_A2(sp) // save a2 | 341 | REG_S a2, TF_A2(sp) // save a2 | |
354 | REG_S a3, TF_A3(sp) // save a3 | 342 | REG_S a3, TF_A3(sp) // save a3 | |
355 | REG_S a4, TF_A4(sp) // save a4 | 343 | REG_S a4, TF_A4(sp) // save a4 | |
356 | REG_S a5, TF_A5(sp) // save a5 | 344 | REG_S a5, TF_A5(sp) // save a5 | |
357 | REG_S a6, TF_A6(sp) // save a6 | 345 | REG_S a6, TF_A6(sp) // save a6 | |
358 | REG_S a7, TF_A7(sp) // save a7 | 346 | REG_S a7, TF_A7(sp) // save a7 | |
359 | REG_S t0, TF_T0(sp) // save t0 | 347 | REG_S t0, TF_T0(sp) // save t0 | |
360 | // t1 is already saved | 348 | // t1 is already saved | |
361 | REG_S t2, TF_T2(sp) // save t2 | 349 | REG_S t2, TF_T2(sp) // save t2 | |
362 | REG_S t3, TF_T3(sp) // save t3 | 350 | REG_S t3, TF_T3(sp) // save t3 | |
363 | REG_S t4, TF_T4(sp) // save t4 | 351 | REG_S t4, TF_T4(sp) // save t4 | |
364 | REG_S t5, TF_T5(sp) // save t5 | 352 | REG_S t5, TF_T5(sp) // save t5 | |
365 | REG_S t6, TF_T6(sp) // save t6 | 353 | REG_S t6, TF_T6(sp) // save t6 | |
366 | 354 | |||
367 | // Now we get the | 355 | // Now we get the | |
368 | mv a0, sp // trapframe pointer | 356 | mv a0, sp // trapframe pointer | |
369 | csrr a1, sepc // get execption pc | 357 | csrr a1, sepc // get execption pc | |
370 | csrr a2, sstatus // get status | 358 | csrr a2, sstatus // get status | |
371 | csrr a3, scause // get cause | 359 | csrr a3, scause // get cause | |
372 | 360 | |||
373 | REG_S a1, TF_PC(sp) | 361 | REG_S a1, TF_PC(sp) | |
374 | INT_S a2, TF_SR(sp) | 362 | INT_S a2, TF_SR(sp) | |
375 | INT_S a3, TF_CAUSE(sp) // save cause | 363 | INT_S a3, TF_CAUSE(sp) // save cause | |
376 | 364 | |||
377 | // Now we've saved the trapfame, the cause is still in a3. | 365 | // Now we've saved the trapfame, the cause is still in a3. | |
378 | 366 | |||
379 | bltz a3, intr_handler // MSB is set if interrupt | 367 | bltz a3, intr_handler // MSB is set if interrupt | |
380 | 368 | |||
381 | // badaddr is only relavent for non-interrupts | 369 | // stval is only relavent for non-interrupts | |
382 | csrr a4, sbadaddr // get badaddr | 370 | csrr a4, stval // get stval | |
383 | REG_S a4, TF_BADADDR(sp) | 371 | REG_S a4, TF_TVAL(sp) | |
384 | 372 | |||
385 | beqz t1, trap_user // this was a user trap | 373 | beqz t1, trap_user // this was a user trap | |
386 | // This was a kernel exception | 374 | // This was a kernel exception | |
387 | call _C_LABEL(cpu_trap) // just call trap to handle it | 375 | call _C_LABEL(cpu_trap) // just call trap to handle it | |
388 | exception_kernexit: | 376 | exception_kernexit: | |
389 | // If we got here, we are returning from a kernel exception (either a | 377 | // If we got here, we are returning from a kernel exception (either a | |
390 | // trap or interrupt). Simply return the volatile registers and the | 378 | // trap or interrupt). Simply return the volatile registers and the | |
391 | // exception PC and status, load the saved SP from the trapframe, and | 379 | // exception PC and status, load the saved SP from the trapframe, and | |
392 | // return from the exception | 380 | // return from the exception | |
393 | csrrci zero, sstatus, SR_EI // disable interrupts | 381 | csrrci zero, sstatus, SR_SIE // disable interrupts | |
394 | 382 | |||
395 | REG_L ra, TF_RA(sp) // restore return address | 383 | REG_L ra, TF_RA(sp) // restore return address | |
396 | REG_L gp, TF_GP(sp) // restore gp | 384 | REG_L gp, TF_GP(sp) // restore gp | |
397 | REG_L a0, TF_A0(sp) // restore a0 | 385 | REG_L a0, TF_A0(sp) // restore a0 | |
398 | REG_L a1, TF_A1(sp) // restore a1 | 386 | REG_L a1, TF_A1(sp) // restore a1 | |
399 | REG_L a2, TF_A2(sp) // restore a2 | 387 | REG_L a2, TF_A2(sp) // restore a2 | |
400 | REG_L a3, TF_A3(sp) // restore a3 | 388 | REG_L a3, TF_A3(sp) // restore a3 | |
401 | REG_L a4, TF_A4(sp) // restore a4 | 389 | REG_L a4, TF_A4(sp) // restore a4 | |
402 | REG_L a5, TF_A5(sp) // restore a5 | 390 | REG_L a5, TF_A5(sp) // restore a5 | |
403 | REG_L a6, TF_A6(sp) // restore a6 | 391 | REG_L a6, TF_A6(sp) // restore a6 | |
404 | REG_L a7, TF_A7(sp) // restore a7 | 392 | REG_L a7, TF_A7(sp) // restore a7 | |
405 | REG_L t2, TF_T2(sp) // restore t2 | 393 | REG_L t2, TF_T2(sp) // restore t2 | |
406 | REG_L t3, TF_T3(sp) // restore t3 | 394 | REG_L t3, TF_T3(sp) // restore t3 | |
407 | REG_L t4, TF_T4(sp) // restore t4 | 395 | REG_L t4, TF_T4(sp) // restore t4 | |
408 | REG_L t5, TF_T3(sp) // restore t5 | 396 | REG_L t5, TF_T3(sp) // restore t5 | |
409 | REG_L t6, TF_T4(sp) // restore t6 | 397 | REG_L t6, TF_T4(sp) // restore t6 | |
410 | 398 | |||
411 | REG_L t0, TF_PC(sp) // fetch execption PC | 399 | REG_L t0, TF_PC(sp) // fetch execption PC | |
412 | REG_L t1, TF_SR(sp) // fetch status | 400 | REG_L t1, TF_SR(sp) // fetch status | |
413 | 401 | |||
414 | csrw sepc, t0 // restore execption PC | 402 | csrw sepc, t0 // restore execption PC | |
415 | csrw sstatus, t1 // restore status | 403 | csrw sstatus, t1 // restore status | |
416 | 404 | |||
417 | REG_L t0, TF_T0(sp) // restore t0 | 405 | REG_L t0, TF_T0(sp) // restore t0 | |
418 | REG_L t1, TF_T1(sp) // restore t1 | 406 | REG_L t1, TF_T1(sp) // restore t1 | |
419 | REG_L sp, TF_SP(sp) // restore SP | 407 | REG_L sp, TF_SP(sp) // restore SP | |
420 | sret // and we're done | 408 | sret // and we're done | |
421 | 409 | |||
422 | trap_user: | 410 | trap_user: | |
423 | REG_S s0, TF_S0(sp) // only save from userland | 411 | REG_S s0, TF_S0(sp) // only save from userland | |
424 | REG_S s1, TF_S1(sp) // only save from userland | 412 | REG_S s1, TF_S1(sp) // only save from userland | |
425 | REG_S s2, TF_S2(sp) // only save from userland | 413 | REG_S s2, TF_S2(sp) // only save from userland | |
426 | REG_S s3, TF_S3(sp) // only save from userland | 414 | REG_S s3, TF_S3(sp) // only save from userland | |
427 | REG_S s4, TF_S4(sp) // only save from userland | 415 | REG_S s4, TF_S4(sp) // only save from userland | |
428 | REG_S s5, TF_S5(sp) // only save from userland | 416 | REG_S s5, TF_S5(sp) // only save from userland | |
429 | REG_S s6, TF_S6(sp) // only save from userland | 417 | REG_S s6, TF_S6(sp) // only save from userland | |
430 | REG_S s7, TF_S7(sp) // only save from userland | 418 | REG_S s7, TF_S7(sp) // only save from userland | |
431 | REG_S s8, TF_S8(sp) // only save from userland | 419 | REG_S s8, TF_S8(sp) // only save from userland | |
432 | REG_S s9, TF_S9(sp) // only save from userland | 420 | REG_S s9, TF_S9(sp) // only save from userland | |
433 | REG_S s10, TF_S10(sp) // only save from userland | 421 | REG_S s10, TF_S10(sp) // only save from userland | |
434 | REG_S s11, TF_S11(sp) // only save from userland | 422 | REG_S s11, TF_S11(sp) // only save from userland | |
435 | 423 | |||
436 | csrsi sstatus, SR_EI // reenable interrupts | 424 | csrsi sstatus, SR_SIE // reenable interrupts | |
437 | 425 | |||
438 | li t0, CAUSE_SYSCALL // let's see if this was a syscall | 426 | li t0, CAUSE_SYSCALL // let's see if this was a syscall | |
439 | beq a3, t0, trap_syscall // yes it was | 427 | beq a3, t0, trap_syscall // yes it was | |
440 | 428 | |||
441 | call _C_LABEL(cpu_trap) // nope, just a regular trap | 429 | call _C_LABEL(cpu_trap) // nope, just a regular trap | |
442 | _C_LABEL(exception_userexit): | 430 | _C_LABEL(exception_userexit): | |
443 | INT_L t0, L_MD_ASTPENDING(tp) // ast pending? | 431 | INT_L t0, L_MD_ASTPENDING(tp) // ast pending? | |
444 | bnez t0, trap_doast // yes, handle it. | 432 | bnez t0, trap_doast // yes, handle it. | |
445 | csrrci zero, sstatus, SR_EI // disable interrupts | 433 | csrrci zero, sstatus, SR_SIE // disable interrupts | |
446 | csrw sscratch, tp // show we are coming from userland | 434 | csrw sscratch, tp // show we are coming from userland | |
447 | REG_L tp, TF_TP(sp) // only restore from userland | 435 | REG_L tp, TF_TP(sp) // only restore from userland | |
448 | REG_L s0, TF_S0(sp) // only restore from userland | 436 | REG_L s0, TF_S0(sp) // only restore from userland | |
449 | REG_L s1, TF_S1(sp) // only restore from userland | 437 | REG_L s1, TF_S1(sp) // only restore from userland | |
450 | REG_L s2, TF_S2(sp) // only restore from userland | 438 | REG_L s2, TF_S2(sp) // only restore from userland | |
451 | REG_L s3, TF_S3(sp) // only restore from userland | 439 | REG_L s3, TF_S3(sp) // only restore from userland | |
452 | REG_L s4, TF_S4(sp) // only restore from userland | 440 | REG_L s4, TF_S4(sp) // only restore from userland | |
453 | REG_L s5, TF_S5(sp) // only restore from userland | 441 | REG_L s5, TF_S5(sp) // only restore from userland | |
454 | REG_L s6, TF_S6(sp) // only restore from userland | 442 | REG_L s6, TF_S6(sp) // only restore from userland | |
455 | REG_L s7, TF_S7(sp) // only restore from userland | 443 | REG_L s7, TF_S7(sp) // only restore from userland | |
456 | REG_L s8, TF_S8(sp) // only restore from userland | 444 | REG_L s8, TF_S8(sp) // only restore from userland | |
457 | REG_L s9, TF_S9(sp) // only restore from userland | 445 | REG_L s9, TF_S9(sp) // only restore from userland | |
458 | REG_L s10, TF_S10(sp) // only restore from userland | 446 | REG_L s10, TF_S10(sp) // only restore from userland | |
459 | REG_L s11, TF_S11(sp) // only restore from userland | 447 | REG_L s11, TF_S11(sp) // only restore from userland | |
460 | j exception_kernexit | 448 | j exception_kernexit | |
461 | 449 | |||
462 | trap_syscall: | 450 | trap_syscall: | |
463 | .L0: PTR_L ra, exception_userexit | 451 | .L0: PTR_L ra, exception_userexit | |
464 | PTR_L t0, L_PROC(tp) // get proc struct | 452 | PTR_L t0, L_PROC(tp) // get proc struct | |
465 | PTR_L t0, P_MD_SYSCALL(t0) // get syscall address from proc | 453 | PTR_L t0, P_MD_SYSCALL(t0) // get syscall address from proc | |
466 | jr t0 // and jump to it | 454 | jr t0 // and jump to it | |
467 | 455 | |||
468 | intr_usersave: | 456 | intr_usersave: | |
469 | REG_S s0, TF_S0(sp) // only save from userland | 457 | REG_S s0, TF_S0(sp) // only save from userland | |
470 | REG_S s1, TF_S1(sp) // only save from userland | 458 | REG_S s1, TF_S1(sp) // only save from userland | |
471 | REG_S s2, TF_S2(sp) // only save from userland | 459 | REG_S s2, TF_S2(sp) // only save from userland | |
472 | REG_S s3, TF_S3(sp) // only save from userland | 460 | REG_S s3, TF_S3(sp) // only save from userland | |
473 | REG_S s4, TF_S4(sp) // only save from userland | 461 | REG_S s4, TF_S4(sp) // only save from userland | |
474 | REG_S s5, TF_S5(sp) // only save from userland | 462 | REG_S s5, TF_S5(sp) // only save from userland | |
475 | REG_S s6, TF_S6(sp) // only save from userland | 463 | REG_S s6, TF_S6(sp) // only save from userland | |
476 | REG_S s7, TF_S7(sp) // only save from userland | 464 | REG_S s7, TF_S7(sp) // only save from userland | |
477 | REG_S s8, TF_S8(sp) // only save from userland | 465 | REG_S s8, TF_S8(sp) // only save from userland | |
478 | REG_S s9, TF_S9(sp) // only save from userland | 466 | REG_S s9, TF_S9(sp) // only save from userland | |
479 | REG_S s10, TF_S10(sp) // only save from userland | 467 | REG_S s10, TF_S10(sp) // only save from userland | |
480 | REG_S s11, TF_S11(sp) // only save from userland | 468 | REG_S s11, TF_S11(sp) // only save from userland | |
481 | PTR_LA ra, exception_userexit | 469 | PTR_LA ra, exception_userexit | |
482 | trap_doast: | 470 | trap_doast: | |
483 | mv a0, sp // only argument is trapframe | 471 | mv a0, sp // only argument is trapframe | |
484 | tail _C_LABEL(cpu_ast) | 472 | tail _C_LABEL(cpu_ast) | |
485 | 473 | |||
486 | intr_user: | 474 | intr_user: | |
487 | call _C_LABEL(cpu_intr) // handle interrupt | 475 | call _C_LABEL(cpu_intr) // handle interrupt | |
488 | INT_L t0, L_MD_ASTPENDING(tp) // get astpending | 476 | INT_L t0, L_MD_ASTPENDING(tp) // get astpending | |
489 | bnez t0, intr_usersave // if one is pending, deal with in | 477 | bnez t0, intr_usersave // if one is pending, deal with in | |
490 | 478 | |||
491 | csrw sscratch, tp // show we are coming from userland | 479 | csrw sscratch, tp // show we are coming from userland | |
492 | REG_L tp, TF_TP(sp) // restore thread pointer | 480 | REG_L tp, TF_TP(sp) // restore thread pointer | |
493 | j exception_kernexit // do standard exception exit | 481 | j exception_kernexit // do standard exception exit | |
494 | 482 | |||
495 | intr_handler: | 483 | intr_handler: | |
496 | beqz t1, intr_user | 484 | beqz t1, intr_user | |
497 | call _C_LABEL(cpu_intr) | 485 | call _C_LABEL(cpu_intr) | |
498 | j exception_kernexit | 486 | j exception_kernexit | |
499 | END(cpu_exception_handler) | 487 | END(cpu_exception_handler) | |
500 | 488 | |||
501 | // int cpu_set_onfault(struct faultbuf *fb, register_t retval) | 489 | // int cpu_set_onfault(struct faultbuf *fb, register_t retval) | |
502 | // | 490 | // | |
503 | ENTRY(cpu_set_onfault) | 491 | ENTRY(cpu_set_onfault) | |
504 | REG_S ra, FB_RA(a0) | 492 | REG_S ra, FB_RA(a0) | |
505 | REG_S s0, FB_S0(a0) | 493 | REG_S s0, FB_S0(a0) | |
506 | REG_S s1, FB_S1(a0) | 494 | REG_S s1, FB_S1(a0) | |
507 | REG_S s2, FB_S2(a0) | 495 | REG_S s2, FB_S2(a0) | |
508 | REG_S s3, FB_S3(a0) | 496 | REG_S s3, FB_S3(a0) | |
509 | REG_S s4, FB_S4(a0) | 497 | REG_S s4, FB_S4(a0) | |
510 | REG_S s5, FB_S5(a0) | 498 | REG_S s5, FB_S5(a0) | |
511 | REG_S s6, FB_S6(a0) | 499 | REG_S s6, FB_S6(a0) | |
512 | REG_S s7, FB_S7(a0) | 500 | REG_S s7, FB_S7(a0) | |
513 | REG_S s8, FB_S8(a0) | 501 | REG_S s8, FB_S8(a0) | |
514 | REG_S s9, FB_S9(a0) | 502 | REG_S s9, FB_S9(a0) | |
515 | REG_S s10, FB_S10(a0) | 503 | REG_S s10, FB_S10(a0) | |
516 | REG_S s11, FB_S11(a0) | 504 | REG_S s11, FB_S11(a0) | |
517 | REG_S sp, FB_SP(a0) | 505 | REG_S sp, FB_SP(a0) | |
518 | REG_S a1, FB_A0(a0) | 506 | REG_S a1, FB_A0(a0) | |
519 | PTR_S a0, L_MD_ONFAULT(tp) | 507 | PTR_S a0, L_MD_ONFAULT(tp) | |
520 | li a0, 0 | 508 | li a0, 0 | |
521 | ret | 509 | ret | |
522 | END(cpu_set_onfault) | 510 | END(cpu_set_onfault) | |
523 | 511 | |||
524 | ENTRY(setjmp) | 512 | ENTRY(setjmp) | |
525 | REG_S ra, FB_RA(a0) | 513 | REG_S ra, FB_RA(a0) | |
526 | REG_S s0, FB_S0(a0) | 514 | REG_S s0, FB_S0(a0) | |
527 | REG_S s1, FB_S1(a0) | 515 | REG_S s1, FB_S1(a0) | |
528 | REG_S s2, FB_S2(a0) | 516 | REG_S s2, FB_S2(a0) | |
529 | REG_S s3, FB_S3(a0) | 517 | REG_S s3, FB_S3(a0) | |
530 | REG_S s4, FB_S4(a0) | 518 | REG_S s4, FB_S4(a0) | |
531 | REG_S s5, FB_S5(a0) | 519 | REG_S s5, FB_S5(a0) | |
532 | REG_S s6, FB_S6(a0) | 520 | REG_S s6, FB_S6(a0) | |
533 | REG_S s7, FB_S7(a0) | 521 | REG_S s7, FB_S7(a0) | |
534 | REG_S s8, FB_S8(a0) | 522 | REG_S s8, FB_S8(a0) | |
535 | REG_S s9, FB_S9(a0) | 523 | REG_S s9, FB_S9(a0) | |
536 | REG_S s10, FB_S10(a0) | 524 | REG_S s10, FB_S10(a0) | |
537 | REG_S s11, FB_S11(a0) | 525 | REG_S s11, FB_S11(a0) | |
538 | REG_S sp, FB_SP(a0) | 526 | REG_S sp, FB_SP(a0) | |
539 | li a0, 0 | 527 | li a0, 0 | |
540 | ret | 528 | ret | |
541 | END(setjmp) | 529 | END(setjmp) | |
542 | 530 | |||
543 | ENTRY(longjmp) | 531 | ENTRY(longjmp) | |
544 | REG_L ra, FB_RA(a0) | 532 | REG_L ra, FB_RA(a0) | |
545 | REG_L s0, FB_S0(a0) | 533 | REG_L s0, FB_S0(a0) | |
546 | REG_L s1, FB_S1(a0) | 534 | REG_L s1, FB_S1(a0) | |
547 | REG_L s2, FB_S2(a0) | 535 | REG_L s2, FB_S2(a0) | |
548 | REG_L s3, FB_S3(a0) | 536 | REG_L s3, FB_S3(a0) | |
549 | REG_L s4, FB_S4(a0) | 537 | REG_L s4, FB_S4(a0) | |
550 | REG_L s5, FB_S5(a0) | 538 | REG_L s5, FB_S5(a0) | |
551 | REG_L s6, FB_S6(a0) | 539 | REG_L s6, FB_S6(a0) | |
552 | REG_L s7, FB_S7(a0) | 540 | REG_L s7, FB_S7(a0) | |
553 | REG_L s8, FB_S8(a0) | 541 | REG_L s8, FB_S8(a0) | |
554 | REG_L s9, FB_S9(a0) | 542 | REG_L s9, FB_S9(a0) | |
555 | REG_L s10, FB_S10(a0) | 543 | REG_L s10, FB_S10(a0) | |
556 | REG_L s11, FB_S11(a0) | 544 | REG_L s11, FB_S11(a0) | |
557 | REG_L sp, FB_SP(a0) | 545 | REG_L sp, FB_SP(a0) | |
558 | mv a0, a1 | 546 | mv a0, a1 | |
559 | ret | 547 | ret | |
560 | END(longjmp) | 548 | END(longjmp) | |
561 | 549 | |||
562 | ENTRY_NP(cpu_Debugger) | 550 | ENTRY_NP(cpu_Debugger) | |
563 | cpu_Debugger_insn: | 551 | cpu_Debugger_insn: | |
564 | sbreak | 552 | sbreak | |
565 | ret | 553 | ret | |
566 | END(cpu_Debugger) | 554 | END(cpu_Debugger) |
--- src/sys/arch/riscv/riscv/trap.c 2020/11/01 21:09:48 1.10
+++ src/sys/arch/riscv/riscv/trap.c 2020/11/04 06:56:56 1.11
@@ -1,553 +1,553 @@ | @@ -1,553 +1,553 @@ | |||
1 | /*- | 1 | /*- | |
2 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | 2 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | |
3 | * All rights reserved. | 3 | * All rights reserved. | |
4 | * | 4 | * | |
5 | * This code is derived from software contributed to The NetBSD Foundation | 5 | * This code is derived from software contributed to The NetBSD Foundation | |
6 | * by Matt Thomas of 3am Software Foundry. | 6 | * by Matt Thomas of 3am Software Foundry. | |
7 | * | 7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | 8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions | 9 | * modification, are permitted provided that the following conditions | |
10 | * are met: | 10 | * are met: | |
11 | * 1. Redistributions of source code must retain the above copyright | 11 | * 1. Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | 12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | 13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | 14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | 15 | * documentation and/or other materials provided with the distribution. | |
16 | * | 16 | * | |
17 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 17 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
18 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 18 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
19 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 19 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
20 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 20 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
21 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 21 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
22 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 22 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
25 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 25 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
27 | * POSSIBILITY OF SUCH DAMAGE. | 27 | * POSSIBILITY OF SUCH DAMAGE. | |
28 | */ | 28 | */ | |
29 | 29 | |||
30 | #include <sys/cdefs.h> | 30 | #include <sys/cdefs.h> | |
31 | 31 | |||
32 | #define __PMAP_PRIVATE | 32 | #define __PMAP_PRIVATE | |
33 | #define __UFETCHSTORE_PRIVATE | 33 | #define __UFETCHSTORE_PRIVATE | |
34 | 34 | |||
35 | __RCSID("$NetBSD: trap.c,v 1.10 2020/11/01 21:09:48 skrll Exp $"); | 35 | __RCSID("$NetBSD: trap.c,v 1.11 2020/11/04 06:56:56 skrll Exp $"); | |
36 | 36 | |||
37 | #include <sys/param.h> | 37 | #include <sys/param.h> | |
38 | #include <sys/systm.h> | 38 | #include <sys/systm.h> | |
39 | #include <sys/atomic.h> | 39 | #include <sys/atomic.h> | |
40 | 40 | |||
41 | #include <sys/signal.h> | 41 | #include <sys/signal.h> | |
42 | #include <sys/signalvar.h> | 42 | #include <sys/signalvar.h> | |
43 | #include <sys/siginfo.h> | 43 | #include <sys/siginfo.h> | |
44 | 44 | |||
45 | #include <uvm/uvm.h> | 45 | #include <uvm/uvm.h> | |
46 | 46 | |||
47 | #include <riscv/locore.h> | 47 | #include <riscv/locore.h> | |
48 | 48 | |||
49 | #define INSTRUCTION_TRAP_MASK (__BIT(CAUSE_ILLEGAL_INSTRUCTION)) | 49 | #define INSTRUCTION_TRAP_MASK (__BIT(CAUSE_ILLEGAL_INSTRUCTION)) | |
50 | 50 | |||
51 | #define FAULT_TRAP_MASK (__BIT(CAUSE_FETCH_ACCESS) \ | 51 | #define FAULT_TRAP_MASK (__BIT(CAUSE_FETCH_ACCESS) \ | |
52 | |__BIT(CAUSE_LOAD_ACCESS) \ | 52 | |__BIT(CAUSE_LOAD_ACCESS) \ | |
53 | |__BIT(CAUSE_STORE_ACCESS)) | 53 | |__BIT(CAUSE_STORE_ACCESS)) | |
54 | 54 | |||
55 | #define MISALIGNED_TRAP_MASK (__BIT(CAUSE_FETCH_MISALIGNED) \ | 55 | #define MISALIGNED_TRAP_MASK (__BIT(CAUSE_FETCH_MISALIGNED) \ | |
56 | |__BIT(CAUSE_LOAD_MISALIGNED) \ | 56 | |__BIT(CAUSE_LOAD_MISALIGNED) \ | |
57 | |__BIT(CAUSE_STORE_MISALIGNED)) | 57 | |__BIT(CAUSE_STORE_MISALIGNED)) | |
58 | 58 | |||
/*
 * Human-readable names for the trap cause codes this port reports,
 * indexed by the hardware cause value.  Causes without an entry are
 * printed as "?" by dump_trapframe().
 */
static const char * const causenames[] = {
	[CAUSE_FETCH_MISALIGNED] = "misaligned fetch",
	[CAUSE_LOAD_MISALIGNED] = "misaligned load",
	[CAUSE_STORE_MISALIGNED] = "misaligned store",
	[CAUSE_FETCH_ACCESS] = "fetch",
	[CAUSE_LOAD_ACCESS] = "load",
	[CAUSE_STORE_ACCESS] = "store",
	[CAUSE_ILLEGAL_INSTRUCTION] = "illegal instruction",
	[CAUSE_BREAKPOINT] = "breakpoint",
};
69 | 69 | |||
70 | void | 70 | void | |
71 | cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb) | 71 | cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb) | |
72 | { | 72 | { | |
73 | tf->tf_a0 = fb->fb_reg[FB_A0]; | 73 | tf->tf_a0 = fb->fb_reg[FB_A0]; | |
74 | tf->tf_ra = fb->fb_reg[FB_RA]; | 74 | tf->tf_ra = fb->fb_reg[FB_RA]; | |
75 | tf->tf_s0 = fb->fb_reg[FB_S0]; | 75 | tf->tf_s0 = fb->fb_reg[FB_S0]; | |
76 | tf->tf_s1 = fb->fb_reg[FB_S1]; | 76 | tf->tf_s1 = fb->fb_reg[FB_S1]; | |
77 | tf->tf_s2 = fb->fb_reg[FB_S2]; | 77 | tf->tf_s2 = fb->fb_reg[FB_S2]; | |
78 | tf->tf_s3 = fb->fb_reg[FB_S3]; | 78 | tf->tf_s3 = fb->fb_reg[FB_S3]; | |
79 | tf->tf_s4 = fb->fb_reg[FB_S4]; | 79 | tf->tf_s4 = fb->fb_reg[FB_S4]; | |
80 | tf->tf_s5 = fb->fb_reg[FB_S5]; | 80 | tf->tf_s5 = fb->fb_reg[FB_S5]; | |
81 | tf->tf_s6 = fb->fb_reg[FB_S6]; | 81 | tf->tf_s6 = fb->fb_reg[FB_S6]; | |
82 | tf->tf_s7 = fb->fb_reg[FB_S7]; | 82 | tf->tf_s7 = fb->fb_reg[FB_S7]; | |
83 | tf->tf_s8 = fb->fb_reg[FB_S8]; | 83 | tf->tf_s8 = fb->fb_reg[FB_S8]; | |
84 | tf->tf_s9 = fb->fb_reg[FB_S9]; | 84 | tf->tf_s9 = fb->fb_reg[FB_S9]; | |
85 | tf->tf_s10 = fb->fb_reg[FB_S10]; | 85 | tf->tf_s10 = fb->fb_reg[FB_S10]; | |
86 | tf->tf_s11 = fb->fb_reg[FB_S11]; | 86 | tf->tf_s11 = fb->fb_reg[FB_S11]; | |
87 | } | 87 | } | |
88 | 88 | |||
89 | int | 89 | int | |
90 | copyin(const void *uaddr, void *kaddr, size_t len) | 90 | copyin(const void *uaddr, void *kaddr, size_t len) | |
91 | { | 91 | { | |
92 | struct faultbuf fb; | 92 | struct faultbuf fb; | |
93 | int error; | 93 | int error; | |
94 | 94 | |||
95 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | 95 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | |
96 | memcpy(kaddr, uaddr, len); | 96 | memcpy(kaddr, uaddr, len); | |
97 | cpu_unset_onfault(); | 97 | cpu_unset_onfault(); | |
98 | } | 98 | } | |
99 | return error; | 99 | return error; | |
100 | } | 100 | } | |
101 | 101 | |||
102 | int | 102 | int | |
103 | copyout(const void *kaddr, void *uaddr, size_t len) | 103 | copyout(const void *kaddr, void *uaddr, size_t len) | |
104 | { | 104 | { | |
105 | struct faultbuf fb; | 105 | struct faultbuf fb; | |
106 | int error; | 106 | int error; | |
107 | 107 | |||
108 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | 108 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | |
109 | memcpy(uaddr, kaddr, len); | 109 | memcpy(uaddr, kaddr, len); | |
110 | cpu_unset_onfault(); | 110 | cpu_unset_onfault(); | |
111 | } | 111 | } | |
112 | return error; | 112 | return error; | |
113 | } | 113 | } | |
114 | 114 | |||
115 | int | 115 | int | |
116 | kcopy(const void *kfaddr, void *kdaddr, size_t len) | 116 | kcopy(const void *kfaddr, void *kdaddr, size_t len) | |
117 | { | 117 | { | |
118 | struct faultbuf fb; | 118 | struct faultbuf fb; | |
119 | int error; | 119 | int error; | |
120 | 120 | |||
121 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | 121 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | |
122 | memcpy(kdaddr, kfaddr, len); | 122 | memcpy(kdaddr, kfaddr, len); | |
123 | cpu_unset_onfault(); | 123 | cpu_unset_onfault(); | |
124 | } | 124 | } | |
125 | return error; | 125 | return error; | |
126 | } | 126 | } | |
127 | 127 | |||
128 | int | 128 | int | |
129 | copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) | 129 | copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) | |
130 | { | 130 | { | |
131 | struct faultbuf fb; | 131 | struct faultbuf fb; | |
132 | int error; | 132 | int error; | |
133 | 133 | |||
134 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | 134 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | |
135 | len = strlcpy(kaddr, uaddr, len); | 135 | len = strlcpy(kaddr, uaddr, len); | |
136 | cpu_unset_onfault(); | 136 | cpu_unset_onfault(); | |
137 | if (done != NULL) { | 137 | if (done != NULL) { | |
138 | *done = len; | 138 | *done = len; | |
139 | } | 139 | } | |
140 | } | 140 | } | |
141 | return error; | 141 | return error; | |
142 | } | 142 | } | |
143 | 143 | |||
144 | int | 144 | int | |
145 | copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done) | 145 | copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done) | |
146 | { | 146 | { | |
147 | struct faultbuf fb; | 147 | struct faultbuf fb; | |
148 | int error; | 148 | int error; | |
149 | 149 | |||
150 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | 150 | if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) { | |
151 | len = strlcpy(uaddr, kaddr, len); | 151 | len = strlcpy(uaddr, kaddr, len); | |
152 | cpu_unset_onfault(); | 152 | cpu_unset_onfault(); | |
153 | if (done != NULL) { | 153 | if (done != NULL) { | |
154 | *done = len; | 154 | *done = len; | |
155 | } | 155 | } | |
156 | } | 156 | } | |
157 | return error; | 157 | return error; | |
158 | } | 158 | } | |
159 | 159 | |||
160 | static void | 160 | static void | |
161 | dump_trapframe(const struct trapframe *tf, void (*pr)(const char *, ...)) | 161 | dump_trapframe(const struct trapframe *tf, void (*pr)(const char *, ...)) | |
162 | { | 162 | { | |
163 | const char *causestr = "?"; | 163 | const char *causestr = "?"; | |
164 | if (tf->tf_cause < __arraycount(causenames) | 164 | if (tf->tf_cause < __arraycount(causenames) | |
165 | && causenames[tf->tf_cause] != NULL) | 165 | && causenames[tf->tf_cause] != NULL) | |
166 | causestr = causenames[tf->tf_cause]; | 166 | causestr = causenames[tf->tf_cause]; | |
167 | (*pr)("Trapframe @ %p " | 167 | (*pr)("Trapframe @ %p " | |
168 | "(cause=%d (%s), status=%#x, pc=%#16"PRIxREGISTER | 168 | "(cause=%d (%s), status=%#x, pc=%#16"PRIxREGISTER | |
169 | ", va=%#"PRIxREGISTER"):\n", | 169 | ", va=%#"PRIxREGISTER"):\n", | |
170 | tf, tf->tf_cause, causestr, tf->tf_sr, tf->tf_pc, tf->tf_badaddr); | 170 | tf, tf->tf_cause, causestr, tf->tf_sr, tf->tf_pc, tf->tf_tval); | |
171 | (*pr)("ra=%#16"PRIxREGISTER", sp=%#16"PRIxREGISTER | 171 | (*pr)("ra=%#16"PRIxREGISTER", sp=%#16"PRIxREGISTER | |
172 | ", gp=%#16"PRIxREGISTER", tp=%#16"PRIxREGISTER"\n", | 172 | ", gp=%#16"PRIxREGISTER", tp=%#16"PRIxREGISTER"\n", | |
173 | tf->tf_ra, tf->tf_sp, tf->tf_gp, tf->tf_tp); | 173 | tf->tf_ra, tf->tf_sp, tf->tf_gp, tf->tf_tp); | |
174 | (*pr)("s0=%#16"PRIxREGISTER", s1=%#16"PRIxREGISTER | 174 | (*pr)("s0=%#16"PRIxREGISTER", s1=%#16"PRIxREGISTER | |
175 | ", s2=%#16"PRIxREGISTER", s3=%#16"PRIxREGISTER"\n", | 175 | ", s2=%#16"PRIxREGISTER", s3=%#16"PRIxREGISTER"\n", | |
176 | tf->tf_s0, tf->tf_s1, tf->tf_s2, tf->tf_s3); | 176 | tf->tf_s0, tf->tf_s1, tf->tf_s2, tf->tf_s3); | |
177 | (*pr)("s4=%#16"PRIxREGISTER", s5=%#16"PRIxREGISTER | 177 | (*pr)("s4=%#16"PRIxREGISTER", s5=%#16"PRIxREGISTER | |
178 | ", s5=%#16"PRIxREGISTER", s3=%#16"PRIxREGISTER"\n", | 178 | ", s5=%#16"PRIxREGISTER", s3=%#16"PRIxREGISTER"\n", | |
179 | tf->tf_s4, tf->tf_s5, tf->tf_s2, tf->tf_s3); | 179 | tf->tf_s4, tf->tf_s5, tf->tf_s2, tf->tf_s3); | |
180 | (*pr)("s8=%#16"PRIxREGISTER", s9=%#16"PRIxREGISTER | 180 | (*pr)("s8=%#16"PRIxREGISTER", s9=%#16"PRIxREGISTER | |
181 | ", s10=%#16"PRIxREGISTER", s11=%#16"PRIxREGISTER"\n", | 181 | ", s10=%#16"PRIxREGISTER", s11=%#16"PRIxREGISTER"\n", | |
182 | tf->tf_s8, tf->tf_s9, tf->tf_s10, tf->tf_s11); | 182 | tf->tf_s8, tf->tf_s9, tf->tf_s10, tf->tf_s11); | |
183 | (*pr)("a0=%#16"PRIxREGISTER", a1=%#16"PRIxREGISTER | 183 | (*pr)("a0=%#16"PRIxREGISTER", a1=%#16"PRIxREGISTER | |
184 | ", a2=%#16"PRIxREGISTER", a3=%#16"PRIxREGISTER"\n", | 184 | ", a2=%#16"PRIxREGISTER", a3=%#16"PRIxREGISTER"\n", | |
185 | tf->tf_a0, tf->tf_a1, tf->tf_a2, tf->tf_a3); | 185 | tf->tf_a0, tf->tf_a1, tf->tf_a2, tf->tf_a3); | |
186 | (*pr)("a4=%#16"PRIxREGISTER", a5=%#16"PRIxREGISTER | 186 | (*pr)("a4=%#16"PRIxREGISTER", a5=%#16"PRIxREGISTER | |
187 | ", a5=%#16"PRIxREGISTER", a7=%#16"PRIxREGISTER"\n", | 187 | ", a5=%#16"PRIxREGISTER", a7=%#16"PRIxREGISTER"\n", | |
188 | tf->tf_a4, tf->tf_a5, tf->tf_a6, tf->tf_a7); | 188 | tf->tf_a4, tf->tf_a5, tf->tf_a6, tf->tf_a7); | |
189 | (*pr)("t0=%#16"PRIxREGISTER", t1=%#16"PRIxREGISTER | 189 | (*pr)("t0=%#16"PRIxREGISTER", t1=%#16"PRIxREGISTER | |
190 | ", t2=%#16"PRIxREGISTER", t3=%#16"PRIxREGISTER"\n", | 190 | ", t2=%#16"PRIxREGISTER", t3=%#16"PRIxREGISTER"\n", | |
191 | tf->tf_t0, tf->tf_t1, tf->tf_t2, tf->tf_t3); | 191 | tf->tf_t0, tf->tf_t1, tf->tf_t2, tf->tf_t3); | |
192 | (*pr)("t4=%#16"PRIxREGISTER", t5=%#16"PRIxREGISTER | 192 | (*pr)("t4=%#16"PRIxREGISTER", t5=%#16"PRIxREGISTER | |
193 | ", t6=%#16"PRIxREGISTER"\n", | 193 | ", t6=%#16"PRIxREGISTER"\n", | |
194 | tf->tf_t4, tf->tf_t5, tf->tf_t6); | 194 | tf->tf_t4, tf->tf_t5, tf->tf_t6); | |
195 | } | 195 | } | |
196 | 196 | |||
/*
 * Initialize a trap-style ksiginfo: signal number, si_code, the
 * faulting address, and the raw hardware cause (delivered to the
 * process as si_trap).
 */
static inline void
trap_ksi_init(ksiginfo_t *ksi, int signo, int code, vaddr_t addr,
    register_t cause)
{
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = signo;
	ksi->ksi_code = code;
	ksi->ksi_addr = (void *)addr;
	ksi->ksi_trap = cause;
}
207 | 207 | |||
/*
 * Deliver the trap signal described by ksi to the current LWP via
 * the emulation's trapsignal hook, optionally dumping the trapframe
 * first when cpu_printfataltraps is set.
 */
static void
cpu_trapsignal(struct trapframe *tf, ksiginfo_t *ksi)
{
	if (cpu_printfataltraps) {
		dump_trapframe(tf, printf);
	}
	(*curlwp->l_proc->p_emul->e_trapsignal)(curlwp, ksi);
}
216 | 216 | |||
217 | static inline vm_prot_t | 217 | static inline vm_prot_t | |
218 | get_faulttype(register_t cause) | 218 | get_faulttype(register_t cause) | |
219 | { | 219 | { | |
220 | if (cause == CAUSE_LOAD_ACCESS) | 220 | if (cause == CAUSE_LOAD_ACCESS) | |
221 | return VM_PROT_READ; | 221 | return VM_PROT_READ; | |
222 | if (cause == CAUSE_STORE_ACCESS) | 222 | if (cause == CAUSE_STORE_ACCESS) | |
223 | return VM_PROT_READ | VM_PROT_WRITE; | 223 | return VM_PROT_READ | VM_PROT_WRITE; | |
224 | KASSERT(cause == CAUSE_FETCH_ACCESS); | 224 | KASSERT(cause == CAUSE_FETCH_ACCESS); | |
225 | return VM_PROT_READ | VM_PROT_EXECUTE; | 225 | return VM_PROT_READ | VM_PROT_EXECUTE; | |
226 | } | 226 | } | |
227 | 227 | |||
/*
 * Software referenced/modified/execpage tracking.
 *
 * Try to resolve an access fault by upgrading the PTE's attribute
 * bits (e.g. setting PTE_V to record a reference) without taking the
 * full uvm_fault() path.  Returns true if the PTE was fixed up and
 * the faulting access can simply be retried.
 */
static bool
trap_pagefault_fixup(struct trapframe *tf, struct pmap *pmap, register_t cause,
    intptr_t addr)
{
	pt_entry_t * const ptep = pmap_pte_lookup(pmap, addr);
	struct vm_page *pg;

	if (ptep == NULL)
		return false;

	pt_entry_t opte = *ptep;
	pt_entry_t npte;
	u_int attr;
	do {
		/* A PTE with only the global bit set maps nothing. */
		if ((opte & ~PTE_G) == 0)
			return false;

		pg = PHYS_TO_VM_PAGE(pte_to_paddr(opte));
		if (pg == NULL)
			return false;

		attr = 0;
		npte = opte;
		if ((npte & PTE_V) == 0) {
			/* First touch: validate and note the reference. */
			npte |= PTE_V;
			attr |= VM_PAGEMD_REFERENCED;
		}
#if 0 /* XXX Outdated */
		if (cause == CAUSE_STORE_ACCESS) {
			if ((npte & PTE_NW) != 0) {
				npte &= ~PTE_NW;
				attr |= VM_PAGEMD_MODIFIED;
			}
		} else if (cause == CAUSE_FETCH_ACCESS) {
			if ((npte & PTE_NX) != 0) {
				npte &= ~PTE_NX;
				attr |= VM_PAGEMD_EXECPAGE;
			}
		}
#endif
		/* Nothing we can fix: hand the fault to uvm_fault(). */
		if (attr == 0)
			return false;

		/*
		 * NOTE(review): opte is not reloaded when the CAS fails,
		 * so a retry recomputes npte from a stale value --
		 * confirm atomic_cas_pte() semantics / loop termination.
		 */
	} while (opte != atomic_cas_pte(ptep, opte, npte));

	pmap_page_set_attributes(VM_PAGE_TO_MD(pg), attr);
	pmap_tlb_update_addr(pmap, addr, npte, 0);

	if (attr & VM_PAGEMD_EXECPAGE)
		pmap_md_page_syncicache(pg, curcpu()->ci_data.cpu_kcpuset);

	return true;
}
281 | 281 | |||
/*
 * Handle a load/store/fetch access fault.
 *
 * Returns true if the fault was resolved (the faulting instruction
 * may be retried, or the trapframe was redirected to an onfault
 * handler); returns false if a signal should be posted (ksi is then
 * initialized) or, for unrecoverable kernel faults, the caller
 * should panic.
 */
static bool
trap_pagefault(struct trapframe *tf, register_t epc, register_t status,
    register_t cause, register_t tval, bool usertrap_p, ksiginfo_t *ksi)
{
	struct proc * const p = curlwp->l_proc;
	const intptr_t addr = trunc_page(tval);

	if (__predict_false(usertrap_p
	    && (false
		// Make sure this address is not trying to access kernel
		// space (kernel addresses are negative).
		|| addr < 0
#ifdef _LP64
		// If this is a process using a 32-bit address space, make
		// sure the address is a signed 32-bit number.
		|| ((p->p_flag & PK_32) && (int32_t) addr != addr)
#endif
		|| false))) {
		trap_ksi_init(ksi, SIGSEGV, SEGV_MAPERR, addr, cause);
		return false;
	}

	// Negative (kernel) addresses fault against the kernel map.
	struct vm_map * const map = (addr >= 0 ? &p->p_vmspace->vm_map : kernel_map);

	// See if this fault is for reference/modified/execpage tracking
	if (trap_pagefault_fixup(tf, map->pmap, cause, addr))
		return true;

	const vm_prot_t ftype = get_faulttype(cause);

	if (usertrap_p) {
		int error = uvm_fault(&p->p_vmspace->vm_map, addr, ftype);
		if (error) {
			trap_ksi_init(ksi, SIGSEGV,
			    error == EACCES ? SEGV_ACCERR : SEGV_MAPERR,
			    (intptr_t)tval, cause);
			return false;
		}
		// Grow the stack if the fault was below it.
		uvm_grow(p, addr);
		return true;
	}

	// Page faults are not allowed while dealing with interrupts
	if (cpu_intr_p())
		return false;

	// Disable onfault so a nested fault inside uvm_fault() is fatal
	// rather than silently unwinding.
	struct faultbuf * const fb = cpu_disable_onfault();
	int error = uvm_fault(map, addr, ftype);
	cpu_enable_onfault(fb);
	if (error == 0) {
		if (map != kernel_map) {
			uvm_grow(p, addr);
		}
		return true;
	}

	// No onfault handler registered: the kernel fault is fatal.
	if (fb == NULL) {
		return false;
	}

	cpu_jump_onfault(tf, fb);
	return true;
}
344 | 344 | |||
345 | static bool | 345 | static bool | |
346 | trap_instruction(struct trapframe *tf, register_t epc, register_t status, | 346 | trap_instruction(struct trapframe *tf, register_t epc, register_t status, | |
347 | register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi) | 347 | register_t cause, register_t tval, bool usertrap_p, ksiginfo_t *ksi) | |
348 | { | 348 | { | |
349 | if (usertrap_p) { | 349 | if (usertrap_p) { | |
350 | trap_ksi_init(ksi, SIGILL, ILL_ILLOPC, | 350 | trap_ksi_init(ksi, SIGILL, ILL_ILLOPC, | |
351 | (intptr_t)badaddr, cause); | 351 | (intptr_t)tval, cause); | |
352 | } | 352 | } | |
353 | return false; | 353 | return false; | |
354 | } | 354 | } | |
355 | 355 | |||
356 | static bool | 356 | static bool | |
357 | trap_misalignment(struct trapframe *tf, register_t epc, register_t status, | 357 | trap_misalignment(struct trapframe *tf, register_t epc, register_t status, | |
358 | register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi) | 358 | register_t cause, register_t tval, bool usertrap_p, ksiginfo_t *ksi) | |
359 | { | 359 | { | |
360 | if (usertrap_p) { | 360 | if (usertrap_p) { | |
361 | trap_ksi_init(ksi, SIGBUS, BUS_ADRALN, | 361 | trap_ksi_init(ksi, SIGBUS, BUS_ADRALN, | |
362 | (intptr_t)badaddr, cause); | 362 | (intptr_t)tval, cause); | |
363 | } | 363 | } | |
364 | return false; | 364 | return false; | |
365 | } | 365 | } | |
366 | 366 | |||
/*
 * Main synchronous trap dispatcher, called from the locore trap
 * entry with the saved epc/status/cause/tval CSR values.  Routes the
 * trap to the page-fault, illegal-instruction or misalignment
 * handler; posts a signal for unresolved user traps and panics for
 * unresolved kernel traps.
 */
void
cpu_trap(struct trapframe *tf, register_t epc, register_t status,
    register_t cause, register_t tval)
{
	/*
	 * NOTE(review): assumes cause < 32 and that the interrupt bit
	 * has already been stripped by the caller -- confirm in locore.
	 */
	const u_int fault_mask = 1U << cause;
	const intptr_t addr = tval;
	/* SR_PS clear means the trap came from user mode. */
	const bool usertrap_p = (status & SR_PS) == 0;
	bool ok = true;
	ksiginfo_t ksi;

	if (__predict_true(fault_mask & FAULT_TRAP_MASK)) {
#ifndef _LP64
		// This fault may be because the kernel's page table got a new
		// page table page and this pmap's page table doesn't know
		// about it. See
		struct pmap * const pmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
		if ((intptr_t) addr < 0
		    && pmap != pmap_kernel()
		    && pmap_pdetab_fixup(pmap, addr)) {
			return;
		}
#endif
		ok = trap_pagefault(tf, epc, status, cause, addr,
		    usertrap_p, &ksi);
	} else if (fault_mask & INSTRUCTION_TRAP_MASK) {
		ok = trap_instruction(tf, epc, status, cause, addr,
		    usertrap_p, &ksi);
#if 0
	/*
	 * NOTE(review): "fault_mask && __BIT(...)" below should be a
	 * bitwise "&" if this block is ever enabled.
	 */
	} else if (fault_mask && __BIT(CAUSE_FP_DISABLED)) {
		if (!usertrap_p) {
			panic("%s: fp used @ %#"PRIxREGISTER" in kernel!",
			    __func__, tf->tf_pc);
		}
		fpu_load();
#endif
	} else if (fault_mask & MISALIGNED_TRAP_MASK) {
		ok = trap_misalignment(tf, epc, status, cause, addr,
		    usertrap_p, &ksi);
	} else {
		dump_trapframe(tf, printf);
		panic("%s: unknown kernel trap", __func__);
	}

	if (usertrap_p) {
		if (!ok)
			cpu_trapsignal(tf, &ksi);
		/* Process pending signals/AST work before returning. */
		userret(curlwp);
	} else if (!ok) {
		dump_trapframe(tf, printf);
		panic("%s: fatal kernel trap", __func__);
	}
}
419 | 419 | |||
/*
 * Handle a pending asynchronous software trap (AST) on the way back
 * to user mode: atomically clear the pending flag, then account any
 * profiling ticks owed (LP_OWEUPC) to the current LWP.
 */
void
cpu_ast(struct trapframe *tf)
{

	atomic_swap_uint(&curlwp->l_md.md_astpending, 0);

	if (curlwp->l_pflag & LP_OWEUPC) {
		curlwp->l_pflag &= ~LP_OWEUPC;
		ADDUPROF(curlwp);
	}
}
431 | 431 | |||
/*
 * Interrupt entry point from locore.  Interrupt dispatch is not yet
 * implemented for this port; all arguments are currently ignored.
 */
void
cpu_intr(struct trapframe *tf, register_t epc, register_t status,
    register_t cause)
{
	/* XXX */
}
438 | 438 | |||
/*
 * Common backend for the _ufetch_* primitives: read a single 1-, 2-,
 * 4- or (LP64 only) 8-byte datum from user space into *valp under
 * onfault protection.  Returns 0 on success, the onfault error (1)
 * if the user access faults, or EINVAL for an unsupported size.
 */
static int
fetch_user_data(const void *uaddr, void *valp, size_t size)
{
	struct faultbuf fb;
	int error;

	if ((error = cpu_set_onfault(&fb, 1)) != 0)
		return error;

	switch (size) {
	case 1:
		*(uint8_t *)valp = *(volatile const uint8_t *)uaddr;
		break;
	case 2:
		*(uint16_t *)valp = *(volatile const uint16_t *)uaddr;
		break;
	case 4:
		*(uint32_t *)valp = *(volatile const uint32_t *)uaddr;
		break;
#ifdef _LP64
	case 8:
		*(uint64_t *)valp = *(volatile const uint64_t *)uaddr;
		break;
#endif /* _LP64 */
	default:
		error = EINVAL;
	}

	cpu_unset_onfault();
	return error;
}
470 | 470 | |||
/* ufetch(9): fetch one byte from user space. */
int
_ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{
	return fetch_user_data(uaddr, valp, sizeof(*valp));
}
476 | 476 | |||
/* ufetch(9): fetch a 16-bit value from user space. */
int
_ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{
	return fetch_user_data(uaddr, valp, sizeof(*valp));
}
482 | 482 | |||
/* ufetch(9): fetch a 32-bit value from user space. */
int
_ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{
	return fetch_user_data(uaddr, valp, sizeof(*valp));
}
488 | 488 | |||
#ifdef _LP64
/*
 * _ufetch_64 --
 *	Fetch a 64-bit value from the user address uaddr into *valp.
 *	Only available on 64-bit (RV64) kernels.
 *	Returns 0 on success or an error from the on-fault handler.
 */
int
_ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{

	return fetch_user_data(uaddr, valp, sizeof(uint64_t));
}
#endif /* _LP64 */
496 | 496 | |||
497 | static int | 497 | static int | |
498 | store_user_data(void *uaddr, const void *valp, size_t size) | 498 | store_user_data(void *uaddr, const void *valp, size_t size) | |
499 | { | 499 | { | |
500 | struct faultbuf fb; | 500 | struct faultbuf fb; | |
501 | int error; | 501 | int error; | |
502 | 502 | |||
503 | if ((error = cpu_set_onfault(&fb, 1)) != 0) | 503 | if ((error = cpu_set_onfault(&fb, 1)) != 0) | |
504 | return error; | 504 | return error; | |
505 | 505 | |||
506 | switch (size) { | 506 | switch (size) { | |
507 | case 1: | 507 | case 1: | |
508 | *(volatile uint8_t *)uaddr = *(const uint8_t *)valp; | 508 | *(volatile uint8_t *)uaddr = *(const uint8_t *)valp; | |
509 | break; | 509 | break; | |
510 | case 2: | 510 | case 2: | |
511 | *(volatile uint16_t *)uaddr = *(const uint8_t *)valp; | 511 | *(volatile uint16_t *)uaddr = *(const uint8_t *)valp; | |
512 | break; | 512 | break; | |
513 | case 4: | 513 | case 4: | |
514 | *(volatile uint32_t *)uaddr = *(const uint32_t *)valp; | 514 | *(volatile uint32_t *)uaddr = *(const uint32_t *)valp; | |
515 | break; | 515 | break; | |
516 | #ifdef _LP64 | 516 | #ifdef _LP64 | |
517 | case 8: | 517 | case 8: | |
518 | *(volatile uint64_t *)uaddr = *(const uint64_t *)valp; | 518 | *(volatile uint64_t *)uaddr = *(const uint64_t *)valp; | |
519 | break; | 519 | break; | |
520 | #endif /* _LP64 */ | 520 | #endif /* _LP64 */ | |
521 | default: | 521 | default: | |
522 | error = EINVAL; | 522 | error = EINVAL; | |
523 | } | 523 | } | |
524 | 524 | |||
525 | cpu_unset_onfault(); | 525 | cpu_unset_onfault(); | |
526 | return error; | 526 | return error; | |
527 | } | 527 | } | |
528 | 528 | |||
/*
 * _ustore_8 --
 *	Store the 8-bit value val at the user address uaddr.
 *	Returns 0 on success or an error from the on-fault handler.
 */
int
_ustore_8(uint8_t *uaddr, uint8_t val)
{

	return store_user_data(uaddr, &val, sizeof(uint8_t));
}
534 | 534 | |||
/*
 * _ustore_16 --
 *	Store the 16-bit value val at the user address uaddr.
 *	Returns 0 on success or an error from the on-fault handler.
 */
int
_ustore_16(uint16_t *uaddr, uint16_t val)
{

	return store_user_data(uaddr, &val, sizeof(uint16_t));
}
540 | 540 | |||
/*
 * _ustore_32 --
 *	Store the 32-bit value val at the user address uaddr.
 *	Returns 0 on success or an error from the on-fault handler.
 */
int
_ustore_32(uint32_t *uaddr, uint32_t val)
{

	return store_user_data(uaddr, &val, sizeof(uint32_t));
}
546 | 546 | |||
#ifdef _LP64
/*
 * _ustore_64 --
 *	Store the 64-bit value val at the user address uaddr.
 *	Only available on 64-bit (RV64) kernels.
 *	Returns 0 on success or an error from the on-fault handler.
 */
int
_ustore_64(uint64_t *uaddr, uint64_t val)
{

	return store_user_data(uaddr, &val, sizeof(uint64_t));
}
#endif /* _LP64 */
553 | #endif /* _LP64 */ | 553 | #endif /* _LP64 */ |