Wed Aug 16 22:49:05 2017 UTC
add cpu_set_onfault glue


(nisimura)
diff -r1.1 -r1.2 src/sys/arch/aarch64/aarch64/locore.S

cvs diff -r1.1 -r1.2 src/sys/arch/aarch64/aarch64/locore.S

--- src/sys/arch/aarch64/aarch64/locore.S 2014/08/10 05:47:37 1.1
+++ src/sys/arch/aarch64/aarch64/locore.S 2017/08/16 22:49:05 1.2
@@ -1,217 +1,256 @@
-/*	$NetBSD: locore.S,v 1.1 2014/08/10 05:47:37 matt Exp $	*/
+/*	$NetBSD: locore.S,v 1.2 2017/08/16 22:49:05 nisimura Exp $	*/
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas of 3am Software Foundry.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <aarch64/asm.h>
 #include "assym.h"
 
 #include "opt_ddb.h"
 
-RCSID("$NetBSD: locore.S,v 1.1 2014/08/10 05:47:37 matt Exp $")
+RCSID("$NetBSD: locore.S,v 1.2 2017/08/16 22:49:05 nisimura Exp $")
+
+// XXX:AARCH64
+lr	.req	x30
 
 /*
  * At IPL_SCHED:
  *	x0 = oldlwp (may be NULL)
  *	x1 = newlwp
  *	x2 = returning
  * returns x0-x2 unchanged
  */
 ENTRY_NP(cpu_switchto)
 	cbz	x0, .Lrestore_lwp
 
 	/*
 	 * Store the callee-saved registers on the stack in a trapframe
 	 */
 	sub	sp, sp, #TF_SIZE
 	stp	x19, x20, [sp, #TF_X19]
 	stp	x21, x22, [sp, #TF_X21]
 	stp	x23, x24, [sp, #TF_X23]
 	stp	x25, x26, [sp, #TF_X25]
 	stp	x27, x28, [sp, #TF_X27]
 	stp	x29, x30, [sp, #TF_X29]
 
 	/*
 	 * Get the previous trapframe pointer and the user writeable Thread ID
 	 * register and save them in the trap frame.
 	 */
 	ldr	x5, [x0, #L_MD_KTF]
 	mrs	x4, tpidr_el0
 #if TF_TPIDR + 8 == TF_CHAIN
 	stp	x4, x5, [sp, #TF_TPIDR]
 #else
 	str	x4, [sp, #TF_TPIDR]
 	str	x5, [sp, #TF_CHAIN]
 #endif
 
 	/*
 	 * Get the current stack pointer and the CPACR and save them in
 	 * the old lwp md area.
 	 */
 	mov	x4, sp
 	mrs	x5, cpacr_el1
 #if L_MD_KTF + 8 == L_MD_CPACR
 	stp	x4, x5, [x0, #L_MD_KTF]
 #else
 	str	x4, [x0, #L_MD_KTF]
 	str	x5, [x0, #L_MD_CPACR]
 #endif
 
 	/* We are done with the old lwp */
 
 .Lrestore_lwp:
 #if L_MD_KTF + 8 == L_MD_CPACR
 	ldp	x4, x5, [x1, #L_MD_KTF]	// get trapframe ptr and cpacr_el1
 #else
 	ldr	x4, [x1, #L_MD_KTF]	// get trapframe ptr (aka SP)
 	ldr	x5, [x1, #L_MD_CPACR]	// get cpacr_el1
 #endif
 	mov	sp, x4			// restore stack pointer
 	msr	cpacr_el1, x5		// restore cpacr_el1
 
 	ldr	x4, [sp, #TF_TPIDR]	// load user writeable thread ID reg
 	msr	tpidr_el0, x4		// restore it
 
 	mrs	x3, tpidr_el1		// get curcpu
 	str	x1, [x3, #CI_CURLWP]	// show as curlwp
 
 	/*
 	 * Restore callee-saved registers
 	 */
 	ldp	x19, x20, [sp, #TF_X19]
 	ldp	x21, x22, [sp, #TF_X21]
 	ldp	x23, x24, [sp, #TF_X23]
 	ldp	x25, x26, [sp, #TF_X25]
 	ldp	x27, x28, [sp, #TF_X27]
 	ldp	x29, x30, [sp, #TF_X29]
 	add	sp, sp, #TF_SIZE	/* pop trapframe from stack */
 
 	ret
 END(cpu_switchto)
 
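The `#if L_MD_KTF + 8 == L_MD_CPACR` tests above exist so the switch code can pair
the trapframe pointer and the CPACR_EL1 copy into a single stp/ldp whenever the two
machine-dependent lwp fields are adjacent.  A rough C picture of that layout, with
field names inferred from the L_MD_* assym constants rather than quoted from the tree:

	struct trapframe;			/* laid out per the TF_* constants */

	/*
	 * Sketch only: names inferred from the L_MD_KTF, L_MD_CPACR and
	 * L_MD_ONFAULT offsets used in this file; the authoritative
	 * definition lives in the aarch64 headers.
	 */
	struct mdlwp_sketch {
		struct trapframe *md_ktf;	/* L_MD_KTF: saved kernel SP / trapframe */
		unsigned long md_cpacr;		/* L_MD_CPACR: 8 bytes after md_ktf */
		void *md_onfault;		/* L_MD_ONFAULT: see cpu_set_onfault below */
	};

If the adjacency ever breaks, the #else paths fall back to two single stores/loads,
so the assembly stays correct across a struct reordering.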
 /*
+ * x0 = lwp
+ * x1 = ipl
+ */
+ENTRY_NP(cpu_switchto_softint)
+//
+//XXXAARCH64
+//
+	ret
+END(cpu_switchto_softint)
+
+
+/*
  * Called at IPL_SCHED
  *	x0 = old lwp (from cpu_switchto)
  *	x1 = new lwp (from cpu_switchto)
  *	x27 = func
  *	x28 = arg
  */
 ENTRY_NP(lwp_trampoline)
 #if defined(MULTIPROCESSOR)
 	mov	x19, x0
 	mov	x20, x1
 	bl	_C_LABEL(proc_trampoline_mp)
 	mov	x1, x20
 	mov	x0, x19
 #endif
 	bl	_C_LABEL(lwp_startup)
 
 	/*
 	 * If the function returns, have it return to the exception trap return
 	 * handler which will restore all user state before returning to EL0.
 	 */
 	adr	x30, exception_trap_exit // set function return address
 	mov	x0, x28			// mov arg into place
 	br	x27			// call function with arg
 END(lwp_trampoline)
 
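lwp_trampoline expects the new lwp's entry point in x27 and its argument in x28;
both are callee-saved, so cpu_switchto carries them intact into the lwp's first run.
A hypothetical sketch of the fork-time setup (the tf_x27/tf_x28 slot names are
assumed from the register usage above; on a real system this is the job of the
port's cpu_lwp_fork()):

	#include <stdint.h>

	/* Hypothetical: just the two slots lwp_trampoline consumes. */
	struct switchframe_sketch {
		uint64_t tf_x27;	/* "br x27" target */
		uint64_t tf_x28;	/* "mov x0, x28" argument */
	};

	/* Arrange for lwp_trampoline to call func(arg) on first dispatch. */
	static void
	set_trampoline(struct switchframe_sketch *sf,
	    void (*func)(void *), void *arg)
	{
		sf->tf_x27 = (uintptr_t)func;
		sf->tf_x28 = (uintptr_t)arg;
	}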
 /*
  * Return from exception.  There's a trap return, an intr return, and
  * a syscall return.
  */
 ENTRY_NP(exception_trap_exit)
 	ldp	x0, x1, [sp, #TF_X0]
 	ldp	x2, x3, [sp, #TF_X2]
 	ldp	x4, x5, [sp, #TF_X4]
 	ldp	x6, x7, [sp, #TF_X6]
 	ldp	x8, x9, [sp, #TF_X8]
 	ldp	x10, x11, [sp, #TF_X10]
 	ldp	x12, x13, [sp, #TF_X12]
 	ldp	x14, x15, [sp, #TF_X14]
 exception_syscall_exit:
 	ldp	x16, x17, [sp, #TF_X16]
 	ldr	x18, [sp, #TF_X18]
 
 #if TF_SP + 8 == TF_PC
 	ldp	x20, x21, [sp, #TF_SP]
 #else
 	ldr	x20, [sp, #TF_SP]
 	ldr	x21, [sp, #TF_PC]
 #endif
 	ldr	x22, [sp, #TF_SPSR]
 	msr	sp_el0, x20
 	msr	elr_el1, x21
 	msr	spsr_el1, x22
 
 	ldp	x19, x20, [sp, #TF_X19]
 	ldp	x21, x22, [sp, #TF_X21]
 	ldp	x23, x24, [sp, #TF_X23]
 	ldp	x25, x26, [sp, #TF_X25]
 	ldp	x27, x28, [sp, #TF_X27]
 	ldp	x29, x30, [sp, #TF_X29]
 
 	/*
 	 * Don't adjust the stack for the trapframe since we would
 	 * just subtract it again upon exception entry.
 	 */
 	eret
 END(exception_trap_exit)
 
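The `#if TF_SP + 8 == TF_PC` test is the same adjacency trick used in cpu_switchto:
when the saved SP and PC slots touch, one ldp fetches both before they are written
to sp_el0 and elr_el1.  A rough C view of the slots this exit path reads (names from
the TF_* constants; the ordering beyond the sp/pc pairing is illustrative, and the
real struct trapframe is in the aarch64 frame headers):

	#include <stdint.h>

	/*
	 * Sketch only: slot names taken from the TF_* offsets used in this
	 * file; field order beyond tf_sp/tf_pc adjacency is illustrative.
	 */
	struct trapframe_sketch {
		uint64_t tf_reg[31];	/* TF_X0 .. TF_X30 */
		uint64_t tf_sp;		/* TF_SP: user stack pointer -> sp_el0 */
		uint64_t tf_pc;		/* TF_PC: return address -> elr_el1 */
		uint64_t tf_spsr;	/* TF_SPSR: saved PSTATE -> spsr_el1 */
		uint64_t tf_tpidr;	/* TF_TPIDR: user thread ID register */
		uint64_t tf_chain;	/* TF_CHAIN: previous trapframe link */
	};

Note the exit path deliberately leaves sp pointing at the trapframe rather than
popping it, because the matching exception entry claims the same TF_SIZE bytes again.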
 #ifdef DDB
 ENTRY(cpu_Debugger)
 	brk	#0xffff
 	ret
 END(cpu_Debugger)
 #endif /* DDB */
 
+/*
+ * int cpu_set_onfault(struct faultbuf *fb, register_t retval)
+ */
+ENTRY(cpu_set_onfault)
+	mov	x9, sp
+	stp	x19, x20, [x0, #0]
+	stp	x21, x22, [x0, #16]
+	stp	x23, x24, [x0, #32]
+	stp	x25, x26, [x0, #48]
+	stp	x27, x28, [x0, #64]
+	stp	x29, x9, [x0, #80]
+	stp	lr, x1, [x0, #96]
+	mrs	x3, tpidr_el1		/* curcpu */
+	ldr	x2, [x3, #CI_CURLWP]	/* curlwp */
+	str	x0, [x2, #L_MD_ONFAULT]	/* l_md.md_onfault = fb */
+	mov	x0, #0
+	ret
+END(cpu_set_onfault)
+
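cpu_set_onfault() saves the same callee-saved state as setjmp() below, plus the
caller's error code, and hangs the buffer off l_md.md_onfault so the fault handler
can find it.  A hedged sketch of the intended calling pattern, assuming a companion
cpu_unset_onfault() that clears l_md.md_onfault (only the set side appears in this
diff, which also supplies struct faultbuf):

	#include <sys/types.h>
	#include <sys/systm.h>		/* memcpy, in-kernel prototypes */
	#include <sys/errno.h>

	/* From this commit's glue; cpu_unset_onfault() is assumed. */
	int	cpu_set_onfault(struct faultbuf *, register_t);
	void	cpu_unset_onfault(void);

	/* Sketch of a copyin()-style consumer; not code from this commit. */
	int
	copy_from_user_sketch(void *kaddr, const void *uaddr, size_t len)
	{
		struct faultbuf fb;
		int error;

		error = cpu_set_onfault(&fb, EFAULT);
		if (error == 0) {
			/*
			 * If touching uaddr faults, the fault handler
			 * longjmp()s back through fb and cpu_set_onfault()
			 * appears to return a second time, now with EFAULT.
			 */
			memcpy(kaddr, uaddr, len);
			cpu_unset_onfault();	/* assumed companion */
		}
		return error;
	}

The faultbuf layout written above (x19-x28, x29 with the saved sp from x9, then lr
and retval) deliberately mirrors the label_t that setjmp() fills in, so the recovery
path is a longjmp-style restore.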
+/*
+ * setjmp(9)
+ * int setjmp(label_t *label);
+ * void longjmp(label_t *label);
+ */
 ENTRY(setjmp)
 	stp	x19, x20, [x0, #0]
 	stp	x21, x22, [x0, #16]
 	stp	x23, x24, [x0, #32]
 	stp	x25, x26, [x0, #48]
 	stp	x27, x28, [x0, #64]
 	stp	x29, x30, [x0, #80]
 	mov	x1, sp
 	str	x1, [x0, #96]
 	mov	x0, #0
 	ret
 END(setjmp)
 
 ENTRY(longjmp)
 	ldp	x19, x20, [x0, #0]
 	ldp	x21, x22, [x0, #16]
 	ldp	x23, x24, [x0, #32]
 	ldp	x25, x26, [x0, #48]
 	ldp	x27, x28, [x0, #64]
 	ldp	x29, x30, [x0, #80]
 	ldr	x1, [x0, #96]
 	mov	sp, x1
 	mov	x0, #1
 	ret
 END(longjmp)
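For completeness, the setjmp(9) pair above follows the usual contract: setjmp()
returns 0 on the direct call and 1 when control comes back via longjmp().  A minimal
in-kernel usage sketch (ddb-style error recovery is the typical consumer):

	#include <sys/types.h>		/* label_t */
	#include <sys/systm.h>		/* setjmp(9)/longjmp(9) prototypes */

	extern void might_fault(void);	/* hypothetical helper */

	static label_t recover_jb;

	static void
	recover_example(void)
	{
		if (setjmp(&recover_jb) == 0) {
			/* direct path: run code that may need to bail out */
			might_fault();
		} else {
			/* re-entered via longjmp(&recover_jb): clean up */
		}
	}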