| @@ -1,1482 +1,1521 @@ | | | @@ -1,1482 +1,1521 @@ |
1 | /* $NetBSD: locore.s,v 1.138 2021/07/07 02:44:04 thorpej Exp $ */ | | 1 | /* $NetBSD: locore.s,v 1.139 2021/07/07 03:30:35 thorpej Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1999, 2000, 2019 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1999, 2000, 2019 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
9 | * NASA Ames Research Center. | | 9 | * NASA Ames Research Center. |
10 | * | | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | | 11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions | | 12 | * modification, are permitted provided that the following conditions |
13 | * are met: | | 13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright | | 14 | * 1. Redistributions of source code must retain the above copyright |
15 | * notice, this list of conditions and the following disclaimer. | | 15 | * notice, this list of conditions and the following disclaimer. |
16 | * 2. Redistributions in binary form must reproduce the above copyright | | 16 | * 2. Redistributions in binary form must reproduce the above copyright |
17 | * notice, this list of conditions and the following disclaimer in the | | 17 | * notice, this list of conditions and the following disclaimer in the |
18 | * documentation and/or other materials provided with the distribution. | | 18 | * documentation and/or other materials provided with the distribution. |
19 | * | | 19 | * |
20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | * POSSIBILITY OF SUCH DAMAGE. | | 30 | * POSSIBILITY OF SUCH DAMAGE. |
31 | */ | | 31 | */ |
32 | | | 32 | |
33 | /* | | 33 | /* |
34 | * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. | | 34 | * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. |
35 | * All rights reserved. | | 35 | * All rights reserved. |
36 | * | | 36 | * |
37 | * Author: Chris G. Demetriou | | 37 | * Author: Chris G. Demetriou |
38 | * | | 38 | * |
39 | * Permission to use, copy, modify and distribute this software and | | 39 | * Permission to use, copy, modify and distribute this software and |
40 | * its documentation is hereby granted, provided that both the copyright | | 40 | * its documentation is hereby granted, provided that both the copyright |
41 | * notice and this permission notice appear in all copies of the | | 41 | * notice and this permission notice appear in all copies of the |
42 | * software, derivative works or modified versions, and any portions | | 42 | * software, derivative works or modified versions, and any portions |
43 | * thereof, and that both notices appear in supporting documentation. | | 43 | * thereof, and that both notices appear in supporting documentation. |
44 | * | | 44 | * |
45 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | | 45 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
46 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND | | 46 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND |
47 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | | 47 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
48 | * | | 48 | * |
49 | * Carnegie Mellon requests users of this software to return to | | 49 | * Carnegie Mellon requests users of this software to return to |
50 | * | | 50 | * |
51 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | | 51 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
52 | * School of Computer Science | | 52 | * School of Computer Science |
53 | * Carnegie Mellon University | | 53 | * Carnegie Mellon University |
54 | * Pittsburgh PA 15213-3890 | | 54 | * Pittsburgh PA 15213-3890 |
55 | * | | 55 | * |
56 | * any improvements or extensions that they make and grant Carnegie the | | 56 | * any improvements or extensions that they make and grant Carnegie the |
57 | * rights to redistribute these changes. | | 57 | * rights to redistribute these changes. |
58 | */ | | 58 | */ |
59 | | | 59 | |
60 | .stabs __FILE__,100,0,0,kernel_text | | 60 | .stabs __FILE__,100,0,0,kernel_text |
61 | | | 61 | |
62 | #include "opt_ddb.h" | | 62 | #include "opt_ddb.h" |
63 | #include "opt_kgdb.h" | | 63 | #include "opt_kgdb.h" |
64 | #include "opt_multiprocessor.h" | | 64 | #include "opt_multiprocessor.h" |
65 | #include "opt_lockdebug.h" | | 65 | #include "opt_lockdebug.h" |
66 | #include "opt_compat_netbsd.h" | | 66 | #include "opt_compat_netbsd.h" |
67 | | | 67 | |
68 | #include <machine/asm.h> | | 68 | #include <machine/asm.h> |
69 | | | 69 | |
70 | __KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.138 2021/07/07 02:44:04 thorpej Exp $"); | | 70 | __KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.139 2021/07/07 03:30:35 thorpej Exp $"); |
71 | | | 71 | |
72 | #include "assym.h" | | 72 | #include "assym.h" |
73 | | | 73 | |
74 | .stabs __FILE__,132,0,0,kernel_text | | 74 | .stabs __FILE__,132,0,0,kernel_text |
75 | | | 75 | |
76 | /* don't reorder instructions; paranoia. */ | | 76 | /* don't reorder instructions; paranoia. */ |
77 | .set noreorder | | 77 | .set noreorder |
78 | .text | | 78 | .text |
79 | | | 79 | |
80 | .macro bfalse reg, dst | | 80 | .macro bfalse reg, dst |
81 | beq \reg, \dst | | 81 | beq \reg, \dst |
82 | .endm | | 82 | .endm |
83 | | | 83 | |
84 | .macro btrue reg, dst | | 84 | .macro btrue reg, dst |
85 | bne \reg, \dst | | 85 | bne \reg, \dst |
86 | .endm | | 86 | .endm |
87 | | | 87 | |
88 | /* | | 88 | /* |
89 | * This is for kvm_mkdb, and should be the address of the beginning | | 89 | * This is for kvm_mkdb, and should be the address of the beginning |
90 | * of the kernel text segment (not necessarily the same as kernbase). | | 90 | * of the kernel text segment (not necessarily the same as kernbase). |
91 | */ | | 91 | */ |
92 | EXPORT(kernel_text) | | 92 | EXPORT(kernel_text) |
93 | .loc 1 __LINE__ | | 93 | .loc 1 __LINE__ |
94 | kernel_text: | | 94 | kernel_text: |
95 | | | 95 | |
96 | /* | | 96 | /* |
97 | * bootstack: a temporary stack, for booting. | | 97 | * bootstack: a temporary stack, for booting. |
98 | * | | 98 | * |
99 | * Extends from 'start' down. | | 99 | * Extends from 'start' down. |
100 | */ | | 100 | */ |
101 | bootstack: | | 101 | bootstack: |
102 | | | 102 | |
103 | /* | | 103 | /* |
104 | * locorestart: Kernel start. This is no longer the actual entry | | 104 | * locorestart: Kernel start. This is no longer the actual entry |
105 | * point, although jumping to here (the first kernel address) will | | 105 | * point, although jumping to here (the first kernel address) will |
106 | * in fact work just fine. | | 106 | * in fact work just fine. |
107 | * | | 107 | * |
108 | * Arguments: | | 108 | * Arguments: |
109 | * a0 is the first free page frame number (PFN) | | 109 | * a0 is the first free page frame number (PFN) |
110 | * a1 is the page table base register (PTBR) | | 110 | * a1 is the page table base register (PTBR) |
111 | * a2 is the bootinfo magic number | | 111 | * a2 is the bootinfo magic number |
112 | * a3 is the pointer to the bootinfo structure | | 112 | * a3 is the pointer to the bootinfo structure |
113 | * | | 113 | * |
114 | * All arguments are passed to alpha_init(). | | 114 | * All arguments are passed to alpha_init(). |
115 | */ | | 115 | */ |
116 | IMPORT(prom_mapped, 4) | | 116 | IMPORT(prom_mapped, 4) |
117 | NESTED_NOPROFILE(locorestart,1,0,ra,0,0) | | 117 | NESTED_NOPROFILE(locorestart,1,0,ra,0,0) |
118 | br pv,1f | | 118 | br pv,1f |
119 | 1: LDGP(pv) | | 119 | 1: LDGP(pv) |
120 | | | 120 | |
121 | /* Switch to the boot stack. */ | | 121 | /* Switch to the boot stack. */ |
122 | lda sp,bootstack | | 122 | lda sp,bootstack |
123 | | | 123 | |
124 | /* Load KGP with current GP. */ | | 124 | /* Load KGP with current GP. */ |
125 | mov a0, s0 /* save pfn */ | | 125 | mov a0, s0 /* save pfn */ |
126 | mov gp, a0 | | 126 | mov gp, a0 |
127 | call_pal PAL_OSF1_wrkgp /* clobbers a0, t0, t8-t11 */ | | 127 | call_pal PAL_OSF1_wrkgp /* clobbers a0, t0, t8-t11 */ |
128 | mov s0, a0 /* restore pfn */ | | 128 | mov s0, a0 /* restore pfn */ |
129 | | | 129 | |
130 | /* | | 130 | /* |
131 | * Call alpha_init() to do pre-main initialization. | | 131 | * Call alpha_init() to do pre-main initialization. |
132 | * alpha_init() gets the arguments we were called with, | | 132 | * alpha_init() gets the arguments we were called with, |
133 | * which are already in a0, a1, a2, a3, and a4. | | 133 | * which are already in a0, a1, a2, a3, and a4. |
134 | */ | | 134 | */ |
135 | CALL(alpha_init) | | 135 | CALL(alpha_init) |
136 | | | 136 | |
137 | /* Set up the virtual page table pointer. */ | | 137 | /* Set up the virtual page table pointer. */ |
138 | ldiq a0, VPTBASE | | 138 | ldiq a0, VPTBASE |
139 | call_pal PAL_OSF1_wrvptptr /* clobbers a0, t0, t8-t11 */ | | 139 | call_pal PAL_OSF1_wrvptptr /* clobbers a0, t0, t8-t11 */ |
140 | | | 140 | |
141 | /* | | 141 | /* |
142 | * Switch to lwp0's PCB. | | 142 | * Switch to lwp0's PCB. |
143 | */ | | 143 | */ |
144 | lda a0, lwp0 | | 144 | lda a0, lwp0 |
145 | ldq a0, L_MD_PCBPADDR(a0) /* phys addr of PCB */ | | 145 | ldq a0, L_MD_PCBPADDR(a0) /* phys addr of PCB */ |
146 | call_pal PAL_OSF1_swpctx /* clobbers a0, t0, t8-t11, v0 */ | | 146 | call_pal PAL_OSF1_swpctx /* clobbers a0, t0, t8-t11, v0 */ |
147 | | | 147 | |
148 | /* PROM is no longer mapped. */ | | 148 | /* PROM is no longer mapped. */ |
149 | lda t0, prom_mapped | | 149 | lda t0, prom_mapped |
150 | stl zero, 0(t0) | | 150 | stl zero, 0(t0) |
151 | | | 151 | |
152 | /* | | 152 | /* |
153 | * We've switched to a new page table base, so invalidate the TLB | | 153 | * We've switched to a new page table base, so invalidate the TLB |
154 | * and I-stream. This happens automatically everywhere but here. | | 154 | * and I-stream. This happens automatically everywhere but here. |
155 | */ | | 155 | */ |
156 | ldiq a0, -2 /* TBIA */ | | 156 | ldiq a0, -2 /* TBIA */ |
157 | call_pal PAL_OSF1_tbi | | 157 | call_pal PAL_OSF1_tbi |
158 | call_pal PAL_imb | | 158 | call_pal PAL_imb |
159 | | | 159 | |
160 | /* | | 160 | /* |
161 | * All ready to go! Call main()! | | 161 | * All ready to go! Call main()! |
162 | * | | 162 | * |
163 | * We're going to play a little trick here, though. We are | | 163 | * We're going to play a little trick here, though. We are |
164 | * going to fake our return address as the kthread backstop. | | 164 | * going to fake our return address as the kthread backstop. |
165 | * Hitting the backstop will trigger a panic, and we want lwp0 | | 165 | * Hitting the backstop will trigger a panic, and we want lwp0 |
166 | * to work like other kthreads in that regard. We will still | | 166 | * to work like other kthreads in that regard. We will still |
167 | * keep the "main returned" backstop here in case something | | 167 | * keep the "main returned" backstop here in case something |
168 | * goes horribly wrong. | | 168 | * goes horribly wrong. |
169 | */ | | 169 | */ |
170 | lda ra, alpha_kthread_backstop | | 170 | lda ra, alpha_kthread_backstop |
171 | jsr s0, main | | 171 | jsr s0, main |
172 | ldgp gp, 0(s0) | | 172 | ldgp gp, 0(s0) |
173 | | | 173 | |
174 | /* This should never happen. */ | | 174 | /* This should never happen. */ |
175 | PANIC("main() returned",Lmain_returned_pmsg) | | 175 | PANIC("main() returned",Lmain_returned_pmsg) |
176 | END(locorestart) | | 176 | END(locorestart) |
177 | | | 177 | |
178 | /**************************************************************************/ | | 178 | /**************************************************************************/ |
179 | | | 179 | |
180 | /* | | 180 | /* |
181 | * Pull in the PROM interface routines; these are needed for | | 181 | * Pull in the PROM interface routines; these are needed for |
182 | * prom printf (while bootstrapping), and for determining the | | 182 | * prom printf (while bootstrapping), and for determining the |
183 | * boot device, etc. | | 183 | * boot device, etc. |
184 | */ | | 184 | */ |
185 | #include <alpha/alpha/prom_disp.s> | | 185 | #include <alpha/alpha/prom_disp.s> |
186 | | | 186 | |
187 | /**************************************************************************/ | | 187 | /**************************************************************************/ |
188 | | | 188 | |
189 | /* | | 189 | /* |
190 | * Pull in the PALcode function stubs. | | 190 | * Pull in the PALcode function stubs. |
191 | */ | | 191 | */ |
192 | #include <alpha/alpha/pal.s> | | 192 | #include <alpha/alpha/pal.s> |
193 | | | 193 | |
194 | /**************************************************************************/ | | 194 | /**************************************************************************/ |
195 | | | 195 | |
196 | /**************************************************************************/ | | 196 | /**************************************************************************/ |
197 | | | 197 | |
198 | #if defined(MULTIPROCESSOR) | | 198 | #if defined(MULTIPROCESSOR) |
199 | /* | | 199 | /* |
200 | * Pull in the multiprocessor glue. | | 200 | * Pull in the multiprocessor glue. |
201 | */ | | 201 | */ |
202 | #include <alpha/alpha/multiproc.s> | | 202 | #include <alpha/alpha/multiproc.s> |
203 | #endif /* MULTIPROCESSOR */ | | 203 | #endif /* MULTIPROCESSOR */ |
204 | | | 204 | |
205 | /**************************************************************************/ | | 205 | /**************************************************************************/ |
206 | | | 206 | |
207 | /**************************************************************************/ | | 207 | /**************************************************************************/ |
208 | | | 208 | |
209 | #if defined(DDB) || defined(KGDB) | | 209 | #if defined(DDB) || defined(KGDB) |
210 | /* | | 210 | /* |
211 | * Pull in debugger glue. | | 211 | * Pull in debugger glue. |
212 | */ | | 212 | */ |
213 | #include <alpha/alpha/debug.s> | | 213 | #include <alpha/alpha/debug.s> |
214 | #endif /* DDB || KGDB */ | | 214 | #endif /* DDB || KGDB */ |
215 | | | 215 | |
216 | /**************************************************************************/ | | 216 | /**************************************************************************/ |
217 | | | 217 | |
218 | /**************************************************************************/ | | 218 | /**************************************************************************/ |
219 | | | 219 | |
220 | .text | | 220 | .text |
221 | .stabs __FILE__,132,0,0,backtolocore1 /* done with includes */ | | 221 | .stabs __FILE__,132,0,0,backtolocore1 /* done with includes */ |
222 | .loc 1 __LINE__ | | 222 | .loc 1 __LINE__ |
223 | backtolocore1: | | 223 | backtolocore1: |
224 | /**************************************************************************/ | | 224 | /**************************************************************************/ |
225 | | | 225 | |
226 | #ifdef COMPAT_16 | | 226 | #ifdef COMPAT_16 |
227 | /* | | 227 | /* |
228 | * Signal "trampoline" code. | | 228 | * Signal "trampoline" code. |
229 | * | | 229 | * |
230 | * The kernel arranges for the handler to be invoked directly. This | | 230 | * The kernel arranges for the handler to be invoked directly. This |
231 | * trampoline is used only to return from the signal. | | 231 | * trampoline is used only to return from the signal. |
232 | * | | 232 | * |
233 | * The stack pointer points to the saved sigcontext. | | 233 | * The stack pointer points to the saved sigcontext. |
234 | */ | | 234 | */ |
235 | | | 235 | |
236 | NESTED_NOPROFILE(sigcode,0,0,ra,0,0) | | 236 | NESTED_NOPROFILE(sigcode,0,0,ra,0,0) |
237 | mov sp, a0 /* get pointer to sigcontext */ | | 237 | mov sp, a0 /* get pointer to sigcontext */ |
238 | CALLSYS_NOERROR(compat_16___sigreturn14) /* and call sigreturn() with it. */ | | 238 | CALLSYS_NOERROR(compat_16___sigreturn14) /* and call sigreturn() with it. */ |
239 | mov v0, a0 /* if that failed, get error code */ | | 239 | mov v0, a0 /* if that failed, get error code */ |
240 | CALLSYS_NOERROR(exit) /* and call exit() with it. */ | | 240 | CALLSYS_NOERROR(exit) /* and call exit() with it. */ |
241 | XNESTED(esigcode,0) | | 241 | XNESTED(esigcode,0) |
242 | END(sigcode) | | 242 | END(sigcode) |
243 | #endif /* COMPAT_16 */ | | 243 | #endif /* COMPAT_16 */ |
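
In C terms, the COMPAT_16 trampoline above boils down to the following sketch (the `sigreturn` prototype is declared locally here as an assumption; on success the syscall never returns, so `_exit()` only runs with the error code left in v0):

```c
#include <unistd.h>

struct sigcontext;
extern int sigreturn(struct sigcontext *);	/* compat syscall stub */

/* sp points at the saved sigcontext on entry */
void
sigcode_sketch(struct sigcontext *scp)
{
	_exit(sigreturn(scp));	/* reached only if sigreturn() fails */
}
```
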
244 | | | 244 | |
245 | /**************************************************************************/ | | 245 | /**************************************************************************/ |
246 | | | 246 | |
247 | /* | | 247 | /* |
248 | * exception_return: return from trap, exception, or syscall | | 248 | * exception_return: return from trap, exception, or syscall |
249 | */ | | 249 | */ |
250 | | | 250 | |
251 | LEAF(exception_return, 1) /* XXX should be NESTED */ | | 251 | LEAF(exception_return, 1) /* XXX should be NESTED */ |
252 | br pv, 1f | | 252 | br pv, 1f |
253 | 1: LDGP(pv) | | 253 | 1: LDGP(pv) |
254 | | | 254 | |
255 | ldq s1, (FRAME_PS * 8)(sp) /* s1 = new PSL */ | | 255 | ldq s1, (FRAME_PS * 8)(sp) /* s1 = new PSL */ |
256 | and s1, ALPHA_PSL_IPL_MASK, s3 /* s3 = new ipl */ | | 256 | and s1, ALPHA_PSL_IPL_MASK, s3 /* s3 = new ipl */ |
257 | | | 257 | |
258 | /* --- BEGIN inline spllower() --- */ | | 258 | /* --- BEGIN inline spllower() --- */ |
259 | | | 259 | |
260 | cmpult s3, ALPHA_PSL_IPL_SOFT_HI, t1 /* new IPL < SOFT_HI? */ | | 260 | cmpult s3, ALPHA_PSL_IPL_SOFT_HI, t1 /* new IPL < SOFT_HI? */ |
261 | beq t1, 5f /* no, can't do AST or SI */ | | 261 | beq t1, 5f /* no, can't do AST or SI */ |
262 | /* yes */ | | 262 | /* yes */ |
263 | | | 263 | |
264 | /* GET_CURLWP clobbers v0, t0, t8...t11. */ | | 264 | /* GET_CURLWP clobbers v0, t0, t8...t11. */ |
265 | GET_CURLWP | | 265 | GET_CURLWP |
266 | mov v0, s0 /* s0 = curlwp */ | | 266 | mov v0, s0 /* s0 = curlwp */ |
267 | | | 267 | |
268 | 2: /* | | 268 | 2: /* |
269 | * Check to see if a soft interrupt is pending. We need only | | 269 | * Check to see if a soft interrupt is pending. We need only |
270 | * check for soft ints eligible to run at the new IPL. We generate | | 270 | * check for soft ints eligible to run at the new IPL. We generate |
271 | * the mask of eligible soft ints to run by masking the ssir with: | | 271 | * the mask of eligible soft ints to run by masking the ssir with: |
272 | * | | 272 | * |
273 | * (ALPHA_ALL_SOFTINTS << ((ipl) << 1)) | | 273 | * (ALPHA_ALL_SOFTINTS << ((ipl) << 1)) |
274 | * | | 274 | * |
275 | * See alpha_softint_dispatch(). | | 275 | * See alpha_softint_dispatch(). |
276 | */ | | 276 | */ |
277 | ldq t1, L_CPU(s0) /* t1 = curlwp->l_cpu */ | | 277 | ldq t1, L_CPU(s0) /* t1 = curlwp->l_cpu */ |
278 | ldiq t2, ALPHA_ALL_SOFTINTS /* t2 = ALPHA_ALL_SOFTINTS */ | | 278 | ldiq t2, ALPHA_ALL_SOFTINTS /* t2 = ALPHA_ALL_SOFTINTS */ |
279 | ldq t1, CPU_INFO_SSIR(t1) /* t1 = t1->ci_ssir */ | | 279 | ldq t1, CPU_INFO_SSIR(t1) /* t1 = t1->ci_ssir */ |
280 | sll s3, 1, t3 /* t3 = ipl << 1 */ | | 280 | sll s3, 1, t3 /* t3 = ipl << 1 */ |
281 | sll t2, t3, t2 /* t2 <<= t3 */ | | 281 | sll t2, t3, t2 /* t2 <<= t3 */ |
282 | and t1, t2, t1 /* t1 &= t2 */ | | 282 | and t1, t2, t1 /* t1 &= t2 */ |
283 | bne t1, 6f /* yes */ | | 283 | bne t1, 6f /* yes */ |
284 | /* no */ | | 284 | /* no */ |
285 | | | 285 | |
286 | /* --- END inline spllower() --- */ | | 286 | /* --- END inline spllower() --- */ |
287 | | | 287 | |
288 | and s1, ALPHA_PSL_USERMODE, t0 /* are we returning to user? */ | | 288 | and s1, ALPHA_PSL_USERMODE, t0 /* are we returning to user? */ |
289 | beq t0, 5f /* no: just return */ | | 289 | beq t0, 5f /* no: just return */ |
290 | /* yes */ | | 290 | /* yes */ |
291 | | | 291 | |
292 | /* check for AST */ | | 292 | /* check for AST */ |
293 | 3: ldl t3, L_MD_ASTPENDING(s0) /* AST pending? */ | | 293 | 3: ldl t3, L_MD_ASTPENDING(s0) /* AST pending? */ |
294 | bne t3, 7f /* yes */ | | 294 | bne t3, 7f /* yes */ |
295 | /* no: headed back to user space */ | | 295 | /* no: headed back to user space */ |
296 | | | 296 | |
297 | /* Enable the FPU based on whether MDLWP_FPACTIVE is set. */ | | 297 | /* Enable the FPU based on whether MDLWP_FPACTIVE is set. */ |
298 | 4: ldq t2, L_MD_FLAGS(s0) | | 298 | 4: ldq t2, L_MD_FLAGS(s0) |
299 | cmplt t2, zero, a0 | | 299 | cmplt t2, zero, a0 |
300 | call_pal PAL_OSF1_wrfen | | 300 | call_pal PAL_OSF1_wrfen |
301 | | | 301 | |
302 | /* restore the registers, and return */ | | 302 | /* restore the registers, and return */ |
303 | 5: bsr ra, exception_restore_regs /* jmp/CALL trashes pv/t12 */ | | 303 | 5: bsr ra, exception_restore_regs /* jmp/CALL trashes pv/t12 */ |
304 | ldq ra,(FRAME_RA*8)(sp) | | 304 | ldq ra,(FRAME_RA*8)(sp) |
305 | .set noat | | 305 | .set noat |
306 | ldq at_reg,(FRAME_AT*8)(sp) | | 306 | ldq at_reg,(FRAME_AT*8)(sp) |
307 | | | 307 | |
308 | lda sp,(FRAME_SW_SIZE*8)(sp) | | 308 | lda sp,(FRAME_SW_SIZE*8)(sp) |
309 | call_pal PAL_OSF1_rti | | 309 | call_pal PAL_OSF1_rti |
310 | .set at | | 310 | .set at |
311 | /* NOTREACHED */ | | 311 | /* NOTREACHED */ |
312 | | | 312 | |
313 | /* We've got a softint */ | | 313 | /* We've got a softint */ |
314 | 6: ldiq a0, ALPHA_PSL_IPL_HIGH | | 314 | 6: ldiq a0, ALPHA_PSL_IPL_HIGH |
315 | call_pal PAL_OSF1_swpipl | | 315 | call_pal PAL_OSF1_swpipl |
316 | mov v0, s2 /* remember old IPL */ | | 316 | mov v0, s2 /* remember old IPL */ |
317 | mov s3, a0 /* pass new ipl */ | | 317 | mov s3, a0 /* pass new ipl */ |
318 | CALL(alpha_softint_dispatch) | | 318 | CALL(alpha_softint_dispatch) |
319 | | | 319 | |
320 | /* SI handled; restore IPL and check again */ | | 320 | /* SI handled; restore IPL and check again */ |
321 | mov s2, a0 | | 321 | mov s2, a0 |
322 | call_pal PAL_OSF1_swpipl | | 322 | call_pal PAL_OSF1_swpipl |
323 | br 2b | | 323 | br 2b |
324 | | | 324 | |
325 | /* We've got an AST */ | | 325 | /* We've got an AST */ |
326 | 7: stl zero, L_MD_ASTPENDING(s0) /* no AST pending */ | | 326 | 7: stl zero, L_MD_ASTPENDING(s0) /* no AST pending */ |
327 | | | 327 | |
328 | ldiq a0, ALPHA_PSL_IPL_0 /* drop IPL to zero */ | | 328 | ldiq a0, ALPHA_PSL_IPL_0 /* drop IPL to zero */ |
329 | call_pal PAL_OSF1_swpipl | | 329 | call_pal PAL_OSF1_swpipl |
330 | mov v0, s2 /* remember old IPL */ | | 330 | mov v0, s2 /* remember old IPL */ |
331 | | | 331 | |
332 | mov sp, a0 /* only arg is frame */ | | 332 | mov sp, a0 /* only arg is frame */ |
333 | CALL(ast) | | 333 | CALL(ast) |
334 | | | 334 | |
335 | /* AST handled; restore IPL and check again */ | | 335 | /* AST handled; restore IPL and check again */ |
336 | mov s2, a0 | | 336 | mov s2, a0 |
337 | call_pal PAL_OSF1_swpipl | | 337 | call_pal PAL_OSF1_swpipl |
338 | br 3b | | 338 | br 3b |
339 | | | 339 | |
340 | END(exception_return) | | 340 | END(exception_return) |
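
The return path above is easier to follow as a C outline. This is a sketch, not the real implementation: field names follow the assym.h offsets used by the assembly, and `alpha_pal_swpipl()`/`alpha_pal_wrfen()` stand in for the PALcode calls:

```c
static void
exception_return_sketch(struct trapframe *tf)
{
	const u_long ps = tf->tf_regs[FRAME_PS];
	const int ipl = ps & ALPHA_PSL_IPL_MASK;
	int s;

	if (ipl < ALPHA_PSL_IPL_SOFT_HI) {
		struct lwp *l = curlwp;

		/* Run any soft interrupts eligible at the new IPL. */
		while (l->l_cpu->ci_ssir &
		    (ALPHA_ALL_SOFTINTS << (ipl << 1))) {
			s = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
			alpha_softint_dispatch(ipl);
			alpha_pal_swpipl(s);
		}

		if (ps & ALPHA_PSL_USERMODE) {
			/* Process ASTs at IPL 0 before returning to user. */
			while (l->l_md.md_astpending) {
				l->l_md.md_astpending = 0;
				s = alpha_pal_swpipl(ALPHA_PSL_IPL_0);
				ast(tf);
				alpha_pal_swpipl(s);
			}
			/* MDLWP_FPACTIVE lives in the sign bit of md_flags. */
			alpha_pal_wrfen((long)l->l_md.md_flags < 0);
		}
	}
	/* ...then restore the frame registers and "rti". */
}
```
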
341 | | | 341 | |
342 | LEAF(exception_save_regs, 0) | | 342 | LEAF(exception_save_regs, 0) |
343 | stq v0,(FRAME_V0*8)(sp) | | 343 | stq v0,(FRAME_V0*8)(sp) |
344 | stq a3,(FRAME_A3*8)(sp) | | 344 | stq a3,(FRAME_A3*8)(sp) |
345 | stq a4,(FRAME_A4*8)(sp) | | 345 | stq a4,(FRAME_A4*8)(sp) |
346 | stq a5,(FRAME_A5*8)(sp) | | 346 | stq a5,(FRAME_A5*8)(sp) |
347 | stq s0,(FRAME_S0*8)(sp) | | 347 | stq s0,(FRAME_S0*8)(sp) |
348 | stq s1,(FRAME_S1*8)(sp) | | 348 | stq s1,(FRAME_S1*8)(sp) |
349 | stq s2,(FRAME_S2*8)(sp) | | 349 | stq s2,(FRAME_S2*8)(sp) |
350 | stq s3,(FRAME_S3*8)(sp) | | 350 | stq s3,(FRAME_S3*8)(sp) |
351 | stq s4,(FRAME_S4*8)(sp) | | 351 | stq s4,(FRAME_S4*8)(sp) |
352 | stq s5,(FRAME_S5*8)(sp) | | 352 | stq s5,(FRAME_S5*8)(sp) |
353 | stq s6,(FRAME_S6*8)(sp) | | 353 | stq s6,(FRAME_S6*8)(sp) |
354 | stq t0,(FRAME_T0*8)(sp) | | 354 | stq t0,(FRAME_T0*8)(sp) |
355 | stq t1,(FRAME_T1*8)(sp) | | 355 | stq t1,(FRAME_T1*8)(sp) |
356 | stq t2,(FRAME_T2*8)(sp) | | 356 | stq t2,(FRAME_T2*8)(sp) |
357 | stq t3,(FRAME_T3*8)(sp) | | 357 | stq t3,(FRAME_T3*8)(sp) |
358 | stq t4,(FRAME_T4*8)(sp) | | 358 | stq t4,(FRAME_T4*8)(sp) |
359 | stq t5,(FRAME_T5*8)(sp) | | 359 | stq t5,(FRAME_T5*8)(sp) |
360 | stq t6,(FRAME_T6*8)(sp) | | 360 | stq t6,(FRAME_T6*8)(sp) |
361 | stq t7,(FRAME_T7*8)(sp) | | 361 | stq t7,(FRAME_T7*8)(sp) |
362 | stq t8,(FRAME_T8*8)(sp) | | 362 | stq t8,(FRAME_T8*8)(sp) |
363 | stq t9,(FRAME_T9*8)(sp) | | 363 | stq t9,(FRAME_T9*8)(sp) |
364 | stq t10,(FRAME_T10*8)(sp) | | 364 | stq t10,(FRAME_T10*8)(sp) |
365 | stq t11,(FRAME_T11*8)(sp) | | 365 | stq t11,(FRAME_T11*8)(sp) |
366 | stq t12,(FRAME_T12*8)(sp) | | 366 | stq t12,(FRAME_T12*8)(sp) |
367 | RET | | 367 | RET |
368 | END(exception_save_regs) | | 368 | END(exception_save_regs) |
369 | | | 369 | |
370 | LEAF(exception_restore_regs, 0) | | 370 | LEAF(exception_restore_regs, 0) |
371 | ldq v0,(FRAME_V0*8)(sp) | | 371 | ldq v0,(FRAME_V0*8)(sp) |
372 | ldq a3,(FRAME_A3*8)(sp) | | 372 | ldq a3,(FRAME_A3*8)(sp) |
373 | ldq a4,(FRAME_A4*8)(sp) | | 373 | ldq a4,(FRAME_A4*8)(sp) |
374 | ldq a5,(FRAME_A5*8)(sp) | | 374 | ldq a5,(FRAME_A5*8)(sp) |
375 | ldq s0,(FRAME_S0*8)(sp) | | 375 | ldq s0,(FRAME_S0*8)(sp) |
376 | ldq s1,(FRAME_S1*8)(sp) | | 376 | ldq s1,(FRAME_S1*8)(sp) |
377 | ldq s2,(FRAME_S2*8)(sp) | | 377 | ldq s2,(FRAME_S2*8)(sp) |
378 | ldq s3,(FRAME_S3*8)(sp) | | 378 | ldq s3,(FRAME_S3*8)(sp) |
379 | ldq s4,(FRAME_S4*8)(sp) | | 379 | ldq s4,(FRAME_S4*8)(sp) |
380 | ldq s5,(FRAME_S5*8)(sp) | | 380 | ldq s5,(FRAME_S5*8)(sp) |
381 | ldq s6,(FRAME_S6*8)(sp) | | 381 | ldq s6,(FRAME_S6*8)(sp) |
382 | ldq t0,(FRAME_T0*8)(sp) | | 382 | ldq t0,(FRAME_T0*8)(sp) |
383 | ldq t1,(FRAME_T1*8)(sp) | | 383 | ldq t1,(FRAME_T1*8)(sp) |
384 | ldq t2,(FRAME_T2*8)(sp) | | 384 | ldq t2,(FRAME_T2*8)(sp) |
385 | ldq t3,(FRAME_T3*8)(sp) | | 385 | ldq t3,(FRAME_T3*8)(sp) |
386 | ldq t4,(FRAME_T4*8)(sp) | | 386 | ldq t4,(FRAME_T4*8)(sp) |
387 | ldq t5,(FRAME_T5*8)(sp) | | 387 | ldq t5,(FRAME_T5*8)(sp) |
388 | ldq t6,(FRAME_T6*8)(sp) | | 388 | ldq t6,(FRAME_T6*8)(sp) |
389 | ldq t7,(FRAME_T7*8)(sp) | | 389 | ldq t7,(FRAME_T7*8)(sp) |
390 | ldq t8,(FRAME_T8*8)(sp) | | 390 | ldq t8,(FRAME_T8*8)(sp) |
391 | ldq t9,(FRAME_T9*8)(sp) | | 391 | ldq t9,(FRAME_T9*8)(sp) |
392 | ldq t10,(FRAME_T10*8)(sp) | | 392 | ldq t10,(FRAME_T10*8)(sp) |
393 | ldq t11,(FRAME_T11*8)(sp) | | 393 | ldq t11,(FRAME_T11*8)(sp) |
394 | ldq t12,(FRAME_T12*8)(sp) | | 394 | ldq t12,(FRAME_T12*8)(sp) |
395 | RET | | 395 | RET |
396 | END(exception_restore_regs) | | 396 | END(exception_restore_regs) |
397 | | | 397 | |
398 | /**************************************************************************/ | | 398 | /**************************************************************************/ |
399 | | | 399 | |
400 | /* | | 400 | /* |
401 | * XentArith: | | 401 | * XentArith: |
402 | * System arithmetic trap entry point. | | 402 | * System arithmetic trap entry point. |
403 | */ | | 403 | */ |
404 | | | 404 | |
405 | PALVECT(XentArith) /* setup frame, save registers */ | | 405 | PALVECT(XentArith) /* setup frame, save registers */ |
406 | | | 406 | |
407 | /* a0, a1, & a2 already set up */ | | 407 | /* a0, a1, & a2 already set up */ |
408 | ldiq a3, ALPHA_KENTRY_ARITH | | 408 | ldiq a3, ALPHA_KENTRY_ARITH |
409 | mov sp, a4 ; .loc 1 __LINE__ | | 409 | mov sp, a4 ; .loc 1 __LINE__ |
410 | CALL(trap) | | 410 | CALL(trap) |
411 | | | 411 | |
412 | jmp zero, exception_return | | 412 | jmp zero, exception_return |
413 | END(XentArith) | | 413 | END(XentArith) |
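
All of the trap vectors in this file funnel into the same pair of C handlers; the PALcode has already loaded a0-a2, and each stub supplies the entry type and frame pointer. Roughly (prototypes as used by the alpha port's trap.c and interrupt.c; `const` qualifiers omitted):

```c
void	trap(u_long a0, u_long a1, u_long a2, u_long entry,
	    struct trapframe *framep);
void	interrupt(u_long a0, u_long a1, u_long a2,
	    struct trapframe *framep);

/* XentArith, in effect: trap(a0, a1, a2, ALPHA_KENTRY_ARITH, sp); */
```
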
414 | | | 414 | |
415 | /**************************************************************************/ | | 415 | /**************************************************************************/ |
416 | | | 416 | |
417 | /* | | 417 | /* |
418 | * XentIF: | | 418 | * XentIF: |
419 | * System instruction fault trap entry point. | | 419 | * System instruction fault trap entry point. |
420 | */ | | 420 | */ |
421 | | | 421 | |
422 | PALVECT(XentIF) /* setup frame, save registers */ | | 422 | PALVECT(XentIF) /* setup frame, save registers */ |
423 | | | 423 | |
424 | /* a0, a1, & a2 already set up */ | | 424 | /* a0, a1, & a2 already set up */ |
425 | ldiq a3, ALPHA_KENTRY_IF | | 425 | ldiq a3, ALPHA_KENTRY_IF |
426 | mov sp, a4 ; .loc 1 __LINE__ | | 426 | mov sp, a4 ; .loc 1 __LINE__ |
427 | CALL(trap) | | 427 | CALL(trap) |
428 | jmp zero, exception_return | | 428 | jmp zero, exception_return |
429 | END(XentIF) | | 429 | END(XentIF) |
430 | | | 430 | |
431 | /**************************************************************************/ | | 431 | /**************************************************************************/ |
432 | | | 432 | |
433 | /* | | 433 | /* |
434 | * XentInt: | | 434 | * XentInt: |
435 | * System interrupt entry point. | | 435 | * System interrupt entry point. |
436 | */ | | 436 | */ |
437 | | | 437 | |
438 | PALVECT(XentInt) /* setup frame, save registers */ | | 438 | PALVECT(XentInt) /* setup frame, save registers */ |
439 | | | 439 | |
440 | /* a0, a1, & a2 already set up */ | | 440 | /* a0, a1, & a2 already set up */ |
441 | mov sp, a3 ; .loc 1 __LINE__ | | 441 | mov sp, a3 ; .loc 1 __LINE__ |
442 | CALL(interrupt) | | 442 | CALL(interrupt) |
443 | jmp zero, exception_return | | 443 | jmp zero, exception_return |
444 | END(XentInt) | | 444 | END(XentInt) |
445 | | | 445 | |
446 | /**************************************************************************/ | | 446 | /**************************************************************************/ |
447 | | | 447 | |
448 | /* | | 448 | /* |
449 | * XentMM: | | 449 | * XentMM: |
450 | * System memory management fault entry point. | | 450 | * System memory management fault entry point. |
451 | */ | | 451 | */ |
452 | | | 452 | |
453 | PALVECT(XentMM) /* setup frame, save registers */ | | 453 | PALVECT(XentMM) /* setup frame, save registers */ |
454 | | | 454 | |
455 | /* a0, a1, & a2 already set up */ | | 455 | /* a0, a1, & a2 already set up */ |
456 | ldiq a3, ALPHA_KENTRY_MM | | 456 | ldiq a3, ALPHA_KENTRY_MM |
457 | mov sp, a4 ; .loc 1 __LINE__ | | 457 | mov sp, a4 ; .loc 1 __LINE__ |
458 | CALL(trap) | | 458 | CALL(trap) |
459 | | | 459 | |
460 | jmp zero, exception_return | | 460 | jmp zero, exception_return |
461 | END(XentMM) | | 461 | END(XentMM) |
462 | | | 462 | |
463 | /**************************************************************************/ | | 463 | /**************************************************************************/ |
464 | | | 464 | |
465 | /* | | 465 | /* |
466 | * XentSys: | | 466 | * XentSys: |
467 | * System call entry point. | | 467 | * System call entry point. |
468 | */ | | 468 | */ |
469 | | | 469 | |
470 | ESETUP(XentSys) ; .loc 1 __LINE__ | | 470 | ESETUP(XentSys) ; .loc 1 __LINE__ |
471 | | | 471 | |
472 | stq v0,(FRAME_V0*8)(sp) /* in case we need to restart */ | | 472 | stq v0,(FRAME_V0*8)(sp) /* in case we need to restart */ |
473 | stq s0,(FRAME_S0*8)(sp) | | 473 | stq s0,(FRAME_S0*8)(sp) |
474 | stq s1,(FRAME_S1*8)(sp) | | 474 | stq s1,(FRAME_S1*8)(sp) |
475 | stq s2,(FRAME_S2*8)(sp) | | 475 | stq s2,(FRAME_S2*8)(sp) |
476 | stq s3,(FRAME_S3*8)(sp) | | 476 | stq s3,(FRAME_S3*8)(sp) |
477 | stq s4,(FRAME_S4*8)(sp) | | 477 | stq s4,(FRAME_S4*8)(sp) |
478 | stq s5,(FRAME_S5*8)(sp) | | 478 | stq s5,(FRAME_S5*8)(sp) |
479 | stq s6,(FRAME_S6*8)(sp) | | 479 | stq s6,(FRAME_S6*8)(sp) |
480 | stq a0,(FRAME_A0*8)(sp) | | 480 | stq a0,(FRAME_A0*8)(sp) |
481 | stq a1,(FRAME_A1*8)(sp) | | 481 | stq a1,(FRAME_A1*8)(sp) |
482 | stq a2,(FRAME_A2*8)(sp) | | 482 | stq a2,(FRAME_A2*8)(sp) |
483 | stq a3,(FRAME_A3*8)(sp) | | 483 | stq a3,(FRAME_A3*8)(sp) |
484 | stq a4,(FRAME_A4*8)(sp) | | 484 | stq a4,(FRAME_A4*8)(sp) |
485 | stq a5,(FRAME_A5*8)(sp) | | 485 | stq a5,(FRAME_A5*8)(sp) |
486 | stq ra,(FRAME_RA*8)(sp) | | 486 | stq ra,(FRAME_RA*8)(sp) |
487 | | | 487 | |
488 | /* curlwp is the first arg; the syscall number (passed in v0) is second, the frame pointer third */ | | 488 | /* curlwp is the first arg; the syscall number (passed in v0) is second, the frame pointer third */ |
489 | mov v0,a1 | | 489 | mov v0,a1 |
490 | GET_CURLWP | | 490 | GET_CURLWP |
491 | mov v0,a0 | | 491 | mov v0,a0 |
492 | mov sp,a2 ; .loc 1 __LINE__ | | 492 | mov sp,a2 ; .loc 1 __LINE__ |
493 | ldq t11,L_PROC(a0) | | 493 | ldq t11,L_PROC(a0) |
494 | ldq t12,P_MD_SYSCALL(t11) | | 494 | ldq t12,P_MD_SYSCALL(t11) |
495 | CALL((t12)) | | 495 | CALL((t12)) |
496 | | | 496 | |
497 | jmp zero, exception_return | | 497 | jmp zero, exception_return |
498 | END(XentSys) | | 498 | END(XentSys) |
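
The tail of XentSys indirects through the per-emulation syscall handler hung off the proc. A minimal sketch of the dispatch (field names follow the L_PROC/P_MD_SYSCALL offsets used above):

```c
static void
xentsys_tail_sketch(struct trapframe *framep, u_long code /* from v0 */)
{
	struct lwp *l = curlwp;

	/* (*p->p_md.md_syscall)(lwp, syscall number, frame) */
	(*l->l_proc->p_md.md_syscall)(l, code, framep);
	/* ...then falls into exception_return */
}
```
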
499 | | | 499 | |
500 | /**************************************************************************/ | | 500 | /**************************************************************************/ |
501 | | | 501 | |
502 | /* | | 502 | /* |
503 | * XentUna: | | 503 | * XentUna: |
504 | * System unaligned access entry point. | | 504 | * System unaligned access entry point. |
505 | */ | | 505 | */ |
506 | | | 506 | |
507 | LEAF(XentUna, 3) /* XXX should be NESTED */ | | 507 | LEAF(XentUna, 3) /* XXX should be NESTED */ |
508 | .set noat | | 508 | .set noat |
509 | lda sp,-(FRAME_SW_SIZE*8)(sp) | | 509 | lda sp,-(FRAME_SW_SIZE*8)(sp) |
510 | stq at_reg,(FRAME_AT*8)(sp) | | 510 | stq at_reg,(FRAME_AT*8)(sp) |
511 | .set at | | 511 | .set at |
512 | stq ra,(FRAME_RA*8)(sp) | | 512 | stq ra,(FRAME_RA*8)(sp) |
513 | bsr ra, exception_save_regs /* jmp/CALL trashes pv/t12 */ | | 513 | bsr ra, exception_save_regs /* jmp/CALL trashes pv/t12 */ |
514 | | | 514 | |
515 | /* a0, a1, & a2 already set up */ | | 515 | /* a0, a1, & a2 already set up */ |
516 | ldiq a3, ALPHA_KENTRY_UNA | | 516 | ldiq a3, ALPHA_KENTRY_UNA |
517 | mov sp, a4 ; .loc 1 __LINE__ | | 517 | mov sp, a4 ; .loc 1 __LINE__ |
518 | CALL(trap) | | 518 | CALL(trap) |
519 | | | 519 | |
520 | jmp zero, exception_return | | 520 | jmp zero, exception_return |
521 | END(XentUna) | | 521 | END(XentUna) |
522 | | | 522 | |
523 | /**************************************************************************/ | | 523 | /**************************************************************************/ |
524 | | | 524 | |
525 | /* | | 525 | /* |
526 | * savefpstate: Save a process's floating point state. | | 526 | * savefpstate: Save a process's floating point state. |
527 | * | | 527 | * |
528 | * Arguments: | | 528 | * Arguments: |
529 | * a0 'struct fpstate *' to save into | | 529 | * a0 'struct fpstate *' to save into |
530 | */ | | 530 | */ |
531 | | | 531 | |
532 | LEAF(savefpstate, 1) | | 532 | LEAF(savefpstate, 1) |
533 | LDGP(pv) | | 533 | LDGP(pv) |
534 | /* save all of the FP registers */ | | 534 | /* save all of the FP registers */ |
535 | lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */ | | 535 | lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */ |
536 | stt $f0, (0 * 8)(t1) /* save first register, using hw name */ | | 536 | stt $f0, (0 * 8)(t1) /* save first register, using hw name */ |
537 | stt $f1, (1 * 8)(t1) /* etc. */ | | 537 | stt $f1, (1 * 8)(t1) /* etc. */ |
538 | stt $f2, (2 * 8)(t1) | | 538 | stt $f2, (2 * 8)(t1) |
539 | stt $f3, (3 * 8)(t1) | | 539 | stt $f3, (3 * 8)(t1) |
540 | stt $f4, (4 * 8)(t1) | | 540 | stt $f4, (4 * 8)(t1) |
541 | stt $f5, (5 * 8)(t1) | | 541 | stt $f5, (5 * 8)(t1) |
542 | stt $f6, (6 * 8)(t1) | | 542 | stt $f6, (6 * 8)(t1) |
543 | stt $f7, (7 * 8)(t1) | | 543 | stt $f7, (7 * 8)(t1) |
544 | stt $f8, (8 * 8)(t1) | | 544 | stt $f8, (8 * 8)(t1) |
545 | stt $f9, (9 * 8)(t1) | | 545 | stt $f9, (9 * 8)(t1) |
546 | stt $f10, (10 * 8)(t1) | | 546 | stt $f10, (10 * 8)(t1) |
547 | stt $f11, (11 * 8)(t1) | | 547 | stt $f11, (11 * 8)(t1) |
548 | stt $f12, (12 * 8)(t1) | | 548 | stt $f12, (12 * 8)(t1) |
549 | stt $f13, (13 * 8)(t1) | | 549 | stt $f13, (13 * 8)(t1) |
550 | stt $f14, (14 * 8)(t1) | | 550 | stt $f14, (14 * 8)(t1) |
551 | stt $f15, (15 * 8)(t1) | | 551 | stt $f15, (15 * 8)(t1) |
552 | stt $f16, (16 * 8)(t1) | | 552 | stt $f16, (16 * 8)(t1) |
553 | stt $f17, (17 * 8)(t1) | | 553 | stt $f17, (17 * 8)(t1) |
554 | stt $f18, (18 * 8)(t1) | | 554 | stt $f18, (18 * 8)(t1) |
555 | stt $f19, (19 * 8)(t1) | | 555 | stt $f19, (19 * 8)(t1) |
556 | stt $f20, (20 * 8)(t1) | | 556 | stt $f20, (20 * 8)(t1) |
557 | stt $f21, (21 * 8)(t1) | | 557 | stt $f21, (21 * 8)(t1) |
558 | stt $f22, (22 * 8)(t1) | | 558 | stt $f22, (22 * 8)(t1) |
559 | stt $f23, (23 * 8)(t1) | | 559 | stt $f23, (23 * 8)(t1) |
560 | stt $f24, (24 * 8)(t1) | | 560 | stt $f24, (24 * 8)(t1) |
561 | stt $f25, (25 * 8)(t1) | | 561 | stt $f25, (25 * 8)(t1) |
562 | stt $f26, (26 * 8)(t1) | | 562 | stt $f26, (26 * 8)(t1) |
563 | stt $f27, (27 * 8)(t1) | | 563 | stt $f27, (27 * 8)(t1) |
564 | .set noat | | 564 | .set noat |
565 | stt $f28, (28 * 8)(t1) | | 565 | stt $f28, (28 * 8)(t1) |
566 | .set at | | 566 | .set at |
567 | stt $f29, (29 * 8)(t1) | | 567 | stt $f29, (29 * 8)(t1) |
568 | stt $f30, (30 * 8)(t1) | | 568 | stt $f30, (30 * 8)(t1) |
569 | | | 569 | |
570 | /* | | 570 | /* |
571 | * Then save the FPCR; note that the necessary 'trapb's are taken | | 571 | * Then save the FPCR; note that the necessary 'trapb's are taken |
572 | * care of on kernel entry and exit. | | 572 | * care of on kernel entry and exit. |
573 | */ | | 573 | */ |
574 | mf_fpcr ft0 | | 574 | mf_fpcr ft0 |
575 | stt ft0, FPREG_FPR_CR(a0) /* store to FPCR save area */ | | 575 | stt ft0, FPREG_FPR_CR(a0) /* store to FPCR save area */ |
576 | | | 576 | |
577 | RET | | 577 | RET |
578 | END(savefpstate) | | 578 | END(savefpstate) |
579 | | | 579 | |
580 | /**************************************************************************/ | | 580 | /**************************************************************************/ |
581 | | | 581 | |
582 | /* | | 582 | /* |
583 | * restorefpstate: Restore a process's floating point state. | | 583 | * restorefpstate: Restore a process's floating point state. |
584 | * | | 584 | * |
585 | * Arguments: | | 585 | * Arguments: |
586 | * a0 'struct fpstate *' to restore from | | 586 | * a0 'struct fpstate *' to restore from |
587 | */ | | 587 | */ |
588 | | | 588 | |
589 | LEAF(restorefpstate, 1) | | 589 | LEAF(restorefpstate, 1) |
590 | LDGP(pv) | | 590 | LDGP(pv) |
591 | /* | | 591 | /* |
592 | * Restore the FPCR; note that the necessary 'trapb's are taken care of | | 592 | * Restore the FPCR; note that the necessary 'trapb's are taken care of |
593 | * on kernel entry and exit. | | 593 | * on kernel entry and exit. |
594 | */ | | 594 | */ |
595 | ldt ft0, FPREG_FPR_CR(a0) /* load from FPCR save area */ | | 595 | ldt ft0, FPREG_FPR_CR(a0) /* load from FPCR save area */ |
596 | mt_fpcr ft0 | | 596 | mt_fpcr ft0 |
597 | | | 597 | |
598 | /* Restore all of the FP registers. */ | | 598 | /* Restore all of the FP registers. */ |
599 | lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */ | | 599 | lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */ |
600 | ldt $f0, (0 * 8)(t1) /* restore first reg., using hw name */ | | 600 | ldt $f0, (0 * 8)(t1) /* restore first reg., using hw name */ |
601 | ldt $f1, (1 * 8)(t1) /* etc. */ | | 601 | ldt $f1, (1 * 8)(t1) /* etc. */ |
602 | ldt $f2, (2 * 8)(t1) | | 602 | ldt $f2, (2 * 8)(t1) |
603 | ldt $f3, (3 * 8)(t1) | | 603 | ldt $f3, (3 * 8)(t1) |
604 | ldt $f4, (4 * 8)(t1) | | 604 | ldt $f4, (4 * 8)(t1) |
605 | ldt $f5, (5 * 8)(t1) | | 605 | ldt $f5, (5 * 8)(t1) |
606 | ldt $f6, (6 * 8)(t1) | | 606 | ldt $f6, (6 * 8)(t1) |
607 | ldt $f7, (7 * 8)(t1) | | 607 | ldt $f7, (7 * 8)(t1) |
608 | ldt $f8, (8 * 8)(t1) | | 608 | ldt $f8, (8 * 8)(t1) |
609 | ldt $f9, (9 * 8)(t1) | | 609 | ldt $f9, (9 * 8)(t1) |
610 | ldt $f10, (10 * 8)(t1) | | 610 | ldt $f10, (10 * 8)(t1) |
611 | ldt $f11, (11 * 8)(t1) | | 611 | ldt $f11, (11 * 8)(t1) |
612 | ldt $f12, (12 * 8)(t1) | | 612 | ldt $f12, (12 * 8)(t1) |
613 | ldt $f13, (13 * 8)(t1) | | 613 | ldt $f13, (13 * 8)(t1) |
614 | ldt $f14, (14 * 8)(t1) | | 614 | ldt $f14, (14 * 8)(t1) |
615 | ldt $f15, (15 * 8)(t1) | | 615 | ldt $f15, (15 * 8)(t1) |
616 | ldt $f16, (16 * 8)(t1) | | 616 | ldt $f16, (16 * 8)(t1) |
617 | ldt $f17, (17 * 8)(t1) | | 617 | ldt $f17, (17 * 8)(t1) |
618 | ldt $f18, (18 * 8)(t1) | | 618 | ldt $f18, (18 * 8)(t1) |
619 | ldt $f19, (19 * 8)(t1) | | 619 | ldt $f19, (19 * 8)(t1) |
620 | ldt $f20, (20 * 8)(t1) | | 620 | ldt $f20, (20 * 8)(t1) |
621 | ldt $f21, (21 * 8)(t1) | | 621 | ldt $f21, (21 * 8)(t1) |
622 | ldt $f22, (22 * 8)(t1) | | 622 | ldt $f22, (22 * 8)(t1) |
623 | ldt $f23, (23 * 8)(t1) | | 623 | ldt $f23, (23 * 8)(t1) |
624 | ldt $f24, (24 * 8)(t1) | | 624 | ldt $f24, (24 * 8)(t1) |
625 | ldt $f25, (25 * 8)(t1) | | 625 | ldt $f25, (25 * 8)(t1) |
626 | ldt $f26, (26 * 8)(t1) | | 626 | ldt $f26, (26 * 8)(t1) |
627 | ldt $f27, (27 * 8)(t1) | | 627 | ldt $f27, (27 * 8)(t1) |
628 | ldt $f28, (28 * 8)(t1) | | 628 | ldt $f28, (28 * 8)(t1) |
629 | ldt $f29, (29 * 8)(t1) | | 629 | ldt $f29, (29 * 8)(t1) |
630 | ldt $f30, (30 * 8)(t1) | | 630 | ldt $f30, (30 * 8)(t1) |
631 | | | 631 | |
632 | RET | | 632 | RET |
633 | END(restorefpstate) | | 633 | END(restorefpstate) |
634 | | | 634 | |
635 | /**************************************************************************/ | | 635 | /**************************************************************************/ |
636 | | | 636 | |
637 | /* | | 637 | /* |
638 | * savectx: save process context, i.e. callee-saved registers | | 638 | * savectx: save process context, i.e. callee-saved registers |
639 | * | | 639 | * |
640 | * Note that savectx() only works for processes other than curlwp, | | 640 | * Note that savectx() only works for processes other than curlwp, |
641 | * since cpu_switchto will copy over the info saved here. (It _can_ | | 641 | * since cpu_switchto will copy over the info saved here. (It _can_ |
642 | * sanely be used for curlwp iff cpu_switchto won't be called again, e.g. | | 642 | * sanely be used for curlwp iff cpu_switchto won't be called again, e.g. |
643 | * if called from boot().) | | 643 | * if called from boot().) |
644 | * | | 644 | * |
645 | * Arguments: | | 645 | * Arguments: |
646 | * a0 'struct pcb *' of the process that needs its context saved | | 646 | * a0 'struct pcb *' of the process that needs its context saved |
647 | * | | 647 | * |
648 | * Return: | | 648 | * Return: |
649 | * v0 0. (note that for child processes, it seems | | 649 | * v0 0. (note that for child processes, it seems |
650 | * like savectx() returns 1, because the return address | | 650 | * like savectx() returns 1, because the return address |
651 | * in the PCB is set to the return address from savectx().) | | 651 | * in the PCB is set to the return address from savectx().) |
652 | */ | | 652 | */ |
653 | | | 653 | |
654 | LEAF(savectx, 1) | | 654 | LEAF(savectx, 1) |
655 | br pv, 1f | | 655 | br pv, 1f |
656 | 1: LDGP(pv) | | 656 | 1: LDGP(pv) |
657 | stq sp, PCB_HWPCB_KSP(a0) /* store sp */ | | 657 | stq sp, PCB_HWPCB_KSP(a0) /* store sp */ |
658 | stq s0, PCB_CONTEXT+(0 * 8)(a0) /* store s0 - s6 */ | | 658 | stq s0, PCB_CONTEXT+(0 * 8)(a0) /* store s0 - s6 */ |
659 | stq s1, PCB_CONTEXT+(1 * 8)(a0) | | 659 | stq s1, PCB_CONTEXT+(1 * 8)(a0) |
660 | stq s2, PCB_CONTEXT+(2 * 8)(a0) | | 660 | stq s2, PCB_CONTEXT+(2 * 8)(a0) |
661 | stq s3, PCB_CONTEXT+(3 * 8)(a0) | | 661 | stq s3, PCB_CONTEXT+(3 * 8)(a0) |
662 | stq s4, PCB_CONTEXT+(4 * 8)(a0) | | 662 | stq s4, PCB_CONTEXT+(4 * 8)(a0) |
663 | stq s5, PCB_CONTEXT+(5 * 8)(a0) | | 663 | stq s5, PCB_CONTEXT+(5 * 8)(a0) |
664 | stq s6, PCB_CONTEXT+(6 * 8)(a0) | | 664 | stq s6, PCB_CONTEXT+(6 * 8)(a0) |
665 | stq ra, PCB_CONTEXT+(7 * 8)(a0) /* store ra */ | | 665 | stq ra, PCB_CONTEXT+(7 * 8)(a0) /* store ra */ |
666 | call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */ | | 666 | call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */ |
667 | stq v0, PCB_CONTEXT+(8 * 8)(a0) /* store ps, for ipl */ | | 667 | stq v0, PCB_CONTEXT+(8 * 8)(a0) /* store ps, for ipl */ |
668 | | | 668 | |
669 | mov zero, v0 | | 669 | mov zero, v0 |
670 | RET | | 670 | RET |
671 | END(savectx) | | 671 | END(savectx) |
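
The "returns twice" behaviour described in the comment is the same pattern as setjmp()/longjmp(). This standalone analogy (ordinary userland C, not kernel code) shows why a context resumed through the saved PCB appears to return from savectx() a second time:

```c
#include <setjmp.h>
#include <stdio.h>

static jmp_buf ctx;

int
main(void)
{
	if (setjmp(ctx) == 0) {		/* like savectx() returning 0 */
		printf("context saved\n");
		longjmp(ctx, 1);	/* resume through the saved state */
	}
	printf("resumed\n");		/* the "second return", like v0 == 1 */
	return 0;
}
```
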
672 | | | 672 | |
673 | /**************************************************************************/ | | 673 | /**************************************************************************/ |
674 | | | 674 | |
675 | /* | | 675 | /* |
676 | * void alpha_softint_switchto(struct lwp *current, int ipl, struct lwp *next) | | 676 | * void alpha_softint_switchto(struct lwp *current, int ipl, struct lwp *next) |
677 | * Switch away from the current LWP to the specified softint LWP, and | | 677 | * Switch away from the current LWP to the specified softint LWP, and |
678 | * dispatch to softint processing. | | 678 | * dispatch to softint processing. |
679 | * Arguments: | | 679 | * Arguments: |
680 | * a0 'struct lwp *' of the LWP to switch from | | 680 | * a0 'struct lwp *' of the LWP to switch from |
681 | * a1 IPL that the softint will run at | | 681 | * a1 IPL that the softint will run at |
682 | * a2 'struct lwp *' of the LWP to switch to | | 682 | * a2 'struct lwp *' of the LWP to switch to |
683 | * | | 683 | * |
684 | * N.B. We have arranged that a0 and a1 are already set up correctly | | 684 | * N.B. We have arranged that a0 and a1 are already set up correctly |
685 | * for the call to softint_dispatch(). | | 685 | * for the call to softint_dispatch(). |
686 | */ | | 686 | */ |
687 | NESTED_NOPROFILE(alpha_softint_switchto, 3, 16, ra, IM_RA, 0) | | 687 | NESTED_NOPROFILE(alpha_softint_switchto, 3, 16, ra, IM_RA, 0) |
688 | LDGP(pv) | | 688 | LDGP(pv) |
689 | | | 689 | |
690 | ldq a3, L_PCB(a0) /* a3 = from->l_pcb */ | | 690 | ldq a3, L_PCB(a0) /* a3 = from->l_pcb */ |
691 | | | 691 | |
692 | lda sp, -16(sp) /* set up stack frame */ | | 692 | lda sp, -16(sp) /* set up stack frame */ |
693 | stq ra, 0(sp) /* save ra */ | | 693 | stq ra, 0(sp) /* save ra */ |
694 | | | 694 | |
695 | /* | | 695 | /* |
696 | * Step 1: Save the current LWP's context. We don't | | 696 | * Step 1: Save the current LWP's context. We don't |
697 | * save the return address directly; instead, we arrange | | 697 | * save the return address directly; instead, we arrange |
698 | * for it to bounce through a trampoline that fixes up | | 698 | * for it to bounce through a trampoline that fixes up |
699 | * the state in case the softint LWP blocks. | | 699 | * the state in case the softint LWP blocks. |
700 | */ | | 700 | */ |
701 | stq sp, PCB_HWPCB_KSP(a3) /* store sp */ | | 701 | stq sp, PCB_HWPCB_KSP(a3) /* store sp */ |
702 | stq s0, PCB_CONTEXT+(0 * 8)(a3) /* store s0 - s6 */ | | 702 | stq s0, PCB_CONTEXT+(0 * 8)(a3) /* store s0 - s6 */ |
703 | stq s1, PCB_CONTEXT+(1 * 8)(a3) | | 703 | stq s1, PCB_CONTEXT+(1 * 8)(a3) |
704 | stq s2, PCB_CONTEXT+(2 * 8)(a3) | | 704 | stq s2, PCB_CONTEXT+(2 * 8)(a3) |
705 | stq s3, PCB_CONTEXT+(3 * 8)(a3) | | 705 | stq s3, PCB_CONTEXT+(3 * 8)(a3) |
706 | stq s4, PCB_CONTEXT+(4 * 8)(a3) | | 706 | stq s4, PCB_CONTEXT+(4 * 8)(a3) |
707 | stq s5, PCB_CONTEXT+(5 * 8)(a3) | | 707 | stq s5, PCB_CONTEXT+(5 * 8)(a3) |
708 | stq s6, PCB_CONTEXT+(6 * 8)(a3) | | 708 | stq s6, PCB_CONTEXT+(6 * 8)(a3) |
709 | | | 709 | |
710 | /* Set the trampoline address in saved context. */ | | 710 | /* Set the trampoline address in saved context. */ |
711 | lda v0, alpha_softint_return | | 711 | lda v0, alpha_softint_return |
712 | stq v0, PCB_CONTEXT+(7 * 8)(a3) /* store ra */ | | 712 | stq v0, PCB_CONTEXT+(7 * 8)(a3) /* store ra */ |
713 | | | 713 | |
714 | /* | | 714 | /* |
715 | * Step 2: Switch to the softint LWP's stack. | | 715 | * Step 2: Switch to the softint LWP's stack. |
716 | * We always start at the top of the stack (i.e. | | 716 | * We always start at the top of the stack (i.e. |
717 | * just below the trapframe). | | 717 | * just below the trapframe). |
718 | * | | 718 | * |
719 | * N.B. There is no need to restore any other registers | | 719 | * N.B. There is no need to restore any other registers |
720 | * from the softint LWP's context; we are starting from | | 720 | * from the softint LWP's context; we are starting from |
721 | * the root of the call graph. | | 721 | * the root of the call graph. |
722 | */ | | 722 | */ |
723 | ldq sp, L_MD_TF(a2) | | 723 | ldq sp, L_MD_TF(a2) |
724 | | | 724 | |
725 | /* | | 725 | /* |
726 | * Step 3: Update curlwp. | | 726 | * Step 3: Update curlwp. |
727 | * | | 727 | * |
728 | * N.B. We save off the from-LWP argument that will be passed | | 728 | * N.B. We save off the from-LWP argument that will be passed |
729 | * to softint_dispatch() in s0, which we'll need to restore | | 729 | * to softint_dispatch() in s0, which we'll need to restore |
730 | * before returning. If we bounce through the trampoline, the | | 730 | * before returning. If we bounce through the trampoline, the |
731 | * context switch will restore it for us. | | 731 | * context switch will restore it for us. |
732 | */ | | 732 | */ |
733 | mov a0, s0 /* s0 = from LWP */ | | 733 | mov a0, s0 /* s0 = from LWP */ |
734 | SET_CURLWP(a2) /* clobbers a0, v0, t0, t8..t11 */ | | 734 | SET_CURLWP(a2) /* clobbers a0, v0, t0, t8..t11 */ |
735 | | | 735 | |
736 | /* | | 736 | /* |
737 | * Step 4: Call softint_dispatch(). | | 737 | * Step 4: Call softint_dispatch(). |
738 | * | | 738 | * |
739 | * N.B. a1 already has the IPL argument. | | 739 | * N.B. a1 already has the IPL argument. |
740 | */ | | 740 | */ |
741 | mov s0, a0 /* a0 = from LWP */ | | 741 | mov s0, a0 /* a0 = from LWP */ |
742 | CALL(softint_dispatch) | | 742 | CALL(softint_dispatch) |
743 | | | 743 | |
744 | /* | | 744 | /* |
745 | * Step 5: Restore everything and return. | | 745 | * Step 5: Restore everything and return. |
746 | */ | | 746 | */ |
747 | ldq a3, L_PCB(s0) /* a3 = from->l_pcb */ | | 747 | ldq a3, L_PCB(s0) /* a3 = from->l_pcb */ |
748 | SET_CURLWP(s0) /* clobbers a0, v0, t0, t8..t11 */ | | 748 | SET_CURLWP(s0) /* clobbers a0, v0, t0, t8..t11 */ |
749 | ldq sp, PCB_HWPCB_KSP(a3) /* restore sp */ | | 749 | ldq sp, PCB_HWPCB_KSP(a3) /* restore sp */ |
750 | ldq s0, PCB_CONTEXT+(0 * 8)(a3) /* restore s0 */ | | 750 | ldq s0, PCB_CONTEXT+(0 * 8)(a3) /* restore s0 */ |
751 | ldq ra, 0(sp) /* restore ra */ | | 751 | ldq ra, 0(sp) /* restore ra */ |
752 | lda sp, 16(sp) /* pop stack frame */ | | 752 | lda sp, 16(sp) /* pop stack frame */ |
753 | RET | | 753 | RET |
754 | END(alpha_softint_switchto) | | 754 | END(alpha_softint_switchto) |
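
The five steps above, restated as C-flavoured pseudocode. Illustrative only: `save_callee_saved()`, `switch_stack_to()` and `restore_callee_saved()` are hypothetical helpers standing in for the register stores the assembly does via assym.h offsets:

```c
static void
softint_switchto_sketch(struct lwp *cur, int ipl, struct lwp *si)
{
	struct pcb *pcb = lwp_getpcb(cur);

	/* Step 1: save context; ra will bounce through the trampoline. */
	save_callee_saved(pcb);			/* hypothetical helper */
	pcb->pcb_context[7] = (u_long)alpha_softint_return;

	/* Step 2: start at the top of the softint LWP's stack. */
	switch_stack_to(si->l_md.md_tf);	/* hypothetical helper */

	/* Step 3: update curlwp. */
	curlwp = si;

	/* Step 4: a1 (the IPL) was already set up for this call. */
	softint_dispatch(cur, ipl);

	/* Step 5: switch back, restore, return. */
	curlwp = cur;
	restore_callee_saved(pcb);		/* hypothetical helper */
}
```
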
755 | | | 755 | |
756 | LEAF_NOPROFILE(alpha_softint_return, 0) | | 756 | LEAF_NOPROFILE(alpha_softint_return, 0) |
757 | /* | | 757 | /* |
758 | * Step 1: Go to IPL_HIGH, which is what alpha_softint_dispatch() | | 758 | * Step 1: Go to IPL_HIGH, which is what alpha_softint_dispatch() |
759 | * expects. We will have arrived here at IPL_SCHED. | | 759 | * expects. We will have arrived here at IPL_SCHED. |
760 | */ | | 760 | */ |
761 | ldiq a0, ALPHA_PSL_IPL_HIGH | | 761 | ldiq a0, ALPHA_PSL_IPL_HIGH |
762 | call_pal PAL_OSF1_swpipl | | 762 | call_pal PAL_OSF1_swpipl |
763 | | | 763 | |
764 | /* | | 764 | /* |
765 | * Step 2: Re-adjust the mutex count after mi_switch(). | | 765 | * Step 2: Re-adjust the mutex count after mi_switch(). |
766 | */ | | 766 | */ |
767 | GET_CURLWP | | 767 | GET_CURLWP |
768 | ldq v0, L_CPU(v0) | | 768 | ldq v0, L_CPU(v0) |
769 | ldl t0, CPU_INFO_MTX_COUNT(v0) | | 769 | ldl t0, CPU_INFO_MTX_COUNT(v0) |
770 | addl t0, 1, t0 | | 770 | addl t0, 1, t0 |
771 | stl t0, CPU_INFO_MTX_COUNT(v0) | | 771 | stl t0, CPU_INFO_MTX_COUNT(v0) |
772 | | | 772 | |
773 | /* | | 773 | /* |
774 | * Step 3: Pop alpha_softint_switchto()'s stack frame | | 774 | * Step 3: Pop alpha_softint_switchto()'s stack frame |
775 | * and return. | | 775 | * and return. |
776 | */ | | 776 | */ |
777 | ldq ra, 0(sp) /* restore ra */ | | 777 | ldq ra, 0(sp) /* restore ra */ |
778 | lda sp, 16(sp) /* pop stack frame */ | | 778 | lda sp, 16(sp) /* pop stack frame */ |
779 | RET | | 779 | RET |
780 | END(alpha_softint_return) | | 780 | END(alpha_softint_return) |
781 | | | 781 | |
782 | /* | | 782 | /* |
783 | * struct lwp *cpu_switchto(struct lwp *current, struct lwp *next, | | 783 | * struct lwp *cpu_switchto(struct lwp *current, struct lwp *next, |
784 | * bool returning) | | 784 | * bool returning) |
785 | * Switch to the specified next LWP | | 785 | * Switch to the specified next LWP |
786 | * Arguments: | | 786 | * Arguments: |
787 | * a0 'struct lwp *' of the LWP to switch from | | 787 | * a0 'struct lwp *' of the LWP to switch from |
788 | * a1 'struct lwp *' of the LWP to switch to | | 788 | * a1 'struct lwp *' of the LWP to switch to |
789 | * a2 non-zero if we're returning to an interrupted LWP | | 789 | * a2 non-zero if we're returning to an interrupted LWP |
790 | * from a soft interrupt | | 790 | * from a soft interrupt |
791 | */ | | 791 | */ |
792 | LEAF(cpu_switchto, 0) | | 792 | LEAF(cpu_switchto, 0) |
793 | LDGP(pv) | | 793 | LDGP(pv) |
794 | | | 794 | |
795 | /* | | 795 | /* |
796 | * do an inline savectx(), to save old context | | 796 | * do an inline savectx(), to save old context |
797 | */ | | 797 | */ |
798 | ldq a3, L_PCB(a0) | | 798 | ldq a3, L_PCB(a0) |
799 | /* NOTE: ksp is stored by the swpctx */ | | 799 | /* NOTE: ksp is stored by the swpctx */ |
800 | stq s0, PCB_CONTEXT+(0 * 8)(a3) /* store s0 - s6 */ | | 800 | stq s0, PCB_CONTEXT+(0 * 8)(a3) /* store s0 - s6 */ |
801 | stq s1, PCB_CONTEXT+(1 * 8)(a3) | | 801 | stq s1, PCB_CONTEXT+(1 * 8)(a3) |
802 | stq s2, PCB_CONTEXT+(2 * 8)(a3) | | 802 | stq s2, PCB_CONTEXT+(2 * 8)(a3) |
803 | stq s3, PCB_CONTEXT+(3 * 8)(a3) | | 803 | stq s3, PCB_CONTEXT+(3 * 8)(a3) |
804 | stq s4, PCB_CONTEXT+(4 * 8)(a3) | | 804 | stq s4, PCB_CONTEXT+(4 * 8)(a3) |
805 | stq s5, PCB_CONTEXT+(5 * 8)(a3) | | 805 | stq s5, PCB_CONTEXT+(5 * 8)(a3) |
806 | stq s6, PCB_CONTEXT+(6 * 8)(a3) | | 806 | stq s6, PCB_CONTEXT+(6 * 8)(a3) |
807 | stq ra, PCB_CONTEXT+(7 * 8)(a3) /* store ra */ | | 807 | stq ra, PCB_CONTEXT+(7 * 8)(a3) /* store ra */ |
808 | | | 808 | |
809 | mov a0, s4 /* save old curlwp */ | | 809 | mov a0, s4 /* save old curlwp */ |
810 | mov a1, s2 /* save new lwp */ | | 810 | mov a1, s2 /* save new lwp */ |
811 | | | 811 | |
812 | /* | | 812 | /* |
813 | * Check to see if we're doing a light-weight switch back to | | 813 | * Check to see if we're doing a light-weight switch back to |
814 | * an interrupted LWP (referred to as the "pinned" LWP) from | | 814 | * an interrupted LWP (referred to as the "pinned" LWP) from |
815 | * a softint LWP. In this case we have been running on the | | 815 | * a softint LWP. In this case we have been running on the |
816 | * pinned LWP's context -- swpctx was not used to get here -- | | 816 | * pinned LWP's context -- swpctx was not used to get here -- |
817 | * so we won't be using swpctx to go back, either. | | 817 | * so we won't be using swpctx to go back, either. |
818 | */ | | 818 | */ |
819 | bne a2, 3f /* yes, go handle it */ | | 819 | bne a2, 3f /* yes, go handle it */ |
820 | /* no, normal context switch */ | | 820 | /* no, normal context switch */ |
821 | | | 821 | |
822 | /* Switch to the new PCB. */ | | 822 | /* Switch to the new PCB. */ |
823 | ldq a0, L_MD_PCBPADDR(s2) | | 823 | ldq a0, L_MD_PCBPADDR(s2) |
824 | call_pal PAL_OSF1_swpctx /* clobbers a0, t0, t8-t11, v0 */ | | 824 | call_pal PAL_OSF1_swpctx /* clobbers a0, t0, t8-t11, v0 */ |
825 | | | 825 | |
826 | 1: SET_CURLWP(s2) /* curlwp = l */ | | 826 | 1: SET_CURLWP(s2) /* curlwp = l */ |
827 | | | 827 | |
828 | /* | | 828 | /* |
829 | * Now running on the new PCB. | | 829 | * Now running on the new PCB. |
830 | */ | | 830 | */ |
831 | ldq s0, L_PCB(s2) | | 831 | ldq s0, L_PCB(s2) |
832 | | | 832 | |
833 | /* | | 833 | /* |
834 | * Check for restartable atomic sequences (RAS). | | 834 | * Check for restartable atomic sequences (RAS). |
835 | */ | | 835 | */ |
836 | ldq a0, L_PROC(s2) /* first ras_lookup() arg */ | | 836 | ldq a0, L_PROC(s2) /* first ras_lookup() arg */ |
837 | ldq t0, P_RASLIST(a0) /* any RAS entries? */ | | 837 | ldq t0, P_RASLIST(a0) /* any RAS entries? */ |
838 | beq t0, 2f /* no, skip */ | | 838 | beq t0, 2f /* no, skip */ |
839 | ldq s1, L_MD_TF(s2) /* s1 = l->l_md.md_tf */ | | 839 | ldq s1, L_MD_TF(s2) /* s1 = l->l_md.md_tf */ |
840 | ldq a1, (FRAME_PC*8)(s1) /* second ras_lookup() arg */ | | 840 | ldq a1, (FRAME_PC*8)(s1) /* second ras_lookup() arg */ |
841 | CALL(ras_lookup) /* ras_lookup(p, PC) */ | | 841 | CALL(ras_lookup) /* ras_lookup(p, PC) */ |
842 | addq v0, 1, t0 /* -1 means "not in ras" */ | | 842 | addq v0, 1, t0 /* -1 means "not in ras" */ |
843 | beq t0, 2f | | 843 | beq t0, 2f |
844 | stq v0, (FRAME_PC*8)(s1) | | 844 | stq v0, (FRAME_PC*8)(s1) |
845 | | | 845 | |
846 | 2: | | 846 | 2: |
847 | mov s4, v0 /* return the old lwp */ | | 847 | mov s4, v0 /* return the old lwp */ |
848 | /* | | 848 | /* |
849 | * Restore registers and return. | | 849 | * Restore registers and return. |
850 | * NOTE: ksp is restored by the swpctx. | | 850 | * NOTE: ksp is restored by the swpctx. |
851 | */ | | 851 | */ |
852 | ldq s1, PCB_CONTEXT+(1 * 8)(s0) /* restore s1-s6 */ | | 852 | ldq s1, PCB_CONTEXT+(1 * 8)(s0) /* restore s1-s6 */ |
853 | ldq s2, PCB_CONTEXT+(2 * 8)(s0) | | 853 | ldq s2, PCB_CONTEXT+(2 * 8)(s0) |
854 | ldq s3, PCB_CONTEXT+(3 * 8)(s0) | | 854 | ldq s3, PCB_CONTEXT+(3 * 8)(s0) |
855 | ldq s4, PCB_CONTEXT+(4 * 8)(s0) | | 855 | ldq s4, PCB_CONTEXT+(4 * 8)(s0) |
856 | ldq s5, PCB_CONTEXT+(5 * 8)(s0) | | 856 | ldq s5, PCB_CONTEXT+(5 * 8)(s0) |
857 | ldq s6, PCB_CONTEXT+(6 * 8)(s0) | | 857 | ldq s6, PCB_CONTEXT+(6 * 8)(s0) |
858 | ldq ra, PCB_CONTEXT+(7 * 8)(s0) /* restore ra */ | | 858 | ldq ra, PCB_CONTEXT+(7 * 8)(s0) /* restore ra */ |
859 | ldq s0, PCB_CONTEXT+(0 * 8)(s0) /* restore s0 */ | | 859 | ldq s0, PCB_CONTEXT+(0 * 8)(s0) /* restore s0 */ |
860 | | | 860 | |
861 | RET | | 861 | RET |
862 | | | 862 | |
863 | 3: /* | | 863 | 3: /* |
864 | * Registers right now: | | 864 | * Registers right now: |
865 | * | | 865 | * |
866 | * a0 old LWP | | 866 | * a0 old LWP |
867 | * a1 new LWP | | 867 | * a1 new LWP |
868 | * a3 old PCB | | 868 | * a3 old PCB |
869 | * | | 869 | * |
870 | * What we need to do here is swap the stack, since we won't | | 870 | * What we need to do here is swap the stack, since we won't |
871 | * be getting that from swpctx. | | 871 | * be getting that from swpctx. |
872 | */ | | 872 | */ |
873 | ldq a2, L_PCB(a1) /* a2 = new PCB */ | | 873 | ldq a2, L_PCB(a1) /* a2 = new PCB */ |
874 | stq sp, PCB_HWPCB_KSP(a3) /* save old SP */ | | 874 | stq sp, PCB_HWPCB_KSP(a3) /* save old SP */ |
875 | ldq sp, PCB_HWPCB_KSP(a2) /* restore new SP */ | | 875 | ldq sp, PCB_HWPCB_KSP(a2) /* restore new SP */ |
876 | br 1b /* finish up */ | | 876 | br 1b /* finish up */ |
877 | END(cpu_switchto) | | 877 | END(cpu_switchto) |
878 | | | 878 | |
879 | /* | | 879 | /* |
880 | * lwp_trampoline() | | 880 | * lwp_trampoline() |
881 | * | | 881 | * |
882 | * Arrange for a function to be invoked neatly, after a cpu_lwp_fork(), | | 882 | * Arrange for a function to be invoked neatly, after a cpu_lwp_fork(), |
883 | * which has set up our pcb_context for us. But we actually *get here* | | 883 | * which has set up our pcb_context for us. But we actually *get here* |
884 | * via cpu_switchto(), which returns the LWP we switched away from in v0. | | 884 | * via cpu_switchto(), which returns the LWP we switched away from in v0. |
885 | * | | 885 | * |
886 | * Invokes the function specified by the s0 register with the return | | 886 | * Invokes the function specified by the s0 register with the return |
887 | * address specified by the s1 register and with one argument specified | | 887 | * address specified by the s1 register and with one argument specified |
888 | * by the s2 register. | | 888 | * by the s2 register. |
889 | */ | | 889 | */ |
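| /* | | | /*
| * Editorial sketch (not in the source), roughly: | | | * Editorial sketch (not in the source), roughly:
| * | | | *
| * lwp_startup(prev_lwp, curlwp);  -- prev_lwp arrived in v0 | | | * lwp_startup(prev_lwp, curlwp);  -- prev_lwp arrived in v0
| * ra = s1;    -- usually exception_return() | | | * ra = s1;    -- usually exception_return()
| * (*s0)(s2);  -- i.e. func(arg) | | | * (*s0)(s2);  -- i.e. func(arg)
| */ | | | */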
890 | LEAF_NOPROFILE(lwp_trampoline, 0) | | 890 | LEAF_NOPROFILE(lwp_trampoline, 0) |
891 | mov v0, a0 /* a0 = prev_lwp (from cpu_switchto()) */ | | 891 | mov v0, a0 /* a0 = prev_lwp (from cpu_switchto()) */ |
892 | mov s3, a1 /* a1 = new_lwp (that's us!) */ | | 892 | mov s3, a1 /* a1 = new_lwp (that's us!) */ |
893 | CALL(lwp_startup) /* lwp_startup(prev_lwp, new_lwp); */ | | 893 | CALL(lwp_startup) /* lwp_startup(prev_lwp, new_lwp); */ |
894 | mov s0, pv /* pv = func */ | | 894 | mov s0, pv /* pv = func */ |
895 | mov s1, ra /* ra = (probably exception_return()) */ | | 895 | mov s1, ra /* ra = (probably exception_return()) */ |
896 | mov s2, a0 /* a0 = arg */ | | 896 | mov s2, a0 /* a0 = arg */ |
897 | jmp zero, (pv) /* func(arg) */ | | 897 | jmp zero, (pv) /* func(arg) */ |
898 | END(lwp_trampoline) | | 898 | END(lwp_trampoline) |
899 | | | 899 | |
900 | /**************************************************************************/ | | 900 | /**************************************************************************/ |
901 | | | 901 | |
902 | /* | | 902 | /* |
903 | * alpha_copystr(const void *from, void *to, size_t len, size_t *donep) | | 903 | * alpha_copystr(const void *from, void *to, size_t len, size_t *donep) |
904 | */ | | 904 | */ |
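| /* | | | /*
| * Editorial note (not in the source): the BWX variant below, new in | | | * Editorial note (not in the source): the BWX variant below, new in
| * this revision, and the generic alpha_copystr implement the same | | | * this revision, and the generic alpha_copystr implement the same
| * contract; the trailing nops and the exported *_end labels keep the | | | * contract; the trailing nops and the exported *_end labels keep the
| * two routines the same size, presumably so the BWX version can be | | | * two routines the same size, presumably so the BWX version can be
| * copied over the generic one at boot on CPUs that have the | | | * copied over the generic one at boot on CPUs that have the
| * byte/word extension.  In rough C, both are: | | | * byte/word extension.  In rough C, both are:
| * | | | *
| * int | | | * int
| * alpha_copystr(const char *from, char *to, size_t len, | | | * alpha_copystr(const char *from, char *to, size_t len,
| *     size_t *donep) | | | *     size_t *donep)
| * { | | | * {
| *     size_t i; | | | *     size_t i;
| * | | | *
| *     for (i = 0; i < len; i++) { | | | *     for (i = 0; i < len; i++) {
| *         if ((*to++ = *from++) == '\0') { | | | *         if ((*to++ = *from++) == '\0') {
| *             if (donep != NULL) | | | *             if (donep != NULL)
| *                 *donep = i + 1;  -- count includes the NUL | | | *                 *donep = i + 1;  -- count includes the NUL
| *             return 0; | | | *             return 0;
| *         } | | | *         }
| *     } | | | *     }
| *     if (donep != NULL) | | | *     if (donep != NULL)
| *         *donep = i; | | | *         *donep = i;
| *     return ENAMETOOLONG; | | | *     return ENAMETOOLONG;
| * } | | | * }
| */ | | | */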
| | | 905 | .arch ev56 |
| | | 906 | LEAF(alpha_copystr_bwx, 4) |
| | | 907 | LDGP(pv) |
| | | 908 | |
| | | 909 | mov a2, t0 /* t0 = i = len */ |
| | | 910 | beq a2, 5f /* if (len == 0), bail */ |
| | | 911 | |
| | | 912 | 1: ldbu t1, 0(a0) /* t1 = *from */ |
| | | 913 | subl a2, 1, a2 /* len-- */ |
| | | 914 | addq a0, 1, a0 /* from++ */ |
| | | 915 | stb t1, 0(a1) /* *to = t1 */ |
| | | 916 | beq t1, 2f /* if (t1 == '\0'), bail out */ |
| | | 917 | addq a1, 1, a1 /* to++ */ |
| | | 918 | bne a2, 1b /* if (len != 0), copy more */ |
| | | 919 | |
| | | 920 | 2: beq a3, 3f /* if (lenp != NULL) */ |
| | | 921 | subl t0, a2, t0 /* *lenp = (i - len) */ |
| | | 922 | stq t0, 0(a3) |
| | | 923 | 3: bne t1, 4f /* *from != '\0'; leave in a huff */ |
| | | 924 | |
| | | 925 | mov zero, v0 /* return 0. */ |
| | | 926 | RET |
| | | 927 | |
| | | 928 | 4: ldiq v0, ENAMETOOLONG |
| | | 929 | RET |
| | | 930 | |
| | | 931 | 5: ldiq t1, 1 /* fool the test above... */ |
| | | 932 | br zero, 2b |
| | | 933 | |
| | | 934 | nop /* pad to same length as... */ |
| | | 935 | nop /* non-BWX version. */ |
| | | 936 | nop |
| | | 937 | nop |
| | | 938 | nop |
| | | 939 | EXPORT(alpha_copystr_bwx_end) |
| | | 940 | END(alpha_copystr_bwx) |
| | | 941 | .arch ev4 |
| | | 942 | |
905 | LEAF(alpha_copystr, 4) | | 943 | LEAF(alpha_copystr, 4) |
906 | LDGP(pv) | | 944 | LDGP(pv) |
907 | | | 945 | |
908 | mov a2, t0 /* t0 = i = len */ | | 946 | mov a2, t0 /* t0 = i = len */ |
909 | beq a2, 5f /* if (len == 0), bail */ | | 947 | beq a2, 5f /* if (len == 0), bail */ |
910 | | | 948 | |
911 | 1: ldq_u t1, 0(a0) /* t1 = *from */ | | 949 | 1: ldq_u t1, 0(a0) /* t1 = *from */ |
912 | extbl t1, a0, t1 | | 950 | extbl t1, a0, t1 |
913 | ldq_u t3, 0(a1) /* t3 = quad around *to */ | | 951 | ldq_u t3, 0(a1) /* t3 = quad around *to */
914 | insbl t1, a1, t2 | | 952 | insbl t1, a1, t2 |
915 | mskbl t3, a1, t3 | | 953 | mskbl t3, a1, t3 |
916 | or t3, t2, t3 /* add *from to quad around *to */ | | 954 | or t3, t2, t3 /* add *from to quad around *to */ |
917 | stq_u t3, 0(a1) /* write out that quad */ | | 955 | stq_u t3, 0(a1) /* write out that quad */ |
918 | | | 956 | |
919 | subl a2, 1, a2 /* len-- */ | | 957 | subl a2, 1, a2 /* len-- */ |
920 | beq t1, 2f /* if (*from == 0), bail out */ | | 958 | beq t1, 2f /* if (*from == 0), bail out */ |
921 | addq a1, 1, a1 /* to++ */ | | 959 | addq a1, 1, a1 /* to++ */ |
922 | addq a0, 1, a0 /* from++ */ | | 960 | addq a0, 1, a0 /* from++ */ |
923 | bne a2, 1b /* if (len != 0) copy more */ | | 961 | bne a2, 1b /* if (len != 0) copy more */ |
924 | | | 962 | |
925 | 2: beq a3, 3f /* if (lenp != NULL) */ | | 963 | 2: beq a3, 3f /* if (lenp != NULL) */ |
926 | subl t0, a2, t0 /* *lenp = (i - len) */ | | 964 | subl t0, a2, t0 /* *lenp = (i - len) */ |
927 | stq t0, 0(a3) | | 965 | stq t0, 0(a3) |
928 | 3: bne t1, 4f /* *from != '\0'; leave in a huff */ | | 966 | 3: bne t1, 4f /* *from != '\0'; leave in a huff */ |
929 | | | 967 | |
930 | mov zero, v0 /* return 0. */ | | 968 | mov zero, v0 /* return 0. */ |
931 | RET | | 969 | RET |
932 | | | 970 | |
933 | 4: ldiq v0, ENAMETOOLONG | | 971 | 4: ldiq v0, ENAMETOOLONG |
934 | RET | | 972 | RET |
935 | | | 973 | |
936 | 5: ldiq t1, 1 /* fool the test above... */ | | 974 | 5: ldiq t1, 1 /* fool the test above... */ |
937 | br zero, 2b | | 975 | br zero, 2b |
| | | 976 | EXPORT(alpha_copystr_end) |
938 | END(alpha_copystr) | | 977 | END(alpha_copystr) |
939 | | | 978 | |
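| /* | | | /*
| * Editorial note (not in the source): the copy(9) wrappers below | | | * Editorial note (not in the source): the copy(9) wrappers below
| * share one protocol -- check the user address against | | | * share one protocol -- check the user address against
| * VM_MAX_ADDRESS, point curlwp's pcb_onfault at copyerr, do the | | | * VM_MAX_ADDRESS, point curlwp's pcb_onfault at copyerr, do the
| * copy, then clear pcb_onfault.  A page fault inside the copy makes | | | * copy, then clear pcb_onfault.  A page fault inside the copy makes
| * trap() resume at copyerr, which unwinds the frame and returns | | | * trap() resume at copyerr, which unwinds the frame and returns
| * the error. | | | * the error.
| */ | | | */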
940 | NESTED(copyinstr, 4, 16, ra, IM_RA|IM_S0, 0) | | 979 | NESTED(copyinstr, 4, 16, ra, IM_RA|IM_S0, 0) |
941 | LDGP(pv) | | 980 | LDGP(pv) |
942 | lda sp, -16(sp) /* set up stack frame */ | | 981 | lda sp, -16(sp) /* set up stack frame */ |
943 | stq ra, (16-8)(sp) /* save ra */ | | 982 | stq ra, (16-8)(sp) /* save ra */ |
944 | stq s0, (16-16)(sp) /* save s0 */ | | 983 | stq s0, (16-16)(sp) /* save s0 */ |
945 | ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */ | | 984 | ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */ |
946 | cmpult a0, t0, t1 /* is in user space. */ | | 985 | cmpult a0, t0, t1 /* is in user space. */ |
947 | beq t1, copyerr_efault /* if it's not, error out. */ | | 986 | beq t1, copyerr_efault /* if it's not, error out. */ |
948 | /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | | 987 | /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ |
949 | GET_CURLWP | | 988 | GET_CURLWP |
950 | ldq s0, L_PCB(v0) /* s0 = pcb */ | | 989 | ldq s0, L_PCB(v0) /* s0 = pcb */ |
951 | lda v0, copyerr /* set up fault handler. */ | | 990 | lda v0, copyerr /* set up fault handler. */ |
952 | stq v0, PCB_ONFAULT(s0) | | 991 | stq v0, PCB_ONFAULT(s0) |
953 | CALL(alpha_copystr) /* do the copy. */ | | 992 | CALL(alpha_copystr) /* do the copy. */ |
954 | stq zero, PCB_ONFAULT(s0) /* kill the fault handler. */ | | 993 | stq zero, PCB_ONFAULT(s0) /* kill the fault handler. */ |
955 | ldq ra, (16-8)(sp) /* restore ra. */ | | 994 | ldq ra, (16-8)(sp) /* restore ra. */ |
956 | ldq s0, (16-16)(sp) /* restore s0. */ | | 995 | ldq s0, (16-16)(sp) /* restore s0. */ |
957 | lda sp, 16(sp) /* kill stack frame. */ | | 996 | lda sp, 16(sp) /* kill stack frame. */ |
958 | RET /* v0 left over from copystr */ | | 997 | RET /* v0 left over from copystr */ |
959 | END(copyinstr) | | 998 | END(copyinstr) |
960 | | | 999 | |
961 | NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0) | | 1000 | NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0) |
962 | LDGP(pv) | | 1001 | LDGP(pv) |
963 | lda sp, -16(sp) /* set up stack frame */ | | 1002 | lda sp, -16(sp) /* set up stack frame */ |
964 | stq ra, (16-8)(sp) /* save ra */ | | 1003 | stq ra, (16-8)(sp) /* save ra */ |
965 | stq s0, (16-16)(sp) /* save s0 */ | | 1004 | stq s0, (16-16)(sp) /* save s0 */ |
966 | ldiq t0, VM_MAX_ADDRESS /* make sure that dest addr */ | | 1005 | ldiq t0, VM_MAX_ADDRESS /* make sure that dest addr */ |
967 | cmpult a1, t0, t1 /* is in user space. */ | | 1006 | cmpult a1, t0, t1 /* is in user space. */ |
968 | beq t1, copyerr_efault /* if it's not, error out. */ | | 1007 | beq t1, copyerr_efault /* if it's not, error out. */ |
969 | /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | | 1008 | /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ |
970 | GET_CURLWP | | 1009 | GET_CURLWP |
971 | ldq s0, L_PCB(v0) /* s0 = pcb */ | | 1010 | ldq s0, L_PCB(v0) /* s0 = pcb */ |
972 | lda v0, copyerr /* set up fault handler. */ | | 1011 | lda v0, copyerr /* set up fault handler. */ |
973 | stq v0, PCB_ONFAULT(s0) | | 1012 | stq v0, PCB_ONFAULT(s0) |
974 | CALL(alpha_copystr) /* do the copy. */ | | 1013 | CALL(alpha_copystr) /* do the copy. */ |
975 | stq zero, PCB_ONFAULT(s0) /* kill the fault handler. */ | | 1014 | stq zero, PCB_ONFAULT(s0) /* kill the fault handler. */ |
976 | ldq ra, (16-8)(sp) /* restore ra. */ | | 1015 | ldq ra, (16-8)(sp) /* restore ra. */ |
977 | ldq s0, (16-16)(sp) /* restore s0. */ | | 1016 | ldq s0, (16-16)(sp) /* restore s0. */ |
978 | lda sp, 16(sp) /* kill stack frame. */ | | 1017 | lda sp, 16(sp) /* kill stack frame. */ |
979 | RET /* v0 left over from copystr */ | | 1018 | RET /* v0 left over from copystr */ |
980 | END(copyoutstr) | | 1019 | END(copyoutstr) |
981 | | | 1020 | |
982 | /* | | 1021 | /* |
983 | * kcopy(const void *src, void *dst, size_t len); | | 1022 | * kcopy(const void *src, void *dst, size_t len); |
984 | * | | 1023 | * |
985 | * Copy len bytes from src to dst, aborting if we encounter a fatal | | 1024 | * Copy len bytes from src to dst, aborting if we encounter a fatal |
986 | * page fault. | | 1025 | * page fault. |
987 | * | | 1026 | * |
988 | * kcopy() _must_ save and restore the old fault handler since it is | | 1027 | * kcopy() _must_ save and restore the old fault handler since it is |
989 | * called by uiomove(), which may be in the path of servicing a non-fatal | | 1028 | * called by uiomove(), which may be in the path of servicing a non-fatal |
990 | * page fault. | | 1029 | * page fault. |
991 | * | | 1030 | * |
992 | * N.B. This implementation is a wrapper around memcpy(), which is | | 1031 | * N.B. This implementation is a wrapper around memcpy(), which is |
993 | * implemented in src/common/lib/libc/arch/alpha/string/bcopy.S. | | 1032 | * implemented in src/common/lib/libc/arch/alpha/string/bcopy.S. |
994 | * This is safe ONLY because we know that, as implemented, it is | | 1033 | * This is safe ONLY because we know that, as implemented, it is |
995 | * a LEAF function (and thus does not use any callee-saved registers). | | 1034 | * a LEAF function (and thus does not use any callee-saved registers). |
996 | */ | | 1035 | */ |
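| /* | | | /*
| * Editorial sketch (not in the source), with curpcb standing in | | | * Editorial sketch (not in the source), with curpcb standing in
| * for curlwp's PCB: | | | * for curlwp's PCB:
| * | | | *
| * int | | | * int
| * kcopy(const void *src, void *dst, size_t len) | | | * kcopy(const void *src, void *dst, size_t len)
| * { | | | * {
| *     void *saved = curpcb->pcb_onfault; | | | *     void *saved = curpcb->pcb_onfault;
| * | | | *
| *     curpcb->pcb_onfault = kcopyerr; | | | *     curpcb->pcb_onfault = kcopyerr;
| *     memcpy(dst, src, len);        -- hence the a0/a1 swap below | | | *     memcpy(dst, src, len);        -- hence the a0/a1 swap below
| *     curpcb->pcb_onfault = saved;  -- restored, not cleared | | | *     curpcb->pcb_onfault = saved;  -- restored, not cleared
| *     return 0;  -- on a fault, kcopyerr returns the error instead | | | *     return 0;  -- on a fault, kcopyerr returns the error instead
| * } | | | * }
| */ | | | */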
997 | NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0) | | 1036 | NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0) |
998 | LDGP(pv) | | 1037 | LDGP(pv) |
999 | lda sp, -32(sp) /* set up stack frame */ | | 1038 | lda sp, -32(sp) /* set up stack frame */ |
1000 | stq ra, (32-8)(sp) /* save ra */ | | 1039 | stq ra, (32-8)(sp) /* save ra */ |
1001 | stq s0, (32-16)(sp) /* save s0 */ | | 1040 | stq s0, (32-16)(sp) /* save s0 */ |
1002 | stq s1, (32-24)(sp) /* save s1 */ | | 1041 | stq s1, (32-24)(sp) /* save s1 */ |
1003 | /* Swap a0, a1, for call to memcpy(). */ | | 1042 | /* Swap a0, a1, for call to memcpy(). */ |
1004 | mov a1, v0 | | 1043 | mov a1, v0 |
1005 | mov a0, a1 | | 1044 | mov a0, a1 |
1006 | mov v0, a0 | | 1045 | mov v0, a0 |
1007 | /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | | 1046 | /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ |
1008 | GET_CURLWP | | 1047 | GET_CURLWP |
1009 | ldq s1, L_PCB(v0) /* s1 = pcb */ | | 1048 | ldq s1, L_PCB(v0) /* s1 = pcb */ |
1010 | lda v0, kcopyerr /* set up fault handler. */ | | 1049 | lda v0, kcopyerr /* set up fault handler. */ |
1011 | ldq s0, PCB_ONFAULT(s1) /* save old handler. */ | | 1050 | ldq s0, PCB_ONFAULT(s1) /* save old handler. */ |
1012 | stq v0, PCB_ONFAULT(s1) | | 1051 | stq v0, PCB_ONFAULT(s1) |
1013 | CALL(memcpy) /* do the copy. */ | | 1052 | CALL(memcpy) /* do the copy. */ |
1014 | stq s0, PCB_ONFAULT(s1) /* restore the old handler. */ | | 1053 | stq s0, PCB_ONFAULT(s1) /* restore the old handler. */ |
1015 | ldq ra, (32-8)(sp) /* restore ra. */ | | 1054 | ldq ra, (32-8)(sp) /* restore ra. */ |
1016 | ldq s0, (32-16)(sp) /* restore s0. */ | | 1055 | ldq s0, (32-16)(sp) /* restore s0. */ |
1017 | ldq s1, (32-24)(sp) /* restore s1. */ | | 1056 | ldq s1, (32-24)(sp) /* restore s1. */ |
1018 | lda sp, 32(sp) /* kill stack frame. */ | | 1057 | lda sp, 32(sp) /* kill stack frame. */ |
1019 | mov zero, v0 /* return 0. */ | | 1058 | mov zero, v0 /* return 0. */ |
1020 | RET | | 1059 | RET |
1021 | END(kcopy) | | 1060 | END(kcopy) |
1022 | | | 1061 | |
1023 | LEAF(kcopyerr, 0) | | 1062 | LEAF(kcopyerr, 0) |
1024 | LDGP(pv) | | 1063 | LDGP(pv) |
1025 | stq s0, PCB_ONFAULT(s1) /* s1 == pcb (from above) */ | | 1064 | stq s0, PCB_ONFAULT(s1) /* s1 == pcb (from above) */ |
1026 | ldq ra, (32-8)(sp) /* restore ra. */ | | 1065 | ldq ra, (32-8)(sp) /* restore ra. */ |
1027 | ldq s0, (32-16)(sp) /* restore s0. */ | | 1066 | ldq s0, (32-16)(sp) /* restore s0. */ |
1028 | ldq s1, (32-24)(sp) /* restore s1. */ | | 1067 | ldq s1, (32-24)(sp) /* restore s1. */ |
1029 | lda sp, 32(sp) /* kill stack frame. */ | | 1068 | lda sp, 32(sp) /* kill stack frame. */ |
1030 | RET | | 1069 | RET |
1031 | END(kcopyerr) | | 1070 | END(kcopyerr) |
1032 | | | 1071 | |
1033 | NESTED(copyin, 3, 16, ra, IM_RA|IM_S0, 0) | | 1072 | NESTED(copyin, 3, 16, ra, IM_RA|IM_S0, 0) |
1034 | LDGP(pv) | | 1073 | LDGP(pv) |
1035 | lda sp, -16(sp) /* set up stack frame */ | | 1074 | lda sp, -16(sp) /* set up stack frame */ |
1036 | stq ra, (16-8)(sp) /* save ra */ | | 1075 | stq ra, (16-8)(sp) /* save ra */ |
1037 | stq s0, (16-16)(sp) /* save s0 */ | | 1076 | stq s0, (16-16)(sp) /* save s0 */ |
1038 | ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */ | | 1077 | ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */ |
1039 | cmpult a0, t0, t1 /* is in user space. */ | | 1078 | cmpult a0, t0, t1 /* is in user space. */ |
1040 | beq t1, copyerr_efault /* if it's not, error out. */ | | 1079 | beq t1, copyerr_efault /* if it's not, error out. */ |
1041 | /* Swap a0, a1, for call to memcpy(). */ | | 1080 | /* Swap a0, a1, for call to memcpy(). */ |
1042 | mov a1, v0 | | 1081 | mov a1, v0 |
1043 | mov a0, a1 | | 1082 | mov a0, a1 |
1044 | mov v0, a0 | | 1083 | mov v0, a0 |
1045 | /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | | 1084 | /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ |
1046 | GET_CURLWP | | 1085 | GET_CURLWP |
1047 | ldq s0, L_PCB(v0) /* s0 = pcb */ | | 1086 | ldq s0, L_PCB(v0) /* s0 = pcb */
1048 | lda v0, copyerr /* set up fault handler. */ | | 1087 | lda v0, copyerr /* set up fault handler. */ |
1049 | stq v0, PCB_ONFAULT(s0) | | 1088 | stq v0, PCB_ONFAULT(s0) |
1050 | CALL(memcpy) /* do the copy. */ | | 1089 | CALL(memcpy) /* do the copy. */ |
1051 | stq zero, PCB_ONFAULT(s0) /* kill the fault handler. */ | | 1090 | stq zero, PCB_ONFAULT(s0) /* kill the fault handler. */ |
1052 | ldq ra, (16-8)(sp) /* restore ra. */ | | 1091 | ldq ra, (16-8)(sp) /* restore ra. */ |
1053 | ldq s0, (16-16)(sp) /* restore s0. */ | | 1092 | ldq s0, (16-16)(sp) /* restore s0. */ |
1054 | lda sp, 16(sp) /* kill stack frame. */ | | 1093 | lda sp, 16(sp) /* kill stack frame. */ |
1055 | mov zero, v0 /* return 0. */ | | 1094 | mov zero, v0 /* return 0. */ |
1056 | RET | | 1095 | RET |
1057 | END(copyin) | | 1096 | END(copyin) |
1058 | | | 1097 | |
1059 | NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0) | | 1098 | NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0) |
1060 | LDGP(pv) | | 1099 | LDGP(pv) |
1061 | lda sp, -16(sp) /* set up stack frame */ | | 1100 | lda sp, -16(sp) /* set up stack frame */ |
1062 | stq ra, (16-8)(sp) /* save ra */ | | 1101 | stq ra, (16-8)(sp) /* save ra */ |
1063 | stq s0, (16-16)(sp) /* save s0 */ | | 1102 | stq s0, (16-16)(sp) /* save s0 */ |
1064 | ldiq t0, VM_MAX_ADDRESS /* make sure that dest addr */ | | 1103 | ldiq t0, VM_MAX_ADDRESS /* make sure that dest addr */ |
1065 | cmpult a1, t0, t1 /* is in user space. */ | | 1104 | cmpult a1, t0, t1 /* is in user space. */ |
1066 | beq t1, copyerr_efault /* if it's not, error out. */ | | 1105 | beq t1, copyerr_efault /* if it's not, error out. */ |
1067 | /* Swap a0, a1, for call to memcpy(). */ | | 1106 | /* Swap a0, a1, for call to memcpy(). */ |
1068 | mov a1, v0 | | 1107 | mov a1, v0 |
1069 | mov a0, a1 | | 1108 | mov a0, a1 |
1070 | mov v0, a0 | | 1109 | mov v0, a0 |
1071 | /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | | 1110 | /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ |
1072 | GET_CURLWP | | 1111 | GET_CURLWP |
1073 | ldq s0, L_PCB(v0) /* s0 = pcb */ | | 1112 | ldq s0, L_PCB(v0) /* s0 = pcb */ |
1074 | lda v0, copyerr /* set up fault handler. */ | | 1113 | lda v0, copyerr /* set up fault handler. */ |
1075 | stq v0, PCB_ONFAULT(s0) | | 1114 | stq v0, PCB_ONFAULT(s0) |
1076 | CALL(memcpy) /* do the copy. */ | | 1115 | CALL(memcpy) /* do the copy. */ |
1077 | stq zero, PCB_ONFAULT(s0) /* kill the fault handler. */ | | 1116 | stq zero, PCB_ONFAULT(s0) /* kill the fault handler. */ |
1078 | ldq ra, (16-8)(sp) /* restore ra. */ | | 1117 | ldq ra, (16-8)(sp) /* restore ra. */ |
1079 | ldq s0, (16-16)(sp) /* restore s0. */ | | 1118 | ldq s0, (16-16)(sp) /* restore s0. */ |
1080 | lda sp, 16(sp) /* kill stack frame. */ | | 1119 | lda sp, 16(sp) /* kill stack frame. */ |
1081 | mov zero, v0 /* return 0. */ | | 1120 | mov zero, v0 /* return 0. */ |
1082 | RET | | 1121 | RET |
1083 | END(copyout) | | 1122 | END(copyout) |
1084 | | | 1123 | |
1085 | LEAF(copyerr_efault, 0) | | 1124 | LEAF(copyerr_efault, 0) |
1086 | ldiq v0, EFAULT /* return EFAULT. */ | | 1125 | ldiq v0, EFAULT /* return EFAULT. */ |
1087 | XLEAF(copyerr, 0) | | 1126 | XLEAF(copyerr, 0) |
1088 | LDGP(pv) | | 1127 | LDGP(pv) |
1089 | ldq ra, (16-8)(sp) /* restore ra. */ | | 1128 | ldq ra, (16-8)(sp) /* restore ra. */ |
1090 | ldq s0, (16-16)(sp) /* restore s0. */ | | 1129 | ldq s0, (16-16)(sp) /* restore s0. */ |
1091 | lda sp, 16(sp) /* kill stack frame. */ | | 1130 | lda sp, 16(sp) /* kill stack frame. */ |
1092 | RET | | 1131 | RET |
1093 | END(copyerr) | | 1132 | END(copyerr) |
1094 | | | 1133 | |
1095 | /**************************************************************************/ | | 1134 | /**************************************************************************/ |
1096 | | | 1135 | |
1097 | #define UFETCHSTORE_PROLOGUE \ | | 1136 | #define UFETCHSTORE_PROLOGUE \ |
1098 | br pv, 1f ;\ | | 1137 | br pv, 1f ;\ |
1099 | 1: LDGP(pv) ;\ | | 1138 | 1: LDGP(pv) ;\ |
1100 | ldiq t0, VM_MAX_ADDRESS /* make sure that addr */ ;\ | | 1139 | ldiq t0, VM_MAX_ADDRESS /* make sure that addr */ ;\ |
1101 | cmpult a0, t0, t1 /* is in user space. */ ;\ | | 1140 | cmpult a0, t0, t1 /* is in user space. */ ;\ |
1102 | beq t1, ufetchstoreerr_efault /* if it's not, error out. */ | | 1141 | beq t1, ufetchstoreerr_efault /* if it's not, error out. */ |
1103 | | | 1142 | |
1104 | /* LINTSTUB: int _ufetch_8(const uint8_t *uaddr, uint8_t *valp); */ | | 1143 | /* LINTSTUB: int _ufetch_8(const uint8_t *uaddr, uint8_t *valp); */ |
1105 | LEAF_NOPROFILE(_ufetch_8, 2) | | 1144 | LEAF_NOPROFILE(_ufetch_8, 2) |
1106 | UFETCHSTORE_PROLOGUE | | 1145 | UFETCHSTORE_PROLOGUE |
1107 | .L_ufetch_8_start: | | 1146 | .L_ufetch_8_start: |
1108 | ldq_u t0, 0(a0) /* load quad containing byte */ | | 1147 | ldq_u t0, 0(a0) /* load quad containing byte */ |
1109 | .L_ufetch_8_end: | | 1148 | .L_ufetch_8_end: |
1110 | extbl t0, a0, a0 /* a0 = extracted byte */ | | 1149 | extbl t0, a0, a0 /* a0 = extracted byte */ |
1111 | ldq_u t0, 0(a1) /* load dest quad */ | | 1150 | ldq_u t0, 0(a1) /* load dest quad */ |
1112 | insbl a0, a1, a0 /* a0 = byte in target position */ | | 1151 | insbl a0, a1, a0 /* a0 = byte in target position */ |
1113 | mskbl t0, a1, t0 /* clear target byte in destination */ | | 1152 | mskbl t0, a1, t0 /* clear target byte in destination */ |
1114 | or a0, t0, a0 /* or in byte to destination */ | | 1153 | or a0, t0, a0 /* or in byte to destination */
1115 | stq_u a0, 0(a1) /* *a1 = fetched byte! */ | | 1154 | stq_u a0, 0(a1) /* *a1 = fetched byte! */ |
1116 | mov zero, v0 | | 1155 | mov zero, v0 |
1117 | RET | | 1156 | RET |
1118 | END(_ufetch_8) | | 1157 | END(_ufetch_8) |
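| /* | | | /*
| * Editorial sketch (not in the source): the BWX-less byte fetch | | | * Editorial sketch (not in the source): the BWX-less byte fetch
| * above, in rough C: | | | * above, in rough C:
| * | | | *
| * uint64_t q = *(uint64_t *)(uaddr & ~7UL);  -- ldq_u | | | * uint64_t q = *(uint64_t *)(uaddr & ~7UL);  -- ldq_u
| * uint8_t  b = q >> ((uaddr & 7) * 8);       -- extbl | | | * uint8_t  b = q >> ((uaddr & 7) * 8);       -- extbl
| * -- there is no byte store without BWX, so *valp is updated | | | * -- there is no byte store without BWX, so *valp is updated
| * -- with a read-modify-write of the quad around it (ldq_u, | | | * -- with a read-modify-write of the quad around it (ldq_u,
| * -- insbl, mskbl, or, stq_u). | | | * -- insbl, mskbl, or, stq_u).
| */ | | | */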
1119 | | | 1158 | |
1120 | /* LINTSTUB: int _ufetch_16(const uint16_t *uaddr, uint16_t *valp); */ | | 1159 | /* LINTSTUB: int _ufetch_16(const uint16_t *uaddr, uint16_t *valp); */ |
1121 | LEAF_NOPROFILE(_ufetch_16, 2) | | 1160 | LEAF_NOPROFILE(_ufetch_16, 2) |
1122 | UFETCHSTORE_PROLOGUE | | 1161 | UFETCHSTORE_PROLOGUE |
1123 | .L_ufetch_16_start: | | 1162 | .L_ufetch_16_start: |
1124 | ldq_u t0, 0(a0) /* load quad containing short */ | | 1163 | ldq_u t0, 0(a0) /* load quad containing short */ |
1125 | .L_ufetch_16_end: | | 1164 | .L_ufetch_16_end: |
1126 | extwl t0, a0, a0 /* a0 = extracted short */ | | 1165 | extwl t0, a0, a0 /* a0 = extracted short */ |
1127 | ldq_u t0, 0(a1) /* load dest quad */ | | 1166 | ldq_u t0, 0(a1) /* load dest quad */ |
1128 | inswl a0, a1, a0 /* a0 = short in target position */ | | 1167 | inswl a0, a1, a0 /* a0 = short in target position */ |
1129 | mskwl t0, a1, t0 /* clear target short in destination */ | | 1168 | mskwl t0, a1, t0 /* clear target short in destination */ |
1130 | or a0, t0, a0 /* or in short to destination */ | | 1169 | or a0, t0, a0 /* or in short to destination */
1131 | stq_u a0, 0(a1) /* *a1 = fetched short! */ | | 1170 | stq_u a0, 0(a1) /* *a1 = fetched short! */ |
1132 | mov zero, v0 | | 1171 | mov zero, v0 |
1133 | RET | | 1172 | RET |
1134 | END(_ufetch_16) | | 1173 | END(_ufetch_16) |
1135 | | | 1174 | |
1136 | /* LINTSTUB: int _ufetch_32(const uint32_t *uaddr, uint32_t *valp); */ | | 1175 | /* LINTSTUB: int _ufetch_32(const uint32_t *uaddr, uint32_t *valp); */ |
1137 | LEAF_NOPROFILE(_ufetch_32, 2) | | 1176 | LEAF_NOPROFILE(_ufetch_32, 2) |
1138 | UFETCHSTORE_PROLOGUE | | 1177 | UFETCHSTORE_PROLOGUE |
1139 | .L_ufetch_32_start: | | 1178 | .L_ufetch_32_start: |
1140 | ldl v0, 0(a0) | | 1179 | ldl v0, 0(a0) |
1141 | .L_ufetch_32_end: | | 1180 | .L_ufetch_32_end: |
1142 | stl v0, 0(a1) | | 1181 | stl v0, 0(a1) |
1143 | mov zero, v0 | | 1182 | mov zero, v0 |
1144 | RET | | 1183 | RET |
1145 | END(_ufetch_32) | | 1184 | END(_ufetch_32) |
1146 | | | 1185 | |
1147 | /* LINTSTUB: int _ufetch_64(const uint64_t *uaddr, uint64_t *valp); */ | | 1186 | /* LINTSTUB: int _ufetch_64(const uint64_t *uaddr, uint64_t *valp); */ |
1148 | LEAF_NOPROFILE(_ufetch_64, 2) | | 1187 | LEAF_NOPROFILE(_ufetch_64, 2) |
1149 | UFETCHSTORE_PROLOGUE | | 1188 | UFETCHSTORE_PROLOGUE |
1150 | .L_ufetch_64_start: | | 1189 | .L_ufetch_64_start: |
1151 | ldq v0, 0(a0) | | 1190 | ldq v0, 0(a0) |
1152 | .L_ufetch_64_end: | | 1191 | .L_ufetch_64_end: |
1153 | stq v0, 0(a1) | | 1192 | stq v0, 0(a1) |
1154 | mov zero, v0 | | 1193 | mov zero, v0 |
1155 | RET | | 1194 | RET |
1156 | END(_ufetch_64) | | 1195 | END(_ufetch_64) |
1157 | | | 1196 | |
1158 | /* LINTSTUB: int _ustore_8(uint8_t *uaddr, uint8_t val); */ | | 1197 | /* LINTSTUB: int _ustore_8(uint8_t *uaddr, uint8_t val); */ |
1159 | LEAF_NOPROFILE(_ustore_8, 2) | | 1198 | LEAF_NOPROFILE(_ustore_8, 2) |
1160 | UFETCHSTORE_PROLOGUE | | 1199 | UFETCHSTORE_PROLOGUE |
1161 | zap a1, 0xfe, a1 /* kill arg's high bytes */ | | 1200 | zap a1, 0xfe, a1 /* kill arg's high bytes */ |
1162 | insbl a1, a0, a1 /* move it to the right spot */ | | 1201 | insbl a1, a0, a1 /* move it to the right spot */ |
1163 | .L_ustore_8_start: | | 1202 | .L_ustore_8_start: |
1164 | ldq_u t0, 0(a0) /* load quad around byte */ | | 1203 | ldq_u t0, 0(a0) /* load quad around byte */ |
1165 | mskbl t0, a0, t0 /* kill the target byte */ | | 1204 | mskbl t0, a0, t0 /* kill the target byte */ |
1166 | or t0, a1, a1 /* put the result together */ | | 1205 | or t0, a1, a1 /* put the result together */ |
1167 | stq_u a1, 0(a0) /* and store it. */ | | 1206 | stq_u a1, 0(a0) /* and store it. */ |
1168 | .L_ustore_8_end: | | 1207 | .L_ustore_8_end: |
1169 | mov zero, v0 | | 1208 | mov zero, v0 |
1170 | RET | | 1209 | RET |
1171 | END(_ustore_8) | | 1210 | END(_ustore_8) |
1172 | | | 1211 | |
1173 | /* LINTSTUB: int _ustore_16(uint16_t *uaddr, uint16_t val); */ | | 1212 | /* LINTSTUB: int _ustore_16(uint16_t *uaddr, uint16_t val); */ |
1174 | LEAF_NOPROFILE(_ustore_16, 2) | | 1213 | LEAF_NOPROFILE(_ustore_16, 2) |
1175 | UFETCHSTORE_PROLOGUE | | 1214 | UFETCHSTORE_PROLOGUE |
1176 | zap a1, 0xfc, a1 /* kill arg's high bytes */ | | 1215 | zap a1, 0xfc, a1 /* kill arg's high bytes */ |
1177 | inswl a1, a0, a1 /* move it to the right spot */ | | 1216 | inswl a1, a0, a1 /* move it to the right spot */ |
1178 | .L_ustore_16_start: | | 1217 | .L_ustore_16_start: |
1179 | ldq_u t0, 0(a0) /* load quad around short */ | | 1218 | ldq_u t0, 0(a0) /* load quad around short */ |
1180 | mskwl t0, a0, t0 /* kill the target short */ | | 1219 | mskwl t0, a0, t0 /* kill the target short */ |
1181 | or t0, a1, a1 /* put the result together */ | | 1220 | or t0, a1, a1 /* put the result together */ |
1182 | stq_u a1, 0(a0) /* and store it. */ | | 1221 | stq_u a1, 0(a0) /* and store it. */ |
1183 | .L_ustore_16_end: | | 1222 | .L_ustore_16_end: |
1184 | mov zero, v0 | | 1223 | mov zero, v0 |
1185 | RET | | 1224 | RET |
1186 | END(_ustore_16) | | 1225 | END(_ustore_16) |
1187 | | | 1226 | |
1188 | /* LINTSTUB: int _ustore_32(uint32_t *uaddr, uint32_t val); */ | | 1227 | /* LINTSTUB: int _ustore_32(uint32_t *uaddr, uint32_t val); */ |
1189 | LEAF_NOPROFILE(_ustore_32, 2) | | 1228 | LEAF_NOPROFILE(_ustore_32, 2) |
1190 | UFETCHSTORE_PROLOGUE | | 1229 | UFETCHSTORE_PROLOGUE |
1191 | .L_ustore_32_start: | | 1230 | .L_ustore_32_start: |
1192 | stl a1, 0(a0) | | 1231 | stl a1, 0(a0) |
1193 | .L_ustore_32_end: | | 1232 | .L_ustore_32_end: |
1194 | mov zero, v0 | | 1233 | mov zero, v0 |
1195 | RET | | 1234 | RET |
1196 | END(_ustore_32) | | 1235 | END(_ustore_32) |
1197 | | | 1236 | |
1198 | /* LINTSTUB: int _ustore_64(uint64_t *uaddr, uint64_t val); */ | | 1237 | /* LINTSTUB: int _ustore_64(uint64_t *uaddr, uint64_t val); */ |
1199 | LEAF_NOPROFILE(_ustore_64, 2) | | 1238 | LEAF_NOPROFILE(_ustore_64, 2) |
1200 | UFETCHSTORE_PROLOGUE | | 1239 | UFETCHSTORE_PROLOGUE |
1201 | .L_ustore_64_start: | | 1240 | .L_ustore_64_start: |
1202 | stq a1, 0(a0) | | 1241 | stq a1, 0(a0) |
1203 | .L_ustore_64_end: | | 1242 | .L_ustore_64_end: |
1204 | mov zero, v0 | | 1243 | mov zero, v0 |
1205 | RET | | 1244 | RET |
1206 | END(_ustore_64) | | 1245 | END(_ustore_64) |
1207 | | | 1246 | |
1208 | LEAF_NOPROFILE(ufetchstoreerr_efault, 0) | | 1247 | LEAF_NOPROFILE(ufetchstoreerr_efault, 0) |
1209 | ldiq v0, EFAULT /* return EFAULT. */ | | 1248 | ldiq v0, EFAULT /* return EFAULT. */ |
1210 | XLEAF(ufetchstoreerr, 0) | | 1249 | XLEAF(ufetchstoreerr, 0) |
1211 | LDGP(pv) | | 1250 | LDGP(pv) |
1212 | RET | | 1251 | RET |
1213 | END(ufetchstoreerr_efault) | | 1252 | END(ufetchstoreerr_efault) |
1214 | | | 1253 | |
1215 | /**************************************************************************/ | | 1254 | /**************************************************************************/ |
1216 | | | 1255 | |
1217 | /* | | 1256 | /* |
1218 | * int _ucas_32(volatile uint32_t *uptr, uint32_t old, uint32_t new, | | 1257 | * int _ucas_32(volatile uint32_t *uptr, uint32_t old, uint32_t new, |
1219 | * uint32_t *ret); | | 1258 | * uint32_t *ret); |
1220 | */ | | 1259 | */ |
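| /* | | | /*
| * Editorial sketch (not in the source): compare-and-swap on a user | | | * Editorial sketch (not in the source): compare-and-swap on a user
| * word via load-locked/store-conditional, retrying when the | | | * word via load-locked/store-conditional, retrying when the
| * conditional store fails: | | | * conditional store fails:
| * | | | *
| * do { | | | * do {
| *     val = load_locked(uptr);  -- ldl_l | | | *     val = load_locked(uptr);  -- ldl_l
| *     if (val != old) | | | *     if (val != old)
| *         break;                -- no store attempted | | | *         break;                -- no store attempted
| * } while (store_conditional(uptr, new) == 0);  -- stl_c | | | * } while (store_conditional(uptr, new) == 0);  -- stl_c
| * *ret = val;  -- caller infers success from *ret == old | | | * *ret = val;  -- caller infers success from *ret == old
| * return 0;    -- EFAULT if uptr is not a user address | | | * return 0;    -- EFAULT if uptr is not a user address
| */ | | | */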
1221 | LEAF_NOPROFILE(_ucas_32, 4) | | 1260 | LEAF_NOPROFILE(_ucas_32, 4) |
1222 | UFETCHSTORE_PROLOGUE | | 1261 | UFETCHSTORE_PROLOGUE |
1223 | 3: | | 1262 | 3: |
1224 | .Lucas_32_start: | | 1263 | .Lucas_32_start: |
1225 | mov a2, t2 | | 1264 | mov a2, t2 |
1226 | ldl_l t0, 0(a0) /* t0 = *uptr */ | | 1265 | ldl_l t0, 0(a0) /* t0 = *uptr */ |
1227 | cmpeq t0, a1, t1 /* does t0 = old? */ | | 1266 | cmpeq t0, a1, t1 /* does t0 = old? */ |
1228 | beq t1, 1f /* if not, skip */ | | 1267 | beq t1, 1f /* if not, skip */ |
1229 | stl_c t2, 0(a0) /* *uptr ~= new */ | | 1268 | stl_c t2, 0(a0) /* *uptr ~= new */ |
1230 | .Lucas_32_end: | | 1269 | .Lucas_32_end: |
1231 | beq t2, 2f /* did the stl_c succeed? */ | | 1270 | beq t2, 2f /* did the stl_c succeed? */
1232 | 1: | | 1271 | 1: |
1233 | stl t0, 0(a3) /* *ret = t0 */ | | 1272 | stl t0, 0(a3) /* *ret = t0 */ |
1234 | mov zero, v0 | | 1273 | mov zero, v0 |
1235 | RET | | 1274 | RET |
1236 | 2: | | 1275 | 2: |
1237 | br 3b | | 1276 | br 3b |
1238 | END(_ucas_32) | | 1277 | END(_ucas_32) |
1239 | | | 1278 | |
1240 | /* | | 1279 | /* |
1241 | * int _ucas_64(volatile uint64_t *uptr, uint64_t old, uint64_t new, | | 1280 | * int _ucas_64(volatile uint64_t *uptr, uint64_t old, uint64_t new, |
1242 | * uint64_t *ret); | | 1281 | * uint64_t *ret); |
1243 | */ | | 1282 | */ |
1244 | LEAF_NOPROFILE(_ucas_64, 4) | | 1283 | LEAF_NOPROFILE(_ucas_64, 4) |
1245 | UFETCHSTORE_PROLOGUE | | 1284 | UFETCHSTORE_PROLOGUE |
1246 | 3: | | 1285 | 3: |
1247 | .Lucas_64_start: | | 1286 | .Lucas_64_start: |
1248 | mov a2, t2 | | 1287 | mov a2, t2 |
1249 | ldq_l t0, 0(a0) /* t0 = *uptr */ | | 1288 | ldq_l t0, 0(a0) /* t0 = *uptr */ |
1250 | cmpeq t0, a1, t1 /* does t0 = old? */ | | 1289 | cmpeq t0, a1, t1 /* does t0 = old? */ |
1251 | beq t1, 1f /* if not, skip */ | | 1290 | beq t1, 1f /* if not, skip */ |
1252 | stq_c t2, 0(a0) /* *uptr ~= new */ | | 1291 | stq_c t2, 0(a0) /* *uptr ~= new */ |
1253 | .Lucas_64_end: | | 1292 | .Lucas_64_end: |
1254 | beq t2, 2f /* did the stq_c succeed? */ | | 1293 | beq t2, 2f /* did the stq_c succeed? */
1255 | 1: | | 1294 | 1: |
1256 | stq t0, 0(a3) /* *ret = t0 */ | | 1295 | stq t0, 0(a3) /* *ret = t0 */ |
1257 | mov zero, v0 | | 1296 | mov zero, v0 |
1258 | RET | | 1297 | RET |
1259 | 2: | | 1298 | 2: |
1260 | br 3b | | 1299 | br 3b |
1261 | END(_ucas_64) | | 1300 | END(_ucas_64) |
1262 | | | 1301 | |
1263 | /**************************************************************************/ | | 1302 | /**************************************************************************/ |
1264 | | | 1303 | |
1265 | /* | | 1304 | /* |
1266 | * Fault table of user access functions for trap(). | | 1305 | * Fault table of user access functions for trap(). |
1267 | */ | | 1306 | */ |
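| /* | | | /*
| * Editorial note (not in the source): each entry is three quads -- | | | * Editorial note (not in the source): each entry is three quads --
| * the first and last faultable PC of a routine, then the handler | | | * the first and last faultable PC of a routine, then the handler
| * trap() should resume at -- and a zero quad terminates the table. | | | * trap() should resume at -- and a zero quad terminates the table.
| */ | | | */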
1268 | .section ".rodata" | | 1307 | .section ".rodata" |
1269 | .globl onfault_table | | 1308 | .globl onfault_table |
1270 | onfault_table: | | 1309 | onfault_table: |
1271 | .quad .L_ufetch_8_start | | 1310 | .quad .L_ufetch_8_start |
1272 | .quad .L_ufetch_8_end | | 1311 | .quad .L_ufetch_8_end |
1273 | .quad ufetchstoreerr | | 1312 | .quad ufetchstoreerr |
1274 | | | 1313 | |
1275 | .quad .L_ufetch_16_start | | 1314 | .quad .L_ufetch_16_start |
1276 | .quad .L_ufetch_16_end | | 1315 | .quad .L_ufetch_16_end |
1277 | .quad ufetchstoreerr | | 1316 | .quad ufetchstoreerr |
1278 | | | 1317 | |
1279 | .quad .L_ufetch_32_start | | 1318 | .quad .L_ufetch_32_start |
1280 | .quad .L_ufetch_32_end | | 1319 | .quad .L_ufetch_32_end |
1281 | .quad ufetchstoreerr | | 1320 | .quad ufetchstoreerr |
1282 | | | 1321 | |
1283 | .quad .L_ufetch_64_start | | 1322 | .quad .L_ufetch_64_start |
1284 | .quad .L_ufetch_64_end | | 1323 | .quad .L_ufetch_64_end |
1285 | .quad ufetchstoreerr | | 1324 | .quad ufetchstoreerr |
1286 | | | 1325 | |
1287 | .quad .L_ustore_8_start | | 1326 | .quad .L_ustore_8_start |
1288 | .quad .L_ustore_8_end | | 1327 | .quad .L_ustore_8_end |
1289 | .quad ufetchstoreerr | | 1328 | .quad ufetchstoreerr |
1290 | | | 1329 | |
1291 | .quad .L_ustore_16_start | | 1330 | .quad .L_ustore_16_start |
1292 | .quad .L_ustore_16_end | | 1331 | .quad .L_ustore_16_end |
1293 | .quad ufetchstoreerr | | 1332 | .quad ufetchstoreerr |
1294 | | | 1333 | |
1295 | .quad .L_ustore_32_start | | 1334 | .quad .L_ustore_32_start |
1296 | .quad .L_ustore_32_end | | 1335 | .quad .L_ustore_32_end |
1297 | .quad ufetchstoreerr | | 1336 | .quad ufetchstoreerr |
1298 | | | 1337 | |
1299 | .quad .L_ustore_64_start | | 1338 | .quad .L_ustore_64_start |
1300 | .quad .L_ustore_64_end | | 1339 | .quad .L_ustore_64_end |
1301 | .quad ufetchstoreerr | | 1340 | .quad ufetchstoreerr |
1302 | | | 1341 | |
1303 | .quad .Lucas_32_start | | 1342 | .quad .Lucas_32_start |
1304 | .quad .Lucas_32_end | | 1343 | .quad .Lucas_32_end |
1305 | .quad ufetchstoreerr | | 1344 | .quad ufetchstoreerr |
1306 | | | 1345 | |
1307 | .quad .Lucas_64_start | | 1346 | .quad .Lucas_64_start |
1308 | .quad .Lucas_64_end | | 1347 | .quad .Lucas_64_end |
1309 | .quad ufetchstoreerr | | 1348 | .quad ufetchstoreerr |
1310 | | | 1349 | |
1311 | .quad 0 | | 1350 | .quad 0 |
1312 | | | 1351 | |
1313 | .text | | 1352 | .text |
1314 | | | 1353 | |
1315 | /**************************************************************************/ | | 1354 | /**************************************************************************/ |
1316 | | | 1355 | |
1317 | /* | | 1356 | /* |
1318 | * console 'restart' routine to be placed in HWRPB. | | 1357 | * console 'restart' routine to be placed in HWRPB. |
1319 | */ | | 1358 | */ |
1320 | LEAF(XentRestart, 1) /* XXX should be NESTED */ | | 1359 | LEAF(XentRestart, 1) /* XXX should be NESTED */ |
1321 | .set noat | | 1360 | .set noat |
1322 | lda sp,-(FRAME_SIZE*8)(sp) | | 1361 | lda sp,-(FRAME_SIZE*8)(sp) |
1323 | stq at_reg,(FRAME_AT*8)(sp) | | 1362 | stq at_reg,(FRAME_AT*8)(sp) |
1324 | .set at | | 1363 | .set at |
1325 | stq v0,(FRAME_V0*8)(sp) | | 1364 | stq v0,(FRAME_V0*8)(sp) |
1326 | stq a0,(FRAME_A0*8)(sp) | | 1365 | stq a0,(FRAME_A0*8)(sp) |
1327 | stq a1,(FRAME_A1*8)(sp) | | 1366 | stq a1,(FRAME_A1*8)(sp) |
1328 | stq a2,(FRAME_A2*8)(sp) | | 1367 | stq a2,(FRAME_A2*8)(sp) |
1329 | stq a3,(FRAME_A3*8)(sp) | | 1368 | stq a3,(FRAME_A3*8)(sp) |
1330 | stq a4,(FRAME_A4*8)(sp) | | 1369 | stq a4,(FRAME_A4*8)(sp) |
1331 | stq a5,(FRAME_A5*8)(sp) | | 1370 | stq a5,(FRAME_A5*8)(sp) |
1332 | stq s0,(FRAME_S0*8)(sp) | | 1371 | stq s0,(FRAME_S0*8)(sp) |
1333 | stq s1,(FRAME_S1*8)(sp) | | 1372 | stq s1,(FRAME_S1*8)(sp) |
1334 | stq s2,(FRAME_S2*8)(sp) | | 1373 | stq s2,(FRAME_S2*8)(sp) |
1335 | stq s3,(FRAME_S3*8)(sp) | | 1374 | stq s3,(FRAME_S3*8)(sp) |
1336 | stq s4,(FRAME_S4*8)(sp) | | 1375 | stq s4,(FRAME_S4*8)(sp) |
1337 | stq s5,(FRAME_S5*8)(sp) | | 1376 | stq s5,(FRAME_S5*8)(sp) |
1338 | stq s6,(FRAME_S6*8)(sp) | | 1377 | stq s6,(FRAME_S6*8)(sp) |
1339 | stq t0,(FRAME_T0*8)(sp) | | 1378 | stq t0,(FRAME_T0*8)(sp) |
1340 | stq t1,(FRAME_T1*8)(sp) | | 1379 | stq t1,(FRAME_T1*8)(sp) |
1341 | stq t2,(FRAME_T2*8)(sp) | | 1380 | stq t2,(FRAME_T2*8)(sp) |
1342 | stq t3,(FRAME_T3*8)(sp) | | 1381 | stq t3,(FRAME_T3*8)(sp) |
1343 | stq t4,(FRAME_T4*8)(sp) | | 1382 | stq t4,(FRAME_T4*8)(sp) |
1344 | stq t5,(FRAME_T5*8)(sp) | | 1383 | stq t5,(FRAME_T5*8)(sp) |
1345 | stq t6,(FRAME_T6*8)(sp) | | 1384 | stq t6,(FRAME_T6*8)(sp) |
1346 | stq t7,(FRAME_T7*8)(sp) | | 1385 | stq t7,(FRAME_T7*8)(sp) |
1347 | stq t8,(FRAME_T8*8)(sp) | | 1386 | stq t8,(FRAME_T8*8)(sp) |
1348 | stq t9,(FRAME_T9*8)(sp) | | 1387 | stq t9,(FRAME_T9*8)(sp) |
1349 | stq t10,(FRAME_T10*8)(sp) | | 1388 | stq t10,(FRAME_T10*8)(sp) |
1350 | stq t11,(FRAME_T11*8)(sp) | | 1389 | stq t11,(FRAME_T11*8)(sp) |
1351 | stq t12,(FRAME_T12*8)(sp) | | 1390 | stq t12,(FRAME_T12*8)(sp) |
1352 | stq ra,(FRAME_RA*8)(sp) | | 1391 | stq ra,(FRAME_RA*8)(sp) |
1353 | | | 1392 | |
1354 | br pv,1f | | 1393 | br pv,1f |
1355 | 1: LDGP(pv) | | 1394 | 1: LDGP(pv) |
1356 | | | 1395 | |
1357 | mov sp,a0 | | 1396 | mov sp,a0 |
1358 | CALL(console_restart) | | 1397 | CALL(console_restart) |
1359 | | | 1398 | |
1360 | call_pal PAL_halt | | 1399 | call_pal PAL_halt |
1361 | END(XentRestart) | | 1400 | END(XentRestart) |
1362 | | | 1401 | |
1363 | /**************************************************************************/ | | 1402 | /**************************************************************************/ |
1364 | | | 1403 | |
1365 | /* | | 1404 | /* |
1366 | * Kernel setjmp and longjmp. Rather minimalist. | | 1405 | * Kernel setjmp and longjmp. Rather minimalist. |
1367 | * | | 1406 | * |
1368 | * longjmp(label_t *a) | | 1407 | * longjmp(label_t *a) |
1369 | * will generate a "return (1)" from the last call to | | 1408 | * will generate a "return (1)" from the last call to |
1370 | * setjmp(label_t *a) | | 1409 | * setjmp(label_t *a) |
1371 | * by restoring registers from the stack. | | 1410 | * by restoring registers from the stack.
1372 | */ | | 1411 | */ |
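| /* | | | /*
| * Editorial note (not in the source): the label_t is used as ten | | | * Editorial note (not in the source): the label_t is used as ten
| * quads -- ra, s0..s6, sp -- plus the magic cookie | | | * quads -- ra, s0..s6, sp -- plus the magic cookie
| * 0xbeeffedadeadbabe that longjmp() verifies before restoring. | | | * 0xbeeffedadeadbabe that longjmp() verifies before restoring.
| */ | | | */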
1373 | | | 1412 | |
1374 | .set noreorder | | 1413 | .set noreorder |
1375 | | | 1414 | |
1376 | LEAF(setjmp, 1) | | 1415 | LEAF(setjmp, 1) |
1377 | LDGP(pv) | | 1416 | LDGP(pv) |
1378 | | | 1417 | |
1379 | stq ra, (0 * 8)(a0) /* return address */ | | 1418 | stq ra, (0 * 8)(a0) /* return address */ |
1380 | stq s0, (1 * 8)(a0) /* callee-saved registers */ | | 1419 | stq s0, (1 * 8)(a0) /* callee-saved registers */ |
1381 | stq s1, (2 * 8)(a0) | | 1420 | stq s1, (2 * 8)(a0) |
1382 | stq s2, (3 * 8)(a0) | | 1421 | stq s2, (3 * 8)(a0) |
1383 | stq s3, (4 * 8)(a0) | | 1422 | stq s3, (4 * 8)(a0) |
1384 | stq s4, (5 * 8)(a0) | | 1423 | stq s4, (5 * 8)(a0) |
1385 | stq s5, (6 * 8)(a0) | | 1424 | stq s5, (6 * 8)(a0) |
1386 | stq s6, (7 * 8)(a0) | | 1425 | stq s6, (7 * 8)(a0) |
1387 | stq sp, (8 * 8)(a0) | | 1426 | stq sp, (8 * 8)(a0) |
1388 | | | 1427 | |
1389 | ldiq t0, 0xbeeffedadeadbabe /* set magic number */ | | 1428 | ldiq t0, 0xbeeffedadeadbabe /* set magic number */ |
1390 | stq t0, (9 * 8)(a0) | | 1429 | stq t0, (9 * 8)(a0) |
1391 | | | 1430 | |
1392 | mov zero, v0 /* return zero */ | | 1431 | mov zero, v0 /* return zero */ |
1393 | RET | | 1432 | RET |
1394 | END(setjmp) | | 1433 | END(setjmp) |
1395 | | | 1434 | |
1396 | LEAF(longjmp, 1) | | 1435 | LEAF(longjmp, 1) |
1397 | LDGP(pv) | | 1436 | LDGP(pv) |
1398 | | | 1437 | |
1399 | ldiq t0, 0xbeeffedadeadbabe /* check magic number */ | | 1438 | ldiq t0, 0xbeeffedadeadbabe /* check magic number */ |
1400 | ldq t1, (9 * 8)(a0) | | 1439 | ldq t1, (9 * 8)(a0) |
1401 | cmpeq t0, t1, t0 | | 1440 | cmpeq t0, t1, t0 |
1402 | beq t0, longjmp_botch /* if bad, punt */ | | 1441 | beq t0, longjmp_botch /* if bad, punt */ |
1403 | | | 1442 | |
1404 | ldq ra, (0 * 8)(a0) /* return address */ | | 1443 | ldq ra, (0 * 8)(a0) /* return address */ |
1405 | ldq s0, (1 * 8)(a0) /* callee-saved registers */ | | 1444 | ldq s0, (1 * 8)(a0) /* callee-saved registers */ |
1406 | ldq s1, (2 * 8)(a0) | | 1445 | ldq s1, (2 * 8)(a0) |
1407 | ldq s2, (3 * 8)(a0) | | 1446 | ldq s2, (3 * 8)(a0) |
1408 | ldq s3, (4 * 8)(a0) | | 1447 | ldq s3, (4 * 8)(a0) |
1409 | ldq s4, (5 * 8)(a0) | | 1448 | ldq s4, (5 * 8)(a0) |
1410 | ldq s5, (6 * 8)(a0) | | 1449 | ldq s5, (6 * 8)(a0) |
1411 | ldq s6, (7 * 8)(a0) | | 1450 | ldq s6, (7 * 8)(a0) |
1412 | ldq sp, (8 * 8)(a0) | | 1451 | ldq sp, (8 * 8)(a0) |
1413 | | | 1452 | |
1414 | ldiq v0, 1 | | 1453 | ldiq v0, 1 |
1415 | RET | | 1454 | RET |
1416 | | | 1455 | |
1417 | longjmp_botch: | | 1456 | longjmp_botch: |
1418 | lda a0, longjmp_botchmsg | | 1457 | lda a0, longjmp_botchmsg |
1419 | mov ra, a1 | | 1458 | mov ra, a1 |
1420 | CALL(panic) | | 1459 | CALL(panic) |
1421 | call_pal PAL_bugchk | | 1460 | call_pal PAL_bugchk |
1422 | | | 1461 | |
1423 | .data | | 1462 | .data |
1424 | longjmp_botchmsg: | | 1463 | longjmp_botchmsg: |
1425 | .asciz "longjmp botch from %p" | | 1464 | .asciz "longjmp botch from %p" |
1426 | .text | | 1465 | .text |
1427 | END(longjmp) | | 1466 | END(longjmp) |
1428 | | | 1467 | |
1429 | /* | | 1468 | /* |
1430 | * void sts(int rn, u_int32_t *rval); | | 1469 | * void sts(int rn, u_int32_t *rval); |
1431 | * void stt(int rn, u_int64_t *rval); | | 1470 | * void stt(int rn, u_int64_t *rval); |
1432 | * void lds(int rn, u_int32_t *rval); | | 1471 | * void lds(int rn, u_int32_t *rval); |
1433 | * void ldt(int rn, u_int64_t *rval); | | 1472 | * void ldt(int rn, u_int64_t *rval); |
1434 | */ | | 1473 | */ |
1435 | | | 1474 | |
1436 | .macro make_freg_util name, op | | 1475 | .macro make_freg_util name, op |
1437 | LEAF(alpha_\name, 2) | | 1476 | LEAF(alpha_\name, 2) |
1438 | and a0, 0x1f, a0 | | 1477 | and a0, 0x1f, a0 |
1439 | s8addq a0, pv, pv | | 1478 | s8addq a0, pv, pv |
1440 | addq pv, 1f - alpha_\name, pv | | 1479 | addq pv, 1f - alpha_\name, pv |
1441 | jmp (pv) | | 1480 | jmp (pv) |
1442 | 1: | | 1481 | 1: |
1443 | rn = 0 | | 1482 | rn = 0 |
1444 | .rept 32 | | 1483 | .rept 32 |
1445 | \op $f0 + rn, 0(a1) | | 1484 | \op $f0 + rn, 0(a1) |
1446 | RET | | 1485 | RET |
1447 | rn = rn + 1 | | 1486 | rn = rn + 1 |
1448 | .endr | | 1487 | .endr |
1449 | END(alpha_\name) | | 1488 | END(alpha_\name) |
1450 | .endm | | 1489 | .endm |
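| /* | | | /*
| * Editorial note (not in the source): the FP register number must | | | * Editorial note (not in the source): the FP register number must
| * be encoded in the instruction itself, so the macro computes a | | | * be encoded in the instruction itself, so the macro computes a
| * jump into a table of 32 two-instruction (8-byte) slots, one per | | | * jump into a table of 32 two-instruction (8-byte) slots, one per
| * register; s8addq scales the register number by 8. | | | * register; s8addq scales the register number by 8.
| */ | | | */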
1451 | /* | | 1490 | /* |
1452 | LEAF(alpha_sts, 2) | | 1491 | LEAF(alpha_sts, 2) |
1453 | LEAF(alpha_stt, 2) | | 1492 | LEAF(alpha_stt, 2) |
1454 | LEAF(alpha_lds, 2) | | 1493 | LEAF(alpha_lds, 2) |
1455 | LEAF(alpha_ldt, 2) | | 1494 | LEAF(alpha_ldt, 2) |
1456 | */ | | 1495 | */ |
1457 | make_freg_util sts, sts | | 1496 | make_freg_util sts, sts |
1458 | make_freg_util stt, stt | | 1497 | make_freg_util stt, stt |
1459 | make_freg_util lds, lds | | 1498 | make_freg_util lds, lds |
1460 | make_freg_util ldt, ldt | | 1499 | make_freg_util ldt, ldt |
1461 | | | 1500 | |
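| /* | | | /*
| * Editorial note (not in the source): the FPCR is reachable only | | | * Editorial note (not in the source): the FPCR is reachable only
| * via mf_fpcr/mt_fpcr on a floating-point register, so both | | | * via mf_fpcr/mt_fpcr on a floating-point register, so both
| * routines bounce the value through $f30 and save the caller's | | | * routines bounce the value through $f30 and save the caller's
| * $f30 on the stack around the access. | | | * $f30 on the stack around the access.
| */ | | | */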
1462 | LEAF(alpha_read_fpcr, 0); f30save = 0; rettmp = 8; framesz = 16 | | 1501 | LEAF(alpha_read_fpcr, 0); f30save = 0; rettmp = 8; framesz = 16 |
1463 | lda sp, -framesz(sp) | | 1502 | lda sp, -framesz(sp) |
1464 | stt $f30, f30save(sp) | | 1503 | stt $f30, f30save(sp) |
1465 | mf_fpcr $f30 | | 1504 | mf_fpcr $f30 |
1466 | stt $f30, rettmp(sp) | | 1505 | stt $f30, rettmp(sp) |
1467 | ldt $f30, f30save(sp) | | 1506 | ldt $f30, f30save(sp) |
1468 | ldq v0, rettmp(sp) | | 1507 | ldq v0, rettmp(sp) |
1469 | lda sp, framesz(sp) | | 1508 | lda sp, framesz(sp) |
1470 | RET | | 1509 | RET |
1471 | END(alpha_read_fpcr) | | 1510 | END(alpha_read_fpcr) |
1472 | | | 1511 | |
1473 | LEAF(alpha_write_fpcr, 1); f30save = 0; fpcrtmp = 8; framesz = 16 | | 1512 | LEAF(alpha_write_fpcr, 1); f30save = 0; fpcrtmp = 8; framesz = 16 |
1474 | lda sp, -framesz(sp) | | 1513 | lda sp, -framesz(sp) |
1475 | stq a0, fpcrtmp(sp) | | 1514 | stq a0, fpcrtmp(sp) |
1476 | stt $f30, f30save(sp) | | 1515 | stt $f30, f30save(sp) |
1477 | ldt $f30, fpcrtmp(sp) | | 1516 | ldt $f30, fpcrtmp(sp) |
1478 | mt_fpcr $f30 | | 1517 | mt_fpcr $f30 |
1479 | ldt $f30, f30save(sp) | | 1518 | ldt $f30, f30save(sp) |
1480 | lda sp, framesz(sp) | | 1519 | lda sp, framesz(sp) |
1481 | RET | | 1520 | RET |
1482 | END(alpha_write_fpcr) | | 1521 | END(alpha_write_fpcr) |