Wed Jan 23 21:03:25 2013 UTC ()
Fix sparc64_ipi_ccall by adding proper trap setup.
Fixes xcall(9) problems, as exposed by pserialize(9). Noticed by
J. Hannken-Illjes; cause pointed out by Takeshi Nakayama.


(martin)
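
The commit message above ties this fix to xcall(9), NetBSD's machine-independent
cross-call facility, which pserialize(9) builds on. For orientation only, here is
a minimal sketch of how a kernel subsystem typically drives xcall(9); it is not
part of this commit, and the callback name and body are hypothetical.

#include <sys/param.h>
#include <sys/xcall.h>

static void
example_xc_func(void *arg1, void *arg2)
{
	/* Hypothetical callback: runs once on every CPU. */
}

static void
example_sync_all_cpus(void)
{
	uint64_t where;

	/* Queue the (low-priority) cross-call on all CPUs... */
	where = xc_broadcast(0, example_xc_func, NULL, NULL);
	/* ...and block until every CPU has executed it. */
	xc_wait(where);
}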
diff -r1.346 -r1.347 src/sys/arch/sparc64/sparc64/locore.s
diff -r1.3 -r1.4 src/sys/arch/sparc64/sparc64/mp_subr.S

cvs diff -r1.346 -r1.347 src/sys/arch/sparc64/sparc64/locore.s

--- src/sys/arch/sparc64/sparc64/locore.s 2013/01/23 12:19:02 1.346
+++ src/sys/arch/sparc64/sparc64/locore.s 2013/01/23 21:03:25 1.347
@@ -1,1000 +1,1000 @@
-/* $NetBSD: locore.s,v 1.346 2013/01/23 12:19:02 martin Exp $ */
+/* $NetBSD: locore.s,v 1.347 2013/01/23 21:03:25 martin Exp $ */
 
 /*
  * Copyright (c) 2006-2010 Matthew R. Green
  * Copyright (c) 1996-2002 Eduardo Horvath
  * Copyright (c) 1996 Paul Kranenburg
  * Copyright (c) 1996
  *	The President and Fellows of Harvard College.
  *	All rights reserved.
  * Copyright (c) 1992, 1993
  *	The Regents of the University of California.
  *	All rights reserved.
  *
  * This software was developed by the Computer Systems Engineering group
  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
  * contributed to Berkeley.
  *
  * All advertising materials mentioning features or use of this software
  * must display the following acknowledgement:
  *	This product includes software developed by the University of
  *	California, Lawrence Berkeley Laboratory.
  *	This product includes software developed by Harvard University.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the
  *    distribution.
  * 3. All advertising materials mentioning features or use of this
  *    software must display the following acknowledgement:
  *	This product includes software developed by the University of
  *	California, Berkeley and its contributors.
  *	This product includes software developed by Harvard University.
  *	This product includes software developed by Paul Kranenburg.
  * 4. Neither the name of the University nor the names of its
  *    contributors may be used to endorse or promote products derived
  *    from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
  * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
  * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  * DAMAGE.
  *
  *	@(#)locore.s	8.4 (Berkeley) 12/10/93
  */
 
 #undef	PARANOID		/* Extremely expensive consistency checks */
 #undef	NO_VCACHE		/* Map w/D$ disabled */
 #undef	TRAPSTATS		/* Count traps */
 #undef	TRAPS_USE_IG		/* Use Interrupt Globals for all traps */
 #define	HWREF			/* Track ref/mod bits in trap handlers */
 #undef	DCACHE_BUG		/* Flush D$ around ASI_PHYS accesses */
 #undef	NO_TSB			/* Don't use TSB */
 #define	BB_ERRATA_1		/* writes to TICK_CMPR may fail */
 #undef	TLB_FLUSH_LOWVA		/* also flush 32-bit entries from the MMU */
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
 #include "opt_multiprocessor.h"
 #include "opt_compat_netbsd.h"
 #include "opt_compat_netbsd32.h"
 #include "opt_lockdebug.h"
 
 #include "assym.h"
 #include <machine/param.h>
 #include <machine/types.h>
 #include <sparc64/sparc64/intreg.h>
 #include <sparc64/sparc64/timerreg.h>
 #include <machine/ctlreg.h>
 #include <machine/psl.h>
 #include <machine/signal.h>
 #include <machine/trap.h>
 #include <machine/frame.h>
 #include <machine/pte.h>
 #include <machine/pmap.h>
 #include <machine/intr.h>
 #include <machine/asm.h>
 #include <machine/locore.h>
 #include <sys/syscall.h>
 
 #define BLOCK_SIZE SPARC64_BLOCK_SIZE
 #define BLOCK_ALIGN SPARC64_BLOCK_ALIGN
 
 #include "ksyms.h"
 
 #if 1
 /*
  * Try to issue an elf note to ask the Solaris
  * bootloader to align the kernel properly.
  */
 	.section	.note
 	.word	0x0d
 	.word	4		! Dunno why
 	.word	1
 0:	.asciz "SUNW Solaris"
 1:
 	.align	4
 	.word	0x0400000
 #endif
 
 	.register	%g2,#scratch
 	.register	%g3,#scratch
 
 
 	.data
 	.globl	_C_LABEL(data_start)
 _C_LABEL(data_start):			! Start of data segment
 
 #ifdef KGDB
 /*
  * Another item that must be aligned, easiest to put it here.
  */
 KGDB_STACK_SIZE = 2048
 	.globl	_C_LABEL(kgdb_stack)
 _C_LABEL(kgdb_stack):
 	.space	KGDB_STACK_SIZE		! hope this is enough
 #endif
 
 #ifdef NOTDEF_DEBUG
 /*
  * This stack is used when we detect kernel stack corruption.
  */
 	.space	USPACE
 	.align	16
 panicstack:
 #endif
 
 /*
  * romp is the prom entry pointer
  * romtba is the prom trap table base address
  */
 	.globl	romp
 romp:	POINTER	0
 	.globl	romtba
 romtba:	POINTER	0
 
 	_ALIGN
 	.text
 
 /*
  * The v9 trap frame is stored in the special trap registers. The
  * register window is only modified on window overflow, underflow,
  * and clean window traps, where it points to the register window
  * needing service. Traps have space for 8 instructions, except for
  * the window overflow, underflow, and clean window traps which are
  * 32 instructions long, large enough to in-line.
  *
  * The spitfire CPU (Ultra I) has 4 different sets of global registers.
  * (blah blah...)
  *
  * I used to generate these numbers by address arithmetic, but gas's
  * expression evaluator has about as much sense as your average slug
  * (oddly enough, the code looks about as slimy too). Thus, all the
  * trap numbers are given as arguments to the trap macros. This means
  * there is one line per trap. Sigh.
  *
  * Hardware interrupt vectors can be `linked'---the linkage is to regular
  * C code---or rewired to fast in-window handlers. The latter are good
  * for unbuffered hardware like the Zilog serial chip and the AMD audio
  * chip, where many interrupts can be handled trivially with pseudo-DMA
  * or similar. Only one `fast' interrupt can be used per level, however,
  * and direct and `fast' interrupts are incompatible. Routines in intr.c
  * handle setting these, with optional paranoia.
  */
 
 /*
  *	TA8 -- trap align for 8 instruction traps
  *	TA32 -- trap align for 32 instruction traps
  */
 #define TA8	.align 32
 #define TA32	.align 128
 
 /*
  * v9 trap macros:
  *
  * We have a problem with v9 traps; we have no registers to put the
  * trap type into. But we do have a %tt register which already has
  * that information. Trap types in these macros are all dummys.
  */
 	/* regular vectored traps */
 
 #define	VTRAP(type, label) \
 	ba,a,pt %icc,label; nop; NOTREACHED; TA8
 
 	/* hardware interrupts (can be linked or made `fast') */
 #define	HARDINT4U(lev) \
 	VTRAP(lev, _C_LABEL(sparc_interrupt))
 
 	/* software interrupts (may not be made direct, sorry---but you
 	   should not be using them trivially anyway) */
 #define	SOFTINT4U(lev, bit) \
 	HARDINT4U(lev)
 
 	/* traps that just call trap() */
 #define	TRAP(type)	VTRAP(type, slowtrap)
 
 	/* architecturally undefined traps (cause panic) */
 #ifndef DEBUG
 #define	UTRAP(type)	sir; VTRAP(type, slowtrap)
 #else
 #define	UTRAP(type)	VTRAP(type, slowtrap)
 #endif
 
 	/* software undefined traps (may be replaced) */
 #define	STRAP(type)	VTRAP(type, slowtrap)
 
 /* breakpoint acts differently under kgdb */
 #ifdef KGDB
 #define	BPT		VTRAP(T_BREAKPOINT, bpt)
 #define	BPT_KGDB_EXEC	VTRAP(T_KGDB_EXEC, bpt)
 #else
 #define	BPT		TRAP(T_BREAKPOINT)
 #define	BPT_KGDB_EXEC	TRAP(T_KGDB_EXEC)
 #endif
 
 #define	SYSCALL		VTRAP(0x100, syscall_setup)
 #ifdef notyet
 #define	ZS_INTERRUPT	ba,a,pt %icc, zshard; nop; TA8
 #else
 #define	ZS_INTERRUPT4U	HARDINT4U(12)
 #endif
 
 
 /*
  * Macro to clear %tt so we don't get confused with old traps.
  */
 #ifdef DEBUG
 #define CLRTT	wrpr %g0,0x1ff,%tt
 #else
 #define CLRTT
 #endif
 
 /*
  * Here are some oft repeated traps as macros.
  */
 
 	/* spill a 64-bit register window */
 #define SPILL64(label,as) \
 label:	\
 	wr %g0, as, %asi; \
 	stxa %l0, [%sp+BIAS+0x00]%asi; \
 	stxa %l1, [%sp+BIAS+0x08]%asi; \
 	stxa %l2, [%sp+BIAS+0x10]%asi; \
 	stxa %l3, [%sp+BIAS+0x18]%asi; \
 	stxa %l4, [%sp+BIAS+0x20]%asi; \
 	stxa %l5, [%sp+BIAS+0x28]%asi; \
 	stxa %l6, [%sp+BIAS+0x30]%asi; \
 	\
 	stxa %l7, [%sp+BIAS+0x38]%asi; \
 	stxa %i0, [%sp+BIAS+0x40]%asi; \
 	stxa %i1, [%sp+BIAS+0x48]%asi; \
 	stxa %i2, [%sp+BIAS+0x50]%asi; \
 	stxa %i3, [%sp+BIAS+0x58]%asi; \
 	stxa %i4, [%sp+BIAS+0x60]%asi; \
 	stxa %i5, [%sp+BIAS+0x68]%asi; \
 	stxa %i6, [%sp+BIAS+0x70]%asi; \
 	\
 	stxa %i7, [%sp+BIAS+0x78]%asi; \
 	saved; \
 	CLRTT; \
 	retry; \
 	NOTREACHED; \
 	TA32
 
 	/* spill a 32-bit register window */
 #define SPILL32(label,as) \
 label:	\
 	wr %g0, as, %asi; \
 	srl %sp, 0, %sp; /* fixup 32-bit pointers */ \
 	stwa %l0, [%sp+0x00]%asi; \
 	stwa %l1, [%sp+0x04]%asi; \
 	stwa %l2, [%sp+0x08]%asi; \
 	stwa %l3, [%sp+0x0c]%asi; \
 	stwa %l4, [%sp+0x10]%asi; \
 	stwa %l5, [%sp+0x14]%asi; \
 	\
 	stwa %l6, [%sp+0x18]%asi; \
 	stwa %l7, [%sp+0x1c]%asi; \
 	stwa %i0, [%sp+0x20]%asi; \
 	stwa %i1, [%sp+0x24]%asi; \
 	stwa %i2, [%sp+0x28]%asi; \
 	stwa %i3, [%sp+0x2c]%asi; \
 	stwa %i4, [%sp+0x30]%asi; \
 	stwa %i5, [%sp+0x34]%asi; \
 	\
 	stwa %i6, [%sp+0x38]%asi; \
 	stwa %i7, [%sp+0x3c]%asi; \
 	saved; \
 	CLRTT; \
 	retry; \
 	NOTREACHED; \
 	TA32
 
 	/* Spill either 32-bit or 64-bit register window. */
 #define SPILLBOTH(label64,label32,as) \
 	andcc %sp, 1, %g0; \
 	bnz,pt %xcc, label64+4; /* Is it a v9 or v8 stack? */ \
 	wr %g0, as, %asi; \
 	ba,pt %xcc, label32+8; \
 	srl %sp, 0, %sp; /* fixup 32-bit pointers */ \
 	NOTREACHED; \
 	TA32
 
 	/* fill a 64-bit register window */
 #define FILL64(label,as) \
 label: \
 	wr %g0, as, %asi; \
 	ldxa [%sp+BIAS+0x00]%asi, %l0; \
 	ldxa [%sp+BIAS+0x08]%asi, %l1; \
 	ldxa [%sp+BIAS+0x10]%asi, %l2; \
 	ldxa [%sp+BIAS+0x18]%asi, %l3; \
 	ldxa [%sp+BIAS+0x20]%asi, %l4; \
 	ldxa [%sp+BIAS+0x28]%asi, %l5; \
 	ldxa [%sp+BIAS+0x30]%asi, %l6; \
 	\
 	ldxa [%sp+BIAS+0x38]%asi, %l7; \
 	ldxa [%sp+BIAS+0x40]%asi, %i0; \
 	ldxa [%sp+BIAS+0x48]%asi, %i1; \
 	ldxa [%sp+BIAS+0x50]%asi, %i2; \
 	ldxa [%sp+BIAS+0x58]%asi, %i3; \
 	ldxa [%sp+BIAS+0x60]%asi, %i4; \
 	ldxa [%sp+BIAS+0x68]%asi, %i5; \
 	ldxa [%sp+BIAS+0x70]%asi, %i6; \
 	\
 	ldxa [%sp+BIAS+0x78]%asi, %i7; \
 	restored; \
 	CLRTT; \
 	retry; \
 	NOTREACHED; \
 	TA32
 
 	/* fill a 32-bit register window */
 #define FILL32(label,as) \
 label:	\
 	wr %g0, as, %asi; \
 	srl %sp, 0, %sp; /* fixup 32-bit pointers */ \
 	lda [%sp+0x00]%asi, %l0; \
 	lda [%sp+0x04]%asi, %l1; \
 	lda [%sp+0x08]%asi, %l2; \
 	lda [%sp+0x0c]%asi, %l3; \
 	lda [%sp+0x10]%asi, %l4; \
 	lda [%sp+0x14]%asi, %l5; \
 	\
 	lda [%sp+0x18]%asi, %l6; \
 	lda [%sp+0x1c]%asi, %l7; \
 	lda [%sp+0x20]%asi, %i0; \
 	lda [%sp+0x24]%asi, %i1; \
 	lda [%sp+0x28]%asi, %i2; \
 	lda [%sp+0x2c]%asi, %i3; \
 	lda [%sp+0x30]%asi, %i4; \
 	lda [%sp+0x34]%asi, %i5; \
 	\
 	lda [%sp+0x38]%asi, %i6; \
 	lda [%sp+0x3c]%asi, %i7; \
 	restored; \
 	CLRTT; \
 	retry; \
 	NOTREACHED; \
 	TA32
 
 	/* fill either 32-bit or 64-bit register window. */
 #define FILLBOTH(label64,label32,as) \
 	andcc %sp, 1, %i0; \
 	bnz (label64)+4; /* See if it's a v9 stack or v8 */ \
 	wr %g0, as, %asi; \
 	ba (label32)+8; \
 	srl %sp, 0, %sp; /* fixup 32-bit pointers */ \
 	NOTREACHED; \
 	TA32
 
 	.globl start, _C_LABEL(kernel_text)
 	_C_LABEL(kernel_text) = kernel_start	! for kvm_mkdb(8)
 kernel_start:
 	/* Traps from TL=0 -- traps from user mode */
 #ifdef __STDC__
 #define TABLE(name)	user_ ## name
 #else
 #define TABLE(name)	user_/**/name
 #endif
 	.globl _C_LABEL(trapbase)
 _C_LABEL(trapbase):
 	b dostart; nop; TA8	! 000 = reserved -- Use it to boot
 	/* We should not get the next 5 traps */
 	UTRAP(0x001)	! 001 = POR Reset -- ROM should get this
 	UTRAP(0x002)	! 002 = WDR -- ROM should get this
 	UTRAP(0x003)	! 003 = XIR -- ROM should get this
 	UTRAP(0x004)	! 004 = SIR -- ROM should get this
 	UTRAP(0x005)	! 005 = RED state exception
 	UTRAP(0x006); UTRAP(0x007)
 	VTRAP(T_INST_EXCEPT, textfault)	! 008 = instr. access except
 	VTRAP(T_TEXTFAULT, textfault)	! 009 = instr access MMU miss
 	VTRAP(T_INST_ERROR, textfault)	! 00a = instr. access err
 	UTRAP(0x00b); UTRAP(0x00c); UTRAP(0x00d); UTRAP(0x00e); UTRAP(0x00f)
 	TRAP(T_ILLINST)	! 010 = illegal instruction
 	TRAP(T_PRIVINST)	! 011 = privileged instruction
 	UTRAP(0x012)	! 012 = unimplemented LDD
 	UTRAP(0x013)	! 013 = unimplemented STD
 	UTRAP(0x014); UTRAP(0x015); UTRAP(0x016); UTRAP(0x017); UTRAP(0x018)
 	UTRAP(0x019); UTRAP(0x01a); UTRAP(0x01b); UTRAP(0x01c); UTRAP(0x01d)
 	UTRAP(0x01e); UTRAP(0x01f)
 	TRAP(T_FPDISABLED)	! 020 = fp instr, but EF bit off in psr
 	TRAP(T_FP_IEEE_754)	! 021 = ieee 754 exception
 	TRAP(T_FP_OTHER)	! 022 = other fp exception
 	TRAP(T_TAGOF)	! 023 = tag overflow
 	rdpr %cleanwin, %o7	! 024-027 = clean window trap
 	inc %o7	! This handler is in-lined and cannot fault
 #ifdef DEBUG
 	set 0xbadcafe, %l0	! DEBUG -- compiler should not rely on zero-ed registers.
 #else
 	clr %l0
 #endif
 	wrpr %g0, %o7, %cleanwin	! Nucleus (trap&IRQ) code does not need clean windows
 
 	mov %l0,%l1; mov %l0,%l2	! Clear out %l0-%l8 and %o0-%o8 and inc %cleanwin and done
 	mov %l0,%l3; mov %l0,%l4
 #if 0
 #ifdef DIAGNOSTIC
 	!!
 	!! Check the sp redzone
 	!!
 	!! Since we can't spill the current window, we'll just keep
 	!! track of the frame pointer. Problems occur when the routine
 	!! allocates and uses stack storage.
 	!!
 !	rdpr %wstate, %l5	! User stack?
 !	cmp %l5, WSTATE_KERN
 !	bne,pt %icc, 7f
 	 sethi %hi(CPCB), %l5
 	LDPTR [%l5 + %lo(CPCB)], %l5	! If pcb < fp < pcb+sizeof(pcb)
 	inc PCB_SIZE, %l5	! then we have a stack overflow
 	btst %fp, 1	! 64-bit stack?
 	sub %fp, %l5, %l7
 	bnz,a,pt %icc, 1f
 	 inc BIAS, %l7	! Remove BIAS
 1:
 	cmp %l7, PCB_SIZE
 	blu %xcc, cleanwin_overflow
 #endif
 #endif
 	mov %l0, %l5
 	mov %l0, %l6; mov %l0, %l7; mov %l0, %o0; mov %l0, %o1
 
 	mov %l0, %o2; mov %l0, %o3; mov %l0, %o4; mov %l0, %o5;
 	mov %l0, %o6; mov %l0, %o7
 	CLRTT
 	retry; nop; NOTREACHED; TA32
 	TRAP(T_DIV0)	! 028 = divide by zero
 	UTRAP(0x029)	! 029 = internal processor error
 	UTRAP(0x02a); UTRAP(0x02b); UTRAP(0x02c); UTRAP(0x02d); UTRAP(0x02e); UTRAP(0x02f)
 	VTRAP(T_DATAFAULT, winfault)	! 030 = data fetch fault
 	UTRAP(0x031)	! 031 = data MMU miss -- no MMU
 	VTRAP(T_DATA_ERROR, winfault)	! 032 = data access error
 	VTRAP(T_DATA_PROT, winfault)	! 033 = data protection fault
 	TRAP(T_ALIGN)	! 034 = address alignment error -- we could fix it inline...
 	TRAP(T_LDDF_ALIGN)	! 035 = LDDF address alignment error -- we could fix it inline...
 	TRAP(T_STDF_ALIGN)	! 036 = STDF address alignment error -- we could fix it inline...
 	TRAP(T_PRIVACT)	! 037 = privileged action
 	UTRAP(0x038); UTRAP(0x039); UTRAP(0x03a); UTRAP(0x03b); UTRAP(0x03c);
 	UTRAP(0x03d); UTRAP(0x03e); UTRAP(0x03f);
 	VTRAP(T_ASYNC_ERROR, winfault)	! 040 = data fetch fault
 	SOFTINT4U(1, IE_L1)	! 041 = level 1 interrupt
 	HARDINT4U(2)	! 042 = level 2 interrupt
 	HARDINT4U(3)	! 043 = level 3 interrupt
 	SOFTINT4U(4, IE_L4)	! 044 = level 4 interrupt
 	HARDINT4U(5)	! 045 = level 5 interrupt
 	SOFTINT4U(6, IE_L6)	! 046 = level 6 interrupt
 	HARDINT4U(7)	! 047 = level 7 interrupt
 	HARDINT4U(8)	! 048 = level 8 interrupt
 	HARDINT4U(9)	! 049 = level 9 interrupt
 	HARDINT4U(10)	! 04a = level 10 interrupt
 	HARDINT4U(11)	! 04b = level 11 interrupt
 	ZS_INTERRUPT4U	! 04c = level 12 (zs) interrupt
 	HARDINT4U(13)	! 04d = level 13 interrupt
 	HARDINT4U(14)	! 04e = level 14 interrupt
 	HARDINT4U(15)	! 04f = nonmaskable interrupt
 	UTRAP(0x050); UTRAP(0x051); UTRAP(0x052); UTRAP(0x053); UTRAP(0x054); UTRAP(0x055)
 	UTRAP(0x056); UTRAP(0x057); UTRAP(0x058); UTRAP(0x059); UTRAP(0x05a); UTRAP(0x05b)
 	UTRAP(0x05c); UTRAP(0x05d); UTRAP(0x05e); UTRAP(0x05f)
 	VTRAP(0x060, interrupt_vector); ! 060 = interrupt vector
 	TRAP(T_PA_WATCHPT)	! 061 = physical address data watchpoint
 	TRAP(T_VA_WATCHPT)	! 062 = virtual address data watchpoint
 	TRAP(T_ECCERR)	! 063 = corrected ECC error
 ufast_IMMU_miss:	! 064 = fast instr access MMU miss
 	ldxa [%g0] ASI_IMMU_8KPTR, %g2	! Load IMMU 8K TSB pointer
 #ifdef NO_TSB
 	ba,a %icc, instr_miss
 #endif
 	ldxa [%g0] ASI_IMMU, %g1	! Load IMMU tag target register
 	ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag:data into %g4:%g5
 	brgez,pn %g5, instr_miss	! Entry invalid? Punt
 	 cmp %g1, %g4	! Compare TLB tags
 	bne,pn %xcc, instr_miss	! Got right tag?
 	 nop
 	CLRTT
 	stxa %g5, [%g0] ASI_IMMU_DATA_IN	! Enter new mapping
 	retry	! Try new mapping
 1:
 	sir
 	TA32
 ufast_DMMU_miss:	! 068 = fast data access MMU miss
 	ldxa [%g0] ASI_DMMU_8KPTR, %g2! Load DMMU 8K TSB pointer
 #ifdef NO_TSB
 	ba,a %icc, data_miss
 #endif
 	ldxa [%g0] ASI_DMMU, %g1	! Load DMMU tag target register
 	ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag and data into %g4 and %g5
 	brgez,pn %g5, data_miss	! Entry invalid? Punt
 	 cmp %g1, %g4	! Compare TLB tags
 	bnz,pn %xcc, data_miss	! Got right tag?
 	 nop
 	CLRTT
 #ifdef TRAPSTATS
 	sethi %hi(_C_LABEL(udhit)), %g1
 	lduw [%g1+%lo(_C_LABEL(udhit))], %g2
 	inc %g2
 	stw %g2, [%g1+%lo(_C_LABEL(udhit))]
 #endif
 	stxa %g5, [%g0] ASI_DMMU_DATA_IN	! Enter new mapping
 	retry	! Try new mapping
 1:
 	sir
 	TA32
 ufast_DMMU_protection:	! 06c = fast data access MMU protection
 #ifdef TRAPSTATS
 	sethi %hi(_C_LABEL(udprot)), %g1
 	lduw [%g1+%lo(_C_LABEL(udprot))], %g2
 	inc %g2
 	stw %g2, [%g1+%lo(_C_LABEL(udprot))]
 #endif
 #ifdef HWREF
 	ba,a,pt %xcc, dmmu_write_fault
 #else
 	ba,a,pt %xcc, winfault
 #endif
 	nop
 	TA32
 	UTRAP(0x070)	! Implementation dependent traps
 	UTRAP(0x071); UTRAP(0x072); UTRAP(0x073); UTRAP(0x074); UTRAP(0x075); UTRAP(0x076)
 	UTRAP(0x077); UTRAP(0x078); UTRAP(0x079); UTRAP(0x07a); UTRAP(0x07b); UTRAP(0x07c)
 	UTRAP(0x07d); UTRAP(0x07e); UTRAP(0x07f)
 TABLE(uspill):
 	SPILL64(uspill8,ASI_AIUS)	! 0x080 spill_0_normal -- used to save user windows in user mode
 	SPILL32(uspill4,ASI_AIUS)	! 0x084 spill_1_normal
 	SPILLBOTH(uspill8,uspill4,ASI_AIUS)	! 0x088 spill_2_normal
 	UTRAP(0x08c); TA32	! 0x08c spill_3_normal
 TABLE(kspill):
 	SPILL64(kspill8,ASI_N)	! 0x090 spill_4_normal -- used to save supervisor windows
 	SPILL32(kspill4,ASI_N)	! 0x094 spill_5_normal
 	SPILLBOTH(kspill8,kspill4,ASI_N)	! 0x098 spill_6_normal
 	UTRAP(0x09c); TA32	! 0x09c spill_7_normal
 TABLE(uspillk):
 	SPILL64(uspillk8,ASI_AIUS)	! 0x0a0 spill_0_other -- used to save user windows in supervisor mode
 	SPILL32(uspillk4,ASI_AIUS)	! 0x0a4 spill_1_other
 	SPILLBOTH(uspillk8,uspillk4,ASI_AIUS)	! 0x0a8 spill_2_other
 	UTRAP(0x0ac); TA32	! 0x0ac spill_3_other
 	UTRAP(0x0b0); TA32	! 0x0b0 spill_4_other
 	UTRAP(0x0b4); TA32	! 0x0b4 spill_5_other
 	UTRAP(0x0b8); TA32	! 0x0b8 spill_6_other
 	UTRAP(0x0bc); TA32	! 0x0bc spill_7_other
 TABLE(ufill):
 	FILL64(ufill8,ASI_AIUS)	! 0x0c0 fill_0_normal -- used to fill windows when running user mode
 	FILL32(ufill4,ASI_AIUS)	! 0x0c4 fill_1_normal
 	FILLBOTH(ufill8,ufill4,ASI_AIUS)	! 0x0c8 fill_2_normal
 	UTRAP(0x0cc); TA32	! 0x0cc fill_3_normal
 TABLE(kfill):
 	FILL64(kfill8,ASI_N)	! 0x0d0 fill_4_normal -- used to fill windows when running supervisor mode
 	FILL32(kfill4,ASI_N)	! 0x0d4 fill_5_normal
 	FILLBOTH(kfill8,kfill4,ASI_N)	! 0x0d8 fill_6_normal
 	UTRAP(0x0dc); TA32	! 0x0dc fill_7_normal
 TABLE(ufillk):
 	FILL64(ufillk8,ASI_AIUS)	! 0x0e0 fill_0_other
 	FILL32(ufillk4,ASI_AIUS)	! 0x0e4 fill_1_other
 	FILLBOTH(ufillk8,ufillk4,ASI_AIUS)	! 0x0e8 fill_2_other
 	UTRAP(0x0ec); TA32	! 0x0ec fill_3_other
 	UTRAP(0x0f0); TA32	! 0x0f0 fill_4_other
 	UTRAP(0x0f4); TA32	! 0x0f4 fill_5_other
 	UTRAP(0x0f8); TA32	! 0x0f8 fill_6_other
 	UTRAP(0x0fc); TA32	! 0x0fc fill_7_other
 TABLE(syscall):
 	SYSCALL	! 0x100 = sun syscall
 	BPT	! 0x101 = pseudo breakpoint instruction
 	STRAP(0x102); STRAP(0x103); STRAP(0x104); STRAP(0x105); STRAP(0x106); STRAP(0x107)
 	SYSCALL	! 0x108 = svr4 syscall
 	SYSCALL	! 0x109 = bsd syscall
 	BPT_KGDB_EXEC	! 0x10a = enter kernel gdb on kernel startup
 	STRAP(0x10b); STRAP(0x10c); STRAP(0x10d); STRAP(0x10e); STRAP(0x10f);
 	STRAP(0x110); STRAP(0x111); STRAP(0x112); STRAP(0x113); STRAP(0x114); STRAP(0x115); STRAP(0x116); STRAP(0x117)
 	STRAP(0x118); STRAP(0x119); STRAP(0x11a); STRAP(0x11b); STRAP(0x11c); STRAP(0x11d); STRAP(0x11e); STRAP(0x11f)
 	STRAP(0x120); STRAP(0x121); STRAP(0x122); STRAP(0x123); STRAP(0x124); STRAP(0x125); STRAP(0x126); STRAP(0x127)
 	STRAP(0x128); STRAP(0x129); STRAP(0x12a); STRAP(0x12b); STRAP(0x12c); STRAP(0x12d); STRAP(0x12e); STRAP(0x12f)
 	STRAP(0x130); STRAP(0x131); STRAP(0x132); STRAP(0x133); STRAP(0x134); STRAP(0x135); STRAP(0x136); STRAP(0x137)
 	STRAP(0x138); STRAP(0x139); STRAP(0x13a); STRAP(0x13b); STRAP(0x13c); STRAP(0x13d); STRAP(0x13e); STRAP(0x13f)
 	SYSCALL	! 0x140 SVID syscall (Solaris 2.7)
 	SYSCALL	! 0x141 SPARC International syscall
 	SYSCALL	! 0x142 OS Vendor syscall
 	SYSCALL	! 0x143 HW OEM syscall
 	STRAP(0x144); STRAP(0x145); STRAP(0x146); STRAP(0x147)
 	STRAP(0x148); STRAP(0x149); STRAP(0x14a); STRAP(0x14b); STRAP(0x14c); STRAP(0x14d); STRAP(0x14e); STRAP(0x14f)
 	STRAP(0x150); STRAP(0x151); STRAP(0x152); STRAP(0x153); STRAP(0x154); STRAP(0x155); STRAP(0x156); STRAP(0x157)
 	STRAP(0x158); STRAP(0x159); STRAP(0x15a); STRAP(0x15b); STRAP(0x15c); STRAP(0x15d); STRAP(0x15e); STRAP(0x15f)
 	STRAP(0x160); STRAP(0x161); STRAP(0x162); STRAP(0x163); STRAP(0x164); STRAP(0x165); STRAP(0x166); STRAP(0x167)
 	STRAP(0x168); STRAP(0x169); STRAP(0x16a); STRAP(0x16b); STRAP(0x16c); STRAP(0x16d); STRAP(0x16e); STRAP(0x16f)
 	STRAP(0x170); STRAP(0x171); STRAP(0x172); STRAP(0x173); STRAP(0x174); STRAP(0x175); STRAP(0x176); STRAP(0x177)
 	STRAP(0x178); STRAP(0x179); STRAP(0x17a); STRAP(0x17b); STRAP(0x17c); STRAP(0x17d); STRAP(0x17e); STRAP(0x17f)
 	! Traps beyond 0x17f are reserved
 	UTRAP(0x180); UTRAP(0x181); UTRAP(0x182); UTRAP(0x183); UTRAP(0x184); UTRAP(0x185); UTRAP(0x186); UTRAP(0x187)
 	UTRAP(0x188); UTRAP(0x189); UTRAP(0x18a); UTRAP(0x18b); UTRAP(0x18c); UTRAP(0x18d); UTRAP(0x18e); UTRAP(0x18f)
 	UTRAP(0x190); UTRAP(0x191); UTRAP(0x192); UTRAP(0x193); UTRAP(0x194); UTRAP(0x195); UTRAP(0x196); UTRAP(0x197)
 	UTRAP(0x198); UTRAP(0x199); UTRAP(0x19a); UTRAP(0x19b); UTRAP(0x19c); UTRAP(0x19d); UTRAP(0x19e); UTRAP(0x19f)
 	UTRAP(0x1a0); UTRAP(0x1a1); UTRAP(0x1a2); UTRAP(0x1a3); UTRAP(0x1a4); UTRAP(0x1a5); UTRAP(0x1a6); UTRAP(0x1a7)
 	UTRAP(0x1a8); UTRAP(0x1a9); UTRAP(0x1aa); UTRAP(0x1ab); UTRAP(0x1ac); UTRAP(0x1ad); UTRAP(0x1ae); UTRAP(0x1af)
 	UTRAP(0x1b0); UTRAP(0x1b1); UTRAP(0x1b2); UTRAP(0x1b3); UTRAP(0x1b4); UTRAP(0x1b5); UTRAP(0x1b6); UTRAP(0x1b7)
 	UTRAP(0x1b8); UTRAP(0x1b9); UTRAP(0x1ba); UTRAP(0x1bb); UTRAP(0x1bc); UTRAP(0x1bd); UTRAP(0x1be); UTRAP(0x1bf)
 	UTRAP(0x1c0); UTRAP(0x1c1); UTRAP(0x1c2); UTRAP(0x1c3); UTRAP(0x1c4); UTRAP(0x1c5); UTRAP(0x1c6); UTRAP(0x1c7)
 	UTRAP(0x1c8); UTRAP(0x1c9); UTRAP(0x1ca); UTRAP(0x1cb); UTRAP(0x1cc); UTRAP(0x1cd); UTRAP(0x1ce); UTRAP(0x1cf)
 	UTRAP(0x1d0); UTRAP(0x1d1); UTRAP(0x1d2); UTRAP(0x1d3); UTRAP(0x1d4); UTRAP(0x1d5); UTRAP(0x1d6); UTRAP(0x1d7)
 	UTRAP(0x1d8); UTRAP(0x1d9); UTRAP(0x1da); UTRAP(0x1db); UTRAP(0x1dc); UTRAP(0x1dd); UTRAP(0x1de); UTRAP(0x1df)
 	UTRAP(0x1e0); UTRAP(0x1e1); UTRAP(0x1e2); UTRAP(0x1e3); UTRAP(0x1e4); UTRAP(0x1e5); UTRAP(0x1e6); UTRAP(0x1e7)
 	UTRAP(0x1e8); UTRAP(0x1e9); UTRAP(0x1ea); UTRAP(0x1eb); UTRAP(0x1ec); UTRAP(0x1ed); UTRAP(0x1ee); UTRAP(0x1ef)
 	UTRAP(0x1f0); UTRAP(0x1f1); UTRAP(0x1f2); UTRAP(0x1f3); UTRAP(0x1f4); UTRAP(0x1f5); UTRAP(0x1f6); UTRAP(0x1f7)
 	UTRAP(0x1f8); UTRAP(0x1f9); UTRAP(0x1fa); UTRAP(0x1fb); UTRAP(0x1fc); UTRAP(0x1fd); UTRAP(0x1fe); UTRAP(0x1ff)
 
 	/* Traps from TL>0 -- traps from supervisor mode */
 #undef TABLE
 #ifdef __STDC__
 #define TABLE(name)	nucleus_ ## name
 #else
 #define TABLE(name)	nucleus_/**/name
 #endif
 trapbase_priv:
 	UTRAP(0x000)	! 000 = reserved -- Use it to boot
 	/* We should not get the next 5 traps */
 	UTRAP(0x001)	! 001 = POR Reset -- ROM should get this
 	UTRAP(0x002)	! 002 = WDR Watchdog -- ROM should get this
 	UTRAP(0x003)	! 003 = XIR -- ROM should get this
 	UTRAP(0x004)	! 004 = SIR -- ROM should get this
 	UTRAP(0x005)	! 005 = RED state exception
 	UTRAP(0x006); UTRAP(0x007)
 ktextfault:
 	VTRAP(T_INST_EXCEPT, textfault)	! 008 = instr. access except
 	VTRAP(T_TEXTFAULT, textfault)	! 009 = instr access MMU miss -- no MMU
 	VTRAP(T_INST_ERROR, textfault)	! 00a = instr. access err
 	UTRAP(0x00b); UTRAP(0x00c); UTRAP(0x00d); UTRAP(0x00e); UTRAP(0x00f)
 	TRAP(T_ILLINST)	! 010 = illegal instruction
 	TRAP(T_PRIVINST)	! 011 = privileged instruction
 	UTRAP(0x012)	! 012 = unimplemented LDD
 	UTRAP(0x013)	! 013 = unimplemented STD
 	UTRAP(0x014); UTRAP(0x015); UTRAP(0x016); UTRAP(0x017); UTRAP(0x018)
 	UTRAP(0x019); UTRAP(0x01a); UTRAP(0x01b); UTRAP(0x01c); UTRAP(0x01d)
 	UTRAP(0x01e); UTRAP(0x01f)
 	TRAP(T_FPDISABLED)	! 020 = fp instr, but EF bit off in psr
 	TRAP(T_FP_IEEE_754)	! 021 = ieee 754 exception
 	TRAP(T_FP_OTHER)	! 022 = other fp exception
 	TRAP(T_TAGOF)	! 023 = tag overflow
 	clr %l0
 #ifdef DEBUG
 	set 0xbadbeef, %l0	! DEBUG
 #endif
 	mov %l0, %l1; mov %l0, %l2	! 024-027 = clean window trap
 	rdpr %cleanwin, %o7	! This handler is in-lined and cannot fault
 	inc %o7; mov %l0, %l3	! Nucleus (trap&IRQ) code does not need clean windows
 	wrpr %g0, %o7, %cleanwin	! Clear out %l0-%l8 and %o0-%o8 and inc %cleanwin and done
 #ifdef NOT_DEBUG
 	!!
 	!! Check the sp redzone
 	!!
 	rdpr %wstate, t1
 	cmp t1, WSTATE_KERN
 	bne,pt icc, 7f
 	 sethi %hi(_C_LABEL(redzone)), t1
 	ldx [t1 + %lo(_C_LABEL(redzone))], t2
 	cmp %sp, t2	! if sp >= t2, not in red zone
 	blu panic_red	! and can continue normally
 7:
 #endif
 	mov %l0, %l4; mov %l0, %l5; mov %l0, %l6; mov %l0, %l7
 	mov %l0, %o0; mov %l0, %o1; mov %l0, %o2; mov %l0, %o3
 
 	mov %l0, %o4; mov %l0, %o5; mov %l0, %o6; mov %l0, %o7
 	CLRTT
 	retry; nop; TA32
 	TRAP(T_DIV0)	! 028 = divide by zero
 	UTRAP(0x029)	! 029 = internal processor error
 	UTRAP(0x02a); UTRAP(0x02b); UTRAP(0x02c); UTRAP(0x02d); UTRAP(0x02e); UTRAP(0x02f)
 kdatafault:
 	VTRAP(T_DATAFAULT, winfault)	! 030 = data fetch fault
 	UTRAP(0x031)	! 031 = data MMU miss -- no MMU
 	VTRAP(T_DATA_ERROR, winfault)	! 032 = data fetch fault
 	VTRAP(T_DATA_PROT, winfault)	! 033 = data fetch fault
 	VTRAP(T_ALIGN, checkalign)	! 034 = address alignment error -- we could fix it inline...
 	TRAP(T_LDDF_ALIGN)	! 035 = LDDF address alignment error -- we could fix it inline...
 	TRAP(T_STDF_ALIGN)	! 036 = STDF address alignment error -- we could fix it inline...
 	TRAP(T_PRIVACT)	! 037 = privileged action
 	UTRAP(0x038); UTRAP(0x039); UTRAP(0x03a); UTRAP(0x03b); UTRAP(0x03c);
 	UTRAP(0x03d); UTRAP(0x03e); UTRAP(0x03f);
 	VTRAP(T_ASYNC_ERROR, winfault)	! 040 = data fetch fault
 	SOFTINT4U(1, IE_L1)	! 041 = level 1 interrupt
 	HARDINT4U(2)	! 042 = level 2 interrupt
 	HARDINT4U(3)	! 043 = level 3 interrupt
 	SOFTINT4U(4, IE_L4)	! 044 = level 4 interrupt
 	HARDINT4U(5)	! 045 = level 5 interrupt
 	SOFTINT4U(6, IE_L6)	! 046 = level 6 interrupt
 	HARDINT4U(7)	! 047 = level 7 interrupt
 	HARDINT4U(8)	! 048 = level 8 interrupt
 	HARDINT4U(9)	! 049 = level 9 interrupt
 	HARDINT4U(10)	! 04a = level 10 interrupt
 	HARDINT4U(11)	! 04b = level 11 interrupt
 	ZS_INTERRUPT4U	! 04c = level 12 (zs) interrupt
 	HARDINT4U(13)	! 04d = level 13 interrupt
 	HARDINT4U(14)	! 04e = level 14 interrupt
 	HARDINT4U(15)	! 04f = nonmaskable interrupt
 	UTRAP(0x050); UTRAP(0x051); UTRAP(0x052); UTRAP(0x053); UTRAP(0x054); UTRAP(0x055)
 	UTRAP(0x056); UTRAP(0x057); UTRAP(0x058); UTRAP(0x059); UTRAP(0x05a); UTRAP(0x05b)
 	UTRAP(0x05c); UTRAP(0x05d); UTRAP(0x05e); UTRAP(0x05f)
 	VTRAP(0x060, interrupt_vector); ! 060 = interrupt vector
 	TRAP(T_PA_WATCHPT)	! 061 = physical address data watchpoint
 	TRAP(T_VA_WATCHPT)	! 062 = virtual address data watchpoint
 	TRAP(T_ECCERR)	! 063 = corrected ECC error
 kfast_IMMU_miss:	! 064 = fast instr access MMU miss
 	ldxa [%g0] ASI_IMMU_8KPTR, %g2	! Load IMMU 8K TSB pointer
 #ifdef NO_TSB
 	ba,a %icc, instr_miss
 #endif
 	ldxa [%g0] ASI_IMMU, %g1	! Load IMMU tag target register
 	ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4	! Load TSB tag:data into %g4:%g5
 	brgez,pn %g5, instr_miss	! Entry invalid? Punt
 	 cmp %g1, %g4	! Compare TLB tags
 	bne,pn %xcc, instr_miss	! Got right tag?
 	 nop
 	CLRTT
 	stxa %g5, [%g0] ASI_IMMU_DATA_IN	! Enter new mapping
 	retry	! Try new mapping
 1:
 	sir
 	TA32
 kfast_DMMU_miss:	! 068 = fast data access MMU miss
 	ldxa [%g0] ASI_DMMU_8KPTR, %g2! Load DMMU 8K TSB pointer
750#ifdef NO_TSB 750#ifdef NO_TSB
751 ba,a %icc, data_miss 751 ba,a %icc, data_miss
752#endif 752#endif
753 ldxa [%g0] ASI_DMMU, %g1 ! Load DMMU tag target register 753 ldxa [%g0] ASI_DMMU, %g1 ! Load DMMU tag target register
754 ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB tag and data into %g4 and %g5 754 ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB tag and data into %g4 and %g5
755 brgez,pn %g5, data_miss ! Entry invalid? Punt 755 brgez,pn %g5, data_miss ! Entry invalid? Punt
756 cmp %g1, %g4 ! Compare TLB tags 756 cmp %g1, %g4 ! Compare TLB tags
757 bnz,pn %xcc, data_miss ! Got right tag? 757 bnz,pn %xcc, data_miss ! Got right tag?
758 nop 758 nop
759 CLRTT 759 CLRTT
760#ifdef TRAPSTATS 760#ifdef TRAPSTATS
761 sethi %hi(_C_LABEL(kdhit)), %g1 761 sethi %hi(_C_LABEL(kdhit)), %g1
762 lduw [%g1+%lo(_C_LABEL(kdhit))], %g2 762 lduw [%g1+%lo(_C_LABEL(kdhit))], %g2
763 inc %g2 763 inc %g2
764 stw %g2, [%g1+%lo(_C_LABEL(kdhit))] 764 stw %g2, [%g1+%lo(_C_LABEL(kdhit))]
765#endif 765#endif
766 stxa %g5, [%g0] ASI_DMMU_DATA_IN ! Enter new mapping 766 stxa %g5, [%g0] ASI_DMMU_DATA_IN ! Enter new mapping
767 retry ! Try new mapping 767 retry ! Try new mapping
7681: 7681:
769 sir 769 sir
770 TA32 770 TA32
771kfast_DMMU_protection: ! 06c = fast data access MMU protection 771kfast_DMMU_protection: ! 06c = fast data access MMU protection
772#ifdef TRAPSTATS 772#ifdef TRAPSTATS
773 sethi %hi(_C_LABEL(kdprot)), %g1 773 sethi %hi(_C_LABEL(kdprot)), %g1
774 lduw [%g1+%lo(_C_LABEL(kdprot))], %g2 774 lduw [%g1+%lo(_C_LABEL(kdprot))], %g2
775 inc %g2 775 inc %g2
776 stw %g2, [%g1+%lo(_C_LABEL(kdprot))] 776 stw %g2, [%g1+%lo(_C_LABEL(kdprot))]
777#endif 777#endif
778#ifdef HWREF 778#ifdef HWREF
779 ba,a,pt %xcc, dmmu_write_fault 779 ba,a,pt %xcc, dmmu_write_fault
780#else 780#else
781 ba,a,pt %xcc, winfault 781 ba,a,pt %xcc, winfault
782#endif 782#endif
783 nop 783 nop
784 TA32 784 TA32
785 UTRAP(0x070) ! Implementation dependent traps 785 UTRAP(0x070) ! Implementation dependent traps
786 UTRAP(0x071); UTRAP(0x072); UTRAP(0x073); UTRAP(0x074); UTRAP(0x075); UTRAP(0x076) 786 UTRAP(0x071); UTRAP(0x072); UTRAP(0x073); UTRAP(0x074); UTRAP(0x075); UTRAP(0x076)
787 UTRAP(0x077); UTRAP(0x078); UTRAP(0x079); UTRAP(0x07a); UTRAP(0x07b); UTRAP(0x07c) 787 UTRAP(0x077); UTRAP(0x078); UTRAP(0x079); UTRAP(0x07a); UTRAP(0x07b); UTRAP(0x07c)
788 UTRAP(0x07d); UTRAP(0x07e); UTRAP(0x07f) 788 UTRAP(0x07d); UTRAP(0x07e); UTRAP(0x07f)
789TABLE(uspill): 789TABLE(uspill):
790 SPILL64(1,ASI_AIUS) ! 0x080 spill_0_normal -- used to save user windows 790 SPILL64(1,ASI_AIUS) ! 0x080 spill_0_normal -- used to save user windows
791 SPILL32(2,ASI_AIUS) ! 0x084 spill_1_normal 791 SPILL32(2,ASI_AIUS) ! 0x084 spill_1_normal
792 SPILLBOTH(1b,2b,ASI_AIUS) ! 0x088 spill_2_normal 792 SPILLBOTH(1b,2b,ASI_AIUS) ! 0x088 spill_2_normal
793 UTRAP(0x08c); TA32 ! 0x08c spill_3_normal 793 UTRAP(0x08c); TA32 ! 0x08c spill_3_normal
794TABLE(kspill): 794TABLE(kspill):
795 SPILL64(1,ASI_N) ! 0x090 spill_4_normal -- used to save supervisor windows 795 SPILL64(1,ASI_N) ! 0x090 spill_4_normal -- used to save supervisor windows
796 SPILL32(2,ASI_N) ! 0x094 spill_5_normal 796 SPILL32(2,ASI_N) ! 0x094 spill_5_normal
797 SPILLBOTH(1b,2b,ASI_N) ! 0x098 spill_6_normal 797 SPILLBOTH(1b,2b,ASI_N) ! 0x098 spill_6_normal
798 UTRAP(0x09c); TA32 ! 0x09c spill_7_normal 798 UTRAP(0x09c); TA32 ! 0x09c spill_7_normal
799TABLE(uspillk): 799TABLE(uspillk):
800 SPILL64(1,ASI_AIUS) ! 0x0a0 spill_0_other -- used to save user windows in nucleus mode 800 SPILL64(1,ASI_AIUS) ! 0x0a0 spill_0_other -- used to save user windows in nucleus mode
801 SPILL32(2,ASI_AIUS) ! 0x0a4 spill_1_other 801 SPILL32(2,ASI_AIUS) ! 0x0a4 spill_1_other
802 SPILLBOTH(1b,2b,ASI_AIUS) ! 0x0a8 spill_2_other 802 SPILLBOTH(1b,2b,ASI_AIUS) ! 0x0a8 spill_2_other
803 UTRAP(0x0ac); TA32 ! 0x0ac spill_3_other 803 UTRAP(0x0ac); TA32 ! 0x0ac spill_3_other
804 UTRAP(0x0b0); TA32 ! 0x0b0 spill_4_other 804 UTRAP(0x0b0); TA32 ! 0x0b0 spill_4_other
805 UTRAP(0x0b4); TA32 ! 0x0b4 spill_5_other 805 UTRAP(0x0b4); TA32 ! 0x0b4 spill_5_other
806 UTRAP(0x0b8); TA32 ! 0x0b8 spill_6_other 806 UTRAP(0x0b8); TA32 ! 0x0b8 spill_6_other
807 UTRAP(0x0bc); TA32 ! 0x0bc spill_7_other 807 UTRAP(0x0bc); TA32 ! 0x0bc spill_7_other
808TABLE(ufill): 808TABLE(ufill):
809 FILL64(nufill8,ASI_AIUS) ! 0x0c0 fill_0_normal -- used to fill windows when running nucleus mode from user 809 FILL64(nufill8,ASI_AIUS) ! 0x0c0 fill_0_normal -- used to fill windows when running nucleus mode from user
810 FILL32(nufill4,ASI_AIUS) ! 0x0c4 fill_1_normal 810 FILL32(nufill4,ASI_AIUS) ! 0x0c4 fill_1_normal
811 FILLBOTH(nufill8,nufill4,ASI_AIUS) ! 0x0c8 fill_2_normal 811 FILLBOTH(nufill8,nufill4,ASI_AIUS) ! 0x0c8 fill_2_normal
812 UTRAP(0x0cc); TA32 ! 0x0cc fill_3_normal 812 UTRAP(0x0cc); TA32 ! 0x0cc fill_3_normal
813TABLE(sfill): 813TABLE(sfill):
814 FILL64(sfill8,ASI_N) ! 0x0d0 fill_4_normal -- used to fill windows when running nucleus mode from supervisor 814 FILL64(sfill8,ASI_N) ! 0x0d0 fill_4_normal -- used to fill windows when running nucleus mode from supervisor
815 FILL32(sfill4,ASI_N) ! 0x0d4 fill_5_normal 815 FILL32(sfill4,ASI_N) ! 0x0d4 fill_5_normal
816 FILLBOTH(sfill8,sfill4,ASI_N) ! 0x0d8 fill_6_normal 816 FILLBOTH(sfill8,sfill4,ASI_N) ! 0x0d8 fill_6_normal
817 UTRAP(0x0dc); TA32 ! 0x0dc fill_7_normal 817 UTRAP(0x0dc); TA32 ! 0x0dc fill_7_normal
818TABLE(kfill): 818TABLE(kfill):
819 FILL64(nkfill8,ASI_AIUS) ! 0x0e0 fill_0_other -- used to fill user windows when running nucleus mode -- will we ever use this? 819 FILL64(nkfill8,ASI_AIUS) ! 0x0e0 fill_0_other -- used to fill user windows when running nucleus mode -- will we ever use this?
820 FILL32(nkfill4,ASI_AIUS) ! 0x0e4 fill_1_other 820 FILL32(nkfill4,ASI_AIUS) ! 0x0e4 fill_1_other
821 FILLBOTH(nkfill8,nkfill4,ASI_AIUS)! 0x0e8 fill_2_other 821 FILLBOTH(nkfill8,nkfill4,ASI_AIUS)! 0x0e8 fill_2_other
822 UTRAP(0x0ec); TA32 ! 0x0ec fill_3_other 822 UTRAP(0x0ec); TA32 ! 0x0ec fill_3_other
823 UTRAP(0x0f0); TA32 ! 0x0f0 fill_4_other 823 UTRAP(0x0f0); TA32 ! 0x0f0 fill_4_other
824 UTRAP(0x0f4); TA32 ! 0x0f4 fill_5_other 824 UTRAP(0x0f4); TA32 ! 0x0f4 fill_5_other
825 UTRAP(0x0f8); TA32 ! 0x0f8 fill_6_other 825 UTRAP(0x0f8); TA32 ! 0x0f8 fill_6_other
826 UTRAP(0x0fc); TA32 ! 0x0fc fill_7_other 826 UTRAP(0x0fc); TA32 ! 0x0fc fill_7_other
827TABLE(syscall): 827TABLE(syscall):
828 SYSCALL ! 0x100 = sun syscall 828 SYSCALL ! 0x100 = sun syscall
829 BPT ! 0x101 = pseudo breakpoint instruction 829 BPT ! 0x101 = pseudo breakpoint instruction
830 STRAP(0x102); STRAP(0x103); STRAP(0x104); STRAP(0x105); STRAP(0x106); STRAP(0x107) 830 STRAP(0x102); STRAP(0x103); STRAP(0x104); STRAP(0x105); STRAP(0x106); STRAP(0x107)
831 SYSCALL ! 0x108 = svr4 syscall 831 SYSCALL ! 0x108 = svr4 syscall
832 SYSCALL ! 0x109 = bsd syscall 832 SYSCALL ! 0x109 = bsd syscall
833 BPT_KGDB_EXEC ! 0x10a = enter kernel gdb on kernel startup 833 BPT_KGDB_EXEC ! 0x10a = enter kernel gdb on kernel startup
834 STRAP(0x10b); STRAP(0x10c); STRAP(0x10d); STRAP(0x10e); STRAP(0x10f); 834 STRAP(0x10b); STRAP(0x10c); STRAP(0x10d); STRAP(0x10e); STRAP(0x10f);
835 STRAP(0x110); STRAP(0x111); STRAP(0x112); STRAP(0x113); STRAP(0x114); STRAP(0x115); STRAP(0x116); STRAP(0x117) 835 STRAP(0x110); STRAP(0x111); STRAP(0x112); STRAP(0x113); STRAP(0x114); STRAP(0x115); STRAP(0x116); STRAP(0x117)
836 STRAP(0x118); STRAP(0x119); STRAP(0x11a); STRAP(0x11b); STRAP(0x11c); STRAP(0x11d); STRAP(0x11e); STRAP(0x11f) 836 STRAP(0x118); STRAP(0x119); STRAP(0x11a); STRAP(0x11b); STRAP(0x11c); STRAP(0x11d); STRAP(0x11e); STRAP(0x11f)
837 STRAP(0x120); STRAP(0x121); STRAP(0x122); STRAP(0x123); STRAP(0x124); STRAP(0x125); STRAP(0x126); STRAP(0x127) 837 STRAP(0x120); STRAP(0x121); STRAP(0x122); STRAP(0x123); STRAP(0x124); STRAP(0x125); STRAP(0x126); STRAP(0x127)
838 STRAP(0x128); STRAP(0x129); STRAP(0x12a); STRAP(0x12b); STRAP(0x12c); STRAP(0x12d); STRAP(0x12e); STRAP(0x12f) 838 STRAP(0x128); STRAP(0x129); STRAP(0x12a); STRAP(0x12b); STRAP(0x12c); STRAP(0x12d); STRAP(0x12e); STRAP(0x12f)
839 STRAP(0x130); STRAP(0x131); STRAP(0x132); STRAP(0x133); STRAP(0x134); STRAP(0x135); STRAP(0x136); STRAP(0x137) 839 STRAP(0x130); STRAP(0x131); STRAP(0x132); STRAP(0x133); STRAP(0x134); STRAP(0x135); STRAP(0x136); STRAP(0x137)
840 STRAP(0x138); STRAP(0x139); STRAP(0x13a); STRAP(0x13b); STRAP(0x13c); STRAP(0x13d); STRAP(0x13e); STRAP(0x13f) 840 STRAP(0x138); STRAP(0x139); STRAP(0x13a); STRAP(0x13b); STRAP(0x13c); STRAP(0x13d); STRAP(0x13e); STRAP(0x13f)
841 STRAP(0x140); STRAP(0x141); STRAP(0x142); STRAP(0x143); STRAP(0x144); STRAP(0x145); STRAP(0x146); STRAP(0x147) 841 STRAP(0x140); STRAP(0x141); STRAP(0x142); STRAP(0x143); STRAP(0x144); STRAP(0x145); STRAP(0x146); STRAP(0x147)
842 STRAP(0x148); STRAP(0x149); STRAP(0x14a); STRAP(0x14b); STRAP(0x14c); STRAP(0x14d); STRAP(0x14e); STRAP(0x14f) 842 STRAP(0x148); STRAP(0x149); STRAP(0x14a); STRAP(0x14b); STRAP(0x14c); STRAP(0x14d); STRAP(0x14e); STRAP(0x14f)
843 STRAP(0x150); STRAP(0x151); STRAP(0x152); STRAP(0x153); STRAP(0x154); STRAP(0x155); STRAP(0x156); STRAP(0x157) 843 STRAP(0x150); STRAP(0x151); STRAP(0x152); STRAP(0x153); STRAP(0x154); STRAP(0x155); STRAP(0x156); STRAP(0x157)
844 STRAP(0x158); STRAP(0x159); STRAP(0x15a); STRAP(0x15b); STRAP(0x15c); STRAP(0x15d); STRAP(0x15e); STRAP(0x15f) 844 STRAP(0x158); STRAP(0x159); STRAP(0x15a); STRAP(0x15b); STRAP(0x15c); STRAP(0x15d); STRAP(0x15e); STRAP(0x15f)
845 STRAP(0x160); STRAP(0x161); STRAP(0x162); STRAP(0x163); STRAP(0x164); STRAP(0x165); STRAP(0x166); STRAP(0x167) 845 STRAP(0x160); STRAP(0x161); STRAP(0x162); STRAP(0x163); STRAP(0x164); STRAP(0x165); STRAP(0x166); STRAP(0x167)
846 STRAP(0x168); STRAP(0x169); STRAP(0x16a); STRAP(0x16b); STRAP(0x16c); STRAP(0x16d); STRAP(0x16e); STRAP(0x16f) 846 STRAP(0x168); STRAP(0x169); STRAP(0x16a); STRAP(0x16b); STRAP(0x16c); STRAP(0x16d); STRAP(0x16e); STRAP(0x16f)
847 STRAP(0x170); STRAP(0x171); STRAP(0x172); STRAP(0x173); STRAP(0x174); STRAP(0x175); STRAP(0x176); STRAP(0x177) 847 STRAP(0x170); STRAP(0x171); STRAP(0x172); STRAP(0x173); STRAP(0x174); STRAP(0x175); STRAP(0x176); STRAP(0x177)
848 STRAP(0x178); STRAP(0x179); STRAP(0x17a); STRAP(0x17b); STRAP(0x17c); STRAP(0x17d); STRAP(0x17e); STRAP(0x17f) 848 STRAP(0x178); STRAP(0x179); STRAP(0x17a); STRAP(0x17b); STRAP(0x17c); STRAP(0x17d); STRAP(0x17e); STRAP(0x17f)
849 ! Traps beyond 0x17f are reserved 849 ! Traps beyond 0x17f are reserved
850 UTRAP(0x180); UTRAP(0x181); UTRAP(0x182); UTRAP(0x183); UTRAP(0x184); UTRAP(0x185); UTRAP(0x186); UTRAP(0x187) 850 UTRAP(0x180); UTRAP(0x181); UTRAP(0x182); UTRAP(0x183); UTRAP(0x184); UTRAP(0x185); UTRAP(0x186); UTRAP(0x187)
851 UTRAP(0x188); UTRAP(0x189); UTRAP(0x18a); UTRAP(0x18b); UTRAP(0x18c); UTRAP(0x18d); UTRAP(0x18e); UTRAP(0x18f) 851 UTRAP(0x188); UTRAP(0x189); UTRAP(0x18a); UTRAP(0x18b); UTRAP(0x18c); UTRAP(0x18d); UTRAP(0x18e); UTRAP(0x18f)
852 UTRAP(0x190); UTRAP(0x191); UTRAP(0x192); UTRAP(0x193); UTRAP(0x194); UTRAP(0x195); UTRAP(0x196); UTRAP(0x197) 852 UTRAP(0x190); UTRAP(0x191); UTRAP(0x192); UTRAP(0x193); UTRAP(0x194); UTRAP(0x195); UTRAP(0x196); UTRAP(0x197)
853 UTRAP(0x198); UTRAP(0x199); UTRAP(0x19a); UTRAP(0x19b); UTRAP(0x19c); UTRAP(0x19d); UTRAP(0x19e); UTRAP(0x19f) 853 UTRAP(0x198); UTRAP(0x199); UTRAP(0x19a); UTRAP(0x19b); UTRAP(0x19c); UTRAP(0x19d); UTRAP(0x19e); UTRAP(0x19f)
854 UTRAP(0x1a0); UTRAP(0x1a1); UTRAP(0x1a2); UTRAP(0x1a3); UTRAP(0x1a4); UTRAP(0x1a5); UTRAP(0x1a6); UTRAP(0x1a7) 854 UTRAP(0x1a0); UTRAP(0x1a1); UTRAP(0x1a2); UTRAP(0x1a3); UTRAP(0x1a4); UTRAP(0x1a5); UTRAP(0x1a6); UTRAP(0x1a7)
855 UTRAP(0x1a8); UTRAP(0x1a9); UTRAP(0x1aa); UTRAP(0x1ab); UTRAP(0x1ac); UTRAP(0x1ad); UTRAP(0x1ae); UTRAP(0x1af) 855 UTRAP(0x1a8); UTRAP(0x1a9); UTRAP(0x1aa); UTRAP(0x1ab); UTRAP(0x1ac); UTRAP(0x1ad); UTRAP(0x1ae); UTRAP(0x1af)
856 UTRAP(0x1b0); UTRAP(0x1b1); UTRAP(0x1b2); UTRAP(0x1b3); UTRAP(0x1b4); UTRAP(0x1b5); UTRAP(0x1b6); UTRAP(0x1b7) 856 UTRAP(0x1b0); UTRAP(0x1b1); UTRAP(0x1b2); UTRAP(0x1b3); UTRAP(0x1b4); UTRAP(0x1b5); UTRAP(0x1b6); UTRAP(0x1b7)
857 UTRAP(0x1b8); UTRAP(0x1b9); UTRAP(0x1ba); UTRAP(0x1bb); UTRAP(0x1bc); UTRAP(0x1bd); UTRAP(0x1be); UTRAP(0x1bf) 857 UTRAP(0x1b8); UTRAP(0x1b9); UTRAP(0x1ba); UTRAP(0x1bb); UTRAP(0x1bc); UTRAP(0x1bd); UTRAP(0x1be); UTRAP(0x1bf)
858 UTRAP(0x1c0); UTRAP(0x1c1); UTRAP(0x1c2); UTRAP(0x1c3); UTRAP(0x1c4); UTRAP(0x1c5); UTRAP(0x1c6); UTRAP(0x1c7) 858 UTRAP(0x1c0); UTRAP(0x1c1); UTRAP(0x1c2); UTRAP(0x1c3); UTRAP(0x1c4); UTRAP(0x1c5); UTRAP(0x1c6); UTRAP(0x1c7)
859 UTRAP(0x1c8); UTRAP(0x1c9); UTRAP(0x1ca); UTRAP(0x1cb); UTRAP(0x1cc); UTRAP(0x1cd); UTRAP(0x1ce); UTRAP(0x1cf) 859 UTRAP(0x1c8); UTRAP(0x1c9); UTRAP(0x1ca); UTRAP(0x1cb); UTRAP(0x1cc); UTRAP(0x1cd); UTRAP(0x1ce); UTRAP(0x1cf)
860 UTRAP(0x1d0); UTRAP(0x1d1); UTRAP(0x1d2); UTRAP(0x1d3); UTRAP(0x1d4); UTRAP(0x1d5); UTRAP(0x1d6); UTRAP(0x1d7) 860 UTRAP(0x1d0); UTRAP(0x1d1); UTRAP(0x1d2); UTRAP(0x1d3); UTRAP(0x1d4); UTRAP(0x1d5); UTRAP(0x1d6); UTRAP(0x1d7)
861 UTRAP(0x1d8); UTRAP(0x1d9); UTRAP(0x1da); UTRAP(0x1db); UTRAP(0x1dc); UTRAP(0x1dd); UTRAP(0x1de); UTRAP(0x1df) 861 UTRAP(0x1d8); UTRAP(0x1d9); UTRAP(0x1da); UTRAP(0x1db); UTRAP(0x1dc); UTRAP(0x1dd); UTRAP(0x1de); UTRAP(0x1df)
862 UTRAP(0x1e0); UTRAP(0x1e1); UTRAP(0x1e2); UTRAP(0x1e3); UTRAP(0x1e4); UTRAP(0x1e5); UTRAP(0x1e6); UTRAP(0x1e7) 862 UTRAP(0x1e0); UTRAP(0x1e1); UTRAP(0x1e2); UTRAP(0x1e3); UTRAP(0x1e4); UTRAP(0x1e5); UTRAP(0x1e6); UTRAP(0x1e7)
863 UTRAP(0x1e8); UTRAP(0x1e9); UTRAP(0x1ea); UTRAP(0x1eb); UTRAP(0x1ec); UTRAP(0x1ed); UTRAP(0x1ee); UTRAP(0x1ef) 863 UTRAP(0x1e8); UTRAP(0x1e9); UTRAP(0x1ea); UTRAP(0x1eb); UTRAP(0x1ec); UTRAP(0x1ed); UTRAP(0x1ee); UTRAP(0x1ef)
864 UTRAP(0x1f0); UTRAP(0x1f1); UTRAP(0x1f2); UTRAP(0x1f3); UTRAP(0x1f4); UTRAP(0x1f5); UTRAP(0x1f6); UTRAP(0x1f7) 864 UTRAP(0x1f0); UTRAP(0x1f1); UTRAP(0x1f2); UTRAP(0x1f3); UTRAP(0x1f4); UTRAP(0x1f5); UTRAP(0x1f6); UTRAP(0x1f7)
865 UTRAP(0x1f8); UTRAP(0x1f9); UTRAP(0x1fa); UTRAP(0x1fb); UTRAP(0x1fc); UTRAP(0x1fd); UTRAP(0x1fe); UTRAP(0x1ff) 865 UTRAP(0x1f8); UTRAP(0x1f9); UTRAP(0x1fa); UTRAP(0x1fb); UTRAP(0x1fc); UTRAP(0x1fd); UTRAP(0x1fe); UTRAP(0x1ff)
866 866
867#if 0 867#if 0
868/* 868/*
869 * If the cleanwin trap handler detects an overflow we come here. 869 * If the cleanwin trap handler detects an overflow we come here.
870 * We need to fix up the window registers, switch to the interrupt 870 * We need to fix up the window registers, switch to the interrupt
871 * stack, and then trap to the debugger. 871 * stack, and then trap to the debugger.
872 */ 872 */
873cleanwin_overflow: 873cleanwin_overflow:
874 !! We've already incremented %cleanwin 874 !! We've already incremented %cleanwin
875 !! So restore %cwp 875 !! So restore %cwp
876 rdpr %cwp, %l0 876 rdpr %cwp, %l0
877 dec %l0 877 dec %l0
878 wrpr %l0, %g0, %cwp 878 wrpr %l0, %g0, %cwp
879 set EINTSTACK-STKB-CC64FSZ, %l0 879 set EINTSTACK-STKB-CC64FSZ, %l0
880 save %l0, 0, %sp 880 save %l0, 0, %sp
881 881
882 ta 1 ! Enter debugger 882 ta 1 ! Enter debugger
883 sethi %hi(1f), %o0 883 sethi %hi(1f), %o0
884 call _C_LABEL(panic) 884 call _C_LABEL(panic)
885 or %o0, %lo(1f), %o0 885 or %o0, %lo(1f), %o0
886 restore 886 restore
887 retry 887 retry
888 .data 888 .data
8891: 8891:
890 .asciz "Kernel stack overflow!" 890 .asciz "Kernel stack overflow!"
891 _ALIGN 891 _ALIGN
892 .text 892 .text
893#endif 893#endif
894 894
895#ifdef NOTDEF_DEBUG 895#ifdef NOTDEF_DEBUG
896/* 896/*
897 * A hardware red zone is impossible. We simulate one in software by 897 * A hardware red zone is impossible. We simulate one in software by
898 * keeping a `red zone' pointer; if %sp becomes less than this, we panic. 898 * keeping a `red zone' pointer; if %sp becomes less than this, we panic.
899 * This is expensive and is only enabled when debugging. 899 * This is expensive and is only enabled when debugging.
900 */ 900 */
901#define REDSIZE (PCB_SIZE) /* Mark used portion of pcb structure out of bounds */ 901#define REDSIZE (PCB_SIZE) /* Mark used portion of pcb structure out of bounds */
902#define REDSTACK 2048 /* size of `panic: stack overflow' region */ 902#define REDSTACK 2048 /* size of `panic: stack overflow' region */
903 .data 903 .data
904 _ALIGN 904 _ALIGN
905redzone: 905redzone:
906 .xword _C_LABEL(XXX) + REDSIZE 906 .xword _C_LABEL(XXX) + REDSIZE
907redstack: 907redstack:
908 .space REDSTACK 908 .space REDSTACK
909eredstack: 909eredstack:
910Lpanic_red: 910Lpanic_red:
911 .asciz "kernel stack overflow" 911 .asciz "kernel stack overflow"
912 _ALIGN 912 _ALIGN
913 .text 913 .text
914 914
915 /* set stack pointer redzone to base+minstack; alters base */ 915 /* set stack pointer redzone to base+minstack; alters base */
916#define SET_SP_REDZONE(base, tmp) \ 916#define SET_SP_REDZONE(base, tmp) \
917 add base, REDSIZE, base; \ 917 add base, REDSIZE, base; \
918 sethi %hi(_C_LABEL(redzone)), tmp; \ 918 sethi %hi(_C_LABEL(redzone)), tmp; \
919 stx base, [tmp + %lo(_C_LABEL(redzone))] 919 stx base, [tmp + %lo(_C_LABEL(redzone))]
920 920
921 /* variant with a constant */ 921 /* variant with a constant */
922#define SET_SP_REDZONE_CONST(const, tmp1, tmp2) \ 922#define SET_SP_REDZONE_CONST(const, tmp1, tmp2) \
923 set (const) + REDSIZE, tmp1; \ 923 set (const) + REDSIZE, tmp1; \
924 sethi %hi(_C_LABEL(redzone)), tmp2; \ 924 sethi %hi(_C_LABEL(redzone)), tmp2; \
925 stx tmp1, [tmp2 + %lo(_C_LABEL(redzone))] 925 stx tmp1, [tmp2 + %lo(_C_LABEL(redzone))]
926 926
927 /* check stack pointer against redzone (uses two temps) */ 927 /* check stack pointer against redzone (uses two temps) */
928#define CHECK_SP_REDZONE(t1, t2) \ 928#define CHECK_SP_REDZONE(t1, t2) \
929 sethi KERNBASE, t1; \ 929 sethi KERNBASE, t1; \
930 cmp %sp, t1; \ 930 cmp %sp, t1; \
931 blu,pt %xcc, 7f; \ 931 blu,pt %xcc, 7f; \
932 sethi %hi(_C_LABEL(redzone)), t1; \ 932 sethi %hi(_C_LABEL(redzone)), t1; \
933 ldx [t1 + %lo(_C_LABEL(redzone))], t2; \ 933 ldx [t1 + %lo(_C_LABEL(redzone))], t2; \
934 cmp %sp, t2; /* if sp >= t2, not in red zone */ \ 934 cmp %sp, t2; /* if sp >= t2, not in red zone */ \
935 blu panic_red; nop; /* and can continue normally */ \ 935 blu panic_red; nop; /* and can continue normally */ \
9367: 9367:
937 937
938panic_red: 938panic_red:
939 /* move to panic stack */ 939 /* move to panic stack */
940 stx %g0, [t1 + %lo(_C_LABEL(redzone))]; 940 stx %g0, [t1 + %lo(_C_LABEL(redzone))];
941 set eredstack - BIAS, %sp; 941 set eredstack - BIAS, %sp;
942 /* prevent panic() from lowering ipl */ 942 /* prevent panic() from lowering ipl */
943 sethi %hi(_C_LABEL(panicstr)), t2; 943 sethi %hi(_C_LABEL(panicstr)), t2;
944 set Lpanic_red, t2; 944 set Lpanic_red, t2;
945 st t2, [t1 + %lo(_C_LABEL(panicstr))]; 945 st t2, [t1 + %lo(_C_LABEL(panicstr))];
946 wrpr g0, 15, %pil /* t1 = splhigh() */ 946 wrpr g0, 15, %pil /* t1 = splhigh() */
947 save %sp, -CCF64SZ, %sp; /* preserve current window */ 947 save %sp, -CCF64SZ, %sp; /* preserve current window */
948 sethi %hi(Lpanic_red), %o0; 948 sethi %hi(Lpanic_red), %o0;
949 call _C_LABEL(panic); 949 call _C_LABEL(panic);
950 or %o0, %lo(Lpanic_red), %o0; 950 or %o0, %lo(Lpanic_red), %o0;
951 951
952 952
953#else 953#else
954 954
955#define SET_SP_REDZONE(base, tmp) 955#define SET_SP_REDZONE(base, tmp)
956#define SET_SP_REDZONE_CONST(const, t1, t2) 956#define SET_SP_REDZONE_CONST(const, t1, t2)
957#define CHECK_SP_REDZONE(t1, t2) 957#define CHECK_SP_REDZONE(t1, t2)
958#endif 958#endif
959 959
960#define TRACESIZ 0x01000 960#define TRACESIZ 0x01000
961 .globl _C_LABEL(trap_trace) 961 .globl _C_LABEL(trap_trace)
962 .globl _C_LABEL(trap_trace_ptr) 962 .globl _C_LABEL(trap_trace_ptr)
963 .globl _C_LABEL(trap_trace_end) 963 .globl _C_LABEL(trap_trace_end)
964 .globl _C_LABEL(trap_trace_dis) 964 .globl _C_LABEL(trap_trace_dis)
965 .data 965 .data
966_C_LABEL(trap_trace_dis): 966_C_LABEL(trap_trace_dis):
967 .word 1, 1 ! Starts disabled. DDB turns it on. 967 .word 1, 1 ! Starts disabled. DDB turns it on.
968_C_LABEL(trap_trace_ptr): 968_C_LABEL(trap_trace_ptr):
969 .word 0, 0, 0, 0 969 .word 0, 0, 0, 0
970_C_LABEL(trap_trace): 970_C_LABEL(trap_trace):
971 .space TRACESIZ 971 .space TRACESIZ
972_C_LABEL(trap_trace_end): 972_C_LABEL(trap_trace_end):
973 .space 0x20 ! safety margin 973 .space 0x20 ! safety margin
974 974
975 975
976/* 976/*
977 * v9 machines do not have a trap window. 977 * v9 machines do not have a trap window.
978 * 978 *
979 * When we take a trap the trap state is pushed on to the stack of trap 979 * When we take a trap the trap state is pushed on to the stack of trap
980 * registers, interrupts are disabled, then we switch to an alternate set 980 * registers, interrupts are disabled, then we switch to an alternate set
981 * of global registers. 981 * of global registers.
982 * 982 *
983 * The trap handling code needs to allocate a trap frame on the kernel, or 983 * The trap handling code needs to allocate a trap frame on the kernel, or
984 * for interrupts, the interrupt stack, save the out registers to the trap 984 * for interrupts, the interrupt stack, save the out registers to the trap
985 * frame, then switch to the normal globals and save them to the trap frame 985 * frame, then switch to the normal globals and save them to the trap frame
986 * too. 986 * too.
987 * 987 *
988 * XXX it would be good to save the interrupt stack frame to the kernel 988 * XXX it would be good to save the interrupt stack frame to the kernel
989 * stack so we wouldn't have to copy it later if we needed to handle an AST. 989 * stack so we wouldn't have to copy it later if we needed to handle an AST.
990 * 990 *
991 * Since kernel stacks are all on one page and the interrupt stack is entirely 991 * Since kernel stacks are all on one page and the interrupt stack is entirely
992 * within the locked TLB, we can use physical addressing to save out our 992 * within the locked TLB, we can use physical addressing to save out our
993 * trap frame so we don't trap during the TRAP_SETUP() operation. There 993 * trap frame so we don't trap during the TRAP_SETUP() operation. There
994 * is unfortunately no supportable method for issuing a non-trapping save. 994 * is unfortunately no supportable method for issuing a non-trapping save.
995 * 995 *
996 * However, if we use physical addresses to save our trapframe, we will need 996 * However, if we use physical addresses to save our trapframe, we will need
997 * to clear out the data cache before continuing much further. 997 * to clear out the data cache before continuing much further.
998 * 998 *
999 * In short, what we need to do is: 999 * In short, what we need to do is:
1000 * 1000 *
@@ -5287,1015 +5287,1076 @@ ENTRY(softint_fastintr) @@ -5287,1015 +5287,1076 @@ ENTRY(softint_fastintr)
5287 5287
5288 restore ! rewind register window 5288 restore ! rewind register window
5289 5289
5290 ld [%l0 + CI_IDEPTH], %l1 5290 ld [%l0 + CI_IDEPTH], %l1
5291 STPTR %l6, [%l0 + CI_EINTSTACK] ! restore ci_eintstack 5291 STPTR %l6, [%l0 + CI_EINTSTACK] ! restore ci_eintstack
5292 inc %l1 5292 inc %l1
5293 st %l1, [%l0 + CI_IDEPTH] ! re-adjust ci_idepth 5293 st %l1, [%l0 + CI_IDEPTH] ! re-adjust ci_idepth
5294 wrpr %g0, %l7, %pil ! restore ipl 5294 wrpr %g0, %l7, %pil ! restore ipl
5295 ret 5295 ret
5296 restore %g0, 1, %o0 5296 restore %g0, 1, %o0
5297 5297
5298/* 5298/*
5299 * Trampoline function that gets returned to by cpu_switchto() when 5299 * Trampoline function that gets returned to by cpu_switchto() when
5300 * an interrupt handler blocks. 5300 * an interrupt handler blocks.
5301 * 5301 *
5302 * Arguments: 5302 * Arguments:
5303 * o0 old lwp from cpu_switchto() 5303 * o0 old lwp from cpu_switchto()
5304 * 5304 *
5305 * from softint_fastintr(): 5305 * from softint_fastintr():
5306 * l0 CPUINFO_VA 5306 * l0 CPUINFO_VA
5307 * l6 saved ci_eintstack 5307 * l6 saved ci_eintstack
5308 * l7 saved ipl 5308 * l7 saved ipl
5309 */ 5309 */
5310softint_fastintr_ret: 5310softint_fastintr_ret:
5311 /* re-adjust after mi_switch() */ 5311 /* re-adjust after mi_switch() */
5312 ld [%l0 + CI_MTX_COUNT], %o1 5312 ld [%l0 + CI_MTX_COUNT], %o1
5313 inc %o1 ! ci_mtx_count++ 5313 inc %o1 ! ci_mtx_count++
5314 st %o1, [%l0 + CI_MTX_COUNT] 5314 st %o1, [%l0 + CI_MTX_COUNT]
5315 st %g0, [%o0 + L_CTXSWTCH] ! prev->l_ctxswtch = 0 5315 st %g0, [%o0 + L_CTXSWTCH] ! prev->l_ctxswtch = 0
5316 5316
5317 ld [%l0 + CI_IDEPTH], %l1 5317 ld [%l0 + CI_IDEPTH], %l1
5318 STPTR %l6, [%l0 + CI_EINTSTACK] ! restore ci_eintstack 5318 STPTR %l6, [%l0 + CI_EINTSTACK] ! restore ci_eintstack
5319 inc %l1 5319 inc %l1
5320 st %l1, [%l0 + CI_IDEPTH] ! re-adjust ci_idepth 5320 st %l1, [%l0 + CI_IDEPTH] ! re-adjust ci_idepth
5321 wrpr %g0, %l7, %pil ! restore ipl 5321 wrpr %g0, %l7, %pil ! restore ipl
5322 ret 5322 ret
5323 restore %g0, 1, %o0 5323 restore %g0, 1, %o0
5324 5324
5325#endif /* __HAVE_FAST_SOFTINTS */ 5325#endif /* __HAVE_FAST_SOFTINTS */
5326 5326
5327/* 5327/*
5328 * Snapshot the current process so that stack frames are up to date. 5328 * Snapshot the current process so that stack frames are up to date.
5329 * Only used just before a crash dump. 5329 * Only used just before a crash dump.
5330 */ 5330 */
5331ENTRY(snapshot) 5331ENTRY(snapshot)
5332 rdpr %pstate, %o1 ! save psr 5332 rdpr %pstate, %o1 ! save psr
5333 stx %o7, [%o0 + PCB_PC] ! save pc 5333 stx %o7, [%o0 + PCB_PC] ! save pc
5334 stx %o6, [%o0 + PCB_SP] ! save sp 5334 stx %o6, [%o0 + PCB_SP] ! save sp
5335 rdpr %pil, %o2 5335 rdpr %pil, %o2
5336 sth %o1, [%o0 + PCB_PSTATE] 5336 sth %o1, [%o0 + PCB_PSTATE]
5337 rdpr %cwp, %o3 5337 rdpr %cwp, %o3
5338 stb %o2, [%o0 + PCB_PIL] 5338 stb %o2, [%o0 + PCB_PIL]
5339 stb %o3, [%o0 + PCB_CWP] 5339 stb %o3, [%o0 + PCB_CWP]
5340 5340
5341 flushw 5341 flushw
5342 save %sp, -CC64FSZ, %sp 5342 save %sp, -CC64FSZ, %sp
5343 flushw 5343 flushw
5344 ret 5344 ret
5345 restore 5345 restore
5346 5346
5347/* 5347/*
5348 * cpu_lwp_fork() arranges for lwp_trampoline() to run when the 5348 * cpu_lwp_fork() arranges for lwp_trampoline() to run when the
5349 * nascent lwp is selected by switch(). 5349 * nascent lwp is selected by switch().
5350 * 5350 *
5351 * The switch frame will contain pointer to struct lwp of this lwp in 5351 * The switch frame will contain pointer to struct lwp of this lwp in
5352 * %l2, a pointer to the function to call in %l0, and an argument to 5352 * %l2, a pointer to the function to call in %l0, and an argument to
5353 * pass to it in %l1 (we abuse the callee-saved registers). 5353 * pass to it in %l1 (we abuse the callee-saved registers).
5354 * 5354 *
5355 * We enter lwp_trampoline as if we are "returning" from 5355 * We enter lwp_trampoline as if we are "returning" from
5356 * cpu_switchto(), so %o0 contains previous lwp (the one we are 5356 * cpu_switchto(), so %o0 contains previous lwp (the one we are
5357 * switching from) that we pass to lwp_startup(). 5357 * switching from) that we pass to lwp_startup().
5358 * 5358 *
5359 * If the function *(%l0) returns, we arrange for an immediate return 5359 * If the function *(%l0) returns, we arrange for an immediate return
5360 * to user mode. This happens in two known cases: after execve(2) of 5360 * to user mode. This happens in two known cases: after execve(2) of
5361 * init, and when returning a child to user mode after a fork(2). 5361 * init, and when returning a child to user mode after a fork(2).
5362 * 5362 *
5363 * If we're setting up a kernel thread, the function *(%l0) will not 5363 * If we're setting up a kernel thread, the function *(%l0) will not
5364 * return. 5364 * return.
5365 */ 5365 */
5366ENTRY(lwp_trampoline) 5366ENTRY(lwp_trampoline)
5367 /* 5367 /*
5368 * Note: cpu_lwp_fork() has set up a stack frame for us to run 5368 * Note: cpu_lwp_fork() has set up a stack frame for us to run
5369 * in, so we can call other functions from here without using 5369 * in, so we can call other functions from here without using
5370 * `save ... restore'. 5370 * `save ... restore'.
5371 */ 5371 */
5372 5372
5373 ! newlwp in %l2, oldlwp in %o0 5373 ! newlwp in %l2, oldlwp in %o0
5374 call lwp_startup 5374 call lwp_startup
5375 mov %l2, %o1 5375 mov %l2, %o1
5376 5376
5377 call %l0 ! re-use current frame 5377 call %l0 ! re-use current frame
5378 mov %l1, %o0 5378 mov %l1, %o0
5379 5379
5380 /* 5380 /*
5381 * Going to userland - set proper tstate in trap frame 5381 * Going to userland - set proper tstate in trap frame
5382 */ 5382 */
5383 set (ASI_PRIMARY_NO_FAULT<<TSTATE_ASI_SHIFT)|((PSTATE_USER)<<TSTATE_PSTATE_SHIFT), %g1 5383 set (ASI_PRIMARY_NO_FAULT<<TSTATE_ASI_SHIFT)|((PSTATE_USER)<<TSTATE_PSTATE_SHIFT), %g1
5384 stx %g1, [%sp + CC64FSZ + STKB + TF_TSTATE] 5384 stx %g1, [%sp + CC64FSZ + STKB + TF_TSTATE]
5385 5385
5386 /* 5386 /*
5387 * Here we finish up as in syscall, but simplified. 5387 * Here we finish up as in syscall, but simplified.
5388 */ 5388 */
5389 ba,a,pt %icc, return_from_trap 5389 ba,a,pt %icc, return_from_trap
5390 nop 5390 nop
5391 5391
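The control flow documented in the block comment above lwp_trampoline can be restated in C. This is a minimal sketch rather than kernel source: struct lwp is treated as opaque, lwp_startup() is stubbed out, and the parameters stand in for the switch-frame slots (%o0 = previous lwp, %l2 = new lwp, %l0 = function, %l1 = argument).

    /* Sketch of lwp_trampoline's control flow; illustrative only. */
    struct lwp;                                     /* opaque stand-in */
    static void lwp_startup(struct lwp *prev, struct lwp *l) { (void)prev; (void)l; }

    static void
    lwp_trampoline_sketch(struct lwp *prev, struct lwp *l,
        void (*func)(void *), void *arg)
    {
            lwp_startup(prev, l);   /* %o0 = prev, %l2 passed as second arg */
            (*func)(arg);           /* %l0 = func, %l1 = arg */
            /*
             * Kernel threads never return from func.  If it does return
             * (execve of init, or a forked child heading to user mode),
             * the assembly installs a user %tstate in the trap frame and
             * branches to return_from_trap.
             */
    }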
5392/* 5392/*
5393 * pmap_zero_page_phys(pa) 5393 * pmap_zero_page_phys(pa)
5394 * 5394 *
5395 * Zero one page physically addressed 5395 * Zero one page physically addressed
5396 * 5396 *
5397 * Block load/store ASIs do not exist for physical addresses, 5397 * Block load/store ASIs do not exist for physical addresses,
5398 * so we won't use them. 5398 * so we won't use them.
5399 * 5399 *
5400 * We will execute a flush at the end to sync the I$. 5400 * We will execute a flush at the end to sync the I$.
5401 * 5401 *
5402 * This version expects dcache_flush_page_all(pa) to have been 5402 * This version expects dcache_flush_page_all(pa) to have been
5403 * called before calling into here. 5403 * called before calling into here.
5404 */ 5404 */
5405ENTRY(pmap_zero_page_phys) 5405ENTRY(pmap_zero_page_phys)
5406#ifndef _LP64 5406#ifndef _LP64
5407 COMBINE(%o0, %o1, %o0) 5407 COMBINE(%o0, %o1, %o0)
5408#endif 5408#endif
5409#ifdef DEBUG 5409#ifdef DEBUG
5410 set pmapdebug, %o4 5410 set pmapdebug, %o4
5411 ld [%o4], %o4 5411 ld [%o4], %o4
5412 btst 0x80, %o4 ! PDB_COPY 5412 btst 0x80, %o4 ! PDB_COPY
5413 bz,pt %icc, 3f 5413 bz,pt %icc, 3f
5414 nop 5414 nop
5415 save %sp, -CC64FSZ, %sp 5415 save %sp, -CC64FSZ, %sp
5416 set 2f, %o0 5416 set 2f, %o0
5417 call printf 5417 call printf
5418 mov %i0, %o1 5418 mov %i0, %o1
5419! ta 1; nop 5419! ta 1; nop
5420 restore 5420 restore
5421 .data 5421 .data
54222: .asciz "pmap_zero_page(%p)\n" 54222: .asciz "pmap_zero_page(%p)\n"
5423 _ALIGN 5423 _ALIGN
5424 .text 5424 .text
54253: 54253:
5426#endif 5426#endif
5427 set NBPG, %o2 ! Loop count 5427 set NBPG, %o2 ! Loop count
5428 wr %g0, ASI_PHYS_CACHED, %asi 5428 wr %g0, ASI_PHYS_CACHED, %asi
54291: 54291:
5430 /* Unroll the loop 8 times */ 5430 /* Unroll the loop 8 times */
5431 stxa %g0, [%o0 + 0x00] %asi 5431 stxa %g0, [%o0 + 0x00] %asi
5432 deccc 0x40, %o2 5432 deccc 0x40, %o2
5433 stxa %g0, [%o0 + 0x08] %asi 5433 stxa %g0, [%o0 + 0x08] %asi
5434 stxa %g0, [%o0 + 0x10] %asi 5434 stxa %g0, [%o0 + 0x10] %asi
5435 stxa %g0, [%o0 + 0x18] %asi 5435 stxa %g0, [%o0 + 0x18] %asi
5436 stxa %g0, [%o0 + 0x20] %asi 5436 stxa %g0, [%o0 + 0x20] %asi
5437 stxa %g0, [%o0 + 0x28] %asi 5437 stxa %g0, [%o0 + 0x28] %asi
5438 stxa %g0, [%o0 + 0x30] %asi 5438 stxa %g0, [%o0 + 0x30] %asi
5439 stxa %g0, [%o0 + 0x38] %asi 5439 stxa %g0, [%o0 + 0x38] %asi
5440 bg,pt %icc, 1b 5440 bg,pt %icc, 1b
5441 inc 0x40, %o0 5441 inc 0x40, %o0
5442 5442
5443 sethi %hi(KERNBASE), %o3 5443 sethi %hi(KERNBASE), %o3
5444 flush %o3 5444 flush %o3
5445 retl 5445 retl
5446 wr %g0, ASI_PRIMARY_NOFAULT, %asi ! Make C code happy 5446 wr %g0, ASI_PRIMARY_NOFAULT, %asi ! Make C code happy
5447 5447
5448/* 5448/*
5449 * pmap_copy_page_phys(paddr_t src, paddr_t dst) 5449 * pmap_copy_page_phys(paddr_t src, paddr_t dst)
5450 * 5450 *
5451 * Copy one page physically addressed 5451 * Copy one page physically addressed
5452 * We need to use a global reg for ldxa/stxa 5452 * We need to use a global reg for ldxa/stxa
5453 * so the top 32-bits cannot be lost if we take 5453 * so the top 32-bits cannot be lost if we take
5454 * a trap and need to save our stack frame to a 5454 * a trap and need to save our stack frame to a
5455 * 32-bit stack. We will unroll the loop by 4 to 5455 * 32-bit stack. We will unroll the loop by 4 to
5456 * improve performance. 5456 * improve performance.
5457 * 5457 *
5458 * This version expects dcache_flush_page_all(pa) to have been 5458 * This version expects dcache_flush_page_all(pa) to have been
5459 * called before calling into here. 5459 * called before calling into here.
5460 * 5460 *
5461 */ 5461 */
5462ENTRY(pmap_copy_page_phys) 5462ENTRY(pmap_copy_page_phys)
5463#ifndef _LP64 5463#ifndef _LP64
5464 COMBINE(%o0, %o1, %o0) 5464 COMBINE(%o0, %o1, %o0)
5465 COMBINE(%o2, %o3, %o1) 5465 COMBINE(%o2, %o3, %o1)
5466#endif 5466#endif
5467#ifdef DEBUG 5467#ifdef DEBUG
5468 set pmapdebug, %o4 5468 set pmapdebug, %o4
5469 ld [%o4], %o4 5469 ld [%o4], %o4
5470 btst 0x80, %o4 ! PDB_COPY 5470 btst 0x80, %o4 ! PDB_COPY
5471 bz,pt %icc, 3f 5471 bz,pt %icc, 3f
5472 nop 5472 nop
5473 save %sp, -CC64FSZ, %sp 5473 save %sp, -CC64FSZ, %sp
5474 mov %i0, %o1 5474 mov %i0, %o1
5475 set 2f, %o0 5475 set 2f, %o0
5476 call printf 5476 call printf
5477 mov %i1, %o2 5477 mov %i1, %o2
5478! ta 1; nop 5478! ta 1; nop
5479 restore 5479 restore
5480 .data 5480 .data
54812: .asciz "pmap_copy_page(%p,%p)\n" 54812: .asciz "pmap_copy_page(%p,%p)\n"
5482 _ALIGN 5482 _ALIGN
5483 .text 5483 .text
54843: 54843:
5485#endif 5485#endif
5486#if 1 5486#if 1
5487 set NBPG, %o2 5487 set NBPG, %o2
5488 wr %g0, ASI_PHYS_CACHED, %asi 5488 wr %g0, ASI_PHYS_CACHED, %asi
54891: 54891:
5490 ldxa [%o0 + 0x00] %asi, %g1 5490 ldxa [%o0 + 0x00] %asi, %g1
5491 ldxa [%o0 + 0x08] %asi, %o3 5491 ldxa [%o0 + 0x08] %asi, %o3
5492 ldxa [%o0 + 0x10] %asi, %o4 5492 ldxa [%o0 + 0x10] %asi, %o4
5493 ldxa [%o0 + 0x18] %asi, %o5 5493 ldxa [%o0 + 0x18] %asi, %o5
5494 inc 0x20, %o0 5494 inc 0x20, %o0
5495 deccc 0x20, %o2 5495 deccc 0x20, %o2
5496 stxa %g1, [%o1 + 0x00] %asi 5496 stxa %g1, [%o1 + 0x00] %asi
5497 stxa %o3, [%o1 + 0x08] %asi 5497 stxa %o3, [%o1 + 0x08] %asi
5498 stxa %o4, [%o1 + 0x10] %asi 5498 stxa %o4, [%o1 + 0x10] %asi
5499 stxa %o5, [%o1 + 0x18] %asi 5499 stxa %o5, [%o1 + 0x18] %asi
5500 bg,pt %icc, 1b ! We don't care about pages >4GB 5500 bg,pt %icc, 1b ! We don't care about pages >4GB
5501 inc 0x20, %o1 5501 inc 0x20, %o1
5502 retl 5502 retl
5503 wr %g0, ASI_PRIMARY_NOFAULT, %asi 5503 wr %g0, ASI_PRIMARY_NOFAULT, %asi
5504#else 5504#else
5505 set NBPG, %o3 5505 set NBPG, %o3
5506 add %o3, %o0, %o3 5506 add %o3, %o0, %o3
5507 mov %g1, %o4 ! Save g1 5507 mov %g1, %o4 ! Save g1
55081: 55081:
5509 ldxa [%o0] ASI_PHYS_CACHED, %g1 5509 ldxa [%o0] ASI_PHYS_CACHED, %g1
5510 inc 8, %o0 5510 inc 8, %o0
5511 cmp %o0, %o3 5511 cmp %o0, %o3
5512 stxa %g1, [%o1] ASI_PHYS_CACHED 5512 stxa %g1, [%o1] ASI_PHYS_CACHED
5513 bl,pt %icc, 1b ! We don't care about pages >4GB 5513 bl,pt %icc, 1b ! We don't care about pages >4GB
5514 inc 8, %o1 5514 inc 8, %o1
5515 retl 5515 retl
5516 mov %o4, %g1 ! Restore g1 5516 mov %o4, %g1 ! Restore g1
5517#endif 5517#endif
5518 5518
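The copy loop above moves the page in 32-byte chunks, four 64-bit loads followed by four 64-bit stores per iteration, with all accesses going through ASI_PHYS_CACHED so physical addresses can be used directly. Plain C cannot express the alternate-space accesses, but the loop structure looks roughly like this sketch (the 8 KB page size is an assumption for illustration):

    #include <stdint.h>
    #include <stddef.h>

    #define NBPG_SKETCH 8192            /* assumed page size */

    /* Unrolled-by-4 copy of one page; mirrors the structure of the
     * assembly loop, minus the physical-address ASI. */
    static void
    copy_page_sketch(uint64_t *dst, const uint64_t *src)
    {
            for (size_t i = 0; i < NBPG_SKETCH / sizeof(uint64_t); i += 4) {
                    uint64_t a = src[i + 0], b = src[i + 1];
                    uint64_t c = src[i + 2], d = src[i + 3];
                    dst[i + 0] = a; dst[i + 1] = b;
                    dst[i + 2] = c; dst[i + 3] = d;
            }
    }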
5519/* 5519/*
5520 * extern int64_t pseg_get_real(struct pmap *pm, vaddr_t addr); 5520 * extern int64_t pseg_get_real(struct pmap *pm, vaddr_t addr);
5521 * 5521 *
5522 * Return TTE at addr in pmap. Uses physical addressing only. 5522 * Return TTE at addr in pmap. Uses physical addressing only.
5523 * pmap->pm_physaddr must be the physical address of pm_segs 5523 * pmap->pm_physaddr must be the physical address of pm_segs
5524 * 5524 *
5525 */ 5525 */
5526ENTRY(pseg_get_real) 5526ENTRY(pseg_get_real)
5527! flushw ! Make sure we don't have stack probs & lose hibits of %o 5527! flushw ! Make sure we don't have stack probs & lose hibits of %o
5528#ifndef _LP64 5528#ifndef _LP64
5529 clruw %o1 ! Zero extend 5529 clruw %o1 ! Zero extend
5530#endif 5530#endif
5531 ldx [%o0 + PM_PHYS], %o2 ! pmap->pm_segs 5531 ldx [%o0 + PM_PHYS], %o2 ! pmap->pm_segs
5532 5532
5533 srax %o1, HOLESHIFT, %o3 ! Check for valid address 5533 srax %o1, HOLESHIFT, %o3 ! Check for valid address
5534 brz,pt %o3, 0f ! Should be zero or -1 5534 brz,pt %o3, 0f ! Should be zero or -1
5535 inc %o3 ! Make -1 -> 0 5535 inc %o3 ! Make -1 -> 0
5536 brnz,pn %o3, 1f ! Error! In hole! 5536 brnz,pn %o3, 1f ! Error! In hole!
55370: 55370:
5538 srlx %o1, STSHIFT, %o3 5538 srlx %o1, STSHIFT, %o3
5539 and %o3, STMASK, %o3 ! Index into pm_segs 5539 and %o3, STMASK, %o3 ! Index into pm_segs
5540 sll %o3, 3, %o3 5540 sll %o3, 3, %o3
5541 add %o2, %o3, %o2 5541 add %o2, %o3, %o2
5542 DLFLUSH(%o2,%o3) 5542 DLFLUSH(%o2,%o3)
5543 ldxa [%o2] ASI_PHYS_CACHED, %o2 ! Load page directory pointer 5543 ldxa [%o2] ASI_PHYS_CACHED, %o2 ! Load page directory pointer
5544 DLFLUSH2(%o3) 5544 DLFLUSH2(%o3)
5545 5545
5546 srlx %o1, PDSHIFT, %o3 5546 srlx %o1, PDSHIFT, %o3
5547 and %o3, PDMASK, %o3 5547 and %o3, PDMASK, %o3
5548 sll %o3, 3, %o3 5548 sll %o3, 3, %o3
5549 brz,pn %o2, 1f ! NULL entry? check somewhere else 5549 brz,pn %o2, 1f ! NULL entry? check somewhere else
5550 add %o2, %o3, %o2 5550 add %o2, %o3, %o2
5551 DLFLUSH(%o2,%o3) 5551 DLFLUSH(%o2,%o3)
5552 ldxa [%o2] ASI_PHYS_CACHED, %o2 ! Load page table pointer 5552 ldxa [%o2] ASI_PHYS_CACHED, %o2 ! Load page table pointer
5553 DLFLUSH2(%o3) 5553 DLFLUSH2(%o3)
5554 5554
5555 srlx %o1, PTSHIFT, %o3 ! Convert to ptab offset 5555 srlx %o1, PTSHIFT, %o3 ! Convert to ptab offset
5556 and %o3, PTMASK, %o3 5556 and %o3, PTMASK, %o3
5557 sll %o3, 3, %o3 5557 sll %o3, 3, %o3
5558 brz,pn %o2, 1f ! NULL entry? check somewhere else 5558 brz,pn %o2, 1f ! NULL entry? check somewhere else
5559 add %o2, %o3, %o2 5559 add %o2, %o3, %o2
5560 DLFLUSH(%o2,%o3) 5560 DLFLUSH(%o2,%o3)
5561 ldxa [%o2] ASI_PHYS_CACHED, %o0 5561 ldxa [%o2] ASI_PHYS_CACHED, %o0
5562 DLFLUSH2(%o3) 5562 DLFLUSH2(%o3)
5563 brgez,pn %o0, 1f ! Entry invalid? Punt 5563 brgez,pn %o0, 1f ! Entry invalid? Punt
5564 btst 1, %sp 5564 btst 1, %sp
5565 bz,pn %icc, 0f ! 64-bit mode? 5565 bz,pn %icc, 0f ! 64-bit mode?
5566 nop 5566 nop
5567 retl ! Yes, return full value 5567 retl ! Yes, return full value
5568 nop 5568 nop
55690: 55690:
5570#if 1 5570#if 1
5571 srl %o0, 0, %o1 5571 srl %o0, 0, %o1
5572 retl ! No, generate a %o0:%o1 double 5572 retl ! No, generate a %o0:%o1 double
5573 srlx %o0, 32, %o0 5573 srlx %o0, 32, %o0
5574#else 5574#else
5575 DLFLUSH(%o2,%o3) 5575 DLFLUSH(%o2,%o3)
5576 ldda [%o2] ASI_PHYS_CACHED, %o0 5576 ldda [%o2] ASI_PHYS_CACHED, %o0
5577 DLFLUSH2(%o3) 5577 DLFLUSH2(%o3)
5578 retl ! No, generate a %o0:%o1 double 5578 retl ! No, generate a %o0:%o1 double
5579 nop 5579 nop
5580#endif 5580#endif
55811: 55811:
5582#ifndef _LP64 5582#ifndef _LP64
5583 clr %o1 5583 clr %o1
5584#endif 5584#endif
5585 retl 5585 retl
5586 clr %o0 5586 clr %o0
5587 5587
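pseg_get_real above is a three-level table walk keyed on the segment, directory and page-table fields of the virtual address, performed entirely through physical addresses. The following C sketch shows the shape of that walk; the shift/mask values are placeholders for the real pmap.h constants, and phys_read64() stands in for an ldxa from ASI_PHYS_CACHED:

    #include <stdint.h>

    /* Placeholder constants; the real values come from pmap.h. */
    #define HOLESHIFT_SK    43
    #define STSHIFT_SK      33
    #define PDSHIFT_SK      23
    #define PTSHIFT_SK      13
    #define STMASK_SK       0x3ff
    #define PDMASK_SK       0x3ff
    #define PTMASK_SK       0x3ff

    /* Stand-in for a 64-bit load via ASI_PHYS_CACHED. */
    static uint64_t phys_read64(uint64_t pa) { (void)pa; return 0; }

    static int64_t
    pseg_get_sketch(uint64_t pm_segs_pa, uint64_t va)
    {
            int64_t hole = (int64_t)va >> HOLESHIFT_SK;
            uint64_t dir, ptp;
            int64_t tte;

            if (hole != 0 && hole != -1)
                    return 0;               /* address falls in the VA hole */

            dir = phys_read64(pm_segs_pa + ((va >> STSHIFT_SK) & STMASK_SK) * 8);
            if (dir == 0)
                    return 0;               /* no directory for this segment */
            ptp = phys_read64(dir + ((va >> PDSHIFT_SK) & PDMASK_SK) * 8);
            if (ptp == 0)
                    return 0;               /* no page table here */
            tte = (int64_t)phys_read64(ptp + ((va >> PTSHIFT_SK) & PTMASK_SK) * 8);
            return (tte < 0) ? tte : 0;     /* valid TTEs have bit 63 set */
    }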
5588/* 5588/*
5589 * In 32-bit mode: 5589 * In 32-bit mode:
5590 * 5590 *
5591 * extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1, 5591 * extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1,
5592 * int64_t tte %o2:%o3, paddr_t spare %o4:%o5); 5592 * int64_t tte %o2:%o3, paddr_t spare %o4:%o5);
5593 * 5593 *
5594 * In 64-bit mode: 5594 * In 64-bit mode:
5595 * 5595 *
5596 * extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1, 5596 * extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1,
5597 * int64_t tte %o2, paddr_t spare %o3); 5597 * int64_t tte %o2, paddr_t spare %o3);
5598 * 5598 *
5599 * Set a pseg entry to a particular TTE value. Return values are: 5599 * Set a pseg entry to a particular TTE value. Return values are:
5600 * 5600 *
5601 * -2 addr in hole 5601 * -2 addr in hole
5602 * 0 success (spare was not used if given) 5602 * 0 success (spare was not used if given)
5603 * 1 failure (spare was not given, but one is needed) 5603 * 1 failure (spare was not given, but one is needed)
5604 * 2 success (spare was given, used for L2) 5604 * 2 success (spare was given, used for L2)
5605 * 3 failure (spare was given, used for L2, another is needed for L3) 5605 * 3 failure (spare was given, used for L2, another is needed for L3)
5606 * 4 success (spare was given, used for L3) 5606 * 4 success (spare was given, used for L3)
5607 * 5607 *
5608 * rv == 0 success, spare not used if one was given 5608 * rv == 0 success, spare not used if one was given
5609 * rv & 4 spare was used for L3 5609 * rv & 4 spare was used for L3
5610 * rv & 2 spare was used for L2 5610 * rv & 2 spare was used for L2
5611 * rv & 1 failure, spare is needed 5611 * rv & 1 failure, spare is needed
5612 * 5612 *
5613 * (NB: nobody in pmap checks for the virtual hole, so the system will hang.) 5613 * (NB: nobody in pmap checks for the virtual hole, so the system will hang.)
5614 * The way to call this is: first just call it without a spare page. 5614 * The way to call this is: first just call it without a spare page.
5615 * If that fails, allocate a page and try again, passing the paddr of the 5615 * If that fails, allocate a page and try again, passing the paddr of the
5616 * new page as the spare. 5616 * new page as the spare.
5617 * If spare is non-zero it is assumed to be the address of a zeroed physical 5617 * If spare is non-zero it is assumed to be the address of a zeroed physical
5618 * page that can be used to generate a directory table or page table if needed. 5618 * page that can be used to generate a directory table or page table if needed.
5619 * 5619 *
5620 * We keep track of valid (A_TLB_V bit set) and wired (A_TLB_TSB_LOCK bit set) 5620 * We keep track of valid (A_TLB_V bit set) and wired (A_TLB_TSB_LOCK bit set)
5621 * mappings that are set here. We check both bits on the new data entered 5621 * mappings that are set here. We check both bits on the new data entered
5622 * and increment counts, as well as decrementing counts if the bits are set 5622 * and increment counts, as well as decrementing counts if the bits are set
5623 * in the value replaced by this call. 5623 * in the value replaced by this call.
5624 * The counters are 32 bit or 64 bit wide, depending on the kernel type we are 5624 * The counters are 32 bit or 64 bit wide, depending on the kernel type we are
5625 * running! 5625 * running!
5626 */ 5626 */
5627ENTRY(pseg_set_real) 5627ENTRY(pseg_set_real)
5628#ifndef _LP64 5628#ifndef _LP64
5629 clruw %o1 ! Zero extend 5629 clruw %o1 ! Zero extend
5630 COMBINE(%o2, %o3, %o2) 5630 COMBINE(%o2, %o3, %o2)
5631 COMBINE(%o4, %o5, %o3) 5631 COMBINE(%o4, %o5, %o3)
5632#endif 5632#endif
5633 !! 5633 !!
5634 !! However we managed to get here, we now have: 5634 !! However we managed to get here, we now have:
5635 !! 5635 !!
5636 !! %o0 = *pmap 5636 !! %o0 = *pmap
5637 !! %o1 = addr 5637 !! %o1 = addr
5638 !! %o2 = tte 5638 !! %o2 = tte
5639 !! %o3 = paddr of spare page 5639 !! %o3 = paddr of spare page
5640 !! 5640 !!
5641 srax %o1, HOLESHIFT, %o4 ! Check for valid address 5641 srax %o1, HOLESHIFT, %o4 ! Check for valid address
5642 brz,pt %o4, 0f ! Should be zero or -1 5642 brz,pt %o4, 0f ! Should be zero or -1
5643 inc %o4 ! Make -1 -> 0 5643 inc %o4 ! Make -1 -> 0
5644 brz,pt %o4, 0f 5644 brz,pt %o4, 0f
5645 nop 5645 nop
5646#ifdef DEBUG 5646#ifdef DEBUG
5647 ta 1 ! Break into debugger 5647 ta 1 ! Break into debugger
5648#endif 5648#endif
5649 retl 5649 retl
5650 mov -2, %o0 ! Error -- in hole! 5650 mov -2, %o0 ! Error -- in hole!
5651 5651
56520: 56520:
5653 ldx [%o0 + PM_PHYS], %o4 ! pmap->pm_segs 5653 ldx [%o0 + PM_PHYS], %o4 ! pmap->pm_segs
5654 clr %g1 5654 clr %g1
5655 srlx %o1, STSHIFT, %o5 5655 srlx %o1, STSHIFT, %o5
5656 and %o5, STMASK, %o5 5656 and %o5, STMASK, %o5
5657 sll %o5, 3, %o5 5657 sll %o5, 3, %o5
5658 add %o4, %o5, %o4 5658 add %o4, %o5, %o4
56590: 56590:
5660 DLFLUSH(%o4,%g5) 5660 DLFLUSH(%o4,%g5)
5661 ldxa [%o4] ASI_PHYS_CACHED, %o5 ! Load page directory pointer 5661 ldxa [%o4] ASI_PHYS_CACHED, %o5 ! Load page directory pointer
5662 DLFLUSH2(%g5) 5662 DLFLUSH2(%g5)
5663 5663
5664 brnz,a,pt %o5, 0f ! Null pointer? 5664 brnz,a,pt %o5, 0f ! Null pointer?
5665 mov %o5, %o4 5665 mov %o5, %o4
5666 brz,pn %o3, 9f ! Have a spare? 5666 brz,pn %o3, 9f ! Have a spare?
5667 mov %o3, %o5 5667 mov %o3, %o5
5668 casxa [%o4] ASI_PHYS_CACHED, %g0, %o5 5668 casxa [%o4] ASI_PHYS_CACHED, %g0, %o5
5669 brnz,pn %o5, 0b ! Something changed? 5669 brnz,pn %o5, 0b ! Something changed?
5670 DLFLUSH(%o4, %o5) 5670 DLFLUSH(%o4, %o5)
5671 mov %o3, %o4 5671 mov %o3, %o4
5672 mov 2, %g1 ! record spare used for L2 5672 mov 2, %g1 ! record spare used for L2
5673 clr %o3 ! and not available for L3 5673 clr %o3 ! and not available for L3
56740: 56740:
5675 srlx %o1, PDSHIFT, %o5 5675 srlx %o1, PDSHIFT, %o5
5676 and %o5, PDMASK, %o5 5676 and %o5, PDMASK, %o5
5677 sll %o5, 3, %o5 5677 sll %o5, 3, %o5
5678 add %o4, %o5, %o4 5678 add %o4, %o5, %o4
56790: 56790:
5680 DLFLUSH(%o4,%g5) 5680 DLFLUSH(%o4,%g5)
5681 ldxa [%o4] ASI_PHYS_CACHED, %o5 ! Load table directory pointer 5681 ldxa [%o4] ASI_PHYS_CACHED, %o5 ! Load table directory pointer
5682 DLFLUSH2(%g5) 5682 DLFLUSH2(%g5)
5683 5683
5684 brnz,a,pt %o5, 0f ! Null pointer? 5684 brnz,a,pt %o5, 0f ! Null pointer?
5685 mov %o5, %o4 5685 mov %o5, %o4
5686 brz,pn %o3, 9f ! Have a spare? 5686 brz,pn %o3, 9f ! Have a spare?
5687 mov %o3, %o5 5687 mov %o3, %o5
5688 casxa [%o4] ASI_PHYS_CACHED, %g0, %o5 5688 casxa [%o4] ASI_PHYS_CACHED, %g0, %o5
5689 brnz,pn %o5, 0b ! Something changed? 5689 brnz,pn %o5, 0b ! Something changed?
5690 DLFLUSH(%o4, %o4) 5690 DLFLUSH(%o4, %o4)
5691 mov %o3, %o4 5691 mov %o3, %o4
5692 mov 4, %g1 ! record spare used for L3 5692 mov 4, %g1 ! record spare used for L3
56930: 56930:
5694 srlx %o1, PTSHIFT, %o5 ! Convert to ptab offset 5694 srlx %o1, PTSHIFT, %o5 ! Convert to ptab offset
5695 and %o5, PTMASK, %o5 5695 and %o5, PTMASK, %o5
5696 sll %o5, 3, %o5 5696 sll %o5, 3, %o5
5697 add %o5, %o4, %o4 5697 add %o5, %o4, %o4
5698 5698
5699 DLFLUSH(%o4,%g5) 5699 DLFLUSH(%o4,%g5)
5700 ldxa [%o4] ASI_PHYS_CACHED, %o5 ! save old value in %o5 5700 ldxa [%o4] ASI_PHYS_CACHED, %o5 ! save old value in %o5
5701 stxa %o2, [%o4] ASI_PHYS_CACHED ! Easier than shift+or 5701 stxa %o2, [%o4] ASI_PHYS_CACHED ! Easier than shift+or
5702 DLFLUSH2(%g5) 5702 DLFLUSH2(%g5)
5703 5703
5704 !! at this point we have: 5704 !! at this point we have:
5705 !! %g1 = return value 5705 !! %g1 = return value
5706 !! %o0 = struct pmap * (where the counts are) 5706 !! %o0 = struct pmap * (where the counts are)
5707 !! %o2 = new TTE 5707 !! %o2 = new TTE
5708 !! %o5 = old TTE 5708 !! %o5 = old TTE
5709 5709
5710 !! see if stats needs an update 5710 !! see if stats needs an update
5711 set A_TLB_TSB_LOCK, %g5 5711 set A_TLB_TSB_LOCK, %g5
5712 xor %o2, %o5, %o3 ! %o3 - what changed 5712 xor %o2, %o5, %o3 ! %o3 - what changed
5713 5713
5714 brgez,pn %o3, 5f ! has resident changed? (we predict it has) 5714 brgez,pn %o3, 5f ! has resident changed? (we predict it has)
5715 btst %g5, %o3 ! has wired changed? 5715 btst %g5, %o3 ! has wired changed?
5716 5716
5717 LDPTR [%o0 + PM_RESIDENT], %o1 ! gonna update resident count 5717 LDPTR [%o0 + PM_RESIDENT], %o1 ! gonna update resident count
5718 brlz %o2, 0f 5718 brlz %o2, 0f
5719 mov 1, %o4 5719 mov 1, %o4
5720 neg %o4 ! new is not resident -> decrement 5720 neg %o4 ! new is not resident -> decrement
57210: add %o1, %o4, %o1 57210: add %o1, %o4, %o1
5722 STPTR %o1, [%o0 + PM_RESIDENT] 5722 STPTR %o1, [%o0 + PM_RESIDENT]
5723 btst %g5, %o3 ! has wired changed? 5723 btst %g5, %o3 ! has wired changed?
57245: bz,pt %xcc, 8f ! we predict it's not 57245: bz,pt %xcc, 8f ! we predict it's not
5725 btst %g5, %o2 ! don't waste delay slot, check if new one is wired 5725 btst %g5, %o2 ! don't waste delay slot, check if new one is wired
5726 LDPTR [%o0 + PM_WIRED], %o1 ! gonna update wired count 5726 LDPTR [%o0 + PM_WIRED], %o1 ! gonna update wired count
5727 bnz,pt %xcc, 0f ! if wired changes, we predict it increments 5727 bnz,pt %xcc, 0f ! if wired changes, we predict it increments
5728 mov 1, %o4 5728 mov 1, %o4
5729 neg %o4 ! new is not wired -> decrement 5729 neg %o4 ! new is not wired -> decrement
57300: add %o1, %o4, %o1 57300: add %o1, %o4, %o1
5731 STPTR %o1, [%o0 + PM_WIRED] 5731 STPTR %o1, [%o0 + PM_WIRED]
57328: retl 57328: retl
5733 mov %g1, %o0 ! return %g1 5733 mov %g1, %o0 ! return %g1
5734 5734
57359: retl 57359: retl
5736 or %g1, 1, %o0 ! spare needed, return flags + 1 5736 or %g1, 1, %o0 ! spare needed, return flags + 1
5737 5737
5738 5738
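The retry protocol described in the block comment above pseg_set_real looks like this from the caller's side. This is a hedged sketch: pseg_set_real's prototype is the 64-bit one quoted in the comment, while alloc_zeroed_page() and the error handling are illustrative assumptions (the real pmap code also frees a spare that ends up unused, which is omitted here):

    #include <stdint.h>

    struct pmap;                            /* opaque stand-in */

    /* 64-bit prototype from the comment above. */
    int pseg_set_real(struct pmap *, uint64_t va, int64_t tte, uint64_t spare);

    /* Assumed helper: physical address of a zeroed page, or 0 on failure. */
    uint64_t alloc_zeroed_page(void);

    static int
    pseg_set_caller_sketch(struct pmap *pm, uint64_t va, int64_t tte)
    {
            uint64_t spare = 0;
            int rv;

            for (;;) {
                    rv = pseg_set_real(pm, va, tte, spare);
                    if (rv == -2)
                            return -1;      /* va is in the hole */
                    if ((rv & 1) == 0)
                            return 0;       /* success; rv & 2 / rv & 4 tell
                                             * whether the spare became an
                                             * L2 or L3 table */
                    spare = alloc_zeroed_page();
                    if (spare == 0)
                            return -1;      /* no memory for a new table */
            }
    }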
5739/* 5739/*
5740 * clearfpstate() 5740 * clearfpstate()
5741 * 5741 *
5742 * Drops the current fpu state, without saving it. 5742 * Drops the current fpu state, without saving it.
5743 */ 5743 */
5744ENTRY(clearfpstate) 5744ENTRY(clearfpstate)
5745 rdpr %pstate, %o1 ! enable FPU 5745 rdpr %pstate, %o1 ! enable FPU
5746 wr %g0, FPRS_FEF, %fprs 5746 wr %g0, FPRS_FEF, %fprs
5747 or %o1, PSTATE_PEF, %o1 5747 or %o1, PSTATE_PEF, %o1
5748 retl 5748 retl
5749 wrpr %o1, 0, %pstate 5749 wrpr %o1, 0, %pstate
5750 5750
5751/* 5751/*
5752 * savefpstate(f) struct fpstate *f; 5752 * savefpstate(f) struct fpstate *f;
5753 * 5753 *
5754 * Store the current FPU state. 5754 * Store the current FPU state.
5755 * 5755 *
5756 * Since the kernel may need to use the FPU and we have problems atomically 5756 * Since the kernel may need to use the FPU and we have problems atomically
5757 * testing and enabling the FPU, we leave here with the FPRS_FEF bit set. 5757 * testing and enabling the FPU, we leave here with the FPRS_FEF bit set.
5758 * Normally this should be turned on in loadfpstate(). 5758 * Normally this should be turned on in loadfpstate().
5759 */ 5759 */
5760 /* XXXXXXXXXX Assume caller created a proper stack frame */ 5760 /* XXXXXXXXXX Assume caller created a proper stack frame */
5761ENTRY(savefpstate) 5761ENTRY(savefpstate)
5762! flushw ! Make sure we don't have stack probs & lose hibits of %o 5762! flushw ! Make sure we don't have stack probs & lose hibits of %o
5763 rdpr %pstate, %o1 ! enable FP before we begin 5763 rdpr %pstate, %o1 ! enable FP before we begin
5764 rd %fprs, %o5 5764 rd %fprs, %o5
5765 wr %g0, FPRS_FEF, %fprs 5765 wr %g0, FPRS_FEF, %fprs
5766 or %o1, PSTATE_PEF, %o1 5766 or %o1, PSTATE_PEF, %o1
5767 wrpr %o1, 0, %pstate 5767 wrpr %o1, 0, %pstate
5768 5768
5769 stx %fsr, [%o0 + FS_FSR] ! f->fs_fsr = getfsr(); 5769 stx %fsr, [%o0 + FS_FSR] ! f->fs_fsr = getfsr();
5770 rd %gsr, %o4 ! Save %gsr 5770 rd %gsr, %o4 ! Save %gsr
5771 st %o4, [%o0 + FS_GSR] 5771 st %o4, [%o0 + FS_GSR]
5772 5772
5773 add %o0, FS_REGS, %o2 5773 add %o0, FS_REGS, %o2
5774#ifdef DIAGNOSTIC 5774#ifdef DIAGNOSTIC
5775 btst BLOCK_ALIGN, %o2 ! Needs to be re-executed 5775 btst BLOCK_ALIGN, %o2 ! Needs to be re-executed
5776 bnz,pn %icc, 6f ! Check alignment 5776 bnz,pn %icc, 6f ! Check alignment
5777#endif 5777#endif
5778 st %g0, [%o0 + FS_QSIZE] ! f->fs_qsize = 0; 5778 st %g0, [%o0 + FS_QSIZE] ! f->fs_qsize = 0;
5779 btst FPRS_DL|FPRS_DU, %o5 ! Both FPU halves clean? 5779 btst FPRS_DL|FPRS_DU, %o5 ! Both FPU halves clean?
5780 bz,pt %icc, 5f ! Then skip it 5780 bz,pt %icc, 5f ! Then skip it
5781 5781
5782 btst FPRS_DL, %o5 ! Lower FPU clean? 5782 btst FPRS_DL, %o5 ! Lower FPU clean?
5783 membar #Sync 5783 membar #Sync
5784 bz,a,pt %icc, 1f ! Then skip it, but upper FPU not clean 5784 bz,a,pt %icc, 1f ! Then skip it, but upper FPU not clean
5785 add %o2, 2*BLOCK_SIZE, %o2 ! Skip a block 5785 add %o2, 2*BLOCK_SIZE, %o2 ! Skip a block
5786 5786
5787 stda %f0, [%o2] ASI_BLK_P ! f->fs_f0 = etc; 5787 stda %f0, [%o2] ASI_BLK_P ! f->fs_f0 = etc;
5788 inc BLOCK_SIZE, %o2 5788 inc BLOCK_SIZE, %o2
5789 stda %f16, [%o2] ASI_BLK_P 5789 stda %f16, [%o2] ASI_BLK_P
5790 5790
5791 btst FPRS_DU, %o5 ! Upper FPU clean? 5791 btst FPRS_DU, %o5 ! Upper FPU clean?
5792 bz,pt %icc, 2f ! Then skip it 5792 bz,pt %icc, 2f ! Then skip it
5793 inc BLOCK_SIZE, %o2 5793 inc BLOCK_SIZE, %o2
57941: 57941:
5795 stda %f32, [%o2] ASI_BLK_P 5795 stda %f32, [%o2] ASI_BLK_P
5796 inc BLOCK_SIZE, %o2 5796 inc BLOCK_SIZE, %o2
5797 stda %f48, [%o2] ASI_BLK_P 5797 stda %f48, [%o2] ASI_BLK_P
57982: 57982:
5799 membar #Sync ! Finish operation so we can 5799 membar #Sync ! Finish operation so we can
58005: 58005:
5801 retl 5801 retl
5802 wr %g0, FPRS_FEF, %fprs ! Mark FPU clean 5802 wr %g0, FPRS_FEF, %fprs ! Mark FPU clean
5803 5803
5804#ifdef DIAGNOSTIC 5804#ifdef DIAGNOSTIC
5805 !! 5805 !!
5806 !! Damn thing is *NOT* aligned on a 64-byte boundary 5806 !! Damn thing is *NOT* aligned on a 64-byte boundary
5807 !! 5807 !!
58086: 58086:
5809 wr %g0, FPRS_FEF, %fprs 5809 wr %g0, FPRS_FEF, %fprs
5810 ! XXX -- we should panic instead of silently entering debugger 5810 ! XXX -- we should panic instead of silently entering debugger
5811 ta 1 5811 ta 1
5812 retl 5812 retl
5813 nop 5813 nop
5814#endif 5814#endif
5815 5815
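The conditional stores in savefpstate follow the FPRS dirty bits: the lower register bank (%f0-%f31) is written out only when FPRS_DL is set, the upper bank (%f32-%f63) only when FPRS_DU is set, and both are skipped when the FPU is entirely clean. A C-level sketch of that decision logic, with assumed bit values and a stand-in for the 64-byte ASI_BLK_P block store:

    /* Assumed FPRS bit layout (V9: DL = bit 0, DU = bit 1). */
    #define FPRS_DL_SK      0x1
    #define FPRS_DU_SK      0x2
    #define BLOCK_SIZE_SK   64

    /* Stand-in for one 64-byte block store of %fN..%fN+15. */
    static void store_block(char *dst, int first_freg) { (void)dst; (void)first_freg; }

    static void
    savefpstate_banks_sketch(unsigned fprs, char *fs_regs)
    {
            if ((fprs & (FPRS_DL_SK | FPRS_DU_SK)) == 0)
                    return;                         /* both halves clean */
            if (fprs & FPRS_DL_SK) {                /* lower half dirty */
                    store_block(fs_regs + 0 * BLOCK_SIZE_SK, 0);
                    store_block(fs_regs + 1 * BLOCK_SIZE_SK, 16);
            }
            if (fprs & FPRS_DU_SK) {                /* upper half dirty */
                    store_block(fs_regs + 2 * BLOCK_SIZE_SK, 32);
                    store_block(fs_regs + 3 * BLOCK_SIZE_SK, 48);
            }
    }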
5816/* 5816/*
5817 * Load FPU state. 5817 * Load FPU state.
5818 */ 5818 */
5819 /* XXXXXXXXXX Should test to see if we only need to do a partial restore */ 5819 /* XXXXXXXXXX Should test to see if we only need to do a partial restore */
5820ENTRY(loadfpstate) 5820ENTRY(loadfpstate)
5821 flushw ! Make sure we don't have stack probs & lose hibits of %o 5821 flushw ! Make sure we don't have stack probs & lose hibits of %o
5822 rdpr %pstate, %o1 ! enable FP before we begin 5822 rdpr %pstate, %o1 ! enable FP before we begin
5823 ld [%o0 + FS_GSR], %o4 ! Restore %gsr 5823 ld [%o0 + FS_GSR], %o4 ! Restore %gsr
5824 set PSTATE_PEF, %o2 5824 set PSTATE_PEF, %o2
5825 wr %g0, FPRS_FEF, %fprs 5825 wr %g0, FPRS_FEF, %fprs
5826 or %o1, %o2, %o1 5826 or %o1, %o2, %o1
5827 wrpr %o1, 0, %pstate 5827 wrpr %o1, 0, %pstate
5828 ldx [%o0 + FS_FSR], %fsr ! setfsr(f->fs_fsr); 5828 ldx [%o0 + FS_FSR], %fsr ! setfsr(f->fs_fsr);
5829 add %o0, FS_REGS, %o3 ! This is zero... 5829 add %o0, FS_REGS, %o3 ! This is zero...
5830#ifdef DIAGNOSTIC 5830#ifdef DIAGNOSTIC
5831 btst BLOCK_ALIGN, %o3 5831 btst BLOCK_ALIGN, %o3
5832 bne,pn %icc, 1f ! Only use block loads on aligned blocks 5832 bne,pn %icc, 1f ! Only use block loads on aligned blocks
5833#endif 5833#endif
5834 wr %o4, %g0, %gsr 5834 wr %o4, %g0, %gsr
5835 membar #Sync 5835 membar #Sync
5836 ldda [%o3] ASI_BLK_P, %f0 5836 ldda [%o3] ASI_BLK_P, %f0
5837 inc BLOCK_SIZE, %o3 5837 inc BLOCK_SIZE, %o3
5838 ldda [%o3] ASI_BLK_P, %f16 5838 ldda [%o3] ASI_BLK_P, %f16
5839 inc BLOCK_SIZE, %o3 5839 inc BLOCK_SIZE, %o3
5840 ldda [%o3] ASI_BLK_P, %f32 5840 ldda [%o3] ASI_BLK_P, %f32
5841 inc BLOCK_SIZE, %o3 5841 inc BLOCK_SIZE, %o3
5842 ldda [%o3] ASI_BLK_P, %f48 5842 ldda [%o3] ASI_BLK_P, %f48
5843 membar #Sync ! Make sure loads are complete 5843 membar #Sync ! Make sure loads are complete
5844 retl 5844 retl
5845 wr %g0, FPRS_FEF, %fprs ! Clear dirty bits 5845 wr %g0, FPRS_FEF, %fprs ! Clear dirty bits
5846 5846
5847#ifdef DIAGNOSTIC 5847#ifdef DIAGNOSTIC
5848 !! 5848 !!
5849 !! Damn thing is *NOT* aligned on a 64-byte boundary 5849 !! Damn thing is *NOT* aligned on a 64-byte boundary
5850 !!  5850 !!
58511: 58511:
5852 wr %g0, FPRS_FEF, %fprs ! Clear dirty bits 5852 wr %g0, FPRS_FEF, %fprs ! Clear dirty bits
5853 ! XXX -- we should panic instead of silently entering debugger 5853 ! XXX -- we should panic instead of silently entering debugger
5854 ta 1 5854 ta 1
5855 retl 5855 retl
5856 nop 5856 nop
5857#endif 5857#endif
5858 5858
5859/* 5859/*
5860 * ienab_bis(bis) int bis; 5860 * ienab_bis(bis) int bis;
5861 * ienab_bic(bic) int bic; 5861 * ienab_bic(bic) int bic;
5862 * 5862 *
5863 * Set and clear bits in the interrupt register. 5863 * Set and clear bits in the interrupt register.
5864 */ 5864 */
5865 5865
5866/* 5866/*
5867 * sun4u has separate asr's for clearing/setting the interrupt mask. 5867 * sun4u has separate asr's for clearing/setting the interrupt mask.
5868 */ 5868 */
5869ENTRY(ienab_bis) 5869ENTRY(ienab_bis)
5870 retl 5870 retl
5871 wr %o0, 0, SET_SOFTINT ! SET_SOFTINT 5871 wr %o0, 0, SET_SOFTINT ! SET_SOFTINT
5872 5872
5873ENTRY(ienab_bic) 5873ENTRY(ienab_bic)
5874 retl 5874 retl
5875 wr %o0, 0, CLEAR_SOFTINT ! CLEAR_SOFTINT 5875 wr %o0, 0, CLEAR_SOFTINT ! CLEAR_SOFTINT
5876 5876
5877/* 5877/*
5878 * send_softint(cpu, level, intrhand) 5878 * send_softint(cpu, level, intrhand)
5879 * 5879 *
5880 * Send a softint with an intrhand pointer so we can cause a vectored 5880 * Send a softint with an intrhand pointer so we can cause a vectored
5881 * interrupt instead of a polled interrupt. This does pretty much the same 5881 * interrupt instead of a polled interrupt. This does pretty much the same
5882 * as interrupt_vector. If cpu is -1 then send it to this CPU, if it's -2 5882 * as interrupt_vector. If cpu is -1 then send it to this CPU, if it's -2
5883 * send it to any CPU, otherwise send it to a particular CPU. 5883 * send it to any CPU, otherwise send it to a particular CPU.
5884 * 5884 *
5885 * XXXX Dispatching to different CPUs is not implemented yet. 5885 * XXXX Dispatching to different CPUs is not implemented yet.
5886 */ 5886 */
5887ENTRY(send_softint) 5887ENTRY(send_softint)
5888 rdpr %pstate, %g1 5888 rdpr %pstate, %g1
5889 andn %g1, PSTATE_IE, %g2 ! clear PSTATE.IE 5889 andn %g1, PSTATE_IE, %g2 ! clear PSTATE.IE
5890 wrpr %g2, 0, %pstate 5890 wrpr %g2, 0, %pstate
5891 5891
5892 sethi %hi(CPUINFO_VA+CI_INTRPENDING), %o3 5892 sethi %hi(CPUINFO_VA+CI_INTRPENDING), %o3
5893 LDPTR [%o2 + IH_PEND], %o5 5893 LDPTR [%o2 + IH_PEND], %o5
5894 or %o3, %lo(CPUINFO_VA+CI_INTRPENDING), %o3 5894 or %o3, %lo(CPUINFO_VA+CI_INTRPENDING), %o3
5895 brnz %o5, 1f 5895 brnz %o5, 1f
5896 sll %o1, PTRSHFT, %o5 ! Find start of table for this IPL 5896 sll %o1, PTRSHFT, %o5 ! Find start of table for this IPL
5897 add %o3, %o5, %o3 5897 add %o3, %o5, %o3
58982: 58982:
5899 LDPTR [%o3], %o5 ! Load list head 5899 LDPTR [%o3], %o5 ! Load list head
5900 STPTR %o5, [%o2+IH_PEND] ! Link our intrhand node in 5900 STPTR %o5, [%o2+IH_PEND] ! Link our intrhand node in
5901 mov %o2, %o4 5901 mov %o2, %o4
5902 CASPTR [%o3] ASI_N, %o5, %o4 5902 CASPTR [%o3] ASI_N, %o5, %o4
5903 cmp %o4, %o5 ! Did it work? 5903 cmp %o4, %o5 ! Did it work?
5904 bne,pn CCCR, 2b ! No, try again 5904 bne,pn CCCR, 2b ! No, try again
5905 .empty 5905 .empty
5906 5906
5907 mov 1, %o4 ! Change from level to bitmask 5907 mov 1, %o4 ! Change from level to bitmask
5908 sllx %o4, %o1, %o4 5908 sllx %o4, %o1, %o4
5909 wr %o4, 0, SET_SOFTINT ! SET_SOFTINT 5909 wr %o4, 0, SET_SOFTINT ! SET_SOFTINT
59101: 59101:
5911 retl 5911 retl
5912 wrpr %g1, 0, %pstate ! restore PSTATE.IE 5912 wrpr %g1, 0, %pstate ! restore PSTATE.IE
5913 5913
5914 5914
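
The comment above describes the mechanism: the intrhand is pushed onto the per-IPL pending list with a compare-and-swap loop, then the matching soft interrupt bit is raised. A rough C sketch of the local-CPU (cpu == -1) case follows, illustrative only: atomic_cas_ptr(9) and curcpu() are real NetBSD interfaces, but the ci_intrpending/ih_pend field names merely mirror the CI_INTRPENDING/IH_PEND offsets used above, and sparc64_set_softint() is a hypothetical stand-in for the SET_SOFTINT write.

    #include <sys/atomic.h>         /* atomic_cas_ptr() */

    /* Illustrative C equivalent of send_softint() for the local CPU.
     * The real routine runs with PSTATE.IE cleared around this sequence. */
    void
    send_softint_sketch(int level, struct intrhand *ih)
    {
            struct intrhand **head, *old;

            if (ih->ih_pend != NULL)                 /* already queued */
                    return;

            head = &curcpu()->ci_intrpending[level]; /* per-IPL list head (field name assumed) */
            do {
                    old = *head;
                    ih->ih_pend = old;               /* link ourselves in front */
            } while (atomic_cas_ptr(head, old, ih) != old);

            sparc64_set_softint(1 << level);         /* hypothetical wrapper for SET_SOFTINT */
    }
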
5915#define MICROPERSEC (1000000) 5915#define MICROPERSEC (1000000)
5916 5916
5917/* 5917/*
5918 * delay function 5918 * delay function
5919 * 5919 *
5920 * void delay(N) -- delay N microseconds 5920 * void delay(N) -- delay N microseconds
5921 * 5921 *
5922 * Register usage: %o0 = "N" number of usecs to go (counts down to zero) 5922 * Register usage: %o0 = "N" number of usecs to go (counts down to zero)
5923 * %o1 = "timerblurb" (stays constant) 5923 * %o1 = "timerblurb" (stays constant)
5924 * %o2 = counter for 1 usec (counts down from %o1 to zero) 5924 * %o2 = counter for 1 usec (counts down from %o1 to zero)
5925 * 5925 *
5926 * 5926 *
5927 * ci_cpu_clockrate should be tuned during CPU probe to the CPU 5927 * ci_cpu_clockrate should be tuned during CPU probe to the CPU
5928 * clockrate in Hz 5928 * clockrate in Hz
5929 * 5929 *
5930 */ 5930 */
5931ENTRY(delay) ! %o0 = n 5931ENTRY(delay) ! %o0 = n
5932#if 1 5932#if 1
5933 rdpr %tick, %o1 ! Take timer snapshot 5933 rdpr %tick, %o1 ! Take timer snapshot
5934 sethi %hi(CPUINFO_VA + CI_CLOCKRATE), %o2 5934 sethi %hi(CPUINFO_VA + CI_CLOCKRATE), %o2
5935 sethi %hi(MICROPERSEC), %o3 5935 sethi %hi(MICROPERSEC), %o3
5936 ldx [%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE + 8)], %o4 ! Get scale factor 5936 ldx [%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE + 8)], %o4 ! Get scale factor
5937 brnz,pt %o4, 0f 5937 brnz,pt %o4, 0f
5938 or %o3, %lo(MICROPERSEC), %o3 5938 or %o3, %lo(MICROPERSEC), %o3
5939 5939
5940 !! Calculate ticks/usec 5940 !! Calculate ticks/usec
5941 ldx [%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE)], %o4 ! No, we need to calculate it 5941 ldx [%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE)], %o4 ! No, we need to calculate it
5942 udivx %o4, %o3, %o4 5942 udivx %o4, %o3, %o4
5943 stx %o4, [%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE + 8)] ! Save it so we don't need to divide again 5943 stx %o4, [%o2 + %lo(CPUINFO_VA + CI_CLOCKRATE + 8)] ! Save it so we don't need to divide again
59440: 59440:
5945 5945
5946 mulx %o0, %o4, %o0 ! Convert usec -> ticks 5946 mulx %o0, %o4, %o0 ! Convert usec -> ticks
5947 rdpr %tick, %o2 ! Top of next itr 5947 rdpr %tick, %o2 ! Top of next itr
59481: 59481:
5949 sub %o2, %o1, %o3 ! How many ticks have gone by? 5949 sub %o2, %o1, %o3 ! How many ticks have gone by?
5950 sub %o0, %o3, %o4 ! Decrement count by that much 5950 sub %o0, %o3, %o4 ! Decrement count by that much
5951 movrgz %o3, %o4, %o0 ! But only if we're decrementing 5951 movrgz %o3, %o4, %o0 ! But only if we're decrementing
5952 mov %o2, %o1 ! Remember last tick 5952 mov %o2, %o1 ! Remember last tick
5953 brgz,pt %o0, 1b ! Done? 5953 brgz,pt %o0, 1b ! Done?
5954 rdpr %tick, %o2 ! Get new tick 5954 rdpr %tick, %o2 ! Get new tick
5955 5955
5956 retl 5956 retl
5957 nop 5957 nop
5958#else 5958#else
5959/* This code only works if %tick does not wrap */ 5959/* This code only works if %tick does not wrap */
5960 rdpr %tick, %g1 ! Take timer snapshot 5960 rdpr %tick, %g1 ! Take timer snapshot
5961 sethi %hi(CPUINFO_VA + CI_CLOCKRATE), %g2 5961 sethi %hi(CPUINFO_VA + CI_CLOCKRATE), %g2
5962 sethi %hi(MICROPERSEC), %o2 5962 sethi %hi(MICROPERSEC), %o2
5963 ldx [%g2 + %lo(CPUINFO_VA + CI_CLOCKRATE)], %g2 ! Get scale factor 5963 ldx [%g2 + %lo(CPUINFO_VA + CI_CLOCKRATE)], %g2 ! Get scale factor
5964 or %o2, %lo(MICROPERSEC), %o2 5964 or %o2, %lo(MICROPERSEC), %o2
5965! sethi %hi(_C_LABEL(timerblurb), %o5 ! This is if we plan to tune the clock 5965! sethi %hi(_C_LABEL(timerblurb), %o5 ! This is if we plan to tune the clock
5966! ld [%o5 + %lo(_C_LABEL(timerblurb))], %o5 ! with respect to the counter/timer 5966! ld [%o5 + %lo(_C_LABEL(timerblurb))], %o5 ! with respect to the counter/timer
5967 mulx %o0, %g2, %g2 ! Scale it: (usec * Hz) / 1 x 10^6 = ticks 5967 mulx %o0, %g2, %g2 ! Scale it: (usec * Hz) / 1 x 10^6 = ticks
5968 udivx %g2, %o2, %g2 5968 udivx %g2, %o2, %g2
5969 add %g1, %g2, %g2 5969 add %g1, %g2, %g2
5970! add %o5, %g2, %g2 5, %g2, %g2 ! But this gets complicated 5970! add %o5, %g2, %g2 5, %g2, %g2 ! But this gets complicated
5971 rdpr %tick, %g1 ! Top of next itr 5971 rdpr %tick, %g1 ! Top of next itr
5972 mov %g1, %g1 ! Erratum 50 5972 mov %g1, %g1 ! Erratum 50
59731: 59731:
5974 cmp %g1, %g2 5974 cmp %g1, %g2
5975 bl,a,pn %xcc, 1b ! Done? 5975 bl,a,pn %xcc, 1b ! Done?
5976 rdpr %tick, %g1 5976 rdpr %tick, %g1
5977 5977
5978 retl 5978 retl
5979 nop 5979 nop
5980#endif 5980#endif
5981 /* 5981 /*
5982 * If something's wrong with the standard setup do this stupid loop 5982 * If something's wrong with the standard setup do this stupid loop
5983 * calibrated for a 143MHz processor. 5983 * calibrated for a 143MHz processor.
5984 */ 5984 */
5985Lstupid_delay: 5985Lstupid_delay:
5986 set 142857143/MICROPERSEC, %o1 5986 set 142857143/MICROPERSEC, %o1
5987Lstupid_loop: 5987Lstupid_loop:
5988 brnz,pt %o1, Lstupid_loop 5988 brnz,pt %o1, Lstupid_loop
5989 dec %o1 5989 dec %o1
5990 brnz,pt %o0, Lstupid_delay 5990 brnz,pt %o0, Lstupid_delay
5991 dec %o0 5991 dec %o0
5992 retl 5992 retl
5993 nop 5993 nop
5994 5994
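
delay() converts the requested microseconds into %tick cycles using a cached ticks-per-microsecond value (computed once from the CPU clock rate and stored back into the second slot), then spins while accumulating only forward %tick motion. A hedged C rendering of that loop; gettick() stands in for rdpr %tick, and the two-element ci_cpu_clockrate layout is inferred from the CI_CLOCKRATE/+8 offsets used above.

    /* Illustrative C version of the %tick-based delay loop. */
    void
    delay_sketch(unsigned int usec)
    {
            struct cpu_info *ci = curcpu();
            uint64_t ticks_per_usec = ci->ci_cpu_clockrate[1];      /* cached scale factor */
            uint64_t last, now, elapsed;
            int64_t remaining;

            if (ticks_per_usec == 0) {
                    /* First call on this CPU: derive and cache ticks/usec. */
                    ticks_per_usec = ci->ci_cpu_clockrate[0] / 1000000;
                    ci->ci_cpu_clockrate[1] = ticks_per_usec;
            }

            remaining = (int64_t)(usec * ticks_per_usec);
            last = gettick();                       /* stand-in for rdpr %tick */
            while (remaining > 0) {
                    now = gettick();
                    elapsed = now - last;
                    if ((int64_t)elapsed > 0)       /* only count forward motion */
                            remaining -= elapsed;
                    last = now;
            }
    }
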
5995/* 5995/*
5996 * next_tick(long increment) 5996 * next_tick(long increment)
5997 * 5997 *
5998 * Sets the %tick_cmpr register to fire off in `increment' machine 5998 * Sets the %tick_cmpr register to fire off in `increment' machine
5999 * cycles in the future. Also handles %tick wraparound. In 32-bit 5999 * cycles in the future. Also handles %tick wraparound. In 32-bit
6000 * mode we're limited to a 32-bit increment. 6000 * mode we're limited to a 32-bit increment.
6001 */ 6001 */
6002ENTRY(next_tick) 6002ENTRY(next_tick)
6003 rd TICK_CMPR, %o2 6003 rd TICK_CMPR, %o2
6004 rdpr %tick, %o1 6004 rdpr %tick, %o1
6005 6005
6006 mov 1, %o3 ! Mask off high bits of these registers 6006 mov 1, %o3 ! Mask off high bits of these registers
6007 sllx %o3, 63, %o3 6007 sllx %o3, 63, %o3
6008 andn %o1, %o3, %o1 6008 andn %o1, %o3, %o1
6009 andn %o2, %o3, %o2 6009 andn %o2, %o3, %o2
6010 cmp %o1, %o2 ! Did we wrap? (tick < tick_cmpr) 6010 cmp %o1, %o2 ! Did we wrap? (tick < tick_cmpr)
6011 bgt,pt %icc, 1f 6011 bgt,pt %icc, 1f
6012 add %o1, 1000, %o1 ! Need some slack so we don't lose intrs. 6012 add %o1, 1000, %o1 ! Need some slack so we don't lose intrs.
6013 6013
6014 /* 6014 /*
6015 * Handle the unlikely case of %tick wrapping. 6015 * Handle the unlikely case of %tick wrapping.
6016 * 6016 *
6017 * This should only happen every 10 years or more. 6017 * This should only happen every 10 years or more.
6018 * 6018 *
6019 * We need to increment the time base by the size of %tick in 6019 * We need to increment the time base by the size of %tick in
6020 * microseconds. This will require some divides and multiplies 6020 * microseconds. This will require some divides and multiplies
6021 * which can take time. So we re-read %tick. 6021 * which can take time. So we re-read %tick.
6022 * 6022 *
6023 */ 6023 */
6024 6024
6025 /* XXXXX NOT IMPLEMENTED */ 6025 /* XXXXX NOT IMPLEMENTED */
6026 6026
6027 6027
6028 6028
60291: 60291:
6030 add %o2, %o0, %o2 6030 add %o2, %o0, %o2
6031 andn %o2, %o3, %o4 6031 andn %o2, %o3, %o4
6032 brlz,pn %o4, Ltick_ovflw 6032 brlz,pn %o4, Ltick_ovflw
6033 cmp %o2, %o1 ! Has this tick passed? 6033 cmp %o2, %o1 ! Has this tick passed?
6034 blt,pn %xcc, 1b ! Yes 6034 blt,pn %xcc, 1b ! Yes
6035 nop 6035 nop
6036 6036
6037#ifdef BB_ERRATA_1 6037#ifdef BB_ERRATA_1
6038 ba,a 2f 6038 ba,a 2f
6039 nop 6039 nop
6040#else 6040#else
6041 retl 6041 retl
6042 wr %o2, TICK_CMPR 6042 wr %o2, TICK_CMPR
6043#endif 6043#endif
6044 6044
6045Ltick_ovflw: 6045Ltick_ovflw:
6046/* 6046/*
6047 * When we get here tick_cmpr has wrapped, but we don't know if %tick 6047 * When we get here tick_cmpr has wrapped, but we don't know if %tick
6048 * has wrapped. If bit 62 is set then we have not wrapped and we can 6048 * has wrapped. If bit 62 is set then we have not wrapped and we can
6049 * use the current value of %o4 as %tick. Otherwise we need to return 6049 * use the current value of %o4 as %tick. Otherwise we need to return
6050 * to our loop with %o4 as %tick_cmpr (%o2). 6050 * to our loop with %o4 as %tick_cmpr (%o2).
6051 */ 6051 */
6052 srlx %o3, 1, %o5 6052 srlx %o3, 1, %o5
6053 btst %o5, %o1 6053 btst %o5, %o1
6054 bz,pn %xcc, 1b 6054 bz,pn %xcc, 1b
6055 mov %o4, %o2 6055 mov %o4, %o2
6056#ifdef BB_ERRATA_1 6056#ifdef BB_ERRATA_1
6057 ba,a 2f 6057 ba,a 2f
6058 nop 6058 nop
6059 .align 64 6059 .align 64
60602: wr %o2, TICK_CMPR 60602: wr %o2, TICK_CMPR
6061 rd TICK_CMPR, %g0 6061 rd TICK_CMPR, %g0
6062 retl 6062 retl
6063 nop 6063 nop
6064#else 6064#else
6065 retl 6065 retl
6066 wr %o2, TICK_CMPR 6066 wr %o2, TICK_CMPR
6067#endif 6067#endif
6068 6068
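
next_tick() masks bit 63 off both %tick and %tick_cmpr, adds a little slack to the current time so it never programs a compare value that is already in the past, and keeps adding the increment until the new compare value lies in the future; the Ltick_ovflw path additionally copes with the compare value wrapping past bit 62. A hedged C sketch of the common path (the wrap handling is omitted, and gettick()/gettickcmpr()/settickcmpr() are invented stand-ins for the privileged register accesses):

    /* Illustrative C sketch of next_tick()'s catch-up logic. */
    void
    next_tick_sketch(uint64_t increment)
    {
            const uint64_t msb = 1ULL << 63;        /* masked off both registers */
            uint64_t tick, cmpr;

            cmpr = gettickcmpr() & ~msb;            /* stand-in for rd TICK_CMPR */
            tick = (gettick() & ~msb) + 1000;       /* slack so we don't lose intrs */

            do {
                    cmpr += increment;              /* catch up if we fell behind */
            } while (cmpr < tick);

            settickcmpr(cmpr);                      /* stand-in for wr TICK_CMPR */
    }

next_stick() below applies the same logic to %stick/%stick_cmpr.
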
6069/* 6069/*
6070 * next_stick(long increment) 6070 * next_stick(long increment)
6071 * 6071 *
6072 * Sets the %stick_cmpr register to fire off in `increment' machine 6072 * Sets the %stick_cmpr register to fire off in `increment' machine
6073 * cycles in the future. Also handles %stick wraparound. In 32-bit 6073 * cycles in the future. Also handles %stick wraparound. In 32-bit
6074 * mode we're limited to a 32-bit increment. 6074 * mode we're limited to a 32-bit increment.
6075 */ 6075 */
6076ENTRY(next_stick) 6076ENTRY(next_stick)
6077 rd STICK_CMPR, %o2 6077 rd STICK_CMPR, %o2
6078 rd STICK, %o1 6078 rd STICK, %o1
6079 6079
6080 mov 1, %o3 ! Mask off high bits of these registers 6080 mov 1, %o3 ! Mask off high bits of these registers
6081 sllx %o3, 63, %o3 6081 sllx %o3, 63, %o3
6082 andn %o1, %o3, %o1 6082 andn %o1, %o3, %o1
6083 andn %o2, %o3, %o2 6083 andn %o2, %o3, %o2
6084 cmp %o1, %o2 ! Did we wrap? (tick < tick_cmpr) 6084 cmp %o1, %o2 ! Did we wrap? (tick < tick_cmpr)
6085 bgt,pt %icc, 1f 6085 bgt,pt %icc, 1f
6086 add %o1, 1000, %o1 ! Need some slack so we don't lose intrs. 6086 add %o1, 1000, %o1 ! Need some slack so we don't lose intrs.
6087 6087
6088 /* 6088 /*
6089 * Handle the unlikely case of %stick wrapping. 6089 * Handle the unlikely case of %stick wrapping.
6090 * 6090 *
6091 * This should only happen every 10 years or more. 6091 * This should only happen every 10 years or more.
6092 * 6092 *
6093 * We need to increment the time base by the size of %stick in 6093 * We need to increment the time base by the size of %stick in
6094 * microseconds. This will require some divides and multiplies 6094 * microseconds. This will require some divides and multiplies
6095 * which can take time. So we re-read %stick. 6095 * which can take time. So we re-read %stick.
6096 * 6096 *
6097 */ 6097 */
6098 6098
6099 /* XXXXX NOT IMPLEMENTED */ 6099 /* XXXXX NOT IMPLEMENTED */
6100 6100
6101 6101
6102 6102
61031: 61031:
6104 add %o2, %o0, %o2 6104 add %o2, %o0, %o2
6105 andn %o2, %o3, %o4 6105 andn %o2, %o3, %o4
6106 brlz,pn %o4, Lstick_ovflw 6106 brlz,pn %o4, Lstick_ovflw
6107 cmp %o2, %o1 ! Has this stick passed? 6107 cmp %o2, %o1 ! Has this stick passed?
6108 blt,pn %xcc, 1b ! Yes 6108 blt,pn %xcc, 1b ! Yes
6109 nop 6109 nop
6110 retl 6110 retl
6111 wr %o2, STICK_CMPR 6111 wr %o2, STICK_CMPR
6112 6112
6113Lstick_ovflw: 6113Lstick_ovflw:
6114/* 6114/*
6115 * When we get here %stick_cmpr has wrapped, but we don't know if %stick 6115 * When we get here %stick_cmpr has wrapped, but we don't know if %stick
6116 * has wrapped. If bit 62 is set then we have not wrapped and we can 6116 * has wrapped. If bit 62 is set then we have not wrapped and we can
6117 * use the current value of %o4 as %stick. Otherwise we need to return 6117 * use the current value of %o4 as %stick. Otherwise we need to return
6118 * to our loop with %o4 as %stick_cmpr (%o2). 6118 * to our loop with %o4 as %stick_cmpr (%o2).
6119 */ 6119 */
6120 srlx %o3, 1, %o5 6120 srlx %o3, 1, %o5
6121 btst %o5, %o1 6121 btst %o5, %o1
6122 bz,pn %xcc, 1b 6122 bz,pn %xcc, 1b
6123 mov %o4, %o2 6123 mov %o4, %o2
6124 retl 6124 retl
6125 wr %o2, STICK_CMPR 6125 wr %o2, STICK_CMPR
6126 6126
6127ENTRY(setjmp) 6127ENTRY(setjmp)
6128 save %sp, -CC64FSZ, %sp ! Need a frame to return to. 6128 save %sp, -CC64FSZ, %sp ! Need a frame to return to.
6129 flushw 6129 flushw
6130 stx %fp, [%i0+0] ! 64-bit stack pointer 6130 stx %fp, [%i0+0] ! 64-bit stack pointer
6131 stx %i7, [%i0+8] ! 64-bit return pc 6131 stx %i7, [%i0+8] ! 64-bit return pc
6132 ret 6132 ret
6133 restore %g0, 0, %o0 6133 restore %g0, 0, %o0
6134 6134
6135 .data 6135 .data
6136Lpanic_ljmp: 6136Lpanic_ljmp:
6137 .asciz "longjmp botch" 6137 .asciz "longjmp botch"
6138 _ALIGN 6138 _ALIGN
6139 .text 6139 .text
6140 6140
6141ENTRY(longjmp) 6141ENTRY(longjmp)
6142 save %sp, -CC64FSZ, %sp ! prepare to restore to (old) frame 6142 save %sp, -CC64FSZ, %sp ! prepare to restore to (old) frame
6143 flushw 6143 flushw
6144 mov 1, %i2 6144 mov 1, %i2
6145 ldx [%i0+0], %fp ! get return stack 6145 ldx [%i0+0], %fp ! get return stack
6146 movrz %i1, %i1, %i2 ! compute v ? v : 1 6146 movrz %i1, %i1, %i2 ! compute v ? v : 1
6147 ldx [%i0+8], %i7 ! get rpc 6147 ldx [%i0+8], %i7 ! get rpc
6148 ret 6148 ret
6149 restore %i2, 0, %o0 6149 restore %i2, 0, %o0
6150 6150
6151#if defined(DDB) || defined(KGDB) 6151#if defined(DDB) || defined(KGDB)
6152 /* 6152 /*
6153 * Debug stuff. Dump the trap registers into buffer & set tl=0. 6153 * Debug stuff. Dump the trap registers into buffer & set tl=0.
6154 * 6154 *
6155 * %o0 = *ts 6155 * %o0 = *ts
6156 */ 6156 */
6157ENTRY(savetstate) 6157ENTRY(savetstate)
6158 mov %o0, %o1 6158 mov %o0, %o1
6159 rdpr %tl, %o0 6159 rdpr %tl, %o0
6160 brz %o0, 2f 6160 brz %o0, 2f
6161 mov %o0, %o2 6161 mov %o0, %o2
61621: 61621:
6163 rdpr %tstate, %o3 6163 rdpr %tstate, %o3
6164 stx %o3, [%o1] 6164 stx %o3, [%o1]
6165 deccc %o2 6165 deccc %o2
6166 inc 8, %o1 6166 inc 8, %o1
6167 rdpr %tpc, %o4 6167 rdpr %tpc, %o4
6168 stx %o4, [%o1] 6168 stx %o4, [%o1]
6169 inc 8, %o1 6169 inc 8, %o1
6170 rdpr %tnpc, %o5 6170 rdpr %tnpc, %o5
6171 stx %o5, [%o1] 6171 stx %o5, [%o1]
6172 inc 8, %o1 6172 inc 8, %o1
6173 rdpr %tt, %o4 6173 rdpr %tt, %o4
6174 stx %o4, [%o1] 6174 stx %o4, [%o1]
6175 inc 8, %o1 6175 inc 8, %o1
6176 bnz 1b 6176 bnz 1b
6177 wrpr %o2, 0, %tl 6177 wrpr %o2, 0, %tl
61782: 61782:
6179 retl 6179 retl
6180 nop 6180 nop
6181 6181
6182 /* 6182 /*
6183 * Debug stuff. Restore trap registers from buffer. 6183 * Debug stuff. Restore trap registers from buffer.
6184 * 6184 *
6185 * %o0 = %tl 6185 * %o0 = %tl
6186 * %o1 = *ts 6186 * %o1 = *ts
6187 * 6187 *
6188 * Maybe this should be re-written to increment tl instead of decrementing. 6188 * Maybe this should be re-written to increment tl instead of decrementing.
6189 */ 6189 */
6190ENTRY(restoretstate) 6190ENTRY(restoretstate)
6191 flushw ! Make sure we don't have stack probs & lose hibits of %o 6191 flushw ! Make sure we don't have stack probs & lose hibits of %o
6192 brz,pn %o0, 2f 6192 brz,pn %o0, 2f
6193 mov %o0, %o2 6193 mov %o0, %o2
6194 wrpr %o0, 0, %tl 6194 wrpr %o0, 0, %tl
61951: 61951:
6196 ldx [%o1], %o3 6196 ldx [%o1], %o3
6197 deccc %o2 6197 deccc %o2
6198 inc 8, %o1 6198 inc 8, %o1
6199 wrpr %o3, 0, %tstate 6199 wrpr %o3, 0, %tstate
6200 ldx [%o1], %o4 6200 ldx [%o1], %o4
6201 inc 8, %o1 6201 inc 8, %o1
6202 wrpr %o4, 0, %tpc 6202 wrpr %o4, 0, %tpc
6203 ldx [%o1], %o5 6203 ldx [%o1], %o5
6204 inc 8, %o1 6204 inc 8, %o1
6205 wrpr %o5, 0, %tnpc 6205 wrpr %o5, 0, %tnpc
6206 ldx [%o1], %o4 6206 ldx [%o1], %o4
6207 inc 8, %o1 6207 inc 8, %o1
6208 wrpr %o4, 0, %tt 6208 wrpr %o4, 0, %tt
6209 bnz 1b 6209 bnz 1b
6210 wrpr %o2, 0, %tl 6210 wrpr %o2, 0, %tl
62112: 62112:
6212 retl 6212 retl
6213 wrpr %o0, 0, %tl 6213 wrpr %o0, 0, %tl
6214 6214
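
savetstate() walks the trap levels from the current %tl down to 1, storing four 64-bit values per level and leaving %tl at 0; restoretstate() plays the records back in the same order. The buffer layout implied by the stores is, loosely, the following (the struct and field names are only illustrative; the callers just pass a raw 64-bit buffer):

    /* One record per trap level, as filled in by savetstate(). */
    struct tstate_record_sketch {
            uint64_t ts_tstate;     /* %tstate at this trap level */
            uint64_t ts_tpc;        /* %tpc */
            uint64_t ts_tnpc;       /* %tnpc */
            uint64_t ts_tt;         /* %tt (trap type) */
    };
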
6215 /* 6215 /*
6216 * Switch to context in abs(%o0) 6216 * Switch to context in abs(%o0)
6217 */ 6217 */
6218ENTRY(switchtoctx_us) 6218ENTRY(switchtoctx_us)
6219 set DEMAP_CTX_SECONDARY, %o3 6219 set DEMAP_CTX_SECONDARY, %o3
6220 stxa %o3, [%o3] ASI_DMMU_DEMAP 6220 stxa %o3, [%o3] ASI_DMMU_DEMAP
6221 mov CTX_SECONDARY, %o4 6221 mov CTX_SECONDARY, %o4
6222 stxa %o3, [%o3] ASI_IMMU_DEMAP 6222 stxa %o3, [%o3] ASI_IMMU_DEMAP
6223 membar #Sync 6223 membar #Sync
6224 stxa %o0, [%o4] ASI_DMMU ! Maybe we should invalidate 6224 stxa %o0, [%o4] ASI_DMMU ! Maybe we should invalidate
6225 sethi %hi(KERNBASE), %o2 6225 sethi %hi(KERNBASE), %o2
6226 membar #Sync 6226 membar #Sync
6227 flush %o2 6227 flush %o2
6228 retl 6228 retl
6229 nop 6229 nop
6230 6230
6231ENTRY(switchtoctx_usiii) 6231ENTRY(switchtoctx_usiii)
6232 mov CTX_SECONDARY, %o4 6232 mov CTX_SECONDARY, %o4
6233 ldxa [%o4] ASI_DMMU, %o2 ! Load secondary context 6233 ldxa [%o4] ASI_DMMU, %o2 ! Load secondary context
6234 mov CTX_PRIMARY, %o5 6234 mov CTX_PRIMARY, %o5
6235 ldxa [%o5] ASI_DMMU, %o1 ! Save primary context 6235 ldxa [%o5] ASI_DMMU, %o1 ! Save primary context
6236 membar #LoadStore 6236 membar #LoadStore
6237 stxa %o2, [%o5] ASI_DMMU ! Insert secondary for demap 6237 stxa %o2, [%o5] ASI_DMMU ! Insert secondary for demap
6238 membar #Sync 6238 membar #Sync
6239 set DEMAP_CTX_PRIMARY, %o3 6239 set DEMAP_CTX_PRIMARY, %o3
6240 stxa %o3, [%o3] ASI_DMMU_DEMAP 6240 stxa %o3, [%o3] ASI_DMMU_DEMAP
6241 membar #Sync 6241 membar #Sync
6242 stxa %o0, [%o4] ASI_DMMU ! Maybe we should invalidate 6242 stxa %o0, [%o4] ASI_DMMU ! Maybe we should invalidate
6243 membar #Sync 6243 membar #Sync
6244 stxa %o1, [%o5] ASI_DMMU ! Restore primary context 6244 stxa %o1, [%o5] ASI_DMMU ! Restore primary context
6245 sethi %hi(KERNBASE), %o2 6245 sethi %hi(KERNBASE), %o2
6246 membar #Sync 6246 membar #Sync
6247 flush %o2 6247 flush %o2
6248 retl 6248 retl
6249 nop 6249 nop
6250 6250
6251#ifndef _LP64 6251#ifndef _LP64
6252 /* 6252 /*
6253 * Convert to 32-bit stack then call OF_sym2val() 6253 * Convert to 32-bit stack then call OF_sym2val()
6254 */ 6254 */
6255ENTRY(OF_sym2val32) 6255ENTRY(OF_sym2val32)
6256 save %sp, -CC64FSZ, %sp 6256 save %sp, -CC64FSZ, %sp
6257 btst 7, %i0 6257 btst 7, %i0
6258 bnz,pn %icc, 1f 6258 bnz,pn %icc, 1f
6259 add %sp, BIAS, %o1 6259 add %sp, BIAS, %o1
6260 btst 1, %sp 6260 btst 1, %sp
6261 movnz %icc, %o1, %sp 6261 movnz %icc, %o1, %sp
6262 call _C_LABEL(OF_sym2val) 6262 call _C_LABEL(OF_sym2val)
6263 mov %i0, %o0 6263 mov %i0, %o0
62641: 62641:
6265 ret 6265 ret
6266 restore %o0, 0, %o0 6266 restore %o0, 0, %o0
6267 6267
6268 /* 6268 /*
6269 * Convert to 32-bit stack then call OF_val2sym() 6269 * Convert to 32-bit stack then call OF_val2sym()
6270 */ 6270 */
6271ENTRY(OF_val2sym32) 6271ENTRY(OF_val2sym32)
6272 save %sp, -CC64FSZ, %sp 6272 save %sp, -CC64FSZ, %sp
6273 btst 7, %i0 6273 btst 7, %i0
6274 bnz,pn %icc, 1f 6274 bnz,pn %icc, 1f
6275 add %sp, BIAS, %o1 6275 add %sp, BIAS, %o1
6276 btst 1, %sp 6276 btst 1, %sp
6277 movnz %icc, %o1, %sp 6277 movnz %icc, %o1, %sp
6278 call _C_LABEL(OF_val2sym) 6278 call _C_LABEL(OF_val2sym)
6279 mov %i0, %o0 6279 mov %i0, %o0
62801: 62801:
6281 ret 6281 ret
6282 restore %o0, 0, %o0 6282 restore %o0, 0, %o0
6283#endif /* _LP64 */ 6283#endif /* _LP64 */
6284#endif /* DDB */ 6284#endif /* DDB */
6285 6285
 6286
 6287#if defined(MULTIPROCESSOR)
 6288/*
 6289 * IPI target function to setup a C compatible environment and call a MI function.
 6290 *
 6291 * On entry:
 6292 * We are on one of the alternate set of globals
 6293 * %g2 = function to call
 6294 * %g3 = single argument to called function
 6295 */
 6296ENTRY(sparc64_ipi_ccall)
 6297#ifdef TRAPS_USE_IG
 6298 wrpr %g0, PSTATE_KERN|PSTATE_IG, %pstate ! DEBUG
 6299#endif
 6300 TRAP_SETUP(-CC64FSZ-TF_SIZE)
 6301
 6302#ifdef DEBUG
 6303 rdpr %tt, %o1 ! debug
 6304 sth %o1, [%sp + CC64FSZ + STKB + TF_TT]! debug
 6305#endif
 6306 mov %g3, %o0 ! save argument of function to call
 6307 mov %g2, %o5 ! save function pointer
 6308
 6309 wrpr %g0, PSTATE_KERN, %pstate ! Get back to normal globals
 6310 stx %g1, [%sp + CC64FSZ + STKB + TF_G + ( 1*8)]
 6311 mov %g1, %o1 ! code
 6312 rdpr %tpc, %o2 ! (pc)
 6313 stx %g2, [%sp + CC64FSZ + STKB + TF_G + ( 2*8)]
 6314 rdpr %tstate, %g1
 6315 stx %g3, [%sp + CC64FSZ + STKB + TF_G + ( 3*8)]
 6316 rdpr %tnpc, %o3
 6317 stx %g4, [%sp + CC64FSZ + STKB + TF_G + ( 4*8)]
 6318 rd %y, %o4
 6319 stx %g5, [%sp + CC64FSZ + STKB + TF_G + ( 5*8)]
 6320 stx %g6, [%sp + CC64FSZ + STKB + TF_G + ( 6*8)]
 6321 wrpr %g0, 0, %tl ! return to tl=0
 6322 stx %g7, [%sp + CC64FSZ + STKB + TF_G + ( 7*8)]
 6323
 6324 stx %g1, [%sp + CC64FSZ + STKB + TF_TSTATE]
 6325 stx %o2, [%sp + CC64FSZ + STKB + TF_PC]
 6326 stx %o3, [%sp + CC64FSZ + STKB + TF_NPC]
 6327 st %o4, [%sp + CC64FSZ + STKB + TF_Y]
 6328
 6329 rdpr %pil, %g5
 6330 stb %g5, [%sp + CC64FSZ + STKB + TF_PIL]
 6331 stb %g5, [%sp + CC64FSZ + STKB + TF_OLDPIL]
 6332
 6333 !! In the EMBEDANY memory model %g4 points to the start of the data segment.
 6334 !! In our case we need to clear it before calling any C-code
 6335 clr %g4
 6336 wr %g0, ASI_NUCLEUS, %asi ! default kernel ASI
 6337
 6338 call %o5 ! call function
 6339 nop
 6340
 6341 ba,a return_from_trap ! and return from IPI
 6342 nop
 6343
 6344#endif
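
This block is the substance of the fix: instead of hand-rolling a register window and calling the function directly (the old mp_subr.S version removed further down), the IPI now goes through TRAP_SETUP, records the interrupted state in a real trapframe, drops to %tl 0 and the normal globals, and leaves through return_from_trap, so the cross-call function runs in the same environment as any other trap-level C code. Seen from C, the effect is roughly the following sketch (illustrative only; the trapframe bookkeeping is summarized in comments):

    /* What sparc64_ipi_ccall amounts to at the C level. */
    void
    sparc64_ipi_ccall_sketch(void (*func)(void *), void *arg)
    {
            /*
             * TRAP_SETUP + the stores above: switch to a kernel stack frame,
             * save %tstate/%tpc/%tnpc/%y and the interrupted globals into a
             * trapframe, drop to %tl 0, normal globals, kernel ASI, %g4 = 0.
             */
            (*func)(arg);           /* the handler queued via xcall(9) */

            /* return_from_trap: restore the trapframe and return from the IPI. */
    }
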
 6345
 6346
6286 .data 6347 .data
6287 _ALIGN 6348 _ALIGN
6288#if NKSYMS || defined(DDB) || defined(LKM) 6349#if NKSYMS || defined(DDB) || defined(LKM)
6289 .globl _C_LABEL(esym) 6350 .globl _C_LABEL(esym)
6290_C_LABEL(esym): 6351_C_LABEL(esym):
6291 POINTER 0 6352 POINTER 0
6292 .globl _C_LABEL(ssym) 6353 .globl _C_LABEL(ssym)
6293_C_LABEL(ssym): 6354_C_LABEL(ssym):
6294 POINTER 0 6355 POINTER 0
6295#endif 6356#endif
6296 .comm _C_LABEL(promvec), PTRSZ 6357 .comm _C_LABEL(promvec), PTRSZ
6297 6358
6298#ifdef DEBUG 6359#ifdef DEBUG
6299 .comm _C_LABEL(trapdebug), 4 6360 .comm _C_LABEL(trapdebug), 4
6300 .comm _C_LABEL(pmapdebug), 4 6361 .comm _C_LABEL(pmapdebug), 4
6301#endif 6362#endif

cvs diff -r1.3 -r1.4 src/sys/arch/sparc64/sparc64/mp_subr.S

--- src/sys/arch/sparc64/sparc64/mp_subr.S 2011/07/12 07:51:34 1.3
+++ src/sys/arch/sparc64/sparc64/mp_subr.S 2013/01/23 21:03:25 1.4
@@ -1,431 +1,413 @@ @@ -1,431 +1,413 @@
1/* $NetBSD: mp_subr.S,v 1.3 2011/07/12 07:51:34 mrg Exp $ */ 1/* $NetBSD: mp_subr.S,v 1.4 2013/01/23 21:03:25 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006-2010 Matthew R. Green 4 * Copyright (c) 2006-2010 Matthew R. Green
5 * Copyright (c) 1996-2002 Eduardo Horvath 5 * Copyright (c) 1996-2002 Eduardo Horvath
6 * Copyright (c) 1996 Paul Kranenburg 6 * Copyright (c) 1996 Paul Kranenburg
7 * Copyright (c) 1996 7 * Copyright (c) 1996
8 * The President and Fellows of Harvard College. 8 * The President and Fellows of Harvard College.
9 * All rights reserved. 9 * All rights reserved.
10 * Copyright (c) 1992, 1993 10 * Copyright (c) 1992, 1993
11 * The Regents of the University of California. 11 * The Regents of the University of California.
12 * All rights reserved. 12 * All rights reserved.
13 * 13 *
14 * This software was developed by the Computer Systems Engineering group 14 * This software was developed by the Computer Systems Engineering group
15 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 15 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
16 * contributed to Berkeley. 16 * contributed to Berkeley.
17 * 17 *
18 * All advertising materials mentioning features or use of this software 18 * All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement: 19 * must display the following acknowledgement:
20 * This product includes software developed by the University of 20 * This product includes software developed by the University of
21 * California, Lawrence Berkeley Laboratory. 21 * California, Lawrence Berkeley Laboratory.
22 * This product includes software developed by Harvard University. 22 * This product includes software developed by Harvard University.
23 * 23 *
24 * Redistribution and use in source and binary forms, with or without 24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions 25 * modification, are permitted provided that the following conditions
26 * are met: 26 * are met:
27 * 1. Redistributions of source code must retain the above copyright 27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer. 28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright 29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the 30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the 31 * documentation and/or other materials provided with the
32 * distribution. 32 * distribution.
33 * 3. All advertising materials mentioning features or use of this 33 * 3. All advertising materials mentioning features or use of this
34 * software must display the following acknowledgement: 34 * software must display the following acknowledgement:
35 * This product includes software developed by the University of 35 * This product includes software developed by the University of
36 * California, Berkeley and its contributors. 36 * California, Berkeley and its contributors.
37 * This product includes software developed by Harvard University. 37 * This product includes software developed by Harvard University.
38 * This product includes software developed by Paul Kranenburg. 38 * This product includes software developed by Paul Kranenburg.
39 * 4. Neither the name of the University nor the names of its 39 * 4. Neither the name of the University nor the names of its
40 * contributors may be used to endorse or promote products derived 40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission. 41 * from this software without specific prior written permission.
42 * 42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' 43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
44 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 44 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
45 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 45 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
46 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR 46 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
47 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 47 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
51 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 51 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
52 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF 52 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
53 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 53 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE. 54 * DAMAGE.
55 * 55 *
56 * @(#)locore.s 8.4 (Berkeley) 12/10/93 56 * @(#)locore.s 8.4 (Berkeley) 12/10/93
57 */ 57 */
58 58
59#include "opt_ddb.h" 59#include "opt_ddb.h"
60#include "opt_kgdb.h" 60#include "opt_kgdb.h"
61#include "opt_multiprocessor.h" 61#include "opt_multiprocessor.h"
62#include "opt_compat_netbsd.h" 62#include "opt_compat_netbsd.h"
63#include "opt_compat_netbsd32.h" 63#include "opt_compat_netbsd32.h"
64#include "opt_lockdebug.h" 64#include "opt_lockdebug.h"
65 65
66#include "assym.h" 66#include "assym.h"
67#include <machine/param.h> 67#include <machine/param.h>
68#include <sparc64/sparc64/intreg.h> 68#include <sparc64/sparc64/intreg.h>
69#include <sparc64/sparc64/timerreg.h> 69#include <sparc64/sparc64/timerreg.h>
70#include <machine/ctlreg.h> 70#include <machine/ctlreg.h>
71#include <machine/psl.h> 71#include <machine/psl.h>
72#include <machine/signal.h> 72#include <machine/signal.h>
73#include <machine/trap.h> 73#include <machine/trap.h>
74#include <machine/frame.h> 74#include <machine/frame.h>
75#include <machine/pte.h> 75#include <machine/pte.h>
76#include <machine/pmap.h> 76#include <machine/pmap.h>
77#include <machine/intr.h> 77#include <machine/intr.h>
78#include <machine/asm.h> 78#include <machine/asm.h>
79#include <machine/locore.h> 79#include <machine/locore.h>
80#include <sys/syscall.h> 80#include <sys/syscall.h>
81 81
82#include "ksyms.h" 82#include "ksyms.h"
83 83
84 .register %g2,#scratch 84 .register %g2,#scratch
85 .register %g3,#scratch 85 .register %g3,#scratch
86 86
87#define BLOCK_SIZE SPARC64_BLOCK_SIZE 87#define BLOCK_SIZE SPARC64_BLOCK_SIZE
88#define BLOCK_ALIGN SPARC64_BLOCK_ALIGN 88#define BLOCK_ALIGN SPARC64_BLOCK_ALIGN
89 89
90#if defined(MULTIPROCESSOR) 90#if defined(MULTIPROCESSOR)
91/* 91/*
92 * IPI handler to do nothing, but causes rescheduling.. 92 * IPI handler to do nothing, but causes rescheduling..
93 * void sparc64_ipi_nop(void *); 93 * void sparc64_ipi_nop(void *);
94 */ 94 */
95ENTRY(sparc64_ipi_nop) 95ENTRY(sparc64_ipi_nop)
96 ba,a ret_from_intr_vector 96 ba,a ret_from_intr_vector
97 nop 97 nop
98 98
99/* 99/*
100 * IPI handler to halt the CPU. Just calls the C vector. 100 * IPI handler to halt the CPU. Just calls the C vector.
101 * void sparc64_ipi_halt(void *); 101 * void sparc64_ipi_halt(void *);
102 */ 102 */
103ENTRY(sparc64_ipi_halt) 103ENTRY(sparc64_ipi_halt)
104 call _C_LABEL(sparc64_ipi_halt_thiscpu) 104 call _C_LABEL(sparc64_ipi_halt_thiscpu)
105 clr %g4 105 clr %g4
106 sir 106 sir
107 107
108/* 108/*
109 * IPI handler to pause the CPU. We just trap to the debugger if it 109 * IPI handler to pause the CPU. We just trap to the debugger if it
110 * is configured, otherwise just return. 110 * is configured, otherwise just return.
111 */ 111 */
112ENTRY(sparc64_ipi_pause) 112ENTRY(sparc64_ipi_pause)
113#if defined(DDB) 113#if defined(DDB)
114 .global sparc64_ipi_pause_trap_point 114 .global sparc64_ipi_pause_trap_point
115sparc64_ipi_pause_trap_point: 115sparc64_ipi_pause_trap_point:
116 ta 1 116 ta 1
117 nop 117 nop
118#endif 118#endif
119 ba,a ret_from_intr_vector 119 ba,a ret_from_intr_vector
120 nop 120 nop
121 121
122/* 122/*
123 * Increment IPI event counter, defined in machine/{cpu,intr}.h. 123 * Increment IPI event counter, defined in machine/{cpu,intr}.h.
124 */ 124 */
125#define IPIEVC_INC(n,r1,r2) \ 125#define IPIEVC_INC(n,r1,r2) \
126 sethi %hi(CPUINFO_VA+CI_IPIEVC+EVC_SIZE*n), r2; \ 126 sethi %hi(CPUINFO_VA+CI_IPIEVC+EVC_SIZE*n), r2; \
127 ldx [r2 + %lo(CPUINFO_VA+CI_IPIEVC+EVC_SIZE*n)], r1; \ 127 ldx [r2 + %lo(CPUINFO_VA+CI_IPIEVC+EVC_SIZE*n)], r1; \
128 inc r1; \ 128 inc r1; \
129 stx r1, [r2 + %lo(CPUINFO_VA+CI_IPIEVC+EVC_SIZE*n)] 129 stx r1, [r2 + %lo(CPUINFO_VA+CI_IPIEVC+EVC_SIZE*n)]
130 130
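
IPIEVC_INC(n, r1, r2) bumps the n-th entry of the per-CPU IPI event-counter array through the CPUINFO_VA alias, so the increment is CPU-local and needs no locking. A rough C equivalent (the ci_ipi_evcnt field name is an assumption matching the CI_IPIEVC offset; EVC_SIZE corresponds to the size of an event-counter entry with ev_count as its first member):

    /* Rough C equivalent of the IPIEVC_INC(n, r1, r2) macro. */
    #define IPIEVC_INC_SKETCH(n)    (curcpu()->ci_ipi_evcnt[(n)].ev_count++)
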
131/* 131/*
132 * void sparc64_ipi_flush_pte_us(void *); 132 * void sparc64_ipi_flush_pte_us(void *);
133 * void sparc64_ipi_flush_pte_usiii(void *); 133 * void sparc64_ipi_flush_pte_usiii(void *);
134 * 134 *
135 * IPI handler to flush a single pte. We enter here with %tl already 1 135 * IPI handler to flush a single pte. We enter here with %tl already 1
136 * and PSTATE_IE already disabled, so there's no need to do it again. 136 * and PSTATE_IE already disabled, so there's no need to do it again.
137 * 137 *
138 * On entry: 138 * On entry:
139 * %g2 = vaddr_t va 139 * %g2 = vaddr_t va
140 * %g3 = int ctx 140 * %g3 = int ctx
141 */ 141 */
142ENTRY(sparc64_ipi_flush_pte_us) 142ENTRY(sparc64_ipi_flush_pte_us)
143 srlx %g2, PG_SHIFT4U, %g2 ! drop unused va bits 143 srlx %g2, PG_SHIFT4U, %g2 ! drop unused va bits
144 mov CTX_SECONDARY, %g5 144 mov CTX_SECONDARY, %g5
145 sllx %g2, PG_SHIFT4U, %g2 145 sllx %g2, PG_SHIFT4U, %g2
146 ldxa [%g5] ASI_DMMU, %g6 ! Save secondary context 146 ldxa [%g5] ASI_DMMU, %g6 ! Save secondary context
147 sethi %hi(KERNBASE), %g7 147 sethi %hi(KERNBASE), %g7
148 membar #LoadStore 148 membar #LoadStore
149 stxa %g3, [%g5] ASI_DMMU ! Insert context to demap 149 stxa %g3, [%g5] ASI_DMMU ! Insert context to demap
150 membar #Sync 150 membar #Sync
151 or %g2, DEMAP_PAGE_SECONDARY, %g2 ! Demap page from secondary context only 151 or %g2, DEMAP_PAGE_SECONDARY, %g2 ! Demap page from secondary context only
152 stxa %g2, [%g2] ASI_DMMU_DEMAP ! Do the demap 152 stxa %g2, [%g2] ASI_DMMU_DEMAP ! Do the demap
153 stxa %g2, [%g2] ASI_IMMU_DEMAP ! to both TLBs 153 stxa %g2, [%g2] ASI_IMMU_DEMAP ! to both TLBs
154#ifdef TLB_FLUSH_LOWVA 154#ifdef TLB_FLUSH_LOWVA
155 srl %g2, 0, %g2 ! and make sure it's both 32- and 64-bit entries 155 srl %g2, 0, %g2 ! and make sure it's both 32- and 64-bit entries
156 stxa %g2, [%g2] ASI_DMMU_DEMAP ! Do the demap 156 stxa %g2, [%g2] ASI_DMMU_DEMAP ! Do the demap
157 stxa %g2, [%g2] ASI_IMMU_DEMAP ! Do the demap 157 stxa %g2, [%g2] ASI_IMMU_DEMAP ! Do the demap
158#endif 158#endif
159 flush %g7 159 flush %g7
160 stxa %g6, [%g5] ASI_DMMU ! Restore secondary context 160 stxa %g6, [%g5] ASI_DMMU ! Restore secondary context
161 membar #Sync 161 membar #Sync
162 IPIEVC_INC(IPI_EVCNT_TLB_PTE,%g2,%g3) 162 IPIEVC_INC(IPI_EVCNT_TLB_PTE,%g2,%g3)
163  163
164 ba,a ret_from_intr_vector 164 ba,a ret_from_intr_vector
165 nop 165 nop
166 166
167ENTRY(sparc64_ipi_flush_pte_usiii) 167ENTRY(sparc64_ipi_flush_pte_usiii)
168 andn %g2, 0xfff, %g2 ! drop unused va bits 168 andn %g2, 0xfff, %g2 ! drop unused va bits
169 mov CTX_PRIMARY, %g5 169 mov CTX_PRIMARY, %g5
170 ldxa [%g5] ASI_DMMU, %g6 ! Save primary context 170 ldxa [%g5] ASI_DMMU, %g6 ! Save primary context
171 sethi %hi(KERNBASE), %g7 171 sethi %hi(KERNBASE), %g7
172 membar #LoadStore 172 membar #LoadStore
173 stxa %g3, [%g5] ASI_DMMU ! Insert context to demap 173 stxa %g3, [%g5] ASI_DMMU ! Insert context to demap
174 membar #Sync 174 membar #Sync
175 or %g2, DEMAP_PAGE_PRIMARY, %g2 175 or %g2, DEMAP_PAGE_PRIMARY, %g2
176 stxa %g2, [%g2] ASI_DMMU_DEMAP ! Do the demap 176 stxa %g2, [%g2] ASI_DMMU_DEMAP ! Do the demap
177 stxa %g2, [%g2] ASI_IMMU_DEMAP ! to both TLBs 177 stxa %g2, [%g2] ASI_IMMU_DEMAP ! to both TLBs
178#ifdef TLB_FLUSH_LOWVA 178#ifdef TLB_FLUSH_LOWVA
179 srl %g2, 0, %g2 ! and make sure it's both 32- and 64-bit entries 179 srl %g2, 0, %g2 ! and make sure it's both 32- and 64-bit entries
180 stxa %g2, [%g2] ASI_DMMU_DEMAP ! Do the demap 180 stxa %g2, [%g2] ASI_DMMU_DEMAP ! Do the demap
181 stxa %g2, [%g2] ASI_IMMU_DEMAP ! Do the demap 181 stxa %g2, [%g2] ASI_IMMU_DEMAP ! Do the demap
182#endif 182#endif
183 membar #Sync 183 membar #Sync
184 flush %g7 184 flush %g7
185 stxa %g6, [%g5] ASI_DMMU ! Restore primary context 185 stxa %g6, [%g5] ASI_DMMU ! Restore primary context
186 membar #Sync 186 membar #Sync
187 flush %g7 187 flush %g7
188 IPIEVC_INC(IPI_EVCNT_TLB_PTE,%g2,%g3) 188 IPIEVC_INC(IPI_EVCNT_TLB_PTE,%g2,%g3)
189 189
190 ba,a ret_from_intr_vector 190 ba,a ret_from_intr_vector
191 nop 191 nop
192 192
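
Both flush_pte handlers follow the same pattern: temporarily install the target context in an MMU context register, issue a page demap to both the D- and I-TLB, then restore the saved context (the US-II version uses the secondary context, the US-III version the primary one, and both finish with an I-flush of a kernel address). Reduced to pseudo-C, with the ldxa/stxa ASI accesses replaced by invented stand-ins and the membar/flush details omitted:

    /* Pseudo-C outline of sparc64_ipi_flush_pte_us(); runs at %tl 1, interrupts off. */
    void
    ipi_flush_pte_sketch(vaddr_t va, int ctx)
    {
            uint64_t saved;

            va &= ~PAGE_MASK;                       /* drop unused low bits */
            saved = ldxa_dmmu(CTX_SECONDARY);       /* save secondary context */
            stxa_dmmu(CTX_SECONDARY, ctx);          /* install context to demap */
            demap_page(va | DEMAP_PAGE_SECONDARY);  /* hit both D- and I-TLB */
            stxa_dmmu(CTX_SECONDARY, saved);        /* restore secondary context */
    }
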
193 193
194/* 194/*
195 * Secondary CPU bootstrap code. 195 * Secondary CPU bootstrap code.
196 */ 196 */
197 .text 197 .text
198 .align 32 198 .align 32
1991: rd %pc, %l0 1991: rd %pc, %l0
200 LDULNG [%l0 + (4f-1b)], %l1 200 LDULNG [%l0 + (4f-1b)], %l1
201 add %l0, (6f-1b), %l2 201 add %l0, (6f-1b), %l2
202 clr %l3 202 clr %l3
2032: cmp %l3, %l1 2032: cmp %l3, %l1
204 be CCCR, 3f 204 be CCCR, 3f
205 nop 205 nop
206 ldx [%l2 + TTE_VPN], %l4 206 ldx [%l2 + TTE_VPN], %l4
207 ldx [%l2 + TTE_DATA], %l5 207 ldx [%l2 + TTE_DATA], %l5
208 wr %g0, ASI_DMMU, %asi 208 wr %g0, ASI_DMMU, %asi
209 stxa %l4, [%g0 + TLB_TAG_ACCESS] %asi 209 stxa %l4, [%g0 + TLB_TAG_ACCESS] %asi
210 stxa %l5, [%g0] ASI_DMMU_DATA_IN 210 stxa %l5, [%g0] ASI_DMMU_DATA_IN
211 wr %g0, ASI_IMMU, %asi 211 wr %g0, ASI_IMMU, %asi
212 stxa %l4, [%g0 + TLB_TAG_ACCESS] %asi 212 stxa %l4, [%g0 + TLB_TAG_ACCESS] %asi
213 stxa %l5, [%g0] ASI_IMMU_DATA_IN 213 stxa %l5, [%g0] ASI_IMMU_DATA_IN
214 membar #Sync 214 membar #Sync
215 flush %l4 215 flush %l4
216 add %l2, PTE_SIZE, %l2 216 add %l2, PTE_SIZE, %l2
217 add %l3, 1, %l3 217 add %l3, 1, %l3
218 ba %xcc, 2b 218 ba %xcc, 2b
219 nop 219 nop
2203: LDULNG [%l0 + (5f-1b)], %l1 2203: LDULNG [%l0 + (5f-1b)], %l1
221 LDULNG [%l0 + (7f-1b)], %g2 ! Load cpu_info address. 221 LDULNG [%l0 + (7f-1b)], %g2 ! Load cpu_info address.
222 jmpl %l1, %g0 222 jmpl %l1, %g0
223 nop 223 nop
224 224
225 .align PTRSZ 225 .align PTRSZ
2264: ULONG 0x0 2264: ULONG 0x0
2275: ULONG 0x0 2275: ULONG 0x0
2287: ULONG 0x0 2287: ULONG 0x0
229 _ALIGN 229 _ALIGN
2306: 2306:
231 231
232#define DATA(name) \ 232#define DATA(name) \
233 .data ; \ 233 .data ; \
234 .align PTRSZ ; \ 234 .align PTRSZ ; \
235 .globl name ; \ 235 .globl name ; \
236name: 236name:
237 237
238DATA(mp_tramp_code) 238DATA(mp_tramp_code)
239 POINTER 1b 239 POINTER 1b
240DATA(mp_tramp_code_len) 240DATA(mp_tramp_code_len)
241 ULONG 6b-1b 241 ULONG 6b-1b
242DATA(mp_tramp_tlb_slots) 242DATA(mp_tramp_tlb_slots)
243 ULONG 4b-1b 243 ULONG 4b-1b
244DATA(mp_tramp_func) 244DATA(mp_tramp_func)
245 ULONG 5b-1b 245 ULONG 5b-1b
246DATA(mp_tramp_ci) 246DATA(mp_tramp_ci)
247 ULONG 7b-1b 247 ULONG 7b-1b
248 248
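
The DATA() exports give the MD startup code everything it needs to use the trampoline: a pointer to the code, its length, and the offsets of the three slots the trampoline reads relative to its own start (TLB slot count, entry point, cpu_info pointer), with the locked TTE list appended right after the code. A heavily hedged sketch of a consumer (the copy destination, the slot-count and entry-point variables, and the firmware start step are all assumptions, not the actual cpu.c code):

    /* Hedged sketch of how the mp_tramp_* exports might be consumed. */
    void
    start_secondary_sketch(struct cpu_info *ci, vaddr_t tramp_va)
    {
            extern void *mp_tramp_code;
            extern u_long mp_tramp_code_len, mp_tramp_tlb_slots,
                mp_tramp_func, mp_tramp_ci;

            memcpy((void *)tramp_va, mp_tramp_code, mp_tramp_code_len);
            *(u_long *)(tramp_va + mp_tramp_tlb_slots) = tlb_slot_count;      /* assumed */
            *(u_long *)(tramp_va + mp_tramp_func) = (u_long)cpu_entry_point;  /* assumed */
            *(u_long *)(tramp_va + mp_tramp_ci) = (u_long)ci;
            /* The locked TTE list (label 6:) is appended after the copied code,
             * then the CPU is started at the trampoline via the firmware. */
    }
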
249 .text 249 .text
250 .align 32 250 .align 32
251 251
252 252
253/* 253/*
254 * IPI handler to store the current FPU state. 254 * IPI handler to store the current FPU state.
255 * void sparc64_ipi_save_fpstate(void *); 255 * void sparc64_ipi_save_fpstate(void *);
256 * 256 *
257 * On entry: 257 * On entry:
258 * %g2 = lwp 258 * %g2 = lwp
259 */ 259 */
260ENTRY(sparc64_ipi_save_fpstate) 260ENTRY(sparc64_ipi_save_fpstate)
261 sethi %hi(FPLWP), %g1 261 sethi %hi(FPLWP), %g1
262 LDPTR [%g1 + %lo(FPLWP)], %g3 262 LDPTR [%g1 + %lo(FPLWP)], %g3
263 cmp %g3, %g2 263 cmp %g3, %g2
264 bne,pn CCCR, 7f ! skip if fplwp has changed 264 bne,pn CCCR, 7f ! skip if fplwp has changed
265 265
266 rdpr %pstate, %g2 ! enable FP before we begin 266 rdpr %pstate, %g2 ! enable FP before we begin
267 rd %fprs, %g5 267 rd %fprs, %g5
268 wr %g0, FPRS_FEF, %fprs 268 wr %g0, FPRS_FEF, %fprs
269 or %g2, PSTATE_PEF, %g2 269 or %g2, PSTATE_PEF, %g2
270 wrpr %g2, 0, %pstate 270 wrpr %g2, 0, %pstate
271 271
272 LDPTR [%g3 + L_FPSTATE], %g3 272 LDPTR [%g3 + L_FPSTATE], %g3
273 stx %fsr, [%g3 + FS_FSR] ! f->fs_fsr = getfsr(); 273 stx %fsr, [%g3 + FS_FSR] ! f->fs_fsr = getfsr();
274 rd %gsr, %g2 ! Save %gsr 274 rd %gsr, %g2 ! Save %gsr
275 st %g2, [%g3 + FS_GSR] 275 st %g2, [%g3 + FS_GSR]
276#if FS_REGS > 0 276#if FS_REGS > 0
277 add %g3, FS_REGS, %g3 277 add %g3, FS_REGS, %g3
278#endif 278#endif
279#ifdef DIAGNOSTIC 279#ifdef DIAGNOSTIC
280 btst BLOCK_ALIGN, %g3 ! Needs to be re-executed 280 btst BLOCK_ALIGN, %g3 ! Needs to be re-executed
281 bnz,pn %icc, 6f ! Check alignment 281 bnz,pn %icc, 6f ! Check alignment
282#endif 282#endif
283 st %g0, [%g3 + FS_QSIZE - FS_REGS] ! f->fs_qsize = 0; 283 st %g0, [%g3 + FS_QSIZE - FS_REGS] ! f->fs_qsize = 0;
284 btst FPRS_DL|FPRS_DU, %g5 ! Both FPU halves clean? 284 btst FPRS_DL|FPRS_DU, %g5 ! Both FPU halves clean?
285 bz,pt %icc, 5f ! Then skip it 285 bz,pt %icc, 5f ! Then skip it
286 286
287 mov CTX_PRIMARY, %g2 287 mov CTX_PRIMARY, %g2
288 ldxa [%g2] ASI_DMMU, %g6 288 ldxa [%g2] ASI_DMMU, %g6
289 membar #LoadStore 289 membar #LoadStore
290 stxa %g0, [%g2] ASI_DMMU ! Switch MMU to kernel primary context 290 stxa %g0, [%g2] ASI_DMMU ! Switch MMU to kernel primary context
291 membar #Sync 291 membar #Sync
292 292
293 btst FPRS_DL, %g5 ! Lower FPU clean? 293 btst FPRS_DL, %g5 ! Lower FPU clean?
294 bz,a,pt %icc, 1f ! Then skip it, but upper FPU not clean 294 bz,a,pt %icc, 1f ! Then skip it, but upper FPU not clean
295 add %g3, 2*BLOCK_SIZE, %g3 ! Skip a block 295 add %g3, 2*BLOCK_SIZE, %g3 ! Skip a block
296 296
297 stda %f0, [%g3] ASI_BLK_P ! f->fs_f0 = etc; 297 stda %f0, [%g3] ASI_BLK_P ! f->fs_f0 = etc;
298 inc BLOCK_SIZE, %g3 298 inc BLOCK_SIZE, %g3
299 stda %f16, [%g3] ASI_BLK_P 299 stda %f16, [%g3] ASI_BLK_P
300 300
301 btst FPRS_DU, %g5 ! Upper FPU clean? 301 btst FPRS_DU, %g5 ! Upper FPU clean?
302 bz,pt %icc, 2f ! Then skip it 302 bz,pt %icc, 2f ! Then skip it
303 inc BLOCK_SIZE, %g3 303 inc BLOCK_SIZE, %g3
3041: 3041:
305 stda %f32, [%g3] ASI_BLK_P 305 stda %f32, [%g3] ASI_BLK_P
306 inc BLOCK_SIZE, %g3 306 inc BLOCK_SIZE, %g3
307 stda %f48, [%g3] ASI_BLK_P 307 stda %f48, [%g3] ASI_BLK_P
3082: 3082:
309 membar #Sync ! Finish operation so we can 309 membar #Sync ! Finish operation so we can
310 brz,pn %g6, 5f ! Skip if context 0 310 brz,pn %g6, 5f ! Skip if context 0
311 nop 311 nop
312 stxa %g6, [%g2] ASI_DMMU ! Restore primary context 312 stxa %g6, [%g2] ASI_DMMU ! Restore primary context
313 membar #Sync 313 membar #Sync
3145: 3145:
315 wr %g0, FPRS_FEF, %fprs ! Mark FPU clean 315 wr %g0, FPRS_FEF, %fprs ! Mark FPU clean
316 STPTR %g0, [%g1 + %lo(FPLWP)] ! fplwp = NULL 316 STPTR %g0, [%g1 + %lo(FPLWP)] ! fplwp = NULL
3177: 3177:
318 IPIEVC_INC(IPI_EVCNT_FPU_SYNCH,%g2,%g3) 318 IPIEVC_INC(IPI_EVCNT_FPU_SYNCH,%g2,%g3)
319 ba,a ret_from_intr_vector 319 ba,a ret_from_intr_vector
320 nop 320 nop
321 321
322#ifdef DIAGNOSTIC 322#ifdef DIAGNOSTIC
323 !! 323 !!
324 !! Damn thing is *NOT* aligned on a 64-byte boundary 324 !! Damn thing is *NOT* aligned on a 64-byte boundary
325 !!  325 !!
3266: 3266:
327 wr %g0, FPRS_FEF, %fprs 327 wr %g0, FPRS_FEF, %fprs
328 ! XXX -- we should panic instead of silently entering debugger 328 ! XXX -- we should panic instead of silently entering debugger
329 ta 1 329 ta 1
330 nop 330 nop
331 ba,a ret_from_intr_vector 331 ba,a ret_from_intr_vector
332 nop 332 nop
333#endif 333#endif
334 334
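
sparc64_ipi_save_fpstate() only pays for the block stores when fplwp still matches the lwp named in the IPI and the FPRS dirty bits show that a register half was actually modified; it also switches the MMU to the kernel primary context for the duration of the ASI_BLK_P stores, then marks the FPU clean and clears fplwp. A pseudo-C outline (fplwp is the real MD global; the helper calls and the md_fpstate field name are assumptions):

    /* Pseudo-C outline of sparc64_ipi_save_fpstate(). */
    void
    ipi_save_fpstate_sketch(struct lwp *l)
    {
            struct fpstate64 *fs;

            if (fplwp != l)                 /* owner changed since the IPI was sent */
                    return;

            fs = l->l_md.md_fpstate;        /* field name assumed (L_FPSTATE) */
            fs->fs_fsr = get_fsr();         /* stand-in for stx %fsr */
            fs->fs_gsr = get_gsr();         /* stand-in for rd %gsr */
            fs->fs_qsize = 0;
            if (fpu_dirty())                /* FPRS_DL|FPRS_DU set */
                    save_fp_registers(fs);  /* the four 64-byte block stores */
            mark_fpu_clean();               /* wr %g0, FPRS_FEF, %fprs */
            fplwp = NULL;
    }
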
335/* 335/*
336 * IPI handler to drop the current FPU state. 336 * IPI handler to drop the current FPU state.
337 * void sparc64_ipi_drop_fpstate(void *); 337 * void sparc64_ipi_drop_fpstate(void *);
338 * 338 *
339 * On entry: 339 * On entry:
340 * %g2 = lwp 340 * %g2 = lwp
341 */ 341 */
342ENTRY(sparc64_ipi_drop_fpstate) 342ENTRY(sparc64_ipi_drop_fpstate)
343 rdpr %pstate, %g1 343 rdpr %pstate, %g1
344 wr %g0, FPRS_FEF, %fprs 344 wr %g0, FPRS_FEF, %fprs
345 or %g1, PSTATE_PEF, %g1 345 or %g1, PSTATE_PEF, %g1
346 wrpr %g1, 0, %pstate 346 wrpr %g1, 0, %pstate
347 set FPLWP, %g1 347 set FPLWP, %g1
348 CASPTR [%g1] ASI_N, %g2, %g0 ! fplwp = NULL if fplwp == %g2 348 CASPTR [%g1] ASI_N, %g2, %g0 ! fplwp = NULL if fplwp == %g2
349 membar #Sync ! Should not be needed due to retry 349 membar #Sync ! Should not be needed due to retry
350 IPIEVC_INC(IPI_EVCNT_FPU_FLUSH,%g2,%g3) 350 IPIEVC_INC(IPI_EVCNT_FPU_FLUSH,%g2,%g3)
351 ba,a ret_from_intr_vector 351 ba,a ret_from_intr_vector
352 nop 352 nop
353 353
354/* 354/*
355 * IPI handler to flush a page from the D-cache. 355 * IPI handler to flush a page from the D-cache.
356 * void sparc64_ipi_dcache_flush_page_usiii(paddr_t pa, int line_size) 356 * void sparc64_ipi_dcache_flush_page_usiii(paddr_t pa, int line_size)
357 * void sparc64_ipi_dcache_flush_page_us(paddr_t pa, int line_size) 357 * void sparc64_ipi_dcache_flush_page_us(paddr_t pa, int line_size)
358 * 358 *
359 * On entry: 359 * On entry:
360 * %g2 = pa 360 * %g2 = pa
361 * %g3 = line_size 361 * %g3 = line_size
362 */ 362 */
363ENTRY(sparc64_ipi_dcache_flush_page_usiii) 363ENTRY(sparc64_ipi_dcache_flush_page_usiii)
364 set NBPG, %g1 364 set NBPG, %g1
365 add %g2, %g1, %g1 ! end address 365 add %g2, %g1, %g1 ! end address
366 366
3671: 3671:
368 stxa %g0, [%g2] ASI_DCACHE_INVALIDATE 368 stxa %g0, [%g2] ASI_DCACHE_INVALIDATE
369 add %g2, %g3, %g2 369 add %g2, %g3, %g2
370 cmp %g2, %g1 370 cmp %g2, %g1
371 bl,pt %xcc, 1b 371 bl,pt %xcc, 1b
372 nop 372 nop
373 373
374 sethi %hi(KERNBASE), %g5 374 sethi %hi(KERNBASE), %g5
375 flush %g5 375 flush %g5
376 membar #Sync 376 membar #Sync
377 ba,a ret_from_intr_vector 377 ba,a ret_from_intr_vector
378 nop 378 nop
379 379
380ENTRY(sparc64_ipi_dcache_flush_page_us) 380ENTRY(sparc64_ipi_dcache_flush_page_us)
381 mov -1, %g1 ! Generate mask for tag: bits [29..2] 381 mov -1, %g1 ! Generate mask for tag: bits [29..2]
382 srlx %g2, 13-2, %g5 ! Tag is PA bits <40:13> in bits <29:2> 382 srlx %g2, 13-2, %g5 ! Tag is PA bits <40:13> in bits <29:2>
383 clr %g4 383 clr %g4
384 srl %g1, 2, %g1 ! Now we have bits <29:0> set 384 srl %g1, 2, %g1 ! Now we have bits <29:0> set
385 set (2*NBPG), %g7 385 set (2*NBPG), %g7
386 ba,pt %icc, 1f 386 ba,pt %icc, 1f
387 andn %g1, 3, %g1 ! Now we have bits <29:2> set 387 andn %g1, 3, %g1 ! Now we have bits <29:2> set
388 388
389 .align 8 389 .align 8
3901: 3901:
391 ldxa [%g4] ASI_DCACHE_TAG, %g6 391 ldxa [%g4] ASI_DCACHE_TAG, %g6
392 mov %g4, %g2 392 mov %g4, %g2
393 deccc 32, %g7 393 deccc 32, %g7
394 bl,pn %icc, 2f 394 bl,pn %icc, 2f
395 inc 32, %g4 395 inc 32, %g4
396 396
397 xor %g6, %g5, %g6 397 xor %g6, %g5, %g6
398 andcc %g6, %g1, %g0 398 andcc %g6, %g1, %g0
399 bne,pt %xcc, 1b 399 bne,pt %xcc, 1b
400 membar #LoadStore 400 membar #LoadStore
401 401
402 stxa %g0, [%g2] ASI_DCACHE_TAG 402 stxa %g0, [%g2] ASI_DCACHE_TAG
403 ba,pt %icc, 1b 403 ba,pt %icc, 1b
404 membar #StoreLoad 404 membar #StoreLoad
4052: 4052:
406 406
407 sethi %hi(KERNBASE), %g5 407 sethi %hi(KERNBASE), %g5
408 flush %g5 408 flush %g5
409 membar #Sync 409 membar #Sync
410 ba,a ret_from_intr_vector 410 ba,a ret_from_intr_vector
411 nop 411 nop
412 412
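
The two D-cache flush handlers differ only in how they reach the cache: the US-III version can invalidate by physical address, stepping through the page in line_size strides and writing each line to ASI_DCACHE_INVALIDATE, while the US-II version walks the direct-mapped D-cache tags and clears the lines whose tag matches the page. Pseudo-C for the simpler US-III loop (dcache_invalidate_line() is an invented stand-in for the stxa):

    /* Pseudo-C for sparc64_ipi_dcache_flush_page_usiii(). */
    void
    ipi_dcache_flush_page_sketch(paddr_t pa, int line_size)
    {
            paddr_t end = pa + NBPG;        /* one page worth of cache lines */

            for (; pa < end; pa += line_size)
                    dcache_invalidate_line(pa);     /* stxa ... ASI_DCACHE_INVALIDATE */
    }
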
413/* 
414 * Setup a C compatible environment and call a MI function. 
415 * 
416 * On entry: 
417 * %g2 = function to call 
418 * %g3 = single argument to called function 
419 */ 
420ENTRY(sparc64_ipi_ccall) 
421 save %sp, -CC64FSZ-16, %sp ! create a stack frame 
422 stx %g2, [%fp + BIAS -16 + 0] ! save function pointer 
423 stx %g3, [%fp + BIAS -16 + 8] ! and argument 
424 wrpr %g0, PSTATE_KERN, %pstate ! switch globals 
425 ldx [%fp + BIAS -16 + 0], %l0 ! reload function 
426 call %l0 ! call function 
427 ldx [%fp + BIAS -16 + 8], %o0 ! reload argument  
428 restore ! pop stack frame 
429 ba,a ret_from_intr_vector ! and return from IPI 
430 nop 
431#endif 413#endif