Mon Dec 19 10:57:00 2011 UTC ()
Whitespace.


(skrll)
diff -r1.55 -r1.56 src/sys/arch/hp700/hp700/locore.S

cvs diff -r1.55 -r1.56 src/sys/arch/hp700/hp700/Attic/locore.S (switch to unified diff)

--- src/sys/arch/hp700/hp700/Attic/locore.S 2011/02/07 12:19:35 1.55
+++ src/sys/arch/hp700/hp700/Attic/locore.S 2011/12/19 10:56:59 1.56
@@ -1,1128 +1,1128 @@ @@ -1,1128 +1,1128 @@
1/* $NetBSD: locore.S,v 1.55 2011/02/07 12:19:35 skrll Exp $ */ 1/* $NetBSD: locore.S,v 1.56 2011/12/19 10:56:59 skrll Exp $ */
2/* $OpenBSD: locore.S,v 1.158 2008/07/28 19:08:46 miod Exp $ */ 2/* $OpenBSD: locore.S,v 1.158 2008/07/28 19:08:46 miod Exp $ */
3 3
4/* 4/*
5 * Copyright (c) 1998-2004 Michael Shalayeff 5 * Copyright (c) 1998-2004 Michael Shalayeff
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 20 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
25 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
26 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 26 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGE. 27 * THE POSSIBILITY OF SUCH DAMAGE.
28 * 28 *
29 * Portitions of this file are derived from other sources, see 29 * Portitions of this file are derived from other sources, see
30 * the copyrights and acknowledgements below. 30 * the copyrights and acknowledgements below.
31 */ 31 */
32/* 32/*
33 * Copyright (c) 1990,1991,1992,1994 The University of Utah and 33 * Copyright (c) 1990,1991,1992,1994 The University of Utah and
34 * the Computer Systems Laboratory (CSL). All rights reserved. 34 * the Computer Systems Laboratory (CSL). All rights reserved.
35 * 35 *
36 * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS" 36 * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES 37 * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
38 * WHATSOEVER RESULTING FROM ITS USE. 38 * WHATSOEVER RESULTING FROM ITS USE.
39 * 39 *
40 * CSL requests users of this software to return to csl-dist@cs.utah.edu any 40 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
41 * improvements that they make and grant CSL redistribution rights. 41 * improvements that they make and grant CSL redistribution rights.
42 * 42 *
43 * Utah $Hdr: locore.s 1.62 94/12/15$ 43 * Utah $Hdr: locore.s 1.62 94/12/15$
44 */ 44 */
45/* 45/*
46 * (c) Copyright 1988 HEWLETT-PACKARD COMPANY 46 * (c) Copyright 1988 HEWLETT-PACKARD COMPANY
47 * 47 *
48 * To anyone who acknowledges that this file is provided "AS IS" 48 * To anyone who acknowledges that this file is provided "AS IS"
49 * without any express or implied warranty: 49 * without any express or implied warranty:
50 * permission to use, copy, modify, and distribute this file 50 * permission to use, copy, modify, and distribute this file
51 * for any purpose is hereby granted without fee, provided that 51 * for any purpose is hereby granted without fee, provided that
52 * the above copyright notice and this notice appears in all 52 * the above copyright notice and this notice appears in all
53 * copies, and that the name of Hewlett-Packard Company not be 53 * copies, and that the name of Hewlett-Packard Company not be
54 * used in advertising or publicity pertaining to distribution 54 * used in advertising or publicity pertaining to distribution
55 * of the software without specific, written prior permission. 55 * of the software without specific, written prior permission.
56 * Hewlett-Packard Company makes no representations about the 56 * Hewlett-Packard Company makes no representations about the
57 * suitability of this software for any purpose. 57 * suitability of this software for any purpose.
58 */ 58 */
59 59
60#include "opt_multiprocessor.h" 60#include "opt_multiprocessor.h"
61#include "opt_cputype.h" 61#include "opt_cputype.h"
62#include "opt_ddb.h" 62#include "opt_ddb.h"
63#include "opt_kgdb.h" 63#include "opt_kgdb.h"
64 64
65#include <sys/errno.h> 65#include <sys/errno.h>
66#include <machine/param.h> 66#include <machine/param.h>
67#include <machine/cpu.h> 67#include <machine/cpu.h>
68#include <machine/asm.h> 68#include <machine/asm.h>
69#include <machine/psl.h> 69#include <machine/psl.h>
70#include <machine/trap.h> 70#include <machine/trap.h>
71#include <machine/iomod.h> 71#include <machine/iomod.h>
72#include <machine/pdc.h> 72#include <machine/pdc.h>
73#include <machine/intr.h> 73#include <machine/intr.h>
74#include <machine/frame.h> 74#include <machine/frame.h>
75#include <machine/reg.h> 75#include <machine/reg.h>
76 76
77#include "assym.h" 77#include "assym.h"
78 78
79/* Some aliases for the macros in assym.h. */ 79/* Some aliases for the macros in assym.h. */
80#define TRAPFRAME_SIZEOF trapframe_SIZEOF 80#define TRAPFRAME_SIZEOF trapframe_SIZEOF
81 81
82#define ccr cr10 82#define ccr cr10
83#define rctr cr0 83#define rctr cr0
84 84
85/* 85/*
86 * Very crude debugging macros that write to com1. 86 * Very crude debugging macros that write to com1.
87 */ 87 */
88 88
89#if 1 89#if 1
90#define COM1_TX_REG (0xffd00000 + 0x5000 + 0x800) 90#define COM1_TX_REG (0xffd00000 + 0x5000 + 0x800)
91#else 91#else
92#define COM1_TX_REG (0xf0823000 + 0x800) 92#define COM1_TX_REG (0xf0823000 + 0x800)
93#endif 93#endif
94#define _DEBUG_PUTCHAR(reg1, reg2) ! \ 94#define _DEBUG_PUTCHAR(reg1, reg2) ! \
95 ldil L%COM1_TX_REG, %reg1 ! \ 95 ldil L%COM1_TX_REG, %reg1 ! \
96 stb %reg2, R%COM1_TX_REG(%sr1, %reg1) ! \ 96 stb %reg2, R%COM1_TX_REG(%sr1, %reg1) ! \
97 ldil L%10000000, %reg1 ! \ 97 ldil L%10000000, %reg1 ! \
98 ldi 1, %reg2 ! \ 98 ldi 1, %reg2 ! \
99 comb,<>,n %reg1, %r0, -8 ! \ 99 comb,<>,n %reg1, %r0, -8 ! \
100 sub %reg1, %reg2, %reg1 100 sub %reg1, %reg2, %reg1
101#define DEBUG_PUTCHAR(reg1, reg2, ch) ! \ 101#define DEBUG_PUTCHAR(reg1, reg2, ch) ! \
102 ldi ch, %reg2 ! \ 102 ldi ch, %reg2 ! \
103 _DEBUG_PUTCHAR(reg1,reg2) 103 _DEBUG_PUTCHAR(reg1,reg2)
104#define _DEBUG_DUMPN(reg1, reg2, reg3, p) ! \ 104#define _DEBUG_DUMPN(reg1, reg2, reg3, p) ! \
105 extru %reg3, p, 4, %reg2 ! \ 105 extru %reg3, p, 4, %reg2 ! \
106 comib,>>,n 10, %reg2, 0 ! \ 106 comib,>>,n 10, %reg2, 0 ! \
107 addi 39, %reg2, %reg2 ! \ 107 addi 39, %reg2, %reg2 ! \
108 addi 48, %reg2, %reg2 ! \ 108 addi 48, %reg2, %reg2 ! \
109 _DEBUG_PUTCHAR(reg1,reg2) 109 _DEBUG_PUTCHAR(reg1,reg2)
110#define DEBUG_DUMP32(reg1, reg2, reg3) ! \ 110#define DEBUG_DUMP32(reg1, reg2, reg3) ! \
111 DEBUG_PUTCHAR(reg1,reg2,58) ! \ 111 DEBUG_PUTCHAR(reg1,reg2,58) ! \
112 _DEBUG_DUMPN(reg1, reg2, reg3, 3) ! \ 112 _DEBUG_DUMPN(reg1, reg2, reg3, 3) ! \
113 _DEBUG_DUMPN(reg1, reg2, reg3, 7) ! \ 113 _DEBUG_DUMPN(reg1, reg2, reg3, 7) ! \
114 _DEBUG_DUMPN(reg1, reg2, reg3, 11) ! \ 114 _DEBUG_DUMPN(reg1, reg2, reg3, 11) ! \
115 _DEBUG_DUMPN(reg1, reg2, reg3, 15) ! \ 115 _DEBUG_DUMPN(reg1, reg2, reg3, 15) ! \
116 _DEBUG_DUMPN(reg1, reg2, reg3, 19) ! \ 116 _DEBUG_DUMPN(reg1, reg2, reg3, 19) ! \
117 _DEBUG_DUMPN(reg1, reg2, reg3, 23) ! \ 117 _DEBUG_DUMPN(reg1, reg2, reg3, 23) ! \
118 _DEBUG_DUMPN(reg1, reg2, reg3, 27) ! \ 118 _DEBUG_DUMPN(reg1, reg2, reg3, 27) ! \
119 _DEBUG_DUMPN(reg1, reg2, reg3, 31) 119 _DEBUG_DUMPN(reg1, reg2, reg3, 31)
120 120
121#ifdef XXXNH 121#ifdef XXXNH
122/* 122/*
123 * hv-specific instructions 123 * hv-specific instructions
124 */ 124 */
125#define DR_PAGE0 diag (0x70 << 5) /* XXXNH Different */ 125#define DR_PAGE0 diag (0x70 << 5) /* XXXNH Different */
126#define DR_PAGE1 diag (0x72 << 5) /* XXXNH Different */ 126#define DR_PAGE1 diag (0x72 << 5) /* XXXNH Different */
127#define MTCPU_T(x,t) diag ((t) << 21) | ((x) << 16) | (0xc0 << 5) /* XXXNH Different */ 127#define MTCPU_T(x,t) diag ((t) << 21) | ((x) << 16) | (0xc0 << 5) /* XXXNH Different */
128#define MFCPU_T(r,x) diag ((r) << 21) | (0xa0 << 5) | (x) 128#define MFCPU_T(r,x) diag ((r) << 21) | (0xa0 << 5) | (x)
129#define MTCPU_C(x,t) diag ((t) << 21) | ((x) << 16) | (0x12 << 5) 129#define MTCPU_C(x,t) diag ((t) << 21) | ((x) << 16) | (0x12 << 5)
130#define MFCPU_C(r,x) diag ((r) << 21) | ((x) << 16) | (0x30 << 5) 130#define MFCPU_C(r,x) diag ((r) << 21) | ((x) << 16) | (0x30 << 5)
131#define MFCPU_U(r,x) .word 0x140008a0 | ((r) << 21) | ((x)) 131#define MFCPU_U(r,x) .word 0x140008a0 | ((r) << 21) | ((x))
132#define MTCPU_U(x,r) .word 0x14001840 | ((r) << 21) | ((x) << 16) 132#define MTCPU_U(x,r) .word 0x14001840 | ((r) << 21) | ((x) << 16)
133 133
134#else 134#else
135 135
136#define DR_PAGE0 .word 0x14001200 136#define DR_PAGE0 .word 0x14001200
137#define DR_PAGE1 .word 0x14001240 137#define DR_PAGE1 .word 0x14001240
138#define MTCPU_T(x,t) .word 0x14001400 | ((t) << 21) | ((x) << 16) 138#define MTCPU_T(x,t) .word 0x14001400 | ((t) << 21) | ((x) << 16)
139#define MFCPU_T(r,x) .word 0x14001400 | ((r) << 21) | (x) 139#define MFCPU_T(r,x) .word 0x14001400 | ((r) << 21) | (x)
140#define MTCPU_C(x,t) .word 0x14000240 | ((t) << 21) | ((x) << 16) 140#define MTCPU_C(x,t) .word 0x14000240 | ((t) << 21) | ((x) << 16)
141#define MFCPU_C(r,x) .word 0x14000600 | ((r) << 21) | ((x) << 16) 141#define MFCPU_C(r,x) .word 0x14000600 | ((r) << 21) | ((x) << 16)
142#define MFCPU_U(r,x) .word 0x140008a0 | ((r) << 21) | ((x)) 142#define MFCPU_U(r,x) .word 0x140008a0 | ((r) << 21) | ((x))
143#define MTCPU_U(x,r) .word 0x14001840 | ((r) << 21) | ((x) << 16) 143#define MTCPU_U(x,r) .word 0x14001840 | ((r) << 21) | ((x) << 16)
144#endif 144#endif
145 145
146 .import $global$, data 146 .import $global$, data
147 .import pdc, data 147 .import pdc, data
148 .import boothowto, data 148 .import boothowto, data
149 .import bootdev, data 149 .import bootdev, data
150 .import esym, data 150 .import esym, data
151 .import virtual_avail, data 151 .import virtual_avail, data
152 .import lwp0, data 152 .import lwp0, data
153 .import panic, code 153 .import panic, code
154 .import fpu_csw, data 154 .import fpu_csw, data
155 .import hp700_interrupt_registers, data 155 .import hp700_interrupt_registers, data
156 156
157 BSS(pdc_stack, 4) /* temp stack for PDC call */ 157 BSS(pdc_stack, 4) /* temp stack for PDC call */
158 BSS(kernelmapped, 4) /* set when kernel is mapped */ 158 BSS(kernelmapped, 4) /* set when kernel is mapped */
159 BSS(hppa_vtop, 4) /* a vtop translation table addr (pa=va) */ 159 BSS(hppa_vtop, 4) /* a vtop translation table addr (pa=va) */
160 160
161 .text 161 .text
162 .import kernel_setup, entry 162 .import kernel_setup, entry
163 163
164/* 164/*
165 * This is the starting location for the kernel 165 * This is the starting location for the kernel
166 */ 166 */
167ENTRY_NOPROFILE(start,0) 167ENTRY_NOPROFILE(start,0)
168/* 168/*
169 * bootapiver <= 2 169 * bootapiver <= 2
170 * start(pdc, boothowto, bootdev, esym, bootapiver, argv, argc) 170 * start(pdc, boothowto, bootdev, esym, bootapiver, argv, argc)
171 * 171 *
172 * bootapiver == 3 172 * bootapiver == 3
173 * start(pdc, boothowto, bootdev, esym, bootapiver, bootinfo) 173 * start(pdc, boothowto, bootdev, esym, bootapiver, bootinfo)
174 * 174 *
175 * pdc - PDC entry point 175 * pdc - PDC entry point
176 * boothowto - boot flags (see "reboot.h") 176 * boothowto - boot flags (see "reboot.h")
177 * bootdev - boot device (index into bdevsw) 177 * bootdev - boot device (index into bdevsw)
178 * esym - end of symbol table (or &end if not present) 178 * esym - end of symbol table (or &end if not present)
179 * bootapiver - /boot API version 179 * bootapiver - /boot API version
180 * argv - options block passed from /boot 180 * argv - options block passed from /boot
181 * argc - the length of the block 181 * argc - the length of the block
182 * bootinfo - pointer to a struct bootinfo. 182 * bootinfo - pointer to a struct bootinfo.
183 */ 183 */
184 184
185 /* 185 /*
186 * save the pdc, boothowto, bootdev and esym arguments 186 * save the pdc, boothowto, bootdev and esym arguments
187 */ 187 */
188 ldil L%pdc,%r1 188 ldil L%pdc,%r1
189 stw %arg0,R%pdc(%r1) 189 stw %arg0,R%pdc(%r1)
190 ldil L%boothowto,%r1 190 ldil L%boothowto,%r1
191 stw %arg1,R%boothowto(%r1) 191 stw %arg1,R%boothowto(%r1)
192 ldil L%bootdev,%r1 192 ldil L%bootdev,%r1
193 stw %arg2,R%bootdev(%r1) 193 stw %arg2,R%bootdev(%r1)
194 ldil L%esym,%r1 194 ldil L%esym,%r1
195 stw %arg3,R%esym(%r1) 195 stw %arg3,R%esym(%r1)
196 196
197 /* 197 /*
198 * Put page aligned %arg3 into %t3. It is the start of available 198 * Put page aligned %arg3 into %t3. It is the start of available
199 * memory. 199 * memory.
200 */ 200 */
201 ldo NBPG-1(%arg3), %t3 201 ldo NBPG-1(%arg3), %t3
202 dep %r0, 31, PGSHIFT, %t3 202 dep %r0, 31, PGSHIFT, %t3
203 203
204 /* bootinfo struct address for hppa_init, if bootapiver is > 2 */ 204 /* bootinfo struct address for hppa_init, if bootapiver is > 2 */
205 ldw HPPA_FRAME_ARG(4)(%sp), %t1 205 ldw HPPA_FRAME_ARG(4)(%sp), %t1
206 ldw HPPA_FRAME_ARG(5)(%sp), %r5 206 ldw HPPA_FRAME_ARG(5)(%sp), %r5
207 comiclr,< 2, %t1, %r0 207 comiclr,< 2, %t1, %r0
208 copy %r0, %r5 208 copy %r0, %r5
209 209
210 /* assuming size being page-aligned */ 210 /* assuming size being page-aligned */
211#define STACK_ALLOC(n,s) \ 211#define STACK_ALLOC(n,s) \
212 ldil L%(n), %t1 ! \ 212 ldil L%(n), %t1 ! \
213 ldil L%(s), %t2 ! \ 213 ldil L%(s), %t2 ! \
214 stw %t3, R%(n)(%t1) ! \ 214 stw %t3, R%(n)(%t1) ! \
215 add %t3, %t2, %t3 215 add %t3, %t2, %t3
216 216
217 STACK_ALLOC(pdc_stack, PDC_STACKSIZE) 217 STACK_ALLOC(pdc_stack, PDC_STACKSIZE)
218 218
219 /* zero fake trapframe and lwp0 u-area */ 219 /* zero fake trapframe and lwp0 u-area */
220 /* XXX - we should create a real trapframe for lwp0 */ 220 /* XXX - we should create a real trapframe for lwp0 */
221 copy %t3, %t2 221 copy %t3, %t2
222 ldi NBPG+TRAPFRAME_SIZEOF, %t1 222 ldi NBPG+TRAPFRAME_SIZEOF, %t1
223L$start_zero_tf: 223L$start_zero_tf:
224 stws,ma %r0, 4(%t2) 224 stws,ma %r0, 4(%t2)
225 addib,>= -8, %t1, L$start_zero_tf 225 addib,>= -8, %t1, L$start_zero_tf
226 stws,ma %r0, 4(%t2) /* XXX could use ,bc here, but gas is broken */ 226 stws,ma %r0, 4(%t2) /* XXX could use ,bc here, but gas is broken */
227 227
228 /* 228 /*
229 * kernel stack starts a page and a trapframe above uarea address. 229 * kernel stack starts a page and a trapframe above uarea address.
230 */ 230 */
231 ldo NBPG+TRAPFRAME_SIZEOF(%t3), %sp 231 ldo NBPG+TRAPFRAME_SIZEOF(%t3), %sp
232 mtctl %t3, CR_FPPADDR 232 mtctl %t3, CR_FPPADDR
233 233
234 /* initialize the pcb */ 234 /* initialize the pcb */
235 stw %r0, PCB_ONFAULT(%t3) 235 stw %r0, PCB_ONFAULT(%t3)
236 stw %r0, PCB_SPACE(%t3) /* XXX HPPA_SID_KERNEL == 0 */ 236 stw %r0, PCB_SPACE(%t3) /* XXX HPPA_SID_KERNEL == 0 */
237 237
238 /* 238 /*
239 * Setup various pointers. 239 * Setup various pointers.
240 * 240 *
241 * First free memory is %t3 plus normal U space. The last page of 241 * First free memory is %t3 plus normal U space. The last page of
242 * USPACE is the redzone if DIAGNOSTIC (see param.h). 242 * USPACE is the redzone if DIAGNOSTIC (see param.h).
243 */ 243 */
244 ldil L%USPACE, %r4 244 ldil L%USPACE, %r4
245 add %t3, %r4, %r4 245 add %t3, %r4, %r4
246 246
247 ldil L%lwp0, %t2 247 ldil L%lwp0, %t2
248 stw %t3, R%lwp0+L_PCB(%t2) /* XXXuvm_lwp_getuarea */ 248 stw %t3, R%lwp0+L_PCB(%t2) /* XXXuvm_lwp_getuarea */
249 ldo NBPG(%t3), %t1 249 ldo NBPG(%t3), %t1
250 stw %t1, R%lwp0+L_MD_REGS(%t2) 250 stw %t1, R%lwp0+L_MD_REGS(%t2)
251 251
252 ldil L%TFF_LAST, %t1 252 ldil L%TFF_LAST, %t1
253 stw %t1, TF_FLAGS-TRAPFRAME_SIZEOF(%sp) 253 stw %t1, TF_FLAGS-TRAPFRAME_SIZEOF(%sp)
254 stw %t3, TF_CR30-TRAPFRAME_SIZEOF(%sp) 254 stw %t3, TF_CR30-TRAPFRAME_SIZEOF(%sp)
255 255
256 /* 256 /*
257 * disable all coprocessors 257 * disable all coprocessors
258 */ 258 */
259 mtctl %r0, %ccr 259 mtctl %r0, %ccr
260 260
261#ifdef MULTIPROCESSOR 261#ifdef MULTIPROCESSOR
262 262
263#define PZ_MEM_RENDEZ 0x10 263#define PZ_MEM_RENDEZ 0x10
264#define PZ_MEM_RENDEZ_HI 0x28 264#define PZ_MEM_RENDEZ_HI 0x28
265 265
266 /* Setup SMP rendezvous address. */ 266 /* Setup SMP rendezvous address. */
267 ldil L%hw_cpu_spinup_trampoline, %r1 267 ldil L%hw_cpu_spinup_trampoline, %r1
268 ldo R%hw_cpu_spinup_trampoline(%r1), %r1 268 ldo R%hw_cpu_spinup_trampoline(%r1), %r1
269 stw %r1, PZ_MEM_RENDEZ(%r0) 269 stw %r1, PZ_MEM_RENDEZ(%r0)
270 stw %r0, PZ_MEM_RENDEZ_HI(%r0) 270 stw %r0, PZ_MEM_RENDEZ_HI(%r0)
271#endif 271#endif
272 272
273 /* 273 /*
274 * We need to set the Q bit so that we can take TLB misses after we 274 * We need to set the Q bit so that we can take TLB misses after we
275 * turn on virtual memory. 275 * turn on virtual memory.
276 */ 276 */
277 copy %sp, %arg0 277 copy %sp, %arg0
278 ldil L%qisnowon, %rp 278 ldil L%qisnowon, %rp
279 ldo R%qisnowon(%rp), %rp 279 ldo R%qisnowon(%rp), %rp
280 280
281 b kernel_setup 281 b kernel_setup
282 ldi PSW_Q|PSW_I, %arg1 282 ldi PSW_Q|PSW_I, %arg1
283 283
284qisnowon: 284qisnowon:
285 copy %r4, %arg0 285 copy %r4, %arg0
286 copy %r5, %arg1 286 copy %r5, %arg1
287 /* 287 /*
288 * call C routine hppa_init() to initialize VM 288 * call C routine hppa_init() to initialize VM
289 */ 289 */
290 .import hppa_init, code 290 .import hppa_init, code
291 CALL(hppa_init, %r1) 291 CALL(hppa_init, %r1)
292 292
293 /* 293 /*
294 * Cannot change the queues or IPSW with the Q-bit on 294 * Cannot change the queues or IPSW with the Q-bit on
295 */ 295 */
296 rsm RESET_PSW, %r0 296 rsm RESET_PSW, %r0
297 nop ! nop ! nop ! nop ! nop ! nop ! nop 297 nop ! nop ! nop ! nop ! nop ! nop ! nop
298 298
299 /* 299 /*
300 * We need to do an rfi to get the C bit set 300 * We need to do an rfi to get the C bit set
301 */ 301 */
302 mtctl %r0, %pcsq 302 mtctl %r0, %pcsq
303 mtctl %r0, %pcsq 303 mtctl %r0, %pcsq
304 ldil L%virtual_mode, %t1 304 ldil L%virtual_mode, %t1
305 ldo R%virtual_mode(%t1), %t1 305 ldo R%virtual_mode(%t1), %t1
306 mtctl %t1, %pcoq 306 mtctl %t1, %pcoq
307 ldo 4(%t1), %t1 307 ldo 4(%t1), %t1
308 mtctl %t1, %pcoq 308 mtctl %t1, %pcoq
309 GET_CURCPU(%t1) 309 GET_CURCPU(%t1)
310 ldw CI_PSW(%t1), %t2 310 ldw CI_PSW(%t1), %t2
311 mtctl %t2, %ipsw 311 mtctl %t2, %ipsw
312 rfi 312 rfi
313 nop 313 nop
314 nop 314 nop
315 nop 315 nop
316 nop 316 nop
317 nop 317 nop
318 nop 318 nop
319 nop 319 nop
320 320
321virtual_mode: 321virtual_mode:
322 322
323 ldil L%kernelmapped, %t1 323 ldil L%kernelmapped, %t1
324 stw %t1, R%kernelmapped(%t1) 324 stw %t1, R%kernelmapped(%t1)
325 325
326#ifdef DDB 326#ifdef DDB
327 .import Debugger, code 327 .import Debugger, code
328 /* have to call debugger from here, from virtual mode */ 328 /* have to call debugger from here, from virtual mode */
329 ldil L%boothowto, %r1 329 ldil L%boothowto, %r1
330 ldw R%boothowto(%r1), %r1 330 ldw R%boothowto(%r1), %r1
331 bb,>= %r1, 25, L$noddb 331 bb,>= %r1, 25, L$noddb
332 nop 332 nop
333 333
334 break HPPA_BREAK_KERNEL, HPPA_BREAK_KGDB 334 break HPPA_BREAK_KERNEL, HPPA_BREAK_KGDB
335 nop 335 nop
336L$noddb: 336L$noddb:
337#endif 337#endif
338 338
339 .import main,code 339 .import main,code
340 CALL(main, %r1) 340 CALL(main, %r1)
341 /* should never return... */ 341 /* should never return... */
342 bv (%rp) 342 bv (%rp)
343 nop 343 nop
344EXIT(start) 344EXIT(start)
345 345
346 346
347/* 347/*
348 * void kernel_setup(register_t sp, register_t psw) 348 * void kernel_setup(register_t sp, register_t psw)
349 */ 349 */
350LEAF_ENTRY_NOPROFILE(kernel_setup) 350LEAF_ENTRY_NOPROFILE(kernel_setup)
351 351
352 /* 352 /*
353 * disable interrupts and turn off all bits in the psw so that 353 * disable interrupts and turn off all bits in the psw so that
354 * we start in a known state. 354 * we start in a known state.
355 */ 355 */
356 rsm RESET_PSW, %r0 356 rsm RESET_PSW, %r0
357 nop ! nop ! nop ! nop ! nop ! nop 357 nop ! nop ! nop ! nop ! nop ! nop
358 358
359 /* 359 /*
360 * go to virtual mode... 360 * go to virtual mode...
361 * get things ready for the kernel to run in virtual mode 361 * get things ready for the kernel to run in virtual mode
362 */ 362 */
363 ldi HPPA_PID_KERNEL, %r1 363 ldi HPPA_PID_KERNEL, %r1
364 mtctl %r1, %pidr1 364 mtctl %r1, %pidr1
365 mtctl %r1, %pidr2 365 mtctl %r1, %pidr2
366#if pbably_not_worth_it 366#if pbably_not_worth_it
367 mtctl %r0, %pidr3 367 mtctl %r0, %pidr3
368 mtctl %r0, %pidr4 368 mtctl %r0, %pidr4
369#endif 369#endif
370 mtsp %r0, %sr0 370 mtsp %r0, %sr0
371 mtsp %r0, %sr1 371 mtsp %r0, %sr1
372 mtsp %r0, %sr2 372 mtsp %r0, %sr2
373 mtsp %r0, %sr3 373 mtsp %r0, %sr3
374 mtsp %r0, %sr4 374 mtsp %r0, %sr4
375 mtsp %r0, %sr5 375 mtsp %r0, %sr5
376 mtsp %r0, %sr6 376 mtsp %r0, %sr6
377 mtsp %r0, %sr7 377 mtsp %r0, %sr7
378 378
379 /* 379 /*
380 * to keep the spl() routines consistent we need to put the correct 380 * to keep the spl() routines consistent we need to put the correct
381 * spl level into eiem, and reset any pending interrupts 381 * spl level into eiem, and reset any pending interrupts
382 */ 382 */
383 ldi -1, %r1 383 ldi -1, %r1
384 mtctl %r0, %eiem /* disable interrupts */ 384 mtctl %r0, %eiem /* disable interrupts */
385 mtctl %r1, %eirr 385 mtctl %r1, %eirr
386 386
387 /* 387 /*
388 * load address of interrupt vector table 388 * load address of interrupt vector table
389 */ 389 */
390 ldil L%ivaaddr, %t2 390 ldil L%ivaaddr, %t2
391 ldo R%ivaaddr(%t2), %t2 391 ldo R%ivaaddr(%t2), %t2
392 mtctl %t2, %iva 392 mtctl %t2, %iva
393 393
394 /* 394 /*
395 * set up the dp pointer so that we can do quick references off of it 395 * set up the dp pointer so that we can do quick references off of it
396 */ 396 */
397 ldil L%$global$,%dp 397 ldil L%$global$, %dp
398 ldo R%$global$(%dp), %dp 398 ldo R%$global$(%dp), %dp
399 399
400 /* 400 /*
401 * Create a stack frame for us to call C with. Clear out the previous 401 * Create a stack frame for us to call C with. Clear out the previous
402 * sp marker to mark that this is the first frame on the stack. 402 * sp marker to mark that this is the first frame on the stack.
403 */ 403 */
404 copy %arg0, %sp 404 copy %arg0, %sp
405 ldo 0(%arg0), %r3 405 ldo 0(%arg0), %r3
406 stw,ma %r0, HPPA_FRAME_SIZE(%sp) 406 stw,ma %r0, HPPA_FRAME_SIZE(%sp)
407 stw %r0, HPPA_FRAME_CRP(%sp) 407 stw %r0, HPPA_FRAME_CRP(%sp)
408 stw %r0, HPPA_FRAME_PSP(%sp) 408 stw %r0, HPPA_FRAME_PSP(%sp)
409 409
410 /* 410 /*
411 * We need to set the Q bit so that we can take TLB misses after we 411 * We need to set the Q bit so that we can take TLB misses after we
412 * turn on virtual memory. 412 * turn on virtual memory.
413 */ 413 */
414 414
415 mtctl %r0, %pcsq 415 mtctl %r0, %pcsq
416 mtctl %r0, %pcsq 416 mtctl %r0, %pcsq
417 mtctl %rp, %pcoq 417 mtctl %rp, %pcoq
418 ldo 4(%rp), %rp 418 ldo 4(%rp), %rp
419 mtctl %rp, %pcoq 419 mtctl %rp, %pcoq
420 mtctl %arg1, %ipsw 420 mtctl %arg1, %ipsw
421 rfi 421 rfi
422 nop 422 nop
423 nop 423 nop
424EXIT(kernel_setup) 424EXIT(kernel_setup)
425 425
426 426
427#ifdef MULTIPROCESSOR 427#ifdef MULTIPROCESSOR
428/* 428/*
429 * Trampoline to spin up secondary processors. 429 * Trampoline to spin up secondary processors.
430 */ 430 */
431LEAF_ENTRY_NOPROFILE(hw_cpu_spinup_trampoline) 431LEAF_ENTRY_NOPROFILE(hw_cpu_spinup_trampoline)
432 432
433 /* 433 /*
434 * disable interrupts and turn off all bits in the psw so that 434 * disable interrupts and turn off all bits in the psw so that
435 * we start in a known state. 435 * we start in a known state.
436 */ 436 */
437 rsm RESET_PSW, %r0 437 rsm RESET_PSW, %r0
438 nop ! nop ! nop ! nop ! nop ! nop 438 nop ! nop ! nop ! nop ! nop ! nop
439 439
440 /* go to virtual mode... 440 /* go to virtual mode...
441 /* get things ready for the kernel to run in virtual mode */ 441 /* get things ready for the kernel to run in virtual mode */
442 ldi HPPA_PID_KERNEL, %r1 442 ldi HPPA_PID_KERNEL, %r1
443 mtctl %r1, %pidr1 443 mtctl %r1, %pidr1
444 mtctl %r1, %pidr2 444 mtctl %r1, %pidr2
445#if pbably_not_worth_it 445#if pbably_not_worth_it
446 mtctl %r0, %pidr3 446 mtctl %r0, %pidr3
447 mtctl %r0, %pidr4 447 mtctl %r0, %pidr4
448#endif 448#endif
449 mtsp %r0, %sr0 449 mtsp %r0, %sr0
450 mtsp %r0, %sr1 450 mtsp %r0, %sr1
451 mtsp %r0, %sr2 451 mtsp %r0, %sr2
452 mtsp %r0, %sr3 452 mtsp %r0, %sr3
453 mtsp %r0, %sr4 453 mtsp %r0, %sr4
454 mtsp %r0, %sr5 454 mtsp %r0, %sr5
455 mtsp %r0, %sr6 455 mtsp %r0, %sr6
456 mtsp %r0, %sr7 456 mtsp %r0, %sr7
457 457
458 /* 458 /*
459 * disable all coprocessors 459 * disable all coprocessors
460 */ 460 */
461 mtctl %r0, %ccr 461 mtctl %r0, %ccr
462 462
463 /* 463 /*
464 * to keep the spl() routines consistent we need to put the correct 464 * to keep the spl() routines consistent we need to put the correct
465 * spl level into eiem, and reset any pending interrupts 465 * spl level into eiem, and reset any pending interrupts
466 */ 466 */
467 ldi -1, %r1 467 ldi -1, %r1
468 mtctl %r0, %eiem /* disable interrupts */ 468 mtctl %r0, %eiem /* disable interrupts */
469 mtctl %r1, %eirr 469 mtctl %r1, %eirr
470 470
471 /* 471 /*
472 * load address of interrupt vector table 472 * load address of interrupt vector table
473 */ 473 */
474 ldil L%ivaaddr, %t2 474 ldil L%ivaaddr, %t2
475 ldo R%ivaaddr(%t2), %t2 475 ldo R%ivaaddr(%t2), %t2
476 mtctl %t2, %iva 476 mtctl %t2, %iva
477 477
478 /* 478 /*
479 * set up the dp pointer so that we can do quick references off of it 479 * set up the dp pointer so that we can do quick references off of it
480 */ 480 */
481 ldil L%$global$, %dp 481 ldil L%$global$, %dp
482 ldo R%$global$(%dp), %dp 482 ldo R%$global$(%dp), %dp
483 483
484 /* 484 /*
485 * Store address of cpu_info in CR_CURCPU. 485 * Store address of cpu_info in CR_CURCPU.
486 */ 486 */
487 ldil L%cpu_hatch_info, %r3 487 ldil L%cpu_hatch_info, %r3
488 ldw R%cpu_hatch_info(%r3), %r3 488 ldw R%cpu_hatch_info(%r3), %r3
489 mtctl %r3, CR_CURCPU 489 mtctl %r3, CR_CURCPU
490 490
491 /* 491 /*
492 * Setup the stack frame for us to call C with and mark this as the 492 * Setup the stack frame for us to call C with and mark this as the
493 * first frame on the stack. 493 * first frame on the stack.
494 */ 494 */
495 ldw CI_STACK(%r3), %sp 495 ldw CI_STACK(%r3), %sp
496 stw,ma %r0, HPPA_FRAME_SIZE(%sp) 496 stw,ma %r0, HPPA_FRAME_SIZE(%sp)
497 stw %r0, HPPA_FRAME_CRP(%sp) 497 stw %r0, HPPA_FRAME_CRP(%sp)
498 stw %r0, HPPA_FRAME_PSP(%sp) 498 stw %r0, HPPA_FRAME_PSP(%sp)
499 499
500 /* Provide CPU with page tables. */ 500 /* Provide CPU with page tables. */
501 ldil L%hppa_vtop, %t1 501 ldil L%hppa_vtop, %t1
502 ldw R%hppa_vtop(%t1), %t1 502 ldw R%hppa_vtop(%t1), %t1
503 mtctl %t1, CR_VTOP 503 mtctl %t1, CR_VTOP
504 504
505 /* Turn on the Q bit so that we can handle TLB traps. */ 505 /* Turn on the Q bit so that we can handle TLB traps. */
506 ldil L%qenabled, %t1 506 ldil L%qenabled, %t1
507 ldo R%qenabled(%t1), %t1 507 ldo R%qenabled(%t1), %t1
508 mtctl %r0, %pcsq 508 mtctl %r0, %pcsq
509 mtctl %r0, %pcsq 509 mtctl %r0, %pcsq
510 mtctl %t1, %pcoq 510 mtctl %t1, %pcoq
511 ldo 4(%t1), %t1 511 ldo 4(%t1), %t1
512 mtctl %t1, %pcoq 512 mtctl %t1, %pcoq
513 ldi PSW_Q|PSW_I, %t2 513 ldi PSW_Q|PSW_I, %t2
514 mtctl %t2, %ipsw 514 mtctl %t2, %ipsw
515 rfi 515 rfi
516 nop 516 nop
517 517
518qenabled: 518qenabled:
519 /* Call C routine to setup CPU. */ 519 /* Call C routine to setup CPU. */
520 .import cpu_hw_init, code 520 .import cpu_hw_init, code
521 CALL(cpu_hw_init, %r1) 521 CALL(cpu_hw_init, %r1)
522 522
523 /* Switch CPU mode. */ 523 /* Switch CPU mode. */
524 ldil L%cpu_spinup_vm, %t1 524 ldil L%cpu_spinup_vm, %t1
525 ldo R%cpu_spinup_vm(%t1), %t1 525 ldo R%cpu_spinup_vm(%t1), %t1
526 mtctl %r0, %pcsq 526 mtctl %r0, %pcsq
527 mtctl %r0, %pcsq 527 mtctl %r0, %pcsq
528 mtctl %t1, %pcoq 528 mtctl %t1, %pcoq
529 ldo 4(%t1), %t1 529 ldo 4(%t1), %t1
530 mtctl %t1, %pcoq 530 mtctl %t1, %pcoq
531 mfctl CR_CURCPU, %t2 531 mfctl CR_CURCPU, %t2
532 ldw CI_PSW(%t2), %t2 532 ldw CI_PSW(%t2), %t2
533 mtctl %t2, %ipsw 533 mtctl %t2, %ipsw
534 rfi 534 rfi
535 nop 535 nop
536 536
537cpu_spinup_vm: 537cpu_spinup_vm:
538 538
539 /* 539 /*
540 * Okay, time to return to the land of C. 540 * Okay, time to return to the land of C.
541 */ 541 */
542 b cpu_hatch 542 b cpu_hatch
543 nop 543 nop
544 544
545EXIT(hw_cpu_spinup_trampoline) 545EXIT(hw_cpu_spinup_trampoline)
546#endif 546#endif
547 547
548 548
/*
 * int pdc_call(iodcio_t func, int pdc_flag, ...)
 *
 * Trampoline for calling PDC/IODC firmware routines.  Interrupts are
 * disabled for the duration of the call.  If the kernel is running
 * mapped (kernelmapped != 0) the call is made on the dedicated
 * pdc_stack, and the PSW is switched around the firmware call via the
 * HPPA_BREAK_KERNEL/HPPA_BREAK_SET_PSW break (firmware expects to run
 * with translation off).  Arguments 2..13 of pdc_call become
 * arguments 0..11 of the firmware routine.
 */
ENTRY(pdc_call,160)

	mfctl	%eiem, %t1		/* remember external interrupt mask */
	mtctl	%r0, %eiem		/* disable interrupts */
	stw	%rp, HPPA_FRAME_CRP(%sp)
	copy	%arg0, %r31		/* %r31 = firmware entry point */
	copy	%sp, %ret1		/* default: stay on current stack */

	ldil	L%kernelmapped, %ret0
	ldw	R%kernelmapped(%ret0), %ret0
	comb,=	%r0, %ret0, pdc_call_unmapped1	/* still unmapped? keep %sp */
	nop

	/* kernel is mapped: build the frame on the private PDC stack */
	ldil	L%pdc_stack, %ret1
	ldw	R%pdc_stack(%ret1), %ret1

pdc_call_unmapped1:
	copy	%sp, %r1		/* %r1 = caller's frame pointer */
	ldo	HPPA_FRAME_SIZE+24*4(%ret1), %sp	/* new frame + 24 arg slots */

	stw	%r1, HPPA_FRAME_PSP(%sp)	/* link back to previous %sp */

	/* save kernelmapped and eiem */
	stw	%ret0, HPPA_FRAME_ARG(21)(%sp)
	stw	%t1, HPPA_FRAME_ARG(22)(%sp)

	/* copy arguments */
	copy	%arg2, %arg0
	copy	%arg3, %arg1
	ldw	HPPA_FRAME_ARG(4)(%r1), %arg2
	ldw	HPPA_FRAME_ARG(5)(%r1), %arg3
	ldw	HPPA_FRAME_ARG(6)(%r1), %t1
	ldw	HPPA_FRAME_ARG(7)(%r1), %t2
	ldw	HPPA_FRAME_ARG(8)(%r1), %t3
	ldw	HPPA_FRAME_ARG(9)(%r1), %t4
	stw	%t1, HPPA_FRAME_ARG(4)(%sp)	/* XXX can use ,bc */
	stw	%t2, HPPA_FRAME_ARG(5)(%sp)
	stw	%t3, HPPA_FRAME_ARG(6)(%sp)
	stw	%t4, HPPA_FRAME_ARG(7)(%sp)
	ldw	HPPA_FRAME_ARG(10)(%r1), %t1
	ldw	HPPA_FRAME_ARG(11)(%r1), %t2
	ldw	HPPA_FRAME_ARG(12)(%r1), %t3
	ldw	HPPA_FRAME_ARG(13)(%r1), %t4
	stw	%t1, HPPA_FRAME_ARG(8)(%sp)
	stw	%t2, HPPA_FRAME_ARG(9)(%sp)
	stw	%t3, HPPA_FRAME_ARG(10)(%sp)
	stw	%t4, HPPA_FRAME_ARG(11)(%sp)

	/*
	 * save temp control regs (cr24-cr31) across the firmware call;
	 * firmware is free to clobber them
	 */
	mfctl	%cr24, %t1
	mfctl	%cr25, %t2
	mfctl	%cr26, %t3
	mfctl	%cr27, %t4
	stw	%t1, HPPA_FRAME_ARG(12)(%sp)	/* XXX can use ,bc */
	stw	%t2, HPPA_FRAME_ARG(13)(%sp)
	stw	%t3, HPPA_FRAME_ARG(14)(%sp)
	stw	%t4, HPPA_FRAME_ARG(15)(%sp)
	mfctl	%cr28, %t1
	mfctl	%cr29, %t2
	mfctl	%cr30, %t3
	mfctl	%cr31, %t4
	stw	%t1, HPPA_FRAME_ARG(16)(%sp)
	stw	%t2, HPPA_FRAME_ARG(17)(%sp)
	stw	%t3, HPPA_FRAME_ARG(18)(%sp)
	stw	%t4, HPPA_FRAME_ARG(19)(%sp)

	/* if we were not mapped, no PSW switch is needed */
	comb,=	%r0, %ret0, pdc_call_unmapped2
	nop

	/* switch PSW for the firmware (break handler does the mtctl) */
	copy	%arg0, %t4		/* preserve arg0 across the break */
	ldi	PSW_Q, %arg0	/* (!pdc_flag && args[0] == PDC_PIM)? PSW_M:0) */
	break	HPPA_BREAK_KERNEL, HPPA_BREAK_SET_PSW
	nop
	stw	%ret0, HPPA_FRAME_ARG(23)(%sp)	/* save old PSW for restore */
	copy	%t4, %arg0

pdc_call_unmapped2:
	/* call the firmware routine; blr sets %rp past the bv,n */
	.call
	blr	%r0, %rp
	bv,n	(%r31)
	nop

	/* load temp control regs */
	ldw	HPPA_FRAME_ARG(12)(%sp), %t1
	ldw	HPPA_FRAME_ARG(13)(%sp), %t2
	ldw	HPPA_FRAME_ARG(14)(%sp), %t3
	ldw	HPPA_FRAME_ARG(15)(%sp), %t4
	mtctl	%t1, %cr24
	mtctl	%t2, %cr25
	mtctl	%t3, %cr26
	mtctl	%t4, %cr27
	ldw	HPPA_FRAME_ARG(16)(%sp), %t1
	ldw	HPPA_FRAME_ARG(17)(%sp), %t2
	ldw	HPPA_FRAME_ARG(18)(%sp), %t3
	ldw	HPPA_FRAME_ARG(19)(%sp), %t4
	mtctl	%t1, %cr28
	mtctl	%t2, %cr29
	mtctl	%t3, %cr30
	mtctl	%t4, %cr31

	/* reload saved kernelmapped (%t1) and eiem (%t2) */
	ldw	HPPA_FRAME_ARG(21)(%sp), %t1
	ldw	HPPA_FRAME_ARG(22)(%sp), %t2
	comb,=	%r0, %t1, pdc_call_unmapped3
	nop

	/* we were mapped: restore the pre-call PSW via the break */
	copy	%ret0, %t3		/* preserve firmware return value */
	ldw	HPPA_FRAME_ARG(23)(%sp), %arg0
	break	HPPA_BREAK_KERNEL, HPPA_BREAK_SET_PSW
	nop
	copy	%t3, %ret0

pdc_call_unmapped3:
	ldw	HPPA_FRAME_PSP(%sp), %sp	/* back to the caller's stack */
	ldw	HPPA_FRAME_CRP(%sp), %rp
	bv	%r0(%rp)
	mtctl	%t2, %eiem	/* enable interrupts */
EXIT(pdc_call)
669 669
/*
 * int splraise(int ncpl);
 *
 * Raise the current priority level by or'ing ncpl into ci_cpl and
 * return the previous cpl.  The level never goes down here, so no
 * pending-interrupt dispatch is necessary.
 */
LEAF_ENTRY(splraise)
	GET_CURCPU(%t1)
	ldw	CI_CPL(%t1), %ret0	; old cpl is the return value
	or	%ret0, %arg0, %arg0	; new cpl = old cpl | ncpl
	bv	%r0(%rp)
	stw	%arg0, CI_CPL(%t1)	; store new cpl (delay slot)
EXIT(splraise)
680  680
/*
 * int spllower(int ncpl);
 *
 * Lower the current priority level to ncpl and, if that uncovers any
 * pending interrupts (ipending & ~ncpl), dispatch them via
 * hp700_intr_dispatch.  The fast path (nothing newly unmasked)
 * returns without building a stack frame.
 */
ENTRY(spllower,HPPA_FRAME_SIZE)
	GET_CURCPU(%t1)

	/* fast path: nothing pending below the new level */
	ldw	CI_IPENDING(%t1), %r1	; load ipending
	andcm,<>	%r1, %arg0, %r1	; and with complement of new cpl
	bv	%r0(%rp)		; nullified if bits remain pending
	stw	%arg0, CI_CPL(%t1)	; store new cpl

	/*
	 * Dispatch interrupts. There's a chance
	 * that we may end up not dispatching anything;
	 * in between our load of ipending and this
	 * disabling of interrupts, something else may
	 * have come in and dispatched some or all
	 * of what we previously saw in ipending.
	 */
	mfctl	%eiem, %arg1
	mtctl	%r0, %eiem		; disable interrupts

	/* re-check with interrupts off */
	ldw	CI_IPENDING(%t1), %r1	; load ipending
	andcm,<>	%r1, %arg0, %r1	; and with complement of new cpl
	b,n	spllower_out		; branch if we got beaten

spllower_dispatch:
	/* start stack calling convention */
	stw	%rp, HPPA_FRAME_CRP(%sp)
	copy	%r3, %r1
	copy	%sp, %r3
	stw,ma	%r1, HPPA_FRAME_SIZE(%sp)

	/* save ncpl and %eiem */
	stw	%arg0, HPPA_FRAME_ARG(0)(%r3)
	stw	%arg1, HPPA_FRAME_ARG(1)(%r3)

	/* call hp700_intr_dispatch */
	ldil	L%hp700_intr_dispatch, %r1
	ldo	R%hp700_intr_dispatch(%r1), %r1
	blr	%r0, %rp
	.call
	bv	%r0(%r1)
	copy	%r0, %arg2		; call with a NULL frame

	/* restore %eiem, we don't need ncpl */
	ldw	HPPA_FRAME_ARG(1)(%r3), %arg1

	/* end stack calling convention */
	ldw	HPPA_FRAME_CRP(%r3), %rp
	ldo	HPPA_FRAME_SIZE(%r3), %sp
	ldw,mb	-HPPA_FRAME_SIZE(%sp), %r3

spllower_out:
	/*
	 * Now return, storing %eiem in the delay slot.
	 * (hp700_intr_dispatch leaves it zero). I think
	 * doing this in the delay slot is important to
	 * prevent recursion, but I might be being too
	 * paranoid.
	 */
	bv	%r0(%rp)
	mtctl	%arg1, %eiem
EXIT(spllower)
745 745
/*
 * void hp700_intr_schedule(int mask);
 *
 * Mark the interrupts in mask as pending (or them into ci_ipending).
 * If any of them are not blocked by the current cpl, branch into the
 * spllower dispatch path to run them now; otherwise just restore
 * %eiem and return.  Runs with interrupts disabled while touching
 * ipending.
 */
ENTRY(hp700_intr_schedule,0)
	GET_CURCPU(%t2)
	mfctl	%eiem, %arg1
	mtctl	%r0, %eiem		; disable interrupts
	ldw	CI_IPENDING(%t2), %r1	; load ipending
	or	%r1, %arg0, %r1		; or in mask
	stw	%r1, CI_IPENDING(%t2)	; store ipending
	ldw	CI_CPL(%t2), %arg0	; load cpl
	andcm,=	%r1, %arg0, %r1		; and ipending with ~cpl
	b,n	spllower_dispatch	; dispatch if we can
	bv	%r0(%rp)
	mtctl	%arg1, %eiem
EXIT(hp700_intr_schedule)
762 762
/*
 *
 * int hp700_intr_ipending_new(struct hp700_int_reg *int_reg, int int_req);
 *
 * This assembles the mask of new pending interrupts.
 *
 * Walks the set bits of int_req from bit 31 down, translating each
 * through the interrupt register's bits map.  A map entry with
 * IR_BIT_REG set names a nested interrupt register, into which this
 * function recurses (reading that register's request word also
 * acknowledges it).  Returns the accumulated ipending mask.
 */
ENTRY(hp700_intr_ipending_new,HPPA_FRAME_SIZE)

	/* Start stack calling convention. */
	stw	%rp, HPPA_FRAME_CRP(%sp)
	copy	%r3, %r1
	copy	%sp, %r3
	stw,ma	%r1, HPPA_FRAME_SIZE(%sp)

	/*
	 * Get this interrupt register's interrupt bits map
	 * and start with the least significant bit and with
	 * a zero ipending_new value.
	 */
	ldo	IR_BITS_MAP(%arg0), %arg0
	ldi	31, %arg2		; %arg2 = current bit index
	copy	%r0, %ret0		; %ret0 = ipending_new accumulator

	/*
	 * The top of this loop finds the next set bit in
	 * the request register. Note that if the bvb does
	 * not branch, the addib is nullified, and control
	 * falls out of the loop. If the bvb does branch,
	 * the addib runs with the mtsar in its delay slot.
	 * If the addib branches, the mtsar is nullified.
	 */
L$hp700_inew_loop:
	mtsar	%arg2
	bvb,>=,n	%arg1, L$hp700_inew_loop
	addib,<,n	-1, %arg2, L$hp700_inew_done

	/*
	 * If the map entry for this bit has IR_BIT_REG
	 * set, branch to descend into the next interrupt
	 * register. Otherwise, set the bits in our ipending_new
	 * value and loop.
	 */
	ldwx,s	%arg2(%arg0), %t1
	ldil	L%IR_BIT_REG, %t2
	ldo	R%IR_BIT_REG(%t2), %t2
	and	%t1, %t2, %t3
	combt,=,n	%t2, %t3, L$hp700_inew_descend
	addib,>=	-1, %arg2, L$hp700_inew_loop
	or	%t1, %ret0, %ret0

L$hp700_inew_done:

	/* End stack calling convention. */
	ldw	HPPA_FRAME_CRP(%r3), %rp
	ldo	HPPA_FRAME_SIZE(%r3), %sp
	bv	%r0(%rp)
	ldw,mb	-HPPA_FRAME_SIZE(%sp), %r3

L$hp700_inew_descend:

	/*
	 * If the next interrupt register index is zero,
	 * this interrupt bit is unused. (Index zero
	 * is the CPU interrupt register, which you can
	 * never descend into since it's the root.)
	 */
	andcm,<>	%t1, %t2, %t1
	b,n	L$hp700_inew_unused

	/* Save our state. */
	stw	%arg0, HPPA_FRAME_ARG(0)(%r3)
	stw	%arg1, HPPA_FRAME_ARG(1)(%r3)
	stw	%arg2, HPPA_FRAME_ARG(2)(%r3)
	stw	%ret0, HPPA_FRAME_ARG(3)(%r3)

	/* Get our new interrupt register. */
	ldil	L%hp700_interrupt_registers, %arg0
	ldo	R%hp700_interrupt_registers(%arg0), %arg0
	sh2add	%t1, %arg0, %arg0	; index the pointer array
	ldw	0(%arg0), %arg0

	/*
	 * Read the interrupt request register and make
	 * our recursive call. The read also serves to
	 * acknowledge the interrupt to the I/O subsystem.
	 */
	ldw	IR_REQ(%arg0), %arg1
	bl	hp700_intr_ipending_new, %rp
	ldw	0(%arg1), %arg1

	/* Restore our state. */
	ldw	HPPA_FRAME_ARG(0)(%r3), %arg0
	ldw	HPPA_FRAME_ARG(1)(%r3), %arg1
	ldw	HPPA_FRAME_ARG(2)(%r3), %arg2
	ldw	HPPA_FRAME_ARG(3)(%r3), %ret1
	or	%ret1, %ret0, %ret0	; merge recursive result

L$hp700_inew_unused:
	addib,>=	-1, %arg2, L$hp700_inew_loop
	nop
	b,n	L$hp700_inew_done
EXIT(hp700_intr_ipending_new)
866 866
/*
 * void cpu_die(void);
 *
 * Reset the machine and never return: clear the PSW reset bits and
 * %sr0, then write CMD_RESET to the broadcast I/O module command
 * register and spin until the bus reset takes effect.
 */
LEAF_ENTRY_NOPROFILE(cpu_die)
	rsm	RESET_PSW, %r0
	nop
	nop
	mtsp	%r0, %sr0
	ldil	L%LBCAST_ADDR, %r25
	ldi	CMD_RESET, %r26
	stw	%r26, R%iomod_command(%r25)
forever:				; Loop until bus reset takes effect.
	b,n	forever
	nop
	nop
EXIT(cpu_die)
883 883
884/* Include the system call and trap handling. */ 884/* Include the system call and trap handling. */
885#include <hppa/hppa/trap.S> 885#include <hppa/hppa/trap.S>
886 886
887/* Include the userspace copyin/copyout functions. */ 887/* Include the userspace copyin/copyout functions. */
888#include <hppa/hppa/copy.S> 888#include <hppa/hppa/copy.S>
889 889
890/* Include the support functions. */ 890/* Include the support functions. */
891#include <hppa/hppa/support.S> 891#include <hppa/hppa/support.S>
892 892
/*
 * struct lwp *
 * cpu_switchto(struct lwp *oldl, struct lwp *newl, bool returning)
 *
 * Switch context from oldl to newl and return oldl.  Saves oldl's
 * callee-save registers and kernel stack pointer in its PCB (skipped
 * entirely when oldl is NULL, i.e. it has exited), then loads newl's
 * stack and callee-saves, updates curlwp and CR_FPPADDR, handles any
 * restartable atomic sequences, and sets up the FPU enable state.
 */
	.align	32
ENTRY(cpu_switchto,128)
	/* start stack calling convention */
	stw	%rp, HPPA_FRAME_CRP(%sp)
	copy	%r3, %r1
	copy	%sp, %r3
	stwm	%r1, HPPA_FRAME_SIZE+16*4(%sp)
	/* Frame marker and callee saves */
	stw	%r3, HPPA_FRAME_PSP(%sp)

#ifdef DIAGNOSTIC
	b,n	switch_diag

switch_error:
	/* panic("cpu_switchto: 0x%08x stack/len 0x%08x", arg0, t1) */
	copy	%t1, %arg1
	ldil	L%panic, %r1
	ldil	L%Lcspstr, %arg0
	ldo	R%panic(%r1), %r1
	ldo	R%Lcspstr(%arg0), %arg0
	.call
	blr	%r0, %rp
	bv,n	%r0(%r1)
	nop
Lcspstr:
	.asciz	"cpu_switchto: 0x%08x stack/len 0x%08x"
	.align	8

switch_diag:
	/*
	 * Either we must be switching to the same LWP, or
	 * the new LWP's kernel stack must be reasonable.
	 */
	comb,=,n	%arg0, %arg1, kstack_ok

	/*
	 * cpu_lwp_fork sets the initial stack to a page above uarea address.
	 * Check that the stack is above this value for oldl.
	 */
	ldw	L_PCB(%arg1), %arg2
	ldw	PCB_KSP(%arg2), %t1	/* t1 for switch_error */
	ldo	NBPG(%arg2), %arg2
	comb,>>,n	%arg2, %t1, switch_error
	nop

	/* make sure the stack hasn't grown too big (> USPACE) */
	sub	%t1, %arg2, %t1		/* t1 for switch_error */
	ldil	L%USPACE, %arg2
	ldo	R%USPACE(%arg2), %arg2
	comb,<<=,n	%arg2, %t1, switch_error
	nop
kstack_ok:
#endif

	/* If old LWP exited, don't bother saving anything. */
	comb,=,n	%r0, %arg0, switch_exited

	/*
	 * save old LWP context
	 *
	 * arg0: old LWP (oldl)
	 * arg1: new LWP (newl)
	 */

	ldw	L_PCB(%arg0), %t3	/* oldl pcb */
	stw	%sp, PCB_KSP(%t3)
	fdc	%r0(%t3)	/* flush oldl pcb - surely fdc PCB_KSP(%t3) */

	/*
	 * Save the callee-save registers. We don't need to do
	 * r3 here as it was done during stack calling convention.
	 */
	stw	%r4, 1*4(%r3)
	stw	%r5, 2*4(%r3)
	stw	%r6, 3*4(%r3)
	stw	%r7, 4*4(%r3)
	stw	%r8, 5*4(%r3)
	stw	%r9, 6*4(%r3)
	stw	%r10, 7*4(%r3)
	stw	%r11, 8*4(%r3)
	stw	%r12, 9*4(%r3)
	stw	%r13, 10*4(%r3)
	stw	%r14, 11*4(%r3)
	stw	%r15, 12*4(%r3)
	stw	%r16, 13*4(%r3)
	stw	%r17, 14*4(%r3)
	stw	%r18, 15*4(%r3)

	/*
	 * restore new LWP context
	 *
	 * arg0: old LWP (oldl)
	 * arg1: new LWP (newl)
	 */
switch_exited:
	ldw	L_MD(%arg1), %t1
	ldw	L_PCB(%arg1), %t3
	ldw	PCB_KSP(%t3), %sp	/* restore stack of newl */

	fdc	%r0(%t3)		/* Flush newl PCB - why? */

#if 0
	ldw	TF_CR9(%t1), %t3	/* pmap_activate? */
	mtctl	%t3, %pidr2		/* pmap_activate? */
#endif
	ldw	TF_CR30(%t1), %t2	/* pmap_activate? */
	mtctl	%t2, CR_FPPADDR	/* pmap_activate? */

	SET_CURLWP(%arg1, %t2)

	/* %r3 = newl's saved register area (mirrors our own prologue) */
	ldo	-(HPPA_FRAME_SIZE+16*4)(%sp), %r3

	ldw	1*4(%r3), %r4
	ldw	2*4(%r3), %r5
	ldw	3*4(%r3), %r6
	ldw	4*4(%r3), %r7
	ldw	5*4(%r3), %r8
	ldw	6*4(%r3), %r9
	ldw	7*4(%r3), %r10
	ldw	8*4(%r3), %r11
	ldw	9*4(%r3), %r12
	ldw	10*4(%r3), %r13
	ldw	11*4(%r3), %r14
	ldw	12*4(%r3), %r15
	ldw	13*4(%r3), %r16
	ldw	14*4(%r3), %r17
	ldw	15*4(%r3), %r18

	/*
	 * Check for restartable atomic sequences (RAS)
	 */
	ldw	L_PROC(%arg1), %t1
	ldw	P_RASLIST(%t1), %t1
	comb,=,n	%r0, %t1, noras

	/*
	 * Save some caller-saves we want to preserve.
	 *
	 * We save oldl (%arg0) and newl (%arg1) for the benefit of
	 * lwp_trampoline() for when it calls lwp_startup().
	 *
	 * oldl (%arg0) is saved as it's the return value
	 */
	stw	%arg0, HPPA_FRAME_ARG(0)(%r3)	/* oldl */
	stw	%arg1, HPPA_FRAME_ARG(1)(%r3)	/* newl */

	copy	%arg1, %arg0

	.import hppa_ras, code
	CALL(hppa_ras, %r1)

	/* restore caller-saves */
	ldw	HPPA_FRAME_ARG(1)(%r3), %arg1
	ldw	HPPA_FRAME_ARG(0)(%r3), %arg0

noras:
	/*
	 * As an optimization, hppa_fpu_bootstrap
	 * replaces this branch instruction with a
	 * nop if there is a hardware FPU.
	 */
ALTENTRY(hppa_fpu_nop1)
	b,n	switch_return

	/*
	 * We do have a hardware FPU. If the LWP
	 * that we just switched to has its state in the
	 * FPU, enable the FPU, else disable it, so if
	 * the LWP does try to use the coprocessor
	 * we'll get an assist emulation trap to swap
	 * states.
	 */
	GET_CURCPU(%t1)
	mfctl	%ccr, %r1
	mfctl	CR_FPPADDR, %t2
	ldw	CI_FPU_STATE(%t1), %t1
	depi	0, 25, 2, %r1		; disables the FPU
	comb,<>,n	%t1, %t2, 0	; nullify if LWPs different
	depi	3, 25, 2, %r1		; enables the FPU
	mtctl	%r1, %ccr

switch_return:
	copy	%arg0, %ret0		/* return oldl */

	ldw	HPPA_FRAME_CRP(%r3), %rp
	bv	0(%rp)
	ldwm	-(HPPA_FRAME_SIZE+16*4)(%sp), %r3
EXIT(cpu_switchto)
1084 1084
/*
 * This is the first code run in a new LWP after
 * cpu_switchto() has switched to it for the first time.
 *
 * This happens courtesy of the setup in cpu_lwp_fork() which
 * arranges for cpu_switchto() to call us with a frame containing
 * the first kernel function to call, and its argument.
 *
 * cpu_switchto() also makes sure that %arg0 and %arg1 are (still)
 * oldl and newl respectively.
 */
ENTRY_NOPROFILE(lwp_trampoline,HPPA_FRAME_SIZE)
	/* no return point */
	stw	%r0, HPPA_FRAME_CRP(%sp)

	/* %arg0, %arg1 are still valid from cpu_switchto */
	.import lwp_startup, code
	CALL(lwp_startup, %r1)

	/* get trampoline func (%t3) and arg (%arg0) */
	ldw	HPPA_FRAME_ARG(3)(%sp), %arg0
	ldw	HPPA_FRAME_ARG(2)(%sp), %t3

	/* call the first kernel function */
	.call
	blr	%r0, %rp
	bv,n	%r0(%t3)
	nop

	/*
	 * Since the first kernel function returned,
	 * this LWP was created by the fork()
	 * syscall, which we now return from.
	 */
	GET_CURLWP(%t2)
	.call
	b	syscall_return
	ldw	L_MD(%t2), %t3	/* trapframe for syscall_return (delay slot) */
EXIT(lwp_trampoline)
1124 1124
1125/* Include the signal code. */ 1125/* Include the signal code. */
1126#include <hppa/hppa/sigcode.S> 1126#include <hppa/hppa/sigcode.S>
1127 1127
1128 .end 1128 .end