Thu Sep 3 02:09:09 2020 UTC ()
Clean up all of the _PMAP_MAY_USE_PROM_CONSOLE crapola, centralizing the
logic in prom.c, and rename it _PROM_MAY_USE_PROM_CONSOLE in the few
places where it's still needed.


(thorpej)
diff -r1.125 -r1.126 src/sys/arch/alpha/alpha/locore.s
diff -r1.362 -r1.363 src/sys/arch/alpha/alpha/machdep.c
diff -r1.270 -r1.271 src/sys/arch/alpha/alpha/pmap.c
diff -r1.53 -r1.54 src/sys/arch/alpha/alpha/prom.c
diff -r1.40 -r1.41 src/sys/arch/alpha/alpha/promcons.c
diff -r1.38 -r1.39 src/sys/arch/alpha/include/alpha.h
diff -r1.83 -r1.84 src/sys/arch/alpha/include/pmap.h
diff -r1.14 -r1.15 src/sys/arch/alpha/include/prom.h
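
For context, the mechanism is simple enough to sketch before the diff:
locore.s now imports a prom_mapped flag and clears it once the kernel has
switched off the PROM's page tables (see the hunk below), and prom.c keys
its "may we touch the PROM console?" decisions off that one flag instead
of scattered _PMAP_MAY_USE_PROM_CONSOLE conditionals. A minimal C sketch
of that shape follows; prom_usable() and prom_putc_sketch() are
illustrative names assumed for this note, not the verbatim prom.c
interface.

	/*
	 * Illustrative sketch only -- not the verbatim prom.c code.
	 * The one piece taken from the diff below is the prom_mapped
	 * flag itself; the helper names are assumptions for this note.
	 */
	int prom_mapped = 1;	/* locore.s clears this after swpctx */

	static int
	prom_usable(void)	/* hypothetical helper */
	{
		/*
		 * Once the kernel is running on its own page tables,
		 * the PROM's mappings are gone, so its console entry
		 * points must not be called.
		 */
		return prom_mapped;
	}

	int
	prom_putc_sketch(int c)	/* hypothetical wrapper */
	{
		if (!prom_usable())
			return -1;	/* PROM console unreachable */
		/* ...enter the PROM mapping, call dispatch, leave... */
		return 0;
	}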

cvs diff -r1.125 -r1.126 src/sys/arch/alpha/alpha/locore.s

--- src/sys/arch/alpha/alpha/locore.s 2020/06/30 16:20:00 1.125
+++ src/sys/arch/alpha/alpha/locore.s 2020/09/03 02:09:09 1.126
@@ -1,1158 +1,1163 @@ @@ -1,1158 +1,1163 @@
1/* $NetBSD: locore.s,v 1.125 2020/06/30 16:20:00 maxv Exp $ */ 1/* $NetBSD: locore.s,v 1.126 2020/09/03 02:09:09 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1999, 2000, 2019 The NetBSD Foundation, Inc. 4 * Copyright (c) 1999, 2000, 2019 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center. 9 * NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. 34 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
35 * All rights reserved. 35 * All rights reserved.
36 * 36 *
37 * Author: Chris G. Demetriou 37 * Author: Chris G. Demetriou
38 * 38 *
39 * Permission to use, copy, modify and distribute this software and 39 * Permission to use, copy, modify and distribute this software and
40 * its documentation is hereby granted, provided that both the copyright 40 * its documentation is hereby granted, provided that both the copyright
41 * notice and this permission notice appear in all copies of the 41 * notice and this permission notice appear in all copies of the
42 * software, derivative works or modified versions, and any portions 42 * software, derivative works or modified versions, and any portions
43 * thereof, and that both notices appear in supporting documentation. 43 * thereof, and that both notices appear in supporting documentation.
44 * 44 *
45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
48 * 48 *
49 * Carnegie Mellon requests users of this software to return to 49 * Carnegie Mellon requests users of this software to return to
50 * 50 *
51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
52 * School of Computer Science 52 * School of Computer Science
53 * Carnegie Mellon University 53 * Carnegie Mellon University
54 * Pittsburgh PA 15213-3890 54 * Pittsburgh PA 15213-3890
55 * 55 *
56 * any improvements or extensions that they make and grant Carnegie the 56 * any improvements or extensions that they make and grant Carnegie the
57 * rights to redistribute these changes. 57 * rights to redistribute these changes.
58 */ 58 */
59 59
60.stabs __FILE__,100,0,0,kernel_text 60.stabs __FILE__,100,0,0,kernel_text
61 61
62#include "opt_ddb.h" 62#include "opt_ddb.h"
63#include "opt_kgdb.h" 63#include "opt_kgdb.h"
64#include "opt_multiprocessor.h" 64#include "opt_multiprocessor.h"
65#include "opt_lockdebug.h" 65#include "opt_lockdebug.h"
66#include "opt_compat_netbsd.h" 66#include "opt_compat_netbsd.h"
67 67
68#include <machine/asm.h> 68#include <machine/asm.h>
69 69
70__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.125 2020/06/30 16:20:00 maxv Exp $"); 70__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.126 2020/09/03 02:09:09 thorpej Exp $");
71 71
72#include "assym.h" 72#include "assym.h"
73 73
74.stabs __FILE__,132,0,0,kernel_text 74.stabs __FILE__,132,0,0,kernel_text
75 75
76/* 76/*
77 * Perform actions necessary to switch to a new context. The 77 * Perform actions necessary to switch to a new context. The
78 * hwpcb should be in a0. Clobbers v0, t0, t8..t11, a0. 78 * hwpcb should be in a0. Clobbers v0, t0, t8..t11, a0.
79 */ 79 */
80#define SWITCH_CONTEXT \ 80#define SWITCH_CONTEXT \
81 /* Make a note of the context we're running on. */ \ 81 /* Make a note of the context we're running on. */ \
82 GET_CURPCB ; \ 82 GET_CURPCB ; \
83 stq a0, 0(v0) ; \ 83 stq a0, 0(v0) ; \
84 \ 84 \
85 /* Swap in the new context. */ \ 85 /* Swap in the new context. */ \
86 call_pal PAL_OSF1_swpctx 86 call_pal PAL_OSF1_swpctx
87 87
88 88
89 /* don't reorder instructions; paranoia. */ 89 /* don't reorder instructions; paranoia. */
90 .set noreorder 90 .set noreorder
91 .text 91 .text
92 92
93 .macro bfalse reg, dst 93 .macro bfalse reg, dst
94 beq \reg, \dst 94 beq \reg, \dst
95 .endm 95 .endm
96 96
97 .macro btrue reg, dst 97 .macro btrue reg, dst
98 bne \reg, \dst 98 bne \reg, \dst
99 .endm 99 .endm
100 100
101/* 101/*
102 * This is for kvm_mkdb, and should be the address of the beginning 102 * This is for kvm_mkdb, and should be the address of the beginning
103 * of the kernel text segment (not necessarily the same as kernbase). 103 * of the kernel text segment (not necessarily the same as kernbase).
104 */ 104 */
105 EXPORT(kernel_text) 105 EXPORT(kernel_text)
106.loc 1 __LINE__ 106.loc 1 __LINE__
107kernel_text: 107kernel_text:
108 108
109/* 109/*
110 * bootstack: a temporary stack, for booting. 110 * bootstack: a temporary stack, for booting.
111 * 111 *
112 * Extends from 'start' down. 112 * Extends from 'start' down.
113 */ 113 */
114bootstack: 114bootstack:
115 115
116/* 116/*
117 * locorestart: Kernel start. This is no longer the actual entry 117 * locorestart: Kernel start. This is no longer the actual entry
118 * point, although jumping to here (the first kernel address) will 118 * point, although jumping to here (the first kernel address) will
119 * in fact work just fine. 119 * in fact work just fine.
120 * 120 *
121 * Arguments: 121 * Arguments:
122 * a0 is the first free page frame number (PFN) 122 * a0 is the first free page frame number (PFN)
123 * a1 is the page table base register (PTBR) 123 * a1 is the page table base register (PTBR)
124 * a2 is the bootinfo magic number 124 * a2 is the bootinfo magic number
125 * a3 is the pointer to the bootinfo structure 125 * a3 is the pointer to the bootinfo structure
126 * 126 *
127 * All arguments are passed to alpha_init(). 127 * All arguments are passed to alpha_init().
128 */ 128 */
 129IMPORT(prom_mapped, 4)
129NESTED_NOPROFILE(locorestart,1,0,ra,0,0) 130NESTED_NOPROFILE(locorestart,1,0,ra,0,0)
130 br pv,1f 131 br pv,1f
1311: LDGP(pv) 1321: LDGP(pv)
132 133
133 /* Switch to the boot stack. */ 134 /* Switch to the boot stack. */
134 lda sp,bootstack 135 lda sp,bootstack
135 136
136 /* Load KGP with current GP. */ 137 /* Load KGP with current GP. */
137 mov a0, s0 /* save pfn */ 138 mov a0, s0 /* save pfn */
138 mov gp, a0 139 mov gp, a0
139 call_pal PAL_OSF1_wrkgp /* clobbers a0, t0, t8-t11 */ 140 call_pal PAL_OSF1_wrkgp /* clobbers a0, t0, t8-t11 */
140 mov s0, a0 /* restore pfn */ 141 mov s0, a0 /* restore pfn */
141 142
142 /* 143 /*
143 * Call alpha_init() to do pre-main initialization. 144 * Call alpha_init() to do pre-main initialization.
144 * alpha_init() gets the arguments we were called with, 145 * alpha_init() gets the arguments we were called with,
145 * which are already in a0, a1, a2, a3, and a4. 146 * which are already in a0, a1, a2, a3, and a4.
146 */ 147 */
147 CALL(alpha_init) 148 CALL(alpha_init)
148 149
149 /* Set up the virtual page table pointer. */ 150 /* Set up the virtual page table pointer. */
150 ldiq a0, VPTBASE 151 ldiq a0, VPTBASE
151 call_pal PAL_OSF1_wrvptptr /* clobbers a0, t0, t8-t11 */ 152 call_pal PAL_OSF1_wrvptptr /* clobbers a0, t0, t8-t11 */
152 153
153 /* 154 /*
154 * Switch to lwp0's PCB. 155 * Switch to lwp0's PCB.
155 */ 156 */
156 lda a0, lwp0 157 lda a0, lwp0
157 ldq a0, L_MD_PCBPADDR(a0) /* phys addr of PCB */ 158 ldq a0, L_MD_PCBPADDR(a0) /* phys addr of PCB */
158 SWITCH_CONTEXT 159 SWITCH_CONTEXT
159 160
 161 /* PROM is no longer mapped. */
 162 lda t0, prom_mapped
 163 stl zero, 0(t0)
 164
160 /* 165 /*
161 * We've switched to a new page table base, so invalidate the TLB 166 * We've switched to a new page table base, so invalidate the TLB
162 * and I-stream. This happens automatically everywhere but here. 167 * and I-stream. This happens automatically everywhere but here.
163 */ 168 */
164 ldiq a0, -2 /* TBIA */ 169 ldiq a0, -2 /* TBIA */
165 call_pal PAL_OSF1_tbi 170 call_pal PAL_OSF1_tbi
166 call_pal PAL_imb 171 call_pal PAL_imb
167 172
168 /* 173 /*
169 * All ready to go! Call main()! 174 * All ready to go! Call main()!
170 */ 175 */
171 CALL(main) 176 CALL(main)
172 177
173 /* This should never happen. */ 178 /* This should never happen. */
174 PANIC("main() returned",Lmain_returned_pmsg) 179 PANIC("main() returned",Lmain_returned_pmsg)
175 END(locorestart) 180 END(locorestart)
176 181
177/**************************************************************************/ 182/**************************************************************************/
178 183
179/* 184/*
180 * Pull in the PROM interface routines; these are needed for 185 * Pull in the PROM interface routines; these are needed for
181 * prom printf (while bootstrapping), and for determining the 186 * prom printf (while bootstrapping), and for determining the
182 * boot device, etc. 187 * boot device, etc.
183 */ 188 */
184#include <alpha/alpha/prom_disp.s> 189#include <alpha/alpha/prom_disp.s>
185 190
186/**************************************************************************/ 191/**************************************************************************/
187 192
188/* 193/*
189 * Pull in the PALcode function stubs. 194 * Pull in the PALcode function stubs.
190 */ 195 */
191#include <alpha/alpha/pal.s> 196#include <alpha/alpha/pal.s>
192 197
193/**************************************************************************/ 198/**************************************************************************/
194 199
195/**************************************************************************/ 200/**************************************************************************/
196 201
197#if defined(MULTIPROCESSOR) 202#if defined(MULTIPROCESSOR)
198/* 203/*
199 * Pull in the multiprocssor glue. 204 * Pull in the multiprocssor glue.
200 */ 205 */
201#include <alpha/alpha/multiproc.s> 206#include <alpha/alpha/multiproc.s>
202#endif /* MULTIPROCESSOR */ 207#endif /* MULTIPROCESSOR */
203 208
204/**************************************************************************/ 209/**************************************************************************/
205 210
206/**************************************************************************/ 211/**************************************************************************/
207 212
208#if defined(DDB) || defined(KGDB) 213#if defined(DDB) || defined(KGDB)
209/* 214/*
210 * Pull in debugger glue. 215 * Pull in debugger glue.
211 */ 216 */
212#include <alpha/alpha/debug.s> 217#include <alpha/alpha/debug.s>
213#endif /* DDB || KGDB */ 218#endif /* DDB || KGDB */
214 219
215/**************************************************************************/ 220/**************************************************************************/
216 221
217/**************************************************************************/ 222/**************************************************************************/
218 223
219 .text 224 .text
220.stabs __FILE__,132,0,0,backtolocore1 /* done with includes */ 225.stabs __FILE__,132,0,0,backtolocore1 /* done with includes */
221.loc 1 __LINE__ 226.loc 1 __LINE__
222backtolocore1: 227backtolocore1:
223/**************************************************************************/ 228/**************************************************************************/
224 229
225#ifdef COMPAT_16 230#ifdef COMPAT_16
226/* 231/*
227 * Signal "trampoline" code. 232 * Signal "trampoline" code.
228 * 233 *
229 * The kernel arranges for the handler to be invoked directly. This 234 * The kernel arranges for the handler to be invoked directly. This
230 * trampoline is used only to return from the signal. 235 * trampoline is used only to return from the signal.
231 * 236 *
232 * The stack pointer points to the saved sigcontext. 237 * The stack pointer points to the saved sigcontext.
233 */ 238 */
234 239
235NESTED_NOPROFILE(sigcode,0,0,ra,0,0) 240NESTED_NOPROFILE(sigcode,0,0,ra,0,0)
236 mov sp, a0 /* get pointer to sigcontext */ 241 mov sp, a0 /* get pointer to sigcontext */
237 CALLSYS_NOERROR(compat_16___sigreturn14) /* and call sigreturn() with it. */ 242 CALLSYS_NOERROR(compat_16___sigreturn14) /* and call sigreturn() with it. */
238 mov v0, a0 /* if that failed, get error code */ 243 mov v0, a0 /* if that failed, get error code */
239 CALLSYS_NOERROR(exit) /* and call exit() with it. */ 244 CALLSYS_NOERROR(exit) /* and call exit() with it. */
240XNESTED(esigcode,0) 245XNESTED(esigcode,0)
241 END(sigcode) 246 END(sigcode)
242#endif /* COMPAT_16 */ 247#endif /* COMPAT_16 */
243 248
244/**************************************************************************/ 249/**************************************************************************/
245 250
246/* 251/*
247 * exception_return: return from trap, exception, or syscall 252 * exception_return: return from trap, exception, or syscall
248 */ 253 */
249 254
250IMPORT(ssir, 8) 255IMPORT(ssir, 8)
251 256
252LEAF(exception_return, 1) /* XXX should be NESTED */ 257LEAF(exception_return, 1) /* XXX should be NESTED */
253 br pv, 1f 258 br pv, 1f
2541: LDGP(pv) 2591: LDGP(pv)
255 260
256 ldq s1, (FRAME_PS * 8)(sp) /* get the saved PS */ 261 ldq s1, (FRAME_PS * 8)(sp) /* get the saved PS */
257 and s1, ALPHA_PSL_IPL_MASK, t0 /* look at the saved IPL */ 262 and s1, ALPHA_PSL_IPL_MASK, t0 /* look at the saved IPL */
258 bne t0, 5f /* != 0: can't do AST or SIR */ 263 bne t0, 5f /* != 0: can't do AST or SIR */
259 264
260 /* see if we can do an SIR */ 265 /* see if we can do an SIR */
2612: ldq t1, ssir /* SIR pending? */ 2662: ldq t1, ssir /* SIR pending? */
262 bne t1, 6f /* yes */ 267 bne t1, 6f /* yes */
263 /* no */ 268 /* no */
264 269
265 and s1, ALPHA_PSL_USERMODE, t0 /* are we returning to user? */ 270 and s1, ALPHA_PSL_USERMODE, t0 /* are we returning to user? */
266 beq t0, 5f /* no: just return */ 271 beq t0, 5f /* no: just return */
267 /* yes */ 272 /* yes */
268 273
269 /* GET_CPUINFO clobbers v0, t0, t8...t11. */ 274 /* GET_CPUINFO clobbers v0, t0, t8...t11. */
2703: GET_CPUINFO 2753: GET_CPUINFO
271 276
272 /* check for AST */ 277 /* check for AST */
273 ldq t1, CPU_INFO_CURLWP(v0) 278 ldq t1, CPU_INFO_CURLWP(v0)
274 ldl t3, L_MD_ASTPENDING(t1) /* AST pending? */ 279 ldl t3, L_MD_ASTPENDING(t1) /* AST pending? */
275 bne t3, 7f /* yes */ 280 bne t3, 7f /* yes */
276 /* no: headed back to user space */ 281 /* no: headed back to user space */
277 282
278 /* Enable the FPU based on whether MDLWP_FPACTIVE is set. */ 283 /* Enable the FPU based on whether MDLWP_FPACTIVE is set. */
2794: ldq t2, L_MD_FLAGS(t1) 2844: ldq t2, L_MD_FLAGS(t1)
280 cmplt t2, zero, a0 285 cmplt t2, zero, a0
281 call_pal PAL_OSF1_wrfen 286 call_pal PAL_OSF1_wrfen
282 287
283 /* restore the registers, and return */ 288 /* restore the registers, and return */
2845: bsr ra, exception_restore_regs /* jmp/CALL trashes pv/t12 */ 2895: bsr ra, exception_restore_regs /* jmp/CALL trashes pv/t12 */
285 ldq ra,(FRAME_RA*8)(sp) 290 ldq ra,(FRAME_RA*8)(sp)
286 .set noat 291 .set noat
287 ldq at_reg,(FRAME_AT*8)(sp) 292 ldq at_reg,(FRAME_AT*8)(sp)
288 293
289 lda sp,(FRAME_SW_SIZE*8)(sp) 294 lda sp,(FRAME_SW_SIZE*8)(sp)
290 call_pal PAL_OSF1_rti 295 call_pal PAL_OSF1_rti
291 .set at 296 .set at
292 /* NOTREACHED */ 297 /* NOTREACHED */
293 298
294 /* We've got a SIR */ 299 /* We've got a SIR */
2956: ldiq a0, ALPHA_PSL_IPL_SOFT 3006: ldiq a0, ALPHA_PSL_IPL_SOFT
296 call_pal PAL_OSF1_swpipl 301 call_pal PAL_OSF1_swpipl
297 mov v0, s2 /* remember old IPL */ 302 mov v0, s2 /* remember old IPL */
298 CALL(softintr_dispatch) 303 CALL(softintr_dispatch)
299 304
300 /* SIR handled; restore IPL and check again */ 305 /* SIR handled; restore IPL and check again */
301 mov s2, a0 306 mov s2, a0
302 call_pal PAL_OSF1_swpipl 307 call_pal PAL_OSF1_swpipl
303 br 2b 308 br 2b
304 309
305 /* We've got an AST */ 310 /* We've got an AST */
3067: stl zero, L_MD_ASTPENDING(t1) /* no AST pending */ 3117: stl zero, L_MD_ASTPENDING(t1) /* no AST pending */
307 312
308 ldiq a0, ALPHA_PSL_IPL_0 /* drop IPL to zero */ 313 ldiq a0, ALPHA_PSL_IPL_0 /* drop IPL to zero */
309 call_pal PAL_OSF1_swpipl 314 call_pal PAL_OSF1_swpipl
310 mov v0, s2 /* remember old IPL */ 315 mov v0, s2 /* remember old IPL */
311 316
312 mov sp, a0 /* only arg is frame */ 317 mov sp, a0 /* only arg is frame */
313 CALL(ast) 318 CALL(ast)
314 319
315 /* AST handled; restore IPL and check again */ 320 /* AST handled; restore IPL and check again */
316 mov s2, a0 321 mov s2, a0
317 call_pal PAL_OSF1_swpipl 322 call_pal PAL_OSF1_swpipl
318 br 3b 323 br 3b
319 324
320 END(exception_return) 325 END(exception_return)
321 326
322LEAF(exception_save_regs, 0) 327LEAF(exception_save_regs, 0)
323 stq v0,(FRAME_V0*8)(sp) 328 stq v0,(FRAME_V0*8)(sp)
324 stq a3,(FRAME_A3*8)(sp) 329 stq a3,(FRAME_A3*8)(sp)
325 stq a4,(FRAME_A4*8)(sp) 330 stq a4,(FRAME_A4*8)(sp)
326 stq a5,(FRAME_A5*8)(sp) 331 stq a5,(FRAME_A5*8)(sp)
327 stq s0,(FRAME_S0*8)(sp) 332 stq s0,(FRAME_S0*8)(sp)
328 stq s1,(FRAME_S1*8)(sp) 333 stq s1,(FRAME_S1*8)(sp)
329 stq s2,(FRAME_S2*8)(sp) 334 stq s2,(FRAME_S2*8)(sp)
330 stq s3,(FRAME_S3*8)(sp) 335 stq s3,(FRAME_S3*8)(sp)
331 stq s4,(FRAME_S4*8)(sp) 336 stq s4,(FRAME_S4*8)(sp)
332 stq s5,(FRAME_S5*8)(sp) 337 stq s5,(FRAME_S5*8)(sp)
333 stq s6,(FRAME_S6*8)(sp) 338 stq s6,(FRAME_S6*8)(sp)
334 stq t0,(FRAME_T0*8)(sp) 339 stq t0,(FRAME_T0*8)(sp)
335 stq t1,(FRAME_T1*8)(sp) 340 stq t1,(FRAME_T1*8)(sp)
336 stq t2,(FRAME_T2*8)(sp) 341 stq t2,(FRAME_T2*8)(sp)
337 stq t3,(FRAME_T3*8)(sp) 342 stq t3,(FRAME_T3*8)(sp)
338 stq t4,(FRAME_T4*8)(sp) 343 stq t4,(FRAME_T4*8)(sp)
339 stq t5,(FRAME_T5*8)(sp) 344 stq t5,(FRAME_T5*8)(sp)
340 stq t6,(FRAME_T6*8)(sp) 345 stq t6,(FRAME_T6*8)(sp)
341 stq t7,(FRAME_T7*8)(sp) 346 stq t7,(FRAME_T7*8)(sp)
342 stq t8,(FRAME_T8*8)(sp) 347 stq t8,(FRAME_T8*8)(sp)
343 stq t9,(FRAME_T9*8)(sp) 348 stq t9,(FRAME_T9*8)(sp)
344 stq t10,(FRAME_T10*8)(sp) 349 stq t10,(FRAME_T10*8)(sp)
345 stq t11,(FRAME_T11*8)(sp) 350 stq t11,(FRAME_T11*8)(sp)
346 stq t12,(FRAME_T12*8)(sp) 351 stq t12,(FRAME_T12*8)(sp)
347 RET 352 RET
348 END(exception_save_regs) 353 END(exception_save_regs)
349 354
350LEAF(exception_restore_regs, 0) 355LEAF(exception_restore_regs, 0)
351 ldq v0,(FRAME_V0*8)(sp) 356 ldq v0,(FRAME_V0*8)(sp)
352 ldq a3,(FRAME_A3*8)(sp) 357 ldq a3,(FRAME_A3*8)(sp)
353 ldq a4,(FRAME_A4*8)(sp) 358 ldq a4,(FRAME_A4*8)(sp)
354 ldq a5,(FRAME_A5*8)(sp) 359 ldq a5,(FRAME_A5*8)(sp)
355 ldq s0,(FRAME_S0*8)(sp) 360 ldq s0,(FRAME_S0*8)(sp)
356 ldq s1,(FRAME_S1*8)(sp) 361 ldq s1,(FRAME_S1*8)(sp)
357 ldq s2,(FRAME_S2*8)(sp) 362 ldq s2,(FRAME_S2*8)(sp)
358 ldq s3,(FRAME_S3*8)(sp) 363 ldq s3,(FRAME_S3*8)(sp)
359 ldq s4,(FRAME_S4*8)(sp) 364 ldq s4,(FRAME_S4*8)(sp)
360 ldq s5,(FRAME_S5*8)(sp) 365 ldq s5,(FRAME_S5*8)(sp)
361 ldq s6,(FRAME_S6*8)(sp) 366 ldq s6,(FRAME_S6*8)(sp)
362 ldq t0,(FRAME_T0*8)(sp) 367 ldq t0,(FRAME_T0*8)(sp)
363 ldq t1,(FRAME_T1*8)(sp) 368 ldq t1,(FRAME_T1*8)(sp)
364 ldq t2,(FRAME_T2*8)(sp) 369 ldq t2,(FRAME_T2*8)(sp)
365 ldq t3,(FRAME_T3*8)(sp) 370 ldq t3,(FRAME_T3*8)(sp)
366 ldq t4,(FRAME_T4*8)(sp) 371 ldq t4,(FRAME_T4*8)(sp)
367 ldq t5,(FRAME_T5*8)(sp) 372 ldq t5,(FRAME_T5*8)(sp)
368 ldq t6,(FRAME_T6*8)(sp) 373 ldq t6,(FRAME_T6*8)(sp)
369 ldq t7,(FRAME_T7*8)(sp) 374 ldq t7,(FRAME_T7*8)(sp)
370 ldq t8,(FRAME_T8*8)(sp) 375 ldq t8,(FRAME_T8*8)(sp)
371 ldq t9,(FRAME_T9*8)(sp) 376 ldq t9,(FRAME_T9*8)(sp)
372 ldq t10,(FRAME_T10*8)(sp) 377 ldq t10,(FRAME_T10*8)(sp)
373 ldq t11,(FRAME_T11*8)(sp) 378 ldq t11,(FRAME_T11*8)(sp)
374 ldq t12,(FRAME_T12*8)(sp) 379 ldq t12,(FRAME_T12*8)(sp)
375 RET 380 RET
376 END(exception_restore_regs) 381 END(exception_restore_regs)
377 382
378/**************************************************************************/ 383/**************************************************************************/
379 384
380/* 385/*
381 * XentArith: 386 * XentArith:
382 * System arithmetic trap entry point. 387 * System arithmetic trap entry point.
383 */ 388 */
384 389
385 PALVECT(XentArith) /* setup frame, save registers */ 390 PALVECT(XentArith) /* setup frame, save registers */
386 391
387 /* a0, a1, & a2 already set up */ 392 /* a0, a1, & a2 already set up */
388 ldiq a3, ALPHA_KENTRY_ARITH 393 ldiq a3, ALPHA_KENTRY_ARITH
389 mov sp, a4 ; .loc 1 __LINE__ 394 mov sp, a4 ; .loc 1 __LINE__
390 CALL(trap) 395 CALL(trap)
391 396
392 jmp zero, exception_return 397 jmp zero, exception_return
393 END(XentArith) 398 END(XentArith)
394 399
395/**************************************************************************/ 400/**************************************************************************/
396 401
397/* 402/*
398 * XentIF: 403 * XentIF:
399 * System instruction fault trap entry point. 404 * System instruction fault trap entry point.
400 */ 405 */
401 406
402 PALVECT(XentIF) /* setup frame, save registers */ 407 PALVECT(XentIF) /* setup frame, save registers */
403 408
404 /* a0, a1, & a2 already set up */ 409 /* a0, a1, & a2 already set up */
405 ldiq a3, ALPHA_KENTRY_IF 410 ldiq a3, ALPHA_KENTRY_IF
406 mov sp, a4 ; .loc 1 __LINE__ 411 mov sp, a4 ; .loc 1 __LINE__
407 CALL(trap) 412 CALL(trap)
408 jmp zero, exception_return  413 jmp zero, exception_return
409 END(XentIF) 414 END(XentIF)
410 415
411/**************************************************************************/ 416/**************************************************************************/
412 417
413/* 418/*
414 * XentInt: 419 * XentInt:
415 * System interrupt entry point. 420 * System interrupt entry point.
416 */ 421 */
417 422
418 PALVECT(XentInt) /* setup frame, save registers */ 423 PALVECT(XentInt) /* setup frame, save registers */
419 424
420 /* a0, a1, & a2 already set up */ 425 /* a0, a1, & a2 already set up */
421 mov sp, a3 ; .loc 1 __LINE__ 426 mov sp, a3 ; .loc 1 __LINE__
422 CALL(interrupt) 427 CALL(interrupt)
423 jmp zero, exception_return 428 jmp zero, exception_return
424 END(XentInt) 429 END(XentInt)
425 430
426/**************************************************************************/ 431/**************************************************************************/
427 432
428/* 433/*
429 * XentMM: 434 * XentMM:
430 * System memory management fault entry point. 435 * System memory management fault entry point.
431 */ 436 */
432 437
433 PALVECT(XentMM) /* setup frame, save registers */ 438 PALVECT(XentMM) /* setup frame, save registers */
434 439
435 /* a0, a1, & a2 already set up */ 440 /* a0, a1, & a2 already set up */
436 ldiq a3, ALPHA_KENTRY_MM 441 ldiq a3, ALPHA_KENTRY_MM
437 mov sp, a4 ; .loc 1 __LINE__ 442 mov sp, a4 ; .loc 1 __LINE__
438 CALL(trap) 443 CALL(trap)
439 444
440 jmp zero, exception_return 445 jmp zero, exception_return
441 END(XentMM) 446 END(XentMM)
442 447
443/**************************************************************************/ 448/**************************************************************************/
444 449
445/* 450/*
446 * XentSys: 451 * XentSys:
447 * System call entry point. 452 * System call entry point.
448 */ 453 */
449 454
450 ESETUP(XentSys) ; .loc 1 __LINE__ 455 ESETUP(XentSys) ; .loc 1 __LINE__
451 456
452 stq v0,(FRAME_V0*8)(sp) /* in case we need to restart */ 457 stq v0,(FRAME_V0*8)(sp) /* in case we need to restart */
453 stq s0,(FRAME_S0*8)(sp) 458 stq s0,(FRAME_S0*8)(sp)
454 stq s1,(FRAME_S1*8)(sp) 459 stq s1,(FRAME_S1*8)(sp)
455 stq s2,(FRAME_S2*8)(sp) 460 stq s2,(FRAME_S2*8)(sp)
456 stq s3,(FRAME_S3*8)(sp) 461 stq s3,(FRAME_S3*8)(sp)
457 stq s4,(FRAME_S4*8)(sp) 462 stq s4,(FRAME_S4*8)(sp)
458 stq s5,(FRAME_S5*8)(sp) 463 stq s5,(FRAME_S5*8)(sp)
459 stq s6,(FRAME_S6*8)(sp) 464 stq s6,(FRAME_S6*8)(sp)
460 stq a0,(FRAME_A0*8)(sp) 465 stq a0,(FRAME_A0*8)(sp)
461 stq a1,(FRAME_A1*8)(sp) 466 stq a1,(FRAME_A1*8)(sp)
462 stq a2,(FRAME_A2*8)(sp) 467 stq a2,(FRAME_A2*8)(sp)
463 stq a3,(FRAME_A3*8)(sp) 468 stq a3,(FRAME_A3*8)(sp)
464 stq a4,(FRAME_A4*8)(sp) 469 stq a4,(FRAME_A4*8)(sp)
465 stq a5,(FRAME_A5*8)(sp) 470 stq a5,(FRAME_A5*8)(sp)
466 stq ra,(FRAME_RA*8)(sp) 471 stq ra,(FRAME_RA*8)(sp)
467 472
468 /* syscall number, passed in v0, is first arg, frame pointer second */ 473 /* syscall number, passed in v0, is first arg, frame pointer second */
469 mov v0,a1 474 mov v0,a1
470 GET_CURLWP 475 GET_CURLWP
471 ldq a0,0(v0) 476 ldq a0,0(v0)
472 mov sp,a2 ; .loc 1 __LINE__ 477 mov sp,a2 ; .loc 1 __LINE__
473 ldq t11,L_PROC(a0) 478 ldq t11,L_PROC(a0)
474 ldq t12,P_MD_SYSCALL(t11) 479 ldq t12,P_MD_SYSCALL(t11)
475 CALL((t12)) 480 CALL((t12))
476 481
477 jmp zero, exception_return 482 jmp zero, exception_return
478 END(XentSys) 483 END(XentSys)
479 484
480/**************************************************************************/ 485/**************************************************************************/
481 486
482/* 487/*
483 * XentUna: 488 * XentUna:
484 * System unaligned access entry point. 489 * System unaligned access entry point.
485 */ 490 */
486 491
487LEAF(XentUna, 3) /* XXX should be NESTED */ 492LEAF(XentUna, 3) /* XXX should be NESTED */
488 .set noat 493 .set noat
489 lda sp,-(FRAME_SW_SIZE*8)(sp) 494 lda sp,-(FRAME_SW_SIZE*8)(sp)
490 stq at_reg,(FRAME_AT*8)(sp) 495 stq at_reg,(FRAME_AT*8)(sp)
491 .set at 496 .set at
492 stq ra,(FRAME_RA*8)(sp) 497 stq ra,(FRAME_RA*8)(sp)
493 bsr ra, exception_save_regs /* jmp/CALL trashes pv/t12 */ 498 bsr ra, exception_save_regs /* jmp/CALL trashes pv/t12 */
494 499
495 /* a0, a1, & a2 already set up */ 500 /* a0, a1, & a2 already set up */
496 ldiq a3, ALPHA_KENTRY_UNA 501 ldiq a3, ALPHA_KENTRY_UNA
497 mov sp, a4 ; .loc 1 __LINE__ 502 mov sp, a4 ; .loc 1 __LINE__
498 CALL(trap) 503 CALL(trap)
499 504
500 jmp zero, exception_return 505 jmp zero, exception_return
501 END(XentUna) 506 END(XentUna)
502 507
503/**************************************************************************/ 508/**************************************************************************/
504 509
505/* 510/*
506 * savefpstate: Save a process's floating point state. 511 * savefpstate: Save a process's floating point state.
507 * 512 *
508 * Arguments: 513 * Arguments:
509 * a0 'struct fpstate *' to save into 514 * a0 'struct fpstate *' to save into
510 */ 515 */
511 516
512LEAF(savefpstate, 1) 517LEAF(savefpstate, 1)
513 LDGP(pv) 518 LDGP(pv)
514 /* save all of the FP registers */ 519 /* save all of the FP registers */
515 lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */ 520 lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */
516 stt $f0, (0 * 8)(t1) /* save first register, using hw name */ 521 stt $f0, (0 * 8)(t1) /* save first register, using hw name */
517 stt $f1, (1 * 8)(t1) /* etc. */ 522 stt $f1, (1 * 8)(t1) /* etc. */
518 stt $f2, (2 * 8)(t1) 523 stt $f2, (2 * 8)(t1)
519 stt $f3, (3 * 8)(t1) 524 stt $f3, (3 * 8)(t1)
520 stt $f4, (4 * 8)(t1) 525 stt $f4, (4 * 8)(t1)
521 stt $f5, (5 * 8)(t1) 526 stt $f5, (5 * 8)(t1)
522 stt $f6, (6 * 8)(t1) 527 stt $f6, (6 * 8)(t1)
523 stt $f7, (7 * 8)(t1) 528 stt $f7, (7 * 8)(t1)
524 stt $f8, (8 * 8)(t1) 529 stt $f8, (8 * 8)(t1)
525 stt $f9, (9 * 8)(t1) 530 stt $f9, (9 * 8)(t1)
526 stt $f10, (10 * 8)(t1) 531 stt $f10, (10 * 8)(t1)
527 stt $f11, (11 * 8)(t1) 532 stt $f11, (11 * 8)(t1)
528 stt $f12, (12 * 8)(t1) 533 stt $f12, (12 * 8)(t1)
529 stt $f13, (13 * 8)(t1) 534 stt $f13, (13 * 8)(t1)
530 stt $f14, (14 * 8)(t1) 535 stt $f14, (14 * 8)(t1)
531 stt $f15, (15 * 8)(t1) 536 stt $f15, (15 * 8)(t1)
532 stt $f16, (16 * 8)(t1) 537 stt $f16, (16 * 8)(t1)
533 stt $f17, (17 * 8)(t1) 538 stt $f17, (17 * 8)(t1)
534 stt $f18, (18 * 8)(t1) 539 stt $f18, (18 * 8)(t1)
535 stt $f19, (19 * 8)(t1) 540 stt $f19, (19 * 8)(t1)
536 stt $f20, (20 * 8)(t1) 541 stt $f20, (20 * 8)(t1)
537 stt $f21, (21 * 8)(t1) 542 stt $f21, (21 * 8)(t1)
538 stt $f22, (22 * 8)(t1) 543 stt $f22, (22 * 8)(t1)
539 stt $f23, (23 * 8)(t1) 544 stt $f23, (23 * 8)(t1)
540 stt $f24, (24 * 8)(t1) 545 stt $f24, (24 * 8)(t1)
541 stt $f25, (25 * 8)(t1) 546 stt $f25, (25 * 8)(t1)
542 stt $f26, (26 * 8)(t1) 547 stt $f26, (26 * 8)(t1)
543 stt $f27, (27 * 8)(t1) 548 stt $f27, (27 * 8)(t1)
544 .set noat 549 .set noat
545 stt $f28, (28 * 8)(t1) 550 stt $f28, (28 * 8)(t1)
546 .set at 551 .set at
547 stt $f29, (29 * 8)(t1) 552 stt $f29, (29 * 8)(t1)
548 stt $f30, (30 * 8)(t1) 553 stt $f30, (30 * 8)(t1)
549 554
550 /* 555 /*
551 * Then save the FPCR; note that the necessary 'trapb's are taken 556 * Then save the FPCR; note that the necessary 'trapb's are taken
552 * care of on kernel entry and exit. 557 * care of on kernel entry and exit.
553 */ 558 */
554 mf_fpcr ft0 559 mf_fpcr ft0
555 stt ft0, FPREG_FPR_CR(a0) /* store to FPCR save area */ 560 stt ft0, FPREG_FPR_CR(a0) /* store to FPCR save area */
556 561
557 RET 562 RET
558 END(savefpstate) 563 END(savefpstate)
559 564
560/**************************************************************************/ 565/**************************************************************************/
561 566
562/* 567/*
563 * restorefpstate: Restore a process's floating point state. 568 * restorefpstate: Restore a process's floating point state.
564 * 569 *
565 * Arguments: 570 * Arguments:
566 * a0 'struct fpstate *' to restore from 571 * a0 'struct fpstate *' to restore from
567 */ 572 */
568 573
569LEAF(restorefpstate, 1) 574LEAF(restorefpstate, 1)
570 LDGP(pv) 575 LDGP(pv)
571 /* 576 /*
572 * Restore the FPCR; note that the necessary 'trapb's are taken care of 577 * Restore the FPCR; note that the necessary 'trapb's are taken care of
573 * on kernel entry and exit. 578 * on kernel entry and exit.
574 */ 579 */
575 ldt ft0, FPREG_FPR_CR(a0) /* load from FPCR save area */ 580 ldt ft0, FPREG_FPR_CR(a0) /* load from FPCR save area */
576 mt_fpcr ft0 581 mt_fpcr ft0
577 582
578 /* Restore all of the FP registers. */ 583 /* Restore all of the FP registers. */
579 lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */ 584 lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */
580 ldt $f0, (0 * 8)(t1) /* restore first reg., using hw name */ 585 ldt $f0, (0 * 8)(t1) /* restore first reg., using hw name */
581 ldt $f1, (1 * 8)(t1) /* etc. */ 586 ldt $f1, (1 * 8)(t1) /* etc. */
582 ldt $f2, (2 * 8)(t1) 587 ldt $f2, (2 * 8)(t1)
583 ldt $f3, (3 * 8)(t1) 588 ldt $f3, (3 * 8)(t1)
584 ldt $f4, (4 * 8)(t1) 589 ldt $f4, (4 * 8)(t1)
585 ldt $f5, (5 * 8)(t1) 590 ldt $f5, (5 * 8)(t1)
586 ldt $f6, (6 * 8)(t1) 591 ldt $f6, (6 * 8)(t1)
587 ldt $f7, (7 * 8)(t1) 592 ldt $f7, (7 * 8)(t1)
588 ldt $f8, (8 * 8)(t1) 593 ldt $f8, (8 * 8)(t1)
589 ldt $f9, (9 * 8)(t1) 594 ldt $f9, (9 * 8)(t1)
590 ldt $f10, (10 * 8)(t1) 595 ldt $f10, (10 * 8)(t1)
591 ldt $f11, (11 * 8)(t1) 596 ldt $f11, (11 * 8)(t1)
592 ldt $f12, (12 * 8)(t1) 597 ldt $f12, (12 * 8)(t1)
593 ldt $f13, (13 * 8)(t1) 598 ldt $f13, (13 * 8)(t1)
594 ldt $f14, (14 * 8)(t1) 599 ldt $f14, (14 * 8)(t1)
595 ldt $f15, (15 * 8)(t1) 600 ldt $f15, (15 * 8)(t1)
596 ldt $f16, (16 * 8)(t1) 601 ldt $f16, (16 * 8)(t1)
597 ldt $f17, (17 * 8)(t1) 602 ldt $f17, (17 * 8)(t1)
598 ldt $f18, (18 * 8)(t1) 603 ldt $f18, (18 * 8)(t1)
599 ldt $f19, (19 * 8)(t1) 604 ldt $f19, (19 * 8)(t1)
600 ldt $f20, (20 * 8)(t1) 605 ldt $f20, (20 * 8)(t1)
601 ldt $f21, (21 * 8)(t1) 606 ldt $f21, (21 * 8)(t1)
602 ldt $f22, (22 * 8)(t1) 607 ldt $f22, (22 * 8)(t1)
603 ldt $f23, (23 * 8)(t1) 608 ldt $f23, (23 * 8)(t1)
604 ldt $f24, (24 * 8)(t1) 609 ldt $f24, (24 * 8)(t1)
605 ldt $f25, (25 * 8)(t1) 610 ldt $f25, (25 * 8)(t1)
606 ldt $f26, (26 * 8)(t1) 611 ldt $f26, (26 * 8)(t1)
607 ldt $f27, (27 * 8)(t1) 612 ldt $f27, (27 * 8)(t1)
608 ldt $f28, (28 * 8)(t1) 613 ldt $f28, (28 * 8)(t1)
609 ldt $f29, (29 * 8)(t1) 614 ldt $f29, (29 * 8)(t1)
610 ldt $f30, (30 * 8)(t1) 615 ldt $f30, (30 * 8)(t1)
611 616
612 RET 617 RET
613 END(restorefpstate) 618 END(restorefpstate)
614 619
615/**************************************************************************/ 620/**************************************************************************/
616 621
617/* 622/*
618 * savectx: save process context, i.e. callee-saved registers 623 * savectx: save process context, i.e. callee-saved registers
619 * 624 *
620 * Note that savectx() only works for processes other than curlwp, 625 * Note that savectx() only works for processes other than curlwp,
621 * since cpu_switchto will copy over the info saved here. (It _can_ 626 * since cpu_switchto will copy over the info saved here. (It _can_
622 * sanely be used for curlwp iff cpu_switchto won't be called again, e.g. 627 * sanely be used for curlwp iff cpu_switchto won't be called again, e.g.
623 * if called from boot().) 628 * if called from boot().)
624 * 629 *
625 * Arguments: 630 * Arguments:
626 * a0 'struct pcb *' of the process that needs its context saved 631 * a0 'struct pcb *' of the process that needs its context saved
627 * 632 *
628 * Return: 633 * Return:
629 * v0 0. (note that for child processes, it seems 634 * v0 0. (note that for child processes, it seems
630 * like savectx() returns 1, because the return address 635 * like savectx() returns 1, because the return address
631 * in the PCB is set to the return address from savectx().) 636 * in the PCB is set to the return address from savectx().)
632 */ 637 */
633 638
634LEAF(savectx, 1) 639LEAF(savectx, 1)
635 br pv, 1f 640 br pv, 1f
6361: LDGP(pv) 6411: LDGP(pv)
637 stq sp, PCB_HWPCB_KSP(a0) /* store sp */ 642 stq sp, PCB_HWPCB_KSP(a0) /* store sp */
638 stq s0, PCB_CONTEXT+(0 * 8)(a0) /* store s0 - s6 */ 643 stq s0, PCB_CONTEXT+(0 * 8)(a0) /* store s0 - s6 */
639 stq s1, PCB_CONTEXT+(1 * 8)(a0) 644 stq s1, PCB_CONTEXT+(1 * 8)(a0)
640 stq s2, PCB_CONTEXT+(2 * 8)(a0) 645 stq s2, PCB_CONTEXT+(2 * 8)(a0)
641 stq s3, PCB_CONTEXT+(3 * 8)(a0) 646 stq s3, PCB_CONTEXT+(3 * 8)(a0)
642 stq s4, PCB_CONTEXT+(4 * 8)(a0) 647 stq s4, PCB_CONTEXT+(4 * 8)(a0)
643 stq s5, PCB_CONTEXT+(5 * 8)(a0) 648 stq s5, PCB_CONTEXT+(5 * 8)(a0)
644 stq s6, PCB_CONTEXT+(6 * 8)(a0) 649 stq s6, PCB_CONTEXT+(6 * 8)(a0)
645 stq ra, PCB_CONTEXT+(7 * 8)(a0) /* store ra */ 650 stq ra, PCB_CONTEXT+(7 * 8)(a0) /* store ra */
646 call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */ 651 call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */
647 stq v0, PCB_CONTEXT+(8 * 8)(a0) /* store ps, for ipl */ 652 stq v0, PCB_CONTEXT+(8 * 8)(a0) /* store ps, for ipl */
648 653
649 mov zero, v0 654 mov zero, v0
650 RET 655 RET
651 END(savectx) 656 END(savectx)
652 657
653/**************************************************************************/ 658/**************************************************************************/
654 659
655 660
656/* 661/*
657 * struct lwp *cpu_switchto(struct lwp *current, struct lwp *next) 662 * struct lwp *cpu_switchto(struct lwp *current, struct lwp *next)
658 * Switch to the specified next LWP 663 * Switch to the specified next LWP
659 * Arguments: 664 * Arguments:
660 * a0 'struct lwp *' of the LWP to switch from 665 * a0 'struct lwp *' of the LWP to switch from
661 * a1 'struct lwp *' of the LWP to switch to 666 * a1 'struct lwp *' of the LWP to switch to
662 */ 667 */
663LEAF(cpu_switchto, 0) 668LEAF(cpu_switchto, 0)
664 LDGP(pv) 669 LDGP(pv)
665 670
666 /* 671 /*
667 * do an inline savectx(), to save old context 672 * do an inline savectx(), to save old context
668 */ 673 */
669 ldq a2, L_PCB(a0) 674 ldq a2, L_PCB(a0)
670 /* NOTE: ksp is stored by the swpctx */ 675 /* NOTE: ksp is stored by the swpctx */
671 stq s0, PCB_CONTEXT+(0 * 8)(a2) /* store s0 - s6 */ 676 stq s0, PCB_CONTEXT+(0 * 8)(a2) /* store s0 - s6 */
672 stq s1, PCB_CONTEXT+(1 * 8)(a2) 677 stq s1, PCB_CONTEXT+(1 * 8)(a2)
673 stq s2, PCB_CONTEXT+(2 * 8)(a2) 678 stq s2, PCB_CONTEXT+(2 * 8)(a2)
674 stq s3, PCB_CONTEXT+(3 * 8)(a2) 679 stq s3, PCB_CONTEXT+(3 * 8)(a2)
675 stq s4, PCB_CONTEXT+(4 * 8)(a2) 680 stq s4, PCB_CONTEXT+(4 * 8)(a2)
676 stq s5, PCB_CONTEXT+(5 * 8)(a2) 681 stq s5, PCB_CONTEXT+(5 * 8)(a2)
677 stq s6, PCB_CONTEXT+(6 * 8)(a2) 682 stq s6, PCB_CONTEXT+(6 * 8)(a2)
678 stq ra, PCB_CONTEXT+(7 * 8)(a2) /* store ra */ 683 stq ra, PCB_CONTEXT+(7 * 8)(a2) /* store ra */
679 684
680 mov a0, s4 /* save old curlwp */ 685 mov a0, s4 /* save old curlwp */
681 mov a1, s2 /* save new lwp */ 686 mov a1, s2 /* save new lwp */
682 ldq a0, L_MD_PCBPADDR(s2) /* save new pcbpaddr */ 687 ldq a0, L_MD_PCBPADDR(s2) /* save new pcbpaddr */
683 688
684 SWITCH_CONTEXT /* swap the context */ 689 SWITCH_CONTEXT /* swap the context */
685 690
686 GET_CPUINFO 691 GET_CPUINFO
687 stq s2, CPU_INFO_CURLWP(v0) /* curlwp = l */ 692 stq s2, CPU_INFO_CURLWP(v0) /* curlwp = l */
688 693
689 /* 694 /*
690 * Now running on the new PCB. 695 * Now running on the new PCB.
691 */ 696 */
692 ldq s0, L_PCB(s2) 697 ldq s0, L_PCB(s2)
693 698
694 /* 699 /*
695 * Check for restartable atomic sequences (RAS). 700 * Check for restartable atomic sequences (RAS).
696 */ 701 */
697 ldq a0, L_PROC(s2) /* first ras_lookup() arg */ 702 ldq a0, L_PROC(s2) /* first ras_lookup() arg */
698 ldq t0, P_RASLIST(a0) /* any RAS entries? */ 703 ldq t0, P_RASLIST(a0) /* any RAS entries? */
699 beq t0, 1f /* no, skip */ 704 beq t0, 1f /* no, skip */
700 ldq s1, L_MD_TF(s2) /* s1 = l->l_md.md_tf */ 705 ldq s1, L_MD_TF(s2) /* s1 = l->l_md.md_tf */
701 ldq a1, (FRAME_PC*8)(s1) /* second ras_lookup() arg */ 706 ldq a1, (FRAME_PC*8)(s1) /* second ras_lookup() arg */
702 CALL(ras_lookup) /* ras_lookup(p, PC) */ 707 CALL(ras_lookup) /* ras_lookup(p, PC) */
703 addq v0, 1, t0 /* -1 means "not in ras" */ 708 addq v0, 1, t0 /* -1 means "not in ras" */
704 beq t0, 1f 709 beq t0, 1f
705 stq v0, (FRAME_PC*8)(s1) 710 stq v0, (FRAME_PC*8)(s1)
706 711
7071: 7121:
708 mov s4, v0 /* return the old lwp */ 713 mov s4, v0 /* return the old lwp */
709 /* 714 /*
710 * Restore registers and return. 715 * Restore registers and return.
711 * NOTE: ksp is restored by the swpctx. 716 * NOTE: ksp is restored by the swpctx.
712 */ 717 */
713 ldq s1, PCB_CONTEXT+(1 * 8)(s0) /* restore s1-s6 */ 718 ldq s1, PCB_CONTEXT+(1 * 8)(s0) /* restore s1-s6 */
714 ldq s2, PCB_CONTEXT+(2 * 8)(s0) 719 ldq s2, PCB_CONTEXT+(2 * 8)(s0)
715 ldq s3, PCB_CONTEXT+(3 * 8)(s0) 720 ldq s3, PCB_CONTEXT+(3 * 8)(s0)
716 ldq s4, PCB_CONTEXT+(4 * 8)(s0) 721 ldq s4, PCB_CONTEXT+(4 * 8)(s0)
717 ldq s5, PCB_CONTEXT+(5 * 8)(s0) 722 ldq s5, PCB_CONTEXT+(5 * 8)(s0)
718 ldq s6, PCB_CONTEXT+(6 * 8)(s0) 723 ldq s6, PCB_CONTEXT+(6 * 8)(s0)
719 ldq ra, PCB_CONTEXT+(7 * 8)(s0) /* restore ra */ 724 ldq ra, PCB_CONTEXT+(7 * 8)(s0) /* restore ra */
720 ldq s0, PCB_CONTEXT+(0 * 8)(s0) /* restore s0 */ 725 ldq s0, PCB_CONTEXT+(0 * 8)(s0) /* restore s0 */
721 726
722 RET 727 RET
723 END(cpu_switchto) 728 END(cpu_switchto)
724 729
725/* 730/*
726 * lwp_trampoline() 731 * lwp_trampoline()
727 * 732 *
728 * Arrange for a function to be invoked neatly, after a cpu_lwp_fork(). 733 * Arrange for a function to be invoked neatly, after a cpu_lwp_fork().
729 * 734 *
730 * Invokes the function specified by the s0 register with the return 735 * Invokes the function specified by the s0 register with the return
731 * address specified by the s1 register and with one argument specified 736 * address specified by the s1 register and with one argument specified
732 * by the s2 register. 737 * by the s2 register.
733 */ 738 */
734LEAF_NOPROFILE(lwp_trampoline, 0) 739LEAF_NOPROFILE(lwp_trampoline, 0)
735 mov v0, a0 740 mov v0, a0
736 mov s3, a1 741 mov s3, a1
737 CALL(lwp_startup) 742 CALL(lwp_startup)
738 mov s0, pv 743 mov s0, pv
739 mov s1, ra 744 mov s1, ra
740 mov s2, a0 745 mov s2, a0
741 jmp zero, (pv) 746 jmp zero, (pv)
742 END(lwp_trampoline) 747 END(lwp_trampoline)
743 748
744/**************************************************************************/ 749/**************************************************************************/
745 750
746/* 751/*
747 * XXX XXX XXX: Should be removed? 752 * XXX XXX XXX: Should be removed?
748 */ 753 */
749LEAF(alpha_copystr, 4) 754LEAF(alpha_copystr, 4)
750 LDGP(pv) 755 LDGP(pv)
751 756
752 mov a2, t0 /* t0 = i = len */ 757 mov a2, t0 /* t0 = i = len */
753 bne a2, 1f /* if (len != 0), proceed */ 758 bne a2, 1f /* if (len != 0), proceed */
754 ldiq t1, 1 /* else bail */ 759 ldiq t1, 1 /* else bail */
755 br zero, 2f 760 br zero, 2f
756 761
7571: ldq_u t1, 0(a0) /* t1 = *from */ 7621: ldq_u t1, 0(a0) /* t1 = *from */
758 extbl t1, a0, t1 763 extbl t1, a0, t1
759 ldq_u t3, 0(a1) /* set up t2 with quad around *to */ 764 ldq_u t3, 0(a1) /* set up t2 with quad around *to */
760 insbl t1, a1, t2 765 insbl t1, a1, t2
761 mskbl t3, a1, t3 766 mskbl t3, a1, t3
762 or t3, t2, t3 /* add *from to quad around *to */ 767 or t3, t2, t3 /* add *from to quad around *to */
763 stq_u t3, 0(a1) /* write out that quad */ 768 stq_u t3, 0(a1) /* write out that quad */
764 769
765 subl a2, 1, a2 /* len-- */ 770 subl a2, 1, a2 /* len-- */
766 beq t1, 2f /* if (*from == 0), bail out */ 771 beq t1, 2f /* if (*from == 0), bail out */
767 addq a1, 1, a1 /* to++ */ 772 addq a1, 1, a1 /* to++ */
768 addq a0, 1, a0 /* from++ */ 773 addq a0, 1, a0 /* from++ */
769 bne a2, 1b /* if (len != 0) copy more */ 774 bne a2, 1b /* if (len != 0) copy more */
770 775
7712: beq a3, 3f /* if (lenp != NULL) */ 7762: beq a3, 3f /* if (lenp != NULL) */
772 subl t0, a2, t0 /* *lenp = (i - len) */ 777 subl t0, a2, t0 /* *lenp = (i - len) */
773 stq t0, 0(a3) 778 stq t0, 0(a3)
7743: beq t1, 4f /* *from == '\0'; leave quietly */ 7793: beq t1, 4f /* *from == '\0'; leave quietly */
775 780
776 ldiq v0, ENAMETOOLONG /* *from != '\0'; error. */ 781 ldiq v0, ENAMETOOLONG /* *from != '\0'; error. */
777 RET 782 RET
778 783
7794: mov zero, v0 /* return 0. */ 7844: mov zero, v0 /* return 0. */
780 RET 785 RET
781 END(alpha_copystr) 786 END(alpha_copystr)
782 787
783NESTED(copyinstr, 4, 16, ra, IM_RA|IM_S0, 0) 788NESTED(copyinstr, 4, 16, ra, IM_RA|IM_S0, 0)
784 LDGP(pv) 789 LDGP(pv)
785 lda sp, -16(sp) /* set up stack frame */ 790 lda sp, -16(sp) /* set up stack frame */
786 stq ra, (16-8)(sp) /* save ra */ 791 stq ra, (16-8)(sp) /* save ra */
787 stq s0, (16-16)(sp) /* save s0 */ 792 stq s0, (16-16)(sp) /* save s0 */
788 ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */ 793 ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */
789 cmpult a0, t0, t1 /* is in user space. */ 794 cmpult a0, t0, t1 /* is in user space. */
790 beq t1, copyerr_efault /* if it's not, error out. */ 795 beq t1, copyerr_efault /* if it's not, error out. */
791 /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ 796 /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
792 GET_CURLWP 797 GET_CURLWP
793 mov v0, s0 798 mov v0, s0
794 lda v0, copyerr /* set up fault handler. */ 799 lda v0, copyerr /* set up fault handler. */
795 .set noat 800 .set noat
796 ldq at_reg, 0(s0) 801 ldq at_reg, 0(s0)
797 ldq at_reg, L_PCB(at_reg) 802 ldq at_reg, L_PCB(at_reg)
798 stq v0, PCB_ONFAULT(at_reg) 803 stq v0, PCB_ONFAULT(at_reg)
799 .set at 804 .set at
800 CALL(alpha_copystr) /* do the copy. */ 805 CALL(alpha_copystr) /* do the copy. */
801 .set noat 806 .set noat
802 ldq at_reg, 0(s0) /* kill the fault handler. */ 807 ldq at_reg, 0(s0) /* kill the fault handler. */
803 ldq at_reg, L_PCB(at_reg) 808 ldq at_reg, L_PCB(at_reg)
804 stq zero, PCB_ONFAULT(at_reg) 809 stq zero, PCB_ONFAULT(at_reg)
805 .set at 810 .set at
806 ldq ra, (16-8)(sp) /* restore ra. */ 811 ldq ra, (16-8)(sp) /* restore ra. */
807 ldq s0, (16-16)(sp) /* restore s0. */ 812 ldq s0, (16-16)(sp) /* restore s0. */
808 lda sp, 16(sp) /* kill stack frame. */ 813 lda sp, 16(sp) /* kill stack frame. */
809 RET /* v0 left over from copystr */ 814 RET /* v0 left over from copystr */
810 END(copyinstr) 815 END(copyinstr)
811 816
812NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0) 817NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0)
813 LDGP(pv) 818 LDGP(pv)
814 lda sp, -16(sp) /* set up stack frame */ 819 lda sp, -16(sp) /* set up stack frame */
815 stq ra, (16-8)(sp) /* save ra */ 820 stq ra, (16-8)(sp) /* save ra */
816 stq s0, (16-16)(sp) /* save s0 */ 821 stq s0, (16-16)(sp) /* save s0 */
817 ldiq t0, VM_MAX_ADDRESS /* make sure that dest addr */ 822 ldiq t0, VM_MAX_ADDRESS /* make sure that dest addr */
818 cmpult a1, t0, t1 /* is in user space. */ 823 cmpult a1, t0, t1 /* is in user space. */
819 beq t1, copyerr_efault /* if it's not, error out. */ 824 beq t1, copyerr_efault /* if it's not, error out. */
820 /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ 825 /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
821 GET_CURLWP 826 GET_CURLWP
822 mov v0, s0 827 mov v0, s0
823 lda v0, copyerr /* set up fault handler. */ 828 lda v0, copyerr /* set up fault handler. */
824 .set noat 829 .set noat
825 ldq at_reg, 0(s0) 830 ldq at_reg, 0(s0)
826 ldq at_reg, L_PCB(at_reg) 831 ldq at_reg, L_PCB(at_reg)
827 stq v0, PCB_ONFAULT(at_reg) 832 stq v0, PCB_ONFAULT(at_reg)
828 .set at 833 .set at
829 CALL(alpha_copystr) /* do the copy. */ 834 CALL(alpha_copystr) /* do the copy. */
830 .set noat 835 .set noat
831 ldq at_reg, 0(s0) /* kill the fault handler. */ 836 ldq at_reg, 0(s0) /* kill the fault handler. */
832 ldq at_reg, L_PCB(at_reg) 837 ldq at_reg, L_PCB(at_reg)
833 stq zero, PCB_ONFAULT(at_reg) 838 stq zero, PCB_ONFAULT(at_reg)
834 .set at 839 .set at
835 ldq ra, (16-8)(sp) /* restore ra. */ 840 ldq ra, (16-8)(sp) /* restore ra. */
836 ldq s0, (16-16)(sp) /* restore s0. */ 841 ldq s0, (16-16)(sp) /* restore s0. */
837 lda sp, 16(sp) /* kill stack frame. */ 842 lda sp, 16(sp) /* kill stack frame. */
838 RET /* v0 left over from copystr */ 843 RET /* v0 left over from copystr */
839 END(copyoutstr) 844 END(copyoutstr)
840 845
841/* 846/*
842 * kcopy(const void *src, void *dst, size_t len); 847 * kcopy(const void *src, void *dst, size_t len);
843 * 848 *
844 * Copy len bytes from src to dst, aborting if we encounter a fatal 849 * Copy len bytes from src to dst, aborting if we encounter a fatal
845 * page fault. 850 * page fault.
846 * 851 *
847 * kcopy() _must_ save and restore the old fault handler since it is 852 * kcopy() _must_ save and restore the old fault handler since it is
848 * called by uiomove(), which may be in the path of servicing a non-fatal 853 * called by uiomove(), which may be in the path of servicing a non-fatal
849 * page fault. 854 * page fault.
850 */ 855 */
851NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0) 856NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0)
852 LDGP(pv) 857 LDGP(pv)
853 lda sp, -32(sp) /* set up stack frame */ 858 lda sp, -32(sp) /* set up stack frame */
854 stq ra, (32-8)(sp) /* save ra */ 859 stq ra, (32-8)(sp) /* save ra */
855 stq s0, (32-16)(sp) /* save s0 */ 860 stq s0, (32-16)(sp) /* save s0 */
856 stq s1, (32-24)(sp) /* save s1 */ 861 stq s1, (32-24)(sp) /* save s1 */
857 /* Swap a0, a1, for call to memcpy(). */ 862 /* Swap a0, a1, for call to memcpy(). */
858 mov a1, v0 863 mov a1, v0
859 mov a0, a1 864 mov a0, a1
860 mov v0, a0 865 mov v0, a0
861 /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ 866 /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
862 GET_CURLWP 867 GET_CURLWP
863 ldq s1, 0(v0) /* s1 = curlwp */ 868 ldq s1, 0(v0) /* s1 = curlwp */
864 lda v0, kcopyerr /* set up fault handler. */ 869 lda v0, kcopyerr /* set up fault handler. */
865 .set noat 870 .set noat
866 ldq at_reg, L_PCB(s1) 871 ldq at_reg, L_PCB(s1)
867 ldq s0, PCB_ONFAULT(at_reg) /* save old handler. */ 872 ldq s0, PCB_ONFAULT(at_reg) /* save old handler. */
868 stq v0, PCB_ONFAULT(at_reg) 873 stq v0, PCB_ONFAULT(at_reg)
869 .set at 874 .set at
870 CALL(memcpy) /* do the copy. */ 875 CALL(memcpy) /* do the copy. */
871 .set noat 876 .set noat
872 ldq at_reg, L_PCB(s1) /* restore the old handler. */ 877 ldq at_reg, L_PCB(s1) /* restore the old handler. */
873 stq s0, PCB_ONFAULT(at_reg) 878 stq s0, PCB_ONFAULT(at_reg)
874 .set at 879 .set at
875 ldq ra, (32-8)(sp) /* restore ra. */ 880 ldq ra, (32-8)(sp) /* restore ra. */
876 ldq s0, (32-16)(sp) /* restore s0. */ 881 ldq s0, (32-16)(sp) /* restore s0. */
877 ldq s1, (32-24)(sp) /* restore s1. */ 882 ldq s1, (32-24)(sp) /* restore s1. */
878 lda sp, 32(sp) /* kill stack frame. */ 883 lda sp, 32(sp) /* kill stack frame. */
879 mov zero, v0 /* return 0. */ 884 mov zero, v0 /* return 0. */
880 RET 885 RET
881 END(kcopy) 886 END(kcopy)
882 887
883LEAF(kcopyerr, 0) 888LEAF(kcopyerr, 0)
884 LDGP(pv) 889 LDGP(pv)
885 .set noat 890 .set noat
886 ldq at_reg, L_PCB(s1) /* restore the old handler. */ 891 ldq at_reg, L_PCB(s1) /* restore the old handler. */
887 stq s0, PCB_ONFAULT(at_reg) 892 stq s0, PCB_ONFAULT(at_reg)
888 .set at 893 .set at
889 ldq ra, (32-8)(sp) /* restore ra. */ 894 ldq ra, (32-8)(sp) /* restore ra. */
890 ldq s0, (32-16)(sp) /* restore s0. */ 895 ldq s0, (32-16)(sp) /* restore s0. */
891 ldq s1, (32-24)(sp) /* restore s1. */ 896 ldq s1, (32-24)(sp) /* restore s1. */
892 lda sp, 32(sp) /* kill stack frame. */ 897 lda sp, 32(sp) /* kill stack frame. */
893 RET 898 RET
894END(kcopyerr) 899END(kcopyerr)
895 900
896NESTED(copyin, 3, 16, ra, IM_RA|IM_S0, 0) 901NESTED(copyin, 3, 16, ra, IM_RA|IM_S0, 0)
897 LDGP(pv) 902 LDGP(pv)
898 lda sp, -16(sp) /* set up stack frame */ 903 lda sp, -16(sp) /* set up stack frame */
899 stq ra, (16-8)(sp) /* save ra */ 904 stq ra, (16-8)(sp) /* save ra */
900 stq s0, (16-16)(sp) /* save s0 */ 905 stq s0, (16-16)(sp) /* save s0 */
901 ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */ 906 ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */
902 cmpult a0, t0, t1 /* is in user space. */ 907 cmpult a0, t0, t1 /* is in user space. */
903 beq t1, copyerr_efault /* if it's not, error out. */ 908 beq t1, copyerr_efault /* if it's not, error out. */
904 /* Swap a0, a1, for call to memcpy(). */ 909 /* Swap a0, a1, for call to memcpy(). */
905 mov a1, v0 910 mov a1, v0
906 mov a0, a1 911 mov a0, a1
907 mov v0, a0 912 mov v0, a0
908 /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ 913 /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
909 GET_CURLWP 914 GET_CURLWP
910 ldq s0, 0(v0) /* s0 = curlwp */ 915 ldq s0, 0(v0) /* s0 = curlwp */
911 lda v0, copyerr /* set up fault handler. */ 916 lda v0, copyerr /* set up fault handler. */
912 .set noat 917 .set noat
913 ldq at_reg, L_PCB(s0) 918 ldq at_reg, L_PCB(s0)
914 stq v0, PCB_ONFAULT(at_reg) 919 stq v0, PCB_ONFAULT(at_reg)
915 .set at 920 .set at
916 CALL(memcpy) /* do the copy. */ 921 CALL(memcpy) /* do the copy. */
917 .set noat 922 .set noat
918 ldq at_reg, L_PCB(s0) /* kill the fault handler. */ 923 ldq at_reg, L_PCB(s0) /* kill the fault handler. */
919 stq zero, PCB_ONFAULT(at_reg) 924 stq zero, PCB_ONFAULT(at_reg)
920 .set at 925 .set at
921 ldq ra, (16-8)(sp) /* restore ra. */ 926 ldq ra, (16-8)(sp) /* restore ra. */
922 ldq s0, (16-16)(sp) /* restore s0. */ 927 ldq s0, (16-16)(sp) /* restore s0. */
923 lda sp, 16(sp) /* kill stack frame. */ 928 lda sp, 16(sp) /* kill stack frame. */
924 mov zero, v0 /* return 0. */ 929 mov zero, v0 /* return 0. */
925 RET 930 RET
926 END(copyin) 931 END(copyin)
927 932
928NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0) 933NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0)
929 LDGP(pv) 934 LDGP(pv)
930 lda sp, -16(sp) /* set up stack frame */ 935 lda sp, -16(sp) /* set up stack frame */
931 stq ra, (16-8)(sp) /* save ra */ 936 stq ra, (16-8)(sp) /* save ra */
932 stq s0, (16-16)(sp) /* save s0 */ 937 stq s0, (16-16)(sp) /* save s0 */
933 ldiq t0, VM_MAX_ADDRESS /* make sure that dest addr */ 938 ldiq t0, VM_MAX_ADDRESS /* make sure that dest addr */
934 cmpult a1, t0, t1 /* is in user space. */ 939 cmpult a1, t0, t1 /* is in user space. */
935 beq t1, copyerr_efault /* if it's not, error out. */ 940 beq t1, copyerr_efault /* if it's not, error out. */
936 /* Swap a0, a1, for call to memcpy(). */ 941 /* Swap a0, a1, for call to memcpy(). */
937 mov a1, v0 942 mov a1, v0
938 mov a0, a1 943 mov a0, a1
939 mov v0, a0 944 mov v0, a0
940 /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ 945 /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
941 GET_CURLWP 946 GET_CURLWP
942 ldq s0, 0(v0) /* s0 = curlwp */ 947 ldq s0, 0(v0) /* s0 = curlwp */
943 lda v0, copyerr /* set up fault handler. */ 948 lda v0, copyerr /* set up fault handler. */
944 .set noat 949 .set noat
945 ldq at_reg, L_PCB(s0) 950 ldq at_reg, L_PCB(s0)
946 stq v0, PCB_ONFAULT(at_reg) 951 stq v0, PCB_ONFAULT(at_reg)
947 .set at 952 .set at
948 CALL(memcpy) /* do the copy. */ 953 CALL(memcpy) /* do the copy. */
949 .set noat 954 .set noat
950 ldq at_reg, L_PCB(s0) /* kill the fault handler. */ 955 ldq at_reg, L_PCB(s0) /* kill the fault handler. */
951 stq zero, PCB_ONFAULT(at_reg) 956 stq zero, PCB_ONFAULT(at_reg)
952 .set at 957 .set at
953 ldq ra, (16-8)(sp) /* restore ra. */ 958 ldq ra, (16-8)(sp) /* restore ra. */
954 ldq s0, (16-16)(sp) /* restore s0. */ 959 ldq s0, (16-16)(sp) /* restore s0. */
955 lda sp, 16(sp) /* kill stack frame. */ 960 lda sp, 16(sp) /* kill stack frame. */
956 mov zero, v0 /* return 0. */ 961 mov zero, v0 /* return 0. */
957 RET 962 RET
958 END(copyout) 963 END(copyout)
959 964
960LEAF(copyerr_efault, 0) 965LEAF(copyerr_efault, 0)
961 ldiq v0, EFAULT /* return EFAULT. */ 966 ldiq v0, EFAULT /* return EFAULT. */
962XLEAF(copyerr, 0) 967XLEAF(copyerr, 0)
963 LDGP(pv) 968 LDGP(pv)
964 ldq ra, (16-8)(sp) /* restore ra. */ 969 ldq ra, (16-8)(sp) /* restore ra. */
965 ldq s0, (16-16)(sp) /* restore s0. */ 970 ldq s0, (16-16)(sp) /* restore s0. */
966 lda sp, 16(sp) /* kill stack frame. */ 971 lda sp, 16(sp) /* kill stack frame. */
967 RET 972 RET
968END(copyerr) 973END(copyerr)
 969 974
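
For reference, the fault-recovery protocol that copyin(), copyout(), and copyerr
implement above can be modeled in C.  This is a minimal user-space sketch, with
setjmp() standing in for the pcb_onfault/trap() hand-off; the VM_MAX_ADDRESS
value and the pcb representation are illustrative stand-ins, not the kernel's
real definitions:

    #include <errno.h>
    #include <setjmp.h>
    #include <stdint.h>
    #include <string.h>

    #define VM_MAX_ADDRESS 0x0000040000000000UL /* illustrative value */
    static jmp_buf *pcb_onfault;        /* models curlwp's PCB_ONFAULT slot */

    static int
    copyin_model(const void *uaddr, void *kaddr, size_t len)
    {
            jmp_buf env;

            if ((uintptr_t)uaddr >= VM_MAX_ADDRESS)
                    return EFAULT;          /* source must lie in user space */
            if (setjmp(env) != 0) {         /* trap() "returns" here on a fault */
                    pcb_onfault = NULL;     /* kill the fault handler */
                    return EFAULT;
            }
            pcb_onfault = &env;             /* set up fault handler */
            memcpy(kaddr, uaddr, len);      /* do the copy; may fault */
            pcb_onfault = NULL;             /* kill the fault handler */
            return 0;
    }

On Alpha the same effect is achieved by storing the address of copyerr in
PCB_ONFAULT before CALL(memcpy) and clearing it afterwards; copyerr unwinds the
16-byte stack frame and returns the EFAULT loaded by copyerr_efault.
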
970/**************************************************************************/ 975/**************************************************************************/
971 976
972#define UFETCHSTORE_PROLOGUE \ 977#define UFETCHSTORE_PROLOGUE \
973 br pv, 1f ;\ 978 br pv, 1f ;\
9741: LDGP(pv) ;\ 9791: LDGP(pv) ;\
975 ldiq t0, VM_MAX_ADDRESS /* make sure that addr */ ;\ 980 ldiq t0, VM_MAX_ADDRESS /* make sure that addr */ ;\
976 cmpult a0, t0, t1 /* is in user space. */ ;\ 981 cmpult a0, t0, t1 /* is in user space. */ ;\
977 beq t1, ufetchstoreerr_efault /* if it's not, error out. */ 982 beq t1, ufetchstoreerr_efault /* if it's not, error out. */
978 983
979/* LINTSTUB: int _ufetch_8(const uint8_t *uaddr, uint8_t *valp); */ 984/* LINTSTUB: int _ufetch_8(const uint8_t *uaddr, uint8_t *valp); */
980LEAF_NOPROFILE(_ufetch_8, 2) 985LEAF_NOPROFILE(_ufetch_8, 2)
981 UFETCHSTORE_PROLOGUE 986 UFETCHSTORE_PROLOGUE
982.L_ufetch_8_start: 987.L_ufetch_8_start:
983 ldq_u t0, 0(a0) /* load quad containing byte */ 988 ldq_u t0, 0(a0) /* load quad containing byte */
984.L_ufetch_8_end: 989.L_ufetch_8_end:
985 extbl t0, a0, a0 /* a0 = extracted byte */ 990 extbl t0, a0, a0 /* a0 = extracted byte */
986 ldq_u t0, 0(a1) /* load dest quad */ 991 ldq_u t0, 0(a1) /* load dest quad */
987 insbl a0, a1, a0 /* a0 = byte in target position */ 992 insbl a0, a1, a0 /* a0 = byte in target position */
988 mskbl t0, a1, t0 /* clear target byte in destination */ 993 mskbl t0, a1, t0 /* clear target byte in destination */
 989 or a0, t0, a0 /* or in byte to destination */ 994 or a0, t0, a0 /* or in byte to destination */
990 stq_u a0, 0(a1) /* *a1 = fetched byte! */ 995 stq_u a0, 0(a1) /* *a1 = fetched byte! */
991 mov zero, v0 996 mov zero, v0
992 RET 997 RET
993 END(_ufetch_8) 998 END(_ufetch_8)
994 999
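
_ufetch_8 (and _ufetch_16 below) cannot simply issue a byte or word load: these
routines target pre-BWX Alpha, which has only 32- and 64-bit memory operations.
Instead, ldq_u loads the aligned quadword containing the datum, extbl/extwl
shift it into place, and the insbl/mskbl/or sequence deposits it into the
quadword surrounding the destination.  A little-endian C sketch of those two
steps (function names are mine, not kernel API):

    #include <stdint.h>

    /* extbl: pull byte (addr & 7) out of the aligned quadword holding it. */
    static uint8_t
    extbl_model(uint64_t quad, uintptr_t addr)
    {
            return (uint8_t)(quad >> ((addr & 7) * 8));
    }

    /* insbl + mskbl + or: clear the target byte in the destination
     * quadword, then merge in the new byte at the right position. */
    static uint64_t
    deposit_byte_model(uint64_t destquad, uintptr_t addr, uint8_t val)
    {
            unsigned shift = (unsigned)(addr & 7) * 8;

            destquad &= ~((uint64_t)0xff << shift);
            return destquad | ((uint64_t)val << shift);
    }
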
995/* LINTSTUB: int _ufetch_16(const uint16_t *uaddr, uint16_t *valp); */ 1000/* LINTSTUB: int _ufetch_16(const uint16_t *uaddr, uint16_t *valp); */
996LEAF_NOPROFILE(_ufetch_16, 2) 1001LEAF_NOPROFILE(_ufetch_16, 2)
997 UFETCHSTORE_PROLOGUE 1002 UFETCHSTORE_PROLOGUE
998.L_ufetch_16_start: 1003.L_ufetch_16_start:
999 ldq_u t0, 0(a0) /* load quad containing short */ 1004 ldq_u t0, 0(a0) /* load quad containing short */
1000.L_ufetch_16_end: 1005.L_ufetch_16_end:
1001 extwl t0, a0, a0 /* a0 = extracted short */ 1006 extwl t0, a0, a0 /* a0 = extracted short */
1002 ldq_u t0, 0(a1) /* load dest quad */ 1007 ldq_u t0, 0(a1) /* load dest quad */
1003 inswl a0, a1, a0 /* a0 = short in target position */ 1008 inswl a0, a1, a0 /* a0 = short in target position */
1004 mskwl t0, a1, t0 /* clear target short in destination */ 1009 mskwl t0, a1, t0 /* clear target short in destination */
 1005 or a0, t0, a0 /* or in short to destination */ 1010 or a0, t0, a0 /* or in short to destination */
1006 stq_u a0, 0(a1) /* *a1 = fetched short! */ 1011 stq_u a0, 0(a1) /* *a1 = fetched short! */
1007 mov zero, v0 1012 mov zero, v0
1008 RET 1013 RET
1009 END(_ufetch_16) 1014 END(_ufetch_16)
1010 1015
1011/* LINTSTUB: int _ufetch_32(const uint32_t *uaddr, uint32_t *valp); */ 1016/* LINTSTUB: int _ufetch_32(const uint32_t *uaddr, uint32_t *valp); */
1012LEAF_NOPROFILE(_ufetch_32, 2) 1017LEAF_NOPROFILE(_ufetch_32, 2)
1013 UFETCHSTORE_PROLOGUE 1018 UFETCHSTORE_PROLOGUE
1014.L_ufetch_32_start: 1019.L_ufetch_32_start:
1015 ldl v0, 0(a0) 1020 ldl v0, 0(a0)
1016.L_ufetch_32_end: 1021.L_ufetch_32_end:
1017 stl v0, 0(a1) 1022 stl v0, 0(a1)
1018 mov zero, v0 1023 mov zero, v0
1019 RET 1024 RET
1020 END(_ufetch_32) 1025 END(_ufetch_32)
1021 1026
1022/* LINTSTUB: int _ufetch_64(const uint64_t *uaddr, uint64_t *valp); */ 1027/* LINTSTUB: int _ufetch_64(const uint64_t *uaddr, uint64_t *valp); */
1023LEAF_NOPROFILE(_ufetch_64, 2) 1028LEAF_NOPROFILE(_ufetch_64, 2)
1024 UFETCHSTORE_PROLOGUE 1029 UFETCHSTORE_PROLOGUE
1025.L_ufetch_64_start: 1030.L_ufetch_64_start:
1026 ldq v0, 0(a0) 1031 ldq v0, 0(a0)
1027.L_ufetch_64_end: 1032.L_ufetch_64_end:
1028 stq v0, 0(a1) 1033 stq v0, 0(a1)
1029 mov zero, v0 1034 mov zero, v0
1030 RET 1035 RET
1031 END(_ufetch_64) 1036 END(_ufetch_64)
1032 1037
1033/* LINTSTUB: int _ustore_8(uint8_t *uaddr, uint8_t val); */ 1038/* LINTSTUB: int _ustore_8(uint8_t *uaddr, uint8_t val); */
1034LEAF_NOPROFILE(_ustore_8, 2) 1039LEAF_NOPROFILE(_ustore_8, 2)
1035 UFETCHSTORE_PROLOGUE 1040 UFETCHSTORE_PROLOGUE
1036 zap a1, 0xfe, a1 /* kill arg's high bytes */ 1041 zap a1, 0xfe, a1 /* kill arg's high bytes */
1037 insbl a1, a0, a1 /* move it to the right spot */ 1042 insbl a1, a0, a1 /* move it to the right spot */
1038.L_ustore_8_start: 1043.L_ustore_8_start:
1039 ldq_u t0, 0(a0) /* load quad around byte */ 1044 ldq_u t0, 0(a0) /* load quad around byte */
1040 mskbl t0, a0, t0 /* kill the target byte */ 1045 mskbl t0, a0, t0 /* kill the target byte */
1041 or t0, a1, a1 /* put the result together */ 1046 or t0, a1, a1 /* put the result together */
1042 stq_u a1, 0(a0) /* and store it. */ 1047 stq_u a1, 0(a0) /* and store it. */
1043.L_ustore_8_end: 1048.L_ustore_8_end:
1044 mov zero, v0 1049 mov zero, v0
1045 RET 1050 RET
1046 END(_ustore_8) 1051 END(_ustore_8)
1047 1052
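
The _ustore_* routines use the mirror-image sequence: load the quadword around
the target (ldq_u), clear the target byte or short (mskbl/mskwl), merge in the
shifted value prepared earlier with zap and insbl/inswl, then store the whole
quadword back.  As written, this read-modify-write does not use
load-locked/store-conditional, so a concurrent store to a neighboring byte in
the same quadword could in principle be lost.  A compact C model (names are
mine):

    #include <stdint.h>

    /* Model of _ustore_8's read-modify-write on the containing quadword. */
    static void
    ustore8_model(uint8_t *uaddr, uint8_t val)
    {
            uint64_t *qp = (uint64_t *)((uintptr_t)uaddr & ~(uintptr_t)7);
            unsigned shift = (unsigned)((uintptr_t)uaddr & 7) * 8;
            uint64_t q = *qp;                       /* ldq_u */

            q &= ~((uint64_t)0xff << shift);        /* mskbl */
            q |= (uint64_t)val << shift;            /* insbl + or */
            *qp = q;                                /* stq_u */
    }
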
1048/* LINTSTUB: int _ustore_16(uint16_t *uaddr, uint16_t val); */ 1053/* LINTSTUB: int _ustore_16(uint16_t *uaddr, uint16_t val); */
1049LEAF_NOPROFILE(_ustore_16, 2) 1054LEAF_NOPROFILE(_ustore_16, 2)
1050 UFETCHSTORE_PROLOGUE 1055 UFETCHSTORE_PROLOGUE
1051 zap a1, 0xfc, a1 /* kill arg's high bytes */ 1056 zap a1, 0xfc, a1 /* kill arg's high bytes */
1052 inswl a1, a0, a1 /* move it to the right spot */ 1057 inswl a1, a0, a1 /* move it to the right spot */
1053.L_ustore_16_start: 1058.L_ustore_16_start:
1054 ldq_u t0, 0(a0) /* load quad around short */ 1059 ldq_u t0, 0(a0) /* load quad around short */
1055 mskwl t0, a0, t0 /* kill the target short */ 1060 mskwl t0, a0, t0 /* kill the target short */
1056 or t0, a1, a1 /* put the result together */ 1061 or t0, a1, a1 /* put the result together */
1057 stq_u a1, 0(a0) /* and store it. */ 1062 stq_u a1, 0(a0) /* and store it. */
1058.L_ustore_16_end: 1063.L_ustore_16_end:
1059 mov zero, v0 1064 mov zero, v0
1060 RET 1065 RET
1061 END(_ustore_16) 1066 END(_ustore_16)
1062 1067
1063/* LINTSTUB: int _ustore_32(uint32_t *uaddr, uint32_t val); */ 1068/* LINTSTUB: int _ustore_32(uint32_t *uaddr, uint32_t val); */
1064LEAF_NOPROFILE(_ustore_32, 2) 1069LEAF_NOPROFILE(_ustore_32, 2)
1065 UFETCHSTORE_PROLOGUE 1070 UFETCHSTORE_PROLOGUE
1066.L_ustore_32_start: 1071.L_ustore_32_start:
1067 stl a1, 0(a0) 1072 stl a1, 0(a0)
1068.L_ustore_32_end: 1073.L_ustore_32_end:
1069 mov zero, v0 1074 mov zero, v0
1070 RET 1075 RET
1071 END(_ustore_32) 1076 END(_ustore_32)
1072 1077
1073/* LINTSTUB: int _ustore_64(uint64_t *uaddr, uint64_t val); */ 1078/* LINTSTUB: int _ustore_64(uint64_t *uaddr, uint64_t val); */
1074LEAF_NOPROFILE(_ustore_64, 2) 1079LEAF_NOPROFILE(_ustore_64, 2)
1075 UFETCHSTORE_PROLOGUE 1080 UFETCHSTORE_PROLOGUE
1076.L_ustore_64_start: 1081.L_ustore_64_start:
1077 stq a1, 0(a0) 1082 stq a1, 0(a0)
1078.L_ustore_64_end: 1083.L_ustore_64_end:
1079 mov zero, v0 1084 mov zero, v0
1080 RET 1085 RET
1081 END(_ustore_64) 1086 END(_ustore_64)
1082 1087
1083LEAF_NOPROFILE(ufetchstoreerr_efault, 0) 1088LEAF_NOPROFILE(ufetchstoreerr_efault, 0)
1084 ldiq v0, EFAULT /* return EFAULT. */ 1089 ldiq v0, EFAULT /* return EFAULT. */
1085XLEAF(ufetchstoreerr, 0) 1090XLEAF(ufetchstoreerr, 0)
1086 LDGP(pv) 1091 LDGP(pv)
1087 RET 1092 RET
1088 END(ufetchstoreerr_efault) 1093 END(ufetchstoreerr_efault)
1089 1094
1090/**************************************************************************/ 1095/**************************************************************************/
1091 1096
1092/* 1097/*
1093 * int _ucas_32(volatile uint32_t *uptr, uint32_t old, uint32_t new, 1098 * int _ucas_32(volatile uint32_t *uptr, uint32_t old, uint32_t new,
1094 * uint32_t *ret); 1099 * uint32_t *ret);
1095 */ 1100 */
1096LEAF_NOPROFILE(_ucas_32, 4) 1101LEAF_NOPROFILE(_ucas_32, 4)
1097 UFETCHSTORE_PROLOGUE 1102 UFETCHSTORE_PROLOGUE
10983: 11033:
1099.Lucas_32_start: 1104.Lucas_32_start:
1100 mov a2, t2 1105 mov a2, t2
1101 ldl_l t0, 0(a0) /* t0 = *uptr */ 1106 ldl_l t0, 0(a0) /* t0 = *uptr */
1102 cmpeq t0, a1, t1 /* does t0 = old? */ 1107 cmpeq t0, a1, t1 /* does t0 = old? */
1103 beq t1, 1f /* if not, skip */ 1108 beq t1, 1f /* if not, skip */
1104 stl_c t2, 0(a0) /* *uptr ~= new */ 1109 stl_c t2, 0(a0) /* *uptr ~= new */
1105.Lucas_32_end: 1110.Lucas_32_end:
 1106 beq t2, 2f /* did it work? */ 1111 beq t2, 2f /* did it work? */
11071: 11121:
1108 stl t0, 0(a3) /* *ret = t0 */ 1113 stl t0, 0(a3) /* *ret = t0 */
1109 mov zero, v0 1114 mov zero, v0
1110 RET 1115 RET
11112: 11162:
1112 br 3b 1117 br 3b
1113END(_ucas_32) 1118END(_ucas_32)
1114 1119
1115/* 1120/*
1116 * int _ucas_64(volatile uint64_t *uptr, uint64_t old, uint64_t new, 1121 * int _ucas_64(volatile uint64_t *uptr, uint64_t old, uint64_t new,
1117 * uint64_t *ret); 1122 * uint64_t *ret);
1118 */ 1123 */
1119LEAF_NOPROFILE(_ucas_64, 4) 1124LEAF_NOPROFILE(_ucas_64, 4)
1120 UFETCHSTORE_PROLOGUE 1125 UFETCHSTORE_PROLOGUE
11213: 11263:
1122.Lucas_64_start: 1127.Lucas_64_start:
1123 mov a2, t2 1128 mov a2, t2
1124 ldq_l t0, 0(a0) /* t0 = *uptr */ 1129 ldq_l t0, 0(a0) /* t0 = *uptr */
1125 cmpeq t0, a1, t1 /* does t0 = old? */ 1130 cmpeq t0, a1, t1 /* does t0 = old? */
1126 beq t1, 1f /* if not, skip */ 1131 beq t1, 1f /* if not, skip */
1127 stq_c t2, 0(a0) /* *uptr ~= new */ 1132 stq_c t2, 0(a0) /* *uptr ~= new */
1128.Lucas_64_end: 1133.Lucas_64_end:
 1129 beq t2, 2f /* did it work? */ 1134 beq t2, 2f /* did it work? */
11301: 11351:
1131 stq t0, 0(a3) /* *ret = t0 */ 1136 stq t0, 0(a3) /* *ret = t0 */
1132 mov zero, v0 1137 mov zero, v0
1133 RET 1138 RET
11342: 11392:
1135 br 3b 1140 br 3b
1136END(_ucas_64) 1141END(_ucas_64)
1137 1142
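
_ucas_32/_ucas_64 build compare-and-swap from load-locked/store-conditional:
ldl_l/ldq_l loads the value and acquires the lock flag, the value is compared
against "old", and stl_c/stq_c attempts the store, writing its success status
back into t2; if the store-conditional fails, the code branches back to label 3
and retries.  The caller-visible contract, modeled with C11 atomics (a sketch,
not the kernel's implementation; the EFAULT path from the prologue is omitted):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Returns 0 and stores the value observed at *uptr into *ret;
     * the CAS succeeded iff *ret == old. */
    static int
    ucas32_model(_Atomic uint32_t *uptr, uint32_t old, uint32_t new_,
        uint32_t *ret)
    {
            uint32_t expected = old;

            (void)atomic_compare_exchange_strong(uptr, &expected, new_);
            *ret = expected;        /* on failure, the value actually seen */
            return 0;
    }
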
1138/**************************************************************************/ 1143/**************************************************************************/
1139 1144
1140/* 1145/*
1141 * Fault table of user access functions for trap(). 1146 * Fault table of user access functions for trap().
1142 */ 1147 */
1143 .section ".rodata" 1148 .section ".rodata"
1144 .globl onfault_table 1149 .globl onfault_table
1145onfault_table: 1150onfault_table:
1146 .quad .L_ufetch_8_start 1151 .quad .L_ufetch_8_start
1147 .quad .L_ufetch_8_end 1152 .quad .L_ufetch_8_end
1148 .quad ufetchstoreerr 1153 .quad ufetchstoreerr
1149 1154
1150 .quad .L_ufetch_16_start 1155 .quad .L_ufetch_16_start
1151 .quad .L_ufetch_16_end 1156 .quad .L_ufetch_16_end
1152 .quad ufetchstoreerr 1157 .quad ufetchstoreerr
1153 1158
1154 .quad .L_ufetch_32_start 1159 .quad .L_ufetch_32_start
1155 .quad .L_ufetch_32_end 1160 .quad .L_ufetch_32_end
1156 .quad ufetchstoreerr 1161 .quad ufetchstoreerr
1157 1162
1158 .quad .L_ufetch_64_start 1163 .quad .L_ufetch_64_start

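The onfault_table above is consumed by trap(): each entry is a triple of
starting PC, ending PC, and recovery handler, bracketing the instruction in
each accessor that may fault on a user address.  A sketch of the lookup trap()
presumably performs (trap() itself is not part of this diff, and the entry
count used here is a hypothetical stand-in):

    #include <stdint.h>

    struct onfault_entry {
            uintptr_t start, end;   /* PC window of the user access */
            uintptr_t handler;      /* where to resume on a fault */
    };
    extern const struct onfault_entry onfault_table[];
    extern const int onfault_nentries;      /* hypothetical count */

    static uintptr_t
    onfault_lookup(uintptr_t faulting_pc)
    {
            for (int i = 0; i < onfault_nentries; i++) {
                    if (faulting_pc >= onfault_table[i].start &&
                        faulting_pc < onfault_table[i].end)
                            return onfault_table[i].handler;
            }
            return 0;       /* no handler: a genuine kernel fault */
    }
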
cvs diff -r1.362 -r1.363 src/sys/arch/alpha/alpha/machdep.c

--- src/sys/arch/alpha/alpha/machdep.c 2020/09/02 17:40:23 1.362
+++ src/sys/arch/alpha/alpha/machdep.c 2020/09/03 02:09:09 1.363
@@ -1,1932 +1,1900 @@ @@ -1,1932 +1,1900 @@
1/* $NetBSD: machdep.c,v 1.362 2020/09/02 17:40:23 riastradh Exp $ */ 1/* $NetBSD: machdep.c,v 1.363 2020/09/03 02:09:09 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 1999, 2000, 2019 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 1999, 2000, 2019 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center and by Chris G. Demetriou. 9 * NASA Ames Research Center and by Chris G. Demetriou.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. 34 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
35 * All rights reserved. 35 * All rights reserved.
36 * 36 *
37 * Author: Chris G. Demetriou 37 * Author: Chris G. Demetriou
38 * 38 *
39 * Permission to use, copy, modify and distribute this software and 39 * Permission to use, copy, modify and distribute this software and
40 * its documentation is hereby granted, provided that both the copyright 40 * its documentation is hereby granted, provided that both the copyright
41 * notice and this permission notice appear in all copies of the 41 * notice and this permission notice appear in all copies of the
42 * software, derivative works or modified versions, and any portions 42 * software, derivative works or modified versions, and any portions
43 * thereof, and that both notices appear in supporting documentation. 43 * thereof, and that both notices appear in supporting documentation.
44 * 44 *
45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
48 * 48 *
49 * Carnegie Mellon requests users of this software to return to 49 * Carnegie Mellon requests users of this software to return to
50 * 50 *
51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
52 * School of Computer Science 52 * School of Computer Science
53 * Carnegie Mellon University 53 * Carnegie Mellon University
54 * Pittsburgh PA 15213-3890 54 * Pittsburgh PA 15213-3890
55 * 55 *
56 * any improvements or extensions that they make and grant Carnegie the 56 * any improvements or extensions that they make and grant Carnegie the
57 * rights to redistribute these changes. 57 * rights to redistribute these changes.
58 */ 58 */
59 59
60#include "opt_ddb.h" 60#include "opt_ddb.h"
61#include "opt_kgdb.h" 61#include "opt_kgdb.h"
62#include "opt_modular.h" 62#include "opt_modular.h"
63#include "opt_multiprocessor.h" 63#include "opt_multiprocessor.h"
64#include "opt_dec_3000_300.h" 64#include "opt_dec_3000_300.h"
65#include "opt_dec_3000_500.h" 65#include "opt_dec_3000_500.h"
66#include "opt_execfmt.h" 66#include "opt_execfmt.h"
67 67
68#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 68#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
69 69
70__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.362 2020/09/02 17:40:23 riastradh Exp $"); 70__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.363 2020/09/03 02:09:09 thorpej Exp $");
71 71
72#include <sys/param.h> 72#include <sys/param.h>
73#include <sys/systm.h> 73#include <sys/systm.h>
74#include <sys/signalvar.h> 74#include <sys/signalvar.h>
75#include <sys/kernel.h> 75#include <sys/kernel.h>
76#include <sys/cpu.h> 76#include <sys/cpu.h>
77#include <sys/proc.h> 77#include <sys/proc.h>
78#include <sys/ras.h> 78#include <sys/ras.h>
79#include <sys/sched.h> 79#include <sys/sched.h>
80#include <sys/reboot.h> 80#include <sys/reboot.h>
81#include <sys/device.h> 81#include <sys/device.h>
82#include <sys/malloc.h> 82#include <sys/malloc.h>
83#include <sys/module.h> 83#include <sys/module.h>
84#include <sys/mman.h> 84#include <sys/mman.h>
85#include <sys/msgbuf.h> 85#include <sys/msgbuf.h>
86#include <sys/ioctl.h> 86#include <sys/ioctl.h>
87#include <sys/tty.h> 87#include <sys/tty.h>
88#include <sys/exec.h> 88#include <sys/exec.h>
89#include <sys/exec_aout.h> /* for MID_* */ 89#include <sys/exec_aout.h> /* for MID_* */
90#include <sys/exec_ecoff.h> 90#include <sys/exec_ecoff.h>
91#include <sys/core.h> 91#include <sys/core.h>
92#include <sys/kcore.h> 92#include <sys/kcore.h>
93#include <sys/ucontext.h> 93#include <sys/ucontext.h>
94#include <sys/conf.h> 94#include <sys/conf.h>
95#include <sys/ksyms.h> 95#include <sys/ksyms.h>
96#include <sys/kauth.h> 96#include <sys/kauth.h>
97#include <sys/atomic.h> 97#include <sys/atomic.h>
98#include <sys/cpu.h> 98#include <sys/cpu.h>
99 99
100#include <machine/kcore.h> 100#include <machine/kcore.h>
101#include <machine/fpu.h> 101#include <machine/fpu.h>
102 102
103#include <sys/mount.h> 103#include <sys/mount.h>
104#include <sys/syscallargs.h> 104#include <sys/syscallargs.h>
105 105
106#include <uvm/uvm.h> 106#include <uvm/uvm.h>
107#include <sys/sysctl.h> 107#include <sys/sysctl.h>
108 108
109#include <dev/cons.h> 109#include <dev/cons.h>
110#include <dev/mm.h> 110#include <dev/mm.h>
111 111
112#include <machine/autoconf.h> 112#include <machine/autoconf.h>
113#include <machine/reg.h> 113#include <machine/reg.h>
114#include <machine/rpb.h> 114#include <machine/rpb.h>
115#include <machine/prom.h> 115#include <machine/prom.h>
116#include <machine/cpuconf.h> 116#include <machine/cpuconf.h>
117#include <machine/ieeefp.h> 117#include <machine/ieeefp.h>
118 118
119#ifdef DDB 119#ifdef DDB
120#include <machine/db_machdep.h> 120#include <machine/db_machdep.h>
121#include <ddb/db_access.h> 121#include <ddb/db_access.h>
122#include <ddb/db_sym.h> 122#include <ddb/db_sym.h>
123#include <ddb/db_extern.h> 123#include <ddb/db_extern.h>
124#include <ddb/db_interface.h> 124#include <ddb/db_interface.h>
125#endif 125#endif
126 126
127#ifdef KGDB 127#ifdef KGDB
128#include <sys/kgdb.h> 128#include <sys/kgdb.h>
129#endif 129#endif
130 130
131#ifdef DEBUG 131#ifdef DEBUG
132#include <machine/sigdebug.h> 132#include <machine/sigdebug.h>
133int sigdebug = 0x0; 133int sigdebug = 0x0;
134int sigpid = 0; 134int sigpid = 0;
135#endif 135#endif
136 136
137#include <machine/alpha.h> 137#include <machine/alpha.h>
138 138
139#include "ksyms.h" 139#include "ksyms.h"
140 140
141struct vm_map *phys_map = NULL; 141struct vm_map *phys_map = NULL;
142 142
143void *msgbufaddr; 143void *msgbufaddr;
144 144
145int maxmem; /* max memory per process */ 145int maxmem; /* max memory per process */
146 146
147int totalphysmem; /* total amount of physical memory in system */ 147int totalphysmem; /* total amount of physical memory in system */
148int resvmem; /* amount of memory reserved for PROM */ 148int resvmem; /* amount of memory reserved for PROM */
149int unusedmem; /* amount of memory for OS that we don't use */ 149int unusedmem; /* amount of memory for OS that we don't use */
150int unknownmem; /* amount of memory with an unknown use */ 150int unknownmem; /* amount of memory with an unknown use */
151 151
152int cputype; /* system type, from the RPB */ 152int cputype; /* system type, from the RPB */
153 153
154int bootdev_debug = 0; /* patchable, or from DDB */ 154int bootdev_debug = 0; /* patchable, or from DDB */
155 155
156/* 156/*
157 * XXX We need an address to which we can assign things so that they 157 * XXX We need an address to which we can assign things so that they
158 * won't be optimized away because we didn't use the value. 158 * won't be optimized away because we didn't use the value.
159 */ 159 */
160uint32_t no_optimize; 160uint32_t no_optimize;
161 161
162/* the following is used externally (sysctl_hw) */ 162/* the following is used externally (sysctl_hw) */
163char machine[] = MACHINE; /* from <machine/param.h> */ 163char machine[] = MACHINE; /* from <machine/param.h> */
164char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */ 164char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */
165 165
166/* Number of machine cycles per microsecond */ 166/* Number of machine cycles per microsecond */
167uint64_t cycles_per_usec; 167uint64_t cycles_per_usec;
168 168
169/* number of CPUs in the box. really! */ 169/* number of CPUs in the box. really! */
170int ncpus; 170int ncpus;
171 171
172struct bootinfo_kernel bootinfo; 172struct bootinfo_kernel bootinfo;
173 173
174/* For built-in TCDS */ 174/* For built-in TCDS */
175#if defined(DEC_3000_300) || defined(DEC_3000_500) 175#if defined(DEC_3000_300) || defined(DEC_3000_500)
176uint8_t dec_3000_scsiid[3], dec_3000_scsifast[3]; 176uint8_t dec_3000_scsiid[3], dec_3000_scsifast[3];
177#endif 177#endif
178 178
179struct platform platform; 179struct platform platform;
180 180
181#if NKSYMS || defined(DDB) || defined(MODULAR) 181#if NKSYMS || defined(DDB) || defined(MODULAR)
182/* start and end of kernel symbol table */ 182/* start and end of kernel symbol table */
183void *ksym_start, *ksym_end; 183void *ksym_start, *ksym_end;
184#endif 184#endif
185 185
186/* for cpu_sysctl() */ 186/* for cpu_sysctl() */
187int alpha_unaligned_print = 1; /* warn about unaligned accesses */ 187int alpha_unaligned_print = 1; /* warn about unaligned accesses */
188int alpha_unaligned_fix = 1; /* fix up unaligned accesses */ 188int alpha_unaligned_fix = 1; /* fix up unaligned accesses */
189int alpha_unaligned_sigbus = 0; /* don't SIGBUS on fixed-up accesses */ 189int alpha_unaligned_sigbus = 0; /* don't SIGBUS on fixed-up accesses */
190int alpha_fp_sync_complete = 0; /* fp fixup if sync even without /s */ 190int alpha_fp_sync_complete = 0; /* fp fixup if sync even without /s */
191 191
192/* 192/*
193 * XXX This should be dynamically sized, but we have the chicken-egg problem! 193 * XXX This should be dynamically sized, but we have the chicken-egg problem!
194 * XXX it should also be larger than it is, because not all of the mddt 194 * XXX it should also be larger than it is, because not all of the mddt
195 * XXX clusters end up being used for VM. 195 * XXX clusters end up being used for VM.
196 */ 196 */
197phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; /* low size bits overloaded */ 197phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; /* low size bits overloaded */
198int mem_cluster_cnt; 198int mem_cluster_cnt;
199 199
200int cpu_dump(void); 200int cpu_dump(void);
201int cpu_dumpsize(void); 201int cpu_dumpsize(void);
202u_long cpu_dump_mempagecnt(void); 202u_long cpu_dump_mempagecnt(void);
203void dumpsys(void); 203void dumpsys(void);
204void identifycpu(void); 204void identifycpu(void);
205void printregs(struct reg *); 205void printregs(struct reg *);
206 206
207const pcu_ops_t fpu_ops = { 207const pcu_ops_t fpu_ops = {
208 .pcu_id = PCU_FPU, 208 .pcu_id = PCU_FPU,
209 .pcu_state_load = fpu_state_load, 209 .pcu_state_load = fpu_state_load,
210 .pcu_state_save = fpu_state_save, 210 .pcu_state_save = fpu_state_save,
211 .pcu_state_release = fpu_state_release, 211 .pcu_state_release = fpu_state_release,
212}; 212};
213 213
214const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = { 214const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
215 [PCU_FPU] = &fpu_ops, 215 [PCU_FPU] = &fpu_ops,
216}; 216};
217 217
218void 218void
219alpha_init(u_long xxx_pfn __unused, u_long ptb, u_long bim, u_long bip, 219alpha_init(u_long xxx_pfn __unused, u_long ptb, u_long bim, u_long bip,
220 u_long biv) 220 u_long biv)
221 /* pfn: first free PFN number (no longer used) */ 221 /* pfn: first free PFN number (no longer used) */
222 /* ptb: PFN of current level 1 page table */ 222 /* ptb: PFN of current level 1 page table */
223 /* bim: bootinfo magic */ 223 /* bim: bootinfo magic */
224 /* bip: bootinfo pointer */ 224 /* bip: bootinfo pointer */
225 /* biv: bootinfo version */ 225 /* biv: bootinfo version */
226{ 226{
227 extern char kernel_text[], _end[]; 227 extern char kernel_text[], _end[];
228 struct mddt *mddtp; 228 struct mddt *mddtp;
229 struct mddt_cluster *memc; 229 struct mddt_cluster *memc;
230 int i, mddtweird; 230 int i, mddtweird;
231 struct pcb *pcb0; 231 struct pcb *pcb0;
232 vaddr_t kernstart, kernend, v; 232 vaddr_t kernstart, kernend, v;
233 paddr_t kernstartpfn, kernendpfn, pfn0, pfn1; 233 paddr_t kernstartpfn, kernendpfn, pfn0, pfn1;
234 cpuid_t cpu_id; 234 cpuid_t cpu_id;
235 struct cpu_info *ci; 235 struct cpu_info *ci;
236 char *p; 236 char *p;
237 const char *bootinfo_msg; 237 const char *bootinfo_msg;
238 const struct cpuinit *c; 238 const struct cpuinit *c;
239 239
240 /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */ 240 /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */
241 241
242 /* 242 /*
243 * Turn off interrupts (not mchecks) and floating point. 243 * Turn off interrupts (not mchecks) and floating point.
244 * Make sure the instruction and data streams are consistent. 244 * Make sure the instruction and data streams are consistent.
245 */ 245 */
246 (void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); 246 (void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
247 alpha_pal_wrfen(0); 247 alpha_pal_wrfen(0);
248 ALPHA_TBIA(); 248 ALPHA_TBIA();
249 alpha_pal_imb(); 249 alpha_pal_imb();
250 250
251 /* Initialize the SCB. */ 251 /* Initialize the SCB. */
252 scb_init(); 252 scb_init();
253 253
254 cpu_id = cpu_number(); 254 cpu_id = cpu_number();
255 255
256#if defined(MULTIPROCESSOR) 256#if defined(MULTIPROCESSOR)
257 /* 257 /*
258 * Set our SysValue to the address of our cpu_info structure. 258 * Set our SysValue to the address of our cpu_info structure.
259 * Secondary processors do this in their spinup trampoline. 259 * Secondary processors do this in their spinup trampoline.
260 */ 260 */
261 alpha_pal_wrval((u_long)&cpu_info_primary); 261 alpha_pal_wrval((u_long)&cpu_info_primary);
262 cpu_info[cpu_id] = &cpu_info_primary; 262 cpu_info[cpu_id] = &cpu_info_primary;
263#endif 263#endif
264 264
265 ci = curcpu(); 265 ci = curcpu();
266 ci->ci_cpuid = cpu_id; 266 ci->ci_cpuid = cpu_id;
267 267
268 /* 268 /*
269 * Get critical system information (if possible, from the 269 * Get critical system information (if possible, from the
270 * information provided by the boot program). 270 * information provided by the boot program).
271 */ 271 */
272 bootinfo_msg = NULL; 272 bootinfo_msg = NULL;
273 if (bim == BOOTINFO_MAGIC) { 273 if (bim == BOOTINFO_MAGIC) {
274 if (biv == 0) { /* backward compat */ 274 if (biv == 0) { /* backward compat */
275 biv = *(u_long *)bip; 275 biv = *(u_long *)bip;
276 bip += 8; 276 bip += 8;
277 } 277 }
278 switch (biv) { 278 switch (biv) {
279 case 1: { 279 case 1: {
280 struct bootinfo_v1 *v1p = (struct bootinfo_v1 *)bip; 280 struct bootinfo_v1 *v1p = (struct bootinfo_v1 *)bip;
281 281
282 bootinfo.ssym = v1p->ssym; 282 bootinfo.ssym = v1p->ssym;
283 bootinfo.esym = v1p->esym; 283 bootinfo.esym = v1p->esym;
284 /* hwrpb may not be provided by boot block in v1 */ 284 /* hwrpb may not be provided by boot block in v1 */
285 if (v1p->hwrpb != NULL) { 285 if (v1p->hwrpb != NULL) {
286 bootinfo.hwrpb_phys = 286 bootinfo.hwrpb_phys =
287 ((struct rpb *)v1p->hwrpb)->rpb_phys; 287 ((struct rpb *)v1p->hwrpb)->rpb_phys;
288 bootinfo.hwrpb_size = v1p->hwrpbsize; 288 bootinfo.hwrpb_size = v1p->hwrpbsize;
289 } else { 289 } else {
290 bootinfo.hwrpb_phys = 290 bootinfo.hwrpb_phys =
291 ((struct rpb *)HWRPB_ADDR)->rpb_phys; 291 ((struct rpb *)HWRPB_ADDR)->rpb_phys;
292 bootinfo.hwrpb_size = 292 bootinfo.hwrpb_size =
293 ((struct rpb *)HWRPB_ADDR)->rpb_size; 293 ((struct rpb *)HWRPB_ADDR)->rpb_size;
294 } 294 }
295 memcpy(bootinfo.boot_flags, v1p->boot_flags, 295 memcpy(bootinfo.boot_flags, v1p->boot_flags,
296 uimin(sizeof v1p->boot_flags, 296 uimin(sizeof v1p->boot_flags,
297 sizeof bootinfo.boot_flags)); 297 sizeof bootinfo.boot_flags));
298 memcpy(bootinfo.booted_kernel, v1p->booted_kernel, 298 memcpy(bootinfo.booted_kernel, v1p->booted_kernel,
299 uimin(sizeof v1p->booted_kernel, 299 uimin(sizeof v1p->booted_kernel,
300 sizeof bootinfo.booted_kernel)); 300 sizeof bootinfo.booted_kernel));
301 /* booted dev not provided in bootinfo */ 301 /* booted dev not provided in bootinfo */
302 init_prom_interface((struct rpb *) 302 init_prom_interface(ptb, (struct rpb *)
303 ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys)); 303 ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys));
304 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev, 304 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
305 sizeof bootinfo.booted_dev); 305 sizeof bootinfo.booted_dev);
306 break; 306 break;
307 } 307 }
308 default: 308 default:
309 bootinfo_msg = "unknown bootinfo version"; 309 bootinfo_msg = "unknown bootinfo version";
310 goto nobootinfo; 310 goto nobootinfo;
311 } 311 }
312 } else { 312 } else {
313 bootinfo_msg = "boot program did not pass bootinfo"; 313 bootinfo_msg = "boot program did not pass bootinfo";
314nobootinfo: 314nobootinfo:
315 bootinfo.ssym = (u_long)_end; 315 bootinfo.ssym = (u_long)_end;
316 bootinfo.esym = (u_long)_end; 316 bootinfo.esym = (u_long)_end;
317 bootinfo.hwrpb_phys = ((struct rpb *)HWRPB_ADDR)->rpb_phys; 317 bootinfo.hwrpb_phys = ((struct rpb *)HWRPB_ADDR)->rpb_phys;
318 bootinfo.hwrpb_size = ((struct rpb *)HWRPB_ADDR)->rpb_size; 318 bootinfo.hwrpb_size = ((struct rpb *)HWRPB_ADDR)->rpb_size;
319 init_prom_interface((struct rpb *)HWRPB_ADDR); 319 init_prom_interface(ptb, (struct rpb *)HWRPB_ADDR);
320 prom_getenv(PROM_E_BOOTED_OSFLAGS, bootinfo.boot_flags, 320 prom_getenv(PROM_E_BOOTED_OSFLAGS, bootinfo.boot_flags,
321 sizeof bootinfo.boot_flags); 321 sizeof bootinfo.boot_flags);
322 prom_getenv(PROM_E_BOOTED_FILE, bootinfo.booted_kernel, 322 prom_getenv(PROM_E_BOOTED_FILE, bootinfo.booted_kernel,
323 sizeof bootinfo.booted_kernel); 323 sizeof bootinfo.booted_kernel);
324 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev, 324 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
325 sizeof bootinfo.booted_dev); 325 sizeof bootinfo.booted_dev);
326 } 326 }
327 327
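
Both call sites above now pass ptb, the PFN of the current level-1 page table
received by alpha_init(), to init_prom_interface() along with the RPB pointer,
matching this commit's centralization of the PROM-console logic in prom.c.
The implied new signature (the prom.c body is not part of this excerpt, and
the void return type is an assumption on my part):

    void init_prom_interface(u_long ptb, struct rpb *hwrpb);
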
328 /* 328 /*
329 * Initialize the kernel's mapping of the RPB. It's needed for 329 * Initialize the kernel's mapping of the RPB. It's needed for
330 * lots of things. 330 * lots of things.
331 */ 331 */
332 hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys); 332 hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys);
333 333
334#if defined(DEC_3000_300) || defined(DEC_3000_500) 334#if defined(DEC_3000_300) || defined(DEC_3000_500)
335 if (hwrpb->rpb_type == ST_DEC_3000_300 || 335 if (hwrpb->rpb_type == ST_DEC_3000_300 ||
336 hwrpb->rpb_type == ST_DEC_3000_500) { 336 hwrpb->rpb_type == ST_DEC_3000_500) {
337 prom_getenv(PROM_E_SCSIID, dec_3000_scsiid, 337 prom_getenv(PROM_E_SCSIID, dec_3000_scsiid,
338 sizeof(dec_3000_scsiid)); 338 sizeof(dec_3000_scsiid));
339 prom_getenv(PROM_E_SCSIFAST, dec_3000_scsifast, 339 prom_getenv(PROM_E_SCSIFAST, dec_3000_scsifast,
340 sizeof(dec_3000_scsifast)); 340 sizeof(dec_3000_scsifast));
341 } 341 }
342#endif 342#endif
343 343
344 /* 344 /*
345 * Remember how many cycles there are per microsecond, 345 * Remember how many cycles there are per microsecond,
346 * so that we can use delay(). Round up, for safety. 346 * so that we can use delay(). Round up, for safety.
347 */ 347 */
348 cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000; 348 cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000;
349 349
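
The division rounds up (ceiling division), so delay() never undershoots.  For
example, with a hypothetical 466,666,666 Hz cycle counter:

    /* (466666666 + 999999) / 1000000 == 467, i.e. ceil(466.67) */
    uint64_t cycles_per_usec_example = (466666666ULL + 999999) / 1000000;

A counter running at an exact multiple, say 500,000,000 Hz, yields exactly 500.
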
350 /* 350 /*
351 * Initialize the (temporary) bootstrap console interface, so 351 * Initialize the (temporary) bootstrap console interface, so
 352 * we can use printf until the VM system starts being set up. 352 * we can use printf until the VM system starts being set up.
353 * The real console is initialized before then. 353 * The real console is initialized before then.
354 */ 354 */
355 init_bootstrap_console(); 355 init_bootstrap_console();
356 356
357 /* OUTPUT NOW ALLOWED */ 357 /* OUTPUT NOW ALLOWED */
358 358
359 /* delayed from above */ 359 /* delayed from above */
360 if (bootinfo_msg) 360 if (bootinfo_msg)
361 printf("WARNING: %s (0x%lx, 0x%lx, 0x%lx)\n", 361 printf("WARNING: %s (0x%lx, 0x%lx, 0x%lx)\n",
362 bootinfo_msg, bim, bip, biv); 362 bootinfo_msg, bim, bip, biv);
363 363
364 /* Initialize the trap vectors on the primary processor. */ 364 /* Initialize the trap vectors on the primary processor. */
365 trap_init(); 365 trap_init();
366 366
367 /* 367 /*
368 * Find out this system's page size, and initialize 368 * Find out this system's page size, and initialize
369 * PAGE_SIZE-dependent variables. 369 * PAGE_SIZE-dependent variables.
370 */ 370 */
371 if (hwrpb->rpb_page_size != ALPHA_PGBYTES) 371 if (hwrpb->rpb_page_size != ALPHA_PGBYTES)
372 panic("page size %lu != %d?!", hwrpb->rpb_page_size, 372 panic("page size %lu != %d?!", hwrpb->rpb_page_size,
373 ALPHA_PGBYTES); 373 ALPHA_PGBYTES);
374 uvmexp.pagesize = hwrpb->rpb_page_size; 374 uvmexp.pagesize = hwrpb->rpb_page_size;
375 uvm_md_init(); 375 uvm_md_init();
376 376
377 /* 377 /*
378 * Find out what hardware we're on, and do basic initialization. 378 * cputype has been initialized in init_prom_interface().
 379 * Perform basic platform initialization using this info.
379 */ 380 */
380 cputype = hwrpb->rpb_type; 381 KASSERT(prom_interface_initialized);
381 if (cputype < 0) { 
382 /* 
383 * At least some white-box systems have SRM which 
384 * reports a systype that's the negative of their 
385 * blue-box counterpart. 
386 */ 
387 cputype = -cputype; 
388 } 
389 c = platform_lookup(cputype); 382 c = platform_lookup(cputype);
390 if (c == NULL) { 383 if (c == NULL) {
391 platform_not_supported(); 384 platform_not_supported();
392 /* NOTREACHED */ 385 /* NOTREACHED */
393 } 386 }
394 (*c->init)(); 387 (*c->init)();
395 cpu_setmodel("%s", platform.model); 388 cpu_setmodel("%s", platform.model);
396 389
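
The fixup deleted from the left column, which negated the negative systype that
some white-box SRM reports, has not disappeared: per the new comment and the
KASSERT, cputype is now set during init_prom_interface(), so the equivalent of
the following presumably lives in prom.c:

    cputype = hwrpb->rpb_type;
    if (cputype < 0) {
            /* Some white-box SRM reports the negative of the
             * blue-box counterpart's systype. */
            cputype = -cputype;
    }
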
397 /* 390 /*
398 * Initialize the real console, so that the bootstrap console is 391 * Initialize the real console, so that the bootstrap console is
399 * no longer necessary. 392 * no longer necessary.
400 */ 393 */
401 (*platform.cons_init)(); 394 (*platform.cons_init)();
402 395
403#ifdef DIAGNOSTIC 396#ifdef DIAGNOSTIC
404 /* Paranoid sanity checking */ 397 /* Paranoid sanity checking */
405 398
406 /* We should always be running on the primary. */ 399 /* We should always be running on the primary. */
407 assert(hwrpb->rpb_primary_cpu_id == cpu_id); 400 assert(hwrpb->rpb_primary_cpu_id == cpu_id);
408 401
409 /* 402 /*
410 * On single-CPU systypes, the primary should always be CPU 0, 403 * On single-CPU systypes, the primary should always be CPU 0,
411 * except on Alpha 8200 systems where the CPU id is related 404 * except on Alpha 8200 systems where the CPU id is related
412 * to the VID, which is related to the Turbo Laser node id. 405 * to the VID, which is related to the Turbo Laser node id.
413 */ 406 */
414 if (cputype != ST_DEC_21000) 407 if (cputype != ST_DEC_21000)
415 assert(hwrpb->rpb_primary_cpu_id == 0); 408 assert(hwrpb->rpb_primary_cpu_id == 0);
416#endif 409#endif
417 410
418 /* NO MORE FIRMWARE ACCESS ALLOWED */ 411 /* NO MORE FIRMWARE ACCESS ALLOWED */
 419#ifdef _PMAP_MAY_USE_PROM_CONSOLE 412 /* XXX Unless prom_uses_prom_console() evaluates to non-zero. */
420 /* 
421 * XXX (unless _PMAP_MAY_USE_PROM_CONSOLE is defined and 
422 * XXX pmap_uses_prom_console() evaluates to non-zero.) 
423 */ 
424#endif 
425 413
426 /* 414 /*
427 * Find the beginning and end of the kernel (and leave a 415 * Find the beginning and end of the kernel (and leave a
428 * bit of space before the beginning for the bootstrap 416 * bit of space before the beginning for the bootstrap
429 * stack). 417 * stack).
430 */ 418 */
431 kernstart = trunc_page((vaddr_t)kernel_text) - 2 * PAGE_SIZE; 419 kernstart = trunc_page((vaddr_t)kernel_text) - 2 * PAGE_SIZE;
432#if NKSYMS || defined(DDB) || defined(MODULAR) 420#if NKSYMS || defined(DDB) || defined(MODULAR)
433 ksym_start = (void *)bootinfo.ssym; 421 ksym_start = (void *)bootinfo.ssym;
434 ksym_end = (void *)bootinfo.esym; 422 ksym_end = (void *)bootinfo.esym;
435 kernend = (vaddr_t)round_page((vaddr_t)ksym_end); 423 kernend = (vaddr_t)round_page((vaddr_t)ksym_end);
436#else 424#else
437 kernend = (vaddr_t)round_page((vaddr_t)_end); 425 kernend = (vaddr_t)round_page((vaddr_t)_end);
438#endif 426#endif
439 427
440 kernstartpfn = atop(ALPHA_K0SEG_TO_PHYS(kernstart)); 428 kernstartpfn = atop(ALPHA_K0SEG_TO_PHYS(kernstart));
441 kernendpfn = atop(ALPHA_K0SEG_TO_PHYS(kernend)); 429 kernendpfn = atop(ALPHA_K0SEG_TO_PHYS(kernend));
442 430
443 /* 431 /*
444 * Find out how much memory is available, by looking at 432 * Find out how much memory is available, by looking at
445 * the memory cluster descriptors. This also tries to do 433 * the memory cluster descriptors. This also tries to do
 446 * its best to detect things that have never been seen 434 * its best to detect things that have never been seen
447 * before... 435 * before...
448 */ 436 */
449 mddtp = (struct mddt *)(((char *)hwrpb) + hwrpb->rpb_memdat_off); 437 mddtp = (struct mddt *)(((char *)hwrpb) + hwrpb->rpb_memdat_off);
450 438
451 /* MDDT SANITY CHECKING */ 439 /* MDDT SANITY CHECKING */
452 mddtweird = 0; 440 mddtweird = 0;
453 if (mddtp->mddt_cluster_cnt < 2) { 441 if (mddtp->mddt_cluster_cnt < 2) {
454 mddtweird = 1; 442 mddtweird = 1;
455 printf("WARNING: weird number of mem clusters: %lu\n", 443 printf("WARNING: weird number of mem clusters: %lu\n",
456 mddtp->mddt_cluster_cnt); 444 mddtp->mddt_cluster_cnt);
457 } 445 }
458 446
459#if 0 447#if 0
460 printf("Memory cluster count: %" PRIu64 "\n", mddtp->mddt_cluster_cnt); 448 printf("Memory cluster count: %" PRIu64 "\n", mddtp->mddt_cluster_cnt);
461#endif 449#endif
462 450
463 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) { 451 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
464 memc = &mddtp->mddt_clusters[i]; 452 memc = &mddtp->mddt_clusters[i];
465#if 0 453#if 0
466 printf("MEMC %d: pfn 0x%lx cnt 0x%lx usage 0x%lx\n", i, 454 printf("MEMC %d: pfn 0x%lx cnt 0x%lx usage 0x%lx\n", i,
467 memc->mddt_pfn, memc->mddt_pg_cnt, memc->mddt_usage); 455 memc->mddt_pfn, memc->mddt_pg_cnt, memc->mddt_usage);
468#endif 456#endif
469 totalphysmem += memc->mddt_pg_cnt; 457 totalphysmem += memc->mddt_pg_cnt;
470 if (mem_cluster_cnt < VM_PHYSSEG_MAX) { /* XXX */ 458 if (mem_cluster_cnt < VM_PHYSSEG_MAX) { /* XXX */
471 mem_clusters[mem_cluster_cnt].start = 459 mem_clusters[mem_cluster_cnt].start =
472 ptoa(memc->mddt_pfn); 460 ptoa(memc->mddt_pfn);
473 mem_clusters[mem_cluster_cnt].size = 461 mem_clusters[mem_cluster_cnt].size =
474 ptoa(memc->mddt_pg_cnt); 462 ptoa(memc->mddt_pg_cnt);
475 if (memc->mddt_usage & MDDT_mbz || 463 if (memc->mddt_usage & MDDT_mbz ||
476 memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */ 464 memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */
477 memc->mddt_usage & MDDT_PALCODE) 465 memc->mddt_usage & MDDT_PALCODE)
478 mem_clusters[mem_cluster_cnt].size |= 466 mem_clusters[mem_cluster_cnt].size |=
479 PROT_READ; 467 PROT_READ;
480 else 468 else
481 mem_clusters[mem_cluster_cnt].size |= 469 mem_clusters[mem_cluster_cnt].size |=
482 PROT_READ | PROT_WRITE | PROT_EXEC; 470 PROT_READ | PROT_WRITE | PROT_EXEC;
483 mem_cluster_cnt++; 471 mem_cluster_cnt++;
484 } 472 }
485 473
486 if (memc->mddt_usage & MDDT_mbz) { 474 if (memc->mddt_usage & MDDT_mbz) {
487 mddtweird = 1; 475 mddtweird = 1;
488 printf("WARNING: mem cluster %d has weird " 476 printf("WARNING: mem cluster %d has weird "
489 "usage 0x%lx\n", i, memc->mddt_usage); 477 "usage 0x%lx\n", i, memc->mddt_usage);
490 unknownmem += memc->mddt_pg_cnt; 478 unknownmem += memc->mddt_pg_cnt;
491 continue; 479 continue;
492 } 480 }
493 if (memc->mddt_usage & MDDT_NONVOLATILE) { 481 if (memc->mddt_usage & MDDT_NONVOLATILE) {
494 /* XXX should handle these... */ 482 /* XXX should handle these... */
495 printf("WARNING: skipping non-volatile mem " 483 printf("WARNING: skipping non-volatile mem "
496 "cluster %d\n", i); 484 "cluster %d\n", i);
497 unusedmem += memc->mddt_pg_cnt; 485 unusedmem += memc->mddt_pg_cnt;
498 continue; 486 continue;
499 } 487 }
500 if (memc->mddt_usage & MDDT_PALCODE) { 488 if (memc->mddt_usage & MDDT_PALCODE) {
501 resvmem += memc->mddt_pg_cnt; 489 resvmem += memc->mddt_pg_cnt;
502 continue; 490 continue;
503 } 491 }
504 492
505 /* 493 /*
506 * We have a memory cluster available for system 494 * We have a memory cluster available for system
507 * software use. We must determine if this cluster 495 * software use. We must determine if this cluster
508 * holds the kernel. 496 * holds the kernel.
509 */ 497 */
510#ifdef _PMAP_MAY_USE_PROM_CONSOLE 498
511 /* 499 /*
512 * XXX If the kernel uses the PROM console, we only use the 500 * XXX If the kernel uses the PROM console, we only use the
513 * XXX memory after the kernel in the first system segment, 501 * XXX memory after the kernel in the first system segment,
514 * XXX to avoid clobbering prom mapping, data, etc. 502 * XXX to avoid clobbering prom mapping, data, etc.
515 */ 503 */
516 if (!pmap_uses_prom_console() || physmem == 0) { 
517#endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 
518 physmem += memc->mddt_pg_cnt; 504 physmem += memc->mddt_pg_cnt;
519 pfn0 = memc->mddt_pfn; 505 pfn0 = memc->mddt_pfn;
520 pfn1 = memc->mddt_pfn + memc->mddt_pg_cnt; 506 pfn1 = memc->mddt_pfn + memc->mddt_pg_cnt;
521 if (pfn0 <= kernstartpfn && kernendpfn <= pfn1) { 507 if (pfn0 <= kernstartpfn && kernendpfn <= pfn1) {
522 /* 508 /*
523 * Must compute the location of the kernel 509 * Must compute the location of the kernel
524 * within the segment. 510 * within the segment.
525 */ 511 */
526#if 0 512#if 0
527 printf("Cluster %d contains kernel\n", i); 513 printf("Cluster %d contains kernel\n", i);
528#endif 514#endif
529#ifdef _PMAP_MAY_USE_PROM_CONSOLE 515 if (pfn0 < kernstartpfn && !prom_uses_prom_console()) {
530 if (!pmap_uses_prom_console()) { 
531#endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 
532 if (pfn0 < kernstartpfn) { 
533 /* 516 /*
534 * There is a chunk before the kernel. 517 * There is a chunk before the kernel.
535 */ 518 */
536#if 0 519#if 0
537 printf("Loading chunk before kernel: " 520 printf("Loading chunk before kernel: "
538 "0x%lx / 0x%lx\n", pfn0, kernstartpfn); 521 "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
539#endif 522#endif
540 uvm_page_physload(pfn0, kernstartpfn, 523 uvm_page_physload(pfn0, kernstartpfn,
541 pfn0, kernstartpfn, VM_FREELIST_DEFAULT); 524 pfn0, kernstartpfn, VM_FREELIST_DEFAULT);
542 } 525 }
543#ifdef _PMAP_MAY_USE_PROM_CONSOLE 
544 } 
545#endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 
546 if (kernendpfn < pfn1) { 526 if (kernendpfn < pfn1) {
547 /* 527 /*
548 * There is a chunk after the kernel. 528 * There is a chunk after the kernel.
549 */ 529 */
550#if 0 530#if 0
551 printf("Loading chunk after kernel: " 531 printf("Loading chunk after kernel: "
552 "0x%lx / 0x%lx\n", kernendpfn, pfn1); 532 "0x%lx / 0x%lx\n", kernendpfn, pfn1);
553#endif 533#endif
554 uvm_page_physload(kernendpfn, pfn1, 534 uvm_page_physload(kernendpfn, pfn1,
555 kernendpfn, pfn1, VM_FREELIST_DEFAULT); 535 kernendpfn, pfn1, VM_FREELIST_DEFAULT);
556 } 536 }
557 } else { 537 } else {
558 /* 538 /*
559 * Just load this cluster as one chunk. 539 * Just load this cluster as one chunk.
560 */ 540 */
561#if 0 541#if 0
562 printf("Loading cluster %d: 0x%lx / 0x%lx\n", i, 542 printf("Loading cluster %d: 0x%lx / 0x%lx\n", i,
563 pfn0, pfn1); 543 pfn0, pfn1);
564#endif 544#endif
565 uvm_page_physload(pfn0, pfn1, pfn0, pfn1, 545 uvm_page_physload(pfn0, pfn1, pfn0, pfn1,
566 VM_FREELIST_DEFAULT); 546 VM_FREELIST_DEFAULT);
567 } 547 }
568#ifdef _PMAP_MAY_USE_PROM_CONSOLE 
569 } 
570#endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 
571 } 548 }
572 549
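
The loop above splits the cluster that contains the kernel into the free chunk
below kernstartpfn (skipped when the PROM console is in use, to avoid
clobbering PROM mappings and data) and the free chunk above kernendpfn; every
other usable cluster is loaded whole.  A condensed sketch, where paddr_t and
the declarations are simplified stand-ins for the kernel's:

    typedef unsigned long paddr_t;
    #define VM_FREELIST_DEFAULT 0           /* illustrative value */
    extern void uvm_page_physload(paddr_t, paddr_t, paddr_t, paddr_t, int);
    extern int prom_uses_prom_console(void);

    static void
    load_cluster(paddr_t pfn0, paddr_t pfn1, paddr_t kstart, paddr_t kend)
    {
            if (pfn0 <= kstart && kend <= pfn1) {
                    /* Cluster holds the kernel: load the pieces around it. */
                    if (pfn0 < kstart && !prom_uses_prom_console())
                            uvm_page_physload(pfn0, kstart, pfn0, kstart,
                                VM_FREELIST_DEFAULT);
                    if (kend < pfn1)
                            uvm_page_physload(kend, pfn1, kend, pfn1,
                                VM_FREELIST_DEFAULT);
            } else {
                    /* Load the whole cluster as one chunk. */
                    uvm_page_physload(pfn0, pfn1, pfn0, pfn1,
                        VM_FREELIST_DEFAULT);
            }
    }
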
573 /* 550 /*
574 * Dump out the MDDT if it looks odd... 551 * Dump out the MDDT if it looks odd...
575 */ 552 */
576 if (mddtweird) { 553 if (mddtweird) {
577 printf("\n"); 554 printf("\n");
578 printf("complete memory cluster information:\n"); 555 printf("complete memory cluster information:\n");
579 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) { 556 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
580 printf("mddt %d:\n", i); 557 printf("mddt %d:\n", i);
581 printf("\tpfn %lx\n", 558 printf("\tpfn %lx\n",
582 mddtp->mddt_clusters[i].mddt_pfn); 559 mddtp->mddt_clusters[i].mddt_pfn);
583 printf("\tcnt %lx\n", 560 printf("\tcnt %lx\n",
584 mddtp->mddt_clusters[i].mddt_pg_cnt); 561 mddtp->mddt_clusters[i].mddt_pg_cnt);
585 printf("\ttest %lx\n", 562 printf("\ttest %lx\n",
586 mddtp->mddt_clusters[i].mddt_pg_test); 563 mddtp->mddt_clusters[i].mddt_pg_test);
587 printf("\tbva %lx\n", 564 printf("\tbva %lx\n",
588 mddtp->mddt_clusters[i].mddt_v_bitaddr); 565 mddtp->mddt_clusters[i].mddt_v_bitaddr);
589 printf("\tbpa %lx\n", 566 printf("\tbpa %lx\n",
590 mddtp->mddt_clusters[i].mddt_p_bitaddr); 567 mddtp->mddt_clusters[i].mddt_p_bitaddr);
591 printf("\tbcksum %lx\n", 568 printf("\tbcksum %lx\n",
592 mddtp->mddt_clusters[i].mddt_bit_cksum); 569 mddtp->mddt_clusters[i].mddt_bit_cksum);
593 printf("\tusage %lx\n", 570 printf("\tusage %lx\n",
594 mddtp->mddt_clusters[i].mddt_usage); 571 mddtp->mddt_clusters[i].mddt_usage);
595 } 572 }
596 printf("\n"); 573 printf("\n");
597 } 574 }
598 575
599 if (totalphysmem == 0) 576 if (totalphysmem == 0)
600 panic("can't happen: system seems to have no memory!"); 577 panic("can't happen: system seems to have no memory!");
601 maxmem = physmem; 578 maxmem = physmem;
602#if 0 579#if 0
603 printf("totalphysmem = %d\n", totalphysmem); 580 printf("totalphysmem = %d\n", totalphysmem);
604 printf("physmem = %lu\n", physmem); 581 printf("physmem = %lu\n", physmem);
605 printf("resvmem = %d\n", resvmem); 582 printf("resvmem = %d\n", resvmem);
606 printf("unusedmem = %d\n", unusedmem); 583 printf("unusedmem = %d\n", unusedmem);
607 printf("unknownmem = %d\n", unknownmem); 584 printf("unknownmem = %d\n", unknownmem);
608#endif 585#endif
609 586
610 /* 587 /*
611 * Initialize error message buffer (at end of core). 588 * Initialize error message buffer (at end of core).
612 */ 589 */
613 { 590 {
614 paddr_t end; 591 paddr_t end;
615 vsize_t sz = (vsize_t)round_page(MSGBUFSIZE); 592 vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
616 vsize_t reqsz = sz; 593 vsize_t reqsz = sz;
617 uvm_physseg_t bank; 594 uvm_physseg_t bank;
618 595
619 bank = uvm_physseg_get_last(); 596 bank = uvm_physseg_get_last();
620 597
621 /* shrink so that it'll fit in the last segment */ 598 /* shrink so that it'll fit in the last segment */
622 if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < atop(sz)) 599 if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < atop(sz))
623 sz = ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)); 600 sz = ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank));
624 601
625 end = uvm_physseg_get_end(bank); 602 end = uvm_physseg_get_end(bank);
626 end -= atop(sz); 603 end -= atop(sz);
627 604
628 uvm_physseg_unplug(end, atop(sz)); 605 uvm_physseg_unplug(end, atop(sz));
629 msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(end)); 606 msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(end));
630 607
631 initmsgbuf(msgbufaddr, sz); 608 initmsgbuf(msgbufaddr, sz);
632 609
633 /* warn if the message buffer had to be shrunk */ 610 /* warn if the message buffer had to be shrunk */
634 if (sz != reqsz) 611 if (sz != reqsz)
635 printf("WARNING: %ld bytes not available for msgbuf " 612 printf("WARNING: %ld bytes not available for msgbuf "
636 "in last cluster (%ld used)\n", reqsz, sz); 613 "in last cluster (%ld used)\n", reqsz, sz);
637 614
638 } 615 }
639 616
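
The message buffer is carved from the top of the last physical segment and
shrunk to fit when the segment is too small.  With hypothetical numbers (8 KB
pages, MSGBUFSIZE rounding to 4 pages, only 2 pages left in the last segment):

    unsigned long pgsz = 8192;
    unsigned long reqsz = 4 * pgsz;         /* requested: 32768 bytes */
    unsigned long avail_pages = 2;          /* what the segment has left */
    unsigned long sz = reqsz;

    if (avail_pages < sz / pgsz)
            sz = avail_pages * pgsz;        /* sz == 16384; WARNING prints */
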
640 /* 617 /*
641 * NOTE: It is safe to use uvm_pageboot_alloc() before 618 * NOTE: It is safe to use uvm_pageboot_alloc() before
642 * pmap_bootstrap() because our pmap_virtual_space() 619 * pmap_bootstrap() because our pmap_virtual_space()
643 * returns compile-time constants. 620 * returns compile-time constants.
644 */ 621 */
645 622
646 /* 623 /*
647 * Allocate uarea page for lwp0 and set it. 624 * Allocate uarea page for lwp0 and set it.
648 */ 625 */
649 v = uvm_pageboot_alloc(UPAGES * PAGE_SIZE); 626 v = uvm_pageboot_alloc(UPAGES * PAGE_SIZE);
650 uvm_lwp_setuarea(&lwp0, v); 627 uvm_lwp_setuarea(&lwp0, v);
651 628
652 /* 629 /*
653 * Initialize the virtual memory system, and set the 630 * Initialize the virtual memory system, and set the
654 * page table base register in proc 0's PCB. 631 * page table base register in proc 0's PCB.
655 */ 632 */
656 pmap_bootstrap(ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT), 633 pmap_bootstrap(ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT),
657 hwrpb->rpb_max_asn, hwrpb->rpb_pcs_cnt); 634 hwrpb->rpb_max_asn, hwrpb->rpb_pcs_cnt);
658 635
659 /* 636 /*
660 * Initialize the rest of lwp0's PCB and cache its physical address. 637 * Initialize the rest of lwp0's PCB and cache its physical address.
661 */ 638 */
662 pcb0 = lwp_getpcb(&lwp0); 639 pcb0 = lwp_getpcb(&lwp0);
663 lwp0.l_md.md_pcbpaddr = (void *)ALPHA_K0SEG_TO_PHYS((vaddr_t)pcb0); 640 lwp0.l_md.md_pcbpaddr = (void *)ALPHA_K0SEG_TO_PHYS((vaddr_t)pcb0);
664 641
665 /* 642 /*
666 * Set the kernel sp, reserving space for an (empty) trapframe, 643 * Set the kernel sp, reserving space for an (empty) trapframe,
667 * and make lwp0's trapframe pointer point to it for sanity. 644 * and make lwp0's trapframe pointer point to it for sanity.
668 */ 645 */
669 pcb0->pcb_hw.apcb_ksp = v + USPACE - sizeof(struct trapframe); 646 pcb0->pcb_hw.apcb_ksp = v + USPACE - sizeof(struct trapframe);
670 lwp0.l_md.md_tf = (struct trapframe *)pcb0->pcb_hw.apcb_ksp; 647 lwp0.l_md.md_tf = (struct trapframe *)pcb0->pcb_hw.apcb_ksp;
671 648
672 /* Indicate that lwp0 has a CPU. */ 649 /* Indicate that lwp0 has a CPU. */
673 lwp0.l_cpu = ci; 650 lwp0.l_cpu = ci;
674 651
675 /* 652 /*
676 * Look at arguments passed to us and compute boothowto. 653 * Look at arguments passed to us and compute boothowto.
677 */ 654 */
678 655
679 boothowto = RB_SINGLE; 656 boothowto = RB_SINGLE;
680#ifdef KADB 657#ifdef KADB
681 boothowto |= RB_KDB; 658 boothowto |= RB_KDB;
682#endif 659#endif
683 for (p = bootinfo.boot_flags; p && *p != '\0'; p++) { 660 for (p = bootinfo.boot_flags; p && *p != '\0'; p++) {
684 /* 661 /*
685 * Note that we'd really like to differentiate case here, 662 * Note that we'd really like to differentiate case here,
686 * but the Alpha AXP Architecture Reference Manual 663 * but the Alpha AXP Architecture Reference Manual
687 * says that we shouldn't. 664 * says that we shouldn't.
688 */ 665 */
689 switch (*p) { 666 switch (*p) {
690 case 'a': /* autoboot */ 667 case 'a': /* autoboot */
691 case 'A': 668 case 'A':
692 boothowto &= ~RB_SINGLE; 669 boothowto &= ~RB_SINGLE;
693 break; 670 break;
694 671
695#ifdef DEBUG 672#ifdef DEBUG
696 case 'c': /* crash dump immediately after autoconfig */ 673 case 'c': /* crash dump immediately after autoconfig */
697 case 'C': 674 case 'C':
698 boothowto |= RB_DUMP; 675 boothowto |= RB_DUMP;
699 break; 676 break;
700#endif 677#endif
701 678
702#if defined(KGDB) || defined(DDB) 679#if defined(KGDB) || defined(DDB)
703 case 'd': /* break into the kernel debugger ASAP */ 680 case 'd': /* break into the kernel debugger ASAP */
704 case 'D': 681 case 'D':
705 boothowto |= RB_KDB; 682 boothowto |= RB_KDB;
706 break; 683 break;
707#endif 684#endif
708 685
709 case 'h': /* always halt, never reboot */ 686 case 'h': /* always halt, never reboot */
710 case 'H': 687 case 'H':
711 boothowto |= RB_HALT; 688 boothowto |= RB_HALT;
712 break; 689 break;
713 690
714#if 0 691#if 0
715 case 'm': /* mini root present in memory */ 692 case 'm': /* mini root present in memory */
716 case 'M': 693 case 'M':
717 boothowto |= RB_MINIROOT; 694 boothowto |= RB_MINIROOT;
718 break; 695 break;
719#endif 696#endif
720 697
721 case 'n': /* askname */ 698 case 'n': /* askname */
722 case 'N': 699 case 'N':
723 boothowto |= RB_ASKNAME; 700 boothowto |= RB_ASKNAME;
724 break; 701 break;
725 702
726 case 's': /* single-user (default, supported for sanity) */ 703 case 's': /* single-user (default, supported for sanity) */
727 case 'S': 704 case 'S':
728 boothowto |= RB_SINGLE; 705 boothowto |= RB_SINGLE;
729 break; 706 break;
730 707
731 case 'q': /* quiet boot */ 708 case 'q': /* quiet boot */
732 case 'Q': 709 case 'Q':
733 boothowto |= AB_QUIET; 710 boothowto |= AB_QUIET;
734 break; 711 break;
735 712
736 case 'v': /* verbose boot */ 713 case 'v': /* verbose boot */
737 case 'V': 714 case 'V':
738 boothowto |= AB_VERBOSE; 715 boothowto |= AB_VERBOSE;
739 break; 716 break;
740 717
741 case '-': 718 case '-':
742 /* 719 /*
743 * Just ignore this. It's not required, but it's 720 * Just ignore this. It's not required, but it's
744 * common for it to be passed regardless. 721 * common for it to be passed regardless.
745 */ 722 */
746 break; 723 break;
747 724
748 default: 725 default:
749 printf("Unrecognized boot flag '%c'.\n", *p); 726 printf("Unrecognized boot flag '%c'.\n", *p);
750 break; 727 break;
751 } 728 }
752 } 729 }
753 730
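
As a worked example with a hypothetical flag string "aq": the default
RB_SINGLE is cleared by 'a' and AB_QUIET is set by 'q', yielding a quiet
multi-user autoboot (RB_SINGLE and AB_QUIET are from <sys/reboot.h>):

    int howto = RB_SINGLE;          /* default before the loop */
    howto &= ~RB_SINGLE;            /* 'a': autoboot */
    howto |= AB_QUIET;              /* 'q': quiet boot */
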
754 /* 731 /*
755 * Perform any initial kernel patches based on the running system. 732 * Perform any initial kernel patches based on the running system.
756 * We may perform more later if we attach additional CPUs. 733 * We may perform more later if we attach additional CPUs.
757 */ 734 */
758 alpha_patch(false); 735 alpha_patch(false);
759 736
760 /* 737 /*
761 * Figure out the number of CPUs in the box, from RPB fields. 738 * Figure out the number of CPUs in the box, from RPB fields.
762 * Really. We mean it. 739 * Really. We mean it.
763 */ 740 */
764 for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) { 741 for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
765 struct pcs *pcsp; 742 struct pcs *pcsp;
766 743
767 pcsp = LOCATE_PCS(hwrpb, i); 744 pcsp = LOCATE_PCS(hwrpb, i);
768 if ((pcsp->pcs_flags & PCS_PP) != 0) 745 if ((pcsp->pcs_flags & PCS_PP) != 0)
769 ncpus++; 746 ncpus++;
770 } 747 }
771 748
772 /* 749 /*
773 * Initialize debuggers, and break into them if appropriate. 750 * Initialize debuggers, and break into them if appropriate.
774 */ 751 */
775#if NKSYMS || defined(DDB) || defined(MODULAR) 752#if NKSYMS || defined(DDB) || defined(MODULAR)
776 ksyms_addsyms_elf((int)((uint64_t)ksym_end - (uint64_t)ksym_start), 753 ksyms_addsyms_elf((int)((uint64_t)ksym_end - (uint64_t)ksym_start),
777 ksym_start, ksym_end); 754 ksym_start, ksym_end);
778#endif 755#endif
779 756
780 if (boothowto & RB_KDB) { 757 if (boothowto & RB_KDB) {
781#if defined(KGDB) 758#if defined(KGDB)
782 kgdb_debug_init = 1; 759 kgdb_debug_init = 1;
783 kgdb_connect(1); 760 kgdb_connect(1);
784#elif defined(DDB) 761#elif defined(DDB)
785 Debugger(); 762 Debugger();
786#endif 763#endif
787 } 764 }
788 765
789#ifdef DIAGNOSTIC 766#ifdef DIAGNOSTIC
790 /* 767 /*
791 * Check our clock frequency, from RPB fields. 768 * Check our clock frequency, from RPB fields.
792 */ 769 */
793 if ((hwrpb->rpb_intr_freq >> 12) != 1024) 770 if ((hwrpb->rpb_intr_freq >> 12) != 1024)
794 printf("WARNING: unbelievable rpb_intr_freq: %ld (%d hz)\n", 771 printf("WARNING: unbelievable rpb_intr_freq: %ld (%d hz)\n",
795 hwrpb->rpb_intr_freq, hz); 772 hwrpb->rpb_intr_freq, hz);
796#endif 773#endif
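
A worked check of the DIAGNOSTIC test above, assuming the HWRPB
convention that rpb_intr_freq holds the interval-clock frequency
scaled by 4096: a 1024 Hz clock gives

	rpb_intr_freq = 1024 * 4096 = 4194304

so rpb_intr_freq >> 12 recovers 1024, and anything else triggers the
warning.
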
797} 774}
798 775
799#ifdef MODULAR 776#ifdef MODULAR
800/* Push any modules loaded by the boot loader */ 777/* Push any modules loaded by the boot loader */
801void 778void
802module_init_md(void) 779module_init_md(void)
803{ 780{
804 /* nada. */ 781 /* nada. */
805} 782}
806#endif /* MODULAR */ 783#endif /* MODULAR */
807 784
808void 785void
809consinit(void) 786consinit(void)
810{ 787{
811 788
812 /* 789 /*
813 * Everything related to console initialization is done 790 * Everything related to console initialization is done
814 * in alpha_init(). 791 * in alpha_init().
815 */ 792 */
816#if defined(DIAGNOSTIC) && defined(_PMAP_MAY_USE_PROM_CONSOLE) 793#if defined(DIAGNOSTIC) && defined(_PROM_MAY_USE_PROM_CONSOLE)
817 printf("consinit: %susing prom console\n", 794 printf("consinit: %susing prom console\n",
818 pmap_uses_prom_console() ? "" : "not "); 795 prom_uses_prom_console() ? "" : "not ");
819#endif 796#endif
820} 797}
821 798
822void 799void
823cpu_startup(void) 800cpu_startup(void)
824{ 801{
825 extern struct evcnt fpevent_use, fpevent_reuse; 802 extern struct evcnt fpevent_use, fpevent_reuse;
826 vaddr_t minaddr, maxaddr; 803 vaddr_t minaddr, maxaddr;
827 char pbuf[9]; 804 char pbuf[9];
828#if defined(DEBUG) 805#if defined(DEBUG)
829 extern int pmapdebug; 806 extern int pmapdebug;
830 int opmapdebug = pmapdebug; 807 int opmapdebug = pmapdebug;
831 808
832 pmapdebug = 0; 809 pmapdebug = 0;
833#endif 810#endif
834 811
835 /* 812 /*
836 * Good {morning,afternoon,evening,night}. 813 * Good {morning,afternoon,evening,night}.
837 */ 814 */
838 printf("%s%s", copyright, version); 815 printf("%s%s", copyright, version);
839 identifycpu(); 816 identifycpu();
840 format_bytes(pbuf, sizeof(pbuf), ptoa(totalphysmem)); 817 format_bytes(pbuf, sizeof(pbuf), ptoa(totalphysmem));
841 printf("total memory = %s\n", pbuf); 818 printf("total memory = %s\n", pbuf);
842 format_bytes(pbuf, sizeof(pbuf), ptoa(resvmem)); 819 format_bytes(pbuf, sizeof(pbuf), ptoa(resvmem));
843 printf("(%s reserved for PROM, ", pbuf); 820 printf("(%s reserved for PROM, ", pbuf);
844 format_bytes(pbuf, sizeof(pbuf), ptoa(physmem)); 821 format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
845 printf("%s used by NetBSD)\n", pbuf); 822 printf("%s used by NetBSD)\n", pbuf);
846 if (unusedmem) { 823 if (unusedmem) {
847 format_bytes(pbuf, sizeof(pbuf), ptoa(unusedmem)); 824 format_bytes(pbuf, sizeof(pbuf), ptoa(unusedmem));
848 printf("WARNING: unused memory = %s\n", pbuf); 825 printf("WARNING: unused memory = %s\n", pbuf);
849 } 826 }
850 if (unknownmem) { 827 if (unknownmem) {
851 format_bytes(pbuf, sizeof(pbuf), ptoa(unknownmem)); 828 format_bytes(pbuf, sizeof(pbuf), ptoa(unknownmem));
852 printf("WARNING: %s of memory with unknown purpose\n", pbuf); 829 printf("WARNING: %s of memory with unknown purpose\n", pbuf);
853 } 830 }
854 831
855 minaddr = 0; 832 minaddr = 0;
856 833
857 /* 834 /*
858 * Allocate a submap for physio 835 * Allocate a submap for physio
859 */ 836 */
860 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 837 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
861 VM_PHYS_SIZE, 0, false, NULL); 838 VM_PHYS_SIZE, 0, false, NULL);
862 839
863 /* 840 /*
864 * No need to allocate an mbuf cluster submap. Mbuf clusters 841 * No need to allocate an mbuf cluster submap. Mbuf clusters
865 * are allocated via the pool allocator, and we use K0SEG to 842 * are allocated via the pool allocator, and we use K0SEG to
866 * map those pages. 843 * map those pages.
867 */ 844 */
868 845
869#if defined(DEBUG) 846#if defined(DEBUG)
870 pmapdebug = opmapdebug; 847 pmapdebug = opmapdebug;
871#endif 848#endif
872 format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false))); 849 format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
873 printf("avail memory = %s\n", pbuf); 850 printf("avail memory = %s\n", pbuf);
874#if 0 851#if 0
875 { 852 {
876 extern u_long pmap_pages_stolen; 853 extern u_long pmap_pages_stolen;
877 854
878 format_bytes(pbuf, sizeof(pbuf), pmap_pages_stolen * PAGE_SIZE); 855 format_bytes(pbuf, sizeof(pbuf), pmap_pages_stolen * PAGE_SIZE);
879 printf("stolen memory for VM structures = %s\n", pbuf); 856 printf("stolen memory for VM structures = %s\n", pbuf);
880 } 857 }
881#endif 858#endif
882 859
883 /* 860 /*
884 * Set up the HWPCB so that it's safe to configure secondary 861 * Set up the HWPCB so that it's safe to configure secondary
885 * CPUs. 862 * CPUs.
886 */ 863 */
887 hwrpb_primary_init(); 864 hwrpb_primary_init();
888 865
889 /* 866 /*
890 * Initialize some trap event counters. 867 * Initialize some trap event counters.
891 */ 868 */
892 evcnt_attach_dynamic_nozero(&fpevent_use, EVCNT_TYPE_MISC, NULL, 869 evcnt_attach_dynamic_nozero(&fpevent_use, EVCNT_TYPE_MISC, NULL,
893 "FP", "proc use"); 870 "FP", "proc use");
894 evcnt_attach_dynamic_nozero(&fpevent_reuse, EVCNT_TYPE_MISC, NULL, 871 evcnt_attach_dynamic_nozero(&fpevent_reuse, EVCNT_TYPE_MISC, NULL,
895 "FP", "proc re-use"); 872 "FP", "proc re-use");
896} 873}
897 874
898/* 875/*
899 * Retrieve the platform name from the DSR. 876 * Retrieve the platform name from the DSR.
900 */ 877 */
901const char * 878const char *
902alpha_dsr_sysname(void) 879alpha_dsr_sysname(void)
903{ 880{
904 struct dsrdb *dsr; 881 struct dsrdb *dsr;
905 const char *sysname; 882 const char *sysname;
906 883
907 /* 884 /*
908 * DSR does not exist on early HWRPB versions. 885 * DSR does not exist on early HWRPB versions.
909 */ 886 */
910 if (hwrpb->rpb_version < HWRPB_DSRDB_MINVERS) 887 if (hwrpb->rpb_version < HWRPB_DSRDB_MINVERS)
911 return (NULL); 888 return (NULL);
912 889
913 dsr = (struct dsrdb *)(((char *)hwrpb) + hwrpb->rpb_dsrdb_off); 890 dsr = (struct dsrdb *)(((char *)hwrpb) + hwrpb->rpb_dsrdb_off);
914 sysname = (const char *)((char *)dsr + (dsr->dsr_sysname_off + 891 sysname = (const char *)((char *)dsr + (dsr->dsr_sysname_off +
915 sizeof(uint64_t))); 892 sizeof(uint64_t)));
916 return (sysname); 893 return (sysname);
917} 894}
918 895
919/* 896/*
 920 * Look up the specified system variation in the provided table, 897 * Look up the specified system variation in the provided table,
 921 * returning the model string on a match. 898 * returning the model string on a match.
922 */ 899 */
923const char * 900const char *
924alpha_variation_name(uint64_t variation, const struct alpha_variation_table *avtp) 901alpha_variation_name(uint64_t variation, const struct alpha_variation_table *avtp)
925{ 902{
926 int i; 903 int i;
927 904
928 for (i = 0; avtp[i].avt_model != NULL; i++) 905 for (i = 0; avtp[i].avt_model != NULL; i++)
929 if (avtp[i].avt_variation == variation) 906 if (avtp[i].avt_variation == variation)
930 return (avtp[i].avt_model); 907 return (avtp[i].avt_model);
931 return (NULL); 908 return (NULL);
932} 909}
933 910
934/* 911/*
 935 * Generate a default platform name for unknown system variations. 912 * Generate a default platform name for unknown system variations.
936 */ 913 */
937const char * 914const char *
938alpha_unknown_sysname(void) 915alpha_unknown_sysname(void)
939{ 916{
940 static char s[128]; /* safe size */ 917 static char s[128]; /* safe size */
941 918
942 snprintf(s, sizeof(s), "%s family, unknown model variation 0x%lx", 919 snprintf(s, sizeof(s), "%s family, unknown model variation 0x%lx",
943 platform.family, hwrpb->rpb_variation & SV_ST_MASK); 920 platform.family, hwrpb->rpb_variation & SV_ST_MASK);
944 return ((const char *)s); 921 return ((const char *)s);
945} 922}
946 923
947void 924void
948identifycpu(void) 925identifycpu(void)
949{ 926{
950 const char *s; 927 const char *s;
951 int i; 928 int i;
952 929
953 /* 930 /*
954 * print out CPU identification information. 931 * print out CPU identification information.
955 */ 932 */
956 printf("%s", cpu_getmodel()); 933 printf("%s", cpu_getmodel());
957 for(s = cpu_getmodel(); *s; ++s) 934 for(s = cpu_getmodel(); *s; ++s)
958 if(strncasecmp(s, "MHz", 3) == 0) 935 if(strncasecmp(s, "MHz", 3) == 0)
959 goto skipMHz; 936 goto skipMHz;
960 printf(", %ldMHz", hwrpb->rpb_cc_freq / 1000000); 937 printf(", %ldMHz", hwrpb->rpb_cc_freq / 1000000);
961skipMHz: 938skipMHz:
962 printf(", s/n "); 939 printf(", s/n ");
963 for (i = 0; i < 10; i++) 940 for (i = 0; i < 10; i++)
964 printf("%c", hwrpb->rpb_ssn[i]); 941 printf("%c", hwrpb->rpb_ssn[i]);
965 printf("\n"); 942 printf("\n");
966 printf("%ld byte page size, %d processor%s.\n", 943 printf("%ld byte page size, %d processor%s.\n",
967 hwrpb->rpb_page_size, ncpus, ncpus == 1 ? "" : "s"); 944 hwrpb->rpb_page_size, ncpus, ncpus == 1 ? "" : "s");
968#if 0 
969 /* this isn't defined for any systems that we run on? */ 
970 printf("serial number 0x%lx 0x%lx\n", 
971 ((long *)hwrpb->rpb_ssn)[0], ((long *)hwrpb->rpb_ssn)[1]); 
972 
973 /* and these aren't particularly useful! */ 
974 printf("variation: 0x%lx, revision 0x%lx\n", 
975 hwrpb->rpb_variation, *(long *)hwrpb->rpb_revision); 
976#endif 
977} 945}
978 946
979int waittime = -1; 947int waittime = -1;
980struct pcb dumppcb; 948struct pcb dumppcb;
981 949
982void 950void
983cpu_reboot(int howto, char *bootstr) 951cpu_reboot(int howto, char *bootstr)
984{ 952{
985#if defined(MULTIPROCESSOR) 953#if defined(MULTIPROCESSOR)
986 u_long cpu_id = cpu_number(); 954 u_long cpu_id = cpu_number();
987 u_long wait_mask; 955 u_long wait_mask;
988 int i; 956 int i;
989#endif 957#endif
990 958
991 /* If "always halt" was specified as a boot flag, obey. */ 959 /* If "always halt" was specified as a boot flag, obey. */
992 if ((boothowto & RB_HALT) != 0) 960 if ((boothowto & RB_HALT) != 0)
993 howto |= RB_HALT; 961 howto |= RB_HALT;
994 962
995 boothowto = howto; 963 boothowto = howto;
996 964
997 /* If system is cold, just halt. */ 965 /* If system is cold, just halt. */
998 if (cold) { 966 if (cold) {
999 boothowto |= RB_HALT; 967 boothowto |= RB_HALT;
1000 goto haltsys; 968 goto haltsys;
1001 } 969 }
1002 970
1003 if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) { 971 if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) {
1004 waittime = 0; 972 waittime = 0;
1005 vfs_shutdown(); 973 vfs_shutdown();
1006 /* 974 /*
1007 * If we've been adjusting the clock, the todr 975 * If we've been adjusting the clock, the todr
1008 * will be out of synch; adjust it now. 976 * will be out of synch; adjust it now.
1009 */ 977 */
1010 resettodr(); 978 resettodr();
1011 } 979 }
1012 980
1013 /* Disable interrupts. */ 981 /* Disable interrupts. */
1014 splhigh(); 982 splhigh();
1015 983
1016#if defined(MULTIPROCESSOR) 984#if defined(MULTIPROCESSOR)
1017 /* 985 /*
1018 * Halt all other CPUs. If we're not the primary, the 986 * Halt all other CPUs. If we're not the primary, the
1019 * primary will spin, waiting for us to halt. 987 * primary will spin, waiting for us to halt.
1020 */ 988 */
1021 cpu_id = cpu_number(); /* may have changed cpu */ 989 cpu_id = cpu_number(); /* may have changed cpu */
1022 wait_mask = (1UL << cpu_id) | (1UL << hwrpb->rpb_primary_cpu_id); 990 wait_mask = (1UL << cpu_id) | (1UL << hwrpb->rpb_primary_cpu_id);
1023 991
1024 alpha_broadcast_ipi(ALPHA_IPI_HALT); 992 alpha_broadcast_ipi(ALPHA_IPI_HALT);
1025 993
1026 /* Ensure any CPUs paused by DDB resume execution so they can halt */ 994 /* Ensure any CPUs paused by DDB resume execution so they can halt */
1027 cpus_paused = 0; 995 cpus_paused = 0;
1028 996
1029 for (i = 0; i < 10000; i++) { 997 for (i = 0; i < 10000; i++) {
1030 alpha_mb(); 998 alpha_mb();
1031 if (cpus_running == wait_mask) 999 if (cpus_running == wait_mask)
1032 break; 1000 break;
1033 delay(1000); 1001 delay(1000);
1034 } 1002 }
1035 alpha_mb(); 1003 alpha_mb();
1036 if (cpus_running != wait_mask) 1004 if (cpus_running != wait_mask)
1037 printf("WARNING: Unable to halt secondary CPUs (0x%lx)\n", 1005 printf("WARNING: Unable to halt secondary CPUs (0x%lx)\n",
1038 cpus_running); 1006 cpus_running);
1039#endif /* MULTIPROCESSOR */ 1007#endif /* MULTIPROCESSOR */
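
The spin above bounds how long we wait for the secondaries: 10000
iterations of delay(1000) is 10000 * 1000 us = 10 seconds before the
WARNING is printed and the shutdown proceeds anyway.
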
1040 1008
 1041 /* If rebooting and a dump is requested, do it. */ 1009 /* If rebooting and a dump is requested, do it. */
1042#if 0 1010#if 0
1043 if ((boothowto & (RB_DUMP | RB_HALT)) == RB_DUMP) 1011 if ((boothowto & (RB_DUMP | RB_HALT)) == RB_DUMP)
1044#else 1012#else
1045 if (boothowto & RB_DUMP) 1013 if (boothowto & RB_DUMP)
1046#endif 1014#endif
1047 dumpsys(); 1015 dumpsys();
1048 1016
1049haltsys: 1017haltsys:
1050 1018
1051 /* run any shutdown hooks */ 1019 /* run any shutdown hooks */
1052 doshutdownhooks(); 1020 doshutdownhooks();
1053 1021
1054 pmf_system_shutdown(boothowto); 1022 pmf_system_shutdown(boothowto);
1055 1023
1056#ifdef BOOTKEY 1024#ifdef BOOTKEY
1057 printf("hit any key to %s...\n", howto & RB_HALT ? "halt" : "reboot"); 1025 printf("hit any key to %s...\n", howto & RB_HALT ? "halt" : "reboot");
1058 cnpollc(1); /* for proper keyboard command handling */ 1026 cnpollc(1); /* for proper keyboard command handling */
1059 cngetc(); 1027 cngetc();
1060 cnpollc(0); 1028 cnpollc(0);
1061 printf("\n"); 1029 printf("\n");
1062#endif 1030#endif
1063 1031
1064 /* Finally, powerdown/halt/reboot the system. */ 1032 /* Finally, powerdown/halt/reboot the system. */
1065 if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN && 1033 if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN &&
1066 platform.powerdown != NULL) { 1034 platform.powerdown != NULL) {
1067 (*platform.powerdown)(); 1035 (*platform.powerdown)();
1068 printf("WARNING: powerdown failed!\n"); 1036 printf("WARNING: powerdown failed!\n");
1069 } 1037 }
1070 printf("%s\n\n", (boothowto & RB_HALT) ? "halted." : "rebooting..."); 1038 printf("%s\n\n", (boothowto & RB_HALT) ? "halted." : "rebooting...");
1071#if defined(MULTIPROCESSOR) 1039#if defined(MULTIPROCESSOR)
1072 if (cpu_id != hwrpb->rpb_primary_cpu_id) 1040 if (cpu_id != hwrpb->rpb_primary_cpu_id)
1073 cpu_halt(); 1041 cpu_halt();
1074 else 1042 else
1075#endif 1043#endif
1076 prom_halt(boothowto & RB_HALT); 1044 prom_halt(boothowto & RB_HALT);
1077 /*NOTREACHED*/ 1045 /*NOTREACHED*/
1078} 1046}
1079 1047
1080/* 1048/*
1081 * These variables are needed by /sbin/savecore 1049 * These variables are needed by /sbin/savecore
1082 */ 1050 */
1083uint32_t dumpmag = 0x8fca0101; /* magic number */ 1051uint32_t dumpmag = 0x8fca0101; /* magic number */
1084int dumpsize = 0; /* pages */ 1052int dumpsize = 0; /* pages */
1085long dumplo = 0; /* blocks */ 1053long dumplo = 0; /* blocks */
1086 1054
1087/* 1055/*
1088 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers. 1056 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
1089 */ 1057 */
1090int 1058int
1091cpu_dumpsize(void) 1059cpu_dumpsize(void)
1092{ 1060{
1093 int size; 1061 int size;
1094 1062
1095 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) + 1063 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
1096 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t)); 1064 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
1097 if (roundup(size, dbtob(1)) != dbtob(1)) 1065 if (roundup(size, dbtob(1)) != dbtob(1))
1098 return -1; 1066 return -1;
1099 1067
1100 return (1); 1068 return (1);
1101} 1069}
1102 1070
1103/* 1071/*
1104 * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped. 1072 * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped.
1105 */ 1073 */
1106u_long 1074u_long
1107cpu_dump_mempagecnt(void) 1075cpu_dump_mempagecnt(void)
1108{ 1076{
1109 u_long i, n; 1077 u_long i, n;
1110 1078
1111 n = 0; 1079 n = 0;
1112 for (i = 0; i < mem_cluster_cnt; i++) 1080 for (i = 0; i < mem_cluster_cnt; i++)
1113 n += atop(mem_clusters[i].size); 1081 n += atop(mem_clusters[i].size);
1114 return (n); 1082 return (n);
1115} 1083}
1116 1084
1117/* 1085/*
1118 * cpu_dump: dump machine-dependent kernel core dump headers. 1086 * cpu_dump: dump machine-dependent kernel core dump headers.
1119 */ 1087 */
1120int 1088int
1121cpu_dump(void) 1089cpu_dump(void)
1122{ 1090{
1123 int (*dump)(dev_t, daddr_t, void *, size_t); 1091 int (*dump)(dev_t, daddr_t, void *, size_t);
1124 char buf[dbtob(1)]; 1092 char buf[dbtob(1)];
1125 kcore_seg_t *segp; 1093 kcore_seg_t *segp;
1126 cpu_kcore_hdr_t *cpuhdrp; 1094 cpu_kcore_hdr_t *cpuhdrp;
1127 phys_ram_seg_t *memsegp; 1095 phys_ram_seg_t *memsegp;
1128 const struct bdevsw *bdev; 1096 const struct bdevsw *bdev;
1129 int i; 1097 int i;
1130 1098
1131 bdev = bdevsw_lookup(dumpdev); 1099 bdev = bdevsw_lookup(dumpdev);
1132 if (bdev == NULL) 1100 if (bdev == NULL)
1133 return (ENXIO); 1101 return (ENXIO);
1134 dump = bdev->d_dump; 1102 dump = bdev->d_dump;
1135 1103
1136 memset(buf, 0, sizeof buf); 1104 memset(buf, 0, sizeof buf);
1137 segp = (kcore_seg_t *)buf; 1105 segp = (kcore_seg_t *)buf;
1138 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))]; 1106 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
1139 memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) + 1107 memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) +
1140 ALIGN(sizeof(*cpuhdrp))]; 1108 ALIGN(sizeof(*cpuhdrp))];
1141 1109
1142 /* 1110 /*
1143 * Generate a segment header. 1111 * Generate a segment header.
1144 */ 1112 */
1145 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 1113 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1146 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp)); 1114 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
1147 1115
1148 /* 1116 /*
1149 * Add the machine-dependent header info. 1117 * Add the machine-dependent header info.
1150 */ 1118 */
1151 cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map); 1119 cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map);
1152 cpuhdrp->page_size = PAGE_SIZE; 1120 cpuhdrp->page_size = PAGE_SIZE;
1153 cpuhdrp->nmemsegs = mem_cluster_cnt; 1121 cpuhdrp->nmemsegs = mem_cluster_cnt;
1154 1122
1155 /* 1123 /*
1156 * Fill in the memory segment descriptors. 1124 * Fill in the memory segment descriptors.
1157 */ 1125 */
1158 for (i = 0; i < mem_cluster_cnt; i++) { 1126 for (i = 0; i < mem_cluster_cnt; i++) {
1159 memsegp[i].start = mem_clusters[i].start; 1127 memsegp[i].start = mem_clusters[i].start;
1160 memsegp[i].size = mem_clusters[i].size & ~PAGE_MASK; 1128 memsegp[i].size = mem_clusters[i].size & ~PAGE_MASK;
1161 } 1129 }
1162 1130
1163 return (dump(dumpdev, dumplo, (void *)buf, dbtob(1))); 1131 return (dump(dumpdev, dumplo, (void *)buf, dbtob(1)));
1164} 1132}
1165 1133
1166/* 1134/*
1167 * This is called by main to set dumplo and dumpsize. 1135 * This is called by main to set dumplo and dumpsize.
1168 * Dumps always skip the first PAGE_SIZE of disk space 1136 * Dumps always skip the first PAGE_SIZE of disk space
1169 * in case there might be a disk label stored there. 1137 * in case there might be a disk label stored there.
1170 * If there is extra space, put dump at the end to 1138 * If there is extra space, put dump at the end to
1171 * reduce the chance that swapping trashes it. 1139 * reduce the chance that swapping trashes it.
1172 */ 1140 */
1173void 1141void
1174cpu_dumpconf(void) 1142cpu_dumpconf(void)
1175{ 1143{
1176 int nblks, dumpblks; /* size of dump area */ 1144 int nblks, dumpblks; /* size of dump area */
1177 1145
1178 if (dumpdev == NODEV) 1146 if (dumpdev == NODEV)
1179 goto bad; 1147 goto bad;
1180 nblks = bdev_size(dumpdev); 1148 nblks = bdev_size(dumpdev);
1181 if (nblks <= ctod(1)) 1149 if (nblks <= ctod(1))
1182 goto bad; 1150 goto bad;
1183 1151
1184 dumpblks = cpu_dumpsize(); 1152 dumpblks = cpu_dumpsize();
1185 if (dumpblks < 0) 1153 if (dumpblks < 0)
1186 goto bad; 1154 goto bad;
1187 dumpblks += ctod(cpu_dump_mempagecnt()); 1155 dumpblks += ctod(cpu_dump_mempagecnt());
1188 1156
1189 /* If dump won't fit (incl. room for possible label), punt. */ 1157 /* If dump won't fit (incl. room for possible label), punt. */
1190 if (dumpblks > (nblks - ctod(1))) 1158 if (dumpblks > (nblks - ctod(1)))
1191 goto bad; 1159 goto bad;
1192 1160
1193 /* Put dump at end of partition */ 1161 /* Put dump at end of partition */
1194 dumplo = nblks - dumpblks; 1162 dumplo = nblks - dumpblks;
1195 1163
1196 /* dumpsize is in page units, and doesn't include headers. */ 1164 /* dumpsize is in page units, and doesn't include headers. */
1197 dumpsize = cpu_dump_mempagecnt(); 1165 dumpsize = cpu_dump_mempagecnt();
1198 return; 1166 return;
1199 1167
1200bad: 1168bad:
1201 dumpsize = 0; 1169 dumpsize = 0;
1202 return; 1170 return;
1203} 1171}
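
A worked example of the sizing above, with illustrative parameters:
512-byte disk blocks, 8 KB pages so that ctod(1) == 16, a 1 GB dump
partition, and 256 MB of dumpable RAM:

	nblks    = 1 GB / 512 bytes  = 2097152 blocks
	dumpblks = cpu_dumpsize()    = 1 block of headers
	         + ctod(32768 pages) = 524288 blocks
	                             = 524289 blocks total
	dumplo   = nblks - dumpblks  = 1572863

The dump lands at the end of the partition, and the ctod(1) blocks at
the front stay untouched for a possible disklabel.
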
1204 1172
1205/* 1173/*
1206 * Dump the kernel's image to the swap partition. 1174 * Dump the kernel's image to the swap partition.
1207 */ 1175 */
1208#define BYTES_PER_DUMP PAGE_SIZE 1176#define BYTES_PER_DUMP PAGE_SIZE
1209 1177
1210void 1178void
1211dumpsys(void) 1179dumpsys(void)
1212{ 1180{
1213 const struct bdevsw *bdev; 1181 const struct bdevsw *bdev;
1214 u_long totalbytesleft, bytes, i, n, memcl; 1182 u_long totalbytesleft, bytes, i, n, memcl;
1215 u_long maddr; 1183 u_long maddr;
1216 int psize; 1184 int psize;
1217 daddr_t blkno; 1185 daddr_t blkno;
1218 int (*dump)(dev_t, daddr_t, void *, size_t); 1186 int (*dump)(dev_t, daddr_t, void *, size_t);
1219 int error; 1187 int error;
1220 1188
1221 /* Save registers. */ 1189 /* Save registers. */
1222 savectx(&dumppcb); 1190 savectx(&dumppcb);
1223 1191
1224 if (dumpdev == NODEV) 1192 if (dumpdev == NODEV)
1225 return; 1193 return;
1226 bdev = bdevsw_lookup(dumpdev); 1194 bdev = bdevsw_lookup(dumpdev);
1227 if (bdev == NULL || bdev->d_psize == NULL) 1195 if (bdev == NULL || bdev->d_psize == NULL)
1228 return; 1196 return;
1229 1197
1230 /* 1198 /*
1231 * For dumps during autoconfiguration, 1199 * For dumps during autoconfiguration,
 1232 * if the dump device has already been configured... 1200 * if the dump device has already been configured...
1233 */ 1201 */
1234 if (dumpsize == 0) 1202 if (dumpsize == 0)
1235 cpu_dumpconf(); 1203 cpu_dumpconf();
1236 if (dumplo <= 0) { 1204 if (dumplo <= 0) {
1237 printf("\ndump to dev %u,%u not possible\n", 1205 printf("\ndump to dev %u,%u not possible\n",
1238 major(dumpdev), minor(dumpdev)); 1206 major(dumpdev), minor(dumpdev));
1239 return; 1207 return;
1240 } 1208 }
1241 printf("\ndumping to dev %u,%u offset %ld\n", 1209 printf("\ndumping to dev %u,%u offset %ld\n",
1242 major(dumpdev), minor(dumpdev), dumplo); 1210 major(dumpdev), minor(dumpdev), dumplo);
1243 1211
1244 psize = bdev_size(dumpdev); 1212 psize = bdev_size(dumpdev);
1245 printf("dump "); 1213 printf("dump ");
1246 if (psize == -1) { 1214 if (psize == -1) {
1247 printf("area unavailable\n"); 1215 printf("area unavailable\n");
1248 return; 1216 return;
1249 } 1217 }
1250 1218
1251 /* XXX should purge all outstanding keystrokes. */ 1219 /* XXX should purge all outstanding keystrokes. */
1252 1220
1253 if ((error = cpu_dump()) != 0) 1221 if ((error = cpu_dump()) != 0)
1254 goto err; 1222 goto err;
1255 1223
1256 totalbytesleft = ptoa(cpu_dump_mempagecnt()); 1224 totalbytesleft = ptoa(cpu_dump_mempagecnt());
1257 blkno = dumplo + cpu_dumpsize(); 1225 blkno = dumplo + cpu_dumpsize();
1258 dump = bdev->d_dump; 1226 dump = bdev->d_dump;
1259 error = 0; 1227 error = 0;
1260 1228
1261 for (memcl = 0; memcl < mem_cluster_cnt; memcl++) { 1229 for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
1262 maddr = mem_clusters[memcl].start; 1230 maddr = mem_clusters[memcl].start;
1263 bytes = mem_clusters[memcl].size & ~PAGE_MASK; 1231 bytes = mem_clusters[memcl].size & ~PAGE_MASK;
1264 1232
1265 for (i = 0; i < bytes; i += n, totalbytesleft -= n) { 1233 for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
1266 1234
 1267 /* Print out how many MB we have to go. */ 1235 /* Print out how many MB we have to go. */
1268 if ((totalbytesleft % (1024*1024)) == 0) 1236 if ((totalbytesleft % (1024*1024)) == 0)
1269 printf_nolog("%ld ", 1237 printf_nolog("%ld ",
1270 totalbytesleft / (1024 * 1024)); 1238 totalbytesleft / (1024 * 1024));
1271 1239
1272 /* Limit size for next transfer. */ 1240 /* Limit size for next transfer. */
1273 n = bytes - i; 1241 n = bytes - i;
1274 if (n > BYTES_PER_DUMP) 1242 if (n > BYTES_PER_DUMP)
1275 n = BYTES_PER_DUMP; 1243 n = BYTES_PER_DUMP;
1276 1244
1277 error = (*dump)(dumpdev, blkno, 1245 error = (*dump)(dumpdev, blkno,
1278 (void *)ALPHA_PHYS_TO_K0SEG(maddr), n); 1246 (void *)ALPHA_PHYS_TO_K0SEG(maddr), n);
1279 if (error) 1247 if (error)
1280 goto err; 1248 goto err;
1281 maddr += n; 1249 maddr += n;
1282 blkno += btodb(n); /* XXX? */ 1250 blkno += btodb(n); /* XXX? */
1283 1251
1284 /* XXX should look for keystrokes, to cancel. */ 1252 /* XXX should look for keystrokes, to cancel. */
1285 } 1253 }
1286 } 1254 }
1287 1255
1288err: 1256err:
1289 switch (error) { 1257 switch (error) {
1290 1258
1291 case ENXIO: 1259 case ENXIO:
1292 printf("device bad\n"); 1260 printf("device bad\n");
1293 break; 1261 break;
1294 1262
1295 case EFAULT: 1263 case EFAULT:
1296 printf("device not ready\n"); 1264 printf("device not ready\n");
1297 break; 1265 break;
1298 1266
1299 case EINVAL: 1267 case EINVAL:
1300 printf("area improper\n"); 1268 printf("area improper\n");
1301 break; 1269 break;
1302 1270
1303 case EIO: 1271 case EIO:
1304 printf("i/o error\n"); 1272 printf("i/o error\n");
1305 break; 1273 break;
1306 1274
1307 case EINTR: 1275 case EINTR:
1308 printf("aborted from console\n"); 1276 printf("aborted from console\n");
1309 break; 1277 break;
1310 1278
1311 case 0: 1279 case 0:
1312 printf("succeeded\n"); 1280 printf("succeeded\n");
1313 break; 1281 break;
1314 1282
1315 default: 1283 default:
1316 printf("error %d\n", error); 1284 printf("error %d\n", error);
1317 break; 1285 break;
1318 } 1286 }
1319 printf("\n\n"); 1287 printf("\n\n");
1320 delay(1000); 1288 delay(1000);
1321} 1289}
1322 1290
1323void 1291void
1324frametoreg(const struct trapframe *framep, struct reg *regp) 1292frametoreg(const struct trapframe *framep, struct reg *regp)
1325{ 1293{
1326 1294
1327 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0]; 1295 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0];
1328 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0]; 1296 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0];
1329 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1]; 1297 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1];
1330 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2]; 1298 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2];
1331 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3]; 1299 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3];
1332 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4]; 1300 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4];
1333 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5]; 1301 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5];
1334 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6]; 1302 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6];
1335 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7]; 1303 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7];
1336 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0]; 1304 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0];
1337 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1]; 1305 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1];
1338 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2]; 1306 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2];
1339 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3]; 1307 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3];
1340 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4]; 1308 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4];
1341 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5]; 1309 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5];
1342 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6]; 1310 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6];
1343 regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0]; 1311 regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0];
1344 regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1]; 1312 regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1];
1345 regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2]; 1313 regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2];
1346 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3]; 1314 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3];
1347 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4]; 1315 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4];
1348 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5]; 1316 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5];
1349 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8]; 1317 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8];
1350 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9]; 1318 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9];
1351 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10]; 1319 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10];
1352 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11]; 1320 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11];
1353 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA]; 1321 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA];
1354 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12]; 1322 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12];
1355 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT]; 1323 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT];
1356 regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP]; 1324 regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP];
1357 /* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */ 1325 /* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */
1358 regp->r_regs[R_ZERO] = 0; 1326 regp->r_regs[R_ZERO] = 0;
1359} 1327}
1360 1328
1361void 1329void
1362regtoframe(const struct reg *regp, struct trapframe *framep) 1330regtoframe(const struct reg *regp, struct trapframe *framep)
1363{ 1331{
1364 1332
1365 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0]; 1333 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0];
1366 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0]; 1334 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0];
1367 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1]; 1335 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1];
1368 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2]; 1336 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2];
1369 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3]; 1337 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3];
1370 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4]; 1338 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4];
1371 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5]; 1339 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5];
1372 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6]; 1340 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6];
1373 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7]; 1341 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7];
1374 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0]; 1342 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0];
1375 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1]; 1343 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1];
1376 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2]; 1344 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2];
1377 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3]; 1345 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3];
1378 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4]; 1346 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4];
1379 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5]; 1347 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5];
1380 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6]; 1348 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6];
1381 framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0]; 1349 framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0];
1382 framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1]; 1350 framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1];
1383 framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2]; 1351 framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2];
1384 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3]; 1352 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3];
1385 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4]; 1353 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4];
1386 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5]; 1354 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5];
1387 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8]; 1355 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8];
1388 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9]; 1356 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9];
1389 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10]; 1357 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10];
1390 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11]; 1358 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11];
1391 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA]; 1359 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA];
1392 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12]; 1360 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12];
1393 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT]; 1361 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT];
1394 framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP]; 1362 framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP];
1395 /* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */ 1363 /* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */
1396 /* ??? = regp->r_regs[R_ZERO]; */ 1364 /* ??? = regp->r_regs[R_ZERO]; */
1397} 1365}
1398 1366
1399void 1367void
1400printregs(struct reg *regp) 1368printregs(struct reg *regp)
1401{ 1369{
1402 int i; 1370 int i;
1403 1371
1404 for (i = 0; i < 32; i++) 1372 for (i = 0; i < 32; i++)
1405 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i], 1373 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i],
1406 i & 1 ? "\n" : "\t"); 1374 i & 1 ? "\n" : "\t");
1407} 1375}
1408 1376
1409void 1377void
1410regdump(struct trapframe *framep) 1378regdump(struct trapframe *framep)
1411{ 1379{
1412 struct reg reg; 1380 struct reg reg;
1413 1381
1414 frametoreg(framep, &reg); 1382 frametoreg(framep, &reg);
1415 reg.r_regs[R_SP] = alpha_pal_rdusp(); 1383 reg.r_regs[R_SP] = alpha_pal_rdusp();
1416 1384
1417 printf("REGISTERS:\n"); 1385 printf("REGISTERS:\n");
1418 printregs(&reg); 1386 printregs(&reg);
1419} 1387}
1420 1388
1421 1389
1422 1390
1423void * 1391void *
1424getframe(const struct lwp *l, int sig, int *onstack) 1392getframe(const struct lwp *l, int sig, int *onstack)
1425{ 1393{
1426 void *frame; 1394 void *frame;
1427 1395
1428 /* Do we need to jump onto the signal stack? */ 1396 /* Do we need to jump onto the signal stack? */
1429 *onstack = 1397 *onstack =
1430 (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 && 1398 (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
1431 (SIGACTION(l->l_proc, sig).sa_flags & SA_ONSTACK) != 0; 1399 (SIGACTION(l->l_proc, sig).sa_flags & SA_ONSTACK) != 0;
1432 1400
1433 if (*onstack) 1401 if (*onstack)
1434 frame = (void *)((char *)l->l_sigstk.ss_sp + 1402 frame = (void *)((char *)l->l_sigstk.ss_sp +
1435 l->l_sigstk.ss_size); 1403 l->l_sigstk.ss_size);
1436 else 1404 else
1437 frame = (void *)(alpha_pal_rdusp()); 1405 frame = (void *)(alpha_pal_rdusp());
1438 return (frame); 1406 return (frame);
1439} 1407}
1440 1408
1441void 1409void
1442buildcontext(struct lwp *l, const void *catcher, const void *tramp, const void *fp) 1410buildcontext(struct lwp *l, const void *catcher, const void *tramp, const void *fp)
1443{ 1411{
1444 struct trapframe *tf = l->l_md.md_tf; 1412 struct trapframe *tf = l->l_md.md_tf;
1445 1413
1446 tf->tf_regs[FRAME_RA] = (uint64_t)tramp; 1414 tf->tf_regs[FRAME_RA] = (uint64_t)tramp;
1447 tf->tf_regs[FRAME_PC] = (uint64_t)catcher; 1415 tf->tf_regs[FRAME_PC] = (uint64_t)catcher;
1448 tf->tf_regs[FRAME_T12] = (uint64_t)catcher; 1416 tf->tf_regs[FRAME_T12] = (uint64_t)catcher;
1449 alpha_pal_wrusp((unsigned long)fp); 1417 alpha_pal_wrusp((unsigned long)fp);
1450} 1418}
1451 1419
1452 1420
1453/* 1421/*
1454 * Send an interrupt to process, new style 1422 * Send an interrupt to process, new style
1455 */ 1423 */
1456void 1424void
1457sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask) 1425sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
1458{ 1426{
1459 struct lwp *l = curlwp; 1427 struct lwp *l = curlwp;
1460 struct proc *p = l->l_proc; 1428 struct proc *p = l->l_proc;
1461 struct sigacts *ps = p->p_sigacts; 1429 struct sigacts *ps = p->p_sigacts;
1462 int onstack, sig = ksi->ksi_signo, error; 1430 int onstack, sig = ksi->ksi_signo, error;
1463 struct sigframe_siginfo *fp, frame; 1431 struct sigframe_siginfo *fp, frame;
1464 struct trapframe *tf; 1432 struct trapframe *tf;
1465 sig_t catcher = SIGACTION(p, ksi->ksi_signo).sa_handler; 1433 sig_t catcher = SIGACTION(p, ksi->ksi_signo).sa_handler;
1466 1434
1467 fp = (struct sigframe_siginfo *)getframe(l,ksi->ksi_signo,&onstack); 1435 fp = (struct sigframe_siginfo *)getframe(l,ksi->ksi_signo,&onstack);
1468 tf = l->l_md.md_tf; 1436 tf = l->l_md.md_tf;
1469 1437
1470 /* Allocate space for the signal handler context. */ 1438 /* Allocate space for the signal handler context. */
1471 fp--; 1439 fp--;
1472 1440
1473#ifdef DEBUG 1441#ifdef DEBUG
1474 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) 1442 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1475 printf("sendsig_siginfo(%d): sig %d ssp %p usp %p\n", p->p_pid, 1443 printf("sendsig_siginfo(%d): sig %d ssp %p usp %p\n", p->p_pid,
1476 sig, &onstack, fp); 1444 sig, &onstack, fp);
1477#endif 1445#endif
1478 1446
1479 /* Build stack frame for signal trampoline. */ 1447 /* Build stack frame for signal trampoline. */
1480 memset(&frame, 0, sizeof(frame)); 1448 memset(&frame, 0, sizeof(frame));
1481 frame.sf_si._info = ksi->ksi_info; 1449 frame.sf_si._info = ksi->ksi_info;
1482 frame.sf_uc.uc_flags = _UC_SIGMASK; 1450 frame.sf_uc.uc_flags = _UC_SIGMASK;
1483 frame.sf_uc.uc_sigmask = *mask; 1451 frame.sf_uc.uc_sigmask = *mask;
1484 frame.sf_uc.uc_link = l->l_ctxlink; 1452 frame.sf_uc.uc_link = l->l_ctxlink;
1485 sendsig_reset(l, sig); 1453 sendsig_reset(l, sig);
1486 mutex_exit(p->p_lock); 1454 mutex_exit(p->p_lock);
1487 cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags); 1455 cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
1488 error = copyout(&frame, fp, sizeof(frame)); 1456 error = copyout(&frame, fp, sizeof(frame));
1489 mutex_enter(p->p_lock); 1457 mutex_enter(p->p_lock);
1490 1458
1491 if (error != 0) { 1459 if (error != 0) {
1492 /* 1460 /*
1493 * Process has trashed its stack; give it an illegal 1461 * Process has trashed its stack; give it an illegal
1494 * instruction to halt it in its tracks. 1462 * instruction to halt it in its tracks.
1495 */ 1463 */
1496#ifdef DEBUG 1464#ifdef DEBUG
1497 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) 1465 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1498 printf("sendsig_siginfo(%d): copyout failed on sig %d\n", 1466 printf("sendsig_siginfo(%d): copyout failed on sig %d\n",
1499 p->p_pid, sig); 1467 p->p_pid, sig);
1500#endif 1468#endif
1501 sigexit(l, SIGILL); 1469 sigexit(l, SIGILL);
1502 /* NOTREACHED */ 1470 /* NOTREACHED */
1503 } 1471 }
1504 1472
1505#ifdef DEBUG 1473#ifdef DEBUG
1506 if (sigdebug & SDB_FOLLOW) 1474 if (sigdebug & SDB_FOLLOW)
1507 printf("sendsig_siginfo(%d): sig %d usp %p code %x\n", 1475 printf("sendsig_siginfo(%d): sig %d usp %p code %x\n",
1508 p->p_pid, sig, fp, ksi->ksi_code); 1476 p->p_pid, sig, fp, ksi->ksi_code);
1509#endif 1477#endif
1510 1478
1511 /* 1479 /*
1512 * Set up the registers to directly invoke the signal handler. The 1480 * Set up the registers to directly invoke the signal handler. The
1513 * signal trampoline is then used to return from the signal. Note 1481 * signal trampoline is then used to return from the signal. Note
1514 * the trampoline version numbers are coordinated with machine- 1482 * the trampoline version numbers are coordinated with machine-
1515 * dependent code in libc. 1483 * dependent code in libc.
1516 */ 1484 */
1517 1485
1518 tf->tf_regs[FRAME_A0] = sig; 1486 tf->tf_regs[FRAME_A0] = sig;
1519 tf->tf_regs[FRAME_A1] = (uint64_t)&fp->sf_si; 1487 tf->tf_regs[FRAME_A1] = (uint64_t)&fp->sf_si;
1520 tf->tf_regs[FRAME_A2] = (uint64_t)&fp->sf_uc; 1488 tf->tf_regs[FRAME_A2] = (uint64_t)&fp->sf_uc;
1521 1489
1522 buildcontext(l,catcher,ps->sa_sigdesc[sig].sd_tramp,fp); 1490 buildcontext(l,catcher,ps->sa_sigdesc[sig].sd_tramp,fp);
1523 1491
1524 /* Remember that we're now on the signal stack. */ 1492 /* Remember that we're now on the signal stack. */
1525 if (onstack) 1493 if (onstack)
1526 l->l_sigstk.ss_flags |= SS_ONSTACK; 1494 l->l_sigstk.ss_flags |= SS_ONSTACK;
1527 1495
1528#ifdef DEBUG 1496#ifdef DEBUG
1529 if (sigdebug & SDB_FOLLOW) 1497 if (sigdebug & SDB_FOLLOW)
1530 printf("sendsig_siginfo(%d): pc %lx, catcher %lx\n", p->p_pid, 1498 printf("sendsig_siginfo(%d): pc %lx, catcher %lx\n", p->p_pid,
1531 tf->tf_regs[FRAME_PC], tf->tf_regs[FRAME_A3]); 1499 tf->tf_regs[FRAME_PC], tf->tf_regs[FRAME_A3]);
1532 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) 1500 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1533 printf("sendsig_siginfo(%d): sig %d returns\n", 1501 printf("sendsig_siginfo(%d): sig %d returns\n",
1534 p->p_pid, sig); 1502 p->p_pid, sig);
1535#endif 1503#endif
1536} 1504}
1537 1505
1538/* 1506/*
 1539 * machine-dependent system variables. 1507 * machine-dependent system variables.
1540 */ 1508 */
1541SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup") 1509SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
1542{ 1510{
1543 1511
1544 sysctl_createv(clog, 0, NULL, NULL, 1512 sysctl_createv(clog, 0, NULL, NULL,
1545 CTLFLAG_PERMANENT, 1513 CTLFLAG_PERMANENT,
1546 CTLTYPE_NODE, "machdep", NULL, 1514 CTLTYPE_NODE, "machdep", NULL,
1547 NULL, 0, NULL, 0, 1515 NULL, 0, NULL, 0,
1548 CTL_MACHDEP, CTL_EOL); 1516 CTL_MACHDEP, CTL_EOL);
1549 1517
1550 sysctl_createv(clog, 0, NULL, NULL, 1518 sysctl_createv(clog, 0, NULL, NULL,
1551 CTLFLAG_PERMANENT, 1519 CTLFLAG_PERMANENT,
1552 CTLTYPE_STRUCT, "console_device", NULL, 1520 CTLTYPE_STRUCT, "console_device", NULL,
1553 sysctl_consdev, 0, NULL, sizeof(dev_t), 1521 sysctl_consdev, 0, NULL, sizeof(dev_t),
1554 CTL_MACHDEP, CPU_CONSDEV, CTL_EOL); 1522 CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
1555 sysctl_createv(clog, 0, NULL, NULL, 1523 sysctl_createv(clog, 0, NULL, NULL,
1556 CTLFLAG_PERMANENT, 1524 CTLFLAG_PERMANENT,
1557 CTLTYPE_STRING, "root_device", NULL, 1525 CTLTYPE_STRING, "root_device", NULL,
1558 sysctl_root_device, 0, NULL, 0, 1526 sysctl_root_device, 0, NULL, 0,
1559 CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL); 1527 CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
1560 sysctl_createv(clog, 0, NULL, NULL, 1528 sysctl_createv(clog, 0, NULL, NULL,
1561 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1529 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1562 CTLTYPE_INT, "unaligned_print", 1530 CTLTYPE_INT, "unaligned_print",
1563 SYSCTL_DESCR("Warn about unaligned accesses"), 1531 SYSCTL_DESCR("Warn about unaligned accesses"),
1564 NULL, 0, &alpha_unaligned_print, 0, 1532 NULL, 0, &alpha_unaligned_print, 0,
1565 CTL_MACHDEP, CPU_UNALIGNED_PRINT, CTL_EOL); 1533 CTL_MACHDEP, CPU_UNALIGNED_PRINT, CTL_EOL);
1566 sysctl_createv(clog, 0, NULL, NULL, 1534 sysctl_createv(clog, 0, NULL, NULL,
1567 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1535 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1568 CTLTYPE_INT, "unaligned_fix", 1536 CTLTYPE_INT, "unaligned_fix",
1569 SYSCTL_DESCR("Fix up unaligned accesses"), 1537 SYSCTL_DESCR("Fix up unaligned accesses"),
1570 NULL, 0, &alpha_unaligned_fix, 0, 1538 NULL, 0, &alpha_unaligned_fix, 0,
1571 CTL_MACHDEP, CPU_UNALIGNED_FIX, CTL_EOL); 1539 CTL_MACHDEP, CPU_UNALIGNED_FIX, CTL_EOL);
1572 sysctl_createv(clog, 0, NULL, NULL, 1540 sysctl_createv(clog, 0, NULL, NULL,
1573 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1541 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1574 CTLTYPE_INT, "unaligned_sigbus", 1542 CTLTYPE_INT, "unaligned_sigbus",
1575 SYSCTL_DESCR("Do SIGBUS for fixed unaligned accesses"), 1543 SYSCTL_DESCR("Do SIGBUS for fixed unaligned accesses"),
1576 NULL, 0, &alpha_unaligned_sigbus, 0, 1544 NULL, 0, &alpha_unaligned_sigbus, 0,
1577 CTL_MACHDEP, CPU_UNALIGNED_SIGBUS, CTL_EOL); 1545 CTL_MACHDEP, CPU_UNALIGNED_SIGBUS, CTL_EOL);
1578 sysctl_createv(clog, 0, NULL, NULL, 1546 sysctl_createv(clog, 0, NULL, NULL,
1579 CTLFLAG_PERMANENT, 1547 CTLFLAG_PERMANENT,
1580 CTLTYPE_STRING, "booted_kernel", NULL, 1548 CTLTYPE_STRING, "booted_kernel", NULL,
1581 NULL, 0, bootinfo.booted_kernel, 0, 1549 NULL, 0, bootinfo.booted_kernel, 0,
1582 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL); 1550 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
1583 sysctl_createv(clog, 0, NULL, NULL, 1551 sysctl_createv(clog, 0, NULL, NULL,
1584 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1552 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1585 CTLTYPE_INT, "fp_sync_complete", NULL, 1553 CTLTYPE_INT, "fp_sync_complete", NULL,
1586 NULL, 0, &alpha_fp_sync_complete, 0, 1554 NULL, 0, &alpha_fp_sync_complete, 0,
1587 CTL_MACHDEP, CPU_FP_SYNC_COMPLETE, CTL_EOL); 1555 CTL_MACHDEP, CPU_FP_SYNC_COMPLETE, CTL_EOL);
1588} 1556}
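
Once created, these nodes are reachable with sysctl(8); for example,
with illustrative values:

	$ sysctl machdep.unaligned_print
	machdep.unaligned_print = 1
	# sysctl -w machdep.unaligned_sigbus=1
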
1589 1557
1590/* 1558/*
1591 * Set registers on exec. 1559 * Set registers on exec.
1592 */ 1560 */
1593void 1561void
1594setregs(register struct lwp *l, struct exec_package *pack, vaddr_t stack) 1562setregs(register struct lwp *l, struct exec_package *pack, vaddr_t stack)
1595{ 1563{
1596 struct trapframe *tfp = l->l_md.md_tf; 1564 struct trapframe *tfp = l->l_md.md_tf;
1597 struct pcb *pcb; 1565 struct pcb *pcb;
1598#ifdef DEBUG 1566#ifdef DEBUG
1599 int i; 1567 int i;
1600#endif 1568#endif
1601 1569
1602#ifdef DEBUG 1570#ifdef DEBUG
1603 /* 1571 /*
1604 * Crash and dump, if the user requested it. 1572 * Crash and dump, if the user requested it.
1605 */ 1573 */
1606 if (boothowto & RB_DUMP) 1574 if (boothowto & RB_DUMP)
1607 panic("crash requested by boot flags"); 1575 panic("crash requested by boot flags");
1608#endif 1576#endif
1609 1577
1610#ifdef DEBUG 1578#ifdef DEBUG
1611 for (i = 0; i < FRAME_SIZE; i++) 1579 for (i = 0; i < FRAME_SIZE; i++)
1612 tfp->tf_regs[i] = 0xbabefacedeadbeef; 1580 tfp->tf_regs[i] = 0xbabefacedeadbeef;
1613#else 1581#else
1614 memset(tfp->tf_regs, 0, FRAME_SIZE * sizeof tfp->tf_regs[0]); 1582 memset(tfp->tf_regs, 0, FRAME_SIZE * sizeof tfp->tf_regs[0]);
1615#endif 1583#endif
1616 pcb = lwp_getpcb(l); 1584 pcb = lwp_getpcb(l);
1617 memset(&pcb->pcb_fp, 0, sizeof(pcb->pcb_fp)); 1585 memset(&pcb->pcb_fp, 0, sizeof(pcb->pcb_fp));
1618 alpha_pal_wrusp(stack); 1586 alpha_pal_wrusp(stack);
1619 tfp->tf_regs[FRAME_PS] = ALPHA_PSL_USERSET; 1587 tfp->tf_regs[FRAME_PS] = ALPHA_PSL_USERSET;
1620 tfp->tf_regs[FRAME_PC] = pack->ep_entry & ~3; 1588 tfp->tf_regs[FRAME_PC] = pack->ep_entry & ~3;
1621 1589
1622 tfp->tf_regs[FRAME_A0] = stack; /* a0 = sp */ 1590 tfp->tf_regs[FRAME_A0] = stack; /* a0 = sp */
1623 tfp->tf_regs[FRAME_A1] = 0; /* a1 = rtld cleanup */ 1591 tfp->tf_regs[FRAME_A1] = 0; /* a1 = rtld cleanup */
1624 tfp->tf_regs[FRAME_A2] = 0; /* a2 = rtld object */ 1592 tfp->tf_regs[FRAME_A2] = 0; /* a2 = rtld object */
1625 tfp->tf_regs[FRAME_A3] = l->l_proc->p_psstrp; /* a3 = ps_strings */ 1593 tfp->tf_regs[FRAME_A3] = l->l_proc->p_psstrp; /* a3 = ps_strings */
1626 tfp->tf_regs[FRAME_T12] = tfp->tf_regs[FRAME_PC]; /* a.k.a. PV */ 1594 tfp->tf_regs[FRAME_T12] = tfp->tf_regs[FRAME_PC]; /* a.k.a. PV */
1627 1595
1628 if (__predict_true((l->l_md.md_flags & IEEE_INHERIT) == 0)) { 1596 if (__predict_true((l->l_md.md_flags & IEEE_INHERIT) == 0)) {
1629 l->l_md.md_flags &= ~MDLWP_FP_C; 1597 l->l_md.md_flags &= ~MDLWP_FP_C;
1630 pcb->pcb_fp.fpr_cr = FPCR_DYN(FP_RN); 1598 pcb->pcb_fp.fpr_cr = FPCR_DYN(FP_RN);
1631 } 1599 }
1632} 1600}
1633 1601
1634/* 1602/*
1635 * Wait "n" microseconds. 1603 * Wait "n" microseconds.
1636 */ 1604 */
1637void 1605void
1638delay(unsigned long n) 1606delay(unsigned long n)
1639{ 1607{
1640 unsigned long pcc0, pcc1, curcycle, cycles, usec; 1608 unsigned long pcc0, pcc1, curcycle, cycles, usec;
1641 1609
1642 if (n == 0) 1610 if (n == 0)
1643 return; 1611 return;
1644 1612
1645 pcc0 = alpha_rpcc() & 0xffffffffUL; 1613 pcc0 = alpha_rpcc() & 0xffffffffUL;
1646 cycles = 0; 1614 cycles = 0;
1647 usec = 0; 1615 usec = 0;
1648 1616
1649 while (usec <= n) { 1617 while (usec <= n) {
1650 /* 1618 /*
 1651 * Get the next CPU cycle count; this assumes that we cannot 1619 * Get the next CPU cycle count; this assumes that we cannot
 1652 * have had more than one 32-bit overflow. 1620 * have had more than one 32-bit overflow.
1653 */ 1621 */
1654 pcc1 = alpha_rpcc() & 0xffffffffUL; 1622 pcc1 = alpha_rpcc() & 0xffffffffUL;
1655 if (pcc1 < pcc0) 1623 if (pcc1 < pcc0)
1656 curcycle = (pcc1 + 0x100000000UL) - pcc0; 1624 curcycle = (pcc1 + 0x100000000UL) - pcc0;
1657 else 1625 else
1658 curcycle = pcc1 - pcc0; 1626 curcycle = pcc1 - pcc0;
1659 1627
1660 /* 1628 /*
1661 * We now have the number of processor cycles since we 1629 * We now have the number of processor cycles since we
1662 * last checked. Add the current cycle count to the 1630 * last checked. Add the current cycle count to the
1663 * running total. If it's over cycles_per_usec, increment 1631 * running total. If it's over cycles_per_usec, increment
1664 * the usec counter. 1632 * the usec counter.
1665 */ 1633 */
1666 cycles += curcycle; 1634 cycles += curcycle;
1667 while (cycles > cycles_per_usec) { 1635 while (cycles > cycles_per_usec) {
1668 usec++; 1636 usec++;
1669 cycles -= cycles_per_usec; 1637 cycles -= cycles_per_usec;
1670 } 1638 }
1671 pcc0 = pcc1; 1639 pcc0 = pcc1;
1672 } 1640 }
1673} 1641}
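
A minimal user-space sketch of the wrap handling in delay(); this is
not kernel code, and pcc_delta() is a hypothetical helper. Because
only the low 32 bits of the PCC are meaningful, the if/else above can
equivalently be a single masked subtraction in modulo-2^32 arithmetic:

	#include <stdio.h>

	static unsigned long
	pcc_delta(unsigned long pcc0, unsigned long pcc1)
	{
		/* Equivalent to the branch in delay(); wraps at 2^32. */
		return (pcc1 - pcc0) & 0xffffffffUL;
	}

	int
	main(void)
	{
		/* Wrapped case: 0x10 + 2^32 - 0xfffffff0 = 0x20. */
		printf("0x%lx\n", pcc_delta(0xfffffff0UL, 0x10UL));
		return 0;
	}
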
1674 1642
1675#ifdef EXEC_ECOFF 1643#ifdef EXEC_ECOFF
1676void 1644void
1677cpu_exec_ecoff_setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack) 1645cpu_exec_ecoff_setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack)
1678{ 1646{
1679 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr; 1647 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
1680 1648
1681 l->l_md.md_tf->tf_regs[FRAME_GP] = execp->a.gp_value; 1649 l->l_md.md_tf->tf_regs[FRAME_GP] = execp->a.gp_value;
1682} 1650}
1683 1651
1684/* 1652/*
 1685 * cpu_exec_ecoff_probe(): 1653 * cpu_exec_ecoff_probe():
1686 * cpu-dependent ECOFF format hook for execve(). 1654 * cpu-dependent ECOFF format hook for execve().
1687 * 1655 *
1688 * Do any machine-dependent diddling of the exec package when doing ECOFF. 1656 * Do any machine-dependent diddling of the exec package when doing ECOFF.
1689 * 1657 *
1690 */ 1658 */
1691int 1659int
1692cpu_exec_ecoff_probe(struct lwp *l, struct exec_package *epp) 1660cpu_exec_ecoff_probe(struct lwp *l, struct exec_package *epp)
1693{ 1661{
1694 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr; 1662 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
1695 int error; 1663 int error;
1696 1664
1697 if (execp->f.f_magic == ECOFF_MAGIC_NETBSD_ALPHA) 1665 if (execp->f.f_magic == ECOFF_MAGIC_NETBSD_ALPHA)
1698 error = 0; 1666 error = 0;
1699 else 1667 else
1700 error = ENOEXEC; 1668 error = ENOEXEC;
1701 1669
1702 return (error); 1670 return (error);
1703} 1671}
1704#endif /* EXEC_ECOFF */ 1672#endif /* EXEC_ECOFF */
1705 1673
1706int 1674int
1707mm_md_physacc(paddr_t pa, vm_prot_t prot) 1675mm_md_physacc(paddr_t pa, vm_prot_t prot)
1708{ 1676{
1709 u_quad_t size; 1677 u_quad_t size;
1710 int i; 1678 int i;
1711 1679
1712 for (i = 0; i < mem_cluster_cnt; i++) { 1680 for (i = 0; i < mem_cluster_cnt; i++) {
1713 if (pa < mem_clusters[i].start) 1681 if (pa < mem_clusters[i].start)
1714 continue; 1682 continue;
1715 size = mem_clusters[i].size & ~PAGE_MASK; 1683 size = mem_clusters[i].size & ~PAGE_MASK;
1716 if (pa >= (mem_clusters[i].start + size)) 1684 if (pa >= (mem_clusters[i].start + size))
1717 continue; 1685 continue;
1718 if ((prot & mem_clusters[i].size & PAGE_MASK) == prot) 1686 if ((prot & mem_clusters[i].size & PAGE_MASK) == prot)
1719 return 0; 1687 return 0;
1720 } 1688 }
1721 return EFAULT; 1689 return EFAULT;
1722} 1690}
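
The protection test above relies on how mem_clusters[] is packed:
cluster sizes are page-aligned, so the low PAGE_MASK bits of .size are
free to carry the access permissions for the range. A stand-alone
illustration, with hypothetical values and 8 KB pages assumed:

	#include <stdio.h>

	#define PAGE_MASK	0x1fffUL	/* 8 KB pages, as on alpha */

	int
	main(void)
	{
		/* 64 KB cluster, read|write (0x3) packed into low bits. */
		unsigned long size_and_prot = 0x10000UL | 0x3UL;

		printf("size = 0x%lx\n", size_and_prot & ~PAGE_MASK);
		printf("prot = 0x%lx\n", size_and_prot & PAGE_MASK);
		return 0;
	}
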
1723 1691
1724bool 1692bool
1725mm_md_direct_mapped_io(void *addr, paddr_t *paddr) 1693mm_md_direct_mapped_io(void *addr, paddr_t *paddr)
1726{ 1694{
1727 vaddr_t va = (vaddr_t)addr; 1695 vaddr_t va = (vaddr_t)addr;
1728 1696
1729 if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) { 1697 if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
1730 *paddr = ALPHA_K0SEG_TO_PHYS(va); 1698 *paddr = ALPHA_K0SEG_TO_PHYS(va);
1731 return true; 1699 return true;
1732 } 1700 }
1733 return false; 1701 return false;
1734} 1702}
1735 1703
1736bool 1704bool
1737mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr) 1705mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
1738{ 1706{
1739 1707
1740 *vaddr = ALPHA_PHYS_TO_K0SEG(paddr); 1708 *vaddr = ALPHA_PHYS_TO_K0SEG(paddr);
1741 return true; 1709 return true;
1742} 1710}
1743 1711
1744/* XXX XXX BEGIN XXX XXX */ 1712/* XXX XXX BEGIN XXX XXX */
1745paddr_t alpha_XXX_dmamap_or; /* XXX */ 1713paddr_t alpha_XXX_dmamap_or; /* XXX */
1746 /* XXX */ 1714 /* XXX */
1747paddr_t /* XXX */ 1715paddr_t /* XXX */
1748alpha_XXX_dmamap(vaddr_t v) /* XXX */ 1716alpha_XXX_dmamap(vaddr_t v) /* XXX */
1749{ /* XXX */ 1717{ /* XXX */
1750 /* XXX */ 1718 /* XXX */
1751 return (vtophys(v) | alpha_XXX_dmamap_or); /* XXX */ 1719 return (vtophys(v) | alpha_XXX_dmamap_or); /* XXX */
1752} /* XXX */ 1720} /* XXX */
1753/* XXX XXX END XXX XXX */ 1721/* XXX XXX END XXX XXX */
1754 1722
1755char * 1723char *
1756dot_conv(unsigned long x) 1724dot_conv(unsigned long x)
1757{ 1725{
1758 int i; 1726 int i;
1759 char *xc; 1727 char *xc;
1760 static int next; 1728 static int next;
1761 static char space[2][20]; 1729 static char space[2][20];
1762 1730
1763 xc = space[next ^= 1] + sizeof space[0]; 1731 xc = space[next ^= 1] + sizeof space[0];
1764 *--xc = '\0'; 1732 *--xc = '\0';
1765 for (i = 0;; ++i) { 1733 for (i = 0;; ++i) {
1766 if (i && (i & 3) == 0) 1734 if (i && (i & 3) == 0)
1767 *--xc = '.'; 1735 *--xc = '.';
1768 *--xc = hexdigits[x & 0xf]; 1736 *--xc = hexdigits[x & 0xf];
1769 x >>= 4; 1737 x >>= 4;
1770 if (x == 0) 1738 if (x == 0)
1771 break; 1739 break;
1772 } 1740 }
1773 return xc; 1741 return xc;
1774} 1742}
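
For reference, dot_conv() renders a 64-bit value as hex with a dot
every four digits; dot_conv(0xfffffc0000000000) yields
"ffff.fc00.0000.0000". The two rotating static buffers are deliberate:
a single printf() can safely carry two dot_conv() results, e.g.
(arguments hypothetical)

	printf("range %s..%s\n", dot_conv(base), dot_conv(end));

but a third call in the same statement would overwrite the first.
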
1775 1743
1776void 1744void
1777cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags) 1745cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
1778{ 1746{
1779 struct trapframe *frame = l->l_md.md_tf; 1747 struct trapframe *frame = l->l_md.md_tf;
1780 struct pcb *pcb = lwp_getpcb(l); 1748 struct pcb *pcb = lwp_getpcb(l);
1781 __greg_t *gr = mcp->__gregs; 1749 __greg_t *gr = mcp->__gregs;
1782 __greg_t ras_pc; 1750 __greg_t ras_pc;
1783 1751
1784 /* Save register context. */ 1752 /* Save register context. */
1785 frametoreg(frame, (struct reg *)gr); 1753 frametoreg(frame, (struct reg *)gr);
1786 /* XXX if there's a better, general way to get the USP of 1754 /* XXX if there's a better, general way to get the USP of
1787 * an LWP that might or might not be curlwp, I'd like to know 1755 * an LWP that might or might not be curlwp, I'd like to know
1788 * about it. 1756 * about it.
1789 */ 1757 */
1790 if (l == curlwp) { 1758 if (l == curlwp) {
1791 gr[_REG_SP] = alpha_pal_rdusp(); 1759 gr[_REG_SP] = alpha_pal_rdusp();
1792 gr[_REG_UNIQUE] = alpha_pal_rdunique(); 1760 gr[_REG_UNIQUE] = alpha_pal_rdunique();
1793 } else { 1761 } else {
1794 gr[_REG_SP] = pcb->pcb_hw.apcb_usp; 1762 gr[_REG_SP] = pcb->pcb_hw.apcb_usp;
1795 gr[_REG_UNIQUE] = pcb->pcb_hw.apcb_unique; 1763 gr[_REG_UNIQUE] = pcb->pcb_hw.apcb_unique;
1796 } 1764 }
1797 gr[_REG_PC] = frame->tf_regs[FRAME_PC]; 1765 gr[_REG_PC] = frame->tf_regs[FRAME_PC];
1798 gr[_REG_PS] = frame->tf_regs[FRAME_PS]; 1766 gr[_REG_PS] = frame->tf_regs[FRAME_PS];
1799 1767
1800 if ((ras_pc = (__greg_t)ras_lookup(l->l_proc, 1768 if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
1801 (void *) gr[_REG_PC])) != -1) 1769 (void *) gr[_REG_PC])) != -1)
1802 gr[_REG_PC] = ras_pc; 1770 gr[_REG_PC] = ras_pc;
1803 1771
1804 *flags |= _UC_CPU | _UC_TLSBASE; 1772 *flags |= _UC_CPU | _UC_TLSBASE;
1805 1773
1806 /* Save floating point register context, if any, and copy it. */ 1774 /* Save floating point register context, if any, and copy it. */
1807 if (fpu_valid_p(l)) { 1775 if (fpu_valid_p(l)) {
1808 fpu_save(l); 1776 fpu_save(l);
1809 (void)memcpy(&mcp->__fpregs, &pcb->pcb_fp, 1777 (void)memcpy(&mcp->__fpregs, &pcb->pcb_fp,
1810 sizeof (mcp->__fpregs)); 1778 sizeof (mcp->__fpregs));
1811 mcp->__fpregs.__fp_fpcr = alpha_read_fp_c(l); 1779 mcp->__fpregs.__fp_fpcr = alpha_read_fp_c(l);
1812 *flags |= _UC_FPU; 1780 *flags |= _UC_FPU;
1813 } 1781 }
1814} 1782}
1815 1783
1816int 1784int
1817cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp) 1785cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
1818{ 1786{
1819 const __greg_t *gr = mcp->__gregs; 1787 const __greg_t *gr = mcp->__gregs;
1820 1788
1821 if ((gr[_REG_PS] & ALPHA_PSL_USERSET) != ALPHA_PSL_USERSET || 1789 if ((gr[_REG_PS] & ALPHA_PSL_USERSET) != ALPHA_PSL_USERSET ||
1822 (gr[_REG_PS] & ALPHA_PSL_USERCLR) != 0) 1790 (gr[_REG_PS] & ALPHA_PSL_USERCLR) != 0)
1823 return EINVAL; 1791 return EINVAL;
1824 1792
1825 return 0; 1793 return 0;
1826} 1794}
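
The PS check above is an instance of the usual required-bits/forbidden-bits validation pattern. A minimal user-space sketch follows; the mask values are invented for illustration and are not the real ALPHA_PSL_USERSET/ALPHA_PSL_USERCLR definitions from the alpha CPU headers.

#include <errno.h>
#include <stdio.h>

/* Invented masks for the sketch, not the real PSL bit assignments. */
#define PSL_USERSET	0x0008UL	/* bits that must be set in user mode */
#define PSL_USERCLR	0x0007UL	/* bits that must be clear in user mode */

static int
validate_ps(unsigned long ps)
{
	/* Reject if a required bit is missing or a forbidden bit is set. */
	if ((ps & PSL_USERSET) != PSL_USERSET ||
	    (ps & PSL_USERCLR) != 0)
		return EINVAL;
	return 0;
}

int
main(void)
{
	printf("%d\n", validate_ps(0x0008UL));	/* 0: valid */
	printf("%d\n", validate_ps(0x0009UL));	/* EINVAL: forbidden bit set */
	return 0;
}
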
1827 1795
1828int 1796int
1829cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags) 1797cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
1830{ 1798{
1831 struct trapframe *frame = l->l_md.md_tf; 1799 struct trapframe *frame = l->l_md.md_tf;
1832 struct pcb *pcb = lwp_getpcb(l); 1800 struct pcb *pcb = lwp_getpcb(l);
1833 const __greg_t *gr = mcp->__gregs; 1801 const __greg_t *gr = mcp->__gregs;
1834 int error; 1802 int error;
1835 1803
1836 /* Restore register context, if any. */ 1804 /* Restore register context, if any. */
1837 if (flags & _UC_CPU) { 1805 if (flags & _UC_CPU) {
1838 /* Check for security violations first. */ 1806 /* Check for security violations first. */
1839 error = cpu_mcontext_validate(l, mcp); 1807 error = cpu_mcontext_validate(l, mcp);
1840 if (error) 1808 if (error)
1841 return error; 1809 return error;
1842 1810
1843 regtoframe((const struct reg *)gr, l->l_md.md_tf); 1811 regtoframe((const struct reg *)gr, l->l_md.md_tf);
1844 if (l == curlwp) 1812 if (l == curlwp)
1845 alpha_pal_wrusp(gr[_REG_SP]); 1813 alpha_pal_wrusp(gr[_REG_SP]);
1846 else 1814 else
1847 pcb->pcb_hw.apcb_usp = gr[_REG_SP]; 1815 pcb->pcb_hw.apcb_usp = gr[_REG_SP];
1848 frame->tf_regs[FRAME_PC] = gr[_REG_PC]; 1816 frame->tf_regs[FRAME_PC] = gr[_REG_PC];
1849 frame->tf_regs[FRAME_PS] = gr[_REG_PS]; 1817 frame->tf_regs[FRAME_PS] = gr[_REG_PS];
1850 } 1818 }
1851 if (flags & _UC_TLSBASE) 1819 if (flags & _UC_TLSBASE)
1852 lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_UNIQUE]); 1820 lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_UNIQUE]);
1853 /* Restore floating point register context, if any. */ 1821 /* Restore floating point register context, if any. */
1854 if (flags & _UC_FPU) { 1822 if (flags & _UC_FPU) {
1855 /* If we have an FP register context, get rid of it. */ 1823 /* If we have an FP register context, get rid of it. */
1856 fpu_discard(l, true); 1824 fpu_discard(l, true);
1857 (void)memcpy(&pcb->pcb_fp, &mcp->__fpregs, 1825 (void)memcpy(&pcb->pcb_fp, &mcp->__fpregs,
1858 sizeof (pcb->pcb_fp)); 1826 sizeof (pcb->pcb_fp));
1859 l->l_md.md_flags = mcp->__fpregs.__fp_fpcr & MDLWP_FP_C; 1827 l->l_md.md_flags = mcp->__fpregs.__fp_fpcr & MDLWP_FP_C;
1860 } 1828 }
1861 1829
1862 return (0); 1830 return (0);
1863} 1831}
1864 1832
1865static void 1833static void
1866cpu_kick(struct cpu_info * const ci) 1834cpu_kick(struct cpu_info * const ci)
1867{ 1835{
1868#if defined(MULTIPROCESSOR) 1836#if defined(MULTIPROCESSOR)
1869 alpha_send_ipi(ci->ci_cpuid, ALPHA_IPI_AST); 1837 alpha_send_ipi(ci->ci_cpuid, ALPHA_IPI_AST);
1870#endif /* MULTIPROCESSOR */ 1838#endif /* MULTIPROCESSOR */
1871} 1839}
1872 1840
1873/* 1841/*
1874 * Preempt the current process if in interrupt from user mode, 1842 * Preempt the current process if in interrupt from user mode,
1875 * or after the current trap/syscall if in system mode. 1843 * or after the current trap/syscall if in system mode.
1876 */ 1844 */
1877void 1845void
1878cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags) 1846cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
1879{ 1847{
1880 1848
1881 KASSERT(kpreempt_disabled()); 1849 KASSERT(kpreempt_disabled());
1882 1850
1883 if ((flags & RESCHED_IDLE) != 0) { 1851 if ((flags & RESCHED_IDLE) != 0) {
1884 /* 1852 /*
1885 * Nothing to do here; we are not currently using WTINT 1853 * Nothing to do here; we are not currently using WTINT
1886 * in cpu_idle(). 1854 * in cpu_idle().
1887 */ 1855 */
1888 return; 1856 return;
1889 } 1857 }
1890 1858
1891 /* XXX RESCHED_KPREEMPT XXX */ 1859 /* XXX RESCHED_KPREEMPT XXX */
1892 1860
1893 KASSERT((flags & RESCHED_UPREEMPT) != 0); 1861 KASSERT((flags & RESCHED_UPREEMPT) != 0);
1894 if ((flags & RESCHED_REMOTE) != 0) { 1862 if ((flags & RESCHED_REMOTE) != 0) {
1895 cpu_kick(ci); 1863 cpu_kick(ci);
1896 } else { 1864 } else {
1897 aston(l); 1865 aston(l);
1898 } 1866 }
1899} 1867}
1900 1868
1901/* 1869/*
1902 * Notify the current lwp (l) that it has a signal pending, 1870 * Notify the current lwp (l) that it has a signal pending,
 1903 * to be processed as soon as possible. 1871 * to be processed as soon as possible.
1904 */ 1872 */
1905void 1873void
1906cpu_signotify(struct lwp *l) 1874cpu_signotify(struct lwp *l)
1907{ 1875{
1908 1876
1909 KASSERT(kpreempt_disabled()); 1877 KASSERT(kpreempt_disabled());
1910 1878
1911 if (l->l_cpu != curcpu()) { 1879 if (l->l_cpu != curcpu()) {
1912 cpu_kick(l->l_cpu); 1880 cpu_kick(l->l_cpu);
1913 } else { 1881 } else {
1914 aston(l); 1882 aston(l);
1915 } 1883 }
1916} 1884}
1917 1885
1918/* 1886/*
1919 * Give a profiling tick to the current process when the user profiling 1887 * Give a profiling tick to the current process when the user profiling
1920 * buffer pages are invalid. On the alpha, request an AST to send us 1888 * buffer pages are invalid. On the alpha, request an AST to send us
1921 * through trap, marking the proc as needing a profiling tick. 1889 * through trap, marking the proc as needing a profiling tick.
1922 */ 1890 */
1923void 1891void
1924cpu_need_proftick(struct lwp *l) 1892cpu_need_proftick(struct lwp *l)
1925{ 1893{
1926 1894
1927 KASSERT(kpreempt_disabled()); 1895 KASSERT(kpreempt_disabled());
1928 KASSERT(l->l_cpu == curcpu()); 1896 KASSERT(l->l_cpu == curcpu());
1929 1897
1930 l->l_pflag |= LP_OWEUPC; 1898 l->l_pflag |= LP_OWEUPC;
1931 aston(l); 1899 aston(l);
1932} 1900}

cvs diff -r1.270 -r1.271 src/sys/arch/alpha/alpha/pmap.c (switch to unified diff)

--- src/sys/arch/alpha/alpha/pmap.c 2020/09/03 02:05:03 1.270
+++ src/sys/arch/alpha/alpha/pmap.c 2020/09/03 02:09:09 1.271
@@ -1,2402 +1,2372 @@ @@ -1,2402 +1,2372 @@
1/* $NetBSD: pmap.c,v 1.270 2020/09/03 02:05:03 thorpej Exp $ */ 1/* $NetBSD: pmap.c,v 1.271 2020/09/03 02:09:09 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020 4 * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020
5 * The NetBSD Foundation, Inc. 5 * The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius, 10 * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius,
11 * and by Chris G. Demetriou. 11 * and by Chris G. Demetriou.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions 14 * modification, are permitted provided that the following conditions
15 * are met: 15 * are met:
16 * 1. Redistributions of source code must retain the above copyright 16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer. 17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright 18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the 19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution. 20 * documentation and/or other materials provided with the distribution.
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE. 32 * POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35/* 35/*
36 * Copyright (c) 1991, 1993 36 * Copyright (c) 1991, 1993
37 * The Regents of the University of California. All rights reserved. 37 * The Regents of the University of California. All rights reserved.
38 * 38 *
39 * This code is derived from software contributed to Berkeley by 39 * This code is derived from software contributed to Berkeley by
40 * the Systems Programming Group of the University of Utah Computer 40 * the Systems Programming Group of the University of Utah Computer
41 * Science Department. 41 * Science Department.
42 * 42 *
43 * Redistribution and use in source and binary forms, with or without 43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions 44 * modification, are permitted provided that the following conditions
45 * are met: 45 * are met:
46 * 1. Redistributions of source code must retain the above copyright 46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer. 47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright 48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the 49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution. 50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of the University nor the names of its contributors 51 * 3. Neither the name of the University nor the names of its contributors
52 * may be used to endorse or promote products derived from this software 52 * may be used to endorse or promote products derived from this software
53 * without specific prior written permission. 53 * without specific prior written permission.
54 * 54 *
55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * SUCH DAMAGE. 65 * SUCH DAMAGE.
66 * 66 *
67 * @(#)pmap.c 8.6 (Berkeley) 5/27/94 67 * @(#)pmap.c 8.6 (Berkeley) 5/27/94
68 */ 68 */
69 69
70/* 70/*
71 * DEC Alpha physical map management code. 71 * DEC Alpha physical map management code.
72 * 72 *
73 * History: 73 * History:
74 * 74 *
75 * This pmap started life as a Motorola 68851/68030 pmap, 75 * This pmap started life as a Motorola 68851/68030 pmap,
76 * written by Mike Hibler at the University of Utah. 76 * written by Mike Hibler at the University of Utah.
77 * 77 *
78 * It was modified for the DEC Alpha by Chris Demetriou 78 * It was modified for the DEC Alpha by Chris Demetriou
79 * at Carnegie Mellon University. 79 * at Carnegie Mellon University.
80 * 80 *
81 * Support for non-contiguous physical memory was added by 81 * Support for non-contiguous physical memory was added by
82 * Jason R. Thorpe of the Numerical Aerospace Simulation 82 * Jason R. Thorpe of the Numerical Aerospace Simulation
83 * Facility, NASA Ames Research Center and Chris Demetriou. 83 * Facility, NASA Ames Research Center and Chris Demetriou.
84 * 84 *
85 * Page table management and a major cleanup were undertaken 85 * Page table management and a major cleanup were undertaken
86 * by Jason R. Thorpe, with lots of help from Ross Harvey of 86 * by Jason R. Thorpe, with lots of help from Ross Harvey of
87 * Avalon Computer Systems and from Chris Demetriou. 87 * Avalon Computer Systems and from Chris Demetriou.
88 * 88 *
89 * Support for the new UVM pmap interface was written by 89 * Support for the new UVM pmap interface was written by
90 * Jason R. Thorpe. 90 * Jason R. Thorpe.
91 * 91 *
92 * Support for ASNs was written by Jason R. Thorpe, again 92 * Support for ASNs was written by Jason R. Thorpe, again
93 * with help from Chris Demetriou and Ross Harvey. 93 * with help from Chris Demetriou and Ross Harvey.
94 * 94 *
95 * The locking protocol was written by Jason R. Thorpe, 95 * The locking protocol was written by Jason R. Thorpe,
96 * using Chuck Cranor's i386 pmap for UVM as a model. 96 * using Chuck Cranor's i386 pmap for UVM as a model.
97 * 97 *
98 * TLB shootdown code was written (and then subsequently 98 * TLB shootdown code was written (and then subsequently
99 * rewritten some years later, borrowing some ideas from 99 * rewritten some years later, borrowing some ideas from
100 * the x86 pmap) by Jason R. Thorpe. 100 * the x86 pmap) by Jason R. Thorpe.
101 * 101 *
102 * Multiprocessor modifications by Andrew Doran and 102 * Multiprocessor modifications by Andrew Doran and
103 * Jason R. Thorpe. 103 * Jason R. Thorpe.
104 * 104 *
105 * Notes: 105 * Notes:
106 * 106 *
107 * All user page table access is done via K0SEG. Kernel 107 * All user page table access is done via K0SEG. Kernel
108 * page table access is done via the recursive Virtual Page 108 * page table access is done via the recursive Virtual Page
 109 * Table because kernel PT pages are pre-allocated and never 109 * Table because kernel PT pages are pre-allocated and never
 110 * freed, so no VPT fault handling is required. 110 * freed, so no VPT fault handling is required.
111 */ 111 */
112 112
113/* 113/*
114 * Manages physical address maps. 114 * Manages physical address maps.
115 * 115 *
116 * Since the information managed by this module is 116 * Since the information managed by this module is
117 * also stored by the logical address mapping module, 117 * also stored by the logical address mapping module,
118 * this module may throw away valid virtual-to-physical 118 * this module may throw away valid virtual-to-physical
119 * mappings at almost any time. However, invalidations 119 * mappings at almost any time. However, invalidations
120 * of virtual-to-physical mappings must be done as 120 * of virtual-to-physical mappings must be done as
121 * requested. 121 * requested.
122 * 122 *
123 * In order to cope with hardware architectures which 123 * In order to cope with hardware architectures which
124 * make virtual-to-physical map invalidates expensive, 124 * make virtual-to-physical map invalidates expensive,
 125 * this module may delay invalidation or protection-reduction 125 * this module may delay invalidation or protection-reduction
126 * operations until such time as they are actually 126 * operations until such time as they are actually
127 * necessary. This module is given full information as 127 * necessary. This module is given full information as
128 * to which processors are currently using which maps, 128 * to which processors are currently using which maps,
129 * and to when physical maps must be made correct. 129 * and to when physical maps must be made correct.
130 */ 130 */
131 131
132#include "opt_lockdebug.h" 132#include "opt_lockdebug.h"
133#include "opt_sysv.h" 133#include "opt_sysv.h"
134#include "opt_multiprocessor.h" 134#include "opt_multiprocessor.h"
135 135
136#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 136#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
137 137
138__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.270 2020/09/03 02:05:03 thorpej Exp $"); 138__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.271 2020/09/03 02:09:09 thorpej Exp $");
139 139
140#include <sys/param.h> 140#include <sys/param.h>
141#include <sys/systm.h> 141#include <sys/systm.h>
142#include <sys/kernel.h> 142#include <sys/kernel.h>
143#include <sys/proc.h> 143#include <sys/proc.h>
144#include <sys/malloc.h> 144#include <sys/malloc.h>
145#include <sys/pool.h> 145#include <sys/pool.h>
146#include <sys/buf.h> 146#include <sys/buf.h>
147#include <sys/evcnt.h> 147#include <sys/evcnt.h>
148#include <sys/atomic.h> 148#include <sys/atomic.h>
149#include <sys/cpu.h> 149#include <sys/cpu.h>
150 150
151#include <uvm/uvm.h> 151#include <uvm/uvm.h>
152 152
153#if defined(_PMAP_MAY_USE_PROM_CONSOLE) || defined(MULTIPROCESSOR) 153#if defined(MULTIPROCESSOR)
154#include <machine/rpb.h> 154#include <machine/rpb.h>
155#endif 155#endif
156 156
157#ifdef DEBUG 157#ifdef DEBUG
158#define PDB_FOLLOW 0x0001 158#define PDB_FOLLOW 0x0001
159#define PDB_INIT 0x0002 159#define PDB_INIT 0x0002
160#define PDB_ENTER 0x0004 160#define PDB_ENTER 0x0004
161#define PDB_REMOVE 0x0008 161#define PDB_REMOVE 0x0008
162#define PDB_CREATE 0x0010 162#define PDB_CREATE 0x0010
163#define PDB_PTPAGE 0x0020 163#define PDB_PTPAGE 0x0020
164#define PDB_ASN 0x0040 164#define PDB_ASN 0x0040
165#define PDB_BITS 0x0080 165#define PDB_BITS 0x0080
166#define PDB_COLLECT 0x0100 166#define PDB_COLLECT 0x0100
167#define PDB_PROTECT 0x0200 167#define PDB_PROTECT 0x0200
168#define PDB_BOOTSTRAP 0x1000 168#define PDB_BOOTSTRAP 0x1000
169#define PDB_PARANOIA 0x2000 169#define PDB_PARANOIA 0x2000
170#define PDB_WIRING 0x4000 170#define PDB_WIRING 0x4000
171#define PDB_PVDUMP 0x8000 171#define PDB_PVDUMP 0x8000
172 172
173int debugmap = 0; 173int debugmap = 0;
174int pmapdebug = PDB_PARANOIA; 174int pmapdebug = PDB_PARANOIA;
175#endif 175#endif
176 176
177#if defined(MULTIPROCESSOR) 177#if defined(MULTIPROCESSOR)
178#define PMAP_MP(x) x 178#define PMAP_MP(x) x
179#else 179#else
180#define PMAP_MP(x) __nothing 180#define PMAP_MP(x) __nothing
181#endif /* MULTIPROCESSOR */ 181#endif /* MULTIPROCESSOR */
182 182
183/* 183/*
184 * Given a map and a machine independent protection code, 184 * Given a map and a machine independent protection code,
185 * convert to an alpha protection code. 185 * convert to an alpha protection code.
186 */ 186 */
187#define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p]) 187#define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p])
188static int protection_codes[2][8] __read_mostly; 188static int protection_codes[2][8] __read_mostly;
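
As a sketch of how such a two-row table might be populated: VM_PROT_* mirrors the machine-independent protection bits, the HW_* codes are invented stand-ins for the real PG_* PTE bits, and the real initialization in this file differs in detail.

#include <stdio.h>

#define VM_PROT_READ	0x1	/* MI protection is a 3-bit r/w/x code... */
#define VM_PROT_WRITE	0x2	/* ...hence 8 entries per table row. */
#define VM_PROT_EXECUTE	0x4

/* Invented hardware enable bits for the sketch. */
#define HW_KRE	0x01		/* kernel read enable */
#define HW_KWE	0x02		/* kernel write enable */
#define HW_URE	0x04		/* user read enable */
#define HW_UWE	0x08		/* user write enable */

static int protection_codes[2][8];

static void
protection_init(void)
{
	for (int p = 0; p < 8; p++) {
		int k = 0, u = 0;

		if (p & VM_PROT_READ) { k |= HW_KRE; u |= HW_KRE | HW_URE; }
		if (p & VM_PROT_WRITE) { k |= HW_KWE; u |= HW_KWE | HW_UWE; }
		protection_codes[0][p] = k;	/* kernel pmap row */
		protection_codes[1][p] = u;	/* user pmap row */
	}
}

int
main(void)
{
	protection_init();
	printf("user r/w -> %#x\n",
	    protection_codes[1][VM_PROT_READ | VM_PROT_WRITE]);
	return 0;
}

Paying the table-fill cost once at startup makes the per-mapping lookup a single indexed load, which is why pte_prot() can stay a trivial macro.
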
189 189
190/* 190/*
191 * kernel_lev1map: 191 * kernel_lev1map:
192 * 192 *
193 * Kernel level 1 page table. This maps all kernel level 2 193 * Kernel level 1 page table. This maps all kernel level 2
194 * page table pages, and is used as a template for all user 194 * page table pages, and is used as a template for all user
195 * pmap level 1 page tables. When a new user level 1 page 195 * pmap level 1 page tables. When a new user level 1 page
196 * table is allocated, all kernel_lev1map PTEs for kernel 196 * table is allocated, all kernel_lev1map PTEs for kernel
197 * addresses are copied to the new map. 197 * addresses are copied to the new map.
198 * 198 *
199 * The kernel also has an initial set of kernel level 2 page 199 * The kernel also has an initial set of kernel level 2 page
200 * table pages. These map the kernel level 3 page table pages. 200 * table pages. These map the kernel level 3 page table pages.
201 * As kernel level 3 page table pages are added, more level 2 201 * As kernel level 3 page table pages are added, more level 2
202 * page table pages may be added to map them. These pages are 202 * page table pages may be added to map them. These pages are
203 * never freed. 203 * never freed.
204 * 204 *
205 * Finally, the kernel also has an initial set of kernel level 205 * Finally, the kernel also has an initial set of kernel level
206 * 3 page table pages. These map pages in K1SEG. More level 206 * 3 page table pages. These map pages in K1SEG. More level
207 * 3 page table pages may be added at run-time if additional 207 * 3 page table pages may be added at run-time if additional
208 * K1SEG address space is required. These pages are never freed. 208 * K1SEG address space is required. These pages are never freed.
209 * 209 *
210 * NOTE: When mappings are inserted into the kernel pmap, all 210 * NOTE: When mappings are inserted into the kernel pmap, all
211 * level 2 and level 3 page table pages must already be allocated 211 * level 2 and level 3 page table pages must already be allocated
212 * and mapped into the parent page table. 212 * and mapped into the parent page table.
213 */ 213 */
214pt_entry_t *kernel_lev1map __read_mostly; 214pt_entry_t *kernel_lev1map __read_mostly;
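
As an illustration of the three-level structure described above, here is a schematic software walk of an alpha-style virtual address (10-bit indices at each level with 8 KB pages, giving a 43-bit VA). All names are invented; real PTEs hold a PFN plus protection bits rather than a raw pointer, and the hardware/PALcode walks these tables through the VPT.

#include <stdint.h>

#define PG_SHIFT	13			/* 8 KB pages */
#define PTE_PER_PAGE	1024			/* 2^10 8-byte PTEs per PT page */
#define PT_MASK		(PTE_PER_PAGE - 1)
#define L3_SHIFT	PG_SHIFT
#define L2_SHIFT	(L3_SHIFT + 10)
#define L1_SHIFT	(L2_SHIFT + 10)

typedef uintptr_t pte_t;	/* sketch only: a "PTE" stores the next table's address */

static pte_t *
va_to_l3pte(pte_t *lev1map, uint64_t va)
{
	pte_t *l2 = (pte_t *)lev1map[(va >> L1_SHIFT) & PT_MASK];
	pte_t *l3 = (pte_t *)l2[(va >> L2_SHIFT) & PT_MASK];

	return &l3[(va >> L3_SHIFT) & PT_MASK];
}

int
main(void)
{
	static pte_t l1[PTE_PER_PAGE], l2[PTE_PER_PAGE], l3[PTE_PER_PAGE];
	uint64_t va = (5ULL << L1_SHIFT) | (7ULL << L2_SHIFT) | (9ULL << L3_SHIFT);

	l1[5] = (pte_t)l2;
	l2[7] = (pte_t)l3;
	return va_to_l3pte(l1, va) == &l3[9] ? 0 : 1;
}
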
215 215
216/* 216/*
217 * Virtual Page Table. 217 * Virtual Page Table.
218 */ 218 */
219static pt_entry_t *VPT __read_mostly; 219static pt_entry_t *VPT __read_mostly;
220 220
221static struct { 221static struct {
222 struct pmap k_pmap; 222 struct pmap k_pmap;
223} kernel_pmap_store __cacheline_aligned; 223} kernel_pmap_store __cacheline_aligned;
224 224
225struct pmap *const kernel_pmap_ptr = &kernel_pmap_store.k_pmap; 225struct pmap *const kernel_pmap_ptr = &kernel_pmap_store.k_pmap;
226 226
227/* PA of first available physical page */ 227/* PA of first available physical page */
228paddr_t avail_start __read_mostly; 228paddr_t avail_start __read_mostly;
229 229
230/* PA of last available physical page */ 230/* PA of last available physical page */
231paddr_t avail_end __read_mostly; 231paddr_t avail_end __read_mostly;
232 232
233/* VA of last avail page (end of kernel AS) */ 233/* VA of last avail page (end of kernel AS) */
234static vaddr_t virtual_end __read_mostly; 234static vaddr_t virtual_end __read_mostly;
235 235
236/* Has pmap_init completed? */ 236/* Has pmap_init completed? */
237static bool pmap_initialized __read_mostly; 237static bool pmap_initialized __read_mostly;
238 238
239/* Instrumentation */ 239/* Instrumentation */
240u_long pmap_pages_stolen __read_mostly; 240u_long pmap_pages_stolen __read_mostly;
241 241
242/* 242/*
243 * This variable contains the number of CPU IDs we need to allocate 243 * This variable contains the number of CPU IDs we need to allocate
244 * space for when allocating the pmap structure. It is used to 244 * space for when allocating the pmap structure. It is used to
245 * size a per-CPU array of ASN and ASN Generation number. 245 * size a per-CPU array of ASN and ASN Generation number.
246 */ 246 */
247static u_long pmap_ncpuids __read_mostly; 247static u_long pmap_ncpuids __read_mostly;
248 248
249#ifndef PMAP_PV_LOWAT 249#ifndef PMAP_PV_LOWAT
250#define PMAP_PV_LOWAT 16 250#define PMAP_PV_LOWAT 16
251#endif 251#endif
252int pmap_pv_lowat __read_mostly = PMAP_PV_LOWAT; 252int pmap_pv_lowat __read_mostly = PMAP_PV_LOWAT;
253 253
254/* 254/*
255 * List of all pmaps, used to update them when e.g. additional kernel 255 * List of all pmaps, used to update them when e.g. additional kernel
256 * page tables are allocated. This list is kept LRU-ordered by 256 * page tables are allocated. This list is kept LRU-ordered by
257 * pmap_activate(). 257 * pmap_activate().
258 */ 258 */
259static TAILQ_HEAD(, pmap) pmap_all_pmaps __cacheline_aligned; 259static TAILQ_HEAD(, pmap) pmap_all_pmaps __cacheline_aligned;
260 260
261/* 261/*
262 * The pools from which pmap structures and sub-structures are allocated. 262 * The pools from which pmap structures and sub-structures are allocated.
263 */ 263 */
264static struct pool_cache pmap_pmap_cache __read_mostly; 264static struct pool_cache pmap_pmap_cache __read_mostly;
265static struct pool_cache pmap_l1pt_cache __read_mostly; 265static struct pool_cache pmap_l1pt_cache __read_mostly;
266static struct pool_cache pmap_pv_cache __read_mostly; 266static struct pool_cache pmap_pv_cache __read_mostly;
267 267
268CTASSERT(offsetof(struct pmap, pm_asni[0]) == COHERENCY_UNIT); 268CTASSERT(offsetof(struct pmap, pm_asni[0]) == COHERENCY_UNIT);
269CTASSERT(PMAP_SIZEOF(ALPHA_MAXPROCS) < ALPHA_PGBYTES); 269CTASSERT(PMAP_SIZEOF(ALPHA_MAXPROCS) < ALPHA_PGBYTES);
270CTASSERT(sizeof(struct pmap_asn_info) == COHERENCY_UNIT); 270CTASSERT(sizeof(struct pmap_asn_info) == COHERENCY_UNIT);
271 271
272/* 272/*
273 * Address Space Numbers. 273 * Address Space Numbers.
274 * 274 *
275 * On many implementations of the Alpha architecture, the TLB entries and 275 * On many implementations of the Alpha architecture, the TLB entries and
276 * I-cache blocks are tagged with a unique number within an implementation- 276 * I-cache blocks are tagged with a unique number within an implementation-
277 * specified range. When a process context becomes active, the ASN is used 277 * specified range. When a process context becomes active, the ASN is used
278 * to match TLB entries; if a TLB entry for a particular VA does not match 278 * to match TLB entries; if a TLB entry for a particular VA does not match
279 * the current ASN, it is ignored (one could think of the processor as 279 * the current ASN, it is ignored (one could think of the processor as
280 * having a collection of <max ASN> separate TLBs). This allows operating 280 * having a collection of <max ASN> separate TLBs). This allows operating
281 * system software to skip the TLB flush that would otherwise be necessary 281 * system software to skip the TLB flush that would otherwise be necessary
282 * at context switch time. 282 * at context switch time.
283 * 283 *
284 * Alpha PTEs have a bit in them (PG_ASM - Address Space Match) that 284 * Alpha PTEs have a bit in them (PG_ASM - Address Space Match) that
285 * causes TLB entries to match any ASN. The PALcode also provides 285 * causes TLB entries to match any ASN. The PALcode also provides
286 * a TBI (Translation Buffer Invalidate) operation that flushes all 286 * a TBI (Translation Buffer Invalidate) operation that flushes all
287 * TLB entries that _do not_ have PG_ASM. We use this bit for kernel 287 * TLB entries that _do not_ have PG_ASM. We use this bit for kernel
288 * mappings, so that invalidation of all user mappings does not invalidate 288 * mappings, so that invalidation of all user mappings does not invalidate
289 * kernel mappings (which are consistent across all processes). 289 * kernel mappings (which are consistent across all processes).
290 * 290 *
 291 * pmap_next_asn always indicates the next ASN to use. When 291 * pmap_next_asn always indicates the next ASN to use. When
292 * pmap_next_asn exceeds pmap_max_asn, we start a new ASN generation. 292 * pmap_next_asn exceeds pmap_max_asn, we start a new ASN generation.
293 * 293 *
294 * When a new ASN generation is created, the per-process (i.e. non-PG_ASM) 294 * When a new ASN generation is created, the per-process (i.e. non-PG_ASM)
295 * TLB entries and the I-cache are flushed, the generation number is bumped, 295 * TLB entries and the I-cache are flushed, the generation number is bumped,
296 * and pmap_next_asn is changed to indicate the first non-reserved ASN. 296 * and pmap_next_asn is changed to indicate the first non-reserved ASN.
297 * 297 *
298 * We reserve ASN #0 for pmaps that use the global kernel_lev1map. This 298 * We reserve ASN #0 for pmaps that use the global kernel_lev1map. This
 299 * ensures that LWPs using the kernel pmap cannot accidentally 299 * ensures that LWPs using the kernel pmap cannot accidentally
 300 * access user space. This is important because 300 * access user space. This is important because
301 * the PALcode may use the recursive VPT to service TLB misses. 301 * the PALcode may use the recursive VPT to service TLB misses.
302 * 302 *
303 * By reserving an ASN for the kernel, we are guaranteeing that an lwp 303 * By reserving an ASN for the kernel, we are guaranteeing that an lwp
304 * will not see any valid user space TLB entries until it passes through 304 * will not see any valid user space TLB entries until it passes through
305 * pmap_activate() for the first time. 305 * pmap_activate() for the first time.
306 * 306 *
307 * On processors that do not support ASNs, the PALcode invalidates 307 * On processors that do not support ASNs, the PALcode invalidates
308 * non-ASM TLB entries automatically on swpctx. We completely skip 308 * non-ASM TLB entries automatically on swpctx. We completely skip
309 * the ASN machinery in this case because the PALcode neither reads 309 * the ASN machinery in this case because the PALcode neither reads
310 * nor writes that field of the HWPCB. 310 * nor writes that field of the HWPCB.
311 */ 311 */
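
A much-simplified, uniprocessor sketch of the generation scheme just described. Every name here (asn_state, asn_alloc(), the flush stub) is invented for illustration and does not mirror the kernel's internal interfaces.

#define ASN_RESERVED	0	/* ASN #0: pmaps using kernel_lev1map */

struct asn_state {
	unsigned int next_asn;	/* next ASN to hand out */
	unsigned int max_asn;	/* implementation-defined maximum */
	unsigned long gen;	/* current ASN generation */
};

struct pmap_asn {		/* per-pmap (per-CPU, in reality) state */
	unsigned int asn;
	unsigned long gen;
};

static void
flush_non_asm_tlb_and_icache(void)
{
	/* Stand-in for the TBIAP + I-cache flush done on rollover. */
}

static void
asn_alloc(struct asn_state *st, struct pmap_asn *pa)
{
	if (pa->gen == st->gen)
		return;		/* ASN still valid in this generation */

	if (st->next_asn > st->max_asn) {
		/*
		 * Out of ASNs: flush all non-PG_ASM TLB entries and
		 * the I-cache, then open a new generation.
		 */
		flush_non_asm_tlb_and_icache();
		st->gen++;
		st->next_asn = ASN_RESERVED + 1;	/* first non-reserved */
	}
	pa->asn = st->next_asn++;
	pa->gen = st->gen;
}

int
main(void)
{
	struct asn_state st = { .next_asn = 1, .max_asn = 127, .gen = 1 };
	struct pmap_asn pa = { .asn = 0, .gen = 0 };

	asn_alloc(&st, &pa);	/* assigns ASN 1 in generation 1 */
	return pa.asn == 1 ? 0 : 1;
}
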
312 312
313/* max ASN supported by the system */ 313/* max ASN supported by the system */
314static u_int pmap_max_asn __read_mostly; 314static u_int pmap_max_asn __read_mostly;
315 315
316/* 316/*
317 * Locking: 317 * Locking:
318 * 318 *
319 * READ/WRITE LOCKS 319 * READ/WRITE LOCKS
320 * ---------------- 320 * ----------------
321 * 321 *
322 * * pmap_main_lock - This lock is used to prevent deadlock and/or 322 * * pmap_main_lock - This lock is used to prevent deadlock and/or
323 * provide mutex access to the pmap module. Most operations lock 323 * provide mutex access to the pmap module. Most operations lock
324 * the pmap first, then PV lists as needed. However, some operations, 324 * the pmap first, then PV lists as needed. However, some operations,
325 * such as pmap_page_protect(), lock the PV lists before locking 325 * such as pmap_page_protect(), lock the PV lists before locking
326 * the pmaps. To prevent deadlock, we require a mutex lock on the 326 * the pmaps. To prevent deadlock, we require a mutex lock on the
327 * pmap module if locking in the PV->pmap direction. This is 327 * pmap module if locking in the PV->pmap direction. This is
328 * implemented by acquiring a (shared) read lock on pmap_main_lock 328 * implemented by acquiring a (shared) read lock on pmap_main_lock
 329 * if locking pmap->PV and an (exclusive) write lock if locking in 329 * if locking pmap->PV and an (exclusive) write lock if locking in
330 * the PV->pmap direction. Since only one thread can hold a write 330 * the PV->pmap direction. Since only one thread can hold a write
331 * lock at a time, this provides the mutex. 331 * lock at a time, this provides the mutex.
332 * 332 *
333 * MUTEXES 333 * MUTEXES
334 * ------- 334 * -------
335 * 335 *
336 * * pmap lock (global hash) - These locks protect the pmap structures. 336 * * pmap lock (global hash) - These locks protect the pmap structures.
337 * 337 *
338 * * pmap activation lock (global hash) - These IPL_SCHED spin locks 338 * * pmap activation lock (global hash) - These IPL_SCHED spin locks
339 * synchronize pmap_activate() and TLB shootdowns. This has a lock 339 * synchronize pmap_activate() and TLB shootdowns. This has a lock
340 * ordering constraint with the tlb_lock: 340 * ordering constraint with the tlb_lock:
341 * 341 *
342 * tlb_lock -> pmap activation lock 342 * tlb_lock -> pmap activation lock
343 * 343 *
344 * * pvh_lock (global hash) - These locks protect the PV lists for 344 * * pvh_lock (global hash) - These locks protect the PV lists for
345 * managed pages. 345 * managed pages.
346 * 346 *
347 * * tlb_lock - This IPL_VM lock serializes local and remote TLB 347 * * tlb_lock - This IPL_VM lock serializes local and remote TLB
348 * invalidation. 348 * invalidation.
349 * 349 *
350 * * pmap_all_pmaps_lock - This lock protects the global list of 350 * * pmap_all_pmaps_lock - This lock protects the global list of
351 * all pmaps. 351 * all pmaps.
352 * 352 *
353 * * pmap_growkernel_lock - This lock protects pmap_growkernel() 353 * * pmap_growkernel_lock - This lock protects pmap_growkernel()
354 * and the virtual_end variable. 354 * and the virtual_end variable.
355 * 355 *
356 * There is a lock ordering constraint for pmap_growkernel_lock. 356 * There is a lock ordering constraint for pmap_growkernel_lock.
357 * pmap_growkernel() acquires the locks in the following order: 357 * pmap_growkernel() acquires the locks in the following order:
358 * 358 *
359 * pmap_growkernel_lock (write) -> pmap_all_pmaps_lock -> 359 * pmap_growkernel_lock (write) -> pmap_all_pmaps_lock ->
360 * pmap lock 360 * pmap lock
361 * 361 *
362 * We need to ensure consistency between user pmaps and the 362 * We need to ensure consistency between user pmaps and the
363 * kernel_lev1map. For this reason, pmap_growkernel_lock must 363 * kernel_lev1map. For this reason, pmap_growkernel_lock must
364 * be held to prevent kernel_lev1map changing across pmaps 364 * be held to prevent kernel_lev1map changing across pmaps
365 * being added to / removed from the global pmaps list. 365 * being added to / removed from the global pmaps list.
366 * 366 *
367 * Address space number management (global ASN counters and per-pmap 367 * Address space number management (global ASN counters and per-pmap
368 * ASN state) are not locked; they use arrays of values indexed 368 * ASN state) are not locked; they use arrays of values indexed
369 * per-processor. 369 * per-processor.
370 * 370 *
371 * All internal functions which operate on a pmap are called 371 * All internal functions which operate on a pmap are called
372 * with the pmap already locked by the caller (which will be 372 * with the pmap already locked by the caller (which will be
373 * an interface function). 373 * an interface function).
374 */ 374 */
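
The ordering constraint above is the standard deadlock-avoidance discipline: every path that needs more than one of these locks takes them in the same global order. A schematic pthreads sketch, with all names invented:

#include <pthread.h>

static pthread_rwlock_t growkernel_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t all_pmaps_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pmap_lock = PTHREAD_MUTEX_INITIALIZER;

static void
grow_kernel_sketch(void)
{
	/* Always 1st -> 2nd -> 3rd; any other order risks deadlock. */
	pthread_rwlock_wrlock(&growkernel_lock);	/* 1st */
	pthread_mutex_lock(&all_pmaps_lock);		/* 2nd */
	pthread_mutex_lock(&pmap_lock);			/* 3rd: per-pmap */

	/* ... update kernel_lev1map and each user pmap ... */

	pthread_mutex_unlock(&pmap_lock);
	pthread_mutex_unlock(&all_pmaps_lock);
	pthread_rwlock_unlock(&growkernel_lock);
}

int
main(void)
{
	grow_kernel_sketch();
	return 0;
}
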
375static krwlock_t pmap_main_lock __cacheline_aligned; 375static krwlock_t pmap_main_lock __cacheline_aligned;
376static kmutex_t pmap_all_pmaps_lock __cacheline_aligned; 376static kmutex_t pmap_all_pmaps_lock __cacheline_aligned;
377static krwlock_t pmap_growkernel_lock __cacheline_aligned; 377static krwlock_t pmap_growkernel_lock __cacheline_aligned;
378 378
379#define PMAP_MAP_TO_HEAD_LOCK() rw_enter(&pmap_main_lock, RW_READER) 379#define PMAP_MAP_TO_HEAD_LOCK() rw_enter(&pmap_main_lock, RW_READER)
380#define PMAP_MAP_TO_HEAD_UNLOCK() rw_exit(&pmap_main_lock) 380#define PMAP_MAP_TO_HEAD_UNLOCK() rw_exit(&pmap_main_lock)
381#define PMAP_HEAD_TO_MAP_LOCK() rw_enter(&pmap_main_lock, RW_WRITER) 381#define PMAP_HEAD_TO_MAP_LOCK() rw_enter(&pmap_main_lock, RW_WRITER)
382#define PMAP_HEAD_TO_MAP_UNLOCK() rw_exit(&pmap_main_lock) 382#define PMAP_HEAD_TO_MAP_UNLOCK() rw_exit(&pmap_main_lock)
383 383
384static union { 384static union {
385 kmutex_t lock; 385 kmutex_t lock;
386 uint8_t pad[COHERENCY_UNIT]; 386 uint8_t pad[COHERENCY_UNIT];
387} pmap_pvh_locks[64] __cacheline_aligned; 387} pmap_pvh_locks[64] __cacheline_aligned;
388 388
389#define PVH_LOCK_HASH(pg) \ 389#define PVH_LOCK_HASH(pg) \
390 ((((uintptr_t)(pg)) >> 6) & 63) 390 ((((uintptr_t)(pg)) >> 6) & 63)
391 391
392static inline kmutex_t * 392static inline kmutex_t *
393pmap_pvh_lock(struct vm_page *pg) 393pmap_pvh_lock(struct vm_page *pg)
394{ 394{
395 return &pmap_pvh_locks[PVH_LOCK_HASH(pg)].lock; 395 return &pmap_pvh_locks[PVH_LOCK_HASH(pg)].lock;
396} 396}
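
The hashed, cache-line-padded lock array above is a generally useful pattern; here is a user-space sketch with pthreads, where the 64-byte line size and all names are assumptions of the sketch.

#include <pthread.h>
#include <stdint.h>

#define CACHE_LINE	64	/* assumed coherency unit */

/* Pad each lock to a full cache line to avoid false sharing. */
static union {
	pthread_mutex_t lock;
	uint8_t pad[CACHE_LINE];
} obj_locks[64];

static void
obj_locks_init(void)
{
	for (int i = 0; i < 64; i++)
		pthread_mutex_init(&obj_locks[i].lock, NULL);
}

/*
 * Hash an object's address to one of the locks.  Shifting right by 6
 * first discards low bits that are near-constant for aligned objects.
 */
static pthread_mutex_t *
obj_lock(const void *obj)
{
	return &obj_locks[((uintptr_t)obj >> 6) & 63].lock;
}

int
main(void)
{
	int x;

	obj_locks_init();
	pthread_mutex_lock(obj_lock(&x));
	pthread_mutex_unlock(obj_lock(&x));
	return 0;
}

The trade-off is the usual one for hashed locks: unrelated objects occasionally contend on the same mutex, in exchange for bounded, statically allocated lock storage.
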
397 397
398static union { 398static union {
399 struct { 399 struct {
400 kmutex_t lock; 400 kmutex_t lock;
401 kmutex_t activation_lock; 401 kmutex_t activation_lock;
402 } locks; 402 } locks;
403 uint8_t pad[COHERENCY_UNIT]; 403 uint8_t pad[COHERENCY_UNIT];
404} pmap_pmap_locks[64] __cacheline_aligned; 404} pmap_pmap_locks[64] __cacheline_aligned;
405 405
406#define PMAP_LOCK_HASH(pm) \ 406#define PMAP_LOCK_HASH(pm) \
407 ((((uintptr_t)(pm)) >> 6) & 63) 407 ((((uintptr_t)(pm)) >> 6) & 63)
408 408
409static inline kmutex_t * 409static inline kmutex_t *
410pmap_pmap_lock(pmap_t const pmap) 410pmap_pmap_lock(pmap_t const pmap)
411{ 411{
412 return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.lock; 412 return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.lock;
413} 413}
414 414
415static inline kmutex_t * 415static inline kmutex_t *
416pmap_activation_lock(pmap_t const pmap) 416pmap_activation_lock(pmap_t const pmap)
417{ 417{
418 return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.activation_lock; 418 return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.activation_lock;
419} 419}
420 420
421#define PMAP_LOCK(pmap) mutex_enter(pmap_pmap_lock(pmap)) 421#define PMAP_LOCK(pmap) mutex_enter(pmap_pmap_lock(pmap))
422#define PMAP_UNLOCK(pmap) mutex_exit(pmap_pmap_lock(pmap)) 422#define PMAP_UNLOCK(pmap) mutex_exit(pmap_pmap_lock(pmap))
423 423
424#define PMAP_ACT_LOCK(pmap) mutex_spin_enter(pmap_activation_lock(pmap)) 424#define PMAP_ACT_LOCK(pmap) mutex_spin_enter(pmap_activation_lock(pmap))
425#define PMAP_ACT_TRYLOCK(pmap) mutex_tryenter(pmap_activation_lock(pmap)) 425#define PMAP_ACT_TRYLOCK(pmap) mutex_tryenter(pmap_activation_lock(pmap))
426#define PMAP_ACT_UNLOCK(pmap) mutex_spin_exit(pmap_activation_lock(pmap)) 426#define PMAP_ACT_UNLOCK(pmap) mutex_spin_exit(pmap_activation_lock(pmap))
427 427
428#if defined(MULTIPROCESSOR) 428#if defined(MULTIPROCESSOR)
429#define pmap_all_cpus() cpus_running 429#define pmap_all_cpus() cpus_running
430#else 430#else
431#define pmap_all_cpus() ~0UL 431#define pmap_all_cpus() ~0UL
432#endif /* MULTIPROCESSOR */ 432#endif /* MULTIPROCESSOR */
433 433
434/* 434/*
435 * TLB management. 435 * TLB management.
436 * 436 *
437 * TLB invalidations need to be performed on local and remote CPUs 437 * TLB invalidations need to be performed on local and remote CPUs
438 * whenever parts of the PTE that the hardware or PALcode understands 438 * whenever parts of the PTE that the hardware or PALcode understands
 439 * change. In order to amortize the cost of these operations, we will 439 * change. In order to amortize the cost of these operations, we will
440 * queue up to 8 addresses to invalidate in a batch. Any more than 440 * queue up to 8 addresses to invalidate in a batch. Any more than
441 * that, and we will hit the entire TLB. 441 * that, and we will hit the entire TLB.
 442 * 442 *
443 * Some things that add complexity: 443 * Some things that add complexity:
444 * 444 *
445 * ==> ASNs. A CPU may have valid TLB entries for other than the current 445 * ==> ASNs. A CPU may have valid TLB entries for other than the current
 446 * address space. We can only invalidate TLB entries for the current 446 * address space. We can only invalidate TLB entries for the current
447 * address space, so when asked to invalidate a VA for the non-current 447 * address space, so when asked to invalidate a VA for the non-current
 448 * pmap on a given CPU, we simply invalidate the ASN for that pmap/CPU 448 * pmap on a given CPU, we simply invalidate the ASN for that pmap/CPU
 449 * tuple so that a new one is allocated on the next activation on that 449 * tuple so that a new one is allocated on the next activation on that
450 * CPU. N.B. that for CPUs that don't implement ASNs, SWPCTX does all 450 * CPU. N.B. that for CPUs that don't implement ASNs, SWPCTX does all
451 * the work necessary, so we can skip some work in the pmap module 451 * the work necessary, so we can skip some work in the pmap module
452 * itself. 452 * itself.
453 * 453 *
454 * When a pmap is activated on a given CPU, we set a corresponding 454 * When a pmap is activated on a given CPU, we set a corresponding
455 * bit in pmap::pm_cpus, indicating that it potentially has valid 455 * bit in pmap::pm_cpus, indicating that it potentially has valid
456 * TLB entries for that address space. This bitmap is then used to 456 * TLB entries for that address space. This bitmap is then used to
457 * determine which remote CPUs need to be notified of invalidations. 457 * determine which remote CPUs need to be notified of invalidations.
458 * The bit is cleared when the ASN is invalidated on that CPU. 458 * The bit is cleared when the ASN is invalidated on that CPU.
459 * 459 *
460 * In order to serialize with activating an address space on a 460 * In order to serialize with activating an address space on a
 461 * given CPU (so that we can reliably send notifications only to 461 * given CPU (so that we can reliably send notifications only to
462 * relevant remote CPUs), we acquire the pmap lock in pmap_activate() 462 * relevant remote CPUs), we acquire the pmap lock in pmap_activate()
463 * and also hold the lock while remote shootdowns take place. 463 * and also hold the lock while remote shootdowns take place.
464 * This does not apply to the kernel pmap; all CPUs are notified about 464 * This does not apply to the kernel pmap; all CPUs are notified about
465 * invalidations for the kernel pmap, and the pmap lock is not held 465 * invalidations for the kernel pmap, and the pmap lock is not held
466 * in pmap_activate() for the kernel pmap. 466 * in pmap_activate() for the kernel pmap.
467 * 467 *
468 * ==> P->V operations (e.g. pmap_page_protect()) may require sending 468 * ==> P->V operations (e.g. pmap_page_protect()) may require sending
469 * invalidations for multiple address spaces. We only track one 469 * invalidations for multiple address spaces. We only track one
470 * address space at a time, and if we encounter more than one, then 470 * address space at a time, and if we encounter more than one, then
471 * the notification each CPU gets is to hit the entire TLB. Note 471 * the notification each CPU gets is to hit the entire TLB. Note
472 * also that we can't serialize with pmap_activate() in this case, 472 * also that we can't serialize with pmap_activate() in this case,
473 * so all CPUs will get the notification, and they check when 473 * so all CPUs will get the notification, and they check when
474 * processing the notification if the pmap is current on that CPU. 474 * processing the notification if the pmap is current on that CPU.
475 * 475 *
476 * Invalidation information is gathered into a pmap_tlb_context structure 476 * Invalidation information is gathered into a pmap_tlb_context structure
477 * that includes room for 8 VAs, the pmap the VAs belong to, a bitmap of 477 * that includes room for 8 VAs, the pmap the VAs belong to, a bitmap of
478 * CPUs to be notified, and a list for PT pages that are freed during 478 * CPUs to be notified, and a list for PT pages that are freed during
 479 * removal of mappings. The number of valid addresses in the list as 479 * removal of mappings. The number of valid addresses in the list as
 480 * well as flags are squeezed into the lower bits of the first two VAs. 480 * well as flags are squeezed into the lower bits of the first two VAs.
481 * Storage for this structure is allocated on the stack. We need to be 481 * Storage for this structure is allocated on the stack. We need to be
 482 * careful to keep the size of this structure under control. 482 * careful to keep the size of this structure under control.
483 * 483 *
484 * When notifying remote CPUs, we acquire the tlb_lock (which also 484 * When notifying remote CPUs, we acquire the tlb_lock (which also
485 * blocks IPIs), record the pointer to our context structure, set a 485 * blocks IPIs), record the pointer to our context structure, set a
 486 * global bitmap of CPUs to be notified, and then send the IPIs to 486 * global bitmap of CPUs to be notified, and then send the IPIs to
487 * each victim. While the other CPUs are in-flight, we then perform 487 * each victim. While the other CPUs are in-flight, we then perform
488 * any invalidations necessary on the local CPU. Once that is done, 488 * any invalidations necessary on the local CPU. Once that is done,
 489 * we then wait for the global context pointer to be cleared, which 489 * we then wait for the global context pointer to be cleared, which
 490 * will be done by the final remote CPU to complete its work. This 490 * will be done by the final remote CPU to complete its work. This
 491 * method reduces cache line contention during processing. 491 * method reduces cache line contention during processing.
492 * 492 *
 493 * When removing mappings in user pmaps, this implementation frees page 493 * When removing mappings in user pmaps, this implementation frees page
494 * table pages back to the VM system once they contain no valid mappings. 494 * table pages back to the VM system once they contain no valid mappings.
 495 * As we do this, we must take care to invalidate TLB entries that the 495 * As we do this, we must take care to invalidate TLB entries that the
496 * CPU might hold for the respective recursive VPT mappings. This must 496 * CPU might hold for the respective recursive VPT mappings. This must
497 * be done whenever an L1 or L2 PTE is invalidated. Until these VPT 497 * be done whenever an L1 or L2 PTE is invalidated. Until these VPT
498 * translations are invalidated, the PT pages must not be reused. For 498 * translations are invalidated, the PT pages must not be reused. For
 499 * this reason, we keep a list of freed PT pages in the context structure 499 * this reason, we keep a list of freed PT pages in the context structure
500 * and drain them off once all invalidations are complete. 500 * and drain them off once all invalidations are complete.
501 * 501 *
502 * NOTE: The value of TLB_CTX_MAXVA is tuned to accommodate the UBC 502 * NOTE: The value of TLB_CTX_MAXVA is tuned to accommodate the UBC
503 * window size (defined as 64KB on alpha in <machine/vmparam.h>). 503 * window size (defined as 64KB on alpha in <machine/vmparam.h>).
504 */ 504 */
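
The batch-then-degrade policy from the first paragraph above can be sketched on its own, independent of the address packing that follows. The constants and names here are invented, with TLB_CTX_MAXVA below as the kernel's analogue of TLB_BATCH_MAX.

#include <stdbool.h>
#include <stddef.h>

#define TLB_BATCH_MAX	8	/* mirrors TLB_CTX_MAXVA below */

struct tlb_batch {
	bool all;			/* overflowed: flush the whole TLB */
	size_t count;			/* valid entries in va[] */
	unsigned long va[TLB_BATCH_MAX];
};

static void
tlb_batch_add(struct tlb_batch *b, unsigned long va)
{
	if (b->all)
		return;			/* already degraded to a full flush */
	if (b->count == TLB_BATCH_MAX) {
		b->all = true;		/* ninth address: give up on per-VA */
		return;
	}
	b->va[b->count++] = va;
}

static void
tlb_batch_issue(const struct tlb_batch *b)
{
	if (b->all) {
		/* tbia()/tbiap() analogue: invalidate everything */
		return;
	}
	for (size_t i = 0; i < b->count; i++) {
		/* tbis(b->va[i]) analogue: invalidate one entry */
	}
}

int
main(void)
{
	struct tlb_batch b = { .all = false, .count = 0 };

	for (unsigned long va = 0; va < 9 * 0x2000UL; va += 0x2000UL)
		tlb_batch_add(&b, va);	/* the ninth add trips the overflow */
	tlb_batch_issue(&b);
	return b.all ? 0 : 1;
}
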
505 505
506#define TLB_CTX_MAXVA 8 506#define TLB_CTX_MAXVA 8
507#define TLB_CTX_ALLVA PAGE_MASK 507#define TLB_CTX_ALLVA PAGE_MASK
508 508
509#define TLB_CTX_F_ASM __BIT(0) 509#define TLB_CTX_F_ASM __BIT(0)
510#define TLB_CTX_F_IMB __BIT(1) 510#define TLB_CTX_F_IMB __BIT(1)
511#define TLB_CTX_F_KIMB __BIT(2) 511#define TLB_CTX_F_KIMB __BIT(2)
512#define TLB_CTX_F_PV __BIT(3) 512#define TLB_CTX_F_PV __BIT(3)
513#define TLB_CTX_F_MULTI __BIT(4) 513#define TLB_CTX_F_MULTI __BIT(4)
514 514
515#define TLB_CTX_COUNT(ctx) ((ctx)->t_addrdata[0] & PAGE_MASK) 515#define TLB_CTX_COUNT(ctx) ((ctx)->t_addrdata[0] & PAGE_MASK)
516#define TLB_CTX_INC_COUNT(ctx) (ctx)->t_addrdata[0]++ 516#define TLB_CTX_INC_COUNT(ctx) (ctx)->t_addrdata[0]++
517#define TLB_CTX_SET_ALLVA(ctx) (ctx)->t_addrdata[0] |= TLB_CTX_ALLVA 517#define TLB_CTX_SET_ALLVA(ctx) (ctx)->t_addrdata[0] |= TLB_CTX_ALLVA
518 518
519#define TLB_CTX_FLAGS(ctx) ((ctx)->t_addrdata[1] & PAGE_MASK) 519#define TLB_CTX_FLAGS(ctx) ((ctx)->t_addrdata[1] & PAGE_MASK)
520#define TLB_CTX_SET_FLAG(ctx, f) (ctx)->t_addrdata[1] |= (f) 520#define TLB_CTX_SET_FLAG(ctx, f) (ctx)->t_addrdata[1] |= (f)
521 521
522#define TLB_CTX_VA(ctx, i) ((ctx)->t_addrdata[(i)] & ~PAGE_MASK) 522#define TLB_CTX_VA(ctx, i) ((ctx)->t_addrdata[(i)] & ~PAGE_MASK)
523#define TLB_CTX_SETVA(ctx, i, va) \ 523#define TLB_CTX_SETVA(ctx, i, va) \
524 (ctx)->t_addrdata[(i)] = (va) | ((ctx)->t_addrdata[(i)] & PAGE_MASK) 524 (ctx)->t_addrdata[(i)] = (va) | ((ctx)->t_addrdata[(i)] & PAGE_MASK)
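
Because the queued VAs are page-aligned, their low PAGE_MASK bits are free, and the macros above store the entry count and flags there. A standalone sketch of the same packing trick; the 13-bit shift matches the 8 KB alpha page size, everything else is invented.

#include <assert.h>
#include <stdint.h>

#define PG_SHIFT	13			/* 8 KB pages, as on alpha */
#define PG_MASK		((uintptr_t)(1UL << PG_SHIFT) - 1)

int
main(void)
{
	uintptr_t va = (uintptr_t)0x12345 << PG_SHIFT;	/* page-aligned */
	uintptr_t slot;

	slot = va | 3;				/* pack a count of 3 */
	assert((slot & ~PG_MASK) == va);	/* TLB_CTX_VA() analogue */
	assert((slot & PG_MASK) == 3);		/* TLB_CTX_COUNT() analogue */

	slot++;					/* TLB_CTX_INC_COUNT() analogue */
	assert((slot & PG_MASK) == 4);

	slot |= PG_MASK;			/* TLB_CTX_SET_ALLVA() analogue */
	assert((slot & PG_MASK) == PG_MASK);
	return 0;
}

Packing the metadata into otherwise-unused bits keeps the on-stack context at exactly TLB_CTX_MAXVA words of address data, which matters given the comment's concern about keeping the structure small.
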
525 525
526struct pmap_tlb_context { 526struct pmap_tlb_context {
527 uintptr_t t_addrdata[TLB_CTX_MAXVA]; 527 uintptr_t t_addrdata[TLB_CTX_MAXVA];
528 pmap_t t_pmap; 528 pmap_t t_pmap;
529 LIST_HEAD(, vm_page) t_freeptq; 529 LIST_HEAD(, vm_page) t_freeptq;
530}; 530};
531 531
532static struct { 532static struct {
533 kmutex_t lock; 533 kmutex_t lock;
534 struct evcnt events; 534 struct evcnt events;
535} tlb_shootdown __cacheline_aligned; 535} tlb_shootdown __cacheline_aligned;
536#define tlb_lock tlb_shootdown.lock 536#define tlb_lock tlb_shootdown.lock
537#define tlb_evcnt tlb_shootdown.events 537#define tlb_evcnt tlb_shootdown.events
538#if defined(MULTIPROCESSOR) 538#if defined(MULTIPROCESSOR)
539static const struct pmap_tlb_context *tlb_context __cacheline_aligned; 539static const struct pmap_tlb_context *tlb_context __cacheline_aligned;
540static unsigned long tlb_pending __cacheline_aligned; 540static unsigned long tlb_pending __cacheline_aligned;
541#endif /* MULTIPROCESSOR */ 541#endif /* MULTIPROCESSOR */
542 542
543#if defined(TLB_STATS) 543#if defined(TLB_STATS)
544#define TLB_COUNT_DECL(cnt) static struct evcnt tlb_stat_##cnt 544#define TLB_COUNT_DECL(cnt) static struct evcnt tlb_stat_##cnt
545#define TLB_COUNT(cnt) atomic_inc_64(&tlb_stat_##cnt .ev_count) 545#define TLB_COUNT(cnt) atomic_inc_64(&tlb_stat_##cnt .ev_count)
546#define TLB_COUNT_ATTACH(cnt) \ 546#define TLB_COUNT_ATTACH(cnt) \
547 evcnt_attach_dynamic_nozero(&tlb_stat_##cnt, EVCNT_TYPE_MISC, \ 547 evcnt_attach_dynamic_nozero(&tlb_stat_##cnt, EVCNT_TYPE_MISC, \
548 NULL, "TLB", #cnt) 548 NULL, "TLB", #cnt)
549 549
550TLB_COUNT_DECL(invalidate_multi_tbia); 550TLB_COUNT_DECL(invalidate_multi_tbia);
551TLB_COUNT_DECL(invalidate_multi_tbiap); 551TLB_COUNT_DECL(invalidate_multi_tbiap);
552TLB_COUNT_DECL(invalidate_multi_imb); 552TLB_COUNT_DECL(invalidate_multi_imb);
553 553
554TLB_COUNT_DECL(invalidate_kern_tbia); 554TLB_COUNT_DECL(invalidate_kern_tbia);
555TLB_COUNT_DECL(invalidate_kern_tbis); 555TLB_COUNT_DECL(invalidate_kern_tbis);
556TLB_COUNT_DECL(invalidate_kern_imb); 556TLB_COUNT_DECL(invalidate_kern_imb);
557 557
558TLB_COUNT_DECL(invalidate_user_not_current); 558TLB_COUNT_DECL(invalidate_user_not_current);
559TLB_COUNT_DECL(invalidate_user_lazy_imb); 559TLB_COUNT_DECL(invalidate_user_lazy_imb);
560TLB_COUNT_DECL(invalidate_user_tbiap); 560TLB_COUNT_DECL(invalidate_user_tbiap);
561TLB_COUNT_DECL(invalidate_user_tbis); 561TLB_COUNT_DECL(invalidate_user_tbis);
562 562
563TLB_COUNT_DECL(shootdown_kernel); 563TLB_COUNT_DECL(shootdown_kernel);
564TLB_COUNT_DECL(shootdown_user); 564TLB_COUNT_DECL(shootdown_user);
565TLB_COUNT_DECL(shootdown_imb); 565TLB_COUNT_DECL(shootdown_imb);
566TLB_COUNT_DECL(shootdown_kimb); 566TLB_COUNT_DECL(shootdown_kimb);
567TLB_COUNT_DECL(shootdown_overflow); 567TLB_COUNT_DECL(shootdown_overflow);
568 568
569TLB_COUNT_DECL(shootdown_all_user); 569TLB_COUNT_DECL(shootdown_all_user);
570TLB_COUNT_DECL(shootdown_all_user_imb); 570TLB_COUNT_DECL(shootdown_all_user_imb);
571 571
572TLB_COUNT_DECL(shootdown_pv); 572TLB_COUNT_DECL(shootdown_pv);
573TLB_COUNT_DECL(shootdown_pv_multi); 573TLB_COUNT_DECL(shootdown_pv_multi);
574 574
575TLB_COUNT_DECL(shootnow_over_notify); 575TLB_COUNT_DECL(shootnow_over_notify);
576TLB_COUNT_DECL(shootnow_remote); 576TLB_COUNT_DECL(shootnow_remote);
577 577
578TLB_COUNT_DECL(reason_remove_kernel); 578TLB_COUNT_DECL(reason_remove_kernel);
579TLB_COUNT_DECL(reason_remove_user); 579TLB_COUNT_DECL(reason_remove_user);
580TLB_COUNT_DECL(reason_page_protect_read); 580TLB_COUNT_DECL(reason_page_protect_read);
581TLB_COUNT_DECL(reason_page_protect_none); 581TLB_COUNT_DECL(reason_page_protect_none);
582TLB_COUNT_DECL(reason_protect); 582TLB_COUNT_DECL(reason_protect);
583TLB_COUNT_DECL(reason_enter_kernel); 583TLB_COUNT_DECL(reason_enter_kernel);
584TLB_COUNT_DECL(reason_enter_user); 584TLB_COUNT_DECL(reason_enter_user);
585TLB_COUNT_DECL(reason_kenter); 585TLB_COUNT_DECL(reason_kenter);
586TLB_COUNT_DECL(reason_enter_l2pt_delref); 586TLB_COUNT_DECL(reason_enter_l2pt_delref);
587TLB_COUNT_DECL(reason_enter_l3pt_delref); 587TLB_COUNT_DECL(reason_enter_l3pt_delref);
588TLB_COUNT_DECL(reason_kremove); 588TLB_COUNT_DECL(reason_kremove);
589TLB_COUNT_DECL(reason_clear_modify); 589TLB_COUNT_DECL(reason_clear_modify);
590TLB_COUNT_DECL(reason_clear_reference); 590TLB_COUNT_DECL(reason_clear_reference);
591TLB_COUNT_DECL(reason_emulate_reference); 591TLB_COUNT_DECL(reason_emulate_reference);
592 592
593TLB_COUNT_DECL(asn_reuse); 593TLB_COUNT_DECL(asn_reuse);
594TLB_COUNT_DECL(asn_newgen); 594TLB_COUNT_DECL(asn_newgen);
595TLB_COUNT_DECL(asn_assign); 595TLB_COUNT_DECL(asn_assign);
596 596
597TLB_COUNT_DECL(activate_both_change); 597TLB_COUNT_DECL(activate_both_change);
598TLB_COUNT_DECL(activate_asn_change); 598TLB_COUNT_DECL(activate_asn_change);
599TLB_COUNT_DECL(activate_ptbr_change); 599TLB_COUNT_DECL(activate_ptbr_change);
600TLB_COUNT_DECL(activate_swpctx); 600TLB_COUNT_DECL(activate_swpctx);
601TLB_COUNT_DECL(activate_skip_swpctx); 601TLB_COUNT_DECL(activate_skip_swpctx);
602 602
603#else /* ! TLB_STATS */ 603#else /* ! TLB_STATS */
604#define TLB_COUNT(cnt) __nothing 604#define TLB_COUNT(cnt) __nothing
605#define TLB_COUNT_ATTACH(cnt) __nothing 605#define TLB_COUNT_ATTACH(cnt) __nothing
606#endif /* TLB_STATS */ 606#endif /* TLB_STATS */
607 607
608static void 608static void
609pmap_tlb_init(void) 609pmap_tlb_init(void)
610{ 610{
611 /* mutex is initialized in pmap_bootstrap(). */ 611 /* mutex is initialized in pmap_bootstrap(). */
612 612
613 evcnt_attach_dynamic_nozero(&tlb_evcnt, EVCNT_TYPE_MISC, 613 evcnt_attach_dynamic_nozero(&tlb_evcnt, EVCNT_TYPE_MISC,
614 NULL, "TLB", "shootdown"); 614 NULL, "TLB", "shootdown");
615 615
616 TLB_COUNT_ATTACH(invalidate_multi_tbia); 616 TLB_COUNT_ATTACH(invalidate_multi_tbia);
617 TLB_COUNT_ATTACH(invalidate_multi_tbiap); 617 TLB_COUNT_ATTACH(invalidate_multi_tbiap);
618 TLB_COUNT_ATTACH(invalidate_multi_imb); 618 TLB_COUNT_ATTACH(invalidate_multi_imb);
619 619
620 TLB_COUNT_ATTACH(invalidate_kern_tbia); 620 TLB_COUNT_ATTACH(invalidate_kern_tbia);
621 TLB_COUNT_ATTACH(invalidate_kern_tbis); 621 TLB_COUNT_ATTACH(invalidate_kern_tbis);
622 TLB_COUNT_ATTACH(invalidate_kern_imb); 622 TLB_COUNT_ATTACH(invalidate_kern_imb);
623 623
624 TLB_COUNT_ATTACH(invalidate_user_not_current); 624 TLB_COUNT_ATTACH(invalidate_user_not_current);
625 TLB_COUNT_ATTACH(invalidate_user_lazy_imb); 625 TLB_COUNT_ATTACH(invalidate_user_lazy_imb);
626 TLB_COUNT_ATTACH(invalidate_user_tbiap); 626 TLB_COUNT_ATTACH(invalidate_user_tbiap);
627 TLB_COUNT_ATTACH(invalidate_user_tbis); 627 TLB_COUNT_ATTACH(invalidate_user_tbis);
628 628
629 TLB_COUNT_ATTACH(shootdown_kernel); 629 TLB_COUNT_ATTACH(shootdown_kernel);
630 TLB_COUNT_ATTACH(shootdown_user); 630 TLB_COUNT_ATTACH(shootdown_user);
631 TLB_COUNT_ATTACH(shootdown_imb); 631 TLB_COUNT_ATTACH(shootdown_imb);
632 TLB_COUNT_ATTACH(shootdown_kimb); 632 TLB_COUNT_ATTACH(shootdown_kimb);
633 TLB_COUNT_ATTACH(shootdown_overflow); 633 TLB_COUNT_ATTACH(shootdown_overflow);
634 634
635 TLB_COUNT_ATTACH(shootdown_all_user); 635 TLB_COUNT_ATTACH(shootdown_all_user);
636 TLB_COUNT_ATTACH(shootdown_all_user_imb); 636 TLB_COUNT_ATTACH(shootdown_all_user_imb);
637 637
638 TLB_COUNT_ATTACH(shootdown_pv); 638 TLB_COUNT_ATTACH(shootdown_pv);
639 TLB_COUNT_ATTACH(shootdown_pv_multi); 639 TLB_COUNT_ATTACH(shootdown_pv_multi);
640 640
641 TLB_COUNT_ATTACH(shootnow_over_notify); 641 TLB_COUNT_ATTACH(shootnow_over_notify);
642 TLB_COUNT_ATTACH(shootnow_remote); 642 TLB_COUNT_ATTACH(shootnow_remote);
643 643
644 TLB_COUNT_ATTACH(reason_remove_kernel); 644 TLB_COUNT_ATTACH(reason_remove_kernel);
645 TLB_COUNT_ATTACH(reason_remove_user); 645 TLB_COUNT_ATTACH(reason_remove_user);
646 TLB_COUNT_ATTACH(reason_page_protect_read); 646 TLB_COUNT_ATTACH(reason_page_protect_read);
647 TLB_COUNT_ATTACH(reason_page_protect_none); 647 TLB_COUNT_ATTACH(reason_page_protect_none);
648 TLB_COUNT_ATTACH(reason_protect); 648 TLB_COUNT_ATTACH(reason_protect);
649 TLB_COUNT_ATTACH(reason_enter_kernel); 649 TLB_COUNT_ATTACH(reason_enter_kernel);
650 TLB_COUNT_ATTACH(reason_enter_user); 650 TLB_COUNT_ATTACH(reason_enter_user);
651 TLB_COUNT_ATTACH(reason_kenter); 651 TLB_COUNT_ATTACH(reason_kenter);
652 TLB_COUNT_ATTACH(reason_enter_l2pt_delref); 652 TLB_COUNT_ATTACH(reason_enter_l2pt_delref);
653 TLB_COUNT_ATTACH(reason_enter_l3pt_delref); 653 TLB_COUNT_ATTACH(reason_enter_l3pt_delref);
654 TLB_COUNT_ATTACH(reason_kremove); 654 TLB_COUNT_ATTACH(reason_kremove);
655 TLB_COUNT_ATTACH(reason_clear_modify); 655 TLB_COUNT_ATTACH(reason_clear_modify);
656 TLB_COUNT_ATTACH(reason_clear_reference); 656 TLB_COUNT_ATTACH(reason_clear_reference);
657 657
658 TLB_COUNT_ATTACH(asn_reuse); 658 TLB_COUNT_ATTACH(asn_reuse);
659 TLB_COUNT_ATTACH(asn_newgen); 659 TLB_COUNT_ATTACH(asn_newgen);
660 TLB_COUNT_ATTACH(asn_assign); 660 TLB_COUNT_ATTACH(asn_assign);
661 661
662 TLB_COUNT_ATTACH(activate_both_change); 662 TLB_COUNT_ATTACH(activate_both_change);
663 TLB_COUNT_ATTACH(activate_asn_change); 663 TLB_COUNT_ATTACH(activate_asn_change);
664 TLB_COUNT_ATTACH(activate_ptbr_change); 664 TLB_COUNT_ATTACH(activate_ptbr_change);
665 TLB_COUNT_ATTACH(activate_swpctx); 665 TLB_COUNT_ATTACH(activate_swpctx);
666 TLB_COUNT_ATTACH(activate_skip_swpctx); 666 TLB_COUNT_ATTACH(activate_skip_swpctx);
667} 667}
668 668
669static inline void 669static inline void
670pmap_tlb_context_init(struct pmap_tlb_context * const tlbctx) 670pmap_tlb_context_init(struct pmap_tlb_context * const tlbctx)
671{ 671{
672 /* Initialize the minimum number of fields. */ 672 /* Initialize the minimum number of fields. */
673 tlbctx->t_addrdata[0] = 0; 673 tlbctx->t_addrdata[0] = 0;
674 tlbctx->t_addrdata[1] = 0; 674 tlbctx->t_addrdata[1] = 0;
675 tlbctx->t_pmap = NULL; 675 tlbctx->t_pmap = NULL;
676 LIST_INIT(&tlbctx->t_freeptq); 676 LIST_INIT(&tlbctx->t_freeptq);
677} 677}
678 678
679static void 679static void
680pmap_tlb_shootdown(pmap_t const pmap, vaddr_t const va, 680pmap_tlb_shootdown(pmap_t const pmap, vaddr_t const va,
681 pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) 681 pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx)
682{ 682{
683 KASSERT(pmap != NULL); 683 KASSERT(pmap != NULL);
684 KASSERT((va & PAGE_MASK) == 0); 684 KASSERT((va & PAGE_MASK) == 0);
685 685
686 /* 686 /*
687 * Figure out who needs to hear about this, and the scope 687 * Figure out who needs to hear about this, and the scope
688 * of an all-entries invalidate. 688 * of an all-entries invalidate.
689 */ 689 */
690 if (pmap == pmap_kernel()) { 690 if (pmap == pmap_kernel()) {
691 TLB_COUNT(shootdown_kernel); 691 TLB_COUNT(shootdown_kernel);
692 KASSERT(pte_bits & PG_ASM); 692 KASSERT(pte_bits & PG_ASM);
693 TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_ASM); 693 TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_ASM);
694 694
695 /* Note if an I-stream sync is also needed. */ 695 /* Note if an I-stream sync is also needed. */
696 if (pte_bits & PG_EXEC) { 696 if (pte_bits & PG_EXEC) {
697 TLB_COUNT(shootdown_kimb); 697 TLB_COUNT(shootdown_kimb);
698 TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_KIMB); 698 TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_KIMB);
699 } 699 }
700 } else { 700 } else {
701 TLB_COUNT(shootdown_user); 701 TLB_COUNT(shootdown_user);
702 KASSERT((pte_bits & PG_ASM) == 0); 702 KASSERT((pte_bits & PG_ASM) == 0);
703 703
704 /* Note if an I-stream sync is also needed. */ 704 /* Note if an I-stream sync is also needed. */
705 if (pte_bits & PG_EXEC) { 705 if (pte_bits & PG_EXEC) {
706 TLB_COUNT(shootdown_imb); 706 TLB_COUNT(shootdown_imb);
707 TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB); 707 TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB);
708 } 708 }
709 } 709 }
710 710
711 KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap); 711 KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap);
712 tlbctx->t_pmap = pmap; 712 tlbctx->t_pmap = pmap;
713 713
714 /* 714 /*
715 * If we're already at the max, just tell each active CPU 715 * If we're already at the max, just tell each active CPU
716 * to nail everything. 716 * to nail everything.
717 */ 717 */
718 const uintptr_t count = TLB_CTX_COUNT(tlbctx); 718 const uintptr_t count = TLB_CTX_COUNT(tlbctx);
719 if (count > TLB_CTX_MAXVA) { 719 if (count > TLB_CTX_MAXVA) {
720 return; 720 return;
721 } 721 }
722 if (count == TLB_CTX_MAXVA) { 722 if (count == TLB_CTX_MAXVA) {
723 TLB_COUNT(shootdown_overflow); 723 TLB_COUNT(shootdown_overflow);
724 TLB_CTX_SET_ALLVA(tlbctx); 724 TLB_CTX_SET_ALLVA(tlbctx);
725 return; 725 return;
726 } 726 }
727 727
728 TLB_CTX_SETVA(tlbctx, count, va); 728 TLB_CTX_SETVA(tlbctx, count, va);
729 TLB_CTX_INC_COUNT(tlbctx); 729 TLB_CTX_INC_COUNT(tlbctx);
730} 730}
731 731
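To make the batching contract above concrete, here is a minimal caller sketch (hypothetical; example_shoot_two_pages() is not part of this file, and pte_bits must carry PG_ASM exactly when pmap is the kernel pmap, per the KASSERTs above):

	static void
	example_shoot_two_pages(pmap_t pmap, vaddr_t va, pt_entry_t pte_bits)
	{
		struct pmap_tlb_context tlbctx;

		/* Start with an empty context on the stack. */
		pmap_tlb_context_init(&tlbctx);

		/* Queue both pages; nothing is invalidated yet. */
		pmap_tlb_shootdown(pmap, va, pte_bits, &tlbctx);
		pmap_tlb_shootdown(pmap, va + PAGE_SIZE, pte_bits, &tlbctx);

		/* One notification round flushes both VAs everywhere. */
		pmap_tlb_shootnow(&tlbctx);
	}
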
732static void 732static void
733pmap_tlb_shootdown_all_user(pmap_t const pmap, pt_entry_t const pte_bits, 733pmap_tlb_shootdown_all_user(pmap_t const pmap, pt_entry_t const pte_bits,
734 struct pmap_tlb_context * const tlbctx) 734 struct pmap_tlb_context * const tlbctx)
735{ 735{
736 KASSERT(pmap != pmap_kernel()); 736 KASSERT(pmap != pmap_kernel());
737 737
738 TLB_COUNT(shootdown_all_user); 738 TLB_COUNT(shootdown_all_user);
739 739
740 /* Note if an I-stream sync is also needed. */ 740 /* Note if an I-stream sync is also needed. */
741 if (pte_bits & PG_EXEC) { 741 if (pte_bits & PG_EXEC) {
742 TLB_COUNT(shootdown_all_user_imb); 742 TLB_COUNT(shootdown_all_user_imb);
743 TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB); 743 TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB);
744 } 744 }
745 745
746 TLB_CTX_SET_ALLVA(tlbctx); 746 TLB_CTX_SET_ALLVA(tlbctx);
747} 747}
748 748
749static void 749static void
750pmap_tlb_shootdown_pv(const pv_entry_t pv, pt_entry_t const pte_bits, 750pmap_tlb_shootdown_pv(const pv_entry_t pv, pt_entry_t const pte_bits,
751 struct pmap_tlb_context * const tlbctx) 751 struct pmap_tlb_context * const tlbctx)
752{ 752{
753 uintptr_t flags = TLB_CTX_F_PV; 753 uintptr_t flags = TLB_CTX_F_PV;
754 754
755 TLB_COUNT(shootdown_pv); 755 TLB_COUNT(shootdown_pv);
756 756
757 if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pv->pv_pmap) { 757 if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pv->pv_pmap) {
758 if (tlbctx->t_pmap == NULL) { 758 if (tlbctx->t_pmap == NULL) {
759 pmap_reference(pv->pv_pmap); 759 pmap_reference(pv->pv_pmap);
760 } 760 }
761 pmap_tlb_shootdown(pv->pv_pmap, pv->pv_va, pte_bits, tlbctx); 761 pmap_tlb_shootdown(pv->pv_pmap, pv->pv_va, pte_bits, tlbctx);
762 } else { 762 } else {
763 TLB_COUNT(shootdown_pv_multi); 763 TLB_COUNT(shootdown_pv_multi);
764 flags |= TLB_CTX_F_MULTI; 764 flags |= TLB_CTX_F_MULTI;
765 if (pv->pv_pmap == pmap_kernel()) { 765 if (pv->pv_pmap == pmap_kernel()) {
766 KASSERT(pte_bits & PG_ASM); 766 KASSERT(pte_bits & PG_ASM);
767 flags |= TLB_CTX_F_ASM; 767 flags |= TLB_CTX_F_ASM;
768 } else { 768 } else {
769 KASSERT((pte_bits & PG_ASM) == 0); 769 KASSERT((pte_bits & PG_ASM) == 0);
770 } 770 }
771 771
772 /* 772 /*
773 * No need to distinguish between kernel and user IMB 773 * No need to distinguish between kernel and user IMB
774 * here; see pmap_tlb_invalidate_multi(). 774 * here; see pmap_tlb_invalidate_multi().
775 */ 775 */
776 if (pte_bits & PG_EXEC) { 776 if (pte_bits & PG_EXEC) {
777 flags |= TLB_CTX_F_IMB; 777 flags |= TLB_CTX_F_IMB;
778 } 778 }
779 TLB_CTX_SET_ALLVA(tlbctx); 779 TLB_CTX_SET_ALLVA(tlbctx);
780 } 780 }
781 TLB_CTX_SET_FLAG(tlbctx, flags); 781 TLB_CTX_SET_FLAG(tlbctx, flags);
782} 782}
783 783
784static void 784static void
785pmap_tlb_invalidate_multi(const struct pmap_tlb_context * const tlbctx) 785pmap_tlb_invalidate_multi(const struct pmap_tlb_context * const tlbctx)
786{ 786{
787 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) { 787 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) {
788 TLB_COUNT(invalidate_multi_tbia); 788 TLB_COUNT(invalidate_multi_tbia);
789 ALPHA_TBIA(); 789 ALPHA_TBIA();
790 } else { 790 } else {
791 TLB_COUNT(invalidate_multi_tbiap); 791 TLB_COUNT(invalidate_multi_tbiap);
792 ALPHA_TBIAP(); 792 ALPHA_TBIAP();
793 } 793 }
794 if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_IMB | TLB_CTX_F_KIMB)) { 794 if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_IMB | TLB_CTX_F_KIMB)) {
795 TLB_COUNT(invalidate_multi_imb); 795 TLB_COUNT(invalidate_multi_imb);
796 alpha_pal_imb(); 796 alpha_pal_imb();
797 } 797 }
798} 798}
799 799
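For reference, the four Alpha TLB/I-stream primitives used by these invalidate routines behave as follows (architectural summary, not new code):

	ALPHA_TBIA()     - invalidate every TLB entry, including PG_ASM
	                   (address-space-match, i.e. kernel) entries
	ALPHA_TBIAP()    - invalidate all per-process entries, leaving
	                   PG_ASM entries intact
	ALPHA_TBIS(va)   - invalidate the entry for a single virtual address
	alpha_pal_imb()  - I-stream memory barrier; discards stale
	                   instruction-stream state after code pages change
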
800static void 800static void
801pmap_tlb_invalidate_kernel(const struct pmap_tlb_context * const tlbctx) 801pmap_tlb_invalidate_kernel(const struct pmap_tlb_context * const tlbctx)
802{ 802{
803 const uintptr_t count = TLB_CTX_COUNT(tlbctx); 803 const uintptr_t count = TLB_CTX_COUNT(tlbctx);
804 804
805 if (count == TLB_CTX_ALLVA) { 805 if (count == TLB_CTX_ALLVA) {
806 TLB_COUNT(invalidate_kern_tbia); 806 TLB_COUNT(invalidate_kern_tbia);
807 ALPHA_TBIA(); 807 ALPHA_TBIA();
808 } else { 808 } else {
809 TLB_COUNT(invalidate_kern_tbis); 809 TLB_COUNT(invalidate_kern_tbis);
810 for (uintptr_t i = 0; i < count; i++) { 810 for (uintptr_t i = 0; i < count; i++) {
811 ALPHA_TBIS(TLB_CTX_VA(tlbctx, i)); 811 ALPHA_TBIS(TLB_CTX_VA(tlbctx, i));
812 } 812 }
813 } 813 }
814 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_KIMB) { 814 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_KIMB) {
815 TLB_COUNT(invalidate_kern_imb); 815 TLB_COUNT(invalidate_kern_imb);
816 alpha_pal_imb(); 816 alpha_pal_imb();
817 } 817 }
818} 818}
819 819
820static void 820static void
821pmap_tlb_invalidate(const struct pmap_tlb_context * const tlbctx, 821pmap_tlb_invalidate(const struct pmap_tlb_context * const tlbctx,
822 const struct cpu_info * const ci) 822 const struct cpu_info * const ci)
823{ 823{
824 const uintptr_t count = TLB_CTX_COUNT(tlbctx); 824 const uintptr_t count = TLB_CTX_COUNT(tlbctx);
825 825
826 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_MULTI) { 826 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_MULTI) {
827 pmap_tlb_invalidate_multi(tlbctx); 827 pmap_tlb_invalidate_multi(tlbctx);
828 return; 828 return;
829 } 829 }
830 830
831 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) { 831 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) {
832 pmap_tlb_invalidate_kernel(tlbctx); 832 pmap_tlb_invalidate_kernel(tlbctx);
833 return; 833 return;
834 } 834 }
835 835
836 KASSERT(kpreempt_disabled()); 836 KASSERT(kpreempt_disabled());
837 837
838 pmap_t const pmap = tlbctx->t_pmap; 838 pmap_t const pmap = tlbctx->t_pmap;
839 KASSERT(pmap != NULL); 839 KASSERT(pmap != NULL);
840 840
841 const u_long cpu_mask = 1UL << ci->ci_cpuid; 841 const u_long cpu_mask = 1UL << ci->ci_cpuid;
842 842
843 if (__predict_false(pmap != ci->ci_pmap)) { 843 if (__predict_false(pmap != ci->ci_pmap)) {
844 TLB_COUNT(invalidate_user_not_current); 844 TLB_COUNT(invalidate_user_not_current);
845 845
846 /* 846 /*
847 * For CPUs that don't implement ASNs, the SWPCTX call 847 * For CPUs that don't implement ASNs, the SWPCTX call
848 * does all of the TLB invalidation work for us. 848 * does all of the TLB invalidation work for us.
849 */ 849 */
850 if (__predict_false(pmap_max_asn == 0)) { 850 if (__predict_false(pmap_max_asn == 0)) {
851 return; 851 return;
852 } 852 }
853 853
854 /* 854 /*
855 * We cannot directly invalidate the TLB in this case, 855 * We cannot directly invalidate the TLB in this case,
856 * so force allocation of a new ASN when the pmap becomes 856 * so force allocation of a new ASN when the pmap becomes
857 * active again. 857 * active again.
858 */ 858 */
859 pmap->pm_asni[ci->ci_cpuid].pma_asngen = PMAP_ASNGEN_INVALID; 859 pmap->pm_asni[ci->ci_cpuid].pma_asngen = PMAP_ASNGEN_INVALID;
860 atomic_and_ulong(&pmap->pm_cpus, ~cpu_mask); 860 atomic_and_ulong(&pmap->pm_cpus, ~cpu_mask);
861 861
862 /* 862 /*
863 * This isn't strictly necessary; when we allocate a 863 * This isn't strictly necessary; when we allocate a
864 * new ASN, we're going to clear this bit and skip 864 * new ASN, we're going to clear this bit and skip
865 * syncing the I-stream. But we will keep this bit 865 * syncing the I-stream. But we will keep this bit
866 * of accounting for internal consistency. 866 * of accounting for internal consistency.
867 */ 867 */
868 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { 868 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) {
869 atomic_or_ulong(&pmap->pm_needisync, cpu_mask); 869 atomic_or_ulong(&pmap->pm_needisync, cpu_mask);
870 } 870 }
871 return; 871 return;
872 } 872 }
873 873
874 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { 874 if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) {
875 TLB_COUNT(invalidate_user_lazy_imb); 875 TLB_COUNT(invalidate_user_lazy_imb);
876 atomic_or_ulong(&pmap->pm_needisync, cpu_mask); 876 atomic_or_ulong(&pmap->pm_needisync, cpu_mask);
877 } 877 }
878 878
879 if (count == TLB_CTX_ALLVA) { 879 if (count == TLB_CTX_ALLVA) {
880 /* 880 /*
881 * Another option here for CPUs that implement ASNs is 881 * Another option here for CPUs that implement ASNs is
882 * to allocate a new ASN and do a SWPCTX. That's almost 882 * to allocate a new ASN and do a SWPCTX. That's almost
883 * certainly faster than a TBIAP, but would require us 883 * certainly faster than a TBIAP, but would require us
884 * to synchronize against IPIs in pmap_activate(). 884 * to synchronize against IPIs in pmap_activate().
885 */ 885 */
886 TLB_COUNT(invalidate_user_tbiap); 886 TLB_COUNT(invalidate_user_tbiap);
887 KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) == 0); 887 KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) == 0);
888 ALPHA_TBIAP(); 888 ALPHA_TBIAP();
889 } else { 889 } else {
890 TLB_COUNT(invalidate_user_tbis); 890 TLB_COUNT(invalidate_user_tbis);
891 for (uintptr_t i = 0; i < count; i++) { 891 for (uintptr_t i = 0; i < count; i++) {
892 ALPHA_TBIS(TLB_CTX_VA(tlbctx, i)); 892 ALPHA_TBIS(TLB_CTX_VA(tlbctx, i));
893 } 893 }
894 } 894 }
895} 895}
896 896
897static void 897static void
898pmap_tlb_shootnow(const struct pmap_tlb_context * const tlbctx) 898pmap_tlb_shootnow(const struct pmap_tlb_context * const tlbctx)
899{ 899{
900 900
901 if (TLB_CTX_COUNT(tlbctx) == 0) { 901 if (TLB_CTX_COUNT(tlbctx) == 0) {
902 /* No work to do. */ 902 /* No work to do. */
903 return; 903 return;
904 } 904 }
905 905
906 /* 906 /*
907 * Acquire the shootdown mutex. This will also block IPL_VM 907 * Acquire the shootdown mutex. This will also block IPL_VM
908 * interrupts and disable preemption. It is critically important 908 * interrupts and disable preemption. It is critically important
909 * that IPIs not be blocked in this routine. 909 * that IPIs not be blocked in this routine.
910 */ 910 */
911 KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) < ALPHA_PSL_IPL_CLOCK); 911 KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) < ALPHA_PSL_IPL_CLOCK);
912 mutex_spin_enter(&tlb_lock); 912 mutex_spin_enter(&tlb_lock);
913 tlb_evcnt.ev_count++; 913 tlb_evcnt.ev_count++;
914 914
915 const struct cpu_info *ci = curcpu(); 915 const struct cpu_info *ci = curcpu();
916 const u_long this_cpu = 1UL << ci->ci_cpuid; 916 const u_long this_cpu = 1UL << ci->ci_cpuid;
917 u_long active_cpus; 917 u_long active_cpus;
918 bool activation_locked; 918 bool activation_locked;
919 919
920 /* 920 /*
921 * Figure out who to notify. If it's for the kernel or 921 * Figure out who to notify. If it's for the kernel or
 922 * multiple address spaces, we notify everybody. If 922 * multiple address spaces, we notify everybody. If
923 * it's a single user pmap, then we try to acquire the 923 * it's a single user pmap, then we try to acquire the
924 * activation lock so we can get an accurate accounting 924 * activation lock so we can get an accurate accounting
925 * of who needs to be notified. If we can't acquire 925 * of who needs to be notified. If we can't acquire
926 * the activation lock, then just notify everyone and 926 * the activation lock, then just notify everyone and
927 * let them sort it out when they process the IPI. 927 * let them sort it out when they process the IPI.
928 */ 928 */
929 if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_ASM | TLB_CTX_F_MULTI)) { 929 if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_ASM | TLB_CTX_F_MULTI)) {
930 active_cpus = pmap_all_cpus(); 930 active_cpus = pmap_all_cpus();
931 activation_locked = false; 931 activation_locked = false;
932 } else { 932 } else {
933 KASSERT(tlbctx->t_pmap != NULL); 933 KASSERT(tlbctx->t_pmap != NULL);
934 activation_locked = PMAP_ACT_TRYLOCK(tlbctx->t_pmap); 934 activation_locked = PMAP_ACT_TRYLOCK(tlbctx->t_pmap);
935 if (__predict_true(activation_locked)) { 935 if (__predict_true(activation_locked)) {
936 active_cpus = tlbctx->t_pmap->pm_cpus; 936 active_cpus = tlbctx->t_pmap->pm_cpus;
937 } else { 937 } else {
938 TLB_COUNT(shootnow_over_notify); 938 TLB_COUNT(shootnow_over_notify);
939 active_cpus = pmap_all_cpus(); 939 active_cpus = pmap_all_cpus();
940 } 940 }
941 } 941 }
942 942
943#if defined(MULTIPROCESSOR) 943#if defined(MULTIPROCESSOR)
944 /* 944 /*
945 * If there are remote CPUs that need to do work, get them 945 * If there are remote CPUs that need to do work, get them
946 * started now. 946 * started now.
947 */ 947 */
948 const u_long remote_cpus = active_cpus & ~this_cpu; 948 const u_long remote_cpus = active_cpus & ~this_cpu;
949 KASSERT(tlb_context == NULL); 949 KASSERT(tlb_context == NULL);
950 if (remote_cpus) { 950 if (remote_cpus) {
951 TLB_COUNT(shootnow_remote); 951 TLB_COUNT(shootnow_remote);
952 tlb_context = tlbctx; 952 tlb_context = tlbctx;
953 tlb_pending = remote_cpus; 953 tlb_pending = remote_cpus;
954 alpha_multicast_ipi(remote_cpus, ALPHA_IPI_SHOOTDOWN); 954 alpha_multicast_ipi(remote_cpus, ALPHA_IPI_SHOOTDOWN);
955 } 955 }
956#endif /* MULTIPROCESSOR */ 956#endif /* MULTIPROCESSOR */
957 957
958 /* 958 /*
959 * Now that the remotes have been notified, release the 959 * Now that the remotes have been notified, release the
960 * activation lock. 960 * activation lock.
961 */ 961 */
962 if (activation_locked) { 962 if (activation_locked) {
963 KASSERT(tlbctx->t_pmap != NULL); 963 KASSERT(tlbctx->t_pmap != NULL);
964 PMAP_ACT_UNLOCK(tlbctx->t_pmap); 964 PMAP_ACT_UNLOCK(tlbctx->t_pmap);
965 } 965 }
966 966
967 /* 967 /*
968 * Do any work that we might need to do. We don't need to 968 * Do any work that we might need to do. We don't need to
969 * synchronize with activation here because we know that 969 * synchronize with activation here because we know that
970 * for the current CPU, activation status will not change. 970 * for the current CPU, activation status will not change.
971 */ 971 */
972 if (active_cpus & this_cpu) { 972 if (active_cpus & this_cpu) {
973 pmap_tlb_invalidate(tlbctx, ci); 973 pmap_tlb_invalidate(tlbctx, ci);
974 } 974 }
975 975
976#if defined(MULTIPROCESSOR) 976#if defined(MULTIPROCESSOR)
977 /* Wait for remote CPUs to finish. */ 977 /* Wait for remote CPUs to finish. */
978 if (remote_cpus) { 978 if (remote_cpus) {
979 int backoff = SPINLOCK_BACKOFF_MIN; 979 int backoff = SPINLOCK_BACKOFF_MIN;
980 u_int spins = 0; 980 u_int spins = 0;
981 981
982 while (atomic_load_relaxed(&tlb_context) != NULL) { 982 while (atomic_load_relaxed(&tlb_context) != NULL) {
983 SPINLOCK_BACKOFF(backoff); 983 SPINLOCK_BACKOFF(backoff);
984 if (spins++ > 0x0fffffff) { 984 if (spins++ > 0x0fffffff) {
985 printf("TLB LOCAL MASK = 0x%016lx\n", 985 printf("TLB LOCAL MASK = 0x%016lx\n",
986 this_cpu); 986 this_cpu);
987 printf("TLB REMOTE MASK = 0x%016lx\n", 987 printf("TLB REMOTE MASK = 0x%016lx\n",
988 remote_cpus); 988 remote_cpus);
989 printf("TLB REMOTE PENDING = 0x%016lx\n", 989 printf("TLB REMOTE PENDING = 0x%016lx\n",
990 tlb_pending); 990 tlb_pending);
991 printf("TLB CONTEXT = %p\n", tlb_context); 991 printf("TLB CONTEXT = %p\n", tlb_context);
992 printf("TLB LOCAL IPL = %lu\n", 992 printf("TLB LOCAL IPL = %lu\n",
993 alpha_pal_rdps() & ALPHA_PSL_IPL_MASK); 993 alpha_pal_rdps() & ALPHA_PSL_IPL_MASK);
994 panic("pmap_tlb_shootnow"); 994 panic("pmap_tlb_shootnow");
995 } 995 }
996 } 996 }
997 membar_consumer(); 997 membar_consumer();
998 } 998 }
999 KASSERT(tlb_context == NULL); 999 KASSERT(tlb_context == NULL);
1000#endif /* MULTIPROCESSOR */ 1000#endif /* MULTIPROCESSOR */
1001 1001
1002 mutex_spin_exit(&tlb_lock); 1002 mutex_spin_exit(&tlb_lock);
1003 1003
1004 if (__predict_false(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV)) { 1004 if (__predict_false(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV)) {
1005 /* 1005 /*
1006 * P->V TLB operations may operate on multiple pmaps. 1006 * P->V TLB operations may operate on multiple pmaps.
1007 * The shootdown takes a reference on the first pmap it 1007 * The shootdown takes a reference on the first pmap it
1008 * encounters, in order to prevent it from disappearing, 1008 * encounters, in order to prevent it from disappearing,
1009 * in the hope that we end up with a single-pmap P->V 1009 * in the hope that we end up with a single-pmap P->V
1010 * operation (instrumentation shows this is not rare). 1010 * operation (instrumentation shows this is not rare).
1011 * 1011 *
1012 * Once this shootdown is finished globally, we need to 1012 * Once this shootdown is finished globally, we need to
1013 * release this extra reference. 1013 * release this extra reference.
1014 */ 1014 */
1015 KASSERT(tlbctx->t_pmap != NULL); 1015 KASSERT(tlbctx->t_pmap != NULL);
1016 pmap_destroy(tlbctx->t_pmap); 1016 pmap_destroy(tlbctx->t_pmap);
1017 } 1017 }
1018} 1018}
1019 1019
1020#if defined(MULTIPROCESSOR) 1020#if defined(MULTIPROCESSOR)
1021void 1021void
1022pmap_tlb_shootdown_ipi(struct cpu_info * const ci, 1022pmap_tlb_shootdown_ipi(struct cpu_info * const ci,
1023 struct trapframe * const tf __unused) 1023 struct trapframe * const tf __unused)
1024{ 1024{
1025 KASSERT(tlb_context != NULL); 1025 KASSERT(tlb_context != NULL);
1026 pmap_tlb_invalidate(tlb_context, ci); 1026 pmap_tlb_invalidate(tlb_context, ci);
1027 if (atomic_and_ulong_nv(&tlb_pending, ~(1UL << ci->ci_cpuid)) == 0) { 1027 if (atomic_and_ulong_nv(&tlb_pending, ~(1UL << ci->ci_cpuid)) == 0) {
1028 membar_producer(); 1028 membar_producer();
1029 atomic_store_relaxed(&tlb_context, NULL); 1029 atomic_store_relaxed(&tlb_context, NULL);
1030 } 1030 }
1031} 1031}
1032#endif /* MULTIPROCESSOR */ 1032#endif /* MULTIPROCESSOR */
1033 1033
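The handoff between initiator and responders pairs membar_producer() in pmap_tlb_shootdown_ipi() with membar_consumer() in pmap_tlb_shootnow(): the last responder publishes its invalidation work before clearing tlb_context, and the initiator orders its subsequent accesses after observing that clear. Schematically (illustrative only):

	initiator (pmap_tlb_shootnow)      responders (pmap_tlb_shootdown_ipi)
	-----------------------------      -----------------------------------
	tlb_context = tlbctx;
	tlb_pending = remote_cpus;
	send ALPHA_IPI_SHOOTDOWN   ---->   pmap_tlb_invalidate(tlb_context, ci);
	invalidate locally;                clear own bit in tlb_pending;
	spin while tlb_context             last one out:
	    != NULL;                           membar_producer();
	membar_consumer();         <----       tlb_context = NULL;
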
1034static void 1034static void
1035pmap_tlb_physpage_free(paddr_t const ptpa, 1035pmap_tlb_physpage_free(paddr_t const ptpa,
1036 struct pmap_tlb_context * const tlbctx) 1036 struct pmap_tlb_context * const tlbctx)
1037{ 1037{
1038 struct vm_page * const pg = PHYS_TO_VM_PAGE(ptpa); 1038 struct vm_page * const pg = PHYS_TO_VM_PAGE(ptpa);
1039 1039
1040 KASSERT(pg != NULL); 1040 KASSERT(pg != NULL);
1041 1041
1042#ifdef DEBUG 1042#ifdef DEBUG
1043 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1043 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1044 KDASSERT(md->pvh_refcnt == 0); 1044 KDASSERT(md->pvh_refcnt == 0);
1045#endif 1045#endif
1046 1046
1047 LIST_INSERT_HEAD(&tlbctx->t_freeptq, pg, pageq.list); 1047 LIST_INSERT_HEAD(&tlbctx->t_freeptq, pg, pageq.list);
1048} 1048}
1049 1049
1050static void 1050static void
1051pmap_tlb_ptpage_drain(struct pmap_tlb_context * const tlbctx) 1051pmap_tlb_ptpage_drain(struct pmap_tlb_context * const tlbctx)
1052{ 1052{
1053 struct vm_page *pg; 1053 struct vm_page *pg;
1054 1054
1055 while ((pg = LIST_FIRST(&tlbctx->t_freeptq)) != NULL) { 1055 while ((pg = LIST_FIRST(&tlbctx->t_freeptq)) != NULL) {
1056 LIST_REMOVE(pg, pageq.list); 1056 LIST_REMOVE(pg, pageq.list);
1057 uvm_pagefree(pg); 1057 uvm_pagefree(pg);
1058 } 1058 }
1059} 1059}
1060 1060
1061/* 1061/*
1062 * Internal routines 1062 * Internal routines
1063 */ 1063 */
1064static void alpha_protection_init(void); 1064static void alpha_protection_init(void);
1065static pt_entry_t pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool, 1065static pt_entry_t pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool,
1066 pv_entry_t *, 1066 pv_entry_t *,
1067 struct pmap_tlb_context *); 1067 struct pmap_tlb_context *);
1068static void pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t, 1068static void pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t,
1069 struct pmap_tlb_context *); 1069 struct pmap_tlb_context *);
1070 1070
1071/* 1071/*
1072 * PT page management functions. 1072 * PT page management functions.
1073 */ 1073 */
1074static int pmap_ptpage_alloc(pt_entry_t *, int); 1074static int pmap_ptpage_alloc(pt_entry_t *, int);
1075static void pmap_ptpage_free(pt_entry_t *, struct pmap_tlb_context *); 1075static void pmap_ptpage_free(pt_entry_t *, struct pmap_tlb_context *);
1076static void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, 1076static void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *,
1077 struct pmap_tlb_context *); 1077 struct pmap_tlb_context *);
1078static void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, 1078static void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *,
1079 struct pmap_tlb_context *); 1079 struct pmap_tlb_context *);
1080static void pmap_l1pt_delref(pmap_t, pt_entry_t *); 1080static void pmap_l1pt_delref(pmap_t, pt_entry_t *);
1081 1081
1082static void *pmap_l1pt_alloc(struct pool *, int); 1082static void *pmap_l1pt_alloc(struct pool *, int);
1083static void pmap_l1pt_free(struct pool *, void *); 1083static void pmap_l1pt_free(struct pool *, void *);
1084 1084
1085static struct pool_allocator pmap_l1pt_allocator = { 1085static struct pool_allocator pmap_l1pt_allocator = {
1086 pmap_l1pt_alloc, pmap_l1pt_free, 0, 1086 pmap_l1pt_alloc, pmap_l1pt_free, 0,
1087}; 1087};
1088 1088
1089static int pmap_l1pt_ctor(void *, void *, int); 1089static int pmap_l1pt_ctor(void *, void *, int);
1090 1090
1091/* 1091/*
1092 * PV table management functions. 1092 * PV table management functions.
1093 */ 1093 */
1094static int pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, pt_entry_t *, 1094static int pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, pt_entry_t *,
1095 bool, pv_entry_t); 1095 bool, pv_entry_t);
1096static void pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool, 1096static void pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool,
1097 pv_entry_t *); 1097 pv_entry_t *);
1098static void *pmap_pv_page_alloc(struct pool *, int); 1098static void *pmap_pv_page_alloc(struct pool *, int);
1099static void pmap_pv_page_free(struct pool *, void *); 1099static void pmap_pv_page_free(struct pool *, void *);
1100 1100
1101static struct pool_allocator pmap_pv_page_allocator = { 1101static struct pool_allocator pmap_pv_page_allocator = {
1102 pmap_pv_page_alloc, pmap_pv_page_free, 0, 1102 pmap_pv_page_alloc, pmap_pv_page_free, 0,
1103}; 1103};
1104 1104
1105#ifdef DEBUG 1105#ifdef DEBUG
1106void pmap_pv_dump(paddr_t); 1106void pmap_pv_dump(paddr_t);
1107#endif 1107#endif
1108 1108
1109#define pmap_pv_alloc() pool_cache_get(&pmap_pv_cache, PR_NOWAIT) 1109#define pmap_pv_alloc() pool_cache_get(&pmap_pv_cache, PR_NOWAIT)
1110#define pmap_pv_free(pv) pool_cache_put(&pmap_pv_cache, (pv)) 1110#define pmap_pv_free(pv) pool_cache_put(&pmap_pv_cache, (pv))
1111 1111
1112/* 1112/*
1113 * ASN management functions. 1113 * ASN management functions.
1114 */ 1114 */
1115static u_int pmap_asn_alloc(pmap_t, struct cpu_info *); 1115static u_int pmap_asn_alloc(pmap_t, struct cpu_info *);
1116 1116
1117/* 1117/*
1118 * Misc. functions. 1118 * Misc. functions.
1119 */ 1119 */
1120static bool pmap_physpage_alloc(int, paddr_t *); 1120static bool pmap_physpage_alloc(int, paddr_t *);
1121static void pmap_physpage_free(paddr_t); 1121static void pmap_physpage_free(paddr_t);
1122static int pmap_physpage_addref(void *); 1122static int pmap_physpage_addref(void *);
1123static int pmap_physpage_delref(void *); 1123static int pmap_physpage_delref(void *);
1124 1124
1125static bool vtophys_internal(vaddr_t, paddr_t *p); 1125static bool vtophys_internal(vaddr_t, paddr_t *p);
1126 1126
1127/* 1127/*
1128 * PMAP_KERNEL_PTE: 1128 * PMAP_KERNEL_PTE:
1129 * 1129 *
1130 * Get a kernel PTE. 1130 * Get a kernel PTE.
1131 * 1131 *
1132 * If debugging, do a table walk. If not debugging, just use 1132 * If debugging, do a table walk. If not debugging, just use
1133 * the Virtual Page Table, since all kernel page tables are 1133 * the Virtual Page Table, since all kernel page tables are
1134 * pre-allocated and mapped in. 1134 * pre-allocated and mapped in.
1135 */ 1135 */
1136#ifdef DEBUG 1136#ifdef DEBUG
1137#define PMAP_KERNEL_PTE(va) \ 1137#define PMAP_KERNEL_PTE(va) \
1138({ \ 1138({ \
1139 pt_entry_t *l1pte_, *l2pte_; \ 1139 pt_entry_t *l1pte_, *l2pte_; \
1140 \ 1140 \
1141 l1pte_ = pmap_l1pte(pmap_kernel(), va); \ 1141 l1pte_ = pmap_l1pte(pmap_kernel(), va); \
1142 if (pmap_pte_v(l1pte_) == 0) { \ 1142 if (pmap_pte_v(l1pte_) == 0) { \
1143 printf("kernel level 1 PTE not valid, va 0x%lx " \ 1143 printf("kernel level 1 PTE not valid, va 0x%lx " \
1144 "(line %d)\n", (va), __LINE__); \ 1144 "(line %d)\n", (va), __LINE__); \
1145 panic("PMAP_KERNEL_PTE"); \ 1145 panic("PMAP_KERNEL_PTE"); \
1146 } \ 1146 } \
1147 l2pte_ = pmap_l2pte(pmap_kernel(), va, l1pte_); \ 1147 l2pte_ = pmap_l2pte(pmap_kernel(), va, l1pte_); \
1148 if (pmap_pte_v(l2pte_) == 0) { \ 1148 if (pmap_pte_v(l2pte_) == 0) { \
1149 printf("kernel level 2 PTE not valid, va 0x%lx " \ 1149 printf("kernel level 2 PTE not valid, va 0x%lx " \
1150 "(line %d)\n", (va), __LINE__); \ 1150 "(line %d)\n", (va), __LINE__); \
1151 panic("PMAP_KERNEL_PTE"); \ 1151 panic("PMAP_KERNEL_PTE"); \
1152 } \ 1152 } \
1153 pmap_l3pte(pmap_kernel(), va, l2pte_); \ 1153 pmap_l3pte(pmap_kernel(), va, l2pte_); \
1154}) 1154})
1155#else 1155#else
1156#define PMAP_KERNEL_PTE(va) (&VPT[VPT_INDEX((va))]) 1156#define PMAP_KERNEL_PTE(va) (&VPT[VPT_INDEX((va))])
1157#endif 1157#endif
1158 1158
1159/* 1159/*
1160 * PMAP_STAT_{INCR,DECR}: 1160 * PMAP_STAT_{INCR,DECR}:
1161 * 1161 *
1162 * Increment or decrement a pmap statistic. 1162 * Increment or decrement a pmap statistic.
1163 */ 1163 */
1164#define PMAP_STAT_INCR(s, v) atomic_add_long((unsigned long *)(&(s)), (v)) 1164#define PMAP_STAT_INCR(s, v) atomic_add_long((unsigned long *)(&(s)), (v))
1165#define PMAP_STAT_DECR(s, v) atomic_add_long((unsigned long *)(&(s)), -(v)) 1165#define PMAP_STAT_DECR(s, v) atomic_add_long((unsigned long *)(&(s)), -(v))
1166 1166
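Typical usage, shown here as a hypothetical fragment, is resident/wired page accounting:

	/* Illustrative only: adjust pmap statistics atomically. */
	PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1);	/* new mapping */
	PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1);		/* unwired */
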
1167/* 1167/*
1168 * pmap_init_cpu: 1168 * pmap_init_cpu:
1169 * 1169 *
1170 * Initialize pmap data in the cpu_info. 1170 * Initialize pmap data in the cpu_info.
1171 */ 1171 */
1172void 1172void
1173pmap_init_cpu(struct cpu_info * const ci) 1173pmap_init_cpu(struct cpu_info * const ci)
1174{ 1174{
1175 pmap_t const pmap = pmap_kernel(); 1175 pmap_t const pmap = pmap_kernel();
1176 1176
1177 /* All CPUs start out using the kernel pmap. */ 1177 /* All CPUs start out using the kernel pmap. */
1178 atomic_or_ulong(&pmap->pm_cpus, 1UL << ci->ci_cpuid); 1178 atomic_or_ulong(&pmap->pm_cpus, 1UL << ci->ci_cpuid);
1179 pmap_reference(pmap); 1179 pmap_reference(pmap);
1180 ci->ci_pmap = pmap; 1180 ci->ci_pmap = pmap;
1181 1181
1182 /* Initialize ASN allocation logic. */ 1182 /* Initialize ASN allocation logic. */
1183 ci->ci_next_asn = PMAP_ASN_FIRST_USER; 1183 ci->ci_next_asn = PMAP_ASN_FIRST_USER;
1184 ci->ci_asn_gen = PMAP_ASNGEN_INITIAL; 1184 ci->ci_asn_gen = PMAP_ASNGEN_INITIAL;
1185} 1185}
1186 1186
1187/* 1187/*
1188 * pmap_bootstrap: 1188 * pmap_bootstrap:
1189 * 1189 *
1190 * Bootstrap the system to run with virtual memory. 1190 * Bootstrap the system to run with virtual memory.
1191 * 1191 *
1192 * Note: no locking is necessary in this function. 1192 * Note: no locking is necessary in this function.
1193 */ 1193 */
1194void 1194void
1195pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids) 1195pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
1196{ 1196{
1197 vsize_t lev2mapsize, lev3mapsize; 1197 vsize_t lev2mapsize, lev3mapsize;
1198 pt_entry_t *lev2map, *lev3map; 1198 pt_entry_t *lev2map, *lev3map;
1199 pt_entry_t pte; 1199 pt_entry_t pte;
1200 vsize_t bufsz; 1200 vsize_t bufsz;
1201 struct pcb *pcb; 1201 struct pcb *pcb;
1202 int i; 1202 int i;
1203 1203
1204#ifdef DEBUG 1204#ifdef DEBUG
1205 if (pmapdebug & (PDB_FOLLOW|PDB_BOOTSTRAP)) 1205 if (pmapdebug & (PDB_FOLLOW|PDB_BOOTSTRAP))
1206 printf("pmap_bootstrap(0x%lx, %u)\n", ptaddr, maxasn); 1206 printf("pmap_bootstrap(0x%lx, %u)\n", ptaddr, maxasn);
1207#endif 1207#endif
1208 1208
1209 /* 1209 /*
1210 * Compute the number of pages kmem_arena will have. 1210 * Compute the number of pages kmem_arena will have.
1211 */ 1211 */
1212 kmeminit_nkmempages(); 1212 kmeminit_nkmempages();
1213 1213
1214 /* 1214 /*
1215 * Figure out how many initial PTEs are necessary to map the 1215 * Figure out how many initial PTEs are necessary to map the
1216 * kernel. We also reserve space for kmem_alloc_pageable() 1216 * kernel. We also reserve space for kmem_alloc_pageable()
1217 * for vm_fork(). 1217 * for vm_fork().
1218 */ 1218 */
1219 1219
1220 /* Get size of buffer cache and set an upper limit */ 1220 /* Get size of buffer cache and set an upper limit */
1221 bufsz = buf_memcalc(); 1221 bufsz = buf_memcalc();
1222 buf_setvalimit(bufsz); 1222 buf_setvalimit(bufsz);
1223 1223
1224 lev3mapsize = 1224 lev3mapsize =
1225 (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) + 1225 (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
1226 bufsz + 16 * NCARGS + pager_map_size) / PAGE_SIZE + 1226 bufsz + 16 * NCARGS + pager_map_size) / PAGE_SIZE +
1227 (maxproc * UPAGES) + nkmempages; 1227 (maxproc * UPAGES) + nkmempages;
1228 1228
1229 lev3mapsize = roundup(lev3mapsize, NPTEPG); 1229 lev3mapsize = roundup(lev3mapsize, NPTEPG);
1230 1230
1231 /* 1231 /*
1232 * Initialize `FYI' variables. Note we're relying on 1232 * Initialize `FYI' variables. Note we're relying on
1233 * the fact that BSEARCH sorts the vm_physmem[] array 1233 * the fact that BSEARCH sorts the vm_physmem[] array
1234 * for us. 1234 * for us.
1235 */ 1235 */
1236 avail_start = ptoa(uvm_physseg_get_avail_start(uvm_physseg_get_first())); 1236 avail_start = ptoa(uvm_physseg_get_avail_start(uvm_physseg_get_first()));
1237 avail_end = ptoa(uvm_physseg_get_avail_end(uvm_physseg_get_last())); 1237 avail_end = ptoa(uvm_physseg_get_avail_end(uvm_physseg_get_last()));
1238 virtual_end = VM_MIN_KERNEL_ADDRESS + lev3mapsize * PAGE_SIZE; 1238 virtual_end = VM_MIN_KERNEL_ADDRESS + lev3mapsize * PAGE_SIZE;
1239 1239
1240#if 0 1240#if 0
1241 printf("avail_start = 0x%lx\n", avail_start); 1241 printf("avail_start = 0x%lx\n", avail_start);
1242 printf("avail_end = 0x%lx\n", avail_end); 1242 printf("avail_end = 0x%lx\n", avail_end);
1243 printf("virtual_end = 0x%lx\n", virtual_end); 1243 printf("virtual_end = 0x%lx\n", virtual_end);
1244#endif 1244#endif
1245 1245
1246 /* 1246 /*
1247 * Allocate a level 1 PTE table for the kernel. 1247 * Allocate a level 1 PTE table for the kernel.
1248 * This is always one page long. 1248 * This is always one page long.
1249 * IF THIS IS NOT A MULTIPLE OF PAGE_SIZE, ALL WILL GO TO HELL. 1249 * IF THIS IS NOT A MULTIPLE OF PAGE_SIZE, ALL WILL GO TO HELL.
1250 */ 1250 */
1251 kernel_lev1map = (pt_entry_t *) 1251 kernel_lev1map = (pt_entry_t *)
1252 uvm_pageboot_alloc(sizeof(pt_entry_t) * NPTEPG); 1252 uvm_pageboot_alloc(sizeof(pt_entry_t) * NPTEPG);
1253 1253
1254 /* 1254 /*
1255 * Allocate a level 2 PTE table for the kernel. 1255 * Allocate a level 2 PTE table for the kernel.
1256 * These must map all of the level3 PTEs. 1256 * These must map all of the level3 PTEs.
1257 * IF THIS IS NOT A MULTIPLE OF PAGE_SIZE, ALL WILL GO TO HELL. 1257 * IF THIS IS NOT A MULTIPLE OF PAGE_SIZE, ALL WILL GO TO HELL.
1258 */ 1258 */
1259 lev2mapsize = roundup(howmany(lev3mapsize, NPTEPG), NPTEPG); 1259 lev2mapsize = roundup(howmany(lev3mapsize, NPTEPG), NPTEPG);
1260 lev2map = (pt_entry_t *) 1260 lev2map = (pt_entry_t *)
1261 uvm_pageboot_alloc(sizeof(pt_entry_t) * lev2mapsize); 1261 uvm_pageboot_alloc(sizeof(pt_entry_t) * lev2mapsize);
1262 1262
1263 /* 1263 /*
1264 * Allocate a level 3 PTE table for the kernel. 1264 * Allocate a level 3 PTE table for the kernel.
1265 * Contains lev3mapsize PTEs. 1265 * Contains lev3mapsize PTEs.
1266 */ 1266 */
1267 lev3map = (pt_entry_t *) 1267 lev3map = (pt_entry_t *)
1268 uvm_pageboot_alloc(sizeof(pt_entry_t) * lev3mapsize); 1268 uvm_pageboot_alloc(sizeof(pt_entry_t) * lev3mapsize);
1269 1269
1270 /* 1270 /*
1271 * Set up level 1 page table 1271 * Set up level 1 page table
1272 */ 1272 */
1273 1273
1274 /* Map all of the level 2 pte pages */ 1274 /* Map all of the level 2 pte pages */
1275 for (i = 0; i < howmany(lev2mapsize, NPTEPG); i++) { 1275 for (i = 0; i < howmany(lev2mapsize, NPTEPG); i++) {
1276 pte = (ALPHA_K0SEG_TO_PHYS(((vaddr_t)lev2map) + 1276 pte = (ALPHA_K0SEG_TO_PHYS(((vaddr_t)lev2map) +
1277 (i*PAGE_SIZE)) >> PGSHIFT) << PG_SHIFT; 1277 (i*PAGE_SIZE)) >> PGSHIFT) << PG_SHIFT;
1278 pte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; 1278 pte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED;
1279 kernel_lev1map[l1pte_index(VM_MIN_KERNEL_ADDRESS + 1279 kernel_lev1map[l1pte_index(VM_MIN_KERNEL_ADDRESS +
1280 (i*PAGE_SIZE*NPTEPG*NPTEPG))] = pte; 1280 (i*PAGE_SIZE*NPTEPG*NPTEPG))] = pte;
1281 } 1281 }
1282 1282
1283 /* Map the virtual page table */ 1283 /* Map the virtual page table */
1284 pte = (ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT) 1284 pte = (ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT)
1285 << PG_SHIFT; 1285 << PG_SHIFT;
1286 pte |= PG_V | PG_KRE | PG_KWE; /* NOTE NO ASM */ 1286 pte |= PG_V | PG_KRE | PG_KWE; /* NOTE NO ASM */
1287 kernel_lev1map[l1pte_index(VPTBASE)] = pte; 1287 kernel_lev1map[l1pte_index(VPTBASE)] = pte;
1288 VPT = (pt_entry_t *)VPTBASE; 1288 VPT = (pt_entry_t *)VPTBASE;
1289 1289
1290#ifdef _PMAP_MAY_USE_PROM_CONSOLE 
1291 { 
1292 extern pt_entry_t prom_pte; /* XXX */ 
1293 extern int prom_mapped; /* XXX */ 
1294 
1295 if (pmap_uses_prom_console()) { 
1296 /* 
1297 * XXX Save old PTE so we can remap the PROM, if 
1298 * XXX necessary. 
1299 */ 
1300 prom_pte = *(pt_entry_t *)ptaddr & ~PG_ASM; 
1301 } 
1302 prom_mapped = 0; 
1303 
1304 /* 
1305 * Actually, this code lies. The prom is still mapped, and will 
1306 * remain so until the context switch after alpha_init() returns. 
1307 */ 
1308 } 
1309#endif 
1310 
1311 /* 1290 /*
1312 * Set up level 2 page table. 1291 * Set up level 2 page table.
1313 */ 1292 */
1314 /* Map all of the level 3 pte pages */ 1293 /* Map all of the level 3 pte pages */
1315 for (i = 0; i < howmany(lev3mapsize, NPTEPG); i++) { 1294 for (i = 0; i < howmany(lev3mapsize, NPTEPG); i++) {
1316 pte = (ALPHA_K0SEG_TO_PHYS(((vaddr_t)lev3map) + 1295 pte = (ALPHA_K0SEG_TO_PHYS(((vaddr_t)lev3map) +
1317 (i*PAGE_SIZE)) >> PGSHIFT) << PG_SHIFT; 1296 (i*PAGE_SIZE)) >> PGSHIFT) << PG_SHIFT;
1318 pte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; 1297 pte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED;
1319 lev2map[l2pte_index(VM_MIN_KERNEL_ADDRESS+ 1298 lev2map[l2pte_index(VM_MIN_KERNEL_ADDRESS+
1320 (i*PAGE_SIZE*NPTEPG))] = pte; 1299 (i*PAGE_SIZE*NPTEPG))] = pte;
1321 } 1300 }
1322 1301
1323 /* Initialize the pmap_growkernel_lock. */ 1302 /* Initialize the pmap_growkernel_lock. */
1324 rw_init(&pmap_growkernel_lock); 1303 rw_init(&pmap_growkernel_lock);
1325 1304
1326 /* 1305 /*
1327 * Set up level three page table (lev3map) 1306 * Set up level three page table (lev3map)
1328 */ 1307 */
1329 /* Nothing to do; it's already zero'd */ 1308 /* Nothing to do; it's already zero'd */
1330 1309
1331 /* 1310 /*
1332 * Initialize the pmap pools and list. 1311 * Initialize the pmap pools and list.
1333 */ 1312 */
1334 pmap_ncpuids = ncpuids; 1313 pmap_ncpuids = ncpuids;
1335 pool_cache_bootstrap(&pmap_pmap_cache, PMAP_SIZEOF(pmap_ncpuids), 1314 pool_cache_bootstrap(&pmap_pmap_cache, PMAP_SIZEOF(pmap_ncpuids),
1336 COHERENCY_UNIT, 0, 0, "pmap", NULL, IPL_NONE, NULL, NULL, NULL); 1315 COHERENCY_UNIT, 0, 0, "pmap", NULL, IPL_NONE, NULL, NULL, NULL);
1337 pool_cache_bootstrap(&pmap_l1pt_cache, PAGE_SIZE, 0, 0, 0, "pmapl1pt", 1316 pool_cache_bootstrap(&pmap_l1pt_cache, PAGE_SIZE, 0, 0, 0, "pmapl1pt",
1338 &pmap_l1pt_allocator, IPL_NONE, pmap_l1pt_ctor, NULL, NULL); 1317 &pmap_l1pt_allocator, IPL_NONE, pmap_l1pt_ctor, NULL, NULL);
1339 pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0, 1318 pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0,
1340 PR_LARGECACHE, "pmappv", &pmap_pv_page_allocator, IPL_NONE, NULL, 1319 PR_LARGECACHE, "pmappv", &pmap_pv_page_allocator, IPL_NONE, NULL,
1341 NULL, NULL); 1320 NULL, NULL);
1342 1321
1343 TAILQ_INIT(&pmap_all_pmaps); 1322 TAILQ_INIT(&pmap_all_pmaps);
1344 1323
1345 /* Initialize the ASN logic. See also pmap_init_cpu(). */ 1324 /* Initialize the ASN logic. See also pmap_init_cpu(). */
1346 pmap_max_asn = maxasn; 1325 pmap_max_asn = maxasn;
1347 1326
1348 /* 1327 /*
1349 * Initialize the locks. 1328 * Initialize the locks.
1350 */ 1329 */
1351 rw_init(&pmap_main_lock); 1330 rw_init(&pmap_main_lock);
1352 mutex_init(&pmap_all_pmaps_lock, MUTEX_DEFAULT, IPL_NONE); 1331 mutex_init(&pmap_all_pmaps_lock, MUTEX_DEFAULT, IPL_NONE);
1353 for (i = 0; i < __arraycount(pmap_pvh_locks); i++) { 1332 for (i = 0; i < __arraycount(pmap_pvh_locks); i++) {
1354 mutex_init(&pmap_pvh_locks[i].lock, MUTEX_DEFAULT, IPL_NONE); 1333 mutex_init(&pmap_pvh_locks[i].lock, MUTEX_DEFAULT, IPL_NONE);
1355 } 1334 }
1356 for (i = 0; i < __arraycount(pmap_pvh_locks); i++) { 1335 for (i = 0; i < __arraycount(pmap_pvh_locks); i++) {
1357 mutex_init(&pmap_pmap_locks[i].locks.lock, 1336 mutex_init(&pmap_pmap_locks[i].locks.lock,
1358 MUTEX_DEFAULT, IPL_NONE); 1337 MUTEX_DEFAULT, IPL_NONE);
1359 mutex_init(&pmap_pmap_locks[i].locks.activation_lock, 1338 mutex_init(&pmap_pmap_locks[i].locks.activation_lock,
1360 MUTEX_SPIN, IPL_SCHED); 1339 MUTEX_SPIN, IPL_SCHED);
1361 } 1340 }
1362  1341
1363 /* 1342 /*
1364 * This must block any interrupt from which a TLB shootdown 1343 * This must block any interrupt from which a TLB shootdown
1365 * could be issued, but must NOT block IPIs. 1344 * could be issued, but must NOT block IPIs.
1366 */ 1345 */
1367 mutex_init(&tlb_lock, MUTEX_SPIN, IPL_VM); 1346 mutex_init(&tlb_lock, MUTEX_SPIN, IPL_VM);
1368 1347
1369 /* 1348 /*
1370 * Initialize kernel pmap. Note that all kernel mappings 1349 * Initialize kernel pmap. Note that all kernel mappings
1371 * have PG_ASM set, so the ASN doesn't really matter for 1350 * have PG_ASM set, so the ASN doesn't really matter for
1372 * the kernel pmap. Also, since the kernel pmap always 1351 * the kernel pmap. Also, since the kernel pmap always
1373 * references kernel_lev1map, it always has an invalid ASN 1352 * references kernel_lev1map, it always has an invalid ASN
1374 * generation. 1353 * generation.
1375 */ 1354 */
1376 memset(pmap_kernel(), 0, sizeof(struct pmap)); 1355 memset(pmap_kernel(), 0, sizeof(struct pmap));
1377 pmap_kernel()->pm_lev1map = kernel_lev1map; 1356 pmap_kernel()->pm_lev1map = kernel_lev1map;
1378 pmap_kernel()->pm_count = 1; 1357 pmap_kernel()->pm_count = 1;
1379 /* Kernel pmap does not have ASN info. */ 1358 /* Kernel pmap does not have ASN info. */
1380 TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list); 1359 TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list);
1381 1360
1382 /* 1361 /*
1383 * Set up lwp0's PCB such that the ptbr points to the right place 1362 * Set up lwp0's PCB such that the ptbr points to the right place
1384 * and has the kernel pmap's (really unused) ASN. 1363 * and has the kernel pmap's (really unused) ASN.
1385 */ 1364 */
1386 pcb = lwp_getpcb(&lwp0); 1365 pcb = lwp_getpcb(&lwp0);
1387 pcb->pcb_hw.apcb_ptbr = 1366 pcb->pcb_hw.apcb_ptbr =
1388 ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT; 1367 ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT;
1389 pcb->pcb_hw.apcb_asn = PMAP_ASN_KERNEL; 1368 pcb->pcb_hw.apcb_asn = PMAP_ASN_KERNEL;
1390 1369
1391 struct cpu_info * const ci = curcpu(); 1370 struct cpu_info * const ci = curcpu();
1392 pmap_init_cpu(ci); 1371 pmap_init_cpu(ci);
1393} 1372}
1394 1373
1395#ifdef _PMAP_MAY_USE_PROM_CONSOLE 
1396int 
1397pmap_uses_prom_console(void) 
1398{ 
1399 
1400 return (cputype == ST_DEC_21000); 
1401} 
1402#endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 
1403 
1404/* 1374/*
1405 * pmap_virtual_space: [ INTERFACE ] 1375 * pmap_virtual_space: [ INTERFACE ]
1406 * 1376 *
1407 * Define the initial bounds of the kernel virtual address space. 1377 * Define the initial bounds of the kernel virtual address space.
1408 */ 1378 */
1409void 1379void
1410pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) 1380pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
1411{ 1381{
1412 1382
1413 *vstartp = VM_MIN_KERNEL_ADDRESS; /* kernel is in K0SEG */ 1383 *vstartp = VM_MIN_KERNEL_ADDRESS; /* kernel is in K0SEG */
1414 *vendp = VM_MAX_KERNEL_ADDRESS; /* we use pmap_growkernel */ 1384 *vendp = VM_MAX_KERNEL_ADDRESS; /* we use pmap_growkernel */
1415} 1385}
1416 1386
1417/* 1387/*
1418 * pmap_steal_memory: [ INTERFACE ] 1388 * pmap_steal_memory: [ INTERFACE ]
1419 * 1389 *
1420 * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()). 1390 * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
1421 * This function allows for early dynamic memory allocation until the 1391 * This function allows for early dynamic memory allocation until the
1422 * virtual memory system has been bootstrapped. After that point, either 1392 * virtual memory system has been bootstrapped. After that point, either
1423 * kmem_alloc or malloc should be used. This function works by stealing 1393 * kmem_alloc or malloc should be used. This function works by stealing
1424 * pages from the (to be) managed page pool, then implicitly mapping the 1394 * pages from the (to be) managed page pool, then implicitly mapping the
1425 * pages (by using their k0seg addresses) and zeroing them. 1395 * pages (by using their k0seg addresses) and zeroing them.
1426 * 1396 *
1427 * It may be used once the physical memory segments have been pre-loaded 1397 * It may be used once the physical memory segments have been pre-loaded
1428 * into the vm_physmem[] array. Early memory allocation MUST use this 1398 * into the vm_physmem[] array. Early memory allocation MUST use this
1429 * interface! This cannot be used after vm_page_startup(), and will 1399 * interface! This cannot be used after vm_page_startup(), and will
1430 * generate a panic if tried. 1400 * generate a panic if tried.
1431 * 1401 *
1432 * Note that this memory will never be freed, and in essence it is wired 1402 * Note that this memory will never be freed, and in essence it is wired
1433 * down. 1403 * down.
1434 * 1404 *
1435 * We must adjust *vstartp and/or *vendp iff we use address space 1405 * We must adjust *vstartp and/or *vendp iff we use address space
1436 * from the kernel virtual address range defined by pmap_virtual_space(). 1406 * from the kernel virtual address range defined by pmap_virtual_space().
1437 * 1407 *
1438 * Note: no locking is necessary in this function. 1408 * Note: no locking is necessary in this function.
1439 */ 1409 */
1440vaddr_t 1410vaddr_t
1441pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) 1411pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
1442{ 1412{
1443 int npgs; 1413 int npgs;
1444 vaddr_t va; 1414 vaddr_t va;
1445 paddr_t pa; 1415 paddr_t pa;
1446 1416
1447 uvm_physseg_t bank; 1417 uvm_physseg_t bank;
1448 1418
1449 size = round_page(size); 1419 size = round_page(size);
1450 npgs = atop(size); 1420 npgs = atop(size);
1451 1421
1452#if 0 1422#if 0
1453 printf("PSM: size 0x%lx (npgs 0x%x)\n", size, npgs); 1423 printf("PSM: size 0x%lx (npgs 0x%x)\n", size, npgs);
1454#endif 1424#endif
1455 1425
1456 for (bank = uvm_physseg_get_first(); 1426 for (bank = uvm_physseg_get_first();
1457 uvm_physseg_valid_p(bank); 1427 uvm_physseg_valid_p(bank);
1458 bank = uvm_physseg_get_next(bank)) { 1428 bank = uvm_physseg_get_next(bank)) {
1459 if (uvm.page_init_done == true) 1429 if (uvm.page_init_done == true)
1460 panic("pmap_steal_memory: called _after_ bootstrap"); 1430 panic("pmap_steal_memory: called _after_ bootstrap");
1461 1431
1462#if 0 1432#if 0
1463 printf(" bank %d: avail_start 0x%"PRIxPADDR", start 0x%"PRIxPADDR", " 1433 printf(" bank %d: avail_start 0x%"PRIxPADDR", start 0x%"PRIxPADDR", "
1464 "avail_end 0x%"PRIxPADDR"\n", bank, uvm_physseg_get_avail_start(bank), 1434 "avail_end 0x%"PRIxPADDR"\n", bank, uvm_physseg_get_avail_start(bank),
1465 uvm_physseg_get_start(bank), uvm_physseg_get_avail_end(bank)); 1435 uvm_physseg_get_start(bank), uvm_physseg_get_avail_end(bank));
1466#endif 1436#endif
1467 1437
1468 if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank) || 1438 if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank) ||
1469 uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) 1439 uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank))
1470 continue; 1440 continue;
1471 1441
1472#if 0 1442#if 0
1473 printf(" avail_end - avail_start = 0x%"PRIxPADDR"\n", 1443 printf(" avail_end - avail_start = 0x%"PRIxPADDR"\n",
1474 uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)); 1444 uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank));
1475#endif 1445#endif
1476 1446
1477 if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) 1447 if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)
1478 < npgs) 1448 < npgs)
1479 continue; 1449 continue;
1480 1450
1481 /* 1451 /*
1482 * There are enough pages here; steal them! 1452 * There are enough pages here; steal them!
1483 */ 1453 */
1484 pa = ptoa(uvm_physseg_get_start(bank)); 1454 pa = ptoa(uvm_physseg_get_start(bank));
1485 uvm_physseg_unplug(atop(pa), npgs); 1455 uvm_physseg_unplug(atop(pa), npgs);
1486 1456
1487 va = ALPHA_PHYS_TO_K0SEG(pa); 1457 va = ALPHA_PHYS_TO_K0SEG(pa);
1488 memset((void *)va, 0, size); 1458 memset((void *)va, 0, size);
1489 pmap_pages_stolen += npgs; 1459 pmap_pages_stolen += npgs;
1490 return (va); 1460 return (va);
1491 } 1461 }
1492 1462
1493 /* 1463 /*
1494 * If we got here, there was no memory left. 1464 * If we got here, there was no memory left.
1495 */ 1465 */
1496 panic("pmap_steal_memory: no memory to steal"); 1466 panic("pmap_steal_memory: no memory to steal");
1497} 1467}
1498 1468
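Because the stolen pages are handed back through their k0seg alias, this implementation never touches *vstartp or *vendp; a hypothetical early-boot caller can therefore pass NULL for both (an assumption specific to this implementation, since neither pointer is dereferenced here):

	/* Hypothetical: grab one zeroed, permanently wired page at boot. */
	vaddr_t va = pmap_steal_memory(PAGE_SIZE, NULL, NULL);
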
1499/* 1469/*
1500 * pmap_init: [ INTERFACE ] 1470 * pmap_init: [ INTERFACE ]
1501 * 1471 *
1502 * Initialize the pmap module. Called by vm_init(), to initialize any 1472 * Initialize the pmap module. Called by vm_init(), to initialize any
1503 * structures that the pmap system needs to map virtual memory. 1473 * structures that the pmap system needs to map virtual memory.
1504 * 1474 *
1505 * Note: no locking is necessary in this function. 1475 * Note: no locking is necessary in this function.
1506 */ 1476 */
1507void 1477void
1508pmap_init(void) 1478pmap_init(void)
1509{ 1479{
1510 1480
1511#ifdef DEBUG 1481#ifdef DEBUG
1512 if (pmapdebug & PDB_FOLLOW) 1482 if (pmapdebug & PDB_FOLLOW)
1513 printf("pmap_init()\n"); 1483 printf("pmap_init()\n");
1514#endif 1484#endif
1515 1485
1516 /* initialize protection array */ 1486 /* initialize protection array */
1517 alpha_protection_init(); 1487 alpha_protection_init();
1518 1488
1519 /* Initialize TLB handling. */ 1489 /* Initialize TLB handling. */
1520 pmap_tlb_init(); 1490 pmap_tlb_init();
1521 1491
1522 /* 1492 /*
1523 * Set a low water mark on the pv_entry pool, so that we are 1493 * Set a low water mark on the pv_entry pool, so that we are
1524 * more likely to have these around even in extreme memory 1494 * more likely to have these around even in extreme memory
1525 * starvation. 1495 * starvation.
1526 */ 1496 */
1527 pool_cache_setlowat(&pmap_pv_cache, pmap_pv_lowat); 1497 pool_cache_setlowat(&pmap_pv_cache, pmap_pv_lowat);
1528 1498
1529 /* 1499 /*
1530 * Now it is safe to enable pv entry recording. 1500 * Now it is safe to enable pv entry recording.
1531 */ 1501 */
1532 pmap_initialized = true; 1502 pmap_initialized = true;
1533 1503
1534#if 0 1504#if 0
1535 for (uvm_physseg_t bank = uvm_physseg_get_first(); 1505 for (uvm_physseg_t bank = uvm_physseg_get_first();
1536 uvm_physseg_valid_p(bank); 1506 uvm_physseg_valid_p(bank);
1537 bank = uvm_physseg_get_next(bank)) { 1507 bank = uvm_physseg_get_next(bank)) {
1538 printf("bank %d\n", bank); 1508 printf("bank %d\n", bank);
1539 printf("\tstart = 0x%lx\n", ptoa(uvm_physseg_get_start(bank))); 1509 printf("\tstart = 0x%lx\n", ptoa(uvm_physseg_get_start(bank)));
1540 printf("\tend = 0x%lx\n", ptoa(uvm_physseg_get_end(bank))); 1510 printf("\tend = 0x%lx\n", ptoa(uvm_physseg_get_end(bank)));
1541 printf("\tavail_start = 0x%lx\n", 1511 printf("\tavail_start = 0x%lx\n",
1542 ptoa(uvm_physseg_get_avail_start(bank))); 1512 ptoa(uvm_physseg_get_avail_start(bank)));
1543 printf("\tavail_end = 0x%lx\n", 1513 printf("\tavail_end = 0x%lx\n",
1544 ptoa(uvm_physseg_get_avail_end(bank))); 1514 ptoa(uvm_physseg_get_avail_end(bank)));
1545 } 1515 }
1546#endif 1516#endif
1547} 1517}
1548 1518
1549/* 1519/*
1550 * pmap_create: [ INTERFACE ] 1520 * pmap_create: [ INTERFACE ]
1551 * 1521 *
1552 * Create and return a physical map. 1522 * Create and return a physical map.
1553 * 1523 *
1554 * Note: no locking is necessary in this function. 1524 * Note: no locking is necessary in this function.
1555 */ 1525 */
1556pmap_t 1526pmap_t
1557pmap_create(void) 1527pmap_create(void)
1558{ 1528{
1559 pmap_t pmap; 1529 pmap_t pmap;
1560 int i; 1530 int i;
1561 1531
1562#ifdef DEBUG 1532#ifdef DEBUG
1563 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) 1533 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
1564 printf("pmap_create()\n"); 1534 printf("pmap_create()\n");
1565#endif 1535#endif
1566 1536
1567 pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK); 1537 pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK);
1568 memset(pmap, 0, sizeof(*pmap)); 1538 memset(pmap, 0, sizeof(*pmap));
1569 1539
1570 pmap->pm_count = 1; 1540 pmap->pm_count = 1;
1571 1541
1572 /* 1542 /*
1573 * There are only kernel mappings at this point; give the pmap 1543 * There are only kernel mappings at this point; give the pmap
1574 * the kernel ASN. This will be initialized to correct values 1544 * the kernel ASN. This will be initialized to correct values
1575 * when the pmap is activated. 1545 * when the pmap is activated.
1576 */ 1546 */
1577 for (i = 0; i < pmap_ncpuids; i++) { 1547 for (i = 0; i < pmap_ncpuids; i++) {
1578 pmap->pm_asni[i].pma_asn = PMAP_ASN_KERNEL; 1548 pmap->pm_asni[i].pma_asn = PMAP_ASN_KERNEL;
1579 pmap->pm_asni[i].pma_asngen = PMAP_ASNGEN_INVALID; 1549 pmap->pm_asni[i].pma_asngen = PMAP_ASNGEN_INVALID;
1580 } 1550 }
1581 1551
1582 try_again: 1552 try_again:
1583 rw_enter(&pmap_growkernel_lock, RW_READER); 1553 rw_enter(&pmap_growkernel_lock, RW_READER);
1584 1554
1585 pmap->pm_lev1map = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT); 1555 pmap->pm_lev1map = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT);
1586 if (__predict_false(pmap->pm_lev1map == NULL)) { 1556 if (__predict_false(pmap->pm_lev1map == NULL)) {
1587 rw_exit(&pmap_growkernel_lock); 1557 rw_exit(&pmap_growkernel_lock);
1588 (void) kpause("pmap_create", false, hz >> 2, NULL); 1558 (void) kpause("pmap_create", false, hz >> 2, NULL);
1589 goto try_again; 1559 goto try_again;
1590 } 1560 }
1591 1561
1592 mutex_enter(&pmap_all_pmaps_lock); 1562 mutex_enter(&pmap_all_pmaps_lock);
1593 TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list); 1563 TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list);
1594 mutex_exit(&pmap_all_pmaps_lock); 1564 mutex_exit(&pmap_all_pmaps_lock);
1595 1565
1596 rw_exit(&pmap_growkernel_lock); 1566 rw_exit(&pmap_growkernel_lock);
1597 1567
1598 return (pmap); 1568 return (pmap);
1599} 1569}
1600 1570
1601/* 1571/*
1602 * pmap_destroy: [ INTERFACE ] 1572 * pmap_destroy: [ INTERFACE ]
1603 * 1573 *
1604 * Drop the reference count on the specified pmap, releasing 1574 * Drop the reference count on the specified pmap, releasing
1605 * all resources if the reference count drops to zero. 1575 * all resources if the reference count drops to zero.
1606 */ 1576 */
1607void 1577void
1608pmap_destroy(pmap_t pmap) 1578pmap_destroy(pmap_t pmap)
1609{ 1579{
1610 1580
1611#ifdef DEBUG 1581#ifdef DEBUG
1612 if (pmapdebug & PDB_FOLLOW) 1582 if (pmapdebug & PDB_FOLLOW)
1613 printf("pmap_destroy(%p)\n", pmap); 1583 printf("pmap_destroy(%p)\n", pmap);
1614#endif 1584#endif
1615 1585
1616 PMAP_MP(membar_exit()); 1586 PMAP_MP(membar_exit());
1617 if (atomic_dec_ulong_nv(&pmap->pm_count) > 0) 1587 if (atomic_dec_ulong_nv(&pmap->pm_count) > 0)
1618 return; 1588 return;
1619 1589
1620 rw_enter(&pmap_growkernel_lock, RW_READER); 1590 rw_enter(&pmap_growkernel_lock, RW_READER);
1621 1591
1622 /* 1592 /*
1623 * Remove it from the global list of all pmaps. 1593 * Remove it from the global list of all pmaps.
1624 */ 1594 */
1625 mutex_enter(&pmap_all_pmaps_lock); 1595 mutex_enter(&pmap_all_pmaps_lock);
1626 TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list); 1596 TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list);
1627 mutex_exit(&pmap_all_pmaps_lock); 1597 mutex_exit(&pmap_all_pmaps_lock);
1628 1598
1629 pool_cache_put(&pmap_l1pt_cache, pmap->pm_lev1map); 1599 pool_cache_put(&pmap_l1pt_cache, pmap->pm_lev1map);
1630 pmap->pm_lev1map = NULL; 1600 pmap->pm_lev1map = NULL;
1631 1601
1632 rw_exit(&pmap_growkernel_lock); 1602 rw_exit(&pmap_growkernel_lock);
1633 1603
1634 pool_cache_put(&pmap_pmap_cache, pmap); 1604 pool_cache_put(&pmap_pmap_cache, pmap);
1635} 1605}
1636 1606
1637/* 1607/*
1638 * pmap_reference: [ INTERFACE ] 1608 * pmap_reference: [ INTERFACE ]
1639 * 1609 *
1640 * Add a reference to the specified pmap. 1610 * Add a reference to the specified pmap.
1641 */ 1611 */
1642void 1612void
1643pmap_reference(pmap_t pmap) 1613pmap_reference(pmap_t pmap)
1644{ 1614{
1645 1615
1646#ifdef DEBUG 1616#ifdef DEBUG
1647 if (pmapdebug & PDB_FOLLOW) 1617 if (pmapdebug & PDB_FOLLOW)
1648 printf("pmap_reference(%p)\n", pmap); 1618 printf("pmap_reference(%p)\n", pmap);
1649#endif 1619#endif
1650 1620
1651 atomic_inc_ulong(&pmap->pm_count); 1621 atomic_inc_ulong(&pmap->pm_count);
1652 PMAP_MP(membar_enter()); 1622 PMAP_MP(membar_enter());
1653} 1623}
1654 1624
1655/* 1625/*
1656 * pmap_remove: [ INTERFACE ] 1626 * pmap_remove: [ INTERFACE ]
1657 * 1627 *
1658 * Remove the given range of addresses from the specified map. 1628 * Remove the given range of addresses from the specified map.
1659 * 1629 *
1660 * It is assumed that the start and end are properly 1630 * It is assumed that the start and end are properly
1661 * rounded to the page size. 1631 * rounded to the page size.
1662 */ 1632 */
1663static void 1633static void
1664pmap_remove_internal(pmap_t pmap, vaddr_t sva, vaddr_t eva, 1634pmap_remove_internal(pmap_t pmap, vaddr_t sva, vaddr_t eva,
1665 struct pmap_tlb_context * const tlbctx) 1635 struct pmap_tlb_context * const tlbctx)
1666{ 1636{
1667 pt_entry_t *l1pte, *l2pte, *l3pte; 1637 pt_entry_t *l1pte, *l2pte, *l3pte;
1668 pt_entry_t *saved_l2pte, *saved_l3pte; 1638 pt_entry_t *saved_l2pte, *saved_l3pte;
1669 vaddr_t l1eva, l2eva, l3vptva; 1639 vaddr_t l1eva, l2eva, l3vptva;
1670 pt_entry_t pte_bits; 1640 pt_entry_t pte_bits;
1671 1641
1672#ifdef DEBUG 1642#ifdef DEBUG
1673 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) 1643 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
1674 printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva); 1644 printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva);
1675#endif 1645#endif
1676 1646
1677 /* 1647 /*
1678 * If this is the kernel pmap, we can use a faster method 1648 * If this is the kernel pmap, we can use a faster method
1679 * for accessing the PTEs (since the PT pages are always 1649 * for accessing the PTEs (since the PT pages are always
1680 * resident). 1650 * resident).
1681 * 1651 *
1682 * Note that this routine should NEVER be called from an 1652 * Note that this routine should NEVER be called from an
1683 * interrupt context; pmap_kremove() is used for that. 1653 * interrupt context; pmap_kremove() is used for that.
1684 */ 1654 */
1685 if (pmap == pmap_kernel()) { 1655 if (pmap == pmap_kernel()) {
1686 PMAP_MAP_TO_HEAD_LOCK(); 1656 PMAP_MAP_TO_HEAD_LOCK();
1687 PMAP_LOCK(pmap); 1657 PMAP_LOCK(pmap);
1688 1658
1689 while (sva < eva) { 1659 while (sva < eva) {
1690 l3pte = PMAP_KERNEL_PTE(sva); 1660 l3pte = PMAP_KERNEL_PTE(sva);
1691 if (pmap_pte_v(l3pte)) { 1661 if (pmap_pte_v(l3pte)) {
1692 pte_bits = pmap_remove_mapping(pmap, sva, 1662 pte_bits = pmap_remove_mapping(pmap, sva,
1693 l3pte, true, NULL, tlbctx); 1663 l3pte, true, NULL, tlbctx);
1694 pmap_tlb_shootdown(pmap, sva, pte_bits, 1664 pmap_tlb_shootdown(pmap, sva, pte_bits,
1695 tlbctx); 1665 tlbctx);
1696 } 1666 }
1697 sva += PAGE_SIZE; 1667 sva += PAGE_SIZE;
1698 } 1668 }
1699 1669
1700 PMAP_MAP_TO_HEAD_UNLOCK(); 1670 PMAP_MAP_TO_HEAD_UNLOCK();
1701 PMAP_UNLOCK(pmap); 1671 PMAP_UNLOCK(pmap);
1702 pmap_tlb_shootnow(tlbctx); 1672 pmap_tlb_shootnow(tlbctx);
1703 pmap_tlb_ptpage_drain(tlbctx); 1673 pmap_tlb_ptpage_drain(tlbctx);
1704 TLB_COUNT(reason_remove_kernel); 1674 TLB_COUNT(reason_remove_kernel);
1705 1675
1706 return; 1676 return;
1707 } 1677 }
1708 1678
1709 KASSERT(sva < VM_MAXUSER_ADDRESS); 1679 KASSERT(sva < VM_MAXUSER_ADDRESS);
1710 KASSERT(eva <= VM_MAXUSER_ADDRESS); 1680 KASSERT(eva <= VM_MAXUSER_ADDRESS);
1711 KASSERT(pmap->pm_lev1map != kernel_lev1map); 1681 KASSERT(pmap->pm_lev1map != kernel_lev1map);
1712 1682
1713 PMAP_MAP_TO_HEAD_LOCK(); 1683 PMAP_MAP_TO_HEAD_LOCK();
1714 PMAP_LOCK(pmap); 1684 PMAP_LOCK(pmap);
1715 1685
1716 l1pte = pmap_l1pte(pmap, sva); 1686 l1pte = pmap_l1pte(pmap, sva);
1717 1687
1718 for (; sva < eva; sva = l1eva, l1pte++) { 1688 for (; sva < eva; sva = l1eva, l1pte++) {
1719 l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE; 1689 l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE;
1720 if (pmap_pte_v(l1pte)) { 1690 if (pmap_pte_v(l1pte)) {
1721 saved_l2pte = l2pte = pmap_l2pte(pmap, sva, l1pte); 1691 saved_l2pte = l2pte = pmap_l2pte(pmap, sva, l1pte);
1722 1692
1723 /* 1693 /*
1724 * Add a reference to the L2 table so it won't 1694 * Add a reference to the L2 table so it won't
1725 * get removed from under us. 1695 * get removed from under us.
1726 */ 1696 */
1727 pmap_physpage_addref(saved_l2pte); 1697 pmap_physpage_addref(saved_l2pte);
1728 1698
1729 for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) { 1699 for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) {
1730 l2eva = 1700 l2eva =
1731 alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE; 1701 alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE;
1732 if (pmap_pte_v(l2pte)) { 1702 if (pmap_pte_v(l2pte)) {
1733 saved_l3pte = l3pte = 1703 saved_l3pte = l3pte =
1734 pmap_l3pte(pmap, sva, l2pte); 1704 pmap_l3pte(pmap, sva, l2pte);
1735 1705
1736 /* 1706 /*
1737 * Add a reference to the L3 table so 1707 * Add a reference to the L3 table so
1738 * it won't get removed from under us. 1708 * it won't get removed from under us.
1739 */ 1709 */
1740 pmap_physpage_addref(saved_l3pte); 1710 pmap_physpage_addref(saved_l3pte);
1741 1711
1742 /* 1712 /*
1743 * Remember this sva; if the L3 table 1713 * Remember this sva; if the L3 table
1744 * gets removed, we need to invalidate 1714 * gets removed, we need to invalidate
1745 * the VPT TLB entry for it. 1715 * the VPT TLB entry for it.
1746 */ 1716 */
1747 l3vptva = sva; 1717 l3vptva = sva;
1748 1718
1749 for (; sva < l2eva && sva < eva; 1719 for (; sva < l2eva && sva < eva;
1750 sva += PAGE_SIZE, l3pte++) { 1720 sva += PAGE_SIZE, l3pte++) {
1751 if (!pmap_pte_v(l3pte)) { 1721 if (!pmap_pte_v(l3pte)) {
1752 continue; 1722 continue;
1753 } 1723 }
1754 pte_bits = 1724 pte_bits =
1755 pmap_remove_mapping( 1725 pmap_remove_mapping(
1756 pmap, sva, 1726 pmap, sva,
1757 l3pte, true, 1727 l3pte, true,
1758 NULL, tlbctx); 1728 NULL, tlbctx);
1759 pmap_tlb_shootdown(pmap, 1729 pmap_tlb_shootdown(pmap,
1760 sva, pte_bits, tlbctx); 1730 sva, pte_bits, tlbctx);
1761 } 1731 }
1762 1732
1763 /* 1733 /*
1764 * Remove the reference to the L3 1734 * Remove the reference to the L3
1765 * table that we added above. This 1735 * table that we added above. This
1766 * may free the L3 table. 1736 * may free the L3 table.
1767 */ 1737 */
1768 pmap_l3pt_delref(pmap, l3vptva, 1738 pmap_l3pt_delref(pmap, l3vptva,
1769 saved_l3pte, tlbctx); 1739 saved_l3pte, tlbctx);
1770 } 1740 }
1771 } 1741 }
1772 1742
1773 /* 1743 /*
1774 * Remove the reference to the L2 table that we 1744 * Remove the reference to the L2 table that we
1775 * added above. This may free the L2 table. 1745 * added above. This may free the L2 table.
1776 */ 1746 */
1777 pmap_l2pt_delref(pmap, l1pte, saved_l2pte, tlbctx); 1747 pmap_l2pt_delref(pmap, l1pte, saved_l2pte, tlbctx);
1778 } 1748 }
1779 } 1749 }
1780 1750
1781 PMAP_MAP_TO_HEAD_UNLOCK(); 1751 PMAP_MAP_TO_HEAD_UNLOCK();
1782 PMAP_UNLOCK(pmap); 1752 PMAP_UNLOCK(pmap);
1783 pmap_tlb_shootnow(tlbctx); 1753 pmap_tlb_shootnow(tlbctx);
1784 pmap_tlb_ptpage_drain(tlbctx); 1754 pmap_tlb_ptpage_drain(tlbctx);
1785 TLB_COUNT(reason_remove_user); 1755 TLB_COUNT(reason_remove_user);
1786} 1756}
1787 1757
1788void 1758void
1789pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) 1759pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1790{ 1760{
1791 struct pmap_tlb_context tlbctx; 1761 struct pmap_tlb_context tlbctx;
1792 1762
1793 pmap_tlb_context_init(&tlbctx); 1763 pmap_tlb_context_init(&tlbctx);
1794 pmap_remove_internal(pmap, sva, eva, &tlbctx); 1764 pmap_remove_internal(pmap, sva, eva, &tlbctx);
1795} 1765}
1796 1766
1797/* 1767/*
1798 * pmap_page_protect: [ INTERFACE ] 1768 * pmap_page_protect: [ INTERFACE ]
1799 * 1769 *
1800 * Lower the permission for all mappings to a given page to 1770 * Lower the permission for all mappings to a given page to
1801 * the permissions specified. 1771 * the permissions specified.
1802 */ 1772 */
1803void 1773void
1804pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 1774pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1805{ 1775{
1806 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1776 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1807 pv_entry_t pv, nextpv; 1777 pv_entry_t pv, nextpv;
1808 pt_entry_t opte; 1778 pt_entry_t opte;
1809 kmutex_t *lock; 1779 kmutex_t *lock;
1810 struct pmap_tlb_context tlbctx; 1780 struct pmap_tlb_context tlbctx;
1811 1781
1812#ifdef DEBUG 1782#ifdef DEBUG
1813 paddr_t pa = VM_PAGE_TO_PHYS(pg); 1783 paddr_t pa = VM_PAGE_TO_PHYS(pg);
1814 1784
1815 1785
1816 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || 1786 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
1817 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) 1787 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
1818 printf("pmap_page_protect(%p, %x)\n", pg, prot); 1788 printf("pmap_page_protect(%p, %x)\n", pg, prot);
1819#endif 1789#endif
1820 1790
1821 pmap_tlb_context_init(&tlbctx); 1791 pmap_tlb_context_init(&tlbctx);
1822 1792
1823 switch (prot) { 1793 switch (prot) {
1824 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: 1794 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
1825 case VM_PROT_READ|VM_PROT_WRITE: 1795 case VM_PROT_READ|VM_PROT_WRITE:
1826 return; 1796 return;
1827 1797
1828 /* copy_on_write */ 1798 /* copy_on_write */
1829 case VM_PROT_READ|VM_PROT_EXECUTE: 1799 case VM_PROT_READ|VM_PROT_EXECUTE:
1830 case VM_PROT_READ: 1800 case VM_PROT_READ:
1831 PMAP_HEAD_TO_MAP_LOCK(); 1801 PMAP_HEAD_TO_MAP_LOCK();
1832 lock = pmap_pvh_lock(pg); 1802 lock = pmap_pvh_lock(pg);
1833 mutex_enter(lock); 1803 mutex_enter(lock);
1834 for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) { 1804 for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) {
1835 PMAP_LOCK(pv->pv_pmap); 1805 PMAP_LOCK(pv->pv_pmap);
1836 opte = atomic_load_relaxed(pv->pv_pte); 1806 opte = atomic_load_relaxed(pv->pv_pte);
1837 if (opte & (PG_KWE | PG_UWE)) { 1807 if (opte & (PG_KWE | PG_UWE)) {
1838 atomic_store_relaxed(pv->pv_pte, 1808 atomic_store_relaxed(pv->pv_pte,
1839 opte & ~(PG_KWE | PG_UWE)); 1809 opte & ~(PG_KWE | PG_UWE));
1840 pmap_tlb_shootdown_pv(pv, opte, &tlbctx); 1810 pmap_tlb_shootdown_pv(pv, opte, &tlbctx);
1841 } 1811 }
1842 PMAP_UNLOCK(pv->pv_pmap); 1812 PMAP_UNLOCK(pv->pv_pmap);
1843 } 1813 }
1844 mutex_exit(lock); 1814 mutex_exit(lock);
1845 PMAP_HEAD_TO_MAP_UNLOCK(); 1815 PMAP_HEAD_TO_MAP_UNLOCK();
1846 pmap_tlb_shootnow(&tlbctx); 1816 pmap_tlb_shootnow(&tlbctx);
1847 TLB_COUNT(reason_page_protect_read); 1817 TLB_COUNT(reason_page_protect_read);
1848 return; 1818 return;
1849 1819
1850 /* remove_all */ 1820 /* remove_all */
1851 default: 1821 default:
1852 break; 1822 break;
1853 } 1823 }
1854 1824
1855 PMAP_HEAD_TO_MAP_LOCK(); 1825 PMAP_HEAD_TO_MAP_LOCK();
1856 lock = pmap_pvh_lock(pg); 1826 lock = pmap_pvh_lock(pg);
1857 mutex_enter(lock); 1827 mutex_enter(lock);
1858 for (pv = md->pvh_list; pv != NULL; pv = nextpv) { 1828 for (pv = md->pvh_list; pv != NULL; pv = nextpv) {
1859 pt_entry_t pte_bits; 1829 pt_entry_t pte_bits;
1860 1830
1861 nextpv = pv->pv_next; 1831 nextpv = pv->pv_next;
1862 1832
1863 PMAP_LOCK(pv->pv_pmap); 1833 PMAP_LOCK(pv->pv_pmap);
1864 pte_bits = pmap_remove_mapping(pv->pv_pmap, pv->pv_va, 1834 pte_bits = pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
1865 pv->pv_pte, false, NULL, &tlbctx); 1835 pv->pv_pte, false, NULL, &tlbctx);
1866 pmap_tlb_shootdown_pv(pv, pte_bits, &tlbctx); 1836 pmap_tlb_shootdown_pv(pv, pte_bits, &tlbctx);
1867 PMAP_UNLOCK(pv->pv_pmap); 1837 PMAP_UNLOCK(pv->pv_pmap);
1868 } 1838 }
1869 mutex_exit(lock); 1839 mutex_exit(lock);
1870 PMAP_HEAD_TO_MAP_UNLOCK(); 1840 PMAP_HEAD_TO_MAP_UNLOCK();
1871 pmap_tlb_shootnow(&tlbctx); 1841 pmap_tlb_shootnow(&tlbctx);
1872 pmap_tlb_ptpage_drain(&tlbctx); 1842 pmap_tlb_ptpage_drain(&tlbctx);
1873 TLB_COUNT(reason_page_protect_none); 1843 TLB_COUNT(reason_page_protect_none);
1874} 1844}
1875 1845
1876/* 1846/*
1877 * pmap_protect: [ INTERFACE ] 1847 * pmap_protect: [ INTERFACE ]
1878 * 1848 *
1879 * Set the physical protection on the specified range of this map 1849 * Set the physical protection on the specified range of this map
1880 * as requested. 1850 * as requested.
1881 */ 1851 */
1882void 1852void
1883pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 1853pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1884{ 1854{
1885 pt_entry_t *l1pte, *l2pte, *l3pte, opte; 1855 pt_entry_t *l1pte, *l2pte, *l3pte, opte;
1886 vaddr_t l1eva, l2eva; 1856 vaddr_t l1eva, l2eva;
1887 struct pmap_tlb_context tlbctx; 1857 struct pmap_tlb_context tlbctx;
1888 1858
1889#ifdef DEBUG 1859#ifdef DEBUG
1890 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) 1860 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
1891 printf("pmap_protect(%p, %lx, %lx, %x)\n", 1861 printf("pmap_protect(%p, %lx, %lx, %x)\n",
1892 pmap, sva, eva, prot); 1862 pmap, sva, eva, prot);
1893#endif 1863#endif
1894 1864
1895 pmap_tlb_context_init(&tlbctx); 1865 pmap_tlb_context_init(&tlbctx);
1896 1866
1897 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1867 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1898 pmap_remove_internal(pmap, sva, eva, &tlbctx); 1868 pmap_remove_internal(pmap, sva, eva, &tlbctx);
1899 return; 1869 return;
1900 } 1870 }
1901 1871
1902 const pt_entry_t bits = pte_prot(pmap, prot); 1872 const pt_entry_t bits = pte_prot(pmap, prot);
1903 1873
1904 PMAP_LOCK(pmap); 1874 PMAP_LOCK(pmap);
1905 1875
1906 l1pte = pmap_l1pte(pmap, sva); 1876 l1pte = pmap_l1pte(pmap, sva);
1907 for (; sva < eva; sva = l1eva, l1pte++) { 1877 for (; sva < eva; sva = l1eva, l1pte++) {
1908 l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE; 1878 l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE;
1909 if (pmap_pte_v(l1pte)) { 1879 if (pmap_pte_v(l1pte)) {
1910 l2pte = pmap_l2pte(pmap, sva, l1pte); 1880 l2pte = pmap_l2pte(pmap, sva, l1pte);
1911 for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) { 1881 for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) {
1912 l2eva = 1882 l2eva =
1913 alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE; 1883 alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE;
1914 if (pmap_pte_v(l2pte)) { 1884 if (pmap_pte_v(l2pte)) {
1915 l3pte = pmap_l3pte(pmap, sva, l2pte); 1885 l3pte = pmap_l3pte(pmap, sva, l2pte);
1916 for (; sva < l2eva && sva < eva; 1886 for (; sva < l2eva && sva < eva;
1917 sva += PAGE_SIZE, l3pte++) { 1887 sva += PAGE_SIZE, l3pte++) {
1918 if (pmap_pte_v(l3pte) && 1888 if (pmap_pte_v(l3pte) &&
1919 pmap_pte_prot_chg(l3pte, 1889 pmap_pte_prot_chg(l3pte,
1920 bits)) { 1890 bits)) {
1921 opte = atomic_load_relaxed(l3pte); 1891 opte = atomic_load_relaxed(l3pte);
1922 pmap_pte_set_prot(l3pte, 1892 pmap_pte_set_prot(l3pte,
1923 bits); 1893 bits);
1924 pmap_tlb_shootdown(pmap, 1894 pmap_tlb_shootdown(pmap,
1925 sva, opte, &tlbctx); 1895 sva, opte, &tlbctx);
1926 } 1896 }
1927 } 1897 }
1928 } 1898 }
1929 } 1899 }
1930 } 1900 }
1931 } 1901 }
1932 1902
1933 PMAP_UNLOCK(pmap); 1903 PMAP_UNLOCK(pmap);
1934 pmap_tlb_shootnow(&tlbctx); 1904 pmap_tlb_shootnow(&tlbctx);
1935 TLB_COUNT(reason_protect); 1905 TLB_COUNT(reason_protect);
1936} 1906}
1937 1907
1938/* 1908/*
1939 * pmap_enter_tlb_shootdown: 1909 * pmap_enter_tlb_shootdown:
1940 * 1910 *
1941 * Carry out a TLB shootdown on behalf of a pmap_enter() 1911 * Carry out a TLB shootdown on behalf of a pmap_enter()
1942 * or a pmap_kenter_pa(). This is factored out separately 1912 * or a pmap_kenter_pa(). This is factored out separately
1943 * because we expect it to be not a common case. 1913 * because we expect it to be not a common case.
1944 */ 1914 */
1945static void __noinline 1915static void __noinline
1946pmap_enter_tlb_shootdown(pmap_t const pmap, vaddr_t const va, 1916pmap_enter_tlb_shootdown(pmap_t const pmap, vaddr_t const va,
1947 pt_entry_t const pte_bits, bool locked) 1917 pt_entry_t const pte_bits, bool locked)
1948{ 1918{
1949 struct pmap_tlb_context tlbctx; 1919 struct pmap_tlb_context tlbctx;
1950 1920
1951 pmap_tlb_context_init(&tlbctx); 1921 pmap_tlb_context_init(&tlbctx);
1952 pmap_tlb_shootdown(pmap, va, pte_bits, &tlbctx); 1922 pmap_tlb_shootdown(pmap, va, pte_bits, &tlbctx);
1953 if (locked) { 1923 if (locked) {
1954 PMAP_UNLOCK(pmap); 1924 PMAP_UNLOCK(pmap);
1955 } 1925 }
1956 pmap_tlb_shootnow(&tlbctx); 1926 pmap_tlb_shootnow(&tlbctx);
1957} 1927}
1958 1928
1959/* 1929/*
1960 * pmap_enter_l2pt_delref: 1930 * pmap_enter_l2pt_delref:
1961 * 1931 *
1962 * Release a reference on an L2 PT page for pmap_enter(). 1932 * Release a reference on an L2 PT page for pmap_enter().
1963 * This is factored out separately because we expect it 1933 * This is factored out separately because we expect it
1964 * to be a rare case. 1934 * to be a rare case.
1965 */ 1935 */
1966static void __noinline 1936static void __noinline
1967pmap_enter_l2pt_delref(pmap_t const pmap, pt_entry_t * const l1pte, 1937pmap_enter_l2pt_delref(pmap_t const pmap, pt_entry_t * const l1pte,
1968 pt_entry_t * const l2pte) 1938 pt_entry_t * const l2pte)
1969{ 1939{
1970 struct pmap_tlb_context tlbctx; 1940 struct pmap_tlb_context tlbctx;
1971 1941
1972 /* 1942 /*
1973 * PALcode may have tried to service a TLB miss with 1943 * PALcode may have tried to service a TLB miss with
1974 * this L2 PTE, so we need to make sure we don't actually 1944 * this L2 PTE, so we need to make sure we don't actually
1975 * free the PT page until we've shot down any TLB entries 1945 * free the PT page until we've shot down any TLB entries
1976 * for this VPT index. 1946 * for this VPT index.
1977 */ 1947 */
1978 1948
1979 pmap_tlb_context_init(&tlbctx); 1949 pmap_tlb_context_init(&tlbctx);
1980 pmap_l2pt_delref(pmap, l1pte, l2pte, &tlbctx); 1950 pmap_l2pt_delref(pmap, l1pte, l2pte, &tlbctx);
1981 PMAP_UNLOCK(pmap); 1951 PMAP_UNLOCK(pmap);
1982 pmap_tlb_shootnow(&tlbctx); 1952 pmap_tlb_shootnow(&tlbctx);
1983 pmap_tlb_ptpage_drain(&tlbctx); 1953 pmap_tlb_ptpage_drain(&tlbctx);
1984 TLB_COUNT(reason_enter_l2pt_delref); 1954 TLB_COUNT(reason_enter_l2pt_delref);
1985} 1955}
1986 1956
1987/* 1957/*
1988 * pmap_enter_l3pt_delref: 1958 * pmap_enter_l3pt_delref:
1989 * 1959 *
1990 * Release a reference on an L3 PT page for pmap_enter(). 1960 * Release a reference on an L3 PT page for pmap_enter().
1991 * This is factored out separately because we expect it 1961 * This is factored out separately because we expect it
1992 * to be a rare case. 1962 * to be a rare case.
1993 */ 1963 */
1994static void __noinline 1964static void __noinline
1995pmap_enter_l3pt_delref(pmap_t const pmap, vaddr_t const va, 1965pmap_enter_l3pt_delref(pmap_t const pmap, vaddr_t const va,
1996 pt_entry_t * const pte) 1966 pt_entry_t * const pte)
1997{ 1967{
1998 struct pmap_tlb_context tlbctx; 1968 struct pmap_tlb_context tlbctx;
1999 1969
2000 /* 1970 /*
2001 * PALcode may have tried to service a TLB miss with 1971 * PALcode may have tried to service a TLB miss with
2002 * this PTE, so we need to make sure we don't actually 1972 * this PTE, so we need to make sure we don't actually
2003 * free the PT page until we've shot down any TLB entries 1973 * free the PT page until we've shot down any TLB entries
2004 * for this VPT index. 1974 * for this VPT index.
2005 */ 1975 */
2006 1976
2007 pmap_tlb_context_init(&tlbctx); 1977 pmap_tlb_context_init(&tlbctx);
2008 pmap_l3pt_delref(pmap, va, pte, &tlbctx); 1978 pmap_l3pt_delref(pmap, va, pte, &tlbctx);
2009 PMAP_UNLOCK(pmap); 1979 PMAP_UNLOCK(pmap);
2010 pmap_tlb_shootnow(&tlbctx); 1980 pmap_tlb_shootnow(&tlbctx);
2011 pmap_tlb_ptpage_drain(&tlbctx); 1981 pmap_tlb_ptpage_drain(&tlbctx);
2012 TLB_COUNT(reason_enter_l3pt_delref); 1982 TLB_COUNT(reason_enter_l3pt_delref);
2013} 1983}
2014 1984
2015/* 1985/*
2016 * pmap_enter: [ INTERFACE ] 1986 * pmap_enter: [ INTERFACE ]
2017 * 1987 *
2018 * Insert the given physical page (p) at 1988 * Insert the given physical page (p) at
2019 * the specified virtual address (v) in the 1989 * the specified virtual address (v) in the
2020 * target physical map with the protection requested. 1990 * target physical map with the protection requested.
2021 * 1991 *
2022 * If specified, the page will be wired down, meaning 1992 * If specified, the page will be wired down, meaning
2023 * that the related pte can not be reclaimed. 1993 * that the related pte can not be reclaimed.
2024 * 1994 *
2025 * Note: This is the only routine which MAY NOT lazy-evaluate 1995 * Note: This is the only routine which MAY NOT lazy-evaluate
2026 * or lose information. That is, this routine must actually 1996 * or lose information. That is, this routine must actually
2027 * insert this page into the given map NOW. 1997 * insert this page into the given map NOW.
2028 */ 1998 */
2029int 1999int
2030pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2000pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2031{ 2001{
2032 pt_entry_t *pte, npte, opte; 2002 pt_entry_t *pte, npte, opte;
2033 pv_entry_t opv = NULL; 2003 pv_entry_t opv = NULL;
2034 paddr_t opa; 2004 paddr_t opa;
2035 bool tflush = false; 2005 bool tflush = false;
2036 int error = 0; 2006 int error = 0;
2037 kmutex_t *lock; 2007 kmutex_t *lock;
2038 2008
2039#ifdef DEBUG 2009#ifdef DEBUG
2040 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) 2010 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
2041 printf("pmap_enter(%p, %lx, %lx, %x, %x)\n", 2011 printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
2042 pmap, va, pa, prot, flags); 2012 pmap, va, pa, prot, flags);
2043#endif 2013#endif
2044 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 2014 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
2045 const bool wired = (flags & PMAP_WIRED) != 0; 2015 const bool wired = (flags & PMAP_WIRED) != 0;
2046 2016
2047 PMAP_MAP_TO_HEAD_LOCK(); 2017 PMAP_MAP_TO_HEAD_LOCK();
2048 PMAP_LOCK(pmap); 2018 PMAP_LOCK(pmap);
2049 2019
2050 if (pmap == pmap_kernel()) { 2020 if (pmap == pmap_kernel()) {
2051 KASSERT(va >= VM_MIN_KERNEL_ADDRESS); 2021 KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
2052 pte = PMAP_KERNEL_PTE(va); 2022 pte = PMAP_KERNEL_PTE(va);
2053 } else { 2023 } else {
2054 pt_entry_t *l1pte, *l2pte; 2024 pt_entry_t *l1pte, *l2pte;
2055 2025
2056 KASSERT(va < VM_MAXUSER_ADDRESS); 2026 KASSERT(va < VM_MAXUSER_ADDRESS);
2057 KASSERT(pmap->pm_lev1map != kernel_lev1map); 2027 KASSERT(pmap->pm_lev1map != kernel_lev1map);
2058 2028
2059 /* 2029 /*
2060 * Check to see if the level 1 PTE is valid, and 2030 * Check to see if the level 1 PTE is valid, and
2061 * allocate a new level 2 page table page if it's not. 2031 * allocate a new level 2 page table page if it's not.
2062 * A reference will be added to the level 2 table when 2032 * A reference will be added to the level 2 table when
2063 * the level 3 table is created. 2033 * the level 3 table is created.
2064 */ 2034 */
2065 l1pte = pmap_l1pte(pmap, va); 2035 l1pte = pmap_l1pte(pmap, va);
2066 if (pmap_pte_v(l1pte) == 0) { 2036 if (pmap_pte_v(l1pte) == 0) {
2067 pmap_physpage_addref(l1pte); 2037 pmap_physpage_addref(l1pte);
2068 error = pmap_ptpage_alloc(l1pte, PGU_L2PT); 2038 error = pmap_ptpage_alloc(l1pte, PGU_L2PT);
2069 if (error) { 2039 if (error) {
2070 pmap_l1pt_delref(pmap, l1pte); 2040 pmap_l1pt_delref(pmap, l1pte);
2071 if (flags & PMAP_CANFAIL) 2041 if (flags & PMAP_CANFAIL)
2072 goto out; 2042 goto out;
2073 panic("pmap_enter: unable to create L2 PT " 2043 panic("pmap_enter: unable to create L2 PT "
2074 "page"); 2044 "page");
2075 } 2045 }
2076#ifdef DEBUG 2046#ifdef DEBUG
2077 if (pmapdebug & PDB_PTPAGE) 2047 if (pmapdebug & PDB_PTPAGE)
2078 printf("pmap_enter: new level 2 table at " 2048 printf("pmap_enter: new level 2 table at "
2079 "0x%lx\n", pmap_pte_pa(l1pte)); 2049 "0x%lx\n", pmap_pte_pa(l1pte));
2080#endif 2050#endif
2081 } 2051 }
2082 2052
2083 /* 2053 /*
2084 * Check to see if the level 2 PTE is valid, and 2054 * Check to see if the level 2 PTE is valid, and
2085 * allocate a new level 3 page table page if it's not. 2055 * allocate a new level 3 page table page if it's not.
2086 * A reference will be added to the level 3 table when 2056 * A reference will be added to the level 3 table when
2087 * the mapping is validated. 2057 * the mapping is validated.
2088 */ 2058 */
2089 l2pte = pmap_l2pte(pmap, va, l1pte); 2059 l2pte = pmap_l2pte(pmap, va, l1pte);
2090 if (pmap_pte_v(l2pte) == 0) { 2060 if (pmap_pte_v(l2pte) == 0) {
2091 pmap_physpage_addref(l2pte); 2061 pmap_physpage_addref(l2pte);
2092 error = pmap_ptpage_alloc(l2pte, PGU_L3PT); 2062 error = pmap_ptpage_alloc(l2pte, PGU_L3PT);
2093 if (error) { 2063 if (error) {
2094 /* unlocks pmap */ 2064 /* unlocks pmap */
2095 pmap_enter_l2pt_delref(pmap, l1pte, l2pte); 2065 pmap_enter_l2pt_delref(pmap, l1pte, l2pte);
2096 if (flags & PMAP_CANFAIL) { 2066 if (flags & PMAP_CANFAIL) {
2097 PMAP_LOCK(pmap); 2067 PMAP_LOCK(pmap);
2098 goto out; 2068 goto out;
2099 } 2069 }
2100 panic("pmap_enter: unable to create L3 PT " 2070 panic("pmap_enter: unable to create L3 PT "
2101 "page"); 2071 "page");
2102 } 2072 }
2103#ifdef DEBUG 2073#ifdef DEBUG
2104 if (pmapdebug & PDB_PTPAGE) 2074 if (pmapdebug & PDB_PTPAGE)
2105 printf("pmap_enter: new level 3 table at " 2075 printf("pmap_enter: new level 3 table at "
2106 "0x%lx\n", pmap_pte_pa(l2pte)); 2076 "0x%lx\n", pmap_pte_pa(l2pte));
2107#endif 2077#endif
2108 } 2078 }
2109 2079
2110 /* 2080 /*
2111 * Get the PTE that will map the page. 2081 * Get the PTE that will map the page.
2112 */ 2082 */
2113 pte = pmap_l3pte(pmap, va, l2pte); 2083 pte = pmap_l3pte(pmap, va, l2pte);
2114 } 2084 }
2115 2085
2116 /* Remember all of the old PTE; used for TBI check later. */ 2086 /* Remember all of the old PTE; used for TBI check later. */
2117 opte = atomic_load_relaxed(pte); 2087 opte = atomic_load_relaxed(pte);
2118 2088
2119 /* 2089 /*
2120 * Check to see if the old mapping is valid. If not, validate the 2090 * Check to see if the old mapping is valid. If not, validate the
2121 * new one immediately. 2091 * new one immediately.
2122 */ 2092 */
2123 if ((opte & PG_V) == 0) { 2093 if ((opte & PG_V) == 0) {
2124 /* No TLB invalidations needed for new mappings. */ 2094 /* No TLB invalidations needed for new mappings. */
2125 2095
2126 if (pmap != pmap_kernel()) { 2096 if (pmap != pmap_kernel()) {
2127 /* 2097 /*
2128 * New mappings gain a reference on the level 3 2098 * New mappings gain a reference on the level 3
2129 * table. 2099 * table.
2130 */ 2100 */
2131 pmap_physpage_addref(pte); 2101 pmap_physpage_addref(pte);
2132 } 2102 }
2133 goto validate_enterpv; 2103 goto validate_enterpv;
2134 } 2104 }
2135 2105
2136 opa = pmap_pte_pa(pte); 2106 opa = pmap_pte_pa(pte);
2137 2107
2138 if (opa == pa) { 2108 if (opa == pa) {
2139 /* 2109 /*
2140 * Mapping has not changed; must be a protection or 2110 * Mapping has not changed; must be a protection or
2141 * wiring change. 2111 * wiring change.
2142 */ 2112 */
2143 if (pmap_pte_w_chg(pte, wired ? PG_WIRED : 0)) { 2113 if (pmap_pte_w_chg(pte, wired ? PG_WIRED : 0)) {
2144#ifdef DEBUG 2114#ifdef DEBUG
2145 if (pmapdebug & PDB_ENTER) 2115 if (pmapdebug & PDB_ENTER)
2146 printf("pmap_enter: wiring change -> %d\n", 2116 printf("pmap_enter: wiring change -> %d\n",
2147 wired); 2117 wired);
2148#endif 2118#endif
2149 /* Adjust the wiring count. */ 2119 /* Adjust the wiring count. */
2150 if (wired) 2120 if (wired)
2151 PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1); 2121 PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1);
2152 else 2122 else
2153 PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); 2123 PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1);
2154 } 2124 }
2155 2125
2156 /* Set the PTE. */ 2126 /* Set the PTE. */
2157 goto validate; 2127 goto validate;
2158 } 2128 }
2159 2129
2160 /* 2130 /*
2161 * The mapping has changed. We need to invalidate the 2131 * The mapping has changed. We need to invalidate the
2162 * old mapping before creating the new one. 2132 * old mapping before creating the new one.
2163 */ 2133 */
2164#ifdef DEBUG 2134#ifdef DEBUG
2165 if (pmapdebug & PDB_ENTER) 2135 if (pmapdebug & PDB_ENTER)
2166 printf("pmap_enter: removing old mapping 0x%lx\n", va); 2136 printf("pmap_enter: removing old mapping 0x%lx\n", va);
2167#endif 2137#endif
2168 if (pmap != pmap_kernel()) { 2138 if (pmap != pmap_kernel()) {
2169 /* 2139 /*
2170 * Gain an extra reference on the level 3 table. 2140 * Gain an extra reference on the level 3 table.
2171 * pmap_remove_mapping() will delete a reference, 2141 * pmap_remove_mapping() will delete a reference,
2172 * and we don't want the table to be erroneously 2142 * and we don't want the table to be erroneously
2173 * freed. 2143 * freed.
2174 */ 2144 */
2175 pmap_physpage_addref(pte); 2145 pmap_physpage_addref(pte);
2176 } 2146 }
2177 /* Already have the bits from opte above. */ 2147 /* Already have the bits from opte above. */
2178 (void) pmap_remove_mapping(pmap, va, pte, true, &opv, NULL); 2148 (void) pmap_remove_mapping(pmap, va, pte, true, &opv, NULL);
2179 2149
2180 validate_enterpv: 2150 validate_enterpv:
2181 /* Enter the mapping into the pv_table if appropriate. */ 2151 /* Enter the mapping into the pv_table if appropriate. */
2182 if (pg != NULL) { 2152 if (pg != NULL) {
2183 error = pmap_pv_enter(pmap, pg, va, pte, true, opv); 2153 error = pmap_pv_enter(pmap, pg, va, pte, true, opv);
2184 if (error) { 2154 if (error) {
2185 /* This can only fail if opv == NULL */ 2155 /* This can only fail if opv == NULL */
2186 KASSERT(opv == NULL); 2156 KASSERT(opv == NULL);
2187 2157
2188 /* unlocks pmap */ 2158 /* unlocks pmap */
2189 pmap_enter_l3pt_delref(pmap, va, pte); 2159 pmap_enter_l3pt_delref(pmap, va, pte);
2190 if (flags & PMAP_CANFAIL) { 2160 if (flags & PMAP_CANFAIL) {
2191 PMAP_LOCK(pmap); 2161 PMAP_LOCK(pmap);
2192 goto out; 2162 goto out;
2193 } 2163 }
2194 panic("pmap_enter: unable to enter mapping in PV " 2164 panic("pmap_enter: unable to enter mapping in PV "
2195 "table"); 2165 "table");
2196 } 2166 }
2197 opv = NULL; 2167 opv = NULL;
2198 } 2168 }
2199 2169
2200 /* Increment counters. */ 2170 /* Increment counters. */
2201 PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1); 2171 PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1);
2202 if (wired) 2172 if (wired)
2203 PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1); 2173 PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1);
2204 2174
2205 validate: 2175 validate:
2206 /* Build the new PTE. */ 2176 /* Build the new PTE. */
2207 npte = ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap, prot) | PG_V; 2177 npte = ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap, prot) | PG_V;
2208 if (pg != NULL) { 2178 if (pg != NULL) {
2209 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 2179 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2210 int attrs; 2180 int attrs;
2211 2181
2212 KASSERT(((flags & VM_PROT_ALL) & ~prot) == 0); 2182 KASSERT(((flags & VM_PROT_ALL) & ~prot) == 0);
2213 2183
2214 lock = pmap_pvh_lock(pg); 2184 lock = pmap_pvh_lock(pg);
2215 mutex_enter(lock); 2185 mutex_enter(lock);
2216 if (flags & VM_PROT_WRITE) 2186 if (flags & VM_PROT_WRITE)
2217 md->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED); 2187 md->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
2218 else if (flags & VM_PROT_ALL) 2188 else if (flags & VM_PROT_ALL)
2219 md->pvh_attrs |= PGA_REFERENCED; 2189 md->pvh_attrs |= PGA_REFERENCED;
2220 attrs = md->pvh_attrs; 2190 attrs = md->pvh_attrs;
2221 mutex_exit(lock); 2191 mutex_exit(lock);
2222 2192
2223 /* Set up referenced/modified emulation for new mapping. */ 2193 /* Set up referenced/modified emulation for new mapping. */
2224 if ((attrs & PGA_REFERENCED) == 0) 2194 if ((attrs & PGA_REFERENCED) == 0)
2225 npte |= PG_FOR | PG_FOW | PG_FOE; 2195 npte |= PG_FOR | PG_FOW | PG_FOE;
2226 else if ((attrs & PGA_MODIFIED) == 0) 2196 else if ((attrs & PGA_MODIFIED) == 0)
2227 npte |= PG_FOW; 2197 npte |= PG_FOW;
2228 2198
2229 /* 2199 /*
2230 * Mapping was entered on PV list. 2200 * Mapping was entered on PV list.
2231 */ 2201 */
2232 npte |= PG_PVLIST; 2202 npte |= PG_PVLIST;
2233 } 2203 }
2234 if (wired) 2204 if (wired)
2235 npte |= PG_WIRED; 2205 npte |= PG_WIRED;
2236#ifdef DEBUG 2206#ifdef DEBUG
2237 if (pmapdebug & PDB_ENTER) 2207 if (pmapdebug & PDB_ENTER)
2238 printf("pmap_enter: new pte = 0x%lx\n", npte); 2208 printf("pmap_enter: new pte = 0x%lx\n", npte);
2239#endif 2209#endif
2240 2210
2241 /* 2211 /*
2242 * If the HW / PALcode portion of the new PTE is the same as the 2212 * If the HW / PALcode portion of the new PTE is the same as the
2243 * old PTE, no TBI is necessary. 2213 * old PTE, no TBI is necessary.
2244 */ 2214 */
2245 if (opte & PG_V) { 2215 if (opte & PG_V) {
2246 tflush = PG_PALCODE(opte) != PG_PALCODE(npte); 2216 tflush = PG_PALCODE(opte) != PG_PALCODE(npte);
2247 } 2217 }
2248 2218
2249 /* Set the new PTE. */ 2219 /* Set the new PTE. */
2250 atomic_store_relaxed(pte, npte); 2220 atomic_store_relaxed(pte, npte);
2251 2221
2252out: 2222out:
2253 PMAP_MAP_TO_HEAD_UNLOCK(); 2223 PMAP_MAP_TO_HEAD_UNLOCK();
2254 2224
2255 /* 2225 /*
2256 * Invalidate the TLB entry for this VA and any appropriate 2226 * Invalidate the TLB entry for this VA and any appropriate
2257 * caches. 2227 * caches.
2258 */ 2228 */
2259 if (tflush) { 2229 if (tflush) {
2260 /* unlocks pmap */ 2230 /* unlocks pmap */
2261 pmap_enter_tlb_shootdown(pmap, va, opte, true); 2231 pmap_enter_tlb_shootdown(pmap, va, opte, true);
2262 if (pmap == pmap_kernel()) { 2232 if (pmap == pmap_kernel()) {
2263 TLB_COUNT(reason_enter_kernel); 2233 TLB_COUNT(reason_enter_kernel);
2264 } else { 2234 } else {
2265 TLB_COUNT(reason_enter_user); 2235 TLB_COUNT(reason_enter_user);
2266 } 2236 }
2267 } else { 2237 } else {
2268 PMAP_UNLOCK(pmap); 2238 PMAP_UNLOCK(pmap);
2269 } 2239 }
2270 2240
2271 if (opv) 2241 if (opv)
2272 pmap_pv_free(opv); 2242 pmap_pv_free(opv);
2273 2243
2274 return error; 2244 return error;
2275} 2245}
2276 2246
2277/* 2247/*
2278 * pmap_kenter_pa: [ INTERFACE ] 2248 * pmap_kenter_pa: [ INTERFACE ]
2279 * 2249 *
2280 * Enter a va -> pa mapping into the kernel pmap without any 2250 * Enter a va -> pa mapping into the kernel pmap without any
2281 * physical->virtual tracking. 2251 * physical->virtual tracking.
2282 * 2252 *
2283 * Note: no locking is necessary in this function. 2253 * Note: no locking is necessary in this function.
2284 */ 2254 */
2285void 2255void
2286pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2256pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2287{ 2257{
2288 pmap_t const pmap = pmap_kernel(); 2258 pmap_t const pmap = pmap_kernel();
2289 2259
2290#ifdef DEBUG 2260#ifdef DEBUG
2291 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) 2261 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
2292 printf("pmap_kenter_pa(%lx, %lx, %x)\n", 2262 printf("pmap_kenter_pa(%lx, %lx, %x)\n",
2293 va, pa, prot); 2263 va, pa, prot);
2294#endif 2264#endif
2295 2265
2296 KASSERT(va >= VM_MIN_KERNEL_ADDRESS); 2266 KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
2297 2267
2298 pt_entry_t * const pte = PMAP_KERNEL_PTE(va); 2268 pt_entry_t * const pte = PMAP_KERNEL_PTE(va);
2299 2269
2300 /* Build the new PTE. */ 2270 /* Build the new PTE. */
2301 const pt_entry_t npte = 2271 const pt_entry_t npte =
2302 ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap_kernel(), prot) | 2272 ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap_kernel(), prot) |
2303 PG_V | PG_WIRED; 2273 PG_V | PG_WIRED;
2304 2274
2305 /* Set the new PTE. */ 2275 /* Set the new PTE. */
2306 const pt_entry_t opte = atomic_load_relaxed(pte); 2276 const pt_entry_t opte = atomic_load_relaxed(pte);
2307 atomic_store_relaxed(pte, npte); 2277 atomic_store_relaxed(pte, npte);
2308 PMAP_MP(membar_enter()); 2278 PMAP_MP(membar_enter());
2309 2279
2310 PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1); 2280 PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1);
2311 PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1); 2281 PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1);
2312 2282
2313 /* 2283 /*
2314 * There should not have been anything here, previously, 2284 * There should not have been anything here, previously,
2315 * so we can skip TLB shootdowns, etc. in the common case. 2285 * so we can skip TLB shootdowns, etc. in the common case.
2316 */ 2286 */
2317 if (__predict_false(opte & PG_V)) { 2287 if (__predict_false(opte & PG_V)) {
2318 const pt_entry_t diff = npte ^ opte; 2288 const pt_entry_t diff = npte ^ opte;
2319 2289
2320 printf_nolog("%s: mapping already present\n", __func__); 2290 printf_nolog("%s: mapping already present\n", __func__);
2321 PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1); 2291 PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1);
2322 if (diff & PG_WIRED) 2292 if (diff & PG_WIRED)
2323 PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); 2293 PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1);
2324 /* XXX Can't handle this case. */ 2294 /* XXX Can't handle this case. */
2325 if (diff & PG_PVLIST) 2295 if (diff & PG_PVLIST)
2326 panic("pmap_kenter_pa: old mapping was managed"); 2296 panic("pmap_kenter_pa: old mapping was managed");
2327 2297
2328 pmap_enter_tlb_shootdown(pmap_kernel(), va, opte, false); 2298 pmap_enter_tlb_shootdown(pmap_kernel(), va, opte, false);
2329 TLB_COUNT(reason_kenter); 2299 TLB_COUNT(reason_kenter);
2330 } 2300 }
2331} 2301}
2332 2302
2333/* 2303/*
2334 * pmap_kremove: [ INTERFACE ] 2304 * pmap_kremove: [ INTERFACE ]
2335 * 2305 *
2336 * Remove a mapping entered with pmap_kenter_pa() starting at va, 2306 * Remove a mapping entered with pmap_kenter_pa() starting at va,
2337 * for size bytes (assumed to be page rounded). 2307 * for size bytes (assumed to be page rounded).
2338 */ 2308 */
2339void 2309void
2340pmap_kremove(vaddr_t va, vsize_t size) 2310pmap_kremove(vaddr_t va, vsize_t size)
2341{ 2311{
2342 pt_entry_t *pte, opte; 2312 pt_entry_t *pte, opte;
2343 pmap_t const pmap = pmap_kernel(); 2313 pmap_t const pmap = pmap_kernel();
2344 struct pmap_tlb_context tlbctx; 2314 struct pmap_tlb_context tlbctx;
2345 2315
2346#ifdef DEBUG 2316#ifdef DEBUG
2347 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) 2317 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
2348 printf("pmap_kremove(%lx, %lx)\n", 2318 printf("pmap_kremove(%lx, %lx)\n",
2349 va, size); 2319 va, size);
2350#endif 2320#endif
2351 2321
2352 pmap_tlb_context_init(&tlbctx); 2322 pmap_tlb_context_init(&tlbctx);
2353 2323
2354 KASSERT(va >= VM_MIN_KERNEL_ADDRESS); 2324 KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
2355 2325
2356 for (; size != 0; size -= PAGE_SIZE, va += PAGE_SIZE) { 2326 for (; size != 0; size -= PAGE_SIZE, va += PAGE_SIZE) {
2357 pte = PMAP_KERNEL_PTE(va); 2327 pte = PMAP_KERNEL_PTE(va);
2358 opte = atomic_load_relaxed(pte); 2328 opte = atomic_load_relaxed(pte);
2359 if (opte & PG_V) { 2329 if (opte & PG_V) {
2360 KASSERT((opte & PG_PVLIST) == 0); 2330 KASSERT((opte & PG_PVLIST) == 0);
2361 2331
2362 /* Zap the mapping. */ 2332 /* Zap the mapping. */
2363 atomic_store_relaxed(pte, PG_NV); 2333 atomic_store_relaxed(pte, PG_NV);
2364 pmap_tlb_shootdown(pmap, va, opte, &tlbctx); 2334 pmap_tlb_shootdown(pmap, va, opte, &tlbctx);
2365 2335
2366 /* Update stats. */ 2336 /* Update stats. */
2367 PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1); 2337 PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1);
2368 PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); 2338 PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1);
2369 } 2339 }
2370 } 2340 }
2371 2341
2372 pmap_tlb_shootnow(&tlbctx); 2342 pmap_tlb_shootnow(&tlbctx);
2373 TLB_COUNT(reason_kremove); 2343 TLB_COUNT(reason_kremove);
2374} 2344}
2375 2345
2376/* 2346/*
2377 * pmap_unwire: [ INTERFACE ] 2347 * pmap_unwire: [ INTERFACE ]
2378 * 2348 *
2379 * Clear the wired attribute for a map/virtual-address pair. 2349 * Clear the wired attribute for a map/virtual-address pair.
2380 * 2350 *
2381 * The mapping must already exist in the pmap. 2351 * The mapping must already exist in the pmap.
2382 */ 2352 */
2383void 2353void
2384pmap_unwire(pmap_t pmap, vaddr_t va) 2354pmap_unwire(pmap_t pmap, vaddr_t va)
2385{ 2355{
2386 pt_entry_t *pte; 2356 pt_entry_t *pte;
2387 2357
2388#ifdef DEBUG 2358#ifdef DEBUG
2389 if (pmapdebug & PDB_FOLLOW) 2359 if (pmapdebug & PDB_FOLLOW)
2390 printf("pmap_unwire(%p, %lx)\n", pmap, va); 2360 printf("pmap_unwire(%p, %lx)\n", pmap, va);
2391#endif 2361#endif
2392 2362
2393 PMAP_LOCK(pmap); 2363 PMAP_LOCK(pmap);
2394 2364
2395 pte = pmap_l3pte(pmap, va, NULL); 2365 pte = pmap_l3pte(pmap, va, NULL);
2396 2366
2397 KASSERT(pte != NULL); 2367 KASSERT(pte != NULL);
2398 KASSERT(pmap_pte_v(pte)); 2368 KASSERT(pmap_pte_v(pte));
2399 2369
2400 /* 2370 /*
2401 * If wiring actually changed (always?) clear the wire bit and 2371 * If wiring actually changed (always?) clear the wire bit and
2402 * update the wire count. Note that wiring is not a hardware 2372 * update the wire count. Note that wiring is not a hardware

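A pattern worth noting in the pmap code above: every bulk operation
(pmap_remove_internal(), pmap_page_protect(), pmap_protect(),
pmap_kremove()) batches its TLB invalidations in an on-stack
pmap_tlb_context and issues them with a single pmap_tlb_shootnow()
call after the locks are dropped, and the range-based ones walk the
three-level Alpha page table with each inner loop clamped to the end
of its L1 or L2 segment.  The sketch below condenses that shape; the
function name is hypothetical, and the PT-page reference counting
that pmap_remove_internal() uses to keep tables from being freed
mid-walk is omitted, but every other identifier appears in the diff
above.

/*
 * Condensed, illustrative sketch of the traversal-and-batched-flush
 * shape used above.  Not part of this change.
 */
static void
pmap_walk_sketch(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{
	struct pmap_tlb_context tlbctx;
	pt_entry_t *l1pte, *l2pte, *l3pte;
	vaddr_t l1eva, l2eva;

	pmap_tlb_context_init(&tlbctx);	/* start with an empty batch */
	PMAP_LOCK(pmap);

	l1pte = pmap_l1pte(pmap, sva);
	for (; sva < eva; sva = l1eva, l1pte++) {
		/* Clamp to the end of the current L1 segment. */
		l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE;
		if (!pmap_pte_v(l1pte))
			continue;
		l2pte = pmap_l2pte(pmap, sva, l1pte);
		for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) {
			/* Clamp to the end of the current L2 segment. */
			l2eva = alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE;
			if (!pmap_pte_v(l2pte))
				continue;
			l3pte = pmap_l3pte(pmap, sva, l2pte);
			for (; sva < l2eva && sva < eva;
			    sva += PAGE_SIZE, l3pte++) {
				if (!pmap_pte_v(l3pte))
					continue;
				const pt_entry_t opte =
				    atomic_load_relaxed(l3pte);
				/* ... modify *l3pte here ... */
				pmap_tlb_shootdown(pmap, sva, opte,
				    &tlbctx);
			}
		}
	}

	PMAP_UNLOCK(pmap);
	pmap_tlb_shootnow(&tlbctx);	/* flush the whole batch at once */
}

Deferring the flush to pmap_tlb_shootnow() means a large remove or
protect pays the cross-CPU synchronization cost once per call rather
than once per page.
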
cvs diff -r1.53 -r1.54 src/sys/arch/alpha/alpha/prom.c

--- src/sys/arch/alpha/alpha/prom.c 2020/08/30 16:26:56 1.53
+++ src/sys/arch/alpha/alpha/prom.c 2020/09/03 02:09:09 1.54
@@ -1,390 +1,430 @@ @@ -1,390 +1,430 @@
1/* $NetBSD: prom.c,v 1.53 2020/08/30 16:26:56 thorpej Exp $ */ 1/* $NetBSD: prom.c,v 1.54 2020/09/03 02:09:09 thorpej Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1994, 1995, 1996 Carnegie Mellon University 4 * Copyright (c) 1992, 1994, 1995, 1996 Carnegie Mellon University
5 * All Rights Reserved. 5 * All Rights Reserved.
6 * 6 *
7 * Permission to use, copy, modify and distribute this software and its 7 * Permission to use, copy, modify and distribute this software and its
8 * documentation is hereby granted, provided that both the copyright 8 * documentation is hereby granted, provided that both the copyright
9 * notice and this permission notice appear in all copies of the 9 * notice and this permission notice appear in all copies of the
10 * software, derivative works or modified versions, and any portions 10 * software, derivative works or modified versions, and any portions
11 * thereof, and that both notices appear in supporting documentation. 11 * thereof, and that both notices appear in supporting documentation.
12 * 12 *
13 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 13 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
14 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR 14 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
15 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 15 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
16 * 16 *
17 * Carnegie Mellon requests users of this software to return to 17 * Carnegie Mellon requests users of this software to return to
18 * 18 *
19 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 19 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
20 * School of Computer Science 20 * School of Computer Science
21 * Carnegie Mellon University 21 * Carnegie Mellon University
22 * Pittsburgh PA 15213-3890 22 * Pittsburgh PA 15213-3890
23 * 23 *
24 * any improvements or extensions that they make and grant Carnegie Mellon 24 * any improvements or extensions that they make and grant Carnegie Mellon
25 * the rights to redistribute these changes. 25 * the rights to redistribute these changes.
26 */ 26 */
27 27
28#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 28#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
29 29
30__KERNEL_RCSID(0, "$NetBSD: prom.c,v 1.53 2020/08/30 16:26:56 thorpej Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: prom.c,v 1.54 2020/09/03 02:09:09 thorpej Exp $");
31 31
32#include "opt_multiprocessor.h" 32#include "opt_multiprocessor.h"
33 33
34#include <sys/param.h> 34#include <sys/param.h>
35#include <sys/systm.h> 35#include <sys/systm.h>
36#include <sys/proc.h> 36#include <sys/proc.h>
37#include <sys/cpu.h> 37#include <sys/cpu.h>
38 38
39#include <uvm/uvm_extern.h> 39#include <uvm/uvm_extern.h>
40 40
41#include <machine/rpb.h> 41#include <machine/rpb.h>
42#include <machine/alpha.h> 42#include <machine/alpha.h>
43#define ENABLEPROM 43#define ENABLEPROM
44#include <machine/prom.h> 44#include <machine/prom.h>
45 45
46#include <dev/cons.h> 46#include <dev/cons.h>
47 47
48/* XXX this is to fake out the console routines, while booting. */ 48/* XXX this is to fake out the console routines, while booting. */
49struct consdev promcons = { 49struct consdev promcons = {
50 .cn_getc = promcngetc, 50 .cn_getc = promcngetc,
51 .cn_putc = promcnputc, 51 .cn_putc = promcnputc,
52 .cn_pollc = nullcnpollc, 52 .cn_pollc = nullcnpollc,
53 .cn_dev = makedev(23,0), 53 .cn_dev = makedev(23,0),
54 .cn_pri = 1 54 .cn_pri = 1
55}; 55};
56 56
57struct rpb *hwrpb; 57struct rpb *hwrpb __read_mostly;
58int alpha_console; 58int alpha_console;
59 59
60extern struct prom_vec prom_dispatch_v; 60extern struct prom_vec prom_dispatch_v;
61 61
62static int prom_is_qemu; /* XXX */ 62bool prom_interface_initialized;
 63int prom_mapped = 1; /* Is PROM still mapped? */
 64static bool prom_is_qemu; /* XXX */
63 65
64static kmutex_t prom_lock; 66static kmutex_t prom_lock;
65 67
66#ifdef _PMAP_MAY_USE_PROM_CONSOLE 68#ifdef _PROM_MAY_USE_PROM_CONSOLE
67int prom_mapped = 1; /* Is PROM still mapped? */ 
68 69
69pt_entry_t prom_pte, saved_pte[1]; /* XXX */ 70pt_entry_t prom_pte, saved_pte[1]; /* XXX */
70 71
71static pt_entry_t * 72static pt_entry_t *
72prom_lev1map(void) 73prom_lev1map(void)
73{ 74{
74 struct alpha_pcb *apcb; 75 struct alpha_pcb *apcb;
75 76
76 /* 77 /*
77 * Find the level 1 map that we're currently running on. 78 * Find the level 1 map that we're currently running on.
78 */ 79 */
79 apcb = (struct alpha_pcb *)ALPHA_PHYS_TO_K0SEG(curpcb); 80 apcb = (struct alpha_pcb *)ALPHA_PHYS_TO_K0SEG(curpcb);
80 81
81 return ((pt_entry_t *)ALPHA_PHYS_TO_K0SEG(apcb->apcb_ptbr << PGSHIFT)); 82 return ((pt_entry_t *)ALPHA_PHYS_TO_K0SEG(apcb->apcb_ptbr << PGSHIFT));
82} 83}
83#endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 84#endif /* _PROM_MAY_USE_PROM_CONSOLE */
 85
 86bool
 87prom_uses_prom_console(void)
 88{
 89#ifdef _PROM_MAY_USE_PROM_CONSOLE
 90 return (cputype == ST_DEC_21000);
 91#else
 92 return false;
 93#endif
 94}
 95
 96static void
 97prom_init_cputype(const struct rpb * const rpb)
 98{
 99 cputype = rpb->rpb_type;
 100 if (cputype < 0) {
 101 /*
 102 * At least some white-box systems have SRM which
 103 * reports a systype that's the negative of their
 104 * blue-box counterpart.
 105 */
 106 cputype = -cputype;
 107 }
 108}
84 109
85static void 110static void
86prom_check_qemu(const struct rpb * const rpb) 111prom_check_qemu(const struct rpb * const rpb)
87{ 112{
88 if (!prom_is_qemu) { 113 if (!prom_is_qemu) {
89 if (rpb->rpb_ssn[0] == 'Q' && 114 if (rpb->rpb_ssn[0] == 'Q' &&
90 rpb->rpb_ssn[1] == 'E' && 115 rpb->rpb_ssn[1] == 'E' &&
91 rpb->rpb_ssn[2] == 'M' && 116 rpb->rpb_ssn[2] == 'M' &&
92 rpb->rpb_ssn[3] == 'U') { 117 rpb->rpb_ssn[3] == 'U') {
93 prom_is_qemu = 1; 118 prom_is_qemu = true;
94 } 119 }
95 } 120 }
96} 121}
97 122
98void 123void
99init_prom_interface(struct rpb *rpb) 124init_prom_interface(u_long ptb_pfn, struct rpb *rpb)
100{ 125{
101 static bool prom_interface_initialized; 
102 126
103 if (prom_interface_initialized) 127 if (prom_interface_initialized)
104 return; 128 return;
105 129
106 struct crb *c; 130 struct crb *c;
107 131
 132 prom_init_cputype(rpb);
108 prom_check_qemu(rpb); 133 prom_check_qemu(rpb);
109 134
110 c = (struct crb *)((char *)rpb + rpb->rpb_crb_off); 135 c = (struct crb *)((char *)rpb + rpb->rpb_crb_off);
111 136
112 prom_dispatch_v.routine_arg = c->crb_v_dispatch; 137 prom_dispatch_v.routine_arg = c->crb_v_dispatch;
113 prom_dispatch_v.routine = c->crb_v_dispatch->entry_va; 138 prom_dispatch_v.routine = c->crb_v_dispatch->entry_va;
114 139
 140#ifdef _PROM_MAY_USE_PROM_CONSOLE
 141 if (prom_uses_prom_console()) {
 142 /*
 143 * XXX Save old PTE so we can remap the PROM, if
 144 * XXX necessary.
 145 */
 146 pt_entry_t * const l1pt =
 147 (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(ptb_pfn << PGSHIFT);
 148 prom_pte = l1pt[0] & ~PG_ASM;
 149 }
 150#endif /* _PROM_MAY_USE_PROM_CONSOLE */
 151
115 mutex_init(&prom_lock, MUTEX_DEFAULT, IPL_HIGH); 152 mutex_init(&prom_lock, MUTEX_DEFAULT, IPL_HIGH);
116 prom_interface_initialized = true; 153 prom_interface_initialized = true;
117} 154}
118 155
119void 156void
120init_bootstrap_console(void) 157init_bootstrap_console(void)
121{ 158{
122 char buf[4]; 159 char buf[4];
123 160
124 init_prom_interface(hwrpb); 161 /* init_prom_interface() has already been called. */
 162 if (! prom_interface_initialized) {
 163 prom_halt(1);
 164 }
125 165
126 prom_getenv(PROM_E_TTY_DEV, buf, sizeof(buf)); 166 prom_getenv(PROM_E_TTY_DEV, buf, sizeof(buf));
127 alpha_console = buf[0] - '0'; 167 alpha_console = buf[0] - '0';
128 168
129 /* XXX fake out the console routines, for now */ 169 /* XXX fake out the console routines, for now */
130 cn_tab = &promcons; 170 cn_tab = &promcons;
131} 171}
132 172
133#ifdef _PMAP_MAY_USE_PROM_CONSOLE 173#ifdef _PROM_MAY_USE_PROM_CONSOLE
134static void prom_cache_sync(void); 174static void prom_cache_sync(void);
135#endif 175#endif
136 176
137void 177void
138prom_enter(void) 178prom_enter(void)
139{ 179{
140 180
141 mutex_enter(&prom_lock); 181 mutex_enter(&prom_lock);
142 182
143#ifdef _PMAP_MAY_USE_PROM_CONSOLE 183#ifdef _PROM_MAY_USE_PROM_CONSOLE
144 /* 184 /*
145 * If we have not yet switched out of the PROM's context 185 * If we have not yet switched out of the PROM's context
146 * (i.e. the first one after alpha_init()), then the PROM 186 * (i.e. the first one after alpha_init()), then the PROM
147 * is still mapped, regardless of the `prom_mapped' setting. 187 * is still mapped, regardless of the `prom_mapped' setting.
148 */ 188 */
149 if (prom_mapped == 0 && curpcb != 0) { 189 if (prom_mapped == 0 && curpcb != 0) {
150 if (!pmap_uses_prom_console()) 190 if (!prom_uses_prom_console())
151 panic("prom_enter"); 191 panic("prom_enter");
152 { 192 {
153 pt_entry_t *lev1map; 193 pt_entry_t *lev1map;
154 194
155 lev1map = prom_lev1map(); /* XXX */ 195 lev1map = prom_lev1map(); /* XXX */
156 saved_pte[0] = lev1map[0]; /* XXX */ 196 saved_pte[0] = lev1map[0]; /* XXX */
157 lev1map[0] = prom_pte; /* XXX */ 197 lev1map[0] = prom_pte; /* XXX */
158 } 198 }
159 prom_cache_sync(); /* XXX */ 199 prom_cache_sync(); /* XXX */
160 } 200 }
161#endif 201#endif
162} 202}
163 203
164void 204void
165prom_leave(void) 205prom_leave(void)
166{ 206{
167 207
168#ifdef _PMAP_MAY_USE_PROM_CONSOLE 208#ifdef _PROM_MAY_USE_PROM_CONSOLE
169 /* 209 /*
170 * See comment above. 210 * See comment above.
171 */ 211 */
172 if (prom_mapped == 0 && curpcb != 0) { 212 if (prom_mapped == 0 && curpcb != 0) {
173 if (!pmap_uses_prom_console()) 213 if (!prom_uses_prom_console())
174 panic("prom_leave"); 214 panic("prom_leave");
175 { 215 {
176 pt_entry_t *lev1map; 216 pt_entry_t *lev1map;
177 217
178 lev1map = prom_lev1map(); /* XXX */ 218 lev1map = prom_lev1map(); /* XXX */
179 lev1map[0] = saved_pte[0]; /* XXX */ 219 lev1map[0] = saved_pte[0]; /* XXX */
180 } 220 }
181 prom_cache_sync(); /* XXX */ 221 prom_cache_sync(); /* XXX */
182 } 222 }
183#endif 223#endif
184 mutex_exit(&prom_lock); 224 mutex_exit(&prom_lock);
185} 225}
186 226
187#ifdef _PMAP_MAY_USE_PROM_CONSOLE 227#ifdef _PROM_MAY_USE_PROM_CONSOLE
188static void 228static void
189prom_cache_sync(void) 229prom_cache_sync(void)
190{ 230{
191 ALPHA_TBIA(); 231 ALPHA_TBIA();
192 alpha_pal_imb(); 232 alpha_pal_imb();
193} 233}
194#endif 234#endif
195 235
196/* 236/*
197 * promcnputc: 237 * promcnputc:
198 * 238 *
199 * Remap char before passing off to prom. 239 * Remap char before passing off to prom.
200 * 240 *
201 * Prom only takes 32 bit addresses. Copy char somewhere prom can 241 * Prom only takes 32 bit addresses. Copy char somewhere prom can
202 * find it. This routine will stop working after pmap_rid_of_console 242 * find it. This routine will stop working after pmap_rid_of_console
203 * is called in alpha_init. This is due to the hard coded address 243 * is called in alpha_init. This is due to the hard coded address
204 * of the console area. 244 * of the console area.
205 */ 245 */
206void 246void
207promcnputc(dev_t dev, int c) 247promcnputc(dev_t dev, int c)
208{ 248{
209 prom_return_t ret; 249 prom_return_t ret;
210 unsigned char *to = (unsigned char *)0x20000000; 250 unsigned char *to = (unsigned char *)0x20000000;
211 251
212 /* XXX */ 252 /* XXX */
213 if (prom_is_qemu) 253 if (prom_is_qemu)
214 return; 254 return;
215 255
216 prom_enter(); 256 prom_enter();
217 *to = c; 257 *to = c;
218 258
219 do { 259 do {
220 ret.bits = prom_putstr(alpha_console, to, 1); 260 ret.bits = prom_putstr(alpha_console, to, 1);
221 } while ((ret.u.retval & 1) == 0); 261 } while ((ret.u.retval & 1) == 0);
222 262
223 prom_leave(); 263 prom_leave();
224} 264}
225 265
226/* 266/*
227 * promcngetc: 267 * promcngetc:
228 * 268 *
229 * Wait for the prom to get a real char and pass it back. 269 * Wait for the prom to get a real char and pass it back.
230 */ 270 */
231int 271int
232promcngetc(dev_t dev) 272promcngetc(dev_t dev)
233{ 273{
234 prom_return_t ret; 274 prom_return_t ret;
235 275
236 /* XXX */ 276 /* XXX */
237 if (prom_is_qemu) 277 if (prom_is_qemu)
238 return 0; 278 return 0;
239 279
240 for (;;) { 280 for (;;) {
241 prom_enter(); 281 prom_enter();
242 ret.bits = prom_getc(alpha_console); 282 ret.bits = prom_getc(alpha_console);
243 prom_leave(); 283 prom_leave();
244 if (ret.u.status == 0 || ret.u.status == 1) 284 if (ret.u.status == 0 || ret.u.status == 1)
245 return (ret.u.retval); 285 return (ret.u.retval);
246 } 286 }
247} 287}
248 288
249/* 289/*
250 * promcnlookc: 290 * promcnlookc:
251 * 291 *
252 * See if prom has a real char and pass it back. 292 * See if prom has a real char and pass it back.
253 */ 293 */
254int 294int
255promcnlookc(dev_t dev, char *cp) 295promcnlookc(dev_t dev, char *cp)
256{ 296{
257 prom_return_t ret; 297 prom_return_t ret;
258 298
259 /* XXX */ 299 /* XXX */
260 if (prom_is_qemu) 300 if (prom_is_qemu)
261 return 0; 301 return 0;
262 302
263 prom_enter(); 303 prom_enter();
264 ret.bits = prom_getc(alpha_console); 304 ret.bits = prom_getc(alpha_console);
265 prom_leave(); 305 prom_leave();
266 if (ret.u.status == 0 || ret.u.status == 1) { 306 if (ret.u.status == 0 || ret.u.status == 1) {
267 *cp = ret.u.retval; 307 *cp = ret.u.retval;
268 return 1; 308 return 1;
269 } else 309 } else
270 return 0; 310 return 0;
271} 311}
272 312
273int 313int
274prom_getenv(int id, char *buf, int len) 314prom_getenv(int id, char *buf, int len)
275{ 315{
276 unsigned char *to = (unsigned char *)0x20000000; 316 unsigned char *to = (unsigned char *)0x20000000;
277 prom_return_t ret; 317 prom_return_t ret;
278 318
279 /* XXX */ 319 /* XXX */
280 if (prom_is_qemu) 320 if (prom_is_qemu)
281 return 0; 321 return 0;
282 322
283 prom_enter(); 323 prom_enter();
284 ret.bits = prom_getenv_disp(id, to, len); 324 ret.bits = prom_getenv_disp(id, to, len);
285 if (ret.u.status & 0x4) 325 if (ret.u.status & 0x4)
286 ret.u.retval = 0; 326 ret.u.retval = 0;
287 len = uimin(len - 1, ret.u.retval); 327 len = uimin(len - 1, ret.u.retval);
288 memcpy(buf, to, len); 328 memcpy(buf, to, len);
289 buf[len] = '\0'; 329 buf[len] = '\0';
290 prom_leave(); 330 prom_leave();
291 331
292 return len; 332 return len;
293} 333}
294 334
295void 335void
296prom_halt(int halt) 336prom_halt(int halt)
297{ 337{
298 struct pcs *p; 338 struct pcs *p;
299 339
300 /* 340 /*
301 * Turn off interrupts, for sanity. 341 * Turn off interrupts, for sanity.
302 */ 342 */
303 (void) splhigh(); 343 (void) splhigh();
304 344
305 /* 345 /*
306 * Set "boot request" part of the CPU state depending on what 346 * Set "boot request" part of the CPU state depending on what
307 * we want to happen when we halt. 347 * we want to happen when we halt.
308 */ 348 */
309 p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id); 349 p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
310 p->pcs_flags &= ~(PCS_RC | PCS_HALT_REQ); 350 p->pcs_flags &= ~(PCS_RC | PCS_HALT_REQ);
311 if (halt) 351 if (halt)
312 p->pcs_flags |= PCS_HALT_STAY_HALTED; 352 p->pcs_flags |= PCS_HALT_STAY_HALTED;
313 else 353 else
314 p->pcs_flags |= PCS_HALT_WARM_BOOT; 354 p->pcs_flags |= PCS_HALT_WARM_BOOT;
315 355
316 /* 356 /*
317 * Halt the machine. 357 * Halt the machine.
318 */ 358 */
319 alpha_pal_halt(); 359 alpha_pal_halt();
320} 360}
321 361
322uint64_t 362uint64_t
323hwrpb_checksum(void) 363hwrpb_checksum(void)
324{ 364{
325 uint64_t *p, sum; 365 uint64_t *p, sum;
326 int i; 366 int i;
327 367
328 for (i = 0, p = (uint64_t *)hwrpb, sum = 0; 368 for (i = 0, p = (uint64_t *)hwrpb, sum = 0;
329 i < (offsetof(struct rpb, rpb_checksum) / sizeof (uint64_t)); 369 i < (offsetof(struct rpb, rpb_checksum) / sizeof (uint64_t));
330 i++, p++) 370 i++, p++)
331 sum += *p; 371 sum += *p;
332 372
333 return (sum); 373 return (sum);
334} 374}
335 375
336void 376void
337hwrpb_primary_init(void) 377hwrpb_primary_init(void)
338{ 378{
339 struct pcb *pcb; 379 struct pcb *pcb;
340 struct pcs *p; 380 struct pcs *p;
341 381
342 p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id); 382 p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
343 383
344 /* Initialize the primary's HWPCB and the Virtual Page Table Base. */ 384 /* Initialize the primary's HWPCB and the Virtual Page Table Base. */
345 pcb = lwp_getpcb(&lwp0); 385 pcb = lwp_getpcb(&lwp0);
346 memcpy(p->pcs_hwpcb, &pcb->pcb_hw, sizeof(pcb->pcb_hw)); 386 memcpy(p->pcs_hwpcb, &pcb->pcb_hw, sizeof(pcb->pcb_hw));
347 hwrpb->rpb_vptb = VPTBASE; 387 hwrpb->rpb_vptb = VPTBASE;
348 388
349 hwrpb->rpb_checksum = hwrpb_checksum(); 389 hwrpb->rpb_checksum = hwrpb_checksum();
350} 390}
351 391
352void 392void
353hwrpb_restart_setup(void) 393hwrpb_restart_setup(void)
354{ 394{
355 struct pcs *p; 395 struct pcs *p;
356 396
357 /* Clear bootstrap-in-progress flag since we're done bootstrapping */ 397 /* Clear bootstrap-in-progress flag since we're done bootstrapping */
358 p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id); 398 p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
359 p->pcs_flags &= ~PCS_BIP; 399 p->pcs_flags &= ~PCS_BIP;
360 400
361 /* when 'c'ontinuing from console halt, do a dump */ 401 /* when 'c'ontinuing from console halt, do a dump */
362 hwrpb->rpb_rest_term = (uint64_t)&XentRestart; 402 hwrpb->rpb_rest_term = (uint64_t)&XentRestart;
363 hwrpb->rpb_rest_term_val = 0x1; 403 hwrpb->rpb_rest_term_val = 0x1;
364 404
365 hwrpb->rpb_checksum = hwrpb_checksum(); 405 hwrpb->rpb_checksum = hwrpb_checksum();
366 406
367 p->pcs_flags |= (PCS_RC | PCS_CV); 407 p->pcs_flags |= (PCS_RC | PCS_CV);
368} 408}
369 409
370uint64_t 410uint64_t
371console_restart(struct trapframe *framep) 411console_restart(struct trapframe *framep)
372{ 412{
373 struct pcs *p; 413 struct pcs *p;
374 414
375 /* Clear restart-capable flag, since we can no longer restart. */ 415 /* Clear restart-capable flag, since we can no longer restart. */
376 p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id); 416 p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
377 p->pcs_flags &= ~PCS_RC; 417 p->pcs_flags &= ~PCS_RC;
378 418
379 /* Fill in the missing frame slots */ 419 /* Fill in the missing frame slots */
380 420
381 framep->tf_regs[FRAME_PS] = p->pcs_halt_ps; 421 framep->tf_regs[FRAME_PS] = p->pcs_halt_ps;
382 framep->tf_regs[FRAME_PC] = p->pcs_halt_pc; 422 framep->tf_regs[FRAME_PC] = p->pcs_halt_pc;
383 framep->tf_regs[FRAME_T11] = p->pcs_halt_r25; 423 framep->tf_regs[FRAME_T11] = p->pcs_halt_r25;
384 framep->tf_regs[FRAME_RA] = p->pcs_halt_r26; 424 framep->tf_regs[FRAME_RA] = p->pcs_halt_r26;
385 framep->tf_regs[FRAME_T12] = p->pcs_halt_r27; 425 framep->tf_regs[FRAME_T12] = p->pcs_halt_r27;
386 426
387 panic("user requested console halt"); 427 panic("user requested console halt");
388 428
389 return (1); 429 return (1);
390} 430}

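The prom_getenv() routine above is the main consumer-facing call in this group: it bounces the request through the PROM dispatcher, treats a set 0x4 status bit as failure, and always NUL-terminates the caller's buffer. Note also the split between promcngetc(), which spins until the PROM reports success (status 0 or 1), and promcnlookc(), which makes exactly one PROM call and reports whether a character was ready; the polling tty driver in promcons.c below depends on the non-blocking variant. A minimal sketch of a prom_getenv() caller, using the PROM_E_* ids from <machine/prom.h> (report_bootdev() itself is hypothetical, not part of this diff):

	/*
	 * Sketch only: fetch the "booted device" environment variable.
	 * A return of 0 means either an empty variable or a failed
	 * PROM call (status bit 0x4 set), so both cases fall through
	 * to the "unknown" message.
	 */
	static void
	report_bootdev(void)
	{
		char bootdev[128];

		if (prom_getenv(PROM_E_BOOTED_DEV, bootdev,
		    sizeof(bootdev)) > 0)
			printf("booted from %s\n", bootdev);
		else
			printf("boot device unknown\n");
	}
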
cvs diff -r1.40 -r1.41 src/sys/arch/alpha/alpha/promcons.c

--- src/sys/arch/alpha/alpha/promcons.c 2019/08/09 08:05:57 1.40
+++ src/sys/arch/alpha/alpha/promcons.c 2020/09/03 02:09:09 1.41
@@ -1,287 +1,287 @@ @@ -1,287 +1,287 @@
1/* $NetBSD: promcons.c,v 1.40 2019/08/09 08:05:57 rin Exp $ */ 1/* $NetBSD: promcons.c,v 1.41 2020/09/03 02:09:09 thorpej Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. 4 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Author: Chris G. Demetriou 7 * Author: Chris G. Demetriou
8 * 8 *
9 * Permission to use, copy, modify and distribute this software and 9 * Permission to use, copy, modify and distribute this software and
10 * its documentation is hereby granted, provided that both the copyright 10 * its documentation is hereby granted, provided that both the copyright
11 * notice and this permission notice appear in all copies of the 11 * notice and this permission notice appear in all copies of the
12 * software, derivative works or modified versions, and any portions 12 * software, derivative works or modified versions, and any portions
13 * thereof, and that both notices appear in supporting documentation. 13 * thereof, and that both notices appear in supporting documentation.
14 * 14 *
15 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 15 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
16 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 16 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
17 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 17 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
18 * 18 *
19 * Carnegie Mellon requests users of this software to return to 19 * Carnegie Mellon requests users of this software to return to
20 * 20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science 22 * School of Computer Science
23 * Carnegie Mellon University 23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890 24 * Pittsburgh PA 15213-3890
25 * 25 *
26 * any improvements or extensions that they make and grant Carnegie the 26 * any improvements or extensions that they make and grant Carnegie the
27 * rights to redistribute these changes. 27 * rights to redistribute these changes.
28 */ 28 */
29 29
30#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 30#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
31 31
32__KERNEL_RCSID(0, "$NetBSD: promcons.c,v 1.40 2019/08/09 08:05:57 rin Exp $"); 32__KERNEL_RCSID(0, "$NetBSD: promcons.c,v 1.41 2020/09/03 02:09:09 thorpej Exp $");
33 33
34#include <sys/param.h> 34#include <sys/param.h>
35#include <sys/systm.h> 35#include <sys/systm.h>
36#include <sys/ioctl.h> 36#include <sys/ioctl.h>
37#include <sys/select.h> 37#include <sys/select.h>
38#include <sys/tty.h> 38#include <sys/tty.h>
39#include <sys/proc.h> 39#include <sys/proc.h>
40#include <sys/file.h> 40#include <sys/file.h>
41#include <sys/uio.h> 41#include <sys/uio.h>
42#include <sys/kernel.h> 42#include <sys/kernel.h>
43#include <sys/syslog.h> 43#include <sys/syslog.h>
44#include <sys/types.h> 44#include <sys/types.h>
45#include <sys/device.h> 45#include <sys/device.h>
46#include <sys/conf.h> 46#include <sys/conf.h>
47#include <sys/kauth.h> 47#include <sys/kauth.h>
48 48
49#include <uvm/uvm_extern.h> 49#include <uvm/uvm_extern.h>
50 50
51#include <machine/cpuconf.h> 51#include <machine/cpuconf.h>
52#include <machine/prom.h> 52#include <machine/prom.h>
53 53
54#ifndef CONSPEED 54#ifndef CONSPEED
55#define CONSPEED 9600 55#define CONSPEED 9600
56#endif 56#endif
57 57
58#ifdef _PMAP_MAY_USE_PROM_CONSOLE 58#ifdef _PROM_MAY_USE_PROM_CONSOLE
59 59
60dev_type_open(promopen); 60dev_type_open(promopen);
61dev_type_close(promclose); 61dev_type_close(promclose);
62dev_type_read(promread); 62dev_type_read(promread);
63dev_type_write(promwrite); 63dev_type_write(promwrite);
64dev_type_ioctl(promioctl); 64dev_type_ioctl(promioctl);
65dev_type_stop(promstop); 65dev_type_stop(promstop);
66dev_type_tty(promtty); 66dev_type_tty(promtty);
67dev_type_poll(prompoll); 67dev_type_poll(prompoll);
68 68
69const struct cdevsw prom_cdevsw = { 69const struct cdevsw prom_cdevsw = {
70 .d_open = promopen, 70 .d_open = promopen,
71 .d_close = promclose, 71 .d_close = promclose,
72 .d_read = promread, 72 .d_read = promread,
73 .d_write = promwrite, 73 .d_write = promwrite,
74 .d_ioctl = promioctl, 74 .d_ioctl = promioctl,
75 .d_stop = promstop, 75 .d_stop = promstop,
76 .d_tty = promtty, 76 .d_tty = promtty,
77 .d_poll = prompoll, 77 .d_poll = prompoll,
78 .d_mmap = nommap, 78 .d_mmap = nommap,
79 .d_kqfilter = ttykqfilter, 79 .d_kqfilter = ttykqfilter,
80 .d_discard = nodiscard, 80 .d_discard = nodiscard,
81 .d_flag = D_TTY 81 .d_flag = D_TTY
82}; 82};
83 83
84#define PROM_POLL_HZ 50 84#define PROM_POLL_HZ 50
85 85
86static struct tty *prom_tty[1]; 86static struct tty *prom_tty[1];
87static int polltime; 87static int polltime;
88 88
89void promstart(struct tty *); 89void promstart(struct tty *);
90void promtimeout(void *); 90void promtimeout(void *);
91int promparam(struct tty *, struct termios *); 91int promparam(struct tty *, struct termios *);
92 92
93struct callout prom_ch; 93struct callout prom_ch;
94 94
95int 95int
96promopen(dev_t dev, int flag, int mode, struct lwp *l) 96promopen(dev_t dev, int flag, int mode, struct lwp *l)
97{ 97{
98 int unit = minor(dev); 98 int unit = minor(dev);
99 struct tty *tp; 99 struct tty *tp;
100 int s; 100 int s;
101 int error = 0, setuptimeout = 0; 101 int error = 0, setuptimeout = 0;
102 static bool callo; 102 static bool callo;
103 103
104 if (!callo) { 104 if (!callo) {
105 callout_init(&prom_ch, 0); 105 callout_init(&prom_ch, 0);
106 callo = true; 106 callo = true;
107 } 107 }
108 108
109 if (!pmap_uses_prom_console() || unit >= 1) 109 if (!prom_uses_prom_console() || unit >= 1)
110 return ENXIO; 110 return ENXIO;
111 111
112 s = spltty(); 112 s = spltty();
113 113
114 if (!prom_tty[unit]) { 114 if (!prom_tty[unit]) {
115 tp = prom_tty[unit] = tty_alloc(); 115 tp = prom_tty[unit] = tty_alloc();
116 tty_attach(tp); 116 tty_attach(tp);
117 } else 117 } else
118 tp = prom_tty[unit]; 118 tp = prom_tty[unit];
119 119
120 tp->t_oproc = promstart; 120 tp->t_oproc = promstart;
121 tp->t_param = promparam; 121 tp->t_param = promparam;
122 tp->t_dev = dev; 122 tp->t_dev = dev;
123 123
124 if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp)) { 124 if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp)) {
125 splx(s); 125 splx(s);
126 return (EBUSY); 126 return (EBUSY);
127 } 127 }
128 128
129 if ((tp->t_state & TS_ISOPEN) == 0) { 129 if ((tp->t_state & TS_ISOPEN) == 0) {
130 tp->t_state |= TS_CARR_ON; 130 tp->t_state |= TS_CARR_ON;
131 ttychars(tp); 131 ttychars(tp);
132 tp->t_iflag = TTYDEF_IFLAG; 132 tp->t_iflag = TTYDEF_IFLAG;
133 tp->t_oflag = TTYDEF_OFLAG; 133 tp->t_oflag = TTYDEF_OFLAG;
134 tp->t_cflag = TTYDEF_CFLAG|CLOCAL; 134 tp->t_cflag = TTYDEF_CFLAG|CLOCAL;
135 tp->t_lflag = TTYDEF_LFLAG; 135 tp->t_lflag = TTYDEF_LFLAG;
136 tp->t_ispeed = tp->t_ospeed = CONSPEED; 136 tp->t_ispeed = tp->t_ospeed = CONSPEED;
137 ttsetwater(tp); 137 ttsetwater(tp);
138 138
139 setuptimeout = 1; 139 setuptimeout = 1;
140 } 140 }
141 141
142 splx(s); 142 splx(s);
143 143
144 error = (*tp->t_linesw->l_open)(dev, tp); 144 error = (*tp->t_linesw->l_open)(dev, tp);
145 if (error == 0 && setuptimeout) { 145 if (error == 0 && setuptimeout) {
146 polltime = hz / PROM_POLL_HZ; 146 polltime = hz / PROM_POLL_HZ;
147 if (polltime < 1) 147 if (polltime < 1)
148 polltime = 1; 148 polltime = 1;
149 callout_reset(&prom_ch, polltime, promtimeout, tp); 149 callout_reset(&prom_ch, polltime, promtimeout, tp);
150 } 150 }
151 return error; 151 return error;
152} 152}
153 153
154int 154int
155promclose(dev_t dev, int flag, int mode, struct lwp *l) 155promclose(dev_t dev, int flag, int mode, struct lwp *l)
156{ 156{
157 int unit = minor(dev); 157 int unit = minor(dev);
158 struct tty *tp = prom_tty[unit]; 158 struct tty *tp = prom_tty[unit];
159 159
160 callout_stop(&prom_ch); 160 callout_stop(&prom_ch);
161 (*tp->t_linesw->l_close)(tp, flag); 161 (*tp->t_linesw->l_close)(tp, flag);
162 ttyclose(tp); 162 ttyclose(tp);
163 return 0; 163 return 0;
164} 164}
165 165
166int 166int
167promread(dev_t dev, struct uio *uio, int flag) 167promread(dev_t dev, struct uio *uio, int flag)
168{ 168{
169 struct tty *tp = prom_tty[minor(dev)]; 169 struct tty *tp = prom_tty[minor(dev)];
170 170
171 return ((*tp->t_linesw->l_read)(tp, uio, flag)); 171 return ((*tp->t_linesw->l_read)(tp, uio, flag));
172} 172}
173 173
174int 174int
175promwrite(dev_t dev, struct uio *uio, int flag) 175promwrite(dev_t dev, struct uio *uio, int flag)
176{ 176{
177 struct tty *tp = prom_tty[minor(dev)]; 177 struct tty *tp = prom_tty[minor(dev)];
178 178
179 return ((*tp->t_linesw->l_write)(tp, uio, flag)); 179 return ((*tp->t_linesw->l_write)(tp, uio, flag));
180} 180}
181 181
182int 182int
183prompoll(dev_t dev, int events, struct lwp *l) 183prompoll(dev_t dev, int events, struct lwp *l)
184{ 184{
185 struct tty *tp = prom_tty[minor(dev)]; 185 struct tty *tp = prom_tty[minor(dev)];
186 186
187 return ((*tp->t_linesw->l_poll)(tp, events, l)); 187 return ((*tp->t_linesw->l_poll)(tp, events, l));
188} 188}
189 189
190int 190int
191promioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l) 191promioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
192{ 192{
193 int unit = minor(dev); 193 int unit = minor(dev);
194 struct tty *tp = prom_tty[unit]; 194 struct tty *tp = prom_tty[unit];
195 int error; 195 int error;
196 196
197 error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, l); 197 error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, l);
198 if (error != EPASSTHROUGH) 198 if (error != EPASSTHROUGH)
199 return error; 199 return error;
200 return ttioctl(tp, cmd, data, flag, l); 200 return ttioctl(tp, cmd, data, flag, l);
201} 201}
202 202
203int 203int
204promparam(struct tty *tp, struct termios *t) 204promparam(struct tty *tp, struct termios *t)
205{ 205{
206 206
207 return 0; 207 return 0;
208} 208}
209 209
210void 210void
211promstart(struct tty *tp) 211promstart(struct tty *tp)
212{ 212{
213 int s; 213 int s;
214 214
215 s = spltty(); 215 s = spltty();
216 if (tp->t_state & (TS_TTSTOP | TS_BUSY)) 216 if (tp->t_state & (TS_TTSTOP | TS_BUSY))
217 goto out; 217 goto out;
218 ttypull(tp); 218 ttypull(tp);
219 tp->t_state |= TS_BUSY; 219 tp->t_state |= TS_BUSY;
220 while (tp->t_outq.c_cc != 0) 220 while (tp->t_outq.c_cc != 0)
221 promcnputc(tp->t_dev, getc(&tp->t_outq)); 221 promcnputc(tp->t_dev, getc(&tp->t_outq));
222 tp->t_state &= ~TS_BUSY; 222 tp->t_state &= ~TS_BUSY;
223out: 223out:
224 splx(s); 224 splx(s);
225} 225}
226 226
227/* 227/*
228 * Stop output on a line. 228 * Stop output on a line.
229 */ 229 */
230void 230void
231promstop(struct tty *tp, int flag) 231promstop(struct tty *tp, int flag)
232{ 232{
233 int s; 233 int s;
234 234
235 s = spltty(); 235 s = spltty();
236 if (tp->t_state & TS_BUSY) 236 if (tp->t_state & TS_BUSY)
237 if ((tp->t_state & TS_TTSTOP) == 0) 237 if ((tp->t_state & TS_TTSTOP) == 0)
238 tp->t_state |= TS_FLUSH; 238 tp->t_state |= TS_FLUSH;
239 splx(s); 239 splx(s);
240} 240}
241 241
242void 242void
243promtimeout(void *v) 243promtimeout(void *v)
244{ 244{
245 struct tty *tp = v; 245 struct tty *tp = v;
246 u_char c; 246 u_char c;
247 247
248 while (promcnlookc(tp->t_dev, &c)) { 248 while (promcnlookc(tp->t_dev, &c)) {
249 if (tp->t_state & TS_ISOPEN) 249 if (tp->t_state & TS_ISOPEN)
250 (*tp->t_linesw->l_rint)(c, tp); 250 (*tp->t_linesw->l_rint)(c, tp);
251 } 251 }
252 callout_reset(&prom_ch, polltime, promtimeout, tp); 252 callout_reset(&prom_ch, polltime, promtimeout, tp);
253} 253}
254 254
255struct tty * 255struct tty *
256promtty(dev_t dev) 256promtty(dev_t dev)
257{ 257{
258 258
259 if (minor(dev) != 0) 259 if (minor(dev) != 0)
260 panic("promtty: bogus"); 260 panic("promtty: bogus");
261 261
262 return prom_tty[0]; 262 return prom_tty[0];
263} 263}
264 264
265#else /* _PMAP_MAY_USE_PROM_CONSOLE */ 265#else /* _PROM_MAY_USE_PROM_CONSOLE */
266 266
267/* 267/*
268 * If _PMAP_MAY_USE_PROM_CONSOLE is not defined, 268 * If _PROM_MAY_USE_PROM_CONSOLE is not defined,
269 * this fake prom_cdevsw is attached to the kernel. 269 * this fake prom_cdevsw is attached to the kernel.
270 * NEVER REMOVE! 270 * NEVER REMOVE!
271 */ 271 */
272const struct cdevsw prom_cdevsw = { 272const struct cdevsw prom_cdevsw = {
273 .d_open = noopen, 273 .d_open = noopen,
274 .d_close = noclose, 274 .d_close = noclose,
275 .d_read = noread, 275 .d_read = noread,
276 .d_write = nowrite, 276 .d_write = nowrite,
277 .d_ioctl = noioctl, 277 .d_ioctl = noioctl,
278 .d_stop = nostop, 278 .d_stop = nostop,
279 .d_tty = notty, 279 .d_tty = notty,
280 .d_poll = nopoll, 280 .d_poll = nopoll,
281 .d_mmap = nommap, 281 .d_mmap = nommap,
282 .d_kqfilter = nokqfilter, 282 .d_kqfilter = nokqfilter,
283 .d_discard = nodiscard, 283 .d_discard = nodiscard,
284 .d_flag = 0 284 .d_flag = 0
285}; 285};
286 286
287#endif /* _PMAP_MAY_USE_PROM_CONSOLE */ 287#endif /* _PROM_MAY_USE_PROM_CONSOLE */

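Because the PROM callback console provides no receive interrupt, promcons.c polls for input: promopen() computes polltime as hz / PROM_POLL_HZ (clamped to at least one tick) and arms a callout, and promtimeout() drains any ready characters into the tty line discipline before re-arming itself. The same self-rearming shape, reduced to its essentials (a sketch; mydev_ch, mydev_rxpoll() and MYDEV_POLL_HZ are hypothetical stand-ins, not part of this diff):

	#define MYDEV_POLL_HZ	50	/* input polls per second */

	static struct callout mydev_ch;

	static void
	mydev_timeout(void *arg)
	{
		struct tty *tp = arg;
		u_char c;
		int ticks;

		/* Drain everything that is ready right now. */
		while (mydev_rxpoll(&c)) {	/* hypothetical rx check */
			if (tp->t_state & TS_ISOPEN)
				(*tp->t_linesw->l_rint)(c, tp);
		}

		/* Re-arm: hz / MYDEV_POLL_HZ ticks, but at least one. */
		ticks = hz / MYDEV_POLL_HZ;
		if (ticks < 1)
			ticks = 1;
		callout_reset(&mydev_ch, ticks, mydev_timeout, tp);
	}
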
cvs diff -r1.38 -r1.39 src/sys/arch/alpha/include/alpha.h

--- src/sys/arch/alpha/include/alpha.h 2019/04/06 03:06:24 1.38
+++ src/sys/arch/alpha/include/alpha.h 2020/09/03 02:09:09 1.39
@@ -1,174 +1,174 @@ @@ -1,174 +1,174 @@
1/* $NetBSD: alpha.h,v 1.38 2019/04/06 03:06:24 thorpej Exp $ */ 1/* $NetBSD: alpha.h,v 1.39 2020/09/03 02:09:09 thorpej Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1988 University of Utah. 4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1982, 1990, 1993 5 * Copyright (c) 1982, 1990, 1993
6 * The Regents of the University of California. All rights reserved. 6 * The Regents of the University of California. All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to Berkeley by 8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer 9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department. 10 * Science Department.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
15 * 1. Redistributions of source code must retain the above copyright 15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer. 16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright 17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the 18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution. 19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors 20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software 21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission. 22 * without specific prior written permission.
23 * 23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE. 34 * SUCH DAMAGE.
35 * 35 *
36 * from: Utah $Hdr: cpu.h 1.16 91/03/25$ 36 * from: Utah $Hdr: cpu.h 1.16 91/03/25$
37 * 37 *
38 * @(#)cpu.h 8.4 (Berkeley) 1/5/94 38 * @(#)cpu.h 8.4 (Berkeley) 1/5/94
39 */ 39 */
40 40
41#ifndef _ALPHA_H_ 41#ifndef _ALPHA_H_
42#define _ALPHA_H_ 42#define _ALPHA_H_
43 43
44typedef union alpha_s_float { 44typedef union alpha_s_float {
45 uint32_t i; 45 uint32_t i;
46 uint32_t frac: 23, 46 uint32_t frac: 23,
47 exp: 8, 47 exp: 8,
48 sign: 1; 48 sign: 1;
49} s_float; 49} s_float;
50 50
51typedef union alpha_t_float { 51typedef union alpha_t_float {
52 uint64_t i; 52 uint64_t i;
53 uint64_t frac: 52, 53 uint64_t frac: 52,
54 exp: 11, 54 exp: 11,
55 sign: 1; 55 sign: 1;
56} t_float; 56} t_float;
57 57
58#ifdef _KERNEL 58#ifdef _KERNEL
59 59
60#include <sys/bus.h> 60#include <sys/bus.h>
61 61
62#include <sys/pcu.h> 62#include <sys/pcu.h>
63 63
64struct pcb; 64struct pcb;
65struct lwp; 65struct lwp;
66struct reg; 66struct reg;
67struct rpb; 67struct rpb;
68struct trapframe; 68struct trapframe;
69 69
70extern u_long cpu_implver; /* from IMPLVER instruction */ 70extern u_long cpu_implver; /* from IMPLVER instruction */
71extern u_long cpu_amask; /* from AMASK instruction */ 71extern u_long cpu_amask; /* from AMASK instruction */
72extern int bootdev_debug; 72extern int bootdev_debug;
73extern int alpha_fp_sync_complete; 73extern int alpha_fp_sync_complete;
74extern int alpha_unaligned_print, alpha_unaligned_fix, alpha_unaligned_sigbus; 74extern int alpha_unaligned_print, alpha_unaligned_fix, alpha_unaligned_sigbus;
75 75
76void XentArith(uint64_t, uint64_t, uint64_t); /* MAGIC */ 76void XentArith(uint64_t, uint64_t, uint64_t); /* MAGIC */
77void XentIF(uint64_t, uint64_t, uint64_t); /* MAGIC */ 77void XentIF(uint64_t, uint64_t, uint64_t); /* MAGIC */
78void XentInt(uint64_t, uint64_t, uint64_t); /* MAGIC */ 78void XentInt(uint64_t, uint64_t, uint64_t); /* MAGIC */
79void XentMM(uint64_t, uint64_t, uint64_t); /* MAGIC */ 79void XentMM(uint64_t, uint64_t, uint64_t); /* MAGIC */
80void XentRestart(void); /* MAGIC */ 80void XentRestart(void); /* MAGIC */
81void XentSys(uint64_t, uint64_t, uint64_t); /* MAGIC */ 81void XentSys(uint64_t, uint64_t, uint64_t); /* MAGIC */
82void XentUna(uint64_t, uint64_t, uint64_t); /* MAGIC */ 82void XentUna(uint64_t, uint64_t, uint64_t); /* MAGIC */
83void alpha_init(u_long, u_long, u_long, u_long, u_long); 83void alpha_init(u_long, u_long, u_long, u_long, u_long);
84void ast(struct trapframe *); 84void ast(struct trapframe *);
85int badaddr(void *, size_t); 85int badaddr(void *, size_t);
86int badaddr_read(void *, size_t, void *); 86int badaddr_read(void *, size_t, void *);
87uint64_t console_restart(struct trapframe *); 87uint64_t console_restart(struct trapframe *);
88void do_sir(void); 88void do_sir(void);
89void exception_return(void); /* MAGIC */ 89void exception_return(void); /* MAGIC */
90void frametoreg(const struct trapframe *, struct reg *); 90void frametoreg(const struct trapframe *, struct reg *);
91void init_bootstrap_console(void); 91void init_bootstrap_console(void);
92void init_prom_interface(struct rpb *); 92void init_prom_interface(unsigned long, struct rpb *);
93void interrupt(unsigned long, unsigned long, unsigned long, 93void interrupt(unsigned long, unsigned long, unsigned long,
94 struct trapframe *); 94 struct trapframe *);
95void machine_check(unsigned long, struct trapframe *, unsigned long, 95void machine_check(unsigned long, struct trapframe *, unsigned long,
96 unsigned long); 96 unsigned long);
97uint64_t hwrpb_checksum(void); 97uint64_t hwrpb_checksum(void);
98void hwrpb_restart_setup(void); 98void hwrpb_restart_setup(void);
99void regdump(struct trapframe *); 99void regdump(struct trapframe *);
100void regtoframe(const struct reg *, struct trapframe *); 100void regtoframe(const struct reg *, struct trapframe *);
101void savectx(struct pcb *); 101void savectx(struct pcb *);
102void trap(unsigned long, unsigned long, unsigned long, unsigned long, 102void trap(unsigned long, unsigned long, unsigned long, unsigned long,
103 struct trapframe *); 103 struct trapframe *);
104void trap_init(void); 104void trap_init(void);
105void enable_nsio_ide(bus_space_tag_t); 105void enable_nsio_ide(bus_space_tag_t);
106char * dot_conv(unsigned long); 106char * dot_conv(unsigned long);
107 107
108extern const pcu_ops_t fpu_ops; 108extern const pcu_ops_t fpu_ops;
109 109
110void fpu_state_load(struct lwp *, u_int); 110void fpu_state_load(struct lwp *, u_int);
111void fpu_state_save(struct lwp *); 111void fpu_state_save(struct lwp *);
112void fpu_state_release(struct lwp *); 112void fpu_state_release(struct lwp *);
113 113
114static __inline bool 114static __inline bool
115fpu_valid_p(struct lwp *l) 115fpu_valid_p(struct lwp *l)
116{ 116{
117 return pcu_valid_p(&fpu_ops, l); 117 return pcu_valid_p(&fpu_ops, l);
118} 118}
119 119
120static __inline void 120static __inline void
121fpu_load(void) 121fpu_load(void)
122{ 122{
123 pcu_load(&fpu_ops); 123 pcu_load(&fpu_ops);
124} 124}
125 125
126static __inline void 126static __inline void
127fpu_save(lwp_t *l) 127fpu_save(lwp_t *l)
128{ 128{
129 pcu_save(&fpu_ops, l); 129 pcu_save(&fpu_ops, l);
130} 130}
131 131
132static __inline void 132static __inline void
133fpu_discard(lwp_t *l, bool valid_p) 133fpu_discard(lwp_t *l, bool valid_p)
134{ 134{
135 pcu_discard(&fpu_ops, l, valid_p); 135 pcu_discard(&fpu_ops, l, valid_p);
136} 136}
137 137
138void alpha_patch(bool); 138void alpha_patch(bool);
139 139
140/* Multiprocessor glue; cpu.c */ 140/* Multiprocessor glue; cpu.c */
141 141
142struct cpu_info; 142struct cpu_info;
143int cpu_iccb_send(long, const char *); 143int cpu_iccb_send(long, const char *);
144void cpu_iccb_receive(void); 144void cpu_iccb_receive(void);
145void cpu_hatch(struct cpu_info *); 145void cpu_hatch(struct cpu_info *);
146void cpu_halt(void) __attribute__((__noreturn__)); 146void cpu_halt(void) __attribute__((__noreturn__));
147void cpu_spinup_trampoline(void); /* MAGIC */ 147void cpu_spinup_trampoline(void); /* MAGIC */
148void cpu_pause(unsigned long); 148void cpu_pause(unsigned long);
149void cpu_resume(unsigned long); 149void cpu_resume(unsigned long);
150#if defined(DDB) 150#if defined(DDB)
151void cpu_debug_dump(void); 151void cpu_debug_dump(void);
152#endif 152#endif
153 153
154/* IEEE and VAX FP completion */ 154/* IEEE and VAX FP completion */
155 155
156void alpha_sts(int, s_float *); /* MAGIC */ 156void alpha_sts(int, s_float *); /* MAGIC */
157void alpha_stt(int, t_float *); /* MAGIC */ 157void alpha_stt(int, t_float *); /* MAGIC */
158void alpha_lds(int, s_float *); /* MAGIC */ 158void alpha_lds(int, s_float *); /* MAGIC */
159void alpha_ldt(int, t_float *); /* MAGIC */ 159void alpha_ldt(int, t_float *); /* MAGIC */
160 160
161uint64_t alpha_read_fpcr(void); /* MAGIC */ 161uint64_t alpha_read_fpcr(void); /* MAGIC */
162void alpha_write_fpcr(uint64_t); /* MAGIC */ 162void alpha_write_fpcr(uint64_t); /* MAGIC */
163 163
164uint64_t alpha_read_fp_c(struct lwp *); 164uint64_t alpha_read_fp_c(struct lwp *);
165void alpha_write_fp_c(struct lwp *, uint64_t); 165void alpha_write_fp_c(struct lwp *, uint64_t);
166 166
167int alpha_fp_complete(u_long, u_long, struct lwp *, uint64_t *); 167int alpha_fp_complete(u_long, u_long, struct lwp *, uint64_t *);
168 168
169/* Security sensitive rate limiting printf */ 169/* Security sensitive rate limiting printf */
170 170
171void rlprintf(struct timeval *t, const char *fmt, ...); 171void rlprintf(struct timeval *t, const char *fmt, ...);
172 172
173#endif /* _KERNEL */ 173#endif /* _KERNEL */
174#endif /* _ALPHA_H_ */ 174#endif /* _ALPHA_H_ */

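The fpu_* inlines above are thin alpha-side wrappers around the MI PCU (per-CPU unit) framework: fpu_load() binds the FPU to curlwp on first use, fpu_save() forces an lwp's state out of the hardware, and fpu_discard() throws it away (e.g. across exec). A sketch of the canonical lazy-activation path, assuming a handler for the "FP disabled" trap (the handler name is illustrative, not part of this diff):

	/*
	 * Sketch: lazy FPU activation.  When a thread touches the FPU
	 * while it does not own it, the trap handler just asks the PCU
	 * layer to load the unit; pcu_load() (via fpu_load()) saves any
	 * previous owner's state and enables the FPU for curlwp.
	 */
	static void
	handle_fpdisabled(struct lwp *l)	/* illustrative name */
	{
		KASSERT(l == curlwp);
		fpu_load();
	}
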
cvs diff -r1.83 -r1.84 src/sys/arch/alpha/include/pmap.h

--- src/sys/arch/alpha/include/pmap.h 2020/08/29 20:07:00 1.83
+++ src/sys/arch/alpha/include/pmap.h 2020/09/03 02:09:09 1.84
@@ -1,365 +1,353 @@ @@ -1,365 +1,353 @@
1/* $NetBSD: pmap.h,v 1.83 2020/08/29 20:07:00 thorpej Exp $ */ 1/* $NetBSD: pmap.h,v 1.84 2020/09/03 02:09:09 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center and by Chris G. Demetriou. 9 * NASA Ames Research Center and by Chris G. Demetriou.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 1991, 1993 34 * Copyright (c) 1991, 1993
35 * The Regents of the University of California. All rights reserved. 35 * The Regents of the University of California. All rights reserved.
36 * 36 *
37 * This code is derived from software contributed to Berkeley by 37 * This code is derived from software contributed to Berkeley by
38 * the Systems Programming Group of the University of Utah Computer 38 * the Systems Programming Group of the University of Utah Computer
39 * Science Department. 39 * Science Department.
40 * 40 *
41 * Redistribution and use in source and binary forms, with or without 41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions 42 * modification, are permitted provided that the following conditions
43 * are met: 43 * are met:
44 * 1. Redistributions of source code must retain the above copyright 44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer. 45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright 46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the 47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution. 48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors 49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software 50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission. 51 * without specific prior written permission.
52 * 52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE. 63 * SUCH DAMAGE.
64 * 64 *
65 * @(#)pmap.h 8.1 (Berkeley) 6/10/93 65 * @(#)pmap.h 8.1 (Berkeley) 6/10/93
66 */ 66 */
67 67
68/* 68/*
69 * Copyright (c) 1987 Carnegie-Mellon University 69 * Copyright (c) 1987 Carnegie-Mellon University
70 * 70 *
71 * This code is derived from software contributed to Berkeley by 71 * This code is derived from software contributed to Berkeley by
72 * the Systems Programming Group of the University of Utah Computer 72 * the Systems Programming Group of the University of Utah Computer
73 * Science Department. 73 * Science Department.
74 * 74 *
75 * Redistribution and use in source and binary forms, with or without 75 * Redistribution and use in source and binary forms, with or without
76 * modification, are permitted provided that the following conditions 76 * modification, are permitted provided that the following conditions
77 * are met: 77 * are met:
78 * 1. Redistributions of source code must retain the above copyright 78 * 1. Redistributions of source code must retain the above copyright
79 * notice, this list of conditions and the following disclaimer. 79 * notice, this list of conditions and the following disclaimer.
80 * 2. Redistributions in binary form must reproduce the above copyright 80 * 2. Redistributions in binary form must reproduce the above copyright
81 * notice, this list of conditions and the following disclaimer in the 81 * notice, this list of conditions and the following disclaimer in the
82 * documentation and/or other materials provided with the distribution. 82 * documentation and/or other materials provided with the distribution.
83 * 3. All advertising materials mentioning features or use of this software 83 * 3. All advertising materials mentioning features or use of this software
84 * must display the following acknowledgement: 84 * must display the following acknowledgement:
85 * This product includes software developed by the University of 85 * This product includes software developed by the University of
86 * California, Berkeley and its contributors. 86 * California, Berkeley and its contributors.
87 * 4. Neither the name of the University nor the names of its contributors 87 * 4. Neither the name of the University nor the names of its contributors
88 * may be used to endorse or promote products derived from this software 88 * may be used to endorse or promote products derived from this software
89 * without specific prior written permission. 89 * without specific prior written permission.
90 * 90 *
91 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 91 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
92 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 92 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
93 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 93 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
94 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 94 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
95 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 95 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
96 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 96 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
97 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 97 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
98 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 98 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
99 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 99 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
100 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 100 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
101 * SUCH DAMAGE. 101 * SUCH DAMAGE.
102 * 102 *
103 * @(#)pmap.h 8.1 (Berkeley) 6/10/93 103 * @(#)pmap.h 8.1 (Berkeley) 6/10/93
104 */ 104 */
105 105
106#ifndef _PMAP_MACHINE_ 106#ifndef _PMAP_MACHINE_
107#define _PMAP_MACHINE_ 107#define _PMAP_MACHINE_
108 108
109#if defined(_KERNEL_OPT) 109#if defined(_KERNEL_OPT)
110#include "opt_multiprocessor.h" 110#include "opt_multiprocessor.h"
111#endif 111#endif
112 112
113#include <sys/mutex.h> 113#include <sys/mutex.h>
114#include <sys/queue.h> 114#include <sys/queue.h>
115 115
116#include <machine/pte.h> 116#include <machine/pte.h>
117 117
118/* 118/*
119 * Machine-dependent virtual memory state. 119 * Machine-dependent virtual memory state.
120 * 120 *
121 * If we ever support processor numbers higher than 63, we'll have to 121 * If we ever support processor numbers higher than 63, we'll have to
122 * rethink the CPU mask. 122 * rethink the CPU mask.
123 * 123 *
124 * Note pm_asn and pm_asngen are arrays allocated in pmap_create(). 124 * Note pm_asn and pm_asngen are arrays allocated in pmap_create().
125 * Their size is based on the PCS count from the HWRPB, and indexed 125 * Their size is based on the PCS count from the HWRPB, and indexed
126 * by processor ID (from `whami'). This is all padded to COHERENCY_UNIT 126 * by processor ID (from `whami'). This is all padded to COHERENCY_UNIT
127 * to avoid false sharing. 127 * to avoid false sharing.
128 * 128 *
129 * The kernel pmap is a special case; since the kernel uses only ASM 129 * The kernel pmap is a special case; since the kernel uses only ASM
130 * mappings and uses a reserved ASN to keep the TLB clean, we don't 130 * mappings and uses a reserved ASN to keep the TLB clean, we don't
131 * allocate any ASN info for the kernel pmap at all. 131 * allocate any ASN info for the kernel pmap at all.
132 * arrays which hold enough for ALPHA_MAXPROCS. 132 * arrays which hold enough for ALPHA_MAXPROCS.
133 */ 133 */
134struct pmap_asn_info { 134struct pmap_asn_info {
135 unsigned int pma_asn; /* address space number */ 135 unsigned int pma_asn; /* address space number */
136 unsigned int pma_pad0; 136 unsigned int pma_pad0;
137 unsigned long pma_asngen; /* ASN generation number */ 137 unsigned long pma_asngen; /* ASN generation number */
138 unsigned long pma_padN[(COHERENCY_UNIT / 8) - 2]; 138 unsigned long pma_padN[(COHERENCY_UNIT / 8) - 2];
139}; 139};
140 140
141struct pmap { /* pmaps are aligned to COHERENCY_UNIT boundaries */ 141struct pmap { /* pmaps are aligned to COHERENCY_UNIT boundaries */
142 /* pmaps are locked by hashed mutexes */ 142 /* pmaps are locked by hashed mutexes */
143 pt_entry_t *pm_lev1map; /* [ 0] level 1 map */ 143 pt_entry_t *pm_lev1map; /* [ 0] level 1 map */
144 unsigned long pm_cpus; /* [ 8] CPUs using pmap */ 144 unsigned long pm_cpus; /* [ 8] CPUs using pmap */
145 unsigned long pm_needisync; /* [16] CPUs needing isync */ 145 unsigned long pm_needisync; /* [16] CPUs needing isync */
146 struct pmap_statistics pm_stats; /* [32] statistics */ 146 struct pmap_statistics pm_stats; /* [32] statistics */
147 long pm_count; /* [40] reference count */ 147 long pm_count; /* [40] reference count */
148 TAILQ_ENTRY(pmap) pm_list; /* [48] list of all pmaps */ 148 TAILQ_ENTRY(pmap) pm_list; /* [48] list of all pmaps */
149 /* -- COHERENCY_UNIT boundary -- */ 149 /* -- COHERENCY_UNIT boundary -- */
150 struct pmap_asn_info pm_asni[]; /* [64] ASN information */ 150 struct pmap_asn_info pm_asni[]; /* [64] ASN information */
151 /* variable length */ 151 /* variable length */
152}; 152};
153 153
154#define PMAP_SIZEOF(x) \ 154#define PMAP_SIZEOF(x) \
155 (ALIGN(offsetof(struct pmap, pm_asni[(x)]))) 155 (ALIGN(offsetof(struct pmap, pm_asni[(x)])))
156 156
157#define PMAP_ASN_KERNEL 0 /* kernel-reserved ASN */ 157#define PMAP_ASN_KERNEL 0 /* kernel-reserved ASN */
158#define PMAP_ASN_FIRST_USER 1 /* first user ASN */ 158#define PMAP_ASN_FIRST_USER 1 /* first user ASN */
159#define PMAP_ASNGEN_INVALID 0 /* reserved (invalid) ASN generation */ 159#define PMAP_ASNGEN_INVALID 0 /* reserved (invalid) ASN generation */
160#define PMAP_ASNGEN_INITIAL 1 /* first valid generation */ 160#define PMAP_ASNGEN_INITIAL 1 /* first valid generation */
161 161
162/* 162/*
163 * For each struct vm_page, there is a list of all currently valid virtual 163 * For each struct vm_page, there is a list of all currently valid virtual
164 * mappings of that page. An entry is a pv_entry_t, the list is pv_table. 164 * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
165 */ 165 */
166typedef struct pv_entry { 166typedef struct pv_entry {
167 struct pv_entry *pv_next; /* next pv_entry on list */ 167 struct pv_entry *pv_next; /* next pv_entry on list */
168 struct pmap *pv_pmap; /* pmap where mapping lies */ 168 struct pmap *pv_pmap; /* pmap where mapping lies */
169 vaddr_t pv_va; /* virtual address for mapping */ 169 vaddr_t pv_va; /* virtual address for mapping */
170 pt_entry_t *pv_pte; /* PTE that maps the VA */ 170 pt_entry_t *pv_pte; /* PTE that maps the VA */
171} *pv_entry_t; 171} *pv_entry_t;
172 172
173/* pvh_attrs */ 173/* pvh_attrs */
174#define PGA_MODIFIED 0x01 /* modified */ 174#define PGA_MODIFIED 0x01 /* modified */
175#define PGA_REFERENCED 0x02 /* referenced */ 175#define PGA_REFERENCED 0x02 /* referenced */
176 176
177/* pvh_usage */ 177/* pvh_usage */
178#define PGU_NORMAL 0 /* free or normal use */ 178#define PGU_NORMAL 0 /* free or normal use */
179#define PGU_PVENT 1 /* PV entries */ 179#define PGU_PVENT 1 /* PV entries */
180#define PGU_L1PT 2 /* level 1 page table */ 180#define PGU_L1PT 2 /* level 1 page table */
181#define PGU_L2PT 3 /* level 2 page table */ 181#define PGU_L2PT 3 /* level 2 page table */
182#define PGU_L3PT 4 /* level 3 page table */ 182#define PGU_L3PT 4 /* level 3 page table */
183 183
184#ifdef _KERNEL 184#ifdef _KERNEL
185 185
186#include <sys/atomic.h> 186#include <sys/atomic.h>
187 187
188#ifdef _KERNEL_OPT 
189#include "opt_dec_kn8ae.h" /* XXX */ 
190#if defined(DEC_KN8AE) 
191#define _PMAP_MAY_USE_PROM_CONSOLE 
192#endif 
193#else 
194#define _PMAP_MAY_USE_PROM_CONSOLE 
195#endif 
196 
197struct cpu_info; 188struct cpu_info;
198struct trapframe; 189struct trapframe;
199 190
200void pmap_init_cpu(struct cpu_info *); 191void pmap_init_cpu(struct cpu_info *);
201#if defined(MULTIPROCESSOR) 192#if defined(MULTIPROCESSOR)
202void pmap_tlb_shootdown_ipi(struct cpu_info *, struct trapframe *); 193void pmap_tlb_shootdown_ipi(struct cpu_info *, struct trapframe *);
203#endif /* MULTIPROCESSOR */ 194#endif /* MULTIPROCESSOR */
204 195
205#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) 196#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
206#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count) 197#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
207 198
208#define pmap_copy(dp, sp, da, l, sa) /* nothing */ 199#define pmap_copy(dp, sp, da, l, sa) /* nothing */
209#define pmap_update(pmap) /* nothing (yet) */ 200#define pmap_update(pmap) /* nothing (yet) */
210 201
211static __inline bool 202static __inline bool
212pmap_remove_all(struct pmap *pmap) 203pmap_remove_all(struct pmap *pmap)
213{ 204{
214 /* Nothing. */ 205 /* Nothing. */
215 return false; 206 return false;
216} 207}
217 208
218#define pmap_is_referenced(pg) \ 209#define pmap_is_referenced(pg) \
219 (((pg)->mdpage.pvh_attrs & PGA_REFERENCED) != 0) 210 (((pg)->mdpage.pvh_attrs & PGA_REFERENCED) != 0)
220#define pmap_is_modified(pg) \ 211#define pmap_is_modified(pg) \
221 (((pg)->mdpage.pvh_attrs & PGA_MODIFIED) != 0) 212 (((pg)->mdpage.pvh_attrs & PGA_MODIFIED) != 0)
222 213
223#define PMAP_STEAL_MEMORY /* enable pmap_steal_memory() */ 214#define PMAP_STEAL_MEMORY /* enable pmap_steal_memory() */
224#define PMAP_GROWKERNEL /* enable pmap_growkernel() */ 215#define PMAP_GROWKERNEL /* enable pmap_growkernel() */
225 216
226#define PMAP_DIRECT 217#define PMAP_DIRECT
227#define PMAP_DIRECT_MAP(pa) ALPHA_PHYS_TO_K0SEG((pa)) 218#define PMAP_DIRECT_MAP(pa) ALPHA_PHYS_TO_K0SEG((pa))
228#define PMAP_DIRECT_UNMAP(va) ALPHA_K0SEG_TO_PHYS((va)) 219#define PMAP_DIRECT_UNMAP(va) ALPHA_K0SEG_TO_PHYS((va))
229 220
230static __inline int 221static __inline int
231pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len, 222pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
232 int (*process)(void *, size_t, void *), void *arg) 223 int (*process)(void *, size_t, void *), void *arg)
233{ 224{
234 vaddr_t va = PMAP_DIRECT_MAP(pa); 225 vaddr_t va = PMAP_DIRECT_MAP(pa);
235 226
236 return process((void *)(va + pgoff), len, arg); 227 return process((void *)(va + pgoff), len, arg);
237} 228}
238 229
239/* 230/*
240 * Alternate mapping hooks for pool pages. Avoids thrashing the TLB. 231 * Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
241 */ 232 */
242#define PMAP_MAP_POOLPAGE(pa) PMAP_DIRECT_MAP(pa) 233#define PMAP_MAP_POOLPAGE(pa) PMAP_DIRECT_MAP(pa)
243#define PMAP_UNMAP_POOLPAGE(va) PMAP_DIRECT_UNMAP(va) 234#define PMAP_UNMAP_POOLPAGE(va) PMAP_DIRECT_UNMAP(va)
244 235
245/* 236/*
246 * Other hooks for the pool allocator. 237 * Other hooks for the pool allocator.
247 */ 238 */
248#define POOL_VTOPHYS(va) ALPHA_K0SEG_TO_PHYS((vaddr_t) (va)) 239#define POOL_VTOPHYS(va) ALPHA_K0SEG_TO_PHYS((vaddr_t) (va))
249 240
250bool pmap_pageidlezero(paddr_t); 241bool pmap_pageidlezero(paddr_t);
251#define PMAP_PAGEIDLEZERO(pa) pmap_pageidlezero((pa)) 242#define PMAP_PAGEIDLEZERO(pa) pmap_pageidlezero((pa))
252 243
253paddr_t vtophys(vaddr_t); 244paddr_t vtophys(vaddr_t);
254 245
255/* Machine-specific functions. */ 246/* Machine-specific functions. */
256void pmap_bootstrap(paddr_t, u_int, u_long); 247void pmap_bootstrap(paddr_t, u_int, u_long);
257int pmap_emulate_reference(struct lwp *, vaddr_t, int, int); 248int pmap_emulate_reference(struct lwp *, vaddr_t, int, int);
258#ifdef _PMAP_MAY_USE_PROM_CONSOLE 
259int pmap_uses_prom_console(void); 
260#endif 
261 249
262#define pmap_pte_pa(pte) (PG_PFNUM(*(pte)) << PGSHIFT) 250#define pmap_pte_pa(pte) (PG_PFNUM(*(pte)) << PGSHIFT)
263#define pmap_pte_prot(pte) (*(pte) & PG_PROT) 251#define pmap_pte_prot(pte) (*(pte) & PG_PROT)
264#define pmap_pte_w(pte) (*(pte) & PG_WIRED) 252#define pmap_pte_w(pte) (*(pte) & PG_WIRED)
265#define pmap_pte_v(pte) (*(pte) & PG_V) 253#define pmap_pte_v(pte) (*(pte) & PG_V)
266#define pmap_pte_pv(pte) (*(pte) & PG_PVLIST) 254#define pmap_pte_pv(pte) (*(pte) & PG_PVLIST)
267#define pmap_pte_asm(pte) (*(pte) & PG_ASM) 255#define pmap_pte_asm(pte) (*(pte) & PG_ASM)
268#define pmap_pte_exec(pte) (*(pte) & PG_EXEC) 256#define pmap_pte_exec(pte) (*(pte) & PG_EXEC)
269 257
270#define pmap_pte_set_w(pte, v) \ 258#define pmap_pte_set_w(pte, v) \
271do { \ 259do { \
272 if (v) \ 260 if (v) \
273 *(pte) |= PG_WIRED; \ 261 *(pte) |= PG_WIRED; \
274 else \ 262 else \
275 *(pte) &= ~PG_WIRED; \ 263 *(pte) &= ~PG_WIRED; \
276} while (0) 264} while (0)
277 265
278#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte)) 266#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
279 267
280#define pmap_pte_set_prot(pte, np) \ 268#define pmap_pte_set_prot(pte, np) \
281do { \ 269do { \
282 *(pte) &= ~PG_PROT; \ 270 *(pte) &= ~PG_PROT; \
283 *(pte) |= (np); \ 271 *(pte) |= (np); \
284} while (0) 272} while (0)
285 273
286#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte)) 274#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
287 275
288static __inline pt_entry_t *pmap_l2pte(pmap_t, vaddr_t, pt_entry_t *); 276static __inline pt_entry_t *pmap_l2pte(pmap_t, vaddr_t, pt_entry_t *);
289static __inline pt_entry_t *pmap_l3pte(pmap_t, vaddr_t, pt_entry_t *); 277static __inline pt_entry_t *pmap_l3pte(pmap_t, vaddr_t, pt_entry_t *);
290 278
291#define pmap_l1pte(pmap, v) \ 279#define pmap_l1pte(pmap, v) \
292 (&(pmap)->pm_lev1map[l1pte_index((vaddr_t)(v))]) 280 (&(pmap)->pm_lev1map[l1pte_index((vaddr_t)(v))])
293 281
294static __inline pt_entry_t * 282static __inline pt_entry_t *
295pmap_l2pte(pmap_t pmap, vaddr_t v, pt_entry_t *l1pte) 283pmap_l2pte(pmap_t pmap, vaddr_t v, pt_entry_t *l1pte)
296{ 284{
297 pt_entry_t *lev2map; 285 pt_entry_t *lev2map;
298 286
299 if (l1pte == NULL) { 287 if (l1pte == NULL) {
300 l1pte = pmap_l1pte(pmap, v); 288 l1pte = pmap_l1pte(pmap, v);
301 if (pmap_pte_v(l1pte) == 0) 289 if (pmap_pte_v(l1pte) == 0)
302 return (NULL); 290 return (NULL);
303 } 291 }
304 292
305 lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte)); 293 lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
306 return (&lev2map[l2pte_index(v)]); 294 return (&lev2map[l2pte_index(v)]);
307} 295}
308 296
309static __inline pt_entry_t * 297static __inline pt_entry_t *
310pmap_l3pte(pmap_t pmap, vaddr_t v, pt_entry_t *l2pte) 298pmap_l3pte(pmap_t pmap, vaddr_t v, pt_entry_t *l2pte)
311{ 299{
312 pt_entry_t *l1pte, *lev2map, *lev3map; 300 pt_entry_t *l1pte, *lev2map, *lev3map;
313 301
314 if (l2pte == NULL) { 302 if (l2pte == NULL) {
315 l1pte = pmap_l1pte(pmap, v); 303 l1pte = pmap_l1pte(pmap, v);
316 if (pmap_pte_v(l1pte) == 0) 304 if (pmap_pte_v(l1pte) == 0)
317 return (NULL); 305 return (NULL);
318 306
319 lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte)); 307 lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
320 l2pte = &lev2map[l2pte_index(v)]; 308 l2pte = &lev2map[l2pte_index(v)];
321 if (pmap_pte_v(l2pte) == 0) 309 if (pmap_pte_v(l2pte) == 0)
322 return (NULL); 310 return (NULL);
323 } 311 }
324 312
325 lev3map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l2pte)); 313 lev3map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l2pte));
326 return (&lev3map[l3pte_index(v)]); 314 return (&lev3map[l3pte_index(v)]);
327} 315}
328 316
329/* 317/*
330 * Macro for processing deferred I-stream synchronization. 318 * Macro for processing deferred I-stream synchronization.
331 * 319 *
332 * The pmap module may defer syncing the user I-stream until the 320 * The pmap module may defer syncing the user I-stream until the
333 * return to userspace, since the IMB PALcode op can be quite 321 * return to userspace, since the IMB PALcode op can be quite
334 * expensive. Since user instructions won't be executed until 322 * expensive. Since user instructions won't be executed until
335 * the return to userspace, this can be deferred until userret(). 323 * the return to userspace, this can be deferred until userret().
336 */ 324 */
337#define PMAP_USERRET(pmap) \ 325#define PMAP_USERRET(pmap) \
338do { \ 326do { \
339 u_long cpu_mask = (1UL << cpu_number()); \ 327 u_long cpu_mask = (1UL << cpu_number()); \
340 \ 328 \
341 if ((pmap)->pm_needisync & cpu_mask) { \ 329 if ((pmap)->pm_needisync & cpu_mask) { \
342 atomic_and_ulong(&(pmap)->pm_needisync, ~cpu_mask); \ 330 atomic_and_ulong(&(pmap)->pm_needisync, ~cpu_mask); \
343 alpha_pal_imb(); \ 331 alpha_pal_imb(); \
344 } \ 332 } \
345} while (0) 333} while (0)
346 334
347/* 335/*
348 * pmap-specific data store in the vm_page structure. 336 * pmap-specific data store in the vm_page structure.
349 */ 337 */
350#define __HAVE_VM_PAGE_MD 338#define __HAVE_VM_PAGE_MD
351struct vm_page_md { 339struct vm_page_md {
352 struct pv_entry *pvh_list; /* pv_entry list */ 340 struct pv_entry *pvh_list; /* pv_entry list */
353 int pvh_attrs; /* page attributes */ 341 int pvh_attrs; /* page attributes */
354 unsigned pvh_refcnt; 342 unsigned pvh_refcnt;
355}; 343};
356 344
357#define VM_MDPAGE_INIT(pg) \ 345#define VM_MDPAGE_INIT(pg) \
358do { \ 346do { \
359 (pg)->mdpage.pvh_list = NULL; \ 347 (pg)->mdpage.pvh_list = NULL; \
360 (pg)->mdpage.pvh_refcnt = 0; \ 348 (pg)->mdpage.pvh_refcnt = 0; \
361} while (/*CONSTCOND*/0) 349} while (/*CONSTCOND*/0)
362 350
363#endif /* _KERNEL */ 351#endif /* _KERNEL */
364 352
365#endif /* _PMAP_MACHINE_ */ 353#endif /* _PMAP_MACHINE_ */

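The pmap_l1pte()/pmap_l2pte()/pmap_l3pte() helpers above expose the three-level table walk in pieces; passing NULL for the upper-level PTE makes each helper perform the whole descent itself. A sketch of a full VA-to-PA lookup built on them, roughly the core of a pmap_extract()-style routine (sketch only; a real lookup would also special-case direct-mapped K0SEG addresses, cf. PMAP_DIRECT_UNMAP above):

	/*
	 * Sketch: translate a virtual address through the 3-level page
	 * table.  pmap_l3pte(pmap, va, NULL) performs the complete
	 * lev1 -> lev2 -> lev3 descent and returns NULL if any upper
	 * level is invalid; the final level-3 PTE must still be
	 * checked for validity.
	 */
	static bool
	va_to_pa(pmap_t pmap, vaddr_t va, paddr_t *pap)
	{
		pt_entry_t *l3pte = pmap_l3pte(pmap, va, NULL);

		if (l3pte == NULL || pmap_pte_v(l3pte) == 0)
			return false;

		*pap = pmap_pte_pa(l3pte) | (va & (PAGE_SIZE - 1));
		return true;
	}
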
cvs diff -r1.14 -r1.15 src/sys/arch/alpha/include/prom.h

--- src/sys/arch/alpha/include/prom.h 2012/02/06 02:14:13 1.14
+++ src/sys/arch/alpha/include/prom.h 2020/09/03 02:09:09 1.15
@@ -1,124 +1,137 @@ @@ -1,124 +1,137 @@
1/* $NetBSD: prom.h,v 1.14 2012/02/06 02:14:13 matt Exp $ */ 1/* $NetBSD: prom.h,v 1.15 2020/09/03 02:09:09 thorpej Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. 4 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Author: Keith Bostic, Chris G. Demetriou 7 * Author: Keith Bostic, Chris G. Demetriou
8 * 8 *
9 * Permission to use, copy, modify and distribute this software and 9 * Permission to use, copy, modify and distribute this software and
10 * its documentation is hereby granted, provided that both the copyright 10 * its documentation is hereby granted, provided that both the copyright
11 * notice and this permission notice appear in all copies of the 11 * notice and this permission notice appear in all copies of the
12 * software, derivative works or modified versions, and any portions 12 * software, derivative works or modified versions, and any portions
13 * thereof, and that both notices appear in supporting documentation. 13 * thereof, and that both notices appear in supporting documentation.
14 * 14 *
15 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 15 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
16 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 16 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
17 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 17 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
18 * 18 *
19 * Carnegie Mellon requests users of this software to return to 19 * Carnegie Mellon requests users of this software to return to
20 * 20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science 22 * School of Computer Science
23 * Carnegie Mellon University 23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890 24 * Pittsburgh PA 15213-3890
25 * 25 *
26 * any improvements or extensions that they make and grant Carnegie the 26 * any improvements or extensions that they make and grant Carnegie the
27 * rights to redistribute these changes. 27 * rights to redistribute these changes.
28 */ 28 */
29 29
30#ifndef ASSEMBLER 30#ifndef ASSEMBLER
31struct prom_vec { 31struct prom_vec {
32 uint64_t routine; 32 uint64_t routine;
33 void *routine_arg; 33 void *routine_arg;
34}; 34};
35 35
36/* The return value from a prom call. */ 36/* The return value from a prom call. */
37typedef union { 37typedef union {
38 struct { 38 struct {
39 uint64_t 39 uint64_t
40 retval : 32, /* return value. */ 40 retval : 32, /* return value. */
41 unit : 8, 41 unit : 8,
42 mbz : 8, 42 mbz : 8,
43 error : 13, 43 error : 13,
44 status : 3; 44 status : 3;
45 } u; 45 } u;
46 uint64_t bits; 46 uint64_t bits;
47} prom_return_t; 47} prom_return_t;
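
prom_return_t lets a caller view the 64-bit value returned by a console call either as raw bits or as its status/error/unit/retval fields. A hedged sketch of decoding one, modeled loosely on the kernel's prom_getenv() in prom.c (treating bit 2 of status as the error indicator is an assumption carried over from that code):

	/* Hypothetical decode -- illustrative only. */
	static int
	example_getenv(int id, char *to, int len)
	{
		prom_return_t ret;

		ret.bits = prom_dispatch(PROM_R_GETENV, id,
		    (uint64_t)to, len - 1, 0);
		if (ret.u.status & 0x4)		/* assumed error bit */
			return -1;
		to[ret.u.retval] = '\0';	/* retval = value length */
		return (int)ret.u.retval;
	}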
48 48
49#ifdef _STANDALONE 49#ifdef _STANDALONE
50int getchar(void); 50int getchar(void);
51void putchar(int); 51void putchar(int);
52#endif 52#endif
53 53
54void prom_halt(int) __attribute__((__noreturn__)); 54void prom_halt(int) __attribute__((__noreturn__));
55int prom_getenv(int, char *, int); 55int prom_getenv(int, char *, int);
56 56
57void hwrpb_primary_init(void); 57void hwrpb_primary_init(void);
58void hwrpb_restart_setup(void); 58void hwrpb_restart_setup(void);
59#endif 59#endif
60 60
61/* Prom operation values. */ 61/* Prom operation values. */
62#define PROM_R_CLOSE 0x11 62#define PROM_R_CLOSE 0x11
63#define PROM_R_GETC 0x01 63#define PROM_R_GETC 0x01
64#define PROM_R_GETENV 0x22 64#define PROM_R_GETENV 0x22
65#define PROM_R_OPEN 0x10 65#define PROM_R_OPEN 0x10
66#define PROM_R_PUTS 0x02 66#define PROM_R_PUTS 0x02
67#define PROM_R_READ 0x13 67#define PROM_R_READ 0x13
68#define PROM_R_WRITE 0x14 68#define PROM_R_WRITE 0x14
69#define PROM_R_IOCTL 0x12 69#define PROM_R_IOCTL 0x12
70 70
71/* Prom IOCTL operation subcodes */ 71/* Prom IOCTL operation subcodes */
72#define PROM_I_SKIP2IRG 1 72#define PROM_I_SKIP2IRG 1
73#define PROM_I_SKIP2MARK 2 73#define PROM_I_SKIP2MARK 2
74#define PROM_I_REWIND 3 74#define PROM_I_REWIND 3
75#define PROM_I_WRITEMARK 4 75#define PROM_I_WRITEMARK 4
76 76
77/* Environment variable values. */ 77/* Environment variable values. */
78#define PROM_E_BOOTED_DEV 0x4 78#define PROM_E_BOOTED_DEV 0x4
79#define PROM_E_BOOTED_FILE 0x6 79#define PROM_E_BOOTED_FILE 0x6
80#define PROM_E_BOOTED_OSFLAGS 0x8 80#define PROM_E_BOOTED_OSFLAGS 0x8
81#define PROM_E_TTY_DEV 0xf 81#define PROM_E_TTY_DEV 0xf
82#define PROM_E_SCSIID 0x42 82#define PROM_E_SCSIID 0x42
83#define PROM_E_SCSIFAST 0x43 83#define PROM_E_SCSIFAST 0x43
84 84
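
The PROM_E_* codes name the environment variables the console firmware exports. A short hedged example of reading one through the prom_getenv() wrapper declared above (assuming, as in the sketch after prom_return_t, that it returns the value's length):

	/* Hypothetical use -- illustrative only. */
	static void
	example_print_bootdev(void)
	{
		char bootdev[64];

		if (prom_getenv(PROM_E_BOOTED_DEV, bootdev,
		    sizeof(bootdev)) > 0)
			printf("booted from %s\n", bootdev);
	}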
85#if defined(_STANDALONE) || defined(ENABLEPROM) 85#if defined(_STANDALONE) || defined(ENABLEPROM)
86/* 86/*
87 * These can't be called from the kernel without great care. 87 * These can't be called from the kernel without great care.
88 * 88 *
89 * There have to be stub routines to do the copying that ensures that the 89 * There have to be stub routines to do the copying that ensures that the
90 * PROM doesn't get called with an address larger than 32 bits. Calls that 90 * PROM doesn't get called with an address larger than 32 bits. Calls that
91 * either don't need to copy anything, or don't need the copy because it's 91 * either don't need to copy anything, or don't need the copy because it's
92 * already being done elsewhere, are defined here. 92 * already being done elsewhere, are defined here.
93 */ 93 */
94#define prom_open(dev, len) \ 94#define prom_open(dev, len) \
95 prom_dispatch(PROM_R_OPEN, (dev), (len), 0, 0) 95 prom_dispatch(PROM_R_OPEN, (dev), (len), 0, 0)
96#define prom_close(chan) \ 96#define prom_close(chan) \
97 prom_dispatch(PROM_R_CLOSE, chan, 0, 0, 0) 97 prom_dispatch(PROM_R_CLOSE, chan, 0, 0, 0)
98#define prom_read(chan, len, buf, blkno) \ 98#define prom_read(chan, len, buf, blkno) \
99 prom_dispatch(PROM_R_READ, chan, len, (uint64_t)buf, blkno) 99 prom_dispatch(PROM_R_READ, chan, len, (uint64_t)buf, blkno)
100#define prom_write(chan, len, buf, blkno) \ 100#define prom_write(chan, len, buf, blkno) \
101 prom_dispatch(PROM_R_WRITE, chan, len, (uint64_t)buf, blkno) 101 prom_dispatch(PROM_R_WRITE, chan, len, (uint64_t)buf, blkno)
102#define prom_ioctl(chan, op, count) \ 102#define prom_ioctl(chan, op, count) \
 103	prom_dispatch(PROM_R_IOCTL, chan, op, (int64_t)count, 0)  103	prom_dispatch(PROM_R_IOCTL, chan, op, (int64_t)count, 0)
104#define prom_putstr(chan, str, len) \ 104#define prom_putstr(chan, str, len) \
105 prom_dispatch(PROM_R_PUTS, chan, (uint64_t)str, len, 0) 105 prom_dispatch(PROM_R_PUTS, chan, (uint64_t)str, len, 0)
106#define prom_getc(chan) \ 106#define prom_getc(chan) \
107 prom_dispatch(PROM_R_GETC, chan, 0, 0, 0) 107 prom_dispatch(PROM_R_GETC, chan, 0, 0, 0)
108#define prom_getenv_disp(id, buf, len) \ 108#define prom_getenv_disp(id, buf, len) \
109 prom_dispatch(PROM_R_GETENV, id, (uint64_t)buf, len, 0) 109 prom_dispatch(PROM_R_GETENV, id, (uint64_t)buf, len, 0)
110#endif 110#endif
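
In standalone code these macros expand straight into prom_dispatch() calls. A hedged sketch of a bootstrap opening the boot device and reading its first block (the buffer is static so it stays 32-bit addressable, per the comment above; that the open channel comes back in retval is an assumption modeled on the alpha stand code):

	/* Hypothetical standalone fragment -- illustrative only. */
	static char blkbuf[512];	/* static: 32-bit addressable */

	static int
	example_read_block0(void)
	{
		prom_return_t ret;
		char devname[64];
		int devlen, chan;

		ret.bits = prom_getenv_disp(PROM_E_BOOTED_DEV,
		    devname, sizeof(devname) - 1);
		devlen = (int)ret.u.retval;
		devname[devlen] = '\0';

		ret.bits = prom_open((uint64_t)devname, devlen);
		if (ret.u.status != 0)		/* assumed: 0 = success */
			return -1;
		chan = (int)ret.u.retval;	/* assumed: channel here */

		ret.bits = prom_read(chan, sizeof(blkbuf), blkbuf, 0);
		(void)prom_close(chan);
		return ret.u.status != 0 ? -1 : 0;
	}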
111 111
112#ifndef ASSEMBLER 112#ifndef ASSEMBLER
113#ifdef _KERNEL 113#ifdef _KERNEL
 114
 115#ifdef _KERNEL_OPT
 116#include "opt_dec_kn8ae.h"
 117
 118#if defined(DEC_KN8AE)
 119#define _PROM_MAY_USE_PROM_CONSOLE
 120#endif /* DEC_KN8AE */
 121#endif /* _KERNEL_OPT */
 122
 123extern bool prom_interface_initialized;
 124
 125bool prom_uses_prom_console(void);
 126
114void prom_enter(void); 127void prom_enter(void);
115void prom_leave(void); 128void prom_leave(void);
116 129
117void promcnputc(dev_t, int); 130void promcnputc(dev_t, int);
118int promcngetc(dev_t); 131int promcngetc(dev_t);
119int promcnlookc(dev_t, char *); 132int promcnlookc(dev_t, char *);
120 133
121uint64_t prom_dispatch(uint64_t, uint64_t, uint64_t, uint64_t, 134uint64_t prom_dispatch(uint64_t, uint64_t, uint64_t, uint64_t,
122 uint64_t); 135 uint64_t);
123#endif /* _KERNEL */ 136#endif /* _KERNEL */
124#endif /* ASSEMBLER */ 137#endif /* ASSEMBLER */
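
The additions above are the header half of the cleanup described in the commit message: the compile-time guard _PROM_MAY_USE_PROM_CONSOLE is now derived in one place from the DEC_KN8AE option, and prom_uses_prom_console() gives the rest of the kernel a run-time answer (its body lives in prom.c and is not part of this header diff). A hedged sketch of the consumer side:

	#include <machine/prom.h>

	/* Hypothetical consumer -- illustrative only. */
	static void
	example_cnputc(dev_t dev, int c)
	{
		/*
		 * On a machine whose console is still the firmware's
		 * (the KN8AE case), output must be routed through the
		 * PROM dispatcher rather than a device driver.
		 */
		if (prom_uses_prom_console())
			promcnputc(dev, c);
		/* else: normal device console path (not shown) */
	}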