Thu Jul 22 01:39:18 2021 UTC ()
Various minor cleanups and bug fixes to the FP software completion code:
- Use __CTASSERT() instead of rolling our own compile-time assertion
  using cpp.
- Use __BIT() &c instead of rolling our own.
- Improve some comments.
- Define a default FP_C and FPCR value that is self-consistent, and
  initialize it properly at process creation time.
- Fix signal information when the trap shadow cannot be resolved.
- Use defined constants rather than magic numbers for the exception
  summary bits.
- Add a machdep sysctl to enable FP software-completion debugging.


(thorpej)
diff -r1.24 -r1.25 src/sys/arch/alpha/alpha/fp_complete.c
diff -r1.374 -r1.375 src/sys/arch/alpha/alpha/machdep.c
diff -r1.102 -r1.103 src/sys/arch/alpha/include/cpu.h
diff -r1.7 -r1.8 src/sys/arch/alpha/include/fpu.h

cvs diff -r1.24 -r1.25 src/sys/arch/alpha/alpha/fp_complete.c (switch to unified diff)

--- src/sys/arch/alpha/alpha/fp_complete.c 2020/09/01 08:22:36 1.24
+++ src/sys/arch/alpha/alpha/fp_complete.c 2021/07/22 01:39:18 1.25
@@ -1,777 +1,897 @@ @@ -1,777 +1,897 @@
1/* $NetBSD: fp_complete.c,v 1.24 2020/09/01 08:22:36 thorpej Exp $ */ 1/* $NetBSD: fp_complete.c,v 1.25 2021/07/22 01:39:18 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001 Ross Harvey 4 * Copyright (c) 2001 Ross Harvey
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software 15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement: 16 * must display the following acknowledgement:
17 * This product includes software developed by the NetBSD 17 * This product includes software developed by the NetBSD
18 * Foundation, Inc. and its contributors. 18 * Foundation, Inc. and its contributors.
19 * 4. Neither the name of The NetBSD Foundation nor the names of its 19 * 4. Neither the name of The NetBSD Foundation nor the names of its
20 * contributors may be used to endorse or promote products derived 20 * contributors may be used to endorse or promote products derived
21 * from this software without specific prior written permission. 21 * from this software without specific prior written permission.
22 * 22 *
23 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 23 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 26 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE. 33 * POSSIBILITY OF SUCH DAMAGE.
34 */ 34 */
35 35
 36#include "opt_ddb.h"
 37
36#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 38#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
37 39
38__KERNEL_RCSID(0, "$NetBSD: fp_complete.c,v 1.24 2020/09/01 08:22:36 thorpej Exp $"); 40__KERNEL_RCSID(0, "$NetBSD: fp_complete.c,v 1.25 2021/07/22 01:39:18 thorpej Exp $");
39 41
40#include <sys/param.h> 42#include <sys/param.h>
41#include <sys/systm.h> 43#include <sys/systm.h>
42#include <sys/proc.h> 44#include <sys/proc.h>
43#include <sys/atomic.h> 45#include <sys/atomic.h>
44#include <sys/evcnt.h> 46#include <sys/evcnt.h>
45 47
46#include <machine/cpu.h> 48#include <machine/cpu.h>
47#include <machine/fpu.h> 49#include <machine/fpu.h>
48#include <machine/reg.h> 50#include <machine/reg.h>
49#include <machine/alpha.h> 51#include <machine/alpha.h>
50#include <alpha/alpha/db_instruction.h> 52#include <alpha/alpha/db_instruction.h>
51 53
52#include <lib/libkern/softfloat.h> 54#include <lib/libkern/softfloat.h>
53 55
 56/*
 57 * Validate our assumptions about bit positions.
 58 */
 59__CTASSERT(ALPHA_AESR_INV == (FP_X_INV << 1));
 60__CTASSERT(ALPHA_AESR_DZE == (FP_X_DZ << 1));
 61__CTASSERT(ALPHA_AESR_OVF == (FP_X_OFL << 1));
 62__CTASSERT(ALPHA_AESR_UNF == (FP_X_UFL << 1));
 63__CTASSERT(ALPHA_AESR_INE == (FP_X_IMP << 1));
 64__CTASSERT(ALPHA_AESR_IOV == (FP_X_IOV << 1));
 65
 66__CTASSERT(IEEE_TRAP_ENABLE_INV == (FP_X_INV << 1));
 67__CTASSERT(IEEE_TRAP_ENABLE_DZE == (FP_X_DZ << 1));
 68__CTASSERT(IEEE_TRAP_ENABLE_OVF == (FP_X_OFL << 1));
 69__CTASSERT(IEEE_TRAP_ENABLE_UNF == (FP_X_UFL << 1));
 70__CTASSERT(IEEE_TRAP_ENABLE_INE == (FP_X_IMP << 1));
 71
 72__CTASSERT((uint64_t)FP_X_IMP << (61 - 3) == FPCR_INED);
 73__CTASSERT((uint64_t)FP_X_UFL << (61 - 3) == FPCR_UNFD);
 74__CTASSERT((uint64_t)FP_X_OFL << (49 - 0) == FPCR_OVFD);
 75__CTASSERT((uint64_t)FP_X_DZ << (49 - 0) == FPCR_DZED);
 76__CTASSERT((uint64_t)FP_X_INV << (49 - 0) == FPCR_INVD);
 77
 78__CTASSERT(FP_C_ALLBITS == MDLWP_FP_C);
 79
54#define TSWINSIZE 4 /* size of trap shadow window in uint32_t units */ 80#define TSWINSIZE 4 /* size of trap shadow window in uint32_t units */
55 81
56/* Set Name Opcodes AARM C.* Symbols */ 82/* Set Name Opcodes AARM C.* Symbols */
57 83
58#define CPUREG_CLASS (0xfUL << 0x10) /* INT[ALSM] */ 84#define CPUREG_CLASS (0xfUL << 0x10) /* INT[ALSM] */
59#define FPUREG_CLASS (0xfUL << 0x14) /* ITFP, FLT[ILV] */ 85#define FPUREG_CLASS (0xfUL << 0x14) /* ITFP, FLT[ILV] */
60#define CHECKFUNCTIONCODE (1UL << 0x18) /* MISC */ 86#define CHECKFUNCTIONCODE (1UL << 0x18) /* MISC */
61#define TRAPSHADOWBOUNDARY (1UL << 0x00 | /* PAL */\ 87#define TRAPSHADOWBOUNDARY (1UL << 0x00 | /* PAL */\
62 1UL << 0x19 | /* \PAL\ */\ 88 1UL << 0x19 | /* \PAL\ */\
63 1UL << 0x1a | /* JSR */\ 89 1UL << 0x1a | /* JSR */\
64 1UL << 0x1b | /* \PAL\ */\ 90 1UL << 0x1b | /* \PAL\ */\
65 1UL << 0x1d | /* \PAL\ */\ 91 1UL << 0x1d | /* \PAL\ */\
66 1UL << 0x1e | /* \PAL\ */\ 92 1UL << 0x1e | /* \PAL\ */\
67 1UL << 0x1f | /* \PAL\ */\ 93 1UL << 0x1f | /* \PAL\ */\
68 0xffffUL << 0x30 | /* branch ops */\ 94 0xffffUL << 0x30 | /* branch ops */\
69 CHECKFUNCTIONCODE) 95 CHECKFUNCTIONCODE)
70 96
71#define MAKE_FLOATXX(width, expwidth, sign, exp, msb, rest_of_frac) \ 97#define MAKE_FLOATXX(width, expwidth, sign, exp, msb, rest_of_frac) \
72 (u_int ## width ## _t)(sign) << ((width) - 1) |\ 98 (u_int ## width ## _t)(sign) << ((width) - 1) |\
73 (u_int ## width ## _t)(exp) << ((width) - 1 - (expwidth)) |\ 99 (u_int ## width ## _t)(exp) << ((width) - 1 - (expwidth)) |\
74 (u_int ## width ## _t)(msb) << ((width) - 1 - (expwidth) - 1) |\ 100 (u_int ## width ## _t)(msb) << ((width) - 1 - (expwidth) - 1) |\
75 (u_int ## width ## _t)(rest_of_frac) 101 (u_int ## width ## _t)(rest_of_frac)
76 102
77#define FLOAT32QNAN MAKE_FLOATXX(32, 8, 0, 0xff, 1, 0) 103#define FLOAT32QNAN MAKE_FLOATXX(32, 8, 0, 0xff, 1, 0)
78#define FLOAT64QNAN MAKE_FLOATXX(64, 11, 0, 0x7ff, 1, 0) 104#define FLOAT64QNAN MAKE_FLOATXX(64, 11, 0, 0x7ff, 1, 0)
79 105
80#define IS_SUBNORMAL(v) ((v)->exp == 0 && (v)->frac != 0) 106#define IS_SUBNORMAL(v) ((v)->exp == 0 && (v)->frac != 0)
81 107
82#define PREFILTER_SUBNORMAL(l,v) if ((l)->l_md.md_flags & IEEE_MAP_DMZ \ 108#define PREFILTER_SUBNORMAL(l,v) if ((l)->l_md.md_flags & IEEE_MAP_DMZ \
83 && IS_SUBNORMAL(v)) \ 109 && IS_SUBNORMAL(v)) \
84 (v)->frac = 0; else 110 (v)->frac = 0; else
85 111
86#define POSTFILTER_SUBNORMAL(l,v) if ((l)->l_md.md_flags & IEEE_MAP_UMZ \ 112#define POSTFILTER_SUBNORMAL(l,v) if ((l)->l_md.md_flags & IEEE_MAP_UMZ \
87 && IS_SUBNORMAL(v)) \ 113 && IS_SUBNORMAL(v)) \
88 (v)->frac = 0; else 114 (v)->frac = 0; else
89 115
90 /* Alpha returns 2.0 for true, all zeroes for false. */ 116 /* Alpha returns 2.0 for true, all zeroes for false. */
91 117
92#define CMP_RESULT(flag) ((flag) ? 4UL << 60 : 0L) 118#define CMP_RESULT(flag) ((flag) ? 4UL << 60 : 0L)
93 119
94 /* Move bits from sw fp_c to hw fpcr. */ 120 /* Move bits from sw fp_c to hw fpcr. */
95 121
96#define CRBLIT(sw, hw, m, offs) (((sw) & ~(m)) | ((hw) >> (offs) & (m))) 122#define CRBLIT(sw, hw, m, offs) (((sw) & ~(m)) | ((hw) >> (offs) & (m)))
97 123
98struct evcnt fpevent_use; 124struct evcnt fpevent_use;
99struct evcnt fpevent_reuse; 125struct evcnt fpevent_reuse;
100 126
101/* 127/*
102 * Temporary trap shadow instrumentation. The [un]resolved counters 128 * Temporary trap shadow instrumentation. The [un]resolved counters
103 * could be kept permanently, as they provide information on whether 129 * could be kept permanently, as they provide information on whether
104 * user code has met AARM trap shadow generation requirements. 130 * user code has met AARM trap shadow generation requirements.
105 */ 131 */
106 132
107struct alpha_shadow { 133struct alpha_shadow {
108 uint64_t resolved; /* cases trigger pc found */ 134 uint64_t resolved; /* cases trigger pc found */
109 uint64_t unresolved; /* cases it wasn't, code problems? */ 135 uint64_t unresolved; /* cases it wasn't, code problems? */
110 uint64_t scans; /* trap shadow scans */ 136 uint64_t scans; /* trap shadow scans */
111 uint64_t len; /* number of instructions examined */ 137 uint64_t len; /* number of instructions examined */
112 uint64_t uop; /* bit mask of unexpected opcodes */ 138 uint64_t uop; /* bit mask of unexpected opcodes */
113 uint64_t sqrts; /* ev6+ square root single count */ 139 uint64_t sqrts; /* ev6+ square root single count */
114 uint64_t sqrtt; /* ev6+ square root double count */ 140 uint64_t sqrtt; /* ev6+ square root double count */
115 uint32_t ufunc; /* bit mask of unexpected functions */ 141 uint32_t ufunc; /* bit mask of unexpected functions */
116 uint32_t max; /* max trap shadow scan */ 142 uint32_t max; /* max trap shadow scan */
117 uint32_t nilswop; /* unexpected op codes */ 143 uint32_t nilswop; /* unexpected op codes */
118 uint32_t nilswfunc; /* unexpected function codes */ 144 uint32_t nilswfunc; /* unexpected function codes */
119 uint32_t nilanyop; /* this "cannot happen" */ 145 uint32_t nilanyop; /* this "cannot happen" */
120 uint32_t vax; /* sigs from vax fp opcodes */ 146 uint32_t vax; /* sigs from vax fp opcodes */
121} alpha_shadow, alpha_shadow_zero; 147} alpha_shadow, alpha_shadow_zero;
122 148
123static float64 float64_unk(float64, float64); 149static float64 float64_unk(float64, float64);
124static float64 compare_un(float64, float64); 150static float64 compare_un(float64, float64);
125static float64 compare_eq(float64, float64); 151static float64 compare_eq(float64, float64);
126static float64 compare_lt(float64, float64); 152static float64 compare_lt(float64, float64);
127static float64 compare_le(float64, float64); 153static float64 compare_le(float64, float64);
128static void cvt_qs_ts_st_gf_qf(uint32_t, struct lwp *); 154static void cvt_qs_ts_st_gf_qf(uint32_t, struct lwp *);
129static void cvt_gd(uint32_t, struct lwp *); 155static void cvt_gd(uint32_t, struct lwp *);
130static void cvt_qt_dg_qg(uint32_t, struct lwp *); 156static void cvt_qt_dg_qg(uint32_t, struct lwp *);
131static void cvt_tq_gq(uint32_t, struct lwp *); 157static void cvt_tq_gq(uint32_t, struct lwp *);
132 158
133static float32 (*swfp_s[])(float32, float32) = { 159static float32 (*swfp_s[])(float32, float32) = {
134 float32_add, float32_sub, float32_mul, float32_div, 160 float32_add, float32_sub, float32_mul, float32_div,
135}; 161};
136 162
137static float64 (*swfp_t[])(float64, float64) = { 163static float64 (*swfp_t[])(float64, float64) = {
138 float64_add, float64_sub, float64_mul, float64_div, 164 float64_add, float64_sub, float64_mul, float64_div,
139 compare_un, compare_eq, compare_lt, compare_le, 165 compare_un, compare_eq, compare_lt, compare_le,
140 float64_unk, float64_unk, float64_unk, float64_unk 166 float64_unk, float64_unk, float64_unk, float64_unk
141}; 167};
142 168
143static void (*swfp_cvt[])(uint32_t, struct lwp *) = { 169static void (*swfp_cvt[])(uint32_t, struct lwp *) = {
144 cvt_qs_ts_st_gf_qf, cvt_gd, cvt_qt_dg_qg, cvt_tq_gq 170 cvt_qs_ts_st_gf_qf, cvt_gd, cvt_qt_dg_qg, cvt_tq_gq
145}; 171};
146 172
147static void 173static void
148this_cannot_happen(int what_cannot_happen, int64_t bits) 174this_cannot_happen(int what_cannot_happen, int64_t bits)
149{ 175{
150 static int total; 176 static int total;
151 alpha_instruction inst; 177 alpha_instruction inst;
152 static uint64_t reported; 178 static uint64_t reported;
153 179
154 inst.bits = bits; 180 inst.bits = bits;
155 ++alpha_shadow.nilswfunc; 181 ++alpha_shadow.nilswfunc;
156 if (bits != -1) 182 if (bits != -1)
157 alpha_shadow.uop |= 1UL << inst.generic_format.opcode; 183 alpha_shadow.uop |= 1UL << inst.generic_format.opcode;
158 if (1UL << what_cannot_happen & reported) 184 if (1UL << what_cannot_happen & reported)
159 return; 185 return;
160 reported |= 1UL << what_cannot_happen; 186 reported |= 1UL << what_cannot_happen;
161 if (total >= 1000) 187 if (total >= 1000)
162 return; /* right now, this return "cannot happen" */ 188 return; /* right now, this return "cannot happen" */
163 ++total; 189 ++total;
164 if (bits) 190 if (bits)
165 printf("FP instruction %x\n", (unsigned int)bits); 191 printf("FP instruction %x\n", (unsigned int)bits);
166 printf("FP event %d/%lx/%lx\n", what_cannot_happen, reported, 192 printf("FP event %d/%lx/%lx\n", what_cannot_happen, reported,
167 alpha_shadow.uop); 193 alpha_shadow.uop);
168 printf("Please report this to port-alpha-maintainer@NetBSD.org\n"); 194 printf("Please report this to port-alpha-maintainer@NetBSD.org\n");
169} 195}
170 196
171static inline void 197static inline void
172sts(unsigned int rn, s_float *v, struct lwp *l) 198sts(unsigned int rn, s_float *v, struct lwp *l)
173{ 199{
174 alpha_sts(rn, v); 200 alpha_sts(rn, v);
175 PREFILTER_SUBNORMAL(l, v); 201 PREFILTER_SUBNORMAL(l, v);
176} 202}
177 203
178static inline void 204static inline void
179stt(unsigned int rn, t_float *v, struct lwp *l) 205stt(unsigned int rn, t_float *v, struct lwp *l)
180{ 206{
181 alpha_stt(rn, v); 207 alpha_stt(rn, v);
182 PREFILTER_SUBNORMAL(l, v); 208 PREFILTER_SUBNORMAL(l, v);
183} 209}
184 210
185static inline void 211static inline void
186lds(unsigned int rn, s_float *v, struct lwp *l) 212lds(unsigned int rn, s_float *v, struct lwp *l)
187{ 213{
188 POSTFILTER_SUBNORMAL(l, v); 214 POSTFILTER_SUBNORMAL(l, v);
189 alpha_lds(rn, v); 215 alpha_lds(rn, v);
190} 216}
191 217
192static inline void 218static inline void
193ldt(unsigned int rn, t_float *v, struct lwp *l) 219ldt(unsigned int rn, t_float *v, struct lwp *l)
194{ 220{
195 POSTFILTER_SUBNORMAL(l, v); 221 POSTFILTER_SUBNORMAL(l, v);
196 alpha_ldt(rn, v); 222 alpha_ldt(rn, v);
197} 223}
198 224
199static float64 225static float64
200compare_lt(float64 a, float64 b) 226compare_lt(float64 a, float64 b)
201{ 227{
202 return CMP_RESULT(float64_lt_quiet(a, b)); 228 return CMP_RESULT(float64_lt_quiet(a, b));
203} 229}
204 230
205static float64 231static float64
206compare_le(float64 a, float64 b) 232compare_le(float64 a, float64 b)
207{ 233{
208 return CMP_RESULT(float64_le_quiet(a, b)); 234 return CMP_RESULT(float64_le_quiet(a, b));
209} 235}
210 236
211static float64 237static float64
212compare_un(float64 a, float64 b) 238compare_un(float64 a, float64 b)
213{ 239{
214 if (float64_is_nan(a) | float64_is_nan(b)) { 240 if (float64_is_nan(a) | float64_is_nan(b)) {
215 if (float64_is_signaling_nan(a) | float64_is_signaling_nan(b)) 241 if (float64_is_signaling_nan(a) | float64_is_signaling_nan(b))
216 float_set_invalid(); 242 float_set_invalid();
217 return CMP_RESULT(1); 243 return CMP_RESULT(1);
218 } 244 }
219 return CMP_RESULT(0); 245 return CMP_RESULT(0);
220} 246}
221 247
222static float64 248static float64
223compare_eq(float64 a, float64 b) 249compare_eq(float64 a, float64 b)
224{ 250{
225 return CMP_RESULT(float64_eq(a, b)); 251 return CMP_RESULT(float64_eq(a, b));
226} 252}
227/* 253/*
228 * A note regarding the VAX FP ops. 254 * A note regarding the VAX FP ops.
229 * 255 *
230 * The AARM gives us complete leeway to set or not set status flags on VAX 256 * The AARM gives us complete leeway to set or not set status flags on VAX
231 * ops, but we do any subnorm, NaN and dirty zero fixups anyway, and we set 257 * ops, but we do any subnorm, NaN and dirty zero fixups anyway, and we set
232 * flags by IEEE rules. Many ops are common to d/f/g and s/t source types. 258 * flags by IEEE rules. Many ops are common to d/f/g and s/t source types.
233 * For the purely vax ones, it's hard to imagine ever running them. 259 * For the purely vax ones, it's hard to imagine ever running them.
234 * (Generated VAX fp ops with completion flags? Hmm.) We are careful never 260 * (Generated VAX fp ops with completion flags? Hmm.) We are careful never
235 * to panic, assert, or print unlimited output based on a path through the 261 * to panic, assert, or print unlimited output based on a path through the
236 * decoder, so weird cases don't become security issues. 262 * decoder, so weird cases don't become security issues.
237 */ 263 */
238static void 264static void
239cvt_qs_ts_st_gf_qf(uint32_t inst_bits, struct lwp *l) 265cvt_qs_ts_st_gf_qf(uint32_t inst_bits, struct lwp *l)
240{ 266{
241 t_float tfb, tfc; 267 t_float tfb, tfc;
242 s_float sfb, sfc; 268 s_float sfb, sfc;
243 alpha_instruction inst; 269 alpha_instruction inst;
244 270
245 inst.bits = inst_bits; 271 inst.bits = inst_bits;
246 /* 272 /*
247 * cvtst and cvtts have the same opcode, function, and source. The 273 * cvtst and cvtts have the same opcode, function, and source. The
248 * distinction for cvtst is hidden in the illegal modifier combinations. 274 * distinction for cvtst is hidden in the illegal modifier combinations.
249 * We decode even the non-/s modifier, so that the fix-up-always mode 275 * We decode even the non-/s modifier, so that the fix-up-always mode
250 * works on ev6 and later. The rounding bits are unused and fixed for 276 * works on ev6 and later. The rounding bits are unused and fixed for
251 * cvtst, so we check those too. 277 * cvtst, so we check those too.
252 */ 278 */
253 switch(inst.float_format.function) { 279 switch(inst.float_format.function) {
254 case op_cvtst: 280 case op_cvtst:
255 case op_cvtst_u: 281 case op_cvtst_u:
256 sts(inst.float_detail.fb, &sfb, l); 282 sts(inst.float_detail.fb, &sfb, l);
257 tfc.i = float32_to_float64(sfb.i); 283 tfc.i = float32_to_float64(sfb.i);
258 ldt(inst.float_detail.fc, &tfc, l); 284 ldt(inst.float_detail.fc, &tfc, l);
259 return; 285 return;
260 } 286 }
261 if(inst.float_detail.src == 2) { 287 if(inst.float_detail.src == 2) {
262 stt(inst.float_detail.fb, &tfb, l); 288 stt(inst.float_detail.fb, &tfb, l);
263 sfc.i = float64_to_float32(tfb.i); 289 sfc.i = float64_to_float32(tfb.i);
264 lds(inst.float_detail.fc, &sfc, l); 290 lds(inst.float_detail.fc, &sfc, l);
265 return; 291 return;
266 } 292 }
267 /* 0: S/F */ 293 /* 0: S/F */
268 /* 1: /D */ 294 /* 1: /D */
269 /* 3: Q/Q */ 295 /* 3: Q/Q */
270 this_cannot_happen(5, inst.generic_format.opcode); 296 this_cannot_happen(5, inst.generic_format.opcode);
271 tfc.i = FLOAT64QNAN; 297 tfc.i = FLOAT64QNAN;
272 ldt(inst.float_detail.fc, &tfc, l); 298 ldt(inst.float_detail.fc, &tfc, l);
273 return; 299 return;
274} 300}
275 301
276static void 302static void
277cvt_gd(uint32_t inst_bits, struct lwp *l) 303cvt_gd(uint32_t inst_bits, struct lwp *l)
278{ 304{
279 t_float tfb, tfc; 305 t_float tfb, tfc;
280 alpha_instruction inst; 306 alpha_instruction inst;
281 307
282 inst.bits = inst_bits; 308 inst.bits = inst_bits;
283 stt(inst.float_detail.fb, &tfb, l); 309 stt(inst.float_detail.fb, &tfb, l);
284 (void) float64_to_float32(tfb.i); 310 (void) float64_to_float32(tfb.i);
285 l->l_md.md_flags &= ~NETBSD_FLAG_TO_FP_C(FP_X_IMP); 311 l->l_md.md_flags &= ~NETBSD_FLAG_TO_FP_C(FP_X_IMP);
286 tfc.i = float64_add(tfb.i, (float64)0); 312 tfc.i = float64_add(tfb.i, (float64)0);
287 ldt(inst.float_detail.fc, &tfc, l); 313 ldt(inst.float_detail.fc, &tfc, l);
288} 314}
289 315
290static void 316static void
291cvt_qt_dg_qg(uint32_t inst_bits, struct lwp *l) 317cvt_qt_dg_qg(uint32_t inst_bits, struct lwp *l)
292{ 318{
293 t_float tfb, tfc; 319 t_float tfb, tfc;
294 alpha_instruction inst; 320 alpha_instruction inst;
295 321
296 inst.bits = inst_bits; 322 inst.bits = inst_bits;
297 switch(inst.float_detail.src) { 323 switch(inst.float_detail.src) {
298 case 0: /* S/F */ 324 case 0: /* S/F */
299 this_cannot_happen(3, inst.bits); 325 this_cannot_happen(3, inst.bits);
300 /* fall thru */ 326 /* fall thru */
301 case 1: /* D */ 327 case 1: /* D */
302 /* VAX dirty 0's and reserved ops => UNPREDICTABLE */ 328 /* VAX dirty 0's and reserved ops => UNPREDICTABLE */
303 /* We've done what's important by just not trapping */ 329 /* We've done what's important by just not trapping */
304 tfc.i = 0; 330 tfc.i = 0;
305 break; 331 break;
306 case 2: /* T/G */ 332 case 2: /* T/G */
307 this_cannot_happen(4, inst.bits); 333 this_cannot_happen(4, inst.bits);
308 tfc.i = 0; 334 tfc.i = 0;
309 break; 335 break;
310 case 3: /* Q/Q */ 336 case 3: /* Q/Q */
311 stt(inst.float_detail.fb, &tfb, l); 337 stt(inst.float_detail.fb, &tfb, l);
312 tfc.i = int64_to_float64(tfb.i); 338 tfc.i = int64_to_float64(tfb.i);
313 break; 339 break;
314 } 340 }
315 alpha_ldt(inst.float_detail.fc, &tfc); 341 alpha_ldt(inst.float_detail.fc, &tfc);
316} 342}
317/* 343/*
318 * XXX: AARM and 754 seem to disagree here, also, beware of softfloat's 344 * XXX: AARM and 754 seem to disagree here, also, beware of softfloat's
319 * unfortunate habit of always returning the nontrapping result. 345 * unfortunate habit of always returning the nontrapping result.
320 * XXX: there are several apparent AARM/AAH disagreements, as well as 346 * XXX: there are several apparent AARM/AAH disagreements, as well as
321 * the issue of trap handler pc and trapping results. 347 * the issue of trap handler pc and trapping results.
322 */ 348 */
323static void 349static void
324cvt_tq_gq(uint32_t inst_bits, struct lwp *l) 350cvt_tq_gq(uint32_t inst_bits, struct lwp *l)
325{ 351{
326 t_float tfb, tfc; 352 t_float tfb, tfc;
327 alpha_instruction inst; 353 alpha_instruction inst;
328 354
329 inst.bits = inst_bits; 355 inst.bits = inst_bits;
330 stt(inst.float_detail.fb, &tfb, l); 356 stt(inst.float_detail.fb, &tfb, l);
331 tfc.i = tfb.sign ? float64_to_int64(tfb.i) : float64_to_uint64(tfb.i); 357 tfc.i = tfb.sign ? float64_to_int64(tfb.i) : float64_to_uint64(tfb.i);
332 alpha_ldt(inst.float_detail.fc, &tfc); /* yes, ldt */ 358 alpha_ldt(inst.float_detail.fc, &tfc); /* yes, ldt */
333} 359}
334 360
335static uint64_t 361static uint64_t
336fp_c_to_fpcr_1(uint64_t fpcr, uint64_t fp_c) 362fp_c_to_fpcr_1(uint64_t fpcr, uint64_t fp_c)
337{ 363{
338 uint64_t disables; 364 uint64_t disables;
339 365
340 /* 366 /*
341 * It's hard to arrange for conforming bit fields, because the FP_C 367 * It's hard to arrange for conforming bit fields, because the FP_C
342 * and the FPCR are both architected, with specified (and relatively 368 * and the FPCR are both architected, with specified (and relatively
343 * scrambled) bit numbers. Defining an internal unscrambled FP_C 369 * scrambled) bit numbers. Defining an internal unscrambled FP_C
344 * wouldn't help much, because every user exception requires the 370 * wouldn't help much, because every user exception requires the
345 * architected bit order in the sigcontext. 371 * architected bit order in the sigcontext.
346 * 372 *
347 * Programs that fiddle with the fpcr exception bits (instead of fp_c) 373 * Programs that fiddle with the fpcr exception bits (instead of fp_c)
348 * will lose, because those bits can be and usually are subsetted; 374 * will lose, because those bits can be and usually are subsetted;
349 * the official home is in the fp_c. Furthermore, the kernel puts 375 * the official home is in the fp_c. Furthermore, the kernel puts
350 * phony enables (it lies :-) in the fpcr in order to get control when 376 * phony enables (it lies :-) in the fpcr in order to get control when
351 * it is necessary to initially set a sticky bit. 377 * it is necessary to initially set a sticky bit.
352 */ 378 */
353 379
354 fpcr &= FPCR_DYN(3); 380 fpcr &= FPCR_DYN_RM;
355 381
356 /* 382 /*
357 * enable traps = case where flag bit is clear OR program wants a trap 383 * enable traps = case where flag bit is clear AND program wants a trap
358 * enables = ~flags | mask 384 *
 385 * enables = ~flags & mask
359 * disables = ~(~flags | mask) 386 * disables = ~(~flags | mask)
360 * disables = flags & ~mask. Thank you, Augustus De Morgan (1806-1871) 387 * disables = flags & ~mask. Thank you, Augustus De Morgan (1806-1871)
361 */ 388 */
362 disables = FP_C_TO_NETBSD_FLAG(fp_c) & ~FP_C_TO_NETBSD_MASK(fp_c); 389 disables = FP_C_TO_NETBSD_FLAG(fp_c) & ~FP_C_TO_NETBSD_MASK(fp_c);
363 390
364 fpcr |= (disables & (FP_X_IMP | FP_X_UFL)) << (61 - 3); 391 fpcr |= (disables & (FP_X_IMP | FP_X_UFL)) << (61 - 3);
365 fpcr |= (disables & (FP_X_OFL | FP_X_DZ | FP_X_INV)) << (49 - 0); 392 fpcr |= (disables & (FP_X_OFL | FP_X_DZ | FP_X_INV)) << (49 - 0);
366 393
367# if !(FP_X_INV == 1 && FP_X_DZ == 2 && FP_X_OFL == 4 && \ 
368 FP_X_UFL == 8 && FP_X_IMP == 16 && FP_X_IOV == 32 && \ 
369 FP_X_UFL << (61 - 3) == FPCR_UNFD && \ 
370 FP_X_IMP << (61 - 3) == FPCR_INED && \ 
371 FP_X_OFL << (49 - 0) == FPCR_OVFD) 
372# error "Assertion failed" 
373 /* 
374 * We don't care about the other built-in bit numbers because they 
375 * have been architecturally specified. 
376 */ 
377# endif 
378 
379 fpcr |= fp_c & FP_C_MIRRORED << (FPCR_MIR_START - FP_C_MIR_START); 394 fpcr |= fp_c & FP_C_MIRRORED << (FPCR_MIR_START - FP_C_MIR_START);
380 fpcr |= (fp_c & IEEE_MAP_DMZ) << 36; 395 fpcr |= (fp_c & IEEE_MAP_DMZ) << 36;
381 if (fp_c & FP_C_MIRRORED) 396 if (fp_c & FP_C_MIRRORED)
382 fpcr |= FPCR_SUM; 397 fpcr |= FPCR_SUM;
383 if (fp_c & IEEE_MAP_UMZ) 398 if (fp_c & IEEE_MAP_UMZ)
384 fpcr |= FPCR_UNDZ | FPCR_UNFD; 399 fpcr |= FPCR_UNDZ | FPCR_UNFD;
385 fpcr |= (~fp_c & IEEE_TRAP_ENABLE_DNO) << 41; 400 fpcr |= (~fp_c & IEEE_TRAP_ENABLE_DNO) << 41;
386 return fpcr; 401 return fpcr;
387} 402}
388 403
389static void 404static void
390fp_c_to_fpcr(struct lwp *l) 405fp_c_to_fpcr(struct lwp *l)
391{ 406{
392 alpha_write_fpcr(fp_c_to_fpcr_1(alpha_read_fpcr(), l->l_md.md_flags)); 407 alpha_write_fpcr(fp_c_to_fpcr_1(alpha_read_fpcr(), l->l_md.md_flags));
393} 408}
394 409
395void 410void
396alpha_write_fp_c(struct lwp *l, uint64_t fp_c) 411alpha_write_fp_c(struct lwp *l, uint64_t fp_c)
397{ 412{
398 uint64_t md_flags; 413 uint64_t md_flags;
399 414
400 fp_c &= MDLWP_FP_C; 415 fp_c &= MDLWP_FP_C;
401 md_flags = l->l_md.md_flags; 416 md_flags = l->l_md.md_flags;
402 if ((md_flags & MDLWP_FP_C) == fp_c) 417 if ((md_flags & MDLWP_FP_C) == fp_c)
403 return; 418 return;
404 l->l_md.md_flags = (md_flags & ~MDLWP_FP_C) | fp_c; 419 l->l_md.md_flags = (md_flags & ~MDLWP_FP_C) | fp_c;
405 kpreempt_disable(); 420 kpreempt_disable();
406 if (md_flags & MDLWP_FPACTIVE) { 421 if (md_flags & MDLWP_FPACTIVE) {
407 alpha_pal_wrfen(1); 422 alpha_pal_wrfen(1);
408 fp_c_to_fpcr(l); 423 fp_c_to_fpcr(l);
409 alpha_pal_wrfen(0); 424 alpha_pal_wrfen(0);
 425 } else {
 426 struct pcb *pcb = l->l_addr;
 427
 428 pcb->pcb_fp.fpr_cr =
 429 fp_c_to_fpcr_1(pcb->pcb_fp.fpr_cr, l->l_md.md_flags);
410 } 430 }
411 kpreempt_enable(); 431 kpreempt_enable();
412} 432}
413 433
414uint64_t 434uint64_t
415alpha_read_fp_c(struct lwp *l) 435alpha_read_fp_c(struct lwp *l)
416{ 436{
417 /* 437 /*
418 * A possibly-desireable EV6-specific optimization would deviate from 438 * A possibly-desireable EV6-specific optimization would deviate from
419 * the Alpha Architecture spec and keep some FP_C bits in the FPCR, 439 * the Alpha Architecture spec and keep some FP_C bits in the FPCR,
420 * but in a transparent way. Some of the code for that would need to 440 * but in a transparent way. Some of the code for that would need to
421 * go right here. 441 * go right here.
422 */ 442 */
423 return l->l_md.md_flags & MDLWP_FP_C; 443 return l->l_md.md_flags & MDLWP_FP_C;
424} 444}
425 445
426static float64 446static float64
427float64_unk(float64 a, float64 b) 447float64_unk(float64 a, float64 b)
428{ 448{
429 return 0; 449 return 0;
430} 450}
431 451
432/* 452/*
433 * The real function field encodings for IEEE and VAX FP instructions. 453 * The real function field encodings for IEEE and VAX FP instructions.
434 * 454 *
435 * Since there is only one operand type field, the cvtXX instructions 455 * Since there is only one operand type field, the cvtXX instructions
436 * require a variety of special cases, and these have to be analyzed as 456 * require a variety of special cases, and these have to be analyzed as
437 * they don't always fit into the field descriptions in AARM section I. 457 * they don't always fit into the field descriptions in AARM section I.
438 * 458 *
439 * Lots of staring at bits in the appendix shows what's really going on. 459 * Lots of staring at bits in the appendix shows what's really going on.
440 * 460 *
441 * | | 461 * | |
442 * 15 14 13|12 11 10 09|08 07 06 05 462 * 15 14 13|12 11 10 09|08 07 06 05
443 * --------======------============ 463 * --------======------============
444 * TRAP : RND : SRC : FUNCTION : 464 * TRAP : RND : SRC : FUNCTION :
445 * 0 0 0:. . .:. . . . . . . . . . . . Imprecise 465 * 0 0 0:. . .:. . . . . . . . . . . . Imprecise
446 * 0 0 1|. . .:. . . . . . . . . . . ./U underflow enable (if FP output) 466 * 0 0 1|. . .:. . . . . . . . . . . ./U underflow enable (if FP output)
447 * | /V overfloat enable (if int output) 467 * | /V overfloat enable (if int output)
448 * 0 1 0:. . .:. . . . . . . . . . . ."Unsupported", but used for CVTST 468 * 0 1 0:. . .:. . . . . . . . . . . ."Unsupported", but used for CVTST
449 * 0 1 1|. . .:. . . . . . . . . . . . Unsupported 469 * 0 1 1|. . .:. . . . . . . . . . . . Unsupported
450 * 1 0 0:. . .:. . . . . . . . . . . ./S software completion (VAX only) 470 * 1 0 0:. . .:. . . . . . . . . . . ./S software completion (VAX only)
451 * 1 0 1|. . .:. . . . . . . . . . . ./SU 471 * 1 0 1|. . .:. . . . . . . . . . . ./SU
452 * | /SV 472 * | /SV
453 * 1 1 0:. . .:. . . . . . . . . . . ."Unsupported", but used for CVTST/S 473 * 1 1 0:. . .:. . . . . . . . . . . ."Unsupported", but used for CVTST/S
454 * 1 1 1|. . .:. . . . . . . . . . . ./SUI (if FP output) (IEEE only) 474 * 1 1 1|. . .:. . . . . . . . . . . ./SUI (if FP output) (IEEE only)
455 * | /SVI (if int output) (IEEE only) 475 * | /SVI (if int output) (IEEE only)
456 * S I UV: In other words: bits 15:13 are S:I:UV, except that _usually_ 476 * S I UV: In other words: bits 15:13 are S:I:UV, except that _usually_
457 * | not all combinations are valid. 477 * | not all combinations are valid.
458 * | | 478 * | |
459 * 15 14 13|12 11 10 09|08 07 06 05 479 * 15 14 13|12 11 10 09|08 07 06 05
460 * --------======------============ 480 * --------======------============
461 * TRAP : RND : SRC : FUNCTION : 481 * TRAP : RND : SRC : FUNCTION :
462 * | 0 0 . . . . . . . . . . . ./C Chopped 482 * | 0 0 . . . . . . . . . . . ./C Chopped
463 * : 0 1 . . . . . . . . . . . ./M Minus Infinity 483 * : 0 1 . . . . . . . . . . . ./M Minus Infinity
464 * | 1 0 . . . . . . . . . . . . Normal 484 * | 1 0 . . . . . . . . . . . . Normal
465 * : 1 1 . . . . . . . . . . . ./D Dynamic (in FPCR: Plus Infinity) 485 * : 1 1 . . . . . . . . . . . ./D Dynamic (in FPCR: Plus Infinity)
466 * | | 486 * | |
467 * 15 14 13|12 11 10 09|08 07 06 05 487 * 15 14 13|12 11 10 09|08 07 06 05
468 * --------======------============ 488 * --------======------============
469 * TRAP : RND : SRC : FUNCTION : 489 * TRAP : RND : SRC : FUNCTION :
470 * 0 0. . . . . . . . . . S/F 490 * 0 0. . . . . . . . . . S/F
471 * 0 1. . . . . . . . . . -/D 491 * 0 1. . . . . . . . . . -/D
472 * 1 0. . . . . . . . . . T/G 492 * 1 0. . . . . . . . . . T/G
473 * 1 1. . . . . . . . . . Q/Q 493 * 1 1. . . . . . . . . . Q/Q
474 * | | 494 * | |
475 * 15 14 13|12 11 10 09|08 07 06 05 495 * 15 14 13|12 11 10 09|08 07 06 05
476 * --------======------============ 496 * --------======------============
477 * TRAP : RND : SRC : FUNCTION : 497 * TRAP : RND : SRC : FUNCTION :
478 * 0 0 0 0 . . . addX 498 * 0 0 0 0 . . . addX
479 * 0 0 0 1 . . . subX 499 * 0 0 0 1 . . . subX
480 * 0 0 1 0 . . . mulX 500 * 0 0 1 0 . . . mulX
481 * 0 0 1 1 . . . divX 501 * 0 0 1 1 . . . divX
482 * 0 1 0 0 . . . cmpXun 502 * 0 1 0 0 . . . cmpXun
483 * 0 1 0 1 . . . cmpXeq 503 * 0 1 0 1 . . . cmpXeq
484 * 0 1 1 0 . . . cmpXlt 504 * 0 1 1 0 . . . cmpXlt
485 * 0 1 1 1 . . . cmpXle 505 * 0 1 1 1 . . . cmpXle
486 * 1 0 0 0 . . . reserved 506 * 1 0 0 0 . . . reserved
487 * 1 0 0 1 . . . reserved 507 * 1 0 0 1 . . . reserved
488 * 1 0 1 0 . . . sqrt[fg] (op_fix, not exactly "vax") 508 * 1 0 1 0 . . . sqrt[fg] (op_fix, not exactly "vax")
489 * 1 0 1 1 . . . sqrt[st] (op_fix, not exactly "ieee") 509 * 1 0 1 1 . . . sqrt[st] (op_fix, not exactly "ieee")
490 * 1 1 0 0 . . . cvtXs/f (cvt[qt]s, cvtst(!), cvt[gq]f) 510 * 1 1 0 0 . . . cvtXs/f (cvt[qt]s, cvtst(!), cvt[gq]f)
491 * 1 1 0 1 . . . cvtXd (vax only) 511 * 1 1 0 1 . . . cvtXd (vax only)
492 * 1 1 1 0 . . . cvtXt/g (cvtqt, cvt[dq]g only) 512 * 1 1 1 0 . . . cvtXt/g (cvtqt, cvt[dq]g only)
493 * 1 1 1 1 . . . cvtXq/q (cvttq, cvtgq) 513 * 1 1 1 1 . . . cvtXq/q (cvttq, cvtgq)
494 * | | 514 * | |
495 * 15 14 13|12 11 10 09|08 07 06 05 the twilight zone 515 * 15 14 13|12 11 10 09|08 07 06 05 the twilight zone
496 * --------======------============ 516 * --------======------============
497 * TRAP : RND : SRC : FUNCTION : 517 * TRAP : RND : SRC : FUNCTION :
498 * /s /i /u x x 1 0 1 1 0 0 . . . cvtts, /siu only 0, 1, 5, 7 518 * /s /i /u x x 1 0 1 1 0 0 . . . cvtts, /siu only 0, 1, 5, 7
499 * 0 1 0 1 0 1 0 1 1 0 0 . . . cvtst (src == T (!)) 2ac NOT /S 519 * 0 1 0 1 0 1 0 1 1 0 0 . . . cvtst (src == T (!)) 2ac NOT /S
500 * 1 1 0 1 0 1 0 1 1 0 0 . . . cvtst/s (src == T (!)) 6ac 520 * 1 1 0 1 0 1 0 1 1 0 0 . . . cvtst/s (src == T (!)) 6ac
501 * x 0 x x x x 0 1 1 1 1 . . . cvttq/_ (src == T) 521 * x 0 x x x x 0 1 1 1 1 . . . cvttq/_ (src == T)
502 */ 522 */
503 523
504static void 524static void
 525print_fp_instruction(alpha_instruction *pc, struct lwp *l, uint32_t bits)
 526{
 527#if defined(DDB)
 528 char buf[32];
 529 struct alpha_print_instruction_context ctx = {
 530 .insn.bits = bits,
 531 .pc = (unsigned long)pc,
 532 .buf = buf,
 533 .bufsize = sizeof(buf),
 534 };
 535
 536 (void) alpha_print_instruction(&ctx);
 537
 538 printf("INSN [%s:%d] @0x%lx -> %s\n",
 539 l->l_proc->p_comm, l->l_proc->p_pid, ctx.pc, ctx.buf);
 540#else
 541 alpha_instruction insn = {
 542 .bits = bits,
 543 };
 544 printf("INSN [%s:%d] @0x%lx -> opc=0x%x func=0x%x fa=%d fb=%d fc=%d\n",
 545 l->l_proc->p_comm, l->l_proc->p_pid, (unsigned long)pc,
 546 insn.float_format.opcode, insn.float_format.function,
 547 insn.float_format.fa, insn.float_format.fb, insn.float_format.fc);
 548 printf("INSN [%s:%d] @0x%lx -> trp=0x%x rnd=0x%x src=0x%x fn=0x%x\n",
 549 l->l_proc->p_comm, l->l_proc->p_pid, (unsigned long)pc,
 550 insn.float_detail.trp, insn.float_detail.rnd,
 551 insn.float_detail.src, insn.float_detail.opclass);
 552#endif /* DDB */
 553}
 554
 555static void
505alpha_fp_interpret(alpha_instruction *pc, struct lwp *l, uint32_t bits) 556alpha_fp_interpret(alpha_instruction *pc, struct lwp *l, uint32_t bits)
506{ 557{
507 s_float sfa, sfb, sfc; 558 s_float sfa, sfb, sfc;
508 t_float tfa, tfb, tfc; 559 t_float tfa, tfb, tfc;
509 alpha_instruction inst; 560 alpha_instruction inst;
510 561
 562 if (alpha_fp_complete_debug) {
 563 print_fp_instruction(pc, l, bits);
 564 }
 565
511 inst.bits = bits; 566 inst.bits = bits;
512 switch(inst.generic_format.opcode) { 567 switch(inst.generic_format.opcode) {
513 default: 568 default:
514 /* this "cannot happen" */ 569 /* this "cannot happen" */
515 this_cannot_happen(2, inst.bits); 570 this_cannot_happen(2, inst.bits);
516 return; 571 return;
517 case op_any_float: 572 case op_any_float:
518 if (inst.float_format.function == op_cvtql_sv || 573 if (inst.float_format.function == op_cvtql_sv ||
519 inst.float_format.function == op_cvtql_v) { 574 inst.float_format.function == op_cvtql_v) {
520 alpha_stt(inst.float_detail.fb, &tfb); 575 alpha_stt(inst.float_detail.fb, &tfb);
521 sfc.i = (int64_t)tfb.i >= 0L ? INT_MAX : INT_MIN; 576 sfc.i = (int64_t)tfb.i >= 0L ? INT_MAX : INT_MIN;
522 alpha_lds(inst.float_detail.fc, &sfc); 577 alpha_lds(inst.float_detail.fc, &sfc);
523 float_raise(FP_X_INV); 578 float_raise(FP_X_INV);
524 } else { 579 } else {
525 ++alpha_shadow.nilanyop; 580 ++alpha_shadow.nilanyop;
526 this_cannot_happen(3, inst.bits); 581 this_cannot_happen(3, inst.bits);
527 } 582 }
528 break; 583 break;
529 case op_vax_float: 584 case op_vax_float:
530 ++alpha_shadow.vax; /* fall thru */ 585 ++alpha_shadow.vax; /* fall thru */
531 case op_ieee_float: 586 case op_ieee_float:
532 case op_fix_float: 587 case op_fix_float:
533 switch(inst.float_detail.src) { 588 switch(inst.float_detail.src) {
534 case op_src_sf: 589 case op_src_sf:
535 sts(inst.float_detail.fb, &sfb, l); 590 sts(inst.float_detail.fb, &sfb, l);
536 if (inst.float_detail.opclass == 10) 591 if (inst.float_detail.opclass == 10)
537 sfc.i = float32_sqrt(sfb.i); 592 sfc.i = float32_sqrt(sfb.i);
538 else if (inst.float_detail.opclass & ~3) { 593 else if (inst.float_detail.opclass & ~3) {
539 this_cannot_happen(1, inst.bits); 594 this_cannot_happen(1, inst.bits);
540 sfc.i = FLOAT32QNAN; 595 sfc.i = FLOAT32QNAN;
541 } else { 596 } else {
542 sts(inst.float_detail.fa, &sfa, l); 597 sts(inst.float_detail.fa, &sfa, l);
543 sfc.i = (*swfp_s[inst.float_detail.opclass])( 598 sfc.i = (*swfp_s[inst.float_detail.opclass])(
544 sfa.i, sfb.i); 599 sfa.i, sfb.i);
545 } 600 }
546 lds(inst.float_detail.fc, &sfc, l); 601 lds(inst.float_detail.fc, &sfc, l);
547 break; 602 break;
548 case op_src_xd: 603 case op_src_xd:
549 case op_src_tg: 604 case op_src_tg:
550 if (inst.float_detail.opclass >= 12) 605 if (inst.float_detail.opclass >= 12)
551 (*swfp_cvt[inst.float_detail.opclass - 12])( 606 (*swfp_cvt[inst.float_detail.opclass - 12])(
552 inst.bits, l); 607 inst.bits, l);
553 else { 608 else {
554 stt(inst.float_detail.fb, &tfb, l); 609 stt(inst.float_detail.fb, &tfb, l);
555 if (inst.float_detail.opclass == 10) 610 if (inst.float_detail.opclass == 10)
556 tfc.i = float64_sqrt(tfb.i); 611 tfc.i = float64_sqrt(tfb.i);
557 else { 612 else {
558 stt(inst.float_detail.fa, &tfa, l); 613 stt(inst.float_detail.fa, &tfa, l);
559 tfc.i = (*swfp_t[inst.float_detail 614 tfc.i = (*swfp_t[inst.float_detail
560 .opclass])(tfa.i, tfb.i); 615 .opclass])(tfa.i, tfb.i);
561 } 616 }
562 ldt(inst.float_detail.fc, &tfc, l); 617 ldt(inst.float_detail.fc, &tfc, l);
563 } 618 }
564 break; 619 break;
565 case op_src_qq: 620 case op_src_qq:
566 float_raise(FP_X_IMP); 621 float_raise(FP_X_IMP);
567 break; 622 break;
568 } 623 }
569 } 624 }
570} 625}
571 626
572static int 627static int
573alpha_fp_complete_at(alpha_instruction *trigger_pc, struct lwp *l, 628alpha_fp_complete_at(alpha_instruction *trigger_pc, struct lwp *l,
574 uint64_t *ucode) 629 uint64_t *ucode)
575{ 630{
576 int needsig; 631 int needsig;
577 alpha_instruction inst; 632 alpha_instruction inst;
578 uint64_t rm, fpcr, orig_fpcr; 633 uint64_t rm, fpcr, orig_fpcr;
579 uint64_t orig_flags, new_flags, changed_flags, md_flags; 634 uint64_t orig_flags, new_flags, changed_flags, md_flags;
580 635
581 if (__predict_false(copyin(trigger_pc, &inst, sizeof inst))) { 636 if (__predict_false(copyin(trigger_pc, &inst, sizeof inst))) {
582 this_cannot_happen(6, -1); 637 this_cannot_happen(6, -1);
583 return SIGSEGV; 638 return SIGSEGV;
584 } 639 }
585 kpreempt_disable(); 640 kpreempt_disable();
586 if ((curlwp->l_md.md_flags & MDLWP_FPACTIVE) == 0) { 641 if ((curlwp->l_md.md_flags & MDLWP_FPACTIVE) == 0) {
587 fpu_load(); 642 fpu_load();
588 } 643 }
589 alpha_pal_wrfen(1); 644 alpha_pal_wrfen(1);
590 /* 645 /*
591 * If necessary, lie about the dynamic rounding mode so emulation 646 * Alpha FLOAT instructions can override the rounding mode on a
592 * software need go to only one place for it, and so we don't have to 647 * per-instruction basis. If necessary, lie about the dynamic
593 * lock any memory locations or pass a third parameter to every 648 * rounding mode so emulation software need go to only one place
594 * SoftFloat entry point. 649 * for it, and so we don't have to lock any memory locations or
 650 * pass a third parameter to every SoftFloat entry point.
 651 *
 652 * N.B. the rounding mode field of the FLOAT format instructions
 653 * matches that of the FPCR *except* for the value 3, which means
 654 * "dynamic" rounding mode (i.e. what is programmed into the FPCR).
595 */ 655 */
596 orig_fpcr = fpcr = alpha_read_fpcr(); 656 orig_fpcr = fpcr = alpha_read_fpcr();
597 rm = inst.float_detail.rnd; 657 rm = inst.float_detail.rnd;
598 if (__predict_false(rm != 3 /* dynamic */ && rm != (fpcr >> 58 & 3))) { 658 if (__predict_false(rm != 3 /* dynamic */ &&
599 fpcr = (fpcr & ~FPCR_DYN(3)) | FPCR_DYN(rm); 659 rm != __SHIFTOUT(fpcr, FPCR_DYN_RM))) {
 660 fpcr = (fpcr & ~FPCR_DYN_RM) | __SHIFTIN(rm, FPCR_DYN_RM);
600 alpha_write_fpcr(fpcr); 661 alpha_write_fpcr(fpcr);
601 } 662 }
602 orig_flags = FP_C_TO_NETBSD_FLAG(l->l_md.md_flags); 663 orig_flags = FP_C_TO_NETBSD_FLAG(l->l_md.md_flags);
603 664
604 alpha_fp_interpret(trigger_pc, l, inst.bits); 665 alpha_fp_interpret(trigger_pc, l, inst.bits);
605 666
606 md_flags = l->l_md.md_flags; 667 md_flags = l->l_md.md_flags;
607 668
608 new_flags = FP_C_TO_NETBSD_FLAG(md_flags); 669 new_flags = FP_C_TO_NETBSD_FLAG(md_flags);
609 changed_flags = orig_flags ^ new_flags; 670 changed_flags = orig_flags ^ new_flags;
610 KASSERT((orig_flags | changed_flags) == new_flags); /* panic on 1->0 */ 671 KASSERT((orig_flags | changed_flags) == new_flags); /* panic on 1->0 */
611 alpha_write_fpcr(fp_c_to_fpcr_1(orig_fpcr, md_flags)); 672 alpha_write_fpcr(fp_c_to_fpcr_1(orig_fpcr, md_flags));
612 needsig = changed_flags & FP_C_TO_NETBSD_MASK(md_flags); 673 needsig = changed_flags & FP_C_TO_NETBSD_MASK(md_flags);
613 alpha_pal_wrfen(0); 674 alpha_pal_wrfen(0);
614 kpreempt_enable(); 675 kpreempt_enable();
615 if (__predict_false(needsig)) { 676 if (__predict_false(needsig)) {
616 *ucode = needsig; 677 *ucode = needsig;
617 return SIGFPE; 678 return SIGFPE;
618 } 679 }
619 return 0; 680 return 0;
620} 681}
621 682
622int 683int
623alpha_fp_complete(u_long a0, u_long a1, struct lwp *l, uint64_t *ucode) 684alpha_fp_complete(u_long a0, u_long a1, struct lwp *l, uint64_t *ucode)
624{ 685{
625 int t; 686 int t;
626 int sig; 687 int sig;
627 uint64_t op_class; 688 uint64_t op_class;
628 alpha_instruction inst; 689 alpha_instruction inst;
629 /* "trigger_pc" is Compaq's term for the earliest faulting op */ 690 /* "trigger_pc" is Compaq's term for the earliest faulting op */
630 alpha_instruction *trigger_pc, *usertrap_pc; 691 alpha_instruction *trigger_pc, *usertrap_pc;
631 alpha_instruction *pc, *win_begin, tsw[TSWINSIZE]; 692 alpha_instruction *pc, *win_begin, tsw[TSWINSIZE];
632 693
633 sig = SIGFPE; 694 if (alpha_fp_complete_debug) {
 695 printf("%s: [%s:%d] a0[AESR]=0x%lx a1[regmask]=0x%lx "
 696 "FPCR=0x%lx FP_C=0x%lx\n",
 697 __func__, l->l_proc->p_comm, l->l_proc->p_pid,
 698 a0, a1, alpha_read_fpcr(),
 699 l->l_md.md_flags & (MDLWP_FP_C|MDLWP_FPACTIVE));
 700 }
 701
634 pc = (alpha_instruction *)l->l_md.md_tf->tf_regs[FRAME_PC]; 702 pc = (alpha_instruction *)l->l_md.md_tf->tf_regs[FRAME_PC];
635 trigger_pc = pc - 1; /* for ALPHA_AMASK_PAT case */ 703 trigger_pc = pc - 1; /* for ALPHA_AMASK_PAT case */
 704
 705 /*
 706 * Start out with the code mirroring the exception flags
 707 * (FP_X_*). Shift right 1 bit to discard SWC to achieve
 708 * this.
 709 */
 710 *ucode = a0 >> 1;
 711
636 if (cpu_amask & ALPHA_AMASK_PAT) { 712 if (cpu_amask & ALPHA_AMASK_PAT) {
637 /* SWC | INV */ 713 if ((a0 & (ALPHA_AESR_SWC | ALPHA_AESR_INV)) != 0 ||
638 if (a0 & 3 || alpha_fp_sync_complete) { 714 alpha_fp_sync_complete) {
639 sig = alpha_fp_complete_at(trigger_pc, l, ucode); 715 sig = alpha_fp_complete_at(trigger_pc, l, ucode);
640 goto done; 716 goto resolved;
641 } 717 }
642 } 718 }
643 *ucode = a0; 719 if ((a0 & (ALPHA_AESR_SWC | ALPHA_AESR_INV)) == 0)
644 /* SWC | INV */ 720 goto unresolved;
645 if (!(a0 & 3)) 
646 return sig; 
647/* 721/*
648 * At this point we are somewhere in the trap shadow of one or more instruc- 722 * At this point we are somewhere in the trap shadow of one or more instruc-
649 * tions that have trapped with software completion specified. We have a mask 723 * tions that have trapped with software completion specified. We have a mask
650 * of the registers written by trapping instructions. 724 * of the registers written by trapping instructions.
651 * 725 *
652 * Now step backwards through the trap shadow, clearing bits in the 726 * Now step backwards through the trap shadow, clearing bits in the
653 * destination write mask until the trigger instruction is found, and 727 * destination write mask until the trigger instruction is found, and
654 * interpret this one instruction in SW. If a SIGFPE is not required, back up 728 * interpret this one instruction in SW. If a SIGFPE is not required, back up
655 * the PC until just after this instruction and restart. This will execute all 729 * the PC until just after this instruction and restart. This will execute all
656 * trap shadow instructions between the trigger pc and the trap pc twice. 730 * trap shadow instructions between the trigger pc and the trap pc twice.
657 */ 731 */
658 trigger_pc = 0; 732 trigger_pc = 0;
659 win_begin = pc; 733 win_begin = pc;
660 ++alpha_shadow.scans; 734 ++alpha_shadow.scans;
661 t = alpha_shadow.len; 735 t = alpha_shadow.len;
662 for (--pc; a1; --pc) { 736 for (--pc; a1; --pc) {
663 ++alpha_shadow.len; 737 ++alpha_shadow.len;
664 if (pc < win_begin) { 738 if (pc < win_begin) {
665 win_begin = pc - TSWINSIZE + 1; 739 win_begin = pc - TSWINSIZE + 1;
666 if (copyin(win_begin, tsw, sizeof tsw)) { 740 if (copyin(win_begin, tsw, sizeof tsw)) {
667 /* sigh, try to get just one */ 741 /* sigh, try to get just one */
668 win_begin = pc; 742 win_begin = pc;
669 if (copyin(win_begin, tsw, 4)) 743 if (copyin(win_begin, tsw, 4)) {
 744 /*
 745 * We're off the rails here; don't
 746 * bother updating the FP_C.
 747 */
670 return SIGSEGV; 748 return SIGSEGV;
 749 }
671 } 750 }
672 } 751 }
673 assert(win_begin <= pc && !((long)pc & 3)); 752 assert(win_begin <= pc && !((long)pc & 3));
674 inst = tsw[pc - win_begin]; 753 inst = tsw[pc - win_begin];
675 op_class = 1UL << inst.generic_format.opcode; 754 op_class = 1UL << inst.generic_format.opcode;
676 if (op_class & FPUREG_CLASS) { 755 if (op_class & FPUREG_CLASS) {
677 a1 &= ~(1UL << (inst.operate_generic_format.rc + 32)); 756 a1 &= ~(1UL << (inst.operate_generic_format.rc + 32));
678 trigger_pc = pc; 757 trigger_pc = pc;
679 } else if (op_class & CPUREG_CLASS) { 758 } else if (op_class & CPUREG_CLASS) {
680 a1 &= ~(1UL << inst.operate_generic_format.rc); 759 a1 &= ~(1UL << inst.operate_generic_format.rc);
681 trigger_pc = pc; 760 trigger_pc = pc;
682 } else if (op_class & TRAPSHADOWBOUNDARY) { 761 } else if (op_class & TRAPSHADOWBOUNDARY) {
683 if (op_class & CHECKFUNCTIONCODE) { 762 if (op_class & CHECKFUNCTIONCODE) {
684 if (inst.mem_format.displacement == op_trapb || 763 if (inst.mem_format.displacement == op_trapb ||
685 inst.mem_format.displacement == op_excb) 764 inst.mem_format.displacement == op_excb)
686 break; /* code breaks AARM rules */ 765 break; /* code breaks AARM rules */
687 } else 766 } else
688 break; /* code breaks AARM rules */ 767 break; /* code breaks AARM rules */
689 } 768 }
690 /* Some shadow-safe op, probably load, store, or FPTI class */ 769 /* Some shadow-safe op, probably load, store, or FPTI class */
691 } 770 }
692 t = alpha_shadow.len - t; 771 t = alpha_shadow.len - t;
693 if (t > alpha_shadow.max) 772 if (t > alpha_shadow.max)
694 alpha_shadow.max = t; 773 alpha_shadow.max = t;
695 if (__predict_true(trigger_pc != 0 && a1 == 0)) { 774 if (__predict_true(trigger_pc != 0 && a1 == 0)) {
696 ++alpha_shadow.resolved; 775 ++alpha_shadow.resolved;
697 sig = alpha_fp_complete_at(trigger_pc, l, ucode); 776 sig = alpha_fp_complete_at(trigger_pc, l, ucode);
 777 goto resolved;
698 } else { 778 } else {
699 ++alpha_shadow.unresolved; 779 ++alpha_shadow.unresolved;
700 return sig; 
701 } 780 }
702done: 781
 782 unresolved: /* obligatory statement */;
 783 /*
 784 * *ucode contains the exception bits (FP_X_*). We need to
 785 * update the FP_C and FPCR, and send a signal for any new
 786 * trap that is enabled.
 787 */
 788 uint64_t orig_flags = FP_C_TO_NETBSD_FLAG(l->l_md.md_flags);
 789 uint64_t new_flags = orig_flags | *ucode;
 790 uint64_t changed_flags = orig_flags ^ new_flags;
 791 KASSERT((orig_flags | changed_flags) == new_flags); /* panic on 1->0 */
 792
 793 l->l_md.md_flags |= NETBSD_FLAG_TO_FP_C(new_flags);
 794
 795 kpreempt_disable();
 796 if ((curlwp->l_md.md_flags & MDLWP_FPACTIVE) == 0) {
 797 fpu_load();
 798 }
 799 alpha_pal_wrfen(1);
 800 uint64_t orig_fpcr = alpha_read_fpcr();
 801 alpha_write_fpcr(fp_c_to_fpcr_1(orig_fpcr, l->l_md.md_flags));
 802 uint64_t needsig =
 803 changed_flags & FP_C_TO_NETBSD_MASK(l->l_md.md_flags);
 804 alpha_pal_wrfen(0);
 805 kpreempt_enable();
 806
 807 if (__predict_false(needsig)) {
 808 *ucode = needsig;
 809 return SIGFPE;
 810 }
 811 return 0;
 812
 813 resolved:
703 if (sig) { 814 if (sig) {
704 usertrap_pc = trigger_pc + 1; 815 usertrap_pc = trigger_pc + 1;
705 l->l_md.md_tf->tf_regs[FRAME_PC] = (unsigned long)usertrap_pc; 816 l->l_md.md_tf->tf_regs[FRAME_PC] = (unsigned long)usertrap_pc;
706 return sig; 
707 } 817 }
708 return 0; 818 return sig;
709} 819}
710 820
711/* 821/*
712 * Load the floating-point context for the current lwp. 822 * Load the floating-point context for the current lwp.
713 */ 823 */
714void 824void
715fpu_state_load(struct lwp *l, u_int flags) 825fpu_state_load(struct lwp *l, u_int flags)
716{ 826{
717 struct pcb * const pcb = lwp_getpcb(l); 827 struct pcb * const pcb = lwp_getpcb(l);
718 KASSERT(l == curlwp); 828 KASSERT(l == curlwp);
719 829
720#ifdef MULTIPROCESSOR 830#ifdef MULTIPROCESSOR
721 /* 831 /*
722 * If the LWP got switched to another CPU, pcu_switchpoint would have 832 * If the LWP got switched to another CPU, pcu_switchpoint would have
723 * called state_release to clear MDLWP_FPACTIVE. Now that we are back 833 * called state_release to clear MDLWP_FPACTIVE. Now that we are back
724 * on the CPU that has our FP context, set MDLWP_FPACTIVE again. 834 * on the CPU that has our FP context, set MDLWP_FPACTIVE again.
725 */ 835 */
726 if (flags & PCU_REENABLE) { 836 if (flags & PCU_REENABLE) {
727 KASSERT(flags & PCU_VALID); 837 KASSERT(flags & PCU_VALID);
728 l->l_md.md_flags |= MDLWP_FPACTIVE; 838 l->l_md.md_flags |= MDLWP_FPACTIVE;
729 return; 839 return;
730 } 840 }
731#else 841#else
732 KASSERT((flags & PCU_REENABLE) == 0); 842 KASSERT((flags & PCU_REENABLE) == 0);
733#endif 843#endif
734 844
735 /* 845 /*
736 * Instrument FP usage -- if a process had not previously 846 * Instrument FP usage -- if a process had not previously
737 * used FP, mark it as having used FP for the first time, 847 * used FP, mark it as having used FP for the first time,
738 * and count this event. 848 * and count this event.
739 * 849 *
740 * If a process has used FP, count a "used FP, and took 850 * If a process has used FP, count a "used FP, and took
741 * a trap to use it again" event. 851 * a trap to use it again" event.
742 */ 852 */
743 if ((flags & PCU_VALID) == 0) { 853 if ((flags & PCU_VALID) == 0) {
744 atomic_inc_ulong(&fpevent_use.ev_count); 854 atomic_inc_ulong(&fpevent_use.ev_count);
745 } else { 855 } else {
746 atomic_inc_ulong(&fpevent_reuse.ev_count); 856 atomic_inc_ulong(&fpevent_reuse.ev_count);
747 } 857 }
748 858
 859 if (alpha_fp_complete_debug) {
 860 printf("%s: [%s:%d] loading FPCR=0x%lx\n",
 861 __func__, l->l_proc->p_comm, l->l_proc->p_pid,
 862 pcb->pcb_fp.fpr_cr);
 863 }
749 alpha_pal_wrfen(1); 864 alpha_pal_wrfen(1);
750 restorefpstate(&pcb->pcb_fp); 865 restorefpstate(&pcb->pcb_fp);
751 alpha_pal_wrfen(0); 866 alpha_pal_wrfen(0);
752 867
753 l->l_md.md_flags |= MDLWP_FPACTIVE; 868 l->l_md.md_flags |= MDLWP_FPACTIVE;
754} 869}
755 870
756/* 871/*
757 * Save the FPU state. 872 * Save the FPU state.
758 */ 873 */
759 874
760void 875void
761fpu_state_save(struct lwp *l) 876fpu_state_save(struct lwp *l)
762{ 877{
763 struct pcb * const pcb = lwp_getpcb(l); 878 struct pcb * const pcb = lwp_getpcb(l);
764 879
765 alpha_pal_wrfen(1); 880 alpha_pal_wrfen(1);
766 savefpstate(&pcb->pcb_fp); 881 savefpstate(&pcb->pcb_fp);
767 alpha_pal_wrfen(0); 882 alpha_pal_wrfen(0);
 883 if (alpha_fp_complete_debug) {
 884 printf("%s: [%s:%d] saved FPCR=0x%lx\n",
 885 __func__, l->l_proc->p_comm, l->l_proc->p_pid,
 886 pcb->pcb_fp.fpr_cr);
 887 }
768} 888}
769 889
770/* 890/*
771 * Release the FPU. 891 * Release the FPU.
772 */ 892 */
773void 893void
774fpu_state_release(struct lwp *l) 894fpu_state_release(struct lwp *l)
775{ 895{
776 l->l_md.md_flags &= ~MDLWP_FPACTIVE; 896 l->l_md.md_flags &= ~MDLWP_FPACTIVE;
777} 897}

cvs diff -r1.374 -r1.375 src/sys/arch/alpha/alpha/machdep.c (switch to unified diff)

--- src/sys/arch/alpha/alpha/machdep.c 2021/07/11 01:58:41 1.374
+++ src/sys/arch/alpha/alpha/machdep.c 2021/07/22 01:39:18 1.375
@@ -1,1986 +1,1994 @@ @@ -1,1986 +1,1994 @@
1/* $NetBSD: machdep.c,v 1.374 2021/07/11 01:58:41 thorpej Exp $ */ 1/* $NetBSD: machdep.c,v 1.375 2021/07/22 01:39:18 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 1999, 2000, 2019, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 1999, 2000, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center and by Chris G. Demetriou. 9 * NASA Ames Research Center and by Chris G. Demetriou.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. 34 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
35 * All rights reserved. 35 * All rights reserved.
36 * 36 *
37 * Author: Chris G. Demetriou 37 * Author: Chris G. Demetriou
38 * 38 *
39 * Permission to use, copy, modify and distribute this software and 39 * Permission to use, copy, modify and distribute this software and
40 * its documentation is hereby granted, provided that both the copyright 40 * its documentation is hereby granted, provided that both the copyright
41 * notice and this permission notice appear in all copies of the 41 * notice and this permission notice appear in all copies of the
42 * software, derivative works or modified versions, and any portions 42 * software, derivative works or modified versions, and any portions
43 * thereof, and that both notices appear in supporting documentation. 43 * thereof, and that both notices appear in supporting documentation.
44 * 44 *
45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
48 * 48 *
49 * Carnegie Mellon requests users of this software to return to 49 * Carnegie Mellon requests users of this software to return to
50 * 50 *
51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
52 * School of Computer Science 52 * School of Computer Science
53 * Carnegie Mellon University 53 * Carnegie Mellon University
54 * Pittsburgh PA 15213-3890 54 * Pittsburgh PA 15213-3890
55 * 55 *
56 * any improvements or extensions that they make and grant Carnegie the 56 * any improvements or extensions that they make and grant Carnegie the
57 * rights to redistribute these changes. 57 * rights to redistribute these changes.
58 */ 58 */
59 59
60#include "opt_ddb.h" 60#include "opt_ddb.h"
61#include "opt_kgdb.h" 61#include "opt_kgdb.h"
62#include "opt_modular.h" 62#include "opt_modular.h"
63#include "opt_multiprocessor.h" 63#include "opt_multiprocessor.h"
64#include "opt_dec_3000_300.h" 64#include "opt_dec_3000_300.h"
65#include "opt_dec_3000_500.h" 65#include "opt_dec_3000_500.h"
66#include "opt_execfmt.h" 66#include "opt_execfmt.h"
67 67
68#define __RWLOCK_PRIVATE  68#define __RWLOCK_PRIVATE
69 69
70#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 70#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
71 71
72__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.374 2021/07/11 01:58:41 thorpej Exp $"); 72__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.375 2021/07/22 01:39:18 thorpej Exp $");
73 73
74#include <sys/param.h> 74#include <sys/param.h>
75#include <sys/systm.h> 75#include <sys/systm.h>
76#include <sys/signalvar.h> 76#include <sys/signalvar.h>
77#include <sys/kernel.h> 77#include <sys/kernel.h>
78#include <sys/cpu.h> 78#include <sys/cpu.h>
79#include <sys/proc.h> 79#include <sys/proc.h>
80#include <sys/ras.h> 80#include <sys/ras.h>
81#include <sys/sched.h> 81#include <sys/sched.h>
82#include <sys/reboot.h> 82#include <sys/reboot.h>
83#include <sys/device.h> 83#include <sys/device.h>
84#include <sys/module.h> 84#include <sys/module.h>
85#include <sys/mman.h> 85#include <sys/mman.h>
86#include <sys/msgbuf.h> 86#include <sys/msgbuf.h>
87#include <sys/ioctl.h> 87#include <sys/ioctl.h>
88#include <sys/tty.h> 88#include <sys/tty.h>
89#include <sys/exec.h> 89#include <sys/exec.h>
90#include <sys/exec_aout.h> /* for MID_* */ 90#include <sys/exec_aout.h> /* for MID_* */
91#include <sys/exec_ecoff.h> 91#include <sys/exec_ecoff.h>
92#include <sys/core.h> 92#include <sys/core.h>
93#include <sys/kcore.h> 93#include <sys/kcore.h>
94#include <sys/ucontext.h> 94#include <sys/ucontext.h>
95#include <sys/conf.h> 95#include <sys/conf.h>
96#include <sys/ksyms.h> 96#include <sys/ksyms.h>
97#include <sys/kauth.h> 97#include <sys/kauth.h>
98#include <sys/atomic.h> 98#include <sys/atomic.h>
99#include <sys/cpu.h> 99#include <sys/cpu.h>
100#include <sys/rwlock.h> 100#include <sys/rwlock.h>
101 101
102#include <machine/kcore.h> 102#include <machine/kcore.h>
103#include <machine/fpu.h> 103#include <machine/fpu.h>
104 104
105#include <sys/mount.h> 105#include <sys/mount.h>
106#include <sys/syscallargs.h> 106#include <sys/syscallargs.h>
107 107
108#include <uvm/uvm.h> 108#include <uvm/uvm.h>
109#include <sys/sysctl.h> 109#include <sys/sysctl.h>
110 110
111#include <dev/cons.h> 111#include <dev/cons.h>
112#include <dev/mm.h> 112#include <dev/mm.h>
113 113
114#include <machine/autoconf.h> 114#include <machine/autoconf.h>
115#include <machine/reg.h> 115#include <machine/reg.h>
116#include <machine/rpb.h> 116#include <machine/rpb.h>
117#include <machine/prom.h> 117#include <machine/prom.h>
118#include <machine/cpuconf.h> 118#include <machine/cpuconf.h>
119#include <machine/ieeefp.h> 119#include <machine/ieeefp.h>
120 120
121#ifdef DDB 121#ifdef DDB
122#include <machine/db_machdep.h> 122#include <machine/db_machdep.h>
123#include <ddb/db_access.h> 123#include <ddb/db_access.h>
124#include <ddb/db_sym.h> 124#include <ddb/db_sym.h>
125#include <ddb/db_extern.h> 125#include <ddb/db_extern.h>
126#include <ddb/db_interface.h> 126#include <ddb/db_interface.h>
127#endif 127#endif
128 128
129#ifdef KGDB 129#ifdef KGDB
130#include <sys/kgdb.h> 130#include <sys/kgdb.h>
131#endif 131#endif
132 132
#ifdef DEBUG
#include <machine/sigdebug.h>
/* Signal-debugging knobs (DEBUG kernels only); semantics in <machine/sigdebug.h>. */
int sigdebug = 0x0;
int sigpid = 0;
#endif

/* Assert some assumptions made in lock_stubs.s */
__CTASSERT(RW_READER == 0);
__CTASSERT(RW_HAS_WAITERS == 1);
143#include <machine/alpha.h> 143#include <machine/alpha.h>
144 144
145#include "ksyms.h" 145#include "ksyms.h"
146 146
struct vm_map *phys_map = NULL;

void *msgbufaddr;

int	maxmem;			/* max memory per process */

int	totalphysmem;		/* total amount of physical memory in system */
int	resvmem;		/* amount of memory reserved for PROM */
int	unusedmem;		/* amount of memory for OS that we don't use */
int	unknownmem;		/* amount of memory with an unknown use */

int	cputype;		/* system type, from the RPB */
bool	alpha_is_qemu;		/* true if we've detected running in qemu */

int	bootdev_debug = 0;	/* patchable, or from DDB */

/*
 * XXX We need an address to which we can assign things so that they
 * won't be optimized away because we didn't use the value.
 */
uint32_t no_optimize;

/* the following is used externally (sysctl_hw) */
char	machine[] = MACHINE;		/* from <machine/param.h> */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

/* Number of machine cycles per microsecond */
uint64_t	cycles_per_usec;

/* number of CPUs in the box. really! */
int		ncpus;

struct bootinfo_kernel bootinfo;

/* For built-in TCDS */
#if defined(DEC_3000_300) || defined(DEC_3000_500)
uint8_t	dec_3000_scsiid[3], dec_3000_scsifast[3];
#endif

struct platform platform;

#if NKSYMS || defined(DDB) || defined(MODULAR)
/* start and end of kernel symbol table */
void	*ksym_start, *ksym_end;
#endif

/* for cpu_sysctl() */
int	alpha_unaligned_print = 1;	/* warn about unaligned accesses */
int	alpha_unaligned_fix = 1;	/* fix up unaligned accesses */
int	alpha_unaligned_sigbus = 0;	/* don't SIGBUS on fixed-up accesses */
int	alpha_fp_sync_complete = 0;	/* fp fixup if sync even without /s */
int	alpha_fp_complete_debug = 0;	/* fp completion debug enabled */

/*
 * XXX This should be dynamically sized, but we have the chicken-egg problem!
 * XXX it should also be larger than it is, because not all of the mddt
 * XXX clusters end up being used for VM.
 */
phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];	/* low size bits overloaded */
int	mem_cluster_cnt;

/* Forward declarations for routines defined later in this file. */
int	cpu_dump(void);
int	cpu_dumpsize(void);
u_long	cpu_dump_mempagecnt(void);
void	dumpsys(void);
void	identifycpu(void);
void	printregs(struct reg *);
213 214
/*
 * FPU state operations vector: load/save/release hooks for the FPU,
 * keyed by PCU_FPU.
 */
const pcu_ops_t fpu_ops = {
	.pcu_id = PCU_FPU,
	.pcu_state_load = fpu_state_load,
	.pcu_state_save = fpu_state_save,
	.pcu_state_release = fpu_state_release,
};

/* Per-CPU-unit operations table; the FPU is the only unit on alpha. */
const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
	[PCU_FPU] = &fpu_ops,
};
224 225
225static void 226static void
226alpha_page_physload(unsigned long const start_pfn, unsigned long const end_pfn) 227alpha_page_physload(unsigned long const start_pfn, unsigned long const end_pfn)
227{ 228{
228 229
229 /* 230 /*
230 * Some Alpha platforms may have unique requirements about 231 * Some Alpha platforms may have unique requirements about
231 * how physical memory is managed (e.g. reserving memory 232 * how physical memory is managed (e.g. reserving memory
232 * ranges due to lack of SGMAP DMA). 233 * ranges due to lack of SGMAP DMA).
233 */ 234 */
234 if (platform.page_physload != NULL) { 235 if (platform.page_physload != NULL) {
235 (*platform.page_physload)(start_pfn, end_pfn); 236 (*platform.page_physload)(start_pfn, end_pfn);
236 return; 237 return;
237 } 238 }
238 239
239 uvm_page_physload(start_pfn, end_pfn, start_pfn, end_pfn, 240 uvm_page_physload(start_pfn, end_pfn, start_pfn, end_pfn,
240 VM_FREELIST_DEFAULT); 241 VM_FREELIST_DEFAULT);
241} 242}
242 243
243void 244void
244alpha_page_physload_sheltered(unsigned long const start_pfn, 245alpha_page_physload_sheltered(unsigned long const start_pfn,
245 unsigned long const end_pfn, unsigned long const shelter_start_pfn, 246 unsigned long const end_pfn, unsigned long const shelter_start_pfn,
246 unsigned long const shelter_end_pfn) 247 unsigned long const shelter_end_pfn)
247{ 248{
248 249
249 /* 250 /*
250 * If the added region ends before or starts after the sheltered 251 * If the added region ends before or starts after the sheltered
251 * region, then it just goes on the default freelist. 252 * region, then it just goes on the default freelist.
252 */ 253 */
253 if (end_pfn <= shelter_start_pfn || start_pfn >= shelter_end_pfn) { 254 if (end_pfn <= shelter_start_pfn || start_pfn >= shelter_end_pfn) {
254 uvm_page_physload(start_pfn, end_pfn, 255 uvm_page_physload(start_pfn, end_pfn,
255 start_pfn, end_pfn, VM_FREELIST_DEFAULT); 256 start_pfn, end_pfn, VM_FREELIST_DEFAULT);
256 return; 257 return;
257 } 258 }
258 259
259 /* 260 /*
260 * Load any portion that comes before the sheltered region. 261 * Load any portion that comes before the sheltered region.
261 */ 262 */
262 if (start_pfn < shelter_start_pfn) { 263 if (start_pfn < shelter_start_pfn) {
263 KASSERT(end_pfn > shelter_start_pfn); 264 KASSERT(end_pfn > shelter_start_pfn);
264 uvm_page_physload(start_pfn, shelter_start_pfn, 265 uvm_page_physload(start_pfn, shelter_start_pfn,
265 start_pfn, shelter_start_pfn, VM_FREELIST_DEFAULT); 266 start_pfn, shelter_start_pfn, VM_FREELIST_DEFAULT);
266 } 267 }
267 268
268 /* 269 /*
269 * Load the portion that overlaps that sheltered region. 270 * Load the portion that overlaps that sheltered region.
270 */ 271 */
271 const unsigned long ov_start = MAX(start_pfn, shelter_start_pfn); 272 const unsigned long ov_start = MAX(start_pfn, shelter_start_pfn);
272 const unsigned long ov_end = MIN(end_pfn, shelter_end_pfn); 273 const unsigned long ov_end = MIN(end_pfn, shelter_end_pfn);
273 KASSERT(ov_start >= shelter_start_pfn); 274 KASSERT(ov_start >= shelter_start_pfn);
274 KASSERT(ov_end <= shelter_end_pfn); 275 KASSERT(ov_end <= shelter_end_pfn);
275 uvm_page_physload(ov_start, ov_end, ov_start, ov_end, 276 uvm_page_physload(ov_start, ov_end, ov_start, ov_end,
276 VM_FREELIST_SHELTERED); 277 VM_FREELIST_SHELTERED);
277 278
278 /* 279 /*
279 * Load any portion that comes after the sheltered region. 280 * Load any portion that comes after the sheltered region.
280 */ 281 */
281 if (end_pfn > shelter_end_pfn) { 282 if (end_pfn > shelter_end_pfn) {
282 KASSERT(start_pfn < shelter_end_pfn); 283 KASSERT(start_pfn < shelter_end_pfn);
283 uvm_page_physload(shelter_end_pfn, end_pfn, 284 uvm_page_physload(shelter_end_pfn, end_pfn,
284 shelter_end_pfn, end_pfn, VM_FREELIST_DEFAULT); 285 shelter_end_pfn, end_pfn, VM_FREELIST_DEFAULT);
285 } 286 }
286} 287}
287 288
288void 289void
289alpha_init(u_long xxx_pfn __unused, u_long ptb, u_long bim, u_long bip, 290alpha_init(u_long xxx_pfn __unused, u_long ptb, u_long bim, u_long bip,
290 u_long biv) 291 u_long biv)
291 /* pfn: first free PFN number (no longer used) */ 292 /* pfn: first free PFN number (no longer used) */
292 /* ptb: PFN of current level 1 page table */ 293 /* ptb: PFN of current level 1 page table */
293 /* bim: bootinfo magic */ 294 /* bim: bootinfo magic */
294 /* bip: bootinfo pointer */ 295 /* bip: bootinfo pointer */
295 /* biv: bootinfo version */ 296 /* biv: bootinfo version */
296{ 297{
297 extern char kernel_text[], _end[]; 298 extern char kernel_text[], _end[];
298 struct mddt *mddtp; 299 struct mddt *mddtp;
299 struct mddt_cluster *memc; 300 struct mddt_cluster *memc;
300 int i, mddtweird; 301 int i, mddtweird;
301 struct pcb *pcb0; 302 struct pcb *pcb0;
302 vaddr_t kernstart, kernend, v; 303 vaddr_t kernstart, kernend, v;
303 paddr_t kernstartpfn, kernendpfn, pfn0, pfn1; 304 paddr_t kernstartpfn, kernendpfn, pfn0, pfn1;
304 cpuid_t cpu_id; 305 cpuid_t cpu_id;
305 struct cpu_info *ci; 306 struct cpu_info *ci;
306 char *p; 307 char *p;
307 const char *bootinfo_msg; 308 const char *bootinfo_msg;
308 const struct cpuinit *c; 309 const struct cpuinit *c;
309 310
310 /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */ 311 /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */
311 312
312 /* 313 /*
313 * Turn off interrupts (not mchecks) and floating point. 314 * Turn off interrupts (not mchecks) and floating point.
314 * Make sure the instruction and data streams are consistent. 315 * Make sure the instruction and data streams are consistent.
315 */ 316 */
316 (void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); 317 (void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
317 alpha_pal_wrfen(0); 318 alpha_pal_wrfen(0);
318 ALPHA_TBIA(); 319 ALPHA_TBIA();
319 alpha_pal_imb(); 320 alpha_pal_imb();
320 321
321 /* Initialize the SCB. */ 322 /* Initialize the SCB. */
322 scb_init(); 323 scb_init();
323 324
324 cpu_id = cpu_number(); 325 cpu_id = cpu_number();
325 326
326 ci = &cpu_info_primary; 327 ci = &cpu_info_primary;
327 ci->ci_cpuid = cpu_id; 328 ci->ci_cpuid = cpu_id;
328 329
329#if defined(MULTIPROCESSOR) 330#if defined(MULTIPROCESSOR)
330 /* 331 /*
331 * Set the SysValue to &lwp0, after making sure that lwp0 332 * Set the SysValue to &lwp0, after making sure that lwp0
332 * is pointing at the primary CPU. Secondary processors do 333 * is pointing at the primary CPU. Secondary processors do
333 * this in their spinup trampoline. 334 * this in their spinup trampoline.
334 */ 335 */
335 lwp0.l_cpu = ci; 336 lwp0.l_cpu = ci;
336 cpu_info[cpu_id] = ci; 337 cpu_info[cpu_id] = ci;
337 alpha_pal_wrval((u_long)&lwp0); 338 alpha_pal_wrval((u_long)&lwp0);
338#endif 339#endif
339 340
340 /* 341 /*
341 * Get critical system information (if possible, from the 342 * Get critical system information (if possible, from the
342 * information provided by the boot program). 343 * information provided by the boot program).
343 */ 344 */
344 bootinfo_msg = NULL; 345 bootinfo_msg = NULL;
345 if (bim == BOOTINFO_MAGIC) { 346 if (bim == BOOTINFO_MAGIC) {
346 if (biv == 0) { /* backward compat */ 347 if (biv == 0) { /* backward compat */
347 biv = *(u_long *)bip; 348 biv = *(u_long *)bip;
348 bip += 8; 349 bip += 8;
349 } 350 }
350 switch (biv) { 351 switch (biv) {
351 case 1: { 352 case 1: {
352 struct bootinfo_v1 *v1p = (struct bootinfo_v1 *)bip; 353 struct bootinfo_v1 *v1p = (struct bootinfo_v1 *)bip;
353 354
354 bootinfo.ssym = v1p->ssym; 355 bootinfo.ssym = v1p->ssym;
355 bootinfo.esym = v1p->esym; 356 bootinfo.esym = v1p->esym;
356 /* hwrpb may not be provided by boot block in v1 */ 357 /* hwrpb may not be provided by boot block in v1 */
357 if (v1p->hwrpb != NULL) { 358 if (v1p->hwrpb != NULL) {
358 bootinfo.hwrpb_phys = 359 bootinfo.hwrpb_phys =
359 ((struct rpb *)v1p->hwrpb)->rpb_phys; 360 ((struct rpb *)v1p->hwrpb)->rpb_phys;
360 bootinfo.hwrpb_size = v1p->hwrpbsize; 361 bootinfo.hwrpb_size = v1p->hwrpbsize;
361 } else { 362 } else {
362 bootinfo.hwrpb_phys = 363 bootinfo.hwrpb_phys =
363 ((struct rpb *)HWRPB_ADDR)->rpb_phys; 364 ((struct rpb *)HWRPB_ADDR)->rpb_phys;
364 bootinfo.hwrpb_size = 365 bootinfo.hwrpb_size =
365 ((struct rpb *)HWRPB_ADDR)->rpb_size; 366 ((struct rpb *)HWRPB_ADDR)->rpb_size;
366 } 367 }
367 memcpy(bootinfo.boot_flags, v1p->boot_flags, 368 memcpy(bootinfo.boot_flags, v1p->boot_flags,
368 uimin(sizeof v1p->boot_flags, 369 uimin(sizeof v1p->boot_flags,
369 sizeof bootinfo.boot_flags)); 370 sizeof bootinfo.boot_flags));
370 memcpy(bootinfo.booted_kernel, v1p->booted_kernel, 371 memcpy(bootinfo.booted_kernel, v1p->booted_kernel,
371 uimin(sizeof v1p->booted_kernel, 372 uimin(sizeof v1p->booted_kernel,
372 sizeof bootinfo.booted_kernel)); 373 sizeof bootinfo.booted_kernel));
373 /* booted dev not provided in bootinfo */ 374 /* booted dev not provided in bootinfo */
374 init_prom_interface(ptb, (struct rpb *) 375 init_prom_interface(ptb, (struct rpb *)
375 ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys)); 376 ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys));
376 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev, 377 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
377 sizeof bootinfo.booted_dev); 378 sizeof bootinfo.booted_dev);
378 break; 379 break;
379 } 380 }
380 default: 381 default:
381 bootinfo_msg = "unknown bootinfo version"; 382 bootinfo_msg = "unknown bootinfo version";
382 goto nobootinfo; 383 goto nobootinfo;
383 } 384 }
384 } else { 385 } else {
385 bootinfo_msg = "boot program did not pass bootinfo"; 386 bootinfo_msg = "boot program did not pass bootinfo";
386nobootinfo: 387nobootinfo:
387 bootinfo.ssym = (u_long)_end; 388 bootinfo.ssym = (u_long)_end;
388 bootinfo.esym = (u_long)_end; 389 bootinfo.esym = (u_long)_end;
389 bootinfo.hwrpb_phys = ((struct rpb *)HWRPB_ADDR)->rpb_phys; 390 bootinfo.hwrpb_phys = ((struct rpb *)HWRPB_ADDR)->rpb_phys;
390 bootinfo.hwrpb_size = ((struct rpb *)HWRPB_ADDR)->rpb_size; 391 bootinfo.hwrpb_size = ((struct rpb *)HWRPB_ADDR)->rpb_size;
391 init_prom_interface(ptb, (struct rpb *)HWRPB_ADDR); 392 init_prom_interface(ptb, (struct rpb *)HWRPB_ADDR);
392 if (alpha_is_qemu) { 393 if (alpha_is_qemu) {
393 /* 394 /*
394 * Grab boot flags from kernel command line. 395 * Grab boot flags from kernel command line.
395 * Assume autoboot if not supplied. 396 * Assume autoboot if not supplied.
396 */ 397 */
397 if (! prom_qemu_getenv("flags", bootinfo.boot_flags, 398 if (! prom_qemu_getenv("flags", bootinfo.boot_flags,
398 sizeof(bootinfo.boot_flags))) { 399 sizeof(bootinfo.boot_flags))) {
399 strlcpy(bootinfo.boot_flags, "A", 400 strlcpy(bootinfo.boot_flags, "A",
400 sizeof(bootinfo.boot_flags)); 401 sizeof(bootinfo.boot_flags));
401 } 402 }
402 } else { 403 } else {
403 prom_getenv(PROM_E_BOOTED_OSFLAGS, bootinfo.boot_flags, 404 prom_getenv(PROM_E_BOOTED_OSFLAGS, bootinfo.boot_flags,
404 sizeof bootinfo.boot_flags); 405 sizeof bootinfo.boot_flags);
405 prom_getenv(PROM_E_BOOTED_FILE, bootinfo.booted_kernel, 406 prom_getenv(PROM_E_BOOTED_FILE, bootinfo.booted_kernel,
406 sizeof bootinfo.booted_kernel); 407 sizeof bootinfo.booted_kernel);
407 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev, 408 prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
408 sizeof bootinfo.booted_dev); 409 sizeof bootinfo.booted_dev);
409 } 410 }
410 } 411 }
411 412
412 /* 413 /*
413 * Initialize the kernel's mapping of the RPB. It's needed for 414 * Initialize the kernel's mapping of the RPB. It's needed for
414 * lots of things. 415 * lots of things.
415 */ 416 */
416 hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys); 417 hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys);
417 418
418#if defined(DEC_3000_300) || defined(DEC_3000_500) 419#if defined(DEC_3000_300) || defined(DEC_3000_500)
419 if (hwrpb->rpb_type == ST_DEC_3000_300 || 420 if (hwrpb->rpb_type == ST_DEC_3000_300 ||
420 hwrpb->rpb_type == ST_DEC_3000_500) { 421 hwrpb->rpb_type == ST_DEC_3000_500) {
421 prom_getenv(PROM_E_SCSIID, dec_3000_scsiid, 422 prom_getenv(PROM_E_SCSIID, dec_3000_scsiid,
422 sizeof(dec_3000_scsiid)); 423 sizeof(dec_3000_scsiid));
423 prom_getenv(PROM_E_SCSIFAST, dec_3000_scsifast, 424 prom_getenv(PROM_E_SCSIFAST, dec_3000_scsifast,
424 sizeof(dec_3000_scsifast)); 425 sizeof(dec_3000_scsifast));
425 } 426 }
426#endif 427#endif
427 428
428 /* 429 /*
429 * Remember how many cycles there are per microsecond, 430 * Remember how many cycles there are per microsecond,
430 * so that we can use delay(). Round up, for safety. 431 * so that we can use delay(). Round up, for safety.
431 */ 432 */
432 cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000; 433 cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000;
433 434
434 /* 435 /*
435 * Initialize the (temporary) bootstrap console interface, so 436 * Initialize the (temporary) bootstrap console interface, so
436 * we can use printf until the VM system starts being setup. 437 * we can use printf until the VM system starts being setup.
437 * The real console is initialized before then. 438 * The real console is initialized before then.
438 */ 439 */
439 init_bootstrap_console(); 440 init_bootstrap_console();
440 441
441 /* OUTPUT NOW ALLOWED */ 442 /* OUTPUT NOW ALLOWED */
442 443
443 /* delayed from above */ 444 /* delayed from above */
444 if (bootinfo_msg) 445 if (bootinfo_msg)
445 printf("WARNING: %s (0x%lx, 0x%lx, 0x%lx)\n", 446 printf("WARNING: %s (0x%lx, 0x%lx, 0x%lx)\n",
446 bootinfo_msg, bim, bip, biv); 447 bootinfo_msg, bim, bip, biv);
447 448
448 /* Initialize the trap vectors on the primary processor. */ 449 /* Initialize the trap vectors on the primary processor. */
449 trap_init(); 450 trap_init();
450 451
451 /* 452 /*
452 * Find out this system's page size, and initialize 453 * Find out this system's page size, and initialize
453 * PAGE_SIZE-dependent variables. 454 * PAGE_SIZE-dependent variables.
454 */ 455 */
455 if (hwrpb->rpb_page_size != ALPHA_PGBYTES) 456 if (hwrpb->rpb_page_size != ALPHA_PGBYTES)
456 panic("page size %lu != %d?!", hwrpb->rpb_page_size, 457 panic("page size %lu != %d?!", hwrpb->rpb_page_size,
457 ALPHA_PGBYTES); 458 ALPHA_PGBYTES);
458 uvmexp.pagesize = hwrpb->rpb_page_size; 459 uvmexp.pagesize = hwrpb->rpb_page_size;
459 uvm_md_init(); 460 uvm_md_init();
460 461
461 /* 462 /*
462 * cputype has been initialized in init_prom_interface(). 463 * cputype has been initialized in init_prom_interface().
463 * Perform basic platform initialization using this info. 464 * Perform basic platform initialization using this info.
464 */ 465 */
465 KASSERT(prom_interface_initialized); 466 KASSERT(prom_interface_initialized);
466 c = platform_lookup(cputype); 467 c = platform_lookup(cputype);
467 if (c == NULL) { 468 if (c == NULL) {
468 platform_not_supported(); 469 platform_not_supported();
469 /* NOTREACHED */ 470 /* NOTREACHED */
470 } 471 }
471 (*c->init)(); 472 (*c->init)();
472 cpu_setmodel("%s", platform.model); 473 cpu_setmodel("%s", platform.model);
473 474
474 /* 475 /*
475 * Initialize the real console, so that the bootstrap console is 476 * Initialize the real console, so that the bootstrap console is
476 * no longer necessary. 477 * no longer necessary.
477 */ 478 */
478 (*platform.cons_init)(); 479 (*platform.cons_init)();
479 480
480#ifdef DIAGNOSTIC 481#ifdef DIAGNOSTIC
481 /* Paranoid sanity checking */ 482 /* Paranoid sanity checking */
482 483
483 /* We should always be running on the primary. */ 484 /* We should always be running on the primary. */
484 assert(hwrpb->rpb_primary_cpu_id == cpu_id); 485 assert(hwrpb->rpb_primary_cpu_id == cpu_id);
485 486
486 /* 487 /*
487 * On single-CPU systypes, the primary should always be CPU 0, 488 * On single-CPU systypes, the primary should always be CPU 0,
488 * except on Alpha 8200 systems where the CPU id is related 489 * except on Alpha 8200 systems where the CPU id is related
489 * to the VID, which is related to the Turbo Laser node id. 490 * to the VID, which is related to the Turbo Laser node id.
490 */ 491 */
491 if (cputype != ST_DEC_21000) 492 if (cputype != ST_DEC_21000)
492 assert(hwrpb->rpb_primary_cpu_id == 0); 493 assert(hwrpb->rpb_primary_cpu_id == 0);
493#endif 494#endif
494 495
495 /* NO MORE FIRMWARE ACCESS ALLOWED */ 496 /* NO MORE FIRMWARE ACCESS ALLOWED */
496 /* XXX Unless prom_uses_prom_console() evaluates to non-zero.) */ 497 /* XXX Unless prom_uses_prom_console() evaluates to non-zero.) */
497 498
498 /* 499 /*
499 * Find the beginning and end of the kernel (and leave a 500 * Find the beginning and end of the kernel (and leave a
500 * bit of space before the beginning for the bootstrap 501 * bit of space before the beginning for the bootstrap
501 * stack). 502 * stack).
502 */ 503 */
503 kernstart = trunc_page((vaddr_t)kernel_text) - 2 * PAGE_SIZE; 504 kernstart = trunc_page((vaddr_t)kernel_text) - 2 * PAGE_SIZE;
504#if NKSYMS || defined(DDB) || defined(MODULAR) 505#if NKSYMS || defined(DDB) || defined(MODULAR)
505 ksym_start = (void *)bootinfo.ssym; 506 ksym_start = (void *)bootinfo.ssym;
506 ksym_end = (void *)bootinfo.esym; 507 ksym_end = (void *)bootinfo.esym;
507 kernend = (vaddr_t)round_page((vaddr_t)ksym_end); 508 kernend = (vaddr_t)round_page((vaddr_t)ksym_end);
508#else 509#else
509 kernend = (vaddr_t)round_page((vaddr_t)_end); 510 kernend = (vaddr_t)round_page((vaddr_t)_end);
510#endif 511#endif
511 512
512 kernstartpfn = atop(ALPHA_K0SEG_TO_PHYS(kernstart)); 513 kernstartpfn = atop(ALPHA_K0SEG_TO_PHYS(kernstart));
513 kernendpfn = atop(ALPHA_K0SEG_TO_PHYS(kernend)); 514 kernendpfn = atop(ALPHA_K0SEG_TO_PHYS(kernend));
514 515
515 /* 516 /*
516 * Find out how much memory is available, by looking at 517 * Find out how much memory is available, by looking at
517 * the memory cluster descriptors. This also tries to do 518 * the memory cluster descriptors. This also tries to do
518 * its best to detect things things that have never been seen 519 * its best to detect things things that have never been seen
519 * before... 520 * before...
520 */ 521 */
521 mddtp = (struct mddt *)(((char *)hwrpb) + hwrpb->rpb_memdat_off); 522 mddtp = (struct mddt *)(((char *)hwrpb) + hwrpb->rpb_memdat_off);
522 523
523 /* MDDT SANITY CHECKING */ 524 /* MDDT SANITY CHECKING */
524 mddtweird = 0; 525 mddtweird = 0;
525 if (mddtp->mddt_cluster_cnt < 2) { 526 if (mddtp->mddt_cluster_cnt < 2) {
526 mddtweird = 1; 527 mddtweird = 1;
527 printf("WARNING: weird number of mem clusters: %lu\n", 528 printf("WARNING: weird number of mem clusters: %lu\n",
528 mddtp->mddt_cluster_cnt); 529 mddtp->mddt_cluster_cnt);
529 } 530 }
530 531
531#if 0 532#if 0
532 printf("Memory cluster count: %" PRIu64 "\n", mddtp->mddt_cluster_cnt); 533 printf("Memory cluster count: %" PRIu64 "\n", mddtp->mddt_cluster_cnt);
533#endif 534#endif
534 535
535 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) { 536 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
536 memc = &mddtp->mddt_clusters[i]; 537 memc = &mddtp->mddt_clusters[i];
537#if 0 538#if 0
538 printf("MEMC %d: pfn 0x%lx cnt 0x%lx usage 0x%lx\n", i, 539 printf("MEMC %d: pfn 0x%lx cnt 0x%lx usage 0x%lx\n", i,
539 memc->mddt_pfn, memc->mddt_pg_cnt, memc->mddt_usage); 540 memc->mddt_pfn, memc->mddt_pg_cnt, memc->mddt_usage);
540#endif 541#endif
541 totalphysmem += memc->mddt_pg_cnt; 542 totalphysmem += memc->mddt_pg_cnt;
542 if (mem_cluster_cnt < VM_PHYSSEG_MAX) { /* XXX */ 543 if (mem_cluster_cnt < VM_PHYSSEG_MAX) { /* XXX */
543 mem_clusters[mem_cluster_cnt].start = 544 mem_clusters[mem_cluster_cnt].start =
544 ptoa(memc->mddt_pfn); 545 ptoa(memc->mddt_pfn);
545 mem_clusters[mem_cluster_cnt].size = 546 mem_clusters[mem_cluster_cnt].size =
546 ptoa(memc->mddt_pg_cnt); 547 ptoa(memc->mddt_pg_cnt);
547 if (memc->mddt_usage & MDDT_mbz || 548 if (memc->mddt_usage & MDDT_mbz ||
548 memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */ 549 memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */
549 memc->mddt_usage & MDDT_PALCODE) 550 memc->mddt_usage & MDDT_PALCODE)
550 mem_clusters[mem_cluster_cnt].size |= 551 mem_clusters[mem_cluster_cnt].size |=
551 PROT_READ; 552 PROT_READ;
552 else 553 else
553 mem_clusters[mem_cluster_cnt].size |= 554 mem_clusters[mem_cluster_cnt].size |=
554 PROT_READ | PROT_WRITE | PROT_EXEC; 555 PROT_READ | PROT_WRITE | PROT_EXEC;
555 mem_cluster_cnt++; 556 mem_cluster_cnt++;
556 } 557 }
557 558
558 if (memc->mddt_usage & MDDT_mbz) { 559 if (memc->mddt_usage & MDDT_mbz) {
559 mddtweird = 1; 560 mddtweird = 1;
560 printf("WARNING: mem cluster %d has weird " 561 printf("WARNING: mem cluster %d has weird "
561 "usage 0x%lx\n", i, memc->mddt_usage); 562 "usage 0x%lx\n", i, memc->mddt_usage);
562 unknownmem += memc->mddt_pg_cnt; 563 unknownmem += memc->mddt_pg_cnt;
563 continue; 564 continue;
564 } 565 }
565 if (memc->mddt_usage & MDDT_NONVOLATILE) { 566 if (memc->mddt_usage & MDDT_NONVOLATILE) {
566 /* XXX should handle these... */ 567 /* XXX should handle these... */
567 printf("WARNING: skipping non-volatile mem " 568 printf("WARNING: skipping non-volatile mem "
568 "cluster %d\n", i); 569 "cluster %d\n", i);
569 unusedmem += memc->mddt_pg_cnt; 570 unusedmem += memc->mddt_pg_cnt;
570 continue; 571 continue;
571 } 572 }
572 if (memc->mddt_usage & MDDT_PALCODE) { 573 if (memc->mddt_usage & MDDT_PALCODE) {
573 resvmem += memc->mddt_pg_cnt; 574 resvmem += memc->mddt_pg_cnt;
574 continue; 575 continue;
575 } 576 }
576 577
577 /* 578 /*
578 * We have a memory cluster available for system 579 * We have a memory cluster available for system
579 * software use. We must determine if this cluster 580 * software use. We must determine if this cluster
580 * holds the kernel. 581 * holds the kernel.
581 */ 582 */
582 583
583 /* 584 /*
584 * XXX If the kernel uses the PROM console, we only use the 585 * XXX If the kernel uses the PROM console, we only use the
585 * XXX memory after the kernel in the first system segment, 586 * XXX memory after the kernel in the first system segment,
586 * XXX to avoid clobbering prom mapping, data, etc. 587 * XXX to avoid clobbering prom mapping, data, etc.
587 */ 588 */
588 physmem += memc->mddt_pg_cnt; 589 physmem += memc->mddt_pg_cnt;
589 pfn0 = memc->mddt_pfn; 590 pfn0 = memc->mddt_pfn;
590 pfn1 = memc->mddt_pfn + memc->mddt_pg_cnt; 591 pfn1 = memc->mddt_pfn + memc->mddt_pg_cnt;
591 if (pfn0 <= kernstartpfn && kernendpfn <= pfn1) { 592 if (pfn0 <= kernstartpfn && kernendpfn <= pfn1) {
592 /* 593 /*
593 * Must compute the location of the kernel 594 * Must compute the location of the kernel
594 * within the segment. 595 * within the segment.
595 */ 596 */
596#if 0 597#if 0
597 printf("Cluster %d contains kernel\n", i); 598 printf("Cluster %d contains kernel\n", i);
598#endif 599#endif
599 if (pfn0 < kernstartpfn && !prom_uses_prom_console()) { 600 if (pfn0 < kernstartpfn && !prom_uses_prom_console()) {
600 /* 601 /*
601 * There is a chunk before the kernel. 602 * There is a chunk before the kernel.
602 */ 603 */
603#if 0 604#if 0
604 printf("Loading chunk before kernel: " 605 printf("Loading chunk before kernel: "
605 "0x%lx / 0x%lx\n", pfn0, kernstartpfn); 606 "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
606#endif 607#endif
607 alpha_page_physload(pfn0, kernstartpfn); 608 alpha_page_physload(pfn0, kernstartpfn);
608 } 609 }
609 if (kernendpfn < pfn1) { 610 if (kernendpfn < pfn1) {
610 /* 611 /*
611 * There is a chunk after the kernel. 612 * There is a chunk after the kernel.
612 */ 613 */
613#if 0 614#if 0
614 printf("Loading chunk after kernel: " 615 printf("Loading chunk after kernel: "
615 "0x%lx / 0x%lx\n", kernendpfn, pfn1); 616 "0x%lx / 0x%lx\n", kernendpfn, pfn1);
616#endif 617#endif
617 alpha_page_physload(kernendpfn, pfn1); 618 alpha_page_physload(kernendpfn, pfn1);
618 } 619 }
619 } else { 620 } else {
620 /* 621 /*
621 * Just load this cluster as one chunk. 622 * Just load this cluster as one chunk.
622 */ 623 */
623#if 0 624#if 0
624 printf("Loading cluster %d: 0x%lx / 0x%lx\n", i, 625 printf("Loading cluster %d: 0x%lx / 0x%lx\n", i,
625 pfn0, pfn1); 626 pfn0, pfn1);
626#endif 627#endif
627 alpha_page_physload(pfn0, pfn1); 628 alpha_page_physload(pfn0, pfn1);
628 } 629 }
629 } 630 }
630 631
631 /* 632 /*
632 * Dump out the MDDT if it looks odd... 633 * Dump out the MDDT if it looks odd...
633 */ 634 */
634 if (mddtweird) { 635 if (mddtweird) {
635 printf("\n"); 636 printf("\n");
636 printf("complete memory cluster information:\n"); 637 printf("complete memory cluster information:\n");
637 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) { 638 for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
638 printf("mddt %d:\n", i); 639 printf("mddt %d:\n", i);
639 printf("\tpfn %lx\n", 640 printf("\tpfn %lx\n",
640 mddtp->mddt_clusters[i].mddt_pfn); 641 mddtp->mddt_clusters[i].mddt_pfn);
641 printf("\tcnt %lx\n", 642 printf("\tcnt %lx\n",
642 mddtp->mddt_clusters[i].mddt_pg_cnt); 643 mddtp->mddt_clusters[i].mddt_pg_cnt);
643 printf("\ttest %lx\n", 644 printf("\ttest %lx\n",
644 mddtp->mddt_clusters[i].mddt_pg_test); 645 mddtp->mddt_clusters[i].mddt_pg_test);
645 printf("\tbva %lx\n", 646 printf("\tbva %lx\n",
646 mddtp->mddt_clusters[i].mddt_v_bitaddr); 647 mddtp->mddt_clusters[i].mddt_v_bitaddr);
647 printf("\tbpa %lx\n", 648 printf("\tbpa %lx\n",
648 mddtp->mddt_clusters[i].mddt_p_bitaddr); 649 mddtp->mddt_clusters[i].mddt_p_bitaddr);
649 printf("\tbcksum %lx\n", 650 printf("\tbcksum %lx\n",
650 mddtp->mddt_clusters[i].mddt_bit_cksum); 651 mddtp->mddt_clusters[i].mddt_bit_cksum);
651 printf("\tusage %lx\n", 652 printf("\tusage %lx\n",
652 mddtp->mddt_clusters[i].mddt_usage); 653 mddtp->mddt_clusters[i].mddt_usage);
653 } 654 }
654 printf("\n"); 655 printf("\n");
655 } 656 }
656 657
657 if (totalphysmem == 0) 658 if (totalphysmem == 0)
658 panic("can't happen: system seems to have no memory!"); 659 panic("can't happen: system seems to have no memory!");
659 maxmem = physmem; 660 maxmem = physmem;
660#if 0 661#if 0
661 printf("totalphysmem = %d\n", totalphysmem); 662 printf("totalphysmem = %d\n", totalphysmem);
662 printf("physmem = %lu\n", physmem); 663 printf("physmem = %lu\n", physmem);
663 printf("resvmem = %d\n", resvmem); 664 printf("resvmem = %d\n", resvmem);
664 printf("unusedmem = %d\n", unusedmem); 665 printf("unusedmem = %d\n", unusedmem);
665 printf("unknownmem = %d\n", unknownmem); 666 printf("unknownmem = %d\n", unknownmem);
666#endif 667#endif
667 668
668 /* 669 /*
669 * Initialize error message buffer (at end of core). 670 * Initialize error message buffer (at end of core).
670 */ 671 */
671 { 672 {
672 paddr_t end; 673 paddr_t end;
673 vsize_t sz = (vsize_t)round_page(MSGBUFSIZE); 674 vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
674 vsize_t reqsz = sz; 675 vsize_t reqsz = sz;
675 uvm_physseg_t bank; 676 uvm_physseg_t bank;
676 677
677 bank = uvm_physseg_get_last(); 678 bank = uvm_physseg_get_last();
678 679
679 /* shrink so that it'll fit in the last segment */ 680 /* shrink so that it'll fit in the last segment */
680 if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < atop(sz)) 681 if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < atop(sz))
681 sz = ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)); 682 sz = ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank));
682 683
683 end = uvm_physseg_get_end(bank); 684 end = uvm_physseg_get_end(bank);
684 end -= atop(sz); 685 end -= atop(sz);
685 686
686 uvm_physseg_unplug(end, atop(sz)); 687 uvm_physseg_unplug(end, atop(sz));
687 msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(end)); 688 msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(end));
688 689
689 initmsgbuf(msgbufaddr, sz); 690 initmsgbuf(msgbufaddr, sz);
690 691
691 /* warn if the message buffer had to be shrunk */ 692 /* warn if the message buffer had to be shrunk */
692 if (sz != reqsz) 693 if (sz != reqsz)
693 printf("WARNING: %ld bytes not available for msgbuf " 694 printf("WARNING: %ld bytes not available for msgbuf "
694 "in last cluster (%ld used)\n", reqsz, sz); 695 "in last cluster (%ld used)\n", reqsz, sz);
695 696
696 } 697 }
697 698
698 /* 699 /*
699 * NOTE: It is safe to use uvm_pageboot_alloc() before 700 * NOTE: It is safe to use uvm_pageboot_alloc() before
700 * pmap_bootstrap() because our pmap_virtual_space() 701 * pmap_bootstrap() because our pmap_virtual_space()
701 * returns compile-time constants. 702 * returns compile-time constants.
702 */ 703 */
703 704
704 /* 705 /*
705 * Allocate uarea page for lwp0 and set it. 706 * Allocate uarea page for lwp0 and set it.
706 */ 707 */
707 v = uvm_pageboot_alloc(UPAGES * PAGE_SIZE); 708 v = uvm_pageboot_alloc(UPAGES * PAGE_SIZE);
708 uvm_lwp_setuarea(&lwp0, v); 709 uvm_lwp_setuarea(&lwp0, v);
709 710
710 /* 711 /*
711 * Initialize the virtual memory system, and set the 712 * Initialize the virtual memory system, and set the
712 * page table base register in proc 0's PCB. 713 * page table base register in proc 0's PCB.
713 */ 714 */
714 pmap_bootstrap(ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT), 715 pmap_bootstrap(ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT),
715 hwrpb->rpb_max_asn, hwrpb->rpb_pcs_cnt); 716 hwrpb->rpb_max_asn, hwrpb->rpb_pcs_cnt);
716 717
717 /* 718 /*
718 * Initialize the rest of lwp0's PCB and cache its physical address. 719 * Initialize the rest of lwp0's PCB and cache its physical address.
719 */ 720 */
720 pcb0 = lwp_getpcb(&lwp0); 721 pcb0 = lwp_getpcb(&lwp0);
721 lwp0.l_md.md_pcbpaddr = (void *)ALPHA_K0SEG_TO_PHYS((vaddr_t)pcb0); 722 lwp0.l_md.md_pcbpaddr = (void *)ALPHA_K0SEG_TO_PHYS((vaddr_t)pcb0);
722 723
723 /* 724 /*
724 * Set the kernel sp, reserving space for an (empty) trapframe, 725 * Set the kernel sp, reserving space for an (empty) trapframe,
725 * and make lwp0's trapframe pointer point to it for sanity. 726 * and make lwp0's trapframe pointer point to it for sanity.
726 */ 727 */
727 pcb0->pcb_hw.apcb_ksp = v + USPACE - sizeof(struct trapframe); 728 pcb0->pcb_hw.apcb_ksp = v + USPACE - sizeof(struct trapframe);
728 lwp0.l_md.md_tf = (struct trapframe *)pcb0->pcb_hw.apcb_ksp; 729 lwp0.l_md.md_tf = (struct trapframe *)pcb0->pcb_hw.apcb_ksp;
729 730
730 /* Indicate that lwp0 has a CPU. */ 731 /* Indicate that lwp0 has a CPU. */
731 lwp0.l_cpu = ci; 732 lwp0.l_cpu = ci;
732 733
733 /* 734 /*
734 * Look at arguments passed to us and compute boothowto. 735 * Look at arguments passed to us and compute boothowto.
735 */ 736 */
736 737
737 boothowto = RB_SINGLE; 738 boothowto = RB_SINGLE;
738#ifdef KADB 739#ifdef KADB
739 boothowto |= RB_KDB; 740 boothowto |= RB_KDB;
740#endif 741#endif
741 for (p = bootinfo.boot_flags; p && *p != '\0'; p++) { 742 for (p = bootinfo.boot_flags; p && *p != '\0'; p++) {
742 /* 743 /*
743 * Note that we'd really like to differentiate case here, 744 * Note that we'd really like to differentiate case here,
744 * but the Alpha AXP Architecture Reference Manual 745 * but the Alpha AXP Architecture Reference Manual
745 * says that we shouldn't. 746 * says that we shouldn't.
746 */ 747 */
747 switch (*p) { 748 switch (*p) {
748 case 'a': /* autoboot */ 749 case 'a': /* autoboot */
749 case 'A': 750 case 'A':
750 boothowto &= ~RB_SINGLE; 751 boothowto &= ~RB_SINGLE;
751 break; 752 break;
752 753
753#ifdef DEBUG 754#ifdef DEBUG
754 case 'c': /* crash dump immediately after autoconfig */ 755 case 'c': /* crash dump immediately after autoconfig */
755 case 'C': 756 case 'C':
756 boothowto |= RB_DUMP; 757 boothowto |= RB_DUMP;
757 break; 758 break;
758#endif 759#endif
759 760
760#if defined(KGDB) || defined(DDB) 761#if defined(KGDB) || defined(DDB)
761 case 'd': /* break into the kernel debugger ASAP */ 762 case 'd': /* break into the kernel debugger ASAP */
762 case 'D': 763 case 'D':
763 boothowto |= RB_KDB; 764 boothowto |= RB_KDB;
764 break; 765 break;
765#endif 766#endif
766 767
767 case 'h': /* always halt, never reboot */ 768 case 'h': /* always halt, never reboot */
768 case 'H': 769 case 'H':
769 boothowto |= RB_HALT; 770 boothowto |= RB_HALT;
770 break; 771 break;
771 772
772#if 0 773#if 0
773 case 'm': /* mini root present in memory */ 774 case 'm': /* mini root present in memory */
774 case 'M': 775 case 'M':
775 boothowto |= RB_MINIROOT; 776 boothowto |= RB_MINIROOT;
776 break; 777 break;
777#endif 778#endif
778 779
779 case 'n': /* askname */ 780 case 'n': /* askname */
780 case 'N': 781 case 'N':
781 boothowto |= RB_ASKNAME; 782 boothowto |= RB_ASKNAME;
782 break; 783 break;
783 784
784 case 's': /* single-user (default, supported for sanity) */ 785 case 's': /* single-user (default, supported for sanity) */
785 case 'S': 786 case 'S':
786 boothowto |= RB_SINGLE; 787 boothowto |= RB_SINGLE;
787 break; 788 break;
788 789
789 case 'q': /* quiet boot */ 790 case 'q': /* quiet boot */
790 case 'Q': 791 case 'Q':
791 boothowto |= AB_QUIET; 792 boothowto |= AB_QUIET;
792 break; 793 break;
793 794
794 case 'v': /* verbose boot */ 795 case 'v': /* verbose boot */
795 case 'V': 796 case 'V':
796 boothowto |= AB_VERBOSE; 797 boothowto |= AB_VERBOSE;
797 break; 798 break;
798 799
799 case '-': 800 case '-':
800 /* 801 /*
801 * Just ignore this. It's not required, but it's 802 * Just ignore this. It's not required, but it's
802 * common for it to be passed regardless. 803 * common for it to be passed regardless.
803 */ 804 */
804 break; 805 break;
805 806
806 default: 807 default:
807 printf("Unrecognized boot flag '%c'.\n", *p); 808 printf("Unrecognized boot flag '%c'.\n", *p);
808 break; 809 break;
809 } 810 }
810 } 811 }
811 812
812 /* 813 /*
813 * Perform any initial kernel patches based on the running system. 814 * Perform any initial kernel patches based on the running system.
814 * We may perform more later if we attach additional CPUs. 815 * We may perform more later if we attach additional CPUs.
815 */ 816 */
816 alpha_patch(false); 817 alpha_patch(false);
817 818
818 /* 819 /*
819 * Figure out the number of CPUs in the box, from RPB fields. 820 * Figure out the number of CPUs in the box, from RPB fields.
820 * Really. We mean it. 821 * Really. We mean it.
821 */ 822 */
822 for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) { 823 for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
823 struct pcs *pcsp; 824 struct pcs *pcsp;
824 825
825 pcsp = LOCATE_PCS(hwrpb, i); 826 pcsp = LOCATE_PCS(hwrpb, i);
826 if ((pcsp->pcs_flags & PCS_PP) != 0) 827 if ((pcsp->pcs_flags & PCS_PP) != 0)
827 ncpus++; 828 ncpus++;
828 } 829 }
829 830
830 /* 831 /*
831 * Initialize debuggers, and break into them if appropriate. 832 * Initialize debuggers, and break into them if appropriate.
832 */ 833 */
833#if NKSYMS || defined(DDB) || defined(MODULAR) 834#if NKSYMS || defined(DDB) || defined(MODULAR)
834 ksyms_addsyms_elf((int)((uint64_t)ksym_end - (uint64_t)ksym_start), 835 ksyms_addsyms_elf((int)((uint64_t)ksym_end - (uint64_t)ksym_start),
835 ksym_start, ksym_end); 836 ksym_start, ksym_end);
836#endif 837#endif
837 838
838 if (boothowto & RB_KDB) { 839 if (boothowto & RB_KDB) {
839#if defined(KGDB) 840#if defined(KGDB)
840 kgdb_debug_init = 1; 841 kgdb_debug_init = 1;
841 kgdb_connect(1); 842 kgdb_connect(1);
842#elif defined(DDB) 843#elif defined(DDB)
843 Debugger(); 844 Debugger();
844#endif 845#endif
845 } 846 }
846 847
847#ifdef DIAGNOSTIC 848#ifdef DIAGNOSTIC
848 /* 849 /*
849 * Check our clock frequency, from RPB fields. 850 * Check our clock frequency, from RPB fields.
850 */ 851 */
851 if ((hwrpb->rpb_intr_freq >> 12) != 1024) 852 if ((hwrpb->rpb_intr_freq >> 12) != 1024)
852 printf("WARNING: unbelievable rpb_intr_freq: %ld (%d hz)\n", 853 printf("WARNING: unbelievable rpb_intr_freq: %ld (%d hz)\n",
853 hwrpb->rpb_intr_freq, hz); 854 hwrpb->rpb_intr_freq, hz);
854#endif 855#endif
855} 856}
856 857
857#ifdef MODULAR 858#ifdef MODULAR
858/* Push any modules loaded by the boot loader */ 859/* Push any modules loaded by the boot loader */
859void 860void
860module_init_md(void) 861module_init_md(void)
861{ 862{
862 /* nada. */ 863 /* nada. */
863} 864}
864#endif /* MODULAR */ 865#endif /* MODULAR */
865 866
866void 867void
867consinit(void) 868consinit(void)
868{ 869{
869 870
870 /* 871 /*
871 * Everything related to console initialization is done 872 * Everything related to console initialization is done
872 * in alpha_init(). 873 * in alpha_init().
873 */ 874 */
874#if defined(DIAGNOSTIC) && defined(_PROM_MAY_USE_PROM_CONSOLE) 875#if defined(DIAGNOSTIC) && defined(_PROM_MAY_USE_PROM_CONSOLE)
875 printf("consinit: %susing prom console\n", 876 printf("consinit: %susing prom console\n",
876 prom_uses_prom_console() ? "" : "not "); 877 prom_uses_prom_console() ? "" : "not ");
877#endif 878#endif
878} 879}
879 880
880void 881void
881cpu_startup(void) 882cpu_startup(void)
882{ 883{
883 extern struct evcnt fpevent_use, fpevent_reuse; 884 extern struct evcnt fpevent_use, fpevent_reuse;
884 vaddr_t minaddr, maxaddr; 885 vaddr_t minaddr, maxaddr;
885 char pbuf[9]; 886 char pbuf[9];
886#if defined(DEBUG) 887#if defined(DEBUG)
887 extern int pmapdebug; 888 extern int pmapdebug;
888 int opmapdebug = pmapdebug; 889 int opmapdebug = pmapdebug;
889 890
890 pmapdebug = 0; 891 pmapdebug = 0;
891#endif 892#endif
892 893
893 /* 894 /*
894 * Good {morning,afternoon,evening,night}. 895 * Good {morning,afternoon,evening,night}.
895 */ 896 */
896 printf("%s%s", copyright, version); 897 printf("%s%s", copyright, version);
897 identifycpu(); 898 identifycpu();
898 format_bytes(pbuf, sizeof(pbuf), ptoa(totalphysmem)); 899 format_bytes(pbuf, sizeof(pbuf), ptoa(totalphysmem));
899 printf("total memory = %s\n", pbuf); 900 printf("total memory = %s\n", pbuf);
900 format_bytes(pbuf, sizeof(pbuf), ptoa(resvmem)); 901 format_bytes(pbuf, sizeof(pbuf), ptoa(resvmem));
901 printf("(%s reserved for PROM, ", pbuf); 902 printf("(%s reserved for PROM, ", pbuf);
902 format_bytes(pbuf, sizeof(pbuf), ptoa(physmem)); 903 format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
903 printf("%s used by NetBSD)\n", pbuf); 904 printf("%s used by NetBSD)\n", pbuf);
904 if (unusedmem) { 905 if (unusedmem) {
905 format_bytes(pbuf, sizeof(pbuf), ptoa(unusedmem)); 906 format_bytes(pbuf, sizeof(pbuf), ptoa(unusedmem));
906 printf("WARNING: unused memory = %s\n", pbuf); 907 printf("WARNING: unused memory = %s\n", pbuf);
907 } 908 }
908 if (unknownmem) { 909 if (unknownmem) {
909 format_bytes(pbuf, sizeof(pbuf), ptoa(unknownmem)); 910 format_bytes(pbuf, sizeof(pbuf), ptoa(unknownmem));
910 printf("WARNING: %s of memory with unknown purpose\n", pbuf); 911 printf("WARNING: %s of memory with unknown purpose\n", pbuf);
911 } 912 }
912 913
913 minaddr = 0; 914 minaddr = 0;
914 915
915 /* 916 /*
916 * Allocate a submap for physio 917 * Allocate a submap for physio
917 */ 918 */
918 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 919 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
919 VM_PHYS_SIZE, 0, false, NULL); 920 VM_PHYS_SIZE, 0, false, NULL);
920 921
921 /* 922 /*
922 * No need to allocate an mbuf cluster submap. Mbuf clusters 923 * No need to allocate an mbuf cluster submap. Mbuf clusters
923 * are allocated via the pool allocator, and we use K0SEG to 924 * are allocated via the pool allocator, and we use K0SEG to
924 * map those pages. 925 * map those pages.
925 */ 926 */
926 927
927#if defined(DEBUG) 928#if defined(DEBUG)
928 pmapdebug = opmapdebug; 929 pmapdebug = opmapdebug;
929#endif 930#endif
930 format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false))); 931 format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
931 printf("avail memory = %s\n", pbuf); 932 printf("avail memory = %s\n", pbuf);
932#if 0 933#if 0
933 { 934 {
934 extern u_long pmap_pages_stolen; 935 extern u_long pmap_pages_stolen;
935 936
936 format_bytes(pbuf, sizeof(pbuf), pmap_pages_stolen * PAGE_SIZE); 937 format_bytes(pbuf, sizeof(pbuf), pmap_pages_stolen * PAGE_SIZE);
937 printf("stolen memory for VM structures = %s\n", pbuf); 938 printf("stolen memory for VM structures = %s\n", pbuf);
938 } 939 }
939#endif 940#endif
940 941
941 /* 942 /*
942 * Set up the HWPCB so that it's safe to configure secondary 943 * Set up the HWPCB so that it's safe to configure secondary
943 * CPUs. 944 * CPUs.
944 */ 945 */
945 hwrpb_primary_init(); 946 hwrpb_primary_init();
946 947
947 /* 948 /*
948 * Initialize some trap event counters. 949 * Initialize some trap event counters.
949 */ 950 */
950 evcnt_attach_dynamic_nozero(&fpevent_use, EVCNT_TYPE_MISC, NULL, 951 evcnt_attach_dynamic_nozero(&fpevent_use, EVCNT_TYPE_MISC, NULL,
951 "FP", "proc use"); 952 "FP", "proc use");
952 evcnt_attach_dynamic_nozero(&fpevent_reuse, EVCNT_TYPE_MISC, NULL, 953 evcnt_attach_dynamic_nozero(&fpevent_reuse, EVCNT_TYPE_MISC, NULL,
953 "FP", "proc re-use"); 954 "FP", "proc re-use");
954} 955}
955 956
956/* 957/*
957 * Retrieve the platform name from the DSR. 958 * Retrieve the platform name from the DSR.
958 */ 959 */
959const char * 960const char *
960alpha_dsr_sysname(void) 961alpha_dsr_sysname(void)
961{ 962{
962 struct dsrdb *dsr; 963 struct dsrdb *dsr;
963 const char *sysname; 964 const char *sysname;
964 965
965 /* 966 /*
966 * DSR does not exist on early HWRPB versions. 967 * DSR does not exist on early HWRPB versions.
967 */ 968 */
968 if (hwrpb->rpb_version < HWRPB_DSRDB_MINVERS) 969 if (hwrpb->rpb_version < HWRPB_DSRDB_MINVERS)
969 return (NULL); 970 return (NULL);
970 971
971 dsr = (struct dsrdb *)(((char *)hwrpb) + hwrpb->rpb_dsrdb_off); 972 dsr = (struct dsrdb *)(((char *)hwrpb) + hwrpb->rpb_dsrdb_off);
972 sysname = (const char *)((char *)dsr + (dsr->dsr_sysname_off + 973 sysname = (const char *)((char *)dsr + (dsr->dsr_sysname_off +
973 sizeof(uint64_t))); 974 sizeof(uint64_t)));
974 return (sysname); 975 return (sysname);
975} 976}
976 977
977/* 978/*
978 * Lookup the system specified system variation in the provided table, 979 * Lookup the system specified system variation in the provided table,
979 * returning the model string on match. 980 * returning the model string on match.
980 */ 981 */
981const char * 982const char *
982alpha_variation_name(uint64_t variation, const struct alpha_variation_table *avtp) 983alpha_variation_name(uint64_t variation, const struct alpha_variation_table *avtp)
983{ 984{
984 int i; 985 int i;
985 986
986 for (i = 0; avtp[i].avt_model != NULL; i++) 987 for (i = 0; avtp[i].avt_model != NULL; i++)
987 if (avtp[i].avt_variation == variation) 988 if (avtp[i].avt_variation == variation)
988 return (avtp[i].avt_model); 989 return (avtp[i].avt_model);
989 return (NULL); 990 return (NULL);
990} 991}
991 992
992/* 993/*
993 * Generate a default platform name based for unknown system variations. 994 * Generate a default platform name based for unknown system variations.
994 */ 995 */
995const char * 996const char *
996alpha_unknown_sysname(void) 997alpha_unknown_sysname(void)
997{ 998{
998 static char s[128]; /* safe size */ 999 static char s[128]; /* safe size */
999 1000
1000 snprintf(s, sizeof(s), "%s family, unknown model variation 0x%lx", 1001 snprintf(s, sizeof(s), "%s family, unknown model variation 0x%lx",
1001 platform.family, hwrpb->rpb_variation & SV_ST_MASK); 1002 platform.family, hwrpb->rpb_variation & SV_ST_MASK);
1002 return ((const char *)s); 1003 return ((const char *)s);
1003} 1004}
1004 1005
1005void 1006void
1006identifycpu(void) 1007identifycpu(void)
1007{ 1008{
1008 const char *s; 1009 const char *s;
1009 int i; 1010 int i;
1010 1011
1011 /* 1012 /*
1012 * print out CPU identification information. 1013 * print out CPU identification information.
1013 */ 1014 */
1014 printf("%s", cpu_getmodel()); 1015 printf("%s", cpu_getmodel());
1015 for(s = cpu_getmodel(); *s; ++s) 1016 for(s = cpu_getmodel(); *s; ++s)
1016 if(strncasecmp(s, "MHz", 3) == 0) 1017 if(strncasecmp(s, "MHz", 3) == 0)
1017 goto skipMHz; 1018 goto skipMHz;
1018 printf(", %ldMHz", hwrpb->rpb_cc_freq / 1000000); 1019 printf(", %ldMHz", hwrpb->rpb_cc_freq / 1000000);
1019skipMHz: 1020skipMHz:
1020 printf(", s/n "); 1021 printf(", s/n ");
1021 for (i = 0; i < 10; i++) 1022 for (i = 0; i < 10; i++)
1022 printf("%c", hwrpb->rpb_ssn[i]); 1023 printf("%c", hwrpb->rpb_ssn[i]);
1023 printf("\n"); 1024 printf("\n");
1024 printf("%ld byte page size, %d processor%s.\n", 1025 printf("%ld byte page size, %d processor%s.\n",
1025 hwrpb->rpb_page_size, ncpus, ncpus == 1 ? "" : "s"); 1026 hwrpb->rpb_page_size, ncpus, ncpus == 1 ? "" : "s");
1026} 1027}
1027 1028
1028int waittime = -1; 1029int waittime = -1;
1029struct pcb dumppcb; 1030struct pcb dumppcb;
1030 1031
1031void 1032void
1032cpu_reboot(int howto, char *bootstr) 1033cpu_reboot(int howto, char *bootstr)
1033{ 1034{
1034#if defined(MULTIPROCESSOR) 1035#if defined(MULTIPROCESSOR)
1035 u_long cpu_id = cpu_number(); 1036 u_long cpu_id = cpu_number();
1036 u_long wait_mask; 1037 u_long wait_mask;
1037 int i; 1038 int i;
1038#endif 1039#endif
1039 1040
1040 /* If "always halt" was specified as a boot flag, obey. */ 1041 /* If "always halt" was specified as a boot flag, obey. */
1041 if ((boothowto & RB_HALT) != 0) 1042 if ((boothowto & RB_HALT) != 0)
1042 howto |= RB_HALT; 1043 howto |= RB_HALT;
1043 1044
1044 boothowto = howto; 1045 boothowto = howto;
1045 1046
1046 /* If system is cold, just halt. */ 1047 /* If system is cold, just halt. */
1047 if (cold) { 1048 if (cold) {
1048 boothowto |= RB_HALT; 1049 boothowto |= RB_HALT;
1049 goto haltsys; 1050 goto haltsys;
1050 } 1051 }
1051 1052
1052 if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) { 1053 if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) {
1053 waittime = 0; 1054 waittime = 0;
1054 vfs_shutdown(); 1055 vfs_shutdown();
1055 /* 1056 /*
1056 * If we've been adjusting the clock, the todr 1057 * If we've been adjusting the clock, the todr
1057 * will be out of synch; adjust it now. 1058 * will be out of synch; adjust it now.
1058 */ 1059 */
1059 resettodr(); 1060 resettodr();
1060 } 1061 }
1061 1062
1062 /* Disable interrupts. */ 1063 /* Disable interrupts. */
1063 splhigh(); 1064 splhigh();
1064 1065
1065#if defined(MULTIPROCESSOR) 1066#if defined(MULTIPROCESSOR)
1066 /* 1067 /*
1067 * Halt all other CPUs. If we're not the primary, the 1068 * Halt all other CPUs. If we're not the primary, the
1068 * primary will spin, waiting for us to halt. 1069 * primary will spin, waiting for us to halt.
1069 */ 1070 */
1070 cpu_id = cpu_number(); /* may have changed cpu */ 1071 cpu_id = cpu_number(); /* may have changed cpu */
1071 wait_mask = (1UL << cpu_id) | (1UL << hwrpb->rpb_primary_cpu_id); 1072 wait_mask = (1UL << cpu_id) | (1UL << hwrpb->rpb_primary_cpu_id);
1072 1073
1073 alpha_broadcast_ipi(ALPHA_IPI_HALT); 1074 alpha_broadcast_ipi(ALPHA_IPI_HALT);
1074 1075
1075 /* Ensure any CPUs paused by DDB resume execution so they can halt */ 1076 /* Ensure any CPUs paused by DDB resume execution so they can halt */
1076 cpus_paused = 0; 1077 cpus_paused = 0;
1077 1078
1078 for (i = 0; i < 10000; i++) { 1079 for (i = 0; i < 10000; i++) {
1079 alpha_mb(); 1080 alpha_mb();
1080 if (cpus_running == wait_mask) 1081 if (cpus_running == wait_mask)
1081 break; 1082 break;
1082 delay(1000); 1083 delay(1000);
1083 } 1084 }
1084 alpha_mb(); 1085 alpha_mb();
1085 if (cpus_running != wait_mask) 1086 if (cpus_running != wait_mask)
1086 printf("WARNING: Unable to halt secondary CPUs (0x%lx)\n", 1087 printf("WARNING: Unable to halt secondary CPUs (0x%lx)\n",
1087 cpus_running); 1088 cpus_running);
1088#endif /* MULTIPROCESSOR */ 1089#endif /* MULTIPROCESSOR */
1089 1090
1090 /* If rebooting and a dump is requested do it. */ 1091 /* If rebooting and a dump is requested do it. */
1091#if 0 1092#if 0
1092 if ((boothowto & (RB_DUMP | RB_HALT)) == RB_DUMP) 1093 if ((boothowto & (RB_DUMP | RB_HALT)) == RB_DUMP)
1093#else 1094#else
1094 if (boothowto & RB_DUMP) 1095 if (boothowto & RB_DUMP)
1095#endif 1096#endif
1096 dumpsys(); 1097 dumpsys();
1097 1098
1098haltsys: 1099haltsys:
1099 1100
1100 /* run any shutdown hooks */ 1101 /* run any shutdown hooks */
1101 doshutdownhooks(); 1102 doshutdownhooks();
1102 1103
1103 pmf_system_shutdown(boothowto); 1104 pmf_system_shutdown(boothowto);
1104 1105
1105#ifdef BOOTKEY 1106#ifdef BOOTKEY
1106 printf("hit any key to %s...\n", howto & RB_HALT ? "halt" : "reboot"); 1107 printf("hit any key to %s...\n", howto & RB_HALT ? "halt" : "reboot");
1107 cnpollc(1); /* for proper keyboard command handling */ 1108 cnpollc(1); /* for proper keyboard command handling */
1108 cngetc(); 1109 cngetc();
1109 cnpollc(0); 1110 cnpollc(0);
1110 printf("\n"); 1111 printf("\n");
1111#endif 1112#endif
1112 1113
1113 /* Finally, powerdown/halt/reboot the system. */ 1114 /* Finally, powerdown/halt/reboot the system. */
1114 if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN && 1115 if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN &&
1115 platform.powerdown != NULL) { 1116 platform.powerdown != NULL) {
1116 (*platform.powerdown)(); 1117 (*platform.powerdown)();
1117 printf("WARNING: powerdown failed!\n"); 1118 printf("WARNING: powerdown failed!\n");
1118 } 1119 }
1119 printf("%s\n\n", (boothowto & RB_HALT) ? "halted." : "rebooting..."); 1120 printf("%s\n\n", (boothowto & RB_HALT) ? "halted." : "rebooting...");
1120#if defined(MULTIPROCESSOR) 1121#if defined(MULTIPROCESSOR)
1121 if (cpu_id != hwrpb->rpb_primary_cpu_id) 1122 if (cpu_id != hwrpb->rpb_primary_cpu_id)
1122 cpu_halt(); 1123 cpu_halt();
1123 else 1124 else
1124#endif 1125#endif
1125 prom_halt(boothowto & RB_HALT); 1126 prom_halt(boothowto & RB_HALT);
1126 /*NOTREACHED*/ 1127 /*NOTREACHED*/
1127} 1128}
1128 1129
1129/* 1130/*
1130 * These variables are needed by /sbin/savecore 1131 * These variables are needed by /sbin/savecore
1131 */ 1132 */
1132uint32_t dumpmag = 0x8fca0101; /* magic number */ 1133uint32_t dumpmag = 0x8fca0101; /* magic number */
1133int dumpsize = 0; /* pages */ 1134int dumpsize = 0; /* pages */
1134long dumplo = 0; /* blocks */ 1135long dumplo = 0; /* blocks */
1135 1136
1136/* 1137/*
1137 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers. 1138 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
1138 */ 1139 */
1139int 1140int
1140cpu_dumpsize(void) 1141cpu_dumpsize(void)
1141{ 1142{
1142 int size; 1143 int size;
1143 1144
1144 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) + 1145 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
1145 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t)); 1146 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
1146 if (roundup(size, dbtob(1)) != dbtob(1)) 1147 if (roundup(size, dbtob(1)) != dbtob(1))
1147 return -1; 1148 return -1;
1148 1149
1149 return (1); 1150 return (1);
1150} 1151}
1151 1152
1152/* 1153/*
1153 * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped. 1154 * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped.
1154 */ 1155 */
1155u_long 1156u_long
1156cpu_dump_mempagecnt(void) 1157cpu_dump_mempagecnt(void)
1157{ 1158{
1158 u_long i, n; 1159 u_long i, n;
1159 1160
1160 n = 0; 1161 n = 0;
1161 for (i = 0; i < mem_cluster_cnt; i++) 1162 for (i = 0; i < mem_cluster_cnt; i++)
1162 n += atop(mem_clusters[i].size); 1163 n += atop(mem_clusters[i].size);
1163 return (n); 1164 return (n);
1164} 1165}
1165 1166
1166/* 1167/*
1167 * cpu_dump: dump machine-dependent kernel core dump headers. 1168 * cpu_dump: dump machine-dependent kernel core dump headers.
1168 */ 1169 */
1169int 1170int
1170cpu_dump(void) 1171cpu_dump(void)
1171{ 1172{
1172 int (*dump)(dev_t, daddr_t, void *, size_t); 1173 int (*dump)(dev_t, daddr_t, void *, size_t);
1173 char buf[dbtob(1)]; 1174 char buf[dbtob(1)];
1174 kcore_seg_t *segp; 1175 kcore_seg_t *segp;
1175 cpu_kcore_hdr_t *cpuhdrp; 1176 cpu_kcore_hdr_t *cpuhdrp;
1176 phys_ram_seg_t *memsegp; 1177 phys_ram_seg_t *memsegp;
1177 const struct bdevsw *bdev; 1178 const struct bdevsw *bdev;
1178 int i; 1179 int i;
1179 1180
1180 bdev = bdevsw_lookup(dumpdev); 1181 bdev = bdevsw_lookup(dumpdev);
1181 if (bdev == NULL) 1182 if (bdev == NULL)
1182 return (ENXIO); 1183 return (ENXIO);
1183 dump = bdev->d_dump; 1184 dump = bdev->d_dump;
1184 1185
1185 memset(buf, 0, sizeof buf); 1186 memset(buf, 0, sizeof buf);
1186 segp = (kcore_seg_t *)buf; 1187 segp = (kcore_seg_t *)buf;
1187 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))]; 1188 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
1188 memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) + 1189 memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) +
1189 ALIGN(sizeof(*cpuhdrp))]; 1190 ALIGN(sizeof(*cpuhdrp))];
1190 1191
1191 /* 1192 /*
1192 * Generate a segment header. 1193 * Generate a segment header.
1193 */ 1194 */
1194 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 1195 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1195 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp)); 1196 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
1196 1197
1197 /* 1198 /*
1198 * Add the machine-dependent header info. 1199 * Add the machine-dependent header info.
1199 */ 1200 */
1200 cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map); 1201 cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map);
1201 cpuhdrp->page_size = PAGE_SIZE; 1202 cpuhdrp->page_size = PAGE_SIZE;
1202 cpuhdrp->nmemsegs = mem_cluster_cnt; 1203 cpuhdrp->nmemsegs = mem_cluster_cnt;
1203 1204
1204 /* 1205 /*
1205 * Fill in the memory segment descriptors. 1206 * Fill in the memory segment descriptors.
1206 */ 1207 */
1207 for (i = 0; i < mem_cluster_cnt; i++) { 1208 for (i = 0; i < mem_cluster_cnt; i++) {
1208 memsegp[i].start = mem_clusters[i].start; 1209 memsegp[i].start = mem_clusters[i].start;
1209 memsegp[i].size = mem_clusters[i].size & ~PAGE_MASK; 1210 memsegp[i].size = mem_clusters[i].size & ~PAGE_MASK;
1210 } 1211 }
1211 1212
1212 return (dump(dumpdev, dumplo, (void *)buf, dbtob(1))); 1213 return (dump(dumpdev, dumplo, (void *)buf, dbtob(1)));
1213} 1214}
1214 1215
1215/* 1216/*
1216 * This is called by main to set dumplo and dumpsize. 1217 * This is called by main to set dumplo and dumpsize.
1217 * Dumps always skip the first PAGE_SIZE of disk space 1218 * Dumps always skip the first PAGE_SIZE of disk space
1218 * in case there might be a disk label stored there. 1219 * in case there might be a disk label stored there.
1219 * If there is extra space, put dump at the end to 1220 * If there is extra space, put dump at the end to
1220 * reduce the chance that swapping trashes it. 1221 * reduce the chance that swapping trashes it.
1221 */ 1222 */
1222void 1223void
1223cpu_dumpconf(void) 1224cpu_dumpconf(void)
1224{ 1225{
1225 int nblks, dumpblks; /* size of dump area */ 1226 int nblks, dumpblks; /* size of dump area */
1226 1227
1227 if (dumpdev == NODEV) 1228 if (dumpdev == NODEV)
1228 goto bad; 1229 goto bad;
1229 nblks = bdev_size(dumpdev); 1230 nblks = bdev_size(dumpdev);
1230 if (nblks <= ctod(1)) 1231 if (nblks <= ctod(1))
1231 goto bad; 1232 goto bad;
1232 1233
1233 dumpblks = cpu_dumpsize(); 1234 dumpblks = cpu_dumpsize();
1234 if (dumpblks < 0) 1235 if (dumpblks < 0)
1235 goto bad; 1236 goto bad;
1236 dumpblks += ctod(cpu_dump_mempagecnt()); 1237 dumpblks += ctod(cpu_dump_mempagecnt());
1237 1238
1238 /* If dump won't fit (incl. room for possible label), punt. */ 1239 /* If dump won't fit (incl. room for possible label), punt. */
1239 if (dumpblks > (nblks - ctod(1))) 1240 if (dumpblks > (nblks - ctod(1)))
1240 goto bad; 1241 goto bad;
1241 1242
1242 /* Put dump at end of partition */ 1243 /* Put dump at end of partition */
1243 dumplo = nblks - dumpblks; 1244 dumplo = nblks - dumpblks;
1244 1245
1245 /* dumpsize is in page units, and doesn't include headers. */ 1246 /* dumpsize is in page units, and doesn't include headers. */
1246 dumpsize = cpu_dump_mempagecnt(); 1247 dumpsize = cpu_dump_mempagecnt();
1247 return; 1248 return;
1248 1249
1249bad: 1250bad:
1250 dumpsize = 0; 1251 dumpsize = 0;
1251 return; 1252 return;
1252} 1253}
1253 1254
1254/* 1255/*
1255 * Dump the kernel's image to the swap partition. 1256 * Dump the kernel's image to the swap partition.
1256 */ 1257 */
1257#define BYTES_PER_DUMP PAGE_SIZE 1258#define BYTES_PER_DUMP PAGE_SIZE
1258 1259
1259void 1260void
1260dumpsys(void) 1261dumpsys(void)
1261{ 1262{
1262 const struct bdevsw *bdev; 1263 const struct bdevsw *bdev;
1263 u_long totalbytesleft, bytes, i, n, memcl; 1264 u_long totalbytesleft, bytes, i, n, memcl;
1264 u_long maddr; 1265 u_long maddr;
1265 int psize; 1266 int psize;
1266 daddr_t blkno; 1267 daddr_t blkno;
1267 int (*dump)(dev_t, daddr_t, void *, size_t); 1268 int (*dump)(dev_t, daddr_t, void *, size_t);
1268 int error; 1269 int error;
1269 1270
1270 /* Save registers. */ 1271 /* Save registers. */
1271 savectx(&dumppcb); 1272 savectx(&dumppcb);
1272 1273
1273 if (dumpdev == NODEV) 1274 if (dumpdev == NODEV)
1274 return; 1275 return;
1275 bdev = bdevsw_lookup(dumpdev); 1276 bdev = bdevsw_lookup(dumpdev);
1276 if (bdev == NULL || bdev->d_psize == NULL) 1277 if (bdev == NULL || bdev->d_psize == NULL)
1277 return; 1278 return;
1278 1279
1279 /* 1280 /*
1280 * For dumps during autoconfiguration, 1281 * For dumps during autoconfiguration,
1281 * if dump device has already configured... 1282 * if dump device has already configured...
1282 */ 1283 */
1283 if (dumpsize == 0) 1284 if (dumpsize == 0)
1284 cpu_dumpconf(); 1285 cpu_dumpconf();
1285 if (dumplo <= 0) { 1286 if (dumplo <= 0) {
1286 printf("\ndump to dev %u,%u not possible\n", 1287 printf("\ndump to dev %u,%u not possible\n",
1287 major(dumpdev), minor(dumpdev)); 1288 major(dumpdev), minor(dumpdev));
1288 return; 1289 return;
1289 } 1290 }
1290 printf("\ndumping to dev %u,%u offset %ld\n", 1291 printf("\ndumping to dev %u,%u offset %ld\n",
1291 major(dumpdev), minor(dumpdev), dumplo); 1292 major(dumpdev), minor(dumpdev), dumplo);
1292 1293
1293 psize = bdev_size(dumpdev); 1294 psize = bdev_size(dumpdev);
1294 printf("dump "); 1295 printf("dump ");
1295 if (psize == -1) { 1296 if (psize == -1) {
1296 printf("area unavailable\n"); 1297 printf("area unavailable\n");
1297 return; 1298 return;
1298 } 1299 }
1299 1300
1300 /* XXX should purge all outstanding keystrokes. */ 1301 /* XXX should purge all outstanding keystrokes. */
1301 1302
1302 if ((error = cpu_dump()) != 0) 1303 if ((error = cpu_dump()) != 0)
1303 goto err; 1304 goto err;
1304 1305
1305 totalbytesleft = ptoa(cpu_dump_mempagecnt()); 1306 totalbytesleft = ptoa(cpu_dump_mempagecnt());
1306 blkno = dumplo + cpu_dumpsize(); 1307 blkno = dumplo + cpu_dumpsize();
1307 dump = bdev->d_dump; 1308 dump = bdev->d_dump;
1308 error = 0; 1309 error = 0;
1309 1310
1310 for (memcl = 0; memcl < mem_cluster_cnt; memcl++) { 1311 for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
1311 maddr = mem_clusters[memcl].start; 1312 maddr = mem_clusters[memcl].start;
1312 bytes = mem_clusters[memcl].size & ~PAGE_MASK; 1313 bytes = mem_clusters[memcl].size & ~PAGE_MASK;
1313 1314
1314 for (i = 0; i < bytes; i += n, totalbytesleft -= n) { 1315 for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
1315 1316
1316 /* Print out how many MBs we to go. */ 1317 /* Print out how many MBs we to go. */
1317 if ((totalbytesleft % (1024*1024)) == 0) 1318 if ((totalbytesleft % (1024*1024)) == 0)
1318 printf_nolog("%ld ", 1319 printf_nolog("%ld ",
1319 totalbytesleft / (1024 * 1024)); 1320 totalbytesleft / (1024 * 1024));
1320 1321
1321 /* Limit size for next transfer. */ 1322 /* Limit size for next transfer. */
1322 n = bytes - i; 1323 n = bytes - i;
1323 if (n > BYTES_PER_DUMP) 1324 if (n > BYTES_PER_DUMP)
1324 n = BYTES_PER_DUMP; 1325 n = BYTES_PER_DUMP;
1325 1326
1326 error = (*dump)(dumpdev, blkno, 1327 error = (*dump)(dumpdev, blkno,
1327 (void *)ALPHA_PHYS_TO_K0SEG(maddr), n); 1328 (void *)ALPHA_PHYS_TO_K0SEG(maddr), n);
1328 if (error) 1329 if (error)
1329 goto err; 1330 goto err;
1330 maddr += n; 1331 maddr += n;
1331 blkno += btodb(n); /* XXX? */ 1332 blkno += btodb(n); /* XXX? */
1332 1333
1333 /* XXX should look for keystrokes, to cancel. */ 1334 /* XXX should look for keystrokes, to cancel. */
1334 } 1335 }
1335 } 1336 }
1336 1337
1337err: 1338err:
1338 switch (error) { 1339 switch (error) {
1339 1340
1340 case ENXIO: 1341 case ENXIO:
1341 printf("device bad\n"); 1342 printf("device bad\n");
1342 break; 1343 break;
1343 1344
1344 case EFAULT: 1345 case EFAULT:
1345 printf("device not ready\n"); 1346 printf("device not ready\n");
1346 break; 1347 break;
1347 1348
1348 case EINVAL: 1349 case EINVAL:
1349 printf("area improper\n"); 1350 printf("area improper\n");
1350 break; 1351 break;
1351 1352
1352 case EIO: 1353 case EIO:
1353 printf("i/o error\n"); 1354 printf("i/o error\n");
1354 break; 1355 break;
1355 1356
1356 case EINTR: 1357 case EINTR:
1357 printf("aborted from console\n"); 1358 printf("aborted from console\n");
1358 break; 1359 break;
1359 1360
1360 case 0: 1361 case 0:
1361 printf("succeeded\n"); 1362 printf("succeeded\n");
1362 break; 1363 break;
1363 1364
1364 default: 1365 default:
1365 printf("error %d\n", error); 1366 printf("error %d\n", error);
1366 break; 1367 break;
1367 } 1368 }
1368 printf("\n\n"); 1369 printf("\n\n");
1369 delay(1000); 1370 delay(1000);
1370} 1371}
1371 1372
1372void 1373void
1373frametoreg(const struct trapframe *framep, struct reg *regp) 1374frametoreg(const struct trapframe *framep, struct reg *regp)
1374{ 1375{
1375 1376
1376 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0]; 1377 regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0];
1377 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0]; 1378 regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0];
1378 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1]; 1379 regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1];
1379 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2]; 1380 regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2];
1380 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3]; 1381 regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3];
1381 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4]; 1382 regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4];
1382 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5]; 1383 regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5];
1383 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6]; 1384 regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6];
1384 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7]; 1385 regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7];
1385 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0]; 1386 regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0];
1386 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1]; 1387 regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1];
1387 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2]; 1388 regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2];
1388 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3]; 1389 regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3];
1389 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4]; 1390 regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4];
1390 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5]; 1391 regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5];
1391 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6]; 1392 regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6];
1392 regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0]; 1393 regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0];
1393 regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1]; 1394 regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1];
1394 regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2]; 1395 regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2];
1395 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3]; 1396 regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3];
1396 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4]; 1397 regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4];
1397 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5]; 1398 regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5];
1398 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8]; 1399 regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8];
1399 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9]; 1400 regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9];
1400 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10]; 1401 regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10];
1401 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11]; 1402 regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11];
1402 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA]; 1403 regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA];
1403 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12]; 1404 regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12];
1404 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT]; 1405 regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT];
1405 regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP]; 1406 regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP];
1406 /* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */ 1407 /* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */
1407 regp->r_regs[R_ZERO] = 0; 1408 regp->r_regs[R_ZERO] = 0;
1408} 1409}
1409 1410
1410void 1411void
1411regtoframe(const struct reg *regp, struct trapframe *framep) 1412regtoframe(const struct reg *regp, struct trapframe *framep)
1412{ 1413{
1413 1414
1414 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0]; 1415 framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0];
1415 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0]; 1416 framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0];
1416 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1]; 1417 framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1];
1417 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2]; 1418 framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2];
1418 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3]; 1419 framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3];
1419 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4]; 1420 framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4];
1420 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5]; 1421 framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5];
1421 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6]; 1422 framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6];
1422 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7]; 1423 framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7];
1423 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0]; 1424 framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0];
1424 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1]; 1425 framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1];
1425 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2]; 1426 framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2];
1426 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3]; 1427 framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3];
1427 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4]; 1428 framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4];
1428 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5]; 1429 framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5];
1429 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6]; 1430 framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6];
1430 framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0]; 1431 framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0];
1431 framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1]; 1432 framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1];
1432 framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2]; 1433 framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2];
1433 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3]; 1434 framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3];
1434 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4]; 1435 framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4];
1435 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5]; 1436 framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5];
1436 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8]; 1437 framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8];
1437 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9]; 1438 framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9];
1438 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10]; 1439 framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10];
1439 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11]; 1440 framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11];
1440 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA]; 1441 framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA];
1441 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12]; 1442 framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12];
1442 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT]; 1443 framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT];
1443 framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP]; 1444 framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP];
1444 /* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */ 1445 /* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */
1445 /* ??? = regp->r_regs[R_ZERO]; */ 1446 /* ??? = regp->r_regs[R_ZERO]; */
1446} 1447}
1447 1448
1448void 1449void
1449printregs(struct reg *regp) 1450printregs(struct reg *regp)
1450{ 1451{
1451 int i; 1452 int i;
1452 1453
1453 for (i = 0; i < 32; i++) 1454 for (i = 0; i < 32; i++)
1454 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i], 1455 printf("R%d:\t0x%016lx%s", i, regp->r_regs[i],
1455 i & 1 ? "\n" : "\t"); 1456 i & 1 ? "\n" : "\t");
1456} 1457}
1457 1458
1458void 1459void
1459regdump(struct trapframe *framep) 1460regdump(struct trapframe *framep)
1460{ 1461{
1461 struct reg reg; 1462 struct reg reg;
1462 1463
1463 frametoreg(framep, &reg); 1464 frametoreg(framep, &reg);
1464 reg.r_regs[R_SP] = alpha_pal_rdusp(); 1465 reg.r_regs[R_SP] = alpha_pal_rdusp();
1465 1466
1466 printf("REGISTERS:\n"); 1467 printf("REGISTERS:\n");
1467 printregs(&reg); 1468 printregs(&reg);
1468} 1469}
1469 1470
1470 1471
1471 1472
1472void * 1473void *
1473getframe(const struct lwp *l, int sig, int *onstack) 1474getframe(const struct lwp *l, int sig, int *onstack)
1474{ 1475{
1475 void *frame; 1476 void *frame;
1476 1477
1477 /* Do we need to jump onto the signal stack? */ 1478 /* Do we need to jump onto the signal stack? */
1478 *onstack = 1479 *onstack =
1479 (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 && 1480 (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
1480 (SIGACTION(l->l_proc, sig).sa_flags & SA_ONSTACK) != 0; 1481 (SIGACTION(l->l_proc, sig).sa_flags & SA_ONSTACK) != 0;
1481 1482
1482 if (*onstack) 1483 if (*onstack)
1483 frame = (void *)((char *)l->l_sigstk.ss_sp + 1484 frame = (void *)((char *)l->l_sigstk.ss_sp +
1484 l->l_sigstk.ss_size); 1485 l->l_sigstk.ss_size);
1485 else 1486 else
1486 frame = (void *)(alpha_pal_rdusp()); 1487 frame = (void *)(alpha_pal_rdusp());
1487 return (frame); 1488 return (frame);
1488} 1489}
1489 1490
1490void 1491void
1491buildcontext(struct lwp *l, const void *catcher, const void *tramp, const void *fp) 1492buildcontext(struct lwp *l, const void *catcher, const void *tramp, const void *fp)
1492{ 1493{
1493 struct trapframe *tf = l->l_md.md_tf; 1494 struct trapframe *tf = l->l_md.md_tf;
1494 1495
1495 tf->tf_regs[FRAME_RA] = (uint64_t)tramp; 1496 tf->tf_regs[FRAME_RA] = (uint64_t)tramp;
1496 tf->tf_regs[FRAME_PC] = (uint64_t)catcher; 1497 tf->tf_regs[FRAME_PC] = (uint64_t)catcher;
1497 tf->tf_regs[FRAME_T12] = (uint64_t)catcher; 1498 tf->tf_regs[FRAME_T12] = (uint64_t)catcher;
1498 alpha_pal_wrusp((unsigned long)fp); 1499 alpha_pal_wrusp((unsigned long)fp);
1499} 1500}
1500 1501
1501 1502
1502/* 1503/*
1503 * Send an interrupt to process, new style 1504 * Send an interrupt to process, new style
1504 */ 1505 */
/*
 * sendsig_siginfo:
 *
 *	Deliver signal 'ksi' to the current LWP: build a sigframe
 *	(siginfo + ucontext) on the appropriate stack, copy it out to
 *	user space, and redirect the trapframe so the process enters
 *	the handler on return to user mode.  Called with p->p_lock held.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack, sig = ksi->ksi_signo, error;
	struct sigframe_siginfo *fp, frame;
	struct trapframe *tf;
	sig_t catcher = SIGACTION(p, ksi->ksi_signo).sa_handler;

	/* Pick normal vs. alternate signal stack; sets 'onstack'. */
	fp = (struct sigframe_siginfo *)getframe(l,ksi->ksi_signo,&onstack);
	tf = l->l_md.md_tf;

	/* Allocate space for the signal handler context. */
	fp--;

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig_siginfo(%d): sig %d ssp %p usp %p\n", p->p_pid,
		    sig, &onstack, fp);
#endif

	/* Build stack frame for signal trampoline. */
	memset(&frame, 0, sizeof(frame));
	frame.sf_si._info = ksi->ksi_info;
	frame.sf_uc.uc_flags = _UC_SIGMASK;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_link = l->l_ctxlink;
	/* Record current sigstack state so sigreturn can restore it. */
	frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
	    ? _UC_SETSTACK : _UC_CLRSTACK;
	sendsig_reset(l, sig);
	/* Drop the proc lock across copyout(); it can fault and sleep. */
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig_siginfo(%d): copyout failed on sig %d\n",
			    p->p_pid, sig);
#endif
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig_siginfo(%d): sig %d usp %p code %x\n",
		    p->p_pid, sig, fp, ksi->ksi_code);
#endif

	/*
	 * Set up the registers to directly invoke the signal handler.  The
	 * signal trampoline is then used to return from the signal.  Note
	 * the trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */

	/* Handler arguments: signo, &siginfo, &ucontext. */
	tf->tf_regs[FRAME_A0] = sig;
	tf->tf_regs[FRAME_A1] = (uint64_t)&fp->sf_si;
	tf->tf_regs[FRAME_A2] = (uint64_t)&fp->sf_uc;

	buildcontext(l,catcher,ps->sa_sigdesc[sig].sd_tramp,fp);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig_siginfo(%d): pc %lx, catcher %lx\n", p->p_pid,
		    tf->tf_regs[FRAME_PC], tf->tf_regs[FRAME_A3]);
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig_siginfo(%d): sig %d returns\n",
		    p->p_pid, sig);
#endif
}
1588 1589
1589/* 1590/*
1590 * machine dependent system variables. 1591 * machine dependent system variables.
1591 */ 1592 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* Create the machdep root node. */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "console_device", NULL,
	    sysctl_consdev, 0, NULL, sizeof(dev_t),
	    CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "root_device", NULL,
	    sysctl_root_device, 0, NULL, 0,
	    CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
	/* Unaligned-access policy knobs (read-write). */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "unaligned_print",
	    SYSCTL_DESCR("Warn about unaligned accesses"),
	    NULL, 0, &alpha_unaligned_print, 0,
	    CTL_MACHDEP, CPU_UNALIGNED_PRINT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "unaligned_fix",
	    SYSCTL_DESCR("Fix up unaligned accesses"),
	    NULL, 0, &alpha_unaligned_fix, 0,
	    CTL_MACHDEP, CPU_UNALIGNED_FIX, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "unaligned_sigbus",
	    SYSCTL_DESCR("Do SIGBUS for fixed unaligned accesses"),
	    NULL, 0, &alpha_unaligned_sigbus, 0,
	    CTL_MACHDEP, CPU_UNALIGNED_SIGBUS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "booted_kernel", NULL,
	    NULL, 0, bootinfo.booted_kernel, 0,
	    CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fp_sync_complete", NULL,
	    NULL, 0, &alpha_fp_sync_complete, 0,
	    CTL_MACHDEP, CPU_FP_SYNC_COMPLETE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "cctr", NULL,
	    NULL, 0, &alpha_use_cctr, 0,
	    CTL_MACHDEP, CPU_CCTR, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_BOOL, "is_qemu", NULL,
	    NULL, 0, &alpha_is_qemu, 0,
	    CTL_MACHDEP, CPU_IS_QEMU, CTL_EOL);
	/* Enables FP software-completion debugging output. */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fp_complete_debug", NULL,
	    NULL, 0, &alpha_fp_complete_debug, 0,
	    CTL_MACHDEP, CPU_FP_COMPLETE_DEBUG, CTL_EOL);
}
1650 1656
1651/* 1657/*
1652 * Set registers on exec. 1658 * Set registers on exec.
1653 */ 1659 */
void
setregs(register struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe *tfp = l->l_md.md_tf;
	struct pcb *pcb;
#ifdef DEBUG
	int i;
#endif

#ifdef DEBUG
	/*
	 * Crash and dump, if the user requested it.
	 */
	if (boothowto & RB_DUMP)
		panic("crash requested by boot flags");
#endif

#ifdef DEBUG
	/* Poison the frame so use of stale registers is visible. */
	for (i = 0; i < FRAME_SIZE; i++)
		tfp->tf_regs[i] = 0xbabefacedeadbeef;
#else
	memset(tfp->tf_regs, 0, FRAME_SIZE * sizeof tfp->tf_regs[0]);
#endif
	pcb = lwp_getpcb(l);
	memset(&pcb->pcb_fp, 0, sizeof(pcb->pcb_fp));
	alpha_pal_wrusp(stack);
	tfp->tf_regs[FRAME_PS] = ALPHA_PSL_USERSET;
	/* Entry point must be 4-byte aligned. */
	tfp->tf_regs[FRAME_PC] = pack->ep_entry & ~3;

	tfp->tf_regs[FRAME_A0] = stack;			/* a0 = sp */
	tfp->tf_regs[FRAME_A1] = 0;			/* a1 = rtld cleanup */
	tfp->tf_regs[FRAME_A2] = 0;			/* a2 = rtld object */
	tfp->tf_regs[FRAME_A3] = l->l_proc->p_psstrp;	/* a3 = ps_strings */
	tfp->tf_regs[FRAME_T12] = tfp->tf_regs[FRAME_PC];	/* a.k.a. PV */

	/*
	 * Unless the process asked to inherit its parent's IEEE FP
	 * state, reset the software FP_C word and hardware FPCR to
	 * the (mutually consistent) defaults.
	 */
	if (__predict_true((l->l_md.md_flags & IEEE_INHERIT) == 0)) {
		l->l_md.md_flags =
		    (l->l_md.md_flags & ~(MDLWP_FP_C | MDLWP_FPACTIVE)) |
		    FP_C_DEFAULT;
		pcb->pcb_fp.fpr_cr = FPCR_DEFAULT;
	}
}
1694 1702
1695void (*alpha_delay_fn)(unsigned long); 1703void (*alpha_delay_fn)(unsigned long);
1696 1704
1697/* 1705/*
1698 * Wait "n" microseconds. 1706 * Wait "n" microseconds.
1699 */ 1707 */
void
delay(unsigned long n)
{
	unsigned long pcc0, pcc1, curcycle, cycles, usec;

	if (n == 0)
		return;

	/*
	 * If we have an alternative delay function, go ahead and
	 * use it.
	 */
	if (alpha_delay_fn != NULL) {
		(*alpha_delay_fn)(n);
		return;
	}

	/*
	 * Stay on this CPU: the PCC is a per-CPU counter, so a
	 * migration mid-loop would make the deltas meaningless.
	 */
	lwp_t * const l = curlwp;
	KPREEMPT_DISABLE(l);

	/* The PCC is only 32 bits wide; mask off the high half. */
	pcc0 = alpha_rpcc() & 0xffffffffUL;
	cycles = 0;
	usec = 0;

	while (usec <= n) {
		/*
		 * Get the next CPU cycle count- assumes that we cannot
		 * have had more than one 32 bit overflow.
		 */
		pcc1 = alpha_rpcc() & 0xffffffffUL;
		if (pcc1 < pcc0)
			curcycle = (pcc1 + 0x100000000UL) - pcc0;
		else
			curcycle = pcc1 - pcc0;

		/*
		 * We now have the number of processor cycles since we
		 * last checked.  Add the current cycle count to the
		 * running total.  If it's over cycles_per_usec, increment
		 * the usec counter.
		 */
		cycles += curcycle;
		while (cycles > cycles_per_usec) {
			usec++;
			cycles -= cycles_per_usec;
		}
		pcc0 = pcc1;
	}

	KPREEMPT_ENABLE(l);
}
1751 1759
1752#ifdef EXEC_ECOFF 1760#ifdef EXEC_ECOFF
1753void 1761void
1754cpu_exec_ecoff_setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack) 1762cpu_exec_ecoff_setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack)
1755{ 1763{
1756 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr; 1764 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
1757 1765
1758 l->l_md.md_tf->tf_regs[FRAME_GP] = execp->a.gp_value; 1766 l->l_md.md_tf->tf_regs[FRAME_GP] = execp->a.gp_value;
1759} 1767}
1760 1768
1761/* 1769/*
1762 * cpu_exec_ecoff_hook(): 1770 * cpu_exec_ecoff_hook():
1763 * cpu-dependent ECOFF format hook for execve(). 1771 * cpu-dependent ECOFF format hook for execve().
1764 * 1772 *
1765 * Do any machine-dependent diddling of the exec package when doing ECOFF. 1773 * Do any machine-dependent diddling of the exec package when doing ECOFF.
1766 * 1774 *
1767 */ 1775 */
1768int 1776int
1769cpu_exec_ecoff_probe(struct lwp *l, struct exec_package *epp) 1777cpu_exec_ecoff_probe(struct lwp *l, struct exec_package *epp)
1770{ 1778{
1771 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr; 1779 struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
1772 int error; 1780 int error;
1773 1781
1774 if (execp->f.f_magic == ECOFF_MAGIC_NETBSD_ALPHA) 1782 if (execp->f.f_magic == ECOFF_MAGIC_NETBSD_ALPHA)
1775 error = 0; 1783 error = 0;
1776 else 1784 else
1777 error = ENOEXEC; 1785 error = ENOEXEC;
1778 1786
1779 return (error); 1787 return (error);
1780} 1788}
1781#endif /* EXEC_ECOFF */ 1789#endif /* EXEC_ECOFF */
1782 1790
1783int 1791int
1784mm_md_physacc(paddr_t pa, vm_prot_t prot) 1792mm_md_physacc(paddr_t pa, vm_prot_t prot)
1785{ 1793{
1786 u_quad_t size; 1794 u_quad_t size;
1787 int i; 1795 int i;
1788 1796
1789 for (i = 0; i < mem_cluster_cnt; i++) { 1797 for (i = 0; i < mem_cluster_cnt; i++) {
1790 if (pa < mem_clusters[i].start) 1798 if (pa < mem_clusters[i].start)
1791 continue; 1799 continue;
1792 size = mem_clusters[i].size & ~PAGE_MASK; 1800 size = mem_clusters[i].size & ~PAGE_MASK;
1793 if (pa >= (mem_clusters[i].start + size)) 1801 if (pa >= (mem_clusters[i].start + size))
1794 continue; 1802 continue;
1795 if ((prot & mem_clusters[i].size & PAGE_MASK) == prot) 1803 if ((prot & mem_clusters[i].size & PAGE_MASK) == prot)
1796 return 0; 1804 return 0;
1797 } 1805 }
1798 return EFAULT; 1806 return EFAULT;
1799} 1807}
1800 1808
1801bool 1809bool
1802mm_md_direct_mapped_io(void *addr, paddr_t *paddr) 1810mm_md_direct_mapped_io(void *addr, paddr_t *paddr)
1803{ 1811{
1804 vaddr_t va = (vaddr_t)addr; 1812 vaddr_t va = (vaddr_t)addr;
1805 1813
1806 if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) { 1814 if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
1807 *paddr = ALPHA_K0SEG_TO_PHYS(va); 1815 *paddr = ALPHA_K0SEG_TO_PHYS(va);
1808 return true; 1816 return true;
1809 } 1817 }
1810 return false; 1818 return false;
1811} 1819}
1812 1820
/*
 * mm_md_direct_mapped_phys:
 *
 *	Return the direct-mapped (K0SEG) virtual address for a physical
 *	address.  Always succeeds: all of physical memory is visible
 *	through K0SEG on Alpha.
 */
bool
mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
{

	*vaddr = ALPHA_PHYS_TO_K0SEG(paddr);
	return true;
}
1820 1828
/*
 * cpu_getmcontext:
 *
 *	Capture the machine context (general registers, PC/PS, TLS
 *	base, and FP state if valid) of LWP 'l' into *mcp, setting
 *	the corresponding _UC_* bits in *flags.
 */
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	struct trapframe *frame = l->l_md.md_tf;
	struct pcb *pcb = lwp_getpcb(l);
	__greg_t *gr = mcp->__gregs;
	__greg_t ras_pc;

	/* Save register context. */
	frametoreg(frame, (struct reg *)gr);
	/* XXX if there's a better, general way to get the USP of
	 * an LWP that might or might not be curlwp, I'd like to know
	 * about it.
	 */
	if (l == curlwp) {
		/* Live values must come from PALcode for the running LWP. */
		gr[_REG_SP] = alpha_pal_rdusp();
		gr[_REG_UNIQUE] = alpha_pal_rdunique();
	} else {
		/* Otherwise the saved copies in the PCB are current. */
		gr[_REG_SP] = pcb->pcb_hw.apcb_usp;
		gr[_REG_UNIQUE] = pcb->pcb_hw.apcb_unique;
	}
	gr[_REG_PC] = frame->tf_regs[FRAME_PC];
	gr[_REG_PS] = frame->tf_regs[FRAME_PS];

	/* If inside a restartable atomic sequence, report its start PC. */
	if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
	    (void *) gr[_REG_PC])) != -1)
		gr[_REG_PC] = ras_pc;

	*flags |= _UC_CPU | _UC_TLSBASE;

	/* Save floating point register context, if any, and copy it. */
	if (fpu_valid_p(l)) {
		fpu_save(l);
		(void)memcpy(&mcp->__fpregs, &pcb->pcb_fp,
		    sizeof (mcp->__fpregs));
		/* Report the software FP_C, not the raw hardware FPCR. */
		mcp->__fpregs.__fp_fpcr = alpha_read_fp_c(l);
		*flags |= _UC_FPU;
	}
}
1860 1868
1861int 1869int
1862cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp) 1870cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
1863{ 1871{
1864 const __greg_t *gr = mcp->__gregs; 1872 const __greg_t *gr = mcp->__gregs;
1865 1873
1866 if ((gr[_REG_PS] & ALPHA_PSL_USERSET) != ALPHA_PSL_USERSET || 1874 if ((gr[_REG_PS] & ALPHA_PSL_USERSET) != ALPHA_PSL_USERSET ||
1867 (gr[_REG_PS] & ALPHA_PSL_USERCLR) != 0) 1875 (gr[_REG_PS] & ALPHA_PSL_USERCLR) != 0)
1868 return EINVAL; 1876 return EINVAL;
1869 1877
1870 return 0; 1878 return 0;
1871} 1879}
1872 1880
1873int 1881int
1874cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags) 1882cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
1875{ 1883{
1876 struct trapframe *frame = l->l_md.md_tf; 1884 struct trapframe *frame = l->l_md.md_tf;
1877 struct pcb *pcb = lwp_getpcb(l); 1885 struct pcb *pcb = lwp_getpcb(l);
1878 const __greg_t *gr = mcp->__gregs; 1886 const __greg_t *gr = mcp->__gregs;
1879 int error; 1887 int error;
1880 1888
1881 /* Restore register context, if any. */ 1889 /* Restore register context, if any. */
1882 if (flags & _UC_CPU) { 1890 if (flags & _UC_CPU) {
1883 /* Check for security violations first. */ 1891 /* Check for security violations first. */
1884 error = cpu_mcontext_validate(l, mcp); 1892 error = cpu_mcontext_validate(l, mcp);
1885 if (error) 1893 if (error)
1886 return error; 1894 return error;
1887 1895
1888 regtoframe((const struct reg *)gr, l->l_md.md_tf); 1896 regtoframe((const struct reg *)gr, l->l_md.md_tf);
1889 if (l == curlwp) 1897 if (l == curlwp)
1890 alpha_pal_wrusp(gr[_REG_SP]); 1898 alpha_pal_wrusp(gr[_REG_SP]);
1891 else 1899 else
1892 pcb->pcb_hw.apcb_usp = gr[_REG_SP]; 1900 pcb->pcb_hw.apcb_usp = gr[_REG_SP];
1893 frame->tf_regs[FRAME_PC] = gr[_REG_PC]; 1901 frame->tf_regs[FRAME_PC] = gr[_REG_PC];
1894 frame->tf_regs[FRAME_PS] = gr[_REG_PS]; 1902 frame->tf_regs[FRAME_PS] = gr[_REG_PS];
1895 } 1903 }
1896 1904
1897 if (flags & _UC_TLSBASE) 1905 if (flags & _UC_TLSBASE)
1898 lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_UNIQUE]); 1906 lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_UNIQUE]);
1899 1907
1900 /* Restore floating point register context, if any. */ 1908 /* Restore floating point register context, if any. */
1901 if (flags & _UC_FPU) { 1909 if (flags & _UC_FPU) {
1902 /* If we have an FP register context, get rid of it. */ 1910 /* If we have an FP register context, get rid of it. */
1903 fpu_discard(l, true); 1911 fpu_discard(l, true);
1904 (void)memcpy(&pcb->pcb_fp, &mcp->__fpregs, 1912 (void)memcpy(&pcb->pcb_fp, &mcp->__fpregs,
1905 sizeof (pcb->pcb_fp)); 1913 sizeof (pcb->pcb_fp));
1906 l->l_md.md_flags = mcp->__fpregs.__fp_fpcr & MDLWP_FP_C; 1914 l->l_md.md_flags = mcp->__fpregs.__fp_fpcr & MDLWP_FP_C;
1907 } 1915 }
1908 1916
1909 mutex_enter(l->l_proc->p_lock); 1917 mutex_enter(l->l_proc->p_lock);
1910 if (flags & _UC_SETSTACK) 1918 if (flags & _UC_SETSTACK)
1911 l->l_sigstk.ss_flags |= SS_ONSTACK; 1919 l->l_sigstk.ss_flags |= SS_ONSTACK;
1912 if (flags & _UC_CLRSTACK) 1920 if (flags & _UC_CLRSTACK)
1913 l->l_sigstk.ss_flags &= ~SS_ONSTACK; 1921 l->l_sigstk.ss_flags &= ~SS_ONSTACK;
1914 mutex_exit(l->l_proc->p_lock); 1922 mutex_exit(l->l_proc->p_lock);
1915 1923
1916 return (0); 1924 return (0);
1917} 1925}
1918 1926
1919static void 1927static void
1920cpu_kick(struct cpu_info * const ci) 1928cpu_kick(struct cpu_info * const ci)
1921{ 1929{
1922#if defined(MULTIPROCESSOR) 1930#if defined(MULTIPROCESSOR)
1923 alpha_send_ipi(ci->ci_cpuid, ALPHA_IPI_AST); 1931 alpha_send_ipi(ci->ci_cpuid, ALPHA_IPI_AST);
1924#endif /* MULTIPROCESSOR */ 1932#endif /* MULTIPROCESSOR */
1925} 1933}
1926 1934
1927/* 1935/*
1928 * Preempt the current process if in interrupt from user mode, 1936 * Preempt the current process if in interrupt from user mode,
1929 * or after the current trap/syscall if in system mode. 1937 * or after the current trap/syscall if in system mode.
1930 */ 1938 */
1931void 1939void
1932cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags) 1940cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
1933{ 1941{
1934 1942
1935 KASSERT(kpreempt_disabled()); 1943 KASSERT(kpreempt_disabled());
1936 1944
1937 if ((flags & RESCHED_IDLE) != 0) { 1945 if ((flags & RESCHED_IDLE) != 0) {
1938 /* 1946 /*
1939 * Nothing to do here; we are not currently using WTINT 1947 * Nothing to do here; we are not currently using WTINT
1940 * in cpu_idle(). 1948 * in cpu_idle().
1941 */ 1949 */
1942 return; 1950 return;
1943 } 1951 }
1944 1952
1945 /* XXX RESCHED_KPREEMPT XXX */ 1953 /* XXX RESCHED_KPREEMPT XXX */
1946 1954
1947 KASSERT((flags & RESCHED_UPREEMPT) != 0); 1955 KASSERT((flags & RESCHED_UPREEMPT) != 0);
1948 if ((flags & RESCHED_REMOTE) != 0) { 1956 if ((flags & RESCHED_REMOTE) != 0) {
1949 cpu_kick(ci); 1957 cpu_kick(ci);
1950 } else { 1958 } else {
1951 aston(l); 1959 aston(l);
1952 } 1960 }
1953} 1961}
1954 1962
1955/* 1963/*
1956 * Notify the current lwp (l) that it has a signal pending, 1964 * Notify the current lwp (l) that it has a signal pending,
1957 * process as soon as possible. 1965 * process as soon as possible.
1958 */ 1966 */
1959void 1967void
1960cpu_signotify(struct lwp *l) 1968cpu_signotify(struct lwp *l)
1961{ 1969{
1962 1970
1963 KASSERT(kpreempt_disabled()); 1971 KASSERT(kpreempt_disabled());
1964 1972
1965 if (l->l_cpu != curcpu()) { 1973 if (l->l_cpu != curcpu()) {
1966 cpu_kick(l->l_cpu); 1974 cpu_kick(l->l_cpu);
1967 } else { 1975 } else {
1968 aston(l); 1976 aston(l);
1969 } 1977 }
1970} 1978}
1971 1979
1972/* 1980/*
1973 * Give a profiling tick to the current process when the user profiling 1981 * Give a profiling tick to the current process when the user profiling
1974 * buffer pages are invalid. On the alpha, request an AST to send us 1982 * buffer pages are invalid. On the alpha, request an AST to send us
1975 * through trap, marking the proc as needing a profiling tick. 1983 * through trap, marking the proc as needing a profiling tick.
1976 */ 1984 */
1977void 1985void
1978cpu_need_proftick(struct lwp *l) 1986cpu_need_proftick(struct lwp *l)
1979{ 1987{
1980 1988
1981 KASSERT(kpreempt_disabled()); 1989 KASSERT(kpreempt_disabled());
1982 KASSERT(l->l_cpu == curcpu()); 1990 KASSERT(l->l_cpu == curcpu());
1983 1991
1984 l->l_pflag |= LP_OWEUPC; 1992 l->l_pflag |= LP_OWEUPC;
1985 aston(l); 1993 aston(l);
1986} 1994}

cvs diff -r1.102 -r1.103 src/sys/arch/alpha/include/cpu.h (switch to unified diff)

--- src/sys/arch/alpha/include/cpu.h 2021/06/26 15:02:19 1.102
+++ src/sys/arch/alpha/include/cpu.h 2021/07/22 01:39:18 1.103
@@ -1,256 +1,257 @@ @@ -1,256 +1,257 @@
1/* $NetBSD: cpu.h,v 1.102 2021/06/26 15:02:19 skrll Exp $ */ 1/* $NetBSD: cpu.h,v 1.103 2021/07/22 01:39:18 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, and by Charles M. Hannum. 9 * NASA Ames Research Center, and by Charles M. Hannum.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 1988 University of Utah. 34 * Copyright (c) 1988 University of Utah.
35 * Copyright (c) 1982, 1990, 1993 35 * Copyright (c) 1982, 1990, 1993
36 * The Regents of the University of California. All rights reserved. 36 * The Regents of the University of California. All rights reserved.
37 * 37 *
38 * This code is derived from software contributed to Berkeley by 38 * This code is derived from software contributed to Berkeley by
39 * the Systems Programming Group of the University of Utah Computer 39 * the Systems Programming Group of the University of Utah Computer
40 * Science Department. 40 * Science Department.
41 * 41 *
42 * Redistribution and use in source and binary forms, with or without 42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions 43 * modification, are permitted provided that the following conditions
44 * are met: 44 * are met:
45 * 1. Redistributions of source code must retain the above copyright 45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer. 46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright 47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the 48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution. 49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors 50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software 51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission. 52 * without specific prior written permission.
53 * 53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE. 64 * SUCH DAMAGE.
65 * 65 *
66 * from: Utah $Hdr: cpu.h 1.16 91/03/25$ 66 * from: Utah $Hdr: cpu.h 1.16 91/03/25$
67 * 67 *
68 * @(#)cpu.h 8.4 (Berkeley) 1/5/94 68 * @(#)cpu.h 8.4 (Berkeley) 1/5/94
69 */ 69 */
70 70
71#ifndef _ALPHA_CPU_H_ 71#ifndef _ALPHA_CPU_H_
72#define _ALPHA_CPU_H_ 72#define _ALPHA_CPU_H_
73 73
74#if defined(_KERNEL_OPT) 74#if defined(_KERNEL_OPT)
75#include "opt_multiprocessor.h" 75#include "opt_multiprocessor.h"
76#include "opt_lockdebug.h" 76#include "opt_lockdebug.h"
77#endif 77#endif
78 78
79/* 79/*
80 * Exported definitions unique to Alpha cpu support. 80 * Exported definitions unique to Alpha cpu support.
81 */ 81 */
82 82
83#include <machine/alpha_cpu.h> 83#include <machine/alpha_cpu.h>
84 84
85#if defined(_KERNEL) || defined(_KMEMUSER) 85#if defined(_KERNEL) || defined(_KMEMUSER)
86#include <sys/cpu_data.h> 86#include <sys/cpu_data.h>
87#include <sys/cctr.h> 87#include <sys/cctr.h>
88#include <sys/intr.h> 88#include <sys/intr.h>
89#include <machine/frame.h> 89#include <machine/frame.h>
90 90
91/* 91/*
92 * Machine check information. 92 * Machine check information.
93 */ 93 */
94struct mchkinfo { 94struct mchkinfo {
95 volatile int mc_expected; /* machine check is expected */ 95 volatile int mc_expected; /* machine check is expected */
96 volatile int mc_received; /* machine check was received */ 96 volatile int mc_received; /* machine check was received */
97}; 97};
98 98
99/* 99/*
100 * Per-cpu information. Data accessed by MI code is marked [MI]. 100 * Per-cpu information. Data accessed by MI code is marked [MI].
101 */ 101 */
102struct cpu_info { 102struct cpu_info {
103 struct cpu_data ci_data; /* [MI] general per-cpu data */ 103 struct cpu_data ci_data; /* [MI] general per-cpu data */
104 struct lwp *ci_curlwp; /* [MI] current owner of the cpu */ 104 struct lwp *ci_curlwp; /* [MI] current owner of the cpu */
105 struct lwp *ci_onproc; /* [MI] current user LWP / kthread */ 105 struct lwp *ci_onproc; /* [MI] current user LWP / kthread */
106 struct cctr_state ci_cc; /* [MI] cycle counter state */ 106 struct cctr_state ci_cc; /* [MI] cycle counter state */
107 107
108 volatile int ci_mtx_count; /* [MI] neg count of spin mutexes */ 108 volatile int ci_mtx_count; /* [MI] neg count of spin mutexes */
109 volatile int ci_mtx_oldspl; /* [MI] for spin mutex splx() */ 109 volatile int ci_mtx_oldspl; /* [MI] for spin mutex splx() */
110 110
111 u_long ci_intrdepth; /* interrupt trap depth */ 111 u_long ci_intrdepth; /* interrupt trap depth */
112 volatile u_long ci_ssir; /* simulated software interrupt reg */ 112 volatile u_long ci_ssir; /* simulated software interrupt reg */
113 /* LWPs for soft intr dispatch */ 113 /* LWPs for soft intr dispatch */
114 struct lwp *ci_silwps[SOFTINT_COUNT]; 114 struct lwp *ci_silwps[SOFTINT_COUNT];
115 struct cpu_softc *ci_softc; /* pointer to our device */ 115 struct cpu_softc *ci_softc; /* pointer to our device */
116 116
117 struct pmap *ci_pmap; /* currently-activated pmap */ 117 struct pmap *ci_pmap; /* currently-activated pmap */
118 u_int ci_next_asn; /* next ASN to assign */ 118 u_int ci_next_asn; /* next ASN to assign */
119 u_long ci_asn_gen; /* current ASN generation */ 119 u_long ci_asn_gen; /* current ASN generation */
120 120
121 struct mchkinfo ci_mcinfo; /* machine check info */ 121 struct mchkinfo ci_mcinfo; /* machine check info */
122 122
123 /* 123 /*
124 * The following must be in their own cache line, as they are 124 * The following must be in their own cache line, as they are
125 * stored to regularly by remote CPUs. 125 * stored to regularly by remote CPUs.
126 */ 126 */
127 volatile u_long ci_ipis /* interprocessor interrupts pending */ 127 volatile u_long ci_ipis /* interprocessor interrupts pending */
128 __aligned(64); 128 __aligned(64);
129 u_int ci_want_resched; /* [MI] preempt current process */ 129 u_int ci_want_resched; /* [MI] preempt current process */
130 130
131 /* 131 /*
132 * These are largely static, and will frequently be fetched 132 * These are largely static, and will frequently be fetched
133 * by other CPUs. For that reason, they get their own cache 133 * by other CPUs. For that reason, they get their own cache
134 * line, too. 134 * line, too.
135 */ 135 */
136 struct cpu_info *ci_next /* next cpu_info structure */ 136 struct cpu_info *ci_next /* next cpu_info structure */
137 __aligned(64); 137 __aligned(64);
138 cpuid_t ci_cpuid; /* [MI] our CPU ID */ 138 cpuid_t ci_cpuid; /* [MI] our CPU ID */
139 volatile u_long ci_flags; /* flags; see below */ 139 volatile u_long ci_flags; /* flags; see below */
140 uint64_t ci_pcc_freq; /* cpu cycles/second */ 140 uint64_t ci_pcc_freq; /* cpu cycles/second */
141 struct trapframe *ci_db_regs; /* registers for debuggers */ 141 struct trapframe *ci_db_regs; /* registers for debuggers */
142 u_int ci_nintrhand; /* # of interrupt handlers */ 142 u_int ci_nintrhand; /* # of interrupt handlers */
143}; 143};
144 144
145/* Ensure some cpu_info fields are within the signed 16-bit displacement. */ 145/* Ensure some cpu_info fields are within the signed 16-bit displacement. */
146__CTASSERT(offsetof(struct cpu_info, ci_curlwp) <= 0x7ff0); 146__CTASSERT(offsetof(struct cpu_info, ci_curlwp) <= 0x7ff0);
147__CTASSERT(offsetof(struct cpu_info, ci_ssir) <= 0x7ff0); 147__CTASSERT(offsetof(struct cpu_info, ci_ssir) <= 0x7ff0);
148 148
149#endif /* _KERNEL || _KMEMUSER */ 149#endif /* _KERNEL || _KMEMUSER */
150 150
151#if defined(_KERNEL) 151#if defined(_KERNEL)
152 152
153#define CPUF_PRIMARY 0x01 /* CPU is primary CPU */ 153#define CPUF_PRIMARY 0x01 /* CPU is primary CPU */
154#define CPUF_PRESENT 0x02 /* CPU is present */ 154#define CPUF_PRESENT 0x02 /* CPU is present */
155#define CPUF_RUNNING 0x04 /* CPU is running */ 155#define CPUF_RUNNING 0x04 /* CPU is running */
156#define CPUF_PAUSED 0x08 /* CPU is paused */ 156#define CPUF_PAUSED 0x08 /* CPU is paused */
157 157
158extern struct cpu_info cpu_info_primary; 158extern struct cpu_info cpu_info_primary;
159extern struct cpu_info *cpu_info_list; 159extern struct cpu_info *cpu_info_list;
160 160
161#define CPU_INFO_ITERATOR int __unused 161#define CPU_INFO_ITERATOR int __unused
162#define CPU_INFO_FOREACH(cii, ci) ci = cpu_info_list; \ 162#define CPU_INFO_FOREACH(cii, ci) ci = cpu_info_list; \
163 ci != NULL; ci = ci->ci_next 163 ci != NULL; ci = ci->ci_next
164 164
165#if defined(MULTIPROCESSOR) 165#if defined(MULTIPROCESSOR)
166extern volatile u_long cpus_running; 166extern volatile u_long cpus_running;
167extern volatile u_long cpus_paused; 167extern volatile u_long cpus_paused;
168extern struct cpu_info *cpu_info[]; 168extern struct cpu_info *cpu_info[];
169 169
170#define curlwp ((struct lwp *)alpha_pal_rdval()) 170#define curlwp ((struct lwp *)alpha_pal_rdval())
171#define curcpu() curlwp->l_cpu 171#define curcpu() curlwp->l_cpu
172#define CPU_IS_PRIMARY(ci) ((ci)->ci_flags & CPUF_PRIMARY) 172#define CPU_IS_PRIMARY(ci) ((ci)->ci_flags & CPUF_PRIMARY)
173 173
174void cpu_boot_secondary_processors(void); 174void cpu_boot_secondary_processors(void);
175 175
176void cpu_pause_resume(unsigned long, int); 176void cpu_pause_resume(unsigned long, int);
177void cpu_pause_resume_all(int); 177void cpu_pause_resume_all(int);
178#else /* ! MULTIPROCESSOR */ 178#else /* ! MULTIPROCESSOR */
179#define curcpu() (&cpu_info_primary) 179#define curcpu() (&cpu_info_primary)
180#define curlwp curcpu()->ci_curlwp 180#define curlwp curcpu()->ci_curlwp
181#endif /* MULTIPROCESSOR */ 181#endif /* MULTIPROCESSOR */
182 182
183 183
184/* 184/*
185 * definitions of cpu-dependent requirements 185 * definitions of cpu-dependent requirements
186 * referenced in generic code 186 * referenced in generic code
187 */ 187 */
188#define cpu_number() alpha_pal_whami() 188#define cpu_number() alpha_pal_whami()
189#define cpu_proc_fork(p1, p2) /* nothing */ 189#define cpu_proc_fork(p1, p2) /* nothing */
190 190
191/* 191/*
192 * Arguments to hardclock and gatherstats encapsulate the previous 192 * Arguments to hardclock and gatherstats encapsulate the previous
193 * machine state in an opaque clockframe. On the alpha, we use 193 * machine state in an opaque clockframe. On the alpha, we use
194 * what we push on an interrupt (a trapframe). 194 * what we push on an interrupt (a trapframe).
195 */ 195 */
196struct clockframe { 196struct clockframe {
197 struct trapframe cf_tf; 197 struct trapframe cf_tf;
198}; 198};
199#define CLKF_USERMODE(framep) \ 199#define CLKF_USERMODE(framep) \
200 (((framep)->cf_tf.tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0) 200 (((framep)->cf_tf.tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0)
201#define CLKF_PC(framep) ((framep)->cf_tf.tf_regs[FRAME_PC]) 201#define CLKF_PC(framep) ((framep)->cf_tf.tf_regs[FRAME_PC])
202 202
203/* 203/*
204 * This isn't perfect; if the clock interrupt comes in before the 204 * This isn't perfect; if the clock interrupt comes in before the
205 * r/m/w cycle is complete, we won't be counted... but it's not 205 * r/m/w cycle is complete, we won't be counted... but it's not
206 * like this statistic has to be extremely accurate. 206 * like this statistic has to be extremely accurate.
207 */ 207 */
208#define CLKF_INTR(framep) \ 208#define CLKF_INTR(framep) \
209 ((curcpu()->ci_intrdepth & 0xf) != 0) /* see interrupt() */ 209 ((curcpu()->ci_intrdepth & 0xf) != 0) /* see interrupt() */
210 210
211/* 211/*
212 * This is used during profiling to integrate system time. It can safely 212 * This is used during profiling to integrate system time. It can safely
213 * assume that the process is resident. 213 * assume that the process is resident.
214 */ 214 */
215#define LWP_PC(p) ((l)->l_md.md_tf->tf_regs[FRAME_PC]) 215#define LWP_PC(p) ((l)->l_md.md_tf->tf_regs[FRAME_PC])
216 216
217void cpu_need_proftick(struct lwp *); 217void cpu_need_proftick(struct lwp *);
218void cpu_signotify(struct lwp *); 218void cpu_signotify(struct lwp *);
219 219
220#define aston(l) ((l)->l_md.md_astpending = 1) 220#define aston(l) ((l)->l_md.md_astpending = 1)
221#endif /* _KERNEL */ 221#endif /* _KERNEL */
222 222
223/* 223/*
224 * CTL_MACHDEP definitions. 224 * CTL_MACHDEP definitions.
225 */ 225 */
226#define CPU_CONSDEV 1 /* dev_t: console terminal device */ 226#define CPU_CONSDEV 1 /* dev_t: console terminal device */
227#define CPU_ROOT_DEVICE 2 /* string: root device name */ 227#define CPU_ROOT_DEVICE 2 /* string: root device name */
228#define CPU_UNALIGNED_PRINT 3 /* int: print unaligned accesses */ 228#define CPU_UNALIGNED_PRINT 3 /* int: print unaligned accesses */
229#define CPU_UNALIGNED_FIX 4 /* int: fix unaligned accesses */ 229#define CPU_UNALIGNED_FIX 4 /* int: fix unaligned accesses */
230#define CPU_UNALIGNED_SIGBUS 5 /* int: SIGBUS unaligned accesses */ 230#define CPU_UNALIGNED_SIGBUS 5 /* int: SIGBUS unaligned accesses */
231#define CPU_BOOTED_KERNEL 6 /* string: booted kernel name */ 231#define CPU_BOOTED_KERNEL 6 /* string: booted kernel name */
232#define CPU_FP_SYNC_COMPLETE 7 /* int: always fixup sync fp traps */ 232#define CPU_FP_SYNC_COMPLETE 7 /* int: always fixup sync fp traps */
233#define CPU_CCTR 8 /* int: using CC timecounter */ 233#define CPU_CCTR 8 /* int: using CC timecounter */
234#define CPU_IS_QEMU 9 /* int: running under Qemu */ 234#define CPU_IS_QEMU 9 /* int: running under Qemu */
 235#define CPU_FP_COMPLETE_DEBUG 10 /* int: enable FP completion debug */
235 236
236 237
237#ifdef _KERNEL 238#ifdef _KERNEL
238 239
239struct pcb; 240struct pcb;
240struct proc; 241struct proc;
241struct reg; 242struct reg;
242struct rpb; 243struct rpb;
243struct trapframe; 244struct trapframe;
244 245
245int badaddr(void *, size_t); 246int badaddr(void *, size_t);
246void * cpu_uarea_alloc(bool); 247void * cpu_uarea_alloc(bool);
247bool cpu_uarea_free(void *); 248bool cpu_uarea_free(void *);
248 249
249void cpu_idle_wtint(void); 250void cpu_idle_wtint(void);
250extern void (*cpu_idle_fn)(void); 251extern void (*cpu_idle_fn)(void);
251#define cpu_idle() (*cpu_idle_fn)() 252#define cpu_idle() (*cpu_idle_fn)()
252 253
253void cpu_initclocks_secondary(void); 254void cpu_initclocks_secondary(void);
254 255
255#endif /* _KERNEL */ 256#endif /* _KERNEL */
256#endif /* _ALPHA_CPU_H_ */ 257#endif /* _ALPHA_CPU_H_ */

cvs diff -r1.7 -r1.8 src/sys/arch/alpha/include/fpu.h (switch to unified diff)

--- src/sys/arch/alpha/include/fpu.h 2017/10/17 00:26:35 1.7
+++ src/sys/arch/alpha/include/fpu.h 2021/07/22 01:39:18 1.8
@@ -1,120 +1,146 @@ @@ -1,120 +1,146 @@
1/* $NetBSD: fpu.h,v 1.7 2017/10/17 00:26:35 maya Exp $ */ 1/* $NetBSD: fpu.h,v 1.8 2021/07/22 01:39:18 thorpej Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001 Ross Harvey 4 * Copyright (c) 2001 Ross Harvey
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This software was written for NetBSD. 7 * This software was written for NetBSD.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD 19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors. 20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived 22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission. 23 * from this software without specific prior written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38#ifndef _ALPHA_FPU_H_ 38#ifndef _ALPHA_FPU_H_
39#define _ALPHA_FPU_H_ 39#define _ALPHA_FPU_H_
40 40
41#define _FP_C_DEF(n) (1UL << (n)) 
42 
43/* 41/*
44 * Most of these next definitions were moved from <ieeefp.h>. Apparently the 42 * Most of these next definitions were moved from <ieeefp.h>. Apparently the
45 * names happen to match those exported by Compaq and Linux from their fpu.h 43 * names happen to match those exported by Compaq and Linux from their fpu.h
46 * files. 44 * files.
47 */ 45 */
48 46
49#define FPCR_SUM _FP_C_DEF(63) 47/*
50#define FPCR_INED _FP_C_DEF(62) 48 * Bits in the Alpha Floating Point Control register. This is the hardware
51#define FPCR_UNFD _FP_C_DEF(61) 49 * register, and should not be directly manipulated by application software.
52#define FPCR_UNDZ _FP_C_DEF(60) 50 */
53#define FPCR_DYN(rm) ((unsigned long)(rm) << 58) 51#define FPCR_SUM __BIT(63) /* Summary (OR of all exception bits) */
54#define FPCR_IOV _FP_C_DEF(57) 52#define FPCR_INED __BIT(62) /* Inexact trap Disable */
55#define FPCR_INE _FP_C_DEF(56) 53#define FPCR_UNFD __BIT(61) /* Underflow trap Disable */
56#define FPCR_UNF _FP_C_DEF(55) 54#define FPCR_UNDZ __BIT(60) /* Underflow to Zero */
57#define FPCR_OVF _FP_C_DEF(54) 55#define FPCR_DYN_RM __BITS(58,59) /* Dynamic Rounding Mode */
58#define FPCR_DZE _FP_C_DEF(53) 56 /* 00 Chopped */
59#define FPCR_INV _FP_C_DEF(52) 57 /* 01 Minus Infinity */
60#define FPCR_OVFD _FP_C_DEF(51) 58 /* 10 Normal (round nearest) */
61#define FPCR_DZED _FP_C_DEF(50) 59 /* 11 Plus Infinity */
62#define FPCR_INVD _FP_C_DEF(49) 60#define FPCR_IOV __BIT(57) /* Integer Overflow */
63#define FPCR_DNZ _FP_C_DEF(48) 61#define FPCR_INE __BIT(56) /* Inexact Result */
64#define FPCR_DNOD _FP_C_DEF(47) 62#define FPCR_UNF __BIT(55) /* Underflow */
 63#define FPCR_OVF __BIT(54) /* Overflow */
 64#define FPCR_DZE __BIT(53) /* Division By Zero */
 65#define FPCR_INV __BIT(52) /* Invalid Operation */
 66#define FPCR_OVFD __BIT(51) /* Overflow trap Disable */
 67#define FPCR_DZED __BIT(50) /* Division By Zero trap Disable */
 68#define FPCR_INVD __BIT(49) /* Invalid Operation trap Disable */
 69#define FPCR_DNZ __BIT(48) /* Denormal Operands to Zero */
 70#define FPCR_DNOD __BIT(47) /* Denormal Operation trap Disable */
65 71
66#define FPCR_MIRRORED (FPCR_INE | FPCR_UNF | FPCR_OVF | FPCR_DZE | FPCR_INV) 72#define FPCR_MIRRORED (FPCR_INE | FPCR_UNF | FPCR_OVF | FPCR_DZE | FPCR_INV)
67#define FPCR_MIR_START 52 73#define FPCR_MIR_START 52
68 74
 75/* NetBSD default - no traps enabled, round-to-nearest */
 76#define FPCR_DEFAULT (__SHIFTIN(FP_RN, FPCR_DYN_RM) | \
 77 FPCR_INED | FPCR_UNFD | FPCR_OVFD | \
 78 FPCR_DZED | FPCR_INVD | FPCR_DNOD)
 79
69/* 80/*
 81 * IEEE Floating Point Control (FP_C) Quadword. This is a software
 82 * virtual register that abstracts the FPCR and software completion
 83 * performed by the kernel.
 84 *
70 * The AARM specifies the bit positions of the software word used for 85 * The AARM specifies the bit positions of the software word used for
71 * user mode interface to the control and status of the kernel completion 86 * user mode interface to the control and status of the kernel completion
72 * routines. Although it largely just redefines the FPCR, it shuffles 87 * routines. Although it largely just redefines the FPCR, it shuffles
73 * the bit order. The names of the bits are defined in the AARM, and 88 * the bit order. The names of the bits are defined in the AARM, and
74 * the definition prefix can easily be determined from public domain 89 * the definition prefix can easily be determined from public domain
75 * programs written to either the Compaq or Linux interfaces, which 90 * programs written to either the Compaq or Linux interfaces, which
76 * appear to be identical. 91 * appear to be identical.
 92 *
 93 * Bits 63-48 are reserved for implementation software.
 94 * Bits 47-23 are reserved for future architecture definition.
 95 * Bits 16-12 are reserved for implementation software.
 96 * Bits 11-7 are reserved for future architecture definition.
 97 * Bit 0 is reserved for implementation software.
77 */ 98 */
78 99
79#define IEEE_STATUS_DNO _FP_C_DEF(22) 100#define IEEE_STATUS_DNO __BIT(22) /* Denormal Operand */
80#define IEEE_STATUS_INE _FP_C_DEF(21) 101#define IEEE_STATUS_INE __BIT(21) /* Inexact Result */
81#define IEEE_STATUS_UNF _FP_C_DEF(20) 102#define IEEE_STATUS_UNF __BIT(20) /* Underflow */
82#define IEEE_STATUS_OVF _FP_C_DEF(19) 103#define IEEE_STATUS_OVF __BIT(19) /* Overflow */
83#define IEEE_STATUS_DZE _FP_C_DEF(18) 104#define IEEE_STATUS_DZE __BIT(18) /* Division By Zero */
84#define IEEE_STATUS_INV _FP_C_DEF(17) 105#define IEEE_STATUS_INV __BIT(17) /* Invalid Operation */
85 106
86#define IEEE_TRAP_ENABLE_DNO _FP_C_DEF(6) 107#define IEEE_TRAP_ENABLE_DNO __BIT(6) /* Denormal Operation trap */
87#define IEEE_TRAP_ENABLE_INE _FP_C_DEF(5) 108#define IEEE_TRAP_ENABLE_INE __BIT(5) /* Inexact Result trap */
88#define IEEE_TRAP_ENABLE_UNF _FP_C_DEF(4) 109#define IEEE_TRAP_ENABLE_UNF __BIT(4) /* Underflow trap */
89#define IEEE_TRAP_ENABLE_OVF _FP_C_DEF(3) 110#define IEEE_TRAP_ENABLE_OVF __BIT(3) /* Overflow trap */
90#define IEEE_TRAP_ENABLE_DZE _FP_C_DEF(2) 111#define IEEE_TRAP_ENABLE_DZE __BIT(2) /* Division By Zero trap */
91#define IEEE_TRAP_ENABLE_INV _FP_C_DEF(1) 112#define IEEE_TRAP_ENABLE_INV __BIT(1) /* Invalid Operation trap */
92 113
93#define IEEE_INHERIT _FP_C_DEF(14) 114#define IEEE_INHERIT __BIT(14)
94#define IEEE_MAP_UMZ _FP_C_DEF(13) /* Map underflowed outputs to zero */ 115#define IEEE_MAP_UMZ __BIT(13) /* Map underflowed outputs to zero */
95#define IEEE_MAP_DMZ _FP_C_DEF(12) /* Map denormal inputs to zero */ 116#define IEEE_MAP_DMZ __BIT(12) /* Map denormal inputs to zero */
96 117
97#define FP_C_MIRRORED (IEEE_STATUS_INE | IEEE_STATUS_UNF | IEEE_STATUS_OVF\ 118#define FP_C_ALLBITS __BITS(1,22)
98 | IEEE_STATUS_DZE | IEEE_STATUS_INV) 119
 120#define FP_C_MIRRORED (IEEE_STATUS_INE | IEEE_STATUS_UNF | IEEE_STATUS_OVF \
 121 | IEEE_STATUS_DZE | IEEE_STATUS_INV)
99#define FP_C_MIR_START 17 122#define FP_C_MIR_START 17
100 123
 124/* NetBSD default - no traps enabled (see FPCR default) */
 125#define FP_C_DEFAULT 0
 126
101#ifdef _KERNEL 127#ifdef _KERNEL
102 128
103#define FLD_MASK(len) ((1UL << (len)) - 1) 129#define FLD_MASK(len) ((1UL << (len)) - 1)
104#define FLD_CLEAR(obj, origin, len) \ 130#define FLD_CLEAR(obj, origin, len) \
105 ((obj) & ~(FLD_MASK(len) << (origin))) 131 ((obj) & ~(FLD_MASK(len) << (origin)))
106#define FLD_INSERT(obj, origin, len, value) \ 132#define FLD_INSERT(obj, origin, len, value) \
107 (FLD_CLEAR(obj, origin, len) | (value) << origin) 133 (FLD_CLEAR(obj, origin, len) | (value) << origin)
108 134
109#define FP_C_TO_NETBSD_MASK(fp_c) ((fp_c) >> 1 & 0x3f) 135#define FP_C_TO_NETBSD_MASK(fp_c) ((fp_c) >> 1 & 0x3f)
110#define FP_C_TO_NETBSD_FLAG(fp_c) ((fp_c) >> 17 & 0x3f) 136#define FP_C_TO_NETBSD_FLAG(fp_c) ((fp_c) >> 17 & 0x3f)
111#define NETBSD_MASK_TO_FP_C(m) (((m) & 0x3f) << 1) 137#define NETBSD_MASK_TO_FP_C(m) (((m) & 0x3f) << 1)
112#define NETBSD_FLAG_TO_FP_C(s) (((s) & 0x3f) << 17) 138#define NETBSD_FLAG_TO_FP_C(s) (((s) & 0x3f) << 17)
113#define CLEAR_FP_C_MASK(fp_c) ((fp_c) & ~(0x3f << 1)) 139#define CLEAR_FP_C_MASK(fp_c) ((fp_c) & ~(0x3f << 1))
114#define CLEAR_FP_C_FLAG(fp_c) ((fp_c) & ~(0x3f << 17)) 140#define CLEAR_FP_C_FLAG(fp_c) ((fp_c) & ~(0x3f << 17))
115#define SET_FP_C_MASK(fp_c, m) (CLEAR_FP_C_MASK(fp_c) | NETBSD_MASK_TO_FP_C(m)) 141#define SET_FP_C_MASK(fp_c, m) (CLEAR_FP_C_MASK(fp_c) | NETBSD_MASK_TO_FP_C(m))
116#define SET_FP_C_FLAG(fp_c, m) (CLEAR_FP_C_FLAG(fp_c) | NETBSD_FLAG_TO_FP_C(m)) 142#define SET_FP_C_FLAG(fp_c, m) (CLEAR_FP_C_FLAG(fp_c) | NETBSD_FLAG_TO_FP_C(m))
117 143
118#endif 144#endif /* _KERNEL */
119 145
120#endif 146#endif /* _ALPHA_FPU_H_ */