Sun Feb 27 14:17:10 2022 UTC
alpha: Fix placement of membar for sending ICCB message.

While here, reduce it to membar_exit -- store-before-load ordering is
obviously not needed at this point (although alpha doesn't have
anything weaker than the full sequential-consistency `mb'), and while
we do need store-before-load (and load-before-load) ordering to spin
waiting for the CPU to wake up, that already happens a few lines
below with the alpha_mb in the loop anyway.  So there is no need for
membar_sync, which is just `mb' under the hood -- deleting the
membar_sync in this place can't hurt.

The membar_sync had been inserted automatically when converting from
an older style of the atomic_ops(3) API.


(riastradh)
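
To make the ordering argument above concrete, here is a minimal,
hypothetical user-space sketch of the same publish/consume handshake.
It is not NetBSD code: it uses C11 atomics and pthreads in place of the
kernel's membar_exit() and alpha_mb(), and the names (send_sketch,
wait_sketch, msgbuf, rxrdy) are invented stand-ins for the ICCB buffer
and the rpb_rxrdy bits.  The release store plays the role of
membar_exit() before setting the ready bit; the acquire load in the
spin loop plays the role of the full-barrier alpha_mb().

/*
 * Illustrative sketch only -- not NetBSD source.  Build e.g. with:
 *	cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

static char msgbuf[80];		/* stand-in for pcs_iccb.iccb_rxbuf */
static atomic_ulong rxrdy;	/* stand-in for hwrpb->rpb_rxrdy bits */

/* Sender: fill the buffer, then publish it with a store-release. */
static void
send_sketch(const char *msg, unsigned long cpumask)
{

	strcpy(msgbuf, msg);
	/*
	 * Release ordering guarantees the buffer stores above are
	 * visible before the ready bit is seen -- the role
	 * membar_exit() plays before the atomic_or_ulong() on
	 * rpb_rxrdy.  No store-before-load ordering is needed here.
	 */
	atomic_fetch_or_explicit(&rxrdy, cpumask, memory_order_release);
}

/* Receiver: spin until the bit is set, then read the buffer. */
static void *
wait_sketch(void *arg)
{
	const unsigned long cpumask = 1UL << 0;

	/*
	 * Acquire (or stronger) ordering on each poll -- the role the
	 * full-barrier alpha_mb() plays in the real wait loops.
	 */
	while ((atomic_load_explicit(&rxrdy, memory_order_acquire) &
	    cpumask) == 0)
		/* spin */;
	printf("got message: %s", msgbuf);
	return NULL;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, wait_sketch, NULL);
	send_sketch("START\r\n", 1UL << 0);
	pthread_join(t, NULL);
	return 0;
}

As the commit message notes, alpha implements all of these barriers
with the same full `mb' instruction anyway, so the change is about
expressing the minimal ordering contract rather than about the
generated code.
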
diff -r1.104 -r1.105 src/sys/arch/alpha/alpha/cpu.c

cvs diff -r1.104 -r1.105 src/sys/arch/alpha/alpha/cpu.c

--- src/sys/arch/alpha/alpha/cpu.c 2021/05/05 03:54:16 1.104
+++ src/sys/arch/alpha/alpha/cpu.c 2022/02/27 14:17:10 1.105
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.c,v 1.104 2021/05/05 03:54:16 thorpej Exp $ */
+/* $NetBSD: cpu.c,v 1.105 2022/02/27 14:17:10 riastradh Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2020 The NetBSD Foundation, Inc.
@@ -59,7 +59,7 @@
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.104 2021/05/05 03:54:16 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.105 2022/02/27 14:17:10 riastradh Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -882,12 +882,13 @@
 
 	/*
 	 * Copy the message into the ICCB, and tell the secondary console
-	 * that it's there.
+	 * that it's there.  Ensure the buffer is initialized before we
+	 * set the rxrdy bits, as a store-release.
 	 */
 	strcpy(pcsp->pcs_iccb.iccb_rxbuf, msg);
 	pcsp->pcs_iccb.iccb_rxlen = strlen(msg);
+	membar_exit();
 	atomic_or_ulong(&hwrpb->rpb_rxrdy, cpumask);
-	membar_sync();
 
 	/* Wait for the message to be received. */
 	for (timeout = 10000; timeout != 0; timeout--) {