Tue Nov 24 13:04:04 2009 UTC ()
Remove X86_MAXPROCS. This fixes PR port-xen/41755.
This also reduces the diff relative to x86/x86/cpu.c as a nice side effect.
'looks good' bouyer@


(cegger)
diff -r1.37 -r1.38 src/sys/arch/xen/x86/cpu.c

cvs diff -r1.37 -r1.38 src/sys/arch/xen/x86/cpu.c (expand / switch to unified diff)

--- src/sys/arch/xen/x86/cpu.c 2009/11/21 05:54:04 1.37
+++ src/sys/arch/xen/x86/cpu.c 2009/11/24 13:04:04 1.38
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.c,v 1.37 2009/11/21 05:54:04 rmind Exp $ */ 1/* $NetBSD: cpu.c,v 1.38 2009/11/24 13:04:04 cegger Exp $ */
2/* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp */ 2/* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp */
3 3
4/*- 4/*-
5 * Copyright (c) 2000 The NetBSD Foundation, Inc. 5 * Copyright (c) 2000 The NetBSD Foundation, Inc.
6 * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi, 6 * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * This code is derived from software contributed to The NetBSD Foundation 9 * This code is derived from software contributed to The NetBSD Foundation
10 * by RedBack Networks Inc. 10 * by RedBack Networks Inc.
11 * 11 *
12 * Author: Bill Sommerfeld 12 * Author: Bill Sommerfeld
13 * 13 *
14 * Redistribution and use in source and binary forms, with or without 14 * Redistribution and use in source and binary forms, with or without
@@ -56,27 +56,27 @@ @@ -56,27 +56,27 @@
56 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE 58 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * SUCH DAMAGE. 65 * SUCH DAMAGE.
66 */ 66 */
67 67
68#include <sys/cdefs.h> 68#include <sys/cdefs.h>
69__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.37 2009/11/21 05:54:04 rmind Exp $"); 69__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.38 2009/11/24 13:04:04 cegger Exp $");
70 70
71#include "opt_ddb.h" 71#include "opt_ddb.h"
72#include "opt_multiprocessor.h" 72#include "opt_multiprocessor.h"
73#include "opt_mpbios.h" /* for MPDEBUG */ 73#include "opt_mpbios.h" /* for MPDEBUG */
74#include "opt_mtrr.h" 74#include "opt_mtrr.h"
75#include "opt_xen.h" 75#include "opt_xen.h"
76 76
77#include "lapic.h" 77#include "lapic.h"
78#include "ioapic.h" 78#include "ioapic.h"
79 79
80#include <sys/param.h> 80#include <sys/param.h>
81#include <sys/proc.h> 81#include <sys/proc.h>
82#include <sys/user.h> 82#include <sys/user.h>
@@ -102,27 +102,29 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.37 @@ -102,27 +102,29 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.37
102#include <machine/pio.h> 102#include <machine/pio.h>
103 103
104#include <xen/vcpuvar.h> 104#include <xen/vcpuvar.h>
105 105
106#if NLAPIC > 0 106#if NLAPIC > 0
107#include <machine/apicvar.h> 107#include <machine/apicvar.h>
108#include <machine/i82489reg.h> 108#include <machine/i82489reg.h>
109#include <machine/i82489var.h> 109#include <machine/i82489var.h>
110#endif 110#endif
111 111
112#include <dev/ic/mc146818reg.h> 112#include <dev/ic/mc146818reg.h>
113#include <dev/isa/isareg.h> 113#include <dev/isa/isareg.h>
114 114
115#define X86_MAXPROCS 32 115#if MAXCPUS > 32
 116#error cpu_info contains 32bit bitmasks
 117#endif
116 118
117int cpu_match(device_t, cfdata_t, void *); 119int cpu_match(device_t, cfdata_t, void *);
118void cpu_attach(device_t, device_t, void *); 120void cpu_attach(device_t, device_t, void *);
119int vcpu_match(device_t, cfdata_t, void *); 121int vcpu_match(device_t, cfdata_t, void *);
120void vcpu_attach(device_t, device_t, void *); 122void vcpu_attach(device_t, device_t, void *);
121void cpu_attach_common(device_t, device_t, void *); 123void cpu_attach_common(device_t, device_t, void *);
122void cpu_offline_md(void); 124void cpu_offline_md(void);
123 125
124struct cpu_softc { 126struct cpu_softc {
125 device_t sc_dev; /* device tree glue */ 127 device_t sc_dev; /* device tree glue */
126 struct cpu_info *sc_info; /* pointer to CPU info */ 128 struct cpu_info *sc_info; /* pointer to CPU info */
127 bool sc_wasonline; 129 bool sc_wasonline;
128}; 130};
@@ -136,171 +138,174 @@ CFATTACH_DECL_NEW(cpu, sizeof(struct cpu @@ -136,171 +138,174 @@ CFATTACH_DECL_NEW(cpu, sizeof(struct cpu
136 cpu_match, cpu_attach, NULL, NULL); 138 cpu_match, cpu_attach, NULL, NULL);
137CFATTACH_DECL_NEW(vcpu, sizeof(struct cpu_softc), 139CFATTACH_DECL_NEW(vcpu, sizeof(struct cpu_softc),
138 vcpu_match, vcpu_attach, NULL, NULL); 140 vcpu_match, vcpu_attach, NULL, NULL);
139 141
140/* 142/*
141 * Statically-allocated CPU info for the primary CPU (or the only 143 * Statically-allocated CPU info for the primary CPU (or the only
142 * CPU, on uniprocessors). The CPU info list is initialized to 144 * CPU, on uniprocessors). The CPU info list is initialized to
143 * point at it. 145 * point at it.
144 */ 146 */
145#ifdef TRAPLOG 147#ifdef TRAPLOG
146#include <machine/tlog.h> 148#include <machine/tlog.h>
147struct tlog tlog_primary; 149struct tlog tlog_primary;
148#endif 150#endif
149struct cpu_info cpu_info_primary = { 151struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = {
150 .ci_dev = 0, 152 .ci_dev = 0,
151 .ci_self = &cpu_info_primary, 153 .ci_self = &cpu_info_primary,
152 .ci_idepth = -1, 154 .ci_idepth = -1,
153 .ci_curlwp = &lwp0, 155 .ci_curlwp = &lwp0,
154 .ci_curldt = -1, 156 .ci_curldt = -1,
155#ifdef TRAPLOG 157#ifdef TRAPLOG
156 .ci_tlog = &tlog_primary, 158 .ci_tlog = &tlog_primary,
157#endif 159#endif
158 160
159}; 161};
160struct cpu_info phycpu_info_primary = { 162struct cpu_info phycpu_info_primary __aligned(CACHE_LINE_SIZE) = {
161 .ci_dev = 0, 163 .ci_dev = 0,
162 .ci_self = &phycpu_info_primary, 164 .ci_self = &phycpu_info_primary,
163}; 165};
164 166
165struct cpu_info *cpu_info_list = &cpu_info_primary; 167struct cpu_info *cpu_info_list = &cpu_info_primary;
 168struct cpu_info *phycpu_info_list = &phycpu_info_primary;
166 169
167static void cpu_set_tss_gates(struct cpu_info *ci); 170static void cpu_set_tss_gates(struct cpu_info *ci);
168 171
169uint32_t cpus_attached = 0; 172uint32_t cpus_attached = 0;
170uint32_t cpus_running = 0; 173uint32_t cpus_running = 0;
171 174
 175uint32_t phycpus_attached = 0;
 176uint32_t phycpus_running = 0;
 177
172bool x86_mp_online; 178bool x86_mp_online;
173paddr_t mp_trampoline_paddr = MP_TRAMPOLINE; 179paddr_t mp_trampoline_paddr = MP_TRAMPOLINE;
174 180
175struct cpu_info *phycpu_info[X86_MAXPROCS] = { &cpu_info_primary }; 181#if defined(MULTIPROCESSOR)
176 
177#ifdef MULTIPROCESSOR 
178/* 
179 * Array of CPU info structures. Must be statically-allocated because 
180 * curproc, etc. are used early. 
181 */ 
182struct cpu_info *cpu_info[X86_MAXPROCS] = { &cpu_info_primary }; 
183 
184void cpu_hatch(void *); 182void cpu_hatch(void *);
185static void cpu_boot_secondary(struct cpu_info *ci); 183static void cpu_boot_secondary(struct cpu_info *ci);
186static void cpu_start_secondary(struct cpu_info *ci); 184static void cpu_start_secondary(struct cpu_info *ci);
187static void cpu_copy_trampoline(void); 185static void cpu_copy_trampoline(void);
188 186
189/* 187/*
190 * Runs once per boot once multiprocessor goo has been detected and 188 * Runs once per boot once multiprocessor goo has been detected and
191 * the local APIC on the boot processor has been mapped. 189 * the local APIC on the boot processor has been mapped.
192 * 190 *
193 * Called from lapic_boot_init() (from mpbios_scan()). 191 * Called from lapic_boot_init() (from mpbios_scan()).
194 */ 192 */
195void 193void
196cpu_init_first(void) 194cpu_init_first(void)
197{ 195{
198 int cpunum = lapic_cpu_number(); 
199 
200 if (cpunum != 0) { 
201 cpu_info[0] = NULL; 
202 cpu_info[cpunum] = &cpu_info_primary; 
203 } 
204 196
 197 cpu_info_primary.ci_cpuid = lapic_cpu_number();
205 cpu_copy_trampoline(); 198 cpu_copy_trampoline();
206} 199}
207#endif 200#endif /* MULTIPROCESSOR */
208 201
209int 202int
210cpu_match(device_t parent, cfdata_t match, void *aux) 203cpu_match(device_t parent, cfdata_t match, void *aux)
211{ 204{
212 205
213 return 1; 206 return 1;
214} 207}
215 208
216void 209void
217cpu_attach(device_t parent, device_t self, void *aux) 210cpu_attach(device_t parent, device_t self, void *aux)
218{ 211{
219 struct cpu_softc *sc = device_private(self); 212 struct cpu_softc *sc = device_private(self);
220 struct cpu_attach_args *caa = aux; 213 struct cpu_attach_args *caa = aux;
221 struct cpu_info *ci; 214 struct cpu_info *ci;
222 uintptr_t ptr; 215 uintptr_t ptr;
223 int cpunum = caa->cpu_number; 216 static bool again = false;
224 217
225 sc->sc_dev = self; 218 sc->sc_dev = self;
226 219
227 if (cpus_attached == ~0) { 220 if (phycpus_attached == ~0) {
228 aprint_error(": increase MAXCPUS\n"); 221 aprint_error(": increase MAXCPUS\n");
229 return; 222 return;
230 } 223 }
231 224
232 /* 225 /*
233 * If we're an Application Processor, allocate a cpu_info 226 * If we're an Application Processor, allocate a cpu_info
234 * structure, otherwise use the primary's. 227 * structure, otherwise use the primary's.
235 */ 228 */
236 if (caa->cpu_role == CPU_ROLE_AP) { 229 if (caa->cpu_role == CPU_ROLE_AP) {
237 if ((boothowto & RB_MD1) != 0) { 230 if ((boothowto & RB_MD1) != 0) {
238 aprint_error(": multiprocessor boot disabled\n"); 231 aprint_error(": multiprocessor boot disabled\n");
239 if (!pmf_device_register(self, NULL, NULL)) 232 if (!pmf_device_register(self, NULL, NULL))
240 aprint_error_dev(self, 233 aprint_error_dev(self,
241 "couldn't establish power handler\n"); 234 "couldn't establish power handler\n");
242 return; 235 return;
243 } 236 }
244 aprint_naive(": Application Processor\n"); 237 aprint_naive(": Application Processor\n");
245 ptr = (uintptr_t)kmem_zalloc(sizeof(*ci) + CACHE_LINE_SIZE - 1, 238 ptr = (uintptr_t)kmem_zalloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
246 KM_SLEEP); 239 KM_SLEEP);
247 ci = (struct cpu_info *)((ptr + CACHE_LINE_SIZE - 1) & 240 ci = (struct cpu_info *)((ptr + CACHE_LINE_SIZE - 1) &
248 ~(CACHE_LINE_SIZE - 1)); 241 ~(CACHE_LINE_SIZE - 1));
249 ci->ci_curldt = -1; 242 ci->ci_curldt = -1;
250 if (phycpu_info[cpunum] != NULL) 
251 panic("cpu at apic id %d already attached?", cpunum); 
252 phycpu_info[cpunum] = ci; 
253 } else { 243 } else {
254 aprint_naive(": %s Processor\n", 244 aprint_naive(": %s Processor\n",
255 caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot"); 245 caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot");
256 ci = &phycpu_info_primary; 246 ci = &phycpu_info_primary;
257 if (cpunum != 0) { 
258 phycpu_info[0] = NULL; 
259 phycpu_info[cpunum] = ci; 
260 } 
261 } 247 }
262 248
263 ci->ci_self = ci; 249 ci->ci_self = ci;
264 sc->sc_info = ci; 250 sc->sc_info = ci;
265 251
266 ci->ci_dev = self; 252 ci->ci_dev = self;
267 ci->ci_cpuid = caa->cpu_number; 253 ci->ci_cpuid = caa->cpu_number;
268 ci->ci_vcpu = NULL; 254 ci->ci_vcpu = NULL;
269 255
 256 /*
 257 * Boot processor may not be attached first, but the below
 258 * must be done to allow booting other processors.
 259 */
 260 if (!again) {
 261 atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
 262 /* Basic init */
 263 again = true;
 264 }
 265
270 printf(": "); 266 printf(": ");
271 switch (caa->cpu_role) { 267 switch (caa->cpu_role) {
272 case CPU_ROLE_SP: 268 case CPU_ROLE_SP:
273 printf("(uniprocessor)\n"); 269 printf("(uniprocessor)\n");
274 atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY); 270 atomic_or_32(&ci->ci_flags, CPUF_SP);
275 break; 271 break;
276 272
277 case CPU_ROLE_BP: 273 case CPU_ROLE_BP:
278 printf("(boot processor)\n"); 274 printf("(boot processor)\n");
279 atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY); 275 atomic_or_32(&ci->ci_flags, CPUF_BSP);
280 break; 276 break;
281 277
282 case CPU_ROLE_AP: 278 case CPU_ROLE_AP:
283 /* 279 /*
284 * report on an AP 280 * report on an AP
285 */ 281 */
286 printf("(application processor)\n"); 282 printf("(application processor)\n");
 283 if (ci->ci_flags & CPUF_PRESENT) {
 284 struct cpu_info *tmp;
 285
 286 tmp = phycpu_info_list;
 287 while (tmp->ci_next)
 288 tmp = tmp->ci_next;
 289
 290 tmp->ci_next = ci;
 291 }
287 break; 292 break;
288 293
289 default: 294 default:
290 panic("unknown processor type??\n"); 295 panic("unknown processor type??\n");
291 } 296 }
292 297
293 atomic_or_32(&cpus_attached, ci->ci_cpumask); 298 atomic_or_32(&phycpus_attached, ci->ci_cpumask);
294 299
295 return; 300 return;
296} 301}
297 302
298int 303int
299vcpu_match(device_t parent, cfdata_t match, void *aux) 304vcpu_match(device_t parent, cfdata_t match, void *aux)
300{ 305{
301 struct vcpu_attach_args *vcaa = aux; 306 struct vcpu_attach_args *vcaa = aux;
302 307
303 if (strcmp(vcaa->vcaa_name, match->cf_name) == 0) 308 if (strcmp(vcaa->vcaa_name, match->cf_name) == 0)
304 return 1; 309 return 1;
305 return 0; 310 return 0;
306} 311}
@@ -346,185 +351,191 @@ cpu_vm_init(struct cpu_info *ci) @@ -346,185 +351,191 @@ cpu_vm_init(struct cpu_info *ci)
346 return; 351 return;
347 aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors); 352 aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors);
348 uvm_page_recolor(ncolors); 353 uvm_page_recolor(ncolors);
349} 354}
350 355
351void 356void
352cpu_attach_common(device_t parent, device_t self, void *aux) 357cpu_attach_common(device_t parent, device_t self, void *aux)
353{ 358{
354 struct cpu_softc *sc = device_private(self); 359 struct cpu_softc *sc = device_private(self);
355 struct cpu_attach_args *caa = aux; 360 struct cpu_attach_args *caa = aux;
356 struct cpu_info *ci; 361 struct cpu_info *ci;
357 uintptr_t ptr; 362 uintptr_t ptr;
358 int cpunum = caa->cpu_number; 363 int cpunum = caa->cpu_number;
 364 static bool again = false;
359 365
360 sc->sc_dev = self; 366 sc->sc_dev = self;
361 367
362 /* 368 /*
363 * If we're an Application Processor, allocate a cpu_info 369 * If we're an Application Processor, allocate a cpu_info
364 * structure, otherwise use the primary's. 370 * structure, otherwise use the primary's.
365 */ 371 */
366 if (caa->cpu_role == CPU_ROLE_AP) { 372 if (caa->cpu_role == CPU_ROLE_AP) {
367 if (cpunum >= X86_MAXPROCS) { 
368 aprint_error(": apic id %d ignored, " 
369 "please increase X86_MAXPROCS\n", cpunum); 
370 } 
371 
372 aprint_naive(": Application Processor\n"); 373 aprint_naive(": Application Processor\n");
373 ptr = (uintptr_t)kmem_alloc(sizeof(*ci) + CACHE_LINE_SIZE - 1, 374 ptr = (uintptr_t)kmem_alloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
374 KM_SLEEP); 375 KM_SLEEP);
375 ci = (struct cpu_info *)((ptr + CACHE_LINE_SIZE - 1) & 376 ci = (struct cpu_info *)((ptr + CACHE_LINE_SIZE - 1) &
376 ~(CACHE_LINE_SIZE - 1)); 377 ~(CACHE_LINE_SIZE - 1));
377 memset(ci, 0, sizeof(*ci)); 378 memset(ci, 0, sizeof(*ci));
378#if defined(MULTIPROCESSOR) 
379 if (cpu_info[cpunum] != NULL) 
380 panic("cpu at apic id %d already attached?", cpunum); 
381 cpu_info[cpunum] = ci; 
382#endif 
383#ifdef TRAPLOG 379#ifdef TRAPLOG
384 ci->ci_tlog_base = kmem_zalloc(sizeof(struct tlog), KM_SLEEP); 380 ci->ci_tlog_base = kmem_zalloc(sizeof(struct tlog), KM_SLEEP);
385#endif 381#endif
386 } else { 382 } else {
387 aprint_naive(": %s Processor\n", 383 aprint_naive(": %s Processor\n",
388 caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot"); 384 caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot");
389 ci = &cpu_info_primary; 385 ci = &cpu_info_primary;
390#if defined(MULTIPROCESSOR) 386#if NLAPIC > 0
391 if (cpunum != lapic_cpu_number()) { 387 if (cpunum != lapic_cpu_number()) {
392 panic("%s: running CPU is at apic %d" 388 /* XXX should be done earlier */
393 " instead of at expected %d", 389 uint32_t reg;
394 device_xname(sc->sc_dev), lapic_cpu_number(), cpunum); 390 aprint_verbose("\n");
 391 aprint_verbose_dev(self, "running CPU at apic %d"
 392 " instead of at expected %d", lapic_cpu_number(),
 393 cpunum);
 394 reg = i82489_readreg(LAPIC_ID);
 395 i82489_writereg(LAPIC_ID, (reg & ~LAPIC_ID_MASK) |
 396 (cpunum << LAPIC_ID_SHIFT));
 397 }
 398 if (cpunum != lapic_cpu_number()) {
 399 aprint_error_dev(self, "unable to reset apic id\n");
395 } 400 }
396#endif 401#endif
397 } 402 }
398 403
399 ci->ci_self = ci; 404 ci->ci_self = ci;
400 sc->sc_info = ci; 405 sc->sc_info = ci;
401 
402 ci->ci_dev = self; 406 ci->ci_dev = self;
403 ci->ci_cpuid = cpunum; 407 ci->ci_cpuid = cpunum;
404 408
405 KASSERT(HYPERVISOR_shared_info != NULL); 409 KASSERT(HYPERVISOR_shared_info != NULL);
406 ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[cpunum]; 410 ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[cpunum];
407 411
408 ci->ci_func = caa->cpu_func; 412 ci->ci_func = caa->cpu_func;
409 413
 414 /* Must be called before mi_cpu_attach(). */
 415 cpu_vm_init(ci);
 416
410 if (caa->cpu_role == CPU_ROLE_AP) { 417 if (caa->cpu_role == CPU_ROLE_AP) {
411#if defined(MULTIPROCESSOR) 
412 int error; 418 int error;
413 419
414 error = mi_cpu_attach(ci); 420 error = mi_cpu_attach(ci);
415 if (error != 0) { 421 if (error != 0) {
416 aprint_normal("\n"); 422 aprint_normal("\n");
417 aprint_error_dev(sc->sc_dev, "mi_cpu_attach failed with %d\n", 423 aprint_error_dev(self,
418 error); 424 "mi_cpu_attach failed with %d\n", error);
419 return; 425 return;
420 } 426 }
421#endif 
422 } else { 427 } else {
423 KASSERT(ci->ci_data.cpu_idlelwp != NULL); 428 KASSERT(ci->ci_data.cpu_idlelwp != NULL);
424 } 429 }
425 430
426 ci->ci_cpumask = (1 << cpu_index(ci)); 431 ci->ci_cpumask = (1 << cpu_index(ci));
427 pmap_reference(pmap_kernel()); 432 pmap_reference(pmap_kernel());
428 ci->ci_pmap = pmap_kernel(); 433 ci->ci_pmap = pmap_kernel();
429 ci->ci_tlbstate = TLBSTATE_STALE; 434 ci->ci_tlbstate = TLBSTATE_STALE;
430 435
431 /* further PCB init done later. */ 436 /*
432 437 * Boot processor may not be attached first, but the below
433 switch (caa->cpu_role) { 438 * must be done to allow booting other processors.
434 case CPU_ROLE_SP: 439 */
435 atomic_or_32(&ci->ci_flags, 440 if (!again) {
436 CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY); 441 atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
 442 /* Basic init. */
437 cpu_intr_init(ci); 443 cpu_intr_init(ci);
438 cpu_get_tsc_freq(ci); 444 cpu_get_tsc_freq(ci);
439 cpu_identify(ci); 
440 cpu_init(ci); 445 cpu_init(ci);
441 cpu_set_tss_gates(ci); 446 cpu_set_tss_gates(ci);
442 pmap_cpu_init_late(ci); 447 pmap_cpu_init_late(ci);
443 x86_cpu_idle_init(); 448#if NLAPIC > 0
 449 if (caa->cpu_role != CPU_ROLE_SP) {
 450 /* Enable lapic. */
 451 lapic_enable();
 452 lapic_set_lvt();
 453 lapic_calibrate_timer();
 454 }
 455#endif
 456 /* Make sure DELAY() is initialized. */
 457 DELAY(1);
 458 again = true;
 459 }
 460
 461 /* further PCB init done later. */
 462
 463 switch (caa->cpu_role) {
 464 case CPU_ROLE_SP:
 465 atomic_or_32(&ci->ci_flags, CPUF_SP);
 466 cpu_identify(ci);
444#if 0 467#if 0
445 x86_errata(); 468 x86_errata();
446#endif 469#endif
 470 x86_cpu_idle_init();
447 break; 471 break;
448 472
449 case CPU_ROLE_BP: 473 case CPU_ROLE_BP:
450 atomic_or_32(&ci->ci_flags,  474 atomic_or_32(&ci->ci_flags, CPUF_BSP);
451 CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY); 
452 cpu_intr_init(ci); 
453 cpu_get_tsc_freq(ci); 
454 cpu_identify(ci); 475 cpu_identify(ci);
455 cpu_init(ci); 476 cpu_init(ci);
456 cpu_set_tss_gates(ci); 
457 pmap_cpu_init_late(ci); 
458 x86_cpu_idle_init(); 
459#if NLAPIC > 0 
460 /* 
461 * Enable local apic 
462 */ 
463 lapic_enable(); 
464 lapic_set_lvt(); 
465 lapic_calibrate_timer(ci); 
466#endif 
467#if 0 477#if 0
468 x86_errata(); 478 x86_errata();
469#endif 479#endif
 480 x86_cpu_idle_init();
470 break; 481 break;
471 482
472 case CPU_ROLE_AP: 483 case CPU_ROLE_AP:
473 /* 484 /*
474 * report on an AP 485 * report on an AP
475 */ 486 */
476 487
477#if defined(MULTIPROCESSOR) 488#if defined(MULTIPROCESSOR)
478 cpu_intr_init(ci); 489 cpu_intr_init(ci);
479 gdt_alloc_cpu(ci); 490 gdt_alloc_cpu(ci);
480 cpu_set_tss_gates(ci); 491 cpu_set_tss_gates(ci);
481 pmap_cpu_init_early(ci); 492 pmap_cpu_init_early(ci);
482 pmap_cpu_init_late(ci); 493 pmap_cpu_init_late(ci);
483 cpu_start_secondary(ci); 494 cpu_start_secondary(ci);
484 if (ci->ci_flags & CPUF_PRESENT) { 495 if (ci->ci_flags & CPUF_PRESENT) {
485 struct cpu_info *tmp; 496 struct cpu_info *tmp;
486 497
487 identifycpu(ci); 498 identifycpu(ci);
488 tmp = cpu_info_list; 499 tmp = cpu_info_list;
489 while (tmp->ci_next) 500 while (tmp->ci_next)
490 tmp = tmp->ci_next; 501 tmp = tmp->ci_next;
491 502
492 tmp->ci_next = ci; 503 tmp->ci_next = ci;
493 } 504 }
494#else 505#else
495 aprint_normal_dev(sc->sc_dev, "not started\n"); 506 aprint_error_dev(self, "not started\n");
496#endif 507#endif
497 break; 508 break;
498 509
499 default: 510 default:
500 aprint_normal("\n"); 511 aprint_normal("\n");
501 panic("unknown processor type??\n"); 512 panic("unknown processor type??\n");
502 } 513 }
503 cpu_vm_init(ci); 
504 514
505 atomic_or_32(&cpus_attached, ci->ci_cpumask); 515 atomic_or_32(&cpus_attached, ci->ci_cpumask);
506 516
507#if 0 517#if 0
508 if (!pmf_device_register(self, cpu_suspend, cpu_resume)) 518 if (!pmf_device_register(self, cpu_suspend, cpu_resume))
509 aprint_error_dev(self, "couldn't establish power handler\n"); 519 aprint_error_dev(self, "couldn't establish power handler\n");
510#endif 520#endif
511 521
512#if defined(MULTIPROCESSOR) 522#if defined(MULTIPROCESSOR)
513 if (mp_verbose) { 523 if (mp_verbose) {
514 struct lwp *l = ci->ci_data.cpu_idlelwp; 524 struct lwp *l = ci->ci_data.cpu_idlelwp;
515 struct pcb *pcb = lwp_getpcb(l); 525 struct pcb *pcb = lwp_getpcb(l);
516 526
517 aprint_verbose_dev(sc->sc_dev, "idle lwp at %p, idle sp at 0x%p\n", 527 aprint_verbose_dev(self,
 528 "idle lwp at %p, idle sp at 0x%p\n",
518 l, 529 l,
519#ifdef i386 530#ifdef i386
520 (void *)pcb->pcb_esp 531 (void *)pcb->pcb_esp
521#else 532#else
522 (void *)pcb->pcb_rsp 533 (void *)pcb->pcb_rsp
523#endif 534#endif
524 ); 535 );
525  536
526 } 537 }
527#endif 538#endif
528} 539}
529 540
530/* 541/*
@@ -567,28 +578,28 @@ cpu_init(struct cpu_info *ci) @@ -567,28 +578,28 @@ cpu_init(struct cpu_info *ci)
567 578
568 atomic_or_32(&cpus_running, ci->ci_cpumask); 579 atomic_or_32(&cpus_running, ci->ci_cpumask);
569 atomic_or_32(&ci->ci_flags, CPUF_RUNNING); 580 atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
570} 581}
571 582
572 583
573#ifdef MULTIPROCESSOR 584#ifdef MULTIPROCESSOR
574void 585void
575cpu_boot_secondary_processors(void) 586cpu_boot_secondary_processors(void)
576{ 587{
577 struct cpu_info *ci; 588 struct cpu_info *ci;
578 u_long i; 589 u_long i;
579 590
580 for (i = 0; i < X86_MAXPROCS; i++) { 591 for (i = 0; i < maxcpus; i++) {
581 ci = cpu_info[i]; 592 ci = cpu_lookup(i);
582 if (ci == NULL) 593 if (ci == NULL)
583 continue; 594 continue;
584 if (ci->ci_data.cpu_idlelwp == NULL) 595 if (ci->ci_data.cpu_idlelwp == NULL)
585 continue; 596 continue;
586 if ((ci->ci_flags & CPUF_PRESENT) == 0) 597 if ((ci->ci_flags & CPUF_PRESENT) == 0)
587 continue; 598 continue;
588 if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) 599 if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
589 continue; 600 continue;
590 cpu_boot_secondary(ci); 601 cpu_boot_secondary(ci);
591 } 602 }
592 603
593 x86_mp_online = true; 604 x86_mp_online = true;
594} 605}
@@ -598,28 +609,28 @@ cpu_init_idle_lwp(struct cpu_info *ci) @@ -598,28 +609,28 @@ cpu_init_idle_lwp(struct cpu_info *ci)
598{ 609{
599 struct lwp *l = ci->ci_data.cpu_idlelwp; 610 struct lwp *l = ci->ci_data.cpu_idlelwp;
600 struct pcb *pcb = lwp_getpcb(l); 611 struct pcb *pcb = lwp_getpcb(l);
601 612
602 pcb->pcb_cr0 = rcr0(); 613 pcb->pcb_cr0 = rcr0();
603} 614}
604 615
605void 616void
606cpu_init_idle_lwps(void) 617cpu_init_idle_lwps(void)
607{ 618{
608 struct cpu_info *ci; 619 struct cpu_info *ci;
609 u_long i; 620 u_long i;
610 621
611 for (i = 0; i < X86_MAXPROCS; i++) { 622 for (i = 0; i < maxcpus; i++) {
612 ci = cpu_info[i]; 623 ci = cpu_lookup(i);
613 if (ci == NULL) 624 if (ci == NULL)
614 continue; 625 continue;
615 if (ci->ci_data.cpu_idlelwp == NULL) 626 if (ci->ci_data.cpu_idlelwp == NULL)
616 continue; 627 continue;
617 if ((ci->ci_flags & CPUF_PRESENT) == 0) 628 if ((ci->ci_flags & CPUF_PRESENT) == 0)
618 continue; 629 continue;
619 cpu_init_idle_lwp(ci); 630 cpu_init_idle_lwp(ci);
620 } 631 }
621} 632}
622 633
623void 634void
624cpu_start_secondary(struct cpu_info *ci) 635cpu_start_secondary(struct cpu_info *ci)
625{ 636{
@@ -777,27 +788,27 @@ cpu_debug_dump(void) @@ -777,27 +788,27 @@ cpu_debug_dump(void)
777 CPU_INFO_ITERATOR cii; 788 CPU_INFO_ITERATOR cii;
778 789
779 db_printf("addr dev id flags ipis curlwp fpcurlwp\n"); 790 db_printf("addr dev id flags ipis curlwp fpcurlwp\n");
780 for (CPU_INFO_FOREACH(cii, ci)) { 791 for (CPU_INFO_FOREACH(cii, ci)) {
781 db_printf("%p %s %ld %x %x %10p %10p\n", 792 db_printf("%p %s %ld %x %x %10p %10p\n",
782 ci, 793 ci,
783 ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev), 794 ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
784 (long)ci->ci_cpuid, 795 (long)ci->ci_cpuid,
785 ci->ci_flags, ci->ci_ipis, 796 ci->ci_flags, ci->ci_ipis,
786 ci->ci_curlwp, 797 ci->ci_curlwp,
787 ci->ci_fpcurlwp); 798 ci->ci_fpcurlwp);
788 } 799 }
789} 800}
790#endif 801#endif /* DDB */
791 802
792static void 803static void
793cpu_copy_trampoline(void) 804cpu_copy_trampoline(void)
794{ 805{
795 /* 806 /*
796 * Copy boot code. 807 * Copy boot code.
797 */ 808 */
798 extern u_char cpu_spinup_trampoline[]; 809 extern u_char cpu_spinup_trampoline[];
799 extern u_char cpu_spinup_trampoline_end[]; 810 extern u_char cpu_spinup_trampoline_end[];
800 811
801 vaddr_t mp_trampoline_vaddr; 812 vaddr_t mp_trampoline_vaddr;
802 813
803 mp_trampoline_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 814 mp_trampoline_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
@@ -805,27 +816,27 @@ cpu_copy_trampoline(void) @@ -805,27 +816,27 @@ cpu_copy_trampoline(void)
805 816
806 pmap_kenter_pa(mp_trampoline_vaddr, mp_trampoline_paddr, 817 pmap_kenter_pa(mp_trampoline_vaddr, mp_trampoline_paddr,
807 VM_PROT_READ | VM_PROT_WRITE, 0); 818 VM_PROT_READ | VM_PROT_WRITE, 0);
808 pmap_update(pmap_kernel()); 819 pmap_update(pmap_kernel());
809 memcpy((void *)mp_trampoline_vaddr, 820 memcpy((void *)mp_trampoline_vaddr,
810 cpu_spinup_trampoline, 821 cpu_spinup_trampoline,
811 cpu_spinup_trampoline_end - cpu_spinup_trampoline); 822 cpu_spinup_trampoline_end - cpu_spinup_trampoline);
812 823
813 pmap_kremove(mp_trampoline_vaddr, PAGE_SIZE); 824 pmap_kremove(mp_trampoline_vaddr, PAGE_SIZE);
814 pmap_update(pmap_kernel()); 825 pmap_update(pmap_kernel());
815 uvm_km_free(kernel_map, mp_trampoline_vaddr, PAGE_SIZE, UVM_KMF_VAONLY); 826 uvm_km_free(kernel_map, mp_trampoline_vaddr, PAGE_SIZE, UVM_KMF_VAONLY);
816} 827}
817 828
818#endif 829#endif /* MULTIPROCESSOR */
819 830
820#ifdef i386 831#ifdef i386
821#if 0 832#if 0
822static void 833static void
823tss_init(struct i386tss *tss, void *stack, void *func) 834tss_init(struct i386tss *tss, void *stack, void *func)
824{ 835{
825 memset(tss, 0, sizeof *tss); 836 memset(tss, 0, sizeof *tss);
826 tss->tss_esp0 = tss->tss_esp = (int)((char *)stack + USPACE - 16); 837 tss->tss_esp0 = tss->tss_esp = (int)((char *)stack + USPACE - 16);
827 tss->tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); 838 tss->tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
828 tss->__tss_cs = GSEL(GCODE_SEL, SEL_KPL); 839 tss->__tss_cs = GSEL(GCODE_SEL, SEL_KPL);
829 tss->tss_fs = GSEL(GCPU_SEL, SEL_KPL); 840 tss->tss_fs = GSEL(GCPU_SEL, SEL_KPL);
830 tss->tss_gs = tss->__tss_es = tss->__tss_ds = 841 tss->tss_gs = tss->__tss_es = tss->__tss_ds =
831 tss->__tss_ss = GSEL(GDATA_SEL, SEL_KPL); 842 tss->__tss_ss = GSEL(GDATA_SEL, SEL_KPL);