Wed May 27 02:19:50 2009 UTC
- use _MAXNCPU instead of 4
- convert xpmsg_lock from a simplelock to a kmutex (see the sketch after
  this list)
- don't wait for sparc_noop IPI calls
- remove struct xpmsg_func's "retval" member and its usage
- remove the IPI at high IPL message
- rework cpu_attach() a bunch, refactoring calls to getcpuinfo() and setting
  of cpi, and split most of the non-boot CPU handling into a new function
- make CPU_INFO_FOREACH() work whether modular or not
- move the MP cpu_info pages earlier
- move a few things in cpu.c around to consolidate the MP code
- remove useless if (cpus == NULL) tests -- cpus is an array now
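
for reference, the xpmsg locking conversion boils down to the following
pattern (a minimal sketch distilled from the cpu.c diff below; the mutex
is initialised once, from the new cpu_init_system()):

    /* before: a simplelock guarded by an explicit SPL raise */
    static struct simplelock xpmsg_lock = SIMPLELOCK_INITIALIZER;

            s = splsched();
            simple_lock(&xpmsg_lock);
            /* ... post the message, send IPIs, wait ... */
            simple_unlock(&xpmsg_lock);
            splx(s);

    /* after: a spin mutex; entering it raises the SPL to IPL_VM */
    static kmutex_t xpmsg_mutex;

            mutex_init(&xpmsg_mutex, MUTEX_SPIN, IPL_VM); /* cpu_init_system() */

            mutex_spin_enter(&xpmsg_mutex);
            /* ... post the message, send IPIs, wait ... */
            mutex_spin_exit(&xpmsg_mutex);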

with these changes, and an additional change to crazyintr() to not printf(),
i can get to single user shell on my SS20 again.  i can run a few commands
but some of them cause hangs.  "ps auxw" works, but "top -b" does not.

tested in a UP LOCKDEBUG/DEBUG/DIAGNOSTIC kernel as well.
an MP kernel with only cpu0 configured panics when starting /sbin/init.
have not yet tested on a real UP machine.


(mrg)
diff -r1.214 -r1.215 src/sys/arch/sparc/sparc/cpu.c
diff -r1.77 -r1.78 src/sys/arch/sparc/sparc/cpuvar.h
diff -r1.103 -r1.104 src/sys/arch/sparc/sparc/intr.c
diff -r1.328 -r1.329 src/sys/arch/sparc/sparc/pmap.c

cvs diff -r1.214 -r1.215 src/sys/arch/sparc/sparc/cpu.c

--- src/sys/arch/sparc/sparc/cpu.c 2009/05/18 01:36:11 1.214
+++ src/sys/arch/sparc/sparc/cpu.c 2009/05/27 02:19:49 1.215
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.c,v 1.214 2009/05/18 01:36:11 mrg Exp $ */ 1/* $NetBSD: cpu.c,v 1.215 2009/05/27 02:19:49 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996 4 * Copyright (c) 1996
5 * The President and Fellows of Harvard College. All rights reserved. 5 * The President and Fellows of Harvard College. All rights reserved.
6 * Copyright (c) 1992, 1993 6 * Copyright (c) 1992, 1993
7 * The Regents of the University of California. All rights reserved. 7 * The Regents of the University of California. All rights reserved.
8 * 8 *
9 * This software was developed by the Computer Systems Engineering group 9 * This software was developed by the Computer Systems Engineering group
10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11 * contributed to Berkeley. 11 * contributed to Berkeley.
12 * 12 *
13 * All advertising materials mentioning features or use of this software 13 * All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement: 14 * must display the following acknowledgement:
@@ -42,27 +42,27 @@ @@ -42,27 +42,27 @@
42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48 * SUCH DAMAGE. 48 * SUCH DAMAGE.
49 * 49 *
50 * @(#)cpu.c 8.5 (Berkeley) 11/23/93 50 * @(#)cpu.c 8.5 (Berkeley) 11/23/93
51 * 51 *
52 */ 52 */
53 53
54#include <sys/cdefs.h> 54#include <sys/cdefs.h>
55__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.214 2009/05/18 01:36:11 mrg Exp $"); 55__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.215 2009/05/27 02:19:49 mrg Exp $");
56 56
57#include "opt_multiprocessor.h" 57#include "opt_multiprocessor.h"
58#include "opt_lockdebug.h" 58#include "opt_lockdebug.h"
59#include "opt_ddb.h" 59#include "opt_ddb.h"
60#include "opt_sparc_arch.h" 60#include "opt_sparc_arch.h"
61 61
62#include <sys/param.h> 62#include <sys/param.h>
63#include <sys/systm.h> 63#include <sys/systm.h>
64#include <sys/device.h> 64#include <sys/device.h>
65#include <sys/malloc.h> 65#include <sys/malloc.h>
66#include <sys/simplelock.h> 66#include <sys/simplelock.h>
67#include <sys/kernel.h> 67#include <sys/kernel.h>
68 68
@@ -92,30 +92,27 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.21 @@ -92,30 +92,27 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.21
92struct cpu_softc { 92struct cpu_softc {
93 struct device sc_dev; /* generic device info */ 93 struct device sc_dev; /* generic device info */
94 struct cpu_info *sc_cpuinfo; 94 struct cpu_info *sc_cpuinfo;
95}; 95};
96 96
97/* The following are used externally (sysctl_hw). */ 97/* The following are used externally (sysctl_hw). */
98char machine[] = MACHINE; /* from <machine/param.h> */ 98char machine[] = MACHINE; /* from <machine/param.h> */
99char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */ 99char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */
100int cpu_arch; /* sparc architecture version */ 100int cpu_arch; /* sparc architecture version */
101char cpu_model[100]; /* machine model (primary CPU) */ 101char cpu_model[100]; /* machine model (primary CPU) */
102extern char machine_model[]; 102extern char machine_model[];
103 103
104int sparc_ncpus; /* # of CPUs detected by PROM */ 104int sparc_ncpus; /* # of CPUs detected by PROM */
105#ifdef MULTIPROCESSOR 105struct cpu_info *cpus[_MAXNCPU]; /* we only support 4 CPUs. */
106struct cpu_info *cpus[4]; /* we only support 4 CPUs. */ 
107u_int cpu_ready_mask; /* the set of CPUs marked as READY */ 
108#endif 
109 106
110/* The CPU configuration driver. */ 107/* The CPU configuration driver. */
111static void cpu_mainbus_attach(struct device *, struct device *, void *); 108static void cpu_mainbus_attach(struct device *, struct device *, void *);
112int cpu_mainbus_match(struct device *, struct cfdata *, void *); 109int cpu_mainbus_match(struct device *, struct cfdata *, void *);
113 110
114CFATTACH_DECL(cpu_mainbus, sizeof(struct cpu_softc), 111CFATTACH_DECL(cpu_mainbus, sizeof(struct cpu_softc),
115 cpu_mainbus_match, cpu_mainbus_attach, NULL, NULL); 112 cpu_mainbus_match, cpu_mainbus_attach, NULL, NULL);
116 113
117#if defined(SUN4D) 114#if defined(SUN4D)
118static int cpu_cpuunit_match(struct device *, struct cfdata *, void *); 115static int cpu_cpuunit_match(struct device *, struct cfdata *, void *);
119static void cpu_cpuunit_attach(struct device *, struct device *, void *); 116static void cpu_cpuunit_attach(struct device *, struct device *, void *);
120 117
121CFATTACH_DECL(cpu_cpuunit, sizeof(struct cpu_softc), 118CFATTACH_DECL(cpu_cpuunit, sizeof(struct cpu_softc),
@@ -126,89 +123,67 @@ static void cpu_attach(struct cpu_softc  @@ -126,89 +123,67 @@ static void cpu_attach(struct cpu_softc
126 123
127static const char *fsrtoname(int, int, int); 124static const char *fsrtoname(int, int, int);
128void cache_print(struct cpu_softc *); 125void cache_print(struct cpu_softc *);
129void cpu_setup(void); 126void cpu_setup(void);
130void fpu_init(struct cpu_info *); 127void fpu_init(struct cpu_info *);
131 128
132#define IU_IMPL(psr) ((u_int)(psr) >> 28) 129#define IU_IMPL(psr) ((u_int)(psr) >> 28)
133#define IU_VERS(psr) (((psr) >> 24) & 0xf) 130#define IU_VERS(psr) (((psr) >> 24) & 0xf)
134 131
135#define SRMMU_IMPL(mmusr) ((u_int)(mmusr) >> 28) 132#define SRMMU_IMPL(mmusr) ((u_int)(mmusr) >> 28)
136#define SRMMU_VERS(mmusr) (((mmusr) >> 24) & 0xf) 133#define SRMMU_VERS(mmusr) (((mmusr) >> 24) & 0xf)
137 134
138int bootmid; /* Module ID of boot CPU */ 135int bootmid; /* Module ID of boot CPU */
139#if defined(MULTIPROCESSOR) 
140void cpu_spinup(struct cpu_info *); 
141static void init_cpuinfo(struct cpu_info *, int); 
142 
143int go_smp_cpus = 0; /* non-primary CPUs wait for this to go */ 
144 
145/* lock this to send IPI's */ 
146struct simplelock xpmsg_lock = SIMPLELOCK_INITIALIZER; 
147 
148static void 
149init_cpuinfo(struct cpu_info *cpi, int node) 
150{ 
151 vaddr_t intstack, va; 
152 
153 /* 
154 * Finish initialising this cpu_info. 
155 */ 
156 getcpuinfo(cpi, node); 
157 
158 /* 
159 * Arrange interrupt stack. This cpu will also abuse the bottom 
160 * half of the interrupt stack before it gets to run its idle LWP. 
161 */ 
162 intstack = uvm_km_alloc(kernel_map, INT_STACK_SIZE, 0, UVM_KMF_WIRED); 
163 if (intstack == 0) 
164 panic("%s: no uspace/intstack", __func__); 
165 cpi->eintstack = (void*)(intstack + INT_STACK_SIZE); 
166 
167 /* Allocate virtual space for pmap page_copy/page_zero */ 
168 va = uvm_km_alloc(kernel_map, 2*PAGE_SIZE, 0, UVM_KMF_VAONLY); 
169 if (va == 0) 
170 panic("%s: no virtual space", __func__); 
171 
172 cpi->vpage[0] = (void *)(va + 0); 
173 cpi->vpage[1] = (void *)(va + PAGE_SIZE); 
174} 
175#endif /* MULTIPROCESSOR */ 
176 136
177#ifdef notdef 137#ifdef notdef
178/* 138/*
179 * IU implementations are parceled out to vendors (with some slight 139 * IU implementations are parceled out to vendors (with some slight
180 * glitches). Printing these is cute but takes too much space. 140 * glitches). Printing these is cute but takes too much space.
181 */ 141 */
182static char *iu_vendor[16] = { 142static char *iu_vendor[16] = {
183 "Fujitsu", /* and also LSI Logic */ 143 "Fujitsu", /* and also LSI Logic */
184 "ROSS", /* ROSS (ex-Cypress) */ 144 "ROSS", /* ROSS (ex-Cypress) */
185 "BIT", 145 "BIT",
186 "LSIL", /* LSI Logic finally got their own */ 146 "LSIL", /* LSI Logic finally got their own */
187 "TI", /* Texas Instruments */ 147 "TI", /* Texas Instruments */
188 "Matsushita", 148 "Matsushita",
189 "Philips", 149 "Philips",
190 "Harvest", /* Harvest VLSI Design Center */ 150 "Harvest", /* Harvest VLSI Design Center */
191 "SPEC", /* Systems and Processes Engineering Corporation */ 151 "SPEC", /* Systems and Processes Engineering Corporation */
192 "Weitek", 152 "Weitek",
193 "vendor#10", 153 "vendor#10",
194 "vendor#11", 154 "vendor#11",
195 "vendor#12", 155 "vendor#12",
196 "vendor#13", 156 "vendor#13",
197 "vendor#14", 157 "vendor#14",
198 "vendor#15" 158 "vendor#15"
199}; 159};
200#endif 160#endif
201 161
 162#if defined(MULTIPROCESSOR)
 163u_int cpu_ready_mask; /* the set of CPUs marked as READY */
 164void cpu_spinup(struct cpu_info *);
 165static void cpu_attach_non_boot(struct cpu_softc *, struct cpu_info *, int);
 166
 167int go_smp_cpus = 0; /* non-primary CPUs wait for this to go */
 168
 169/*
 170 * This must be locked around all message transactions to ensure only
 171 * one CPU is generating them.
 172 */
 173static kmutex_t xpmsg_mutex;
 174
 175#endif /* MULTIPROCESSOR */
 176
202/* 177/*
203 * 4/110 comment: the 4/110 chops off the top 4 bits of an OBIO address. 178 * 4/110 comment: the 4/110 chops off the top 4 bits of an OBIO address.
204 * this confuses autoconf. for example, if you try and map 179 * this confuses autoconf. for example, if you try and map
205 * 0xfe000000 in obio space on a 4/110 it actually maps 0x0e000000. 180 * 0xfe000000 in obio space on a 4/110 it actually maps 0x0e000000.
206 * this is easy to verify with the PROM. this causes problems 181 * this is easy to verify with the PROM. this causes problems
207 * with devices like "esp0 at obio0 addr 0xfa000000" because the 182 * with devices like "esp0 at obio0 addr 0xfa000000" because the
208 * 4/110 treats it as esp0 at obio0 addr 0x0a000000" which is the 183 * 4/110 treats it as esp0 at obio0 addr 0x0a000000" which is the
209 * address of the 4/110's "sw0" scsi chip. the same thing happens 184 * address of the 4/110's "sw0" scsi chip. the same thing happens
210 * between zs1 and zs2. since the sun4 line is "closed" and 185 * between zs1 and zs2. since the sun4 line is "closed" and
211 * we know all the "obio" devices that will ever be on it we just 186 * we know all the "obio" devices that will ever be on it we just
212 * put in some special case "if"'s in the match routines of esp, 187 * put in some special case "if"'s in the match routines of esp,
213 * dma, and zs. 188 * dma, and zs.
214 */ 189 */
@@ -316,150 +291,188 @@ cpu_cpuunit_attach(struct device *parent @@ -316,150 +291,188 @@ cpu_cpuunit_attach(struct device *parent
316 cpu_attach((struct cpu_softc *)self, cpua->cpua_node, 291 cpu_attach((struct cpu_softc *)self, cpua->cpua_node,
317 cpua->cpua_device_id); 292 cpua->cpua_device_id);
318} 293}
319#endif /* SUN4D */ 294#endif /* SUN4D */
320 295
321/* 296/*
322 * Attach the CPU. 297 * Attach the CPU.
323 * Discover interesting goop about the virtual address cache 298 * Discover interesting goop about the virtual address cache
324 * (slightly funny place to do it, but this is where it is to be found). 299 * (slightly funny place to do it, but this is where it is to be found).
325 */ 300 */
326static void 301static void
327cpu_attach(struct cpu_softc *sc, int node, int mid) 302cpu_attach(struct cpu_softc *sc, int node, int mid)
328{ 303{
 304 char buf[100];
329 struct cpu_info *cpi; 305 struct cpu_info *cpi;
330 int idx; 306 int idx;
331 static int cpu_attach_count = 0; 307 static int cpu_attach_count = 0;
332 308
333 /* 309 /*
334 * The first CPU we're attaching must be the boot CPU. 310 * The first CPU we're attaching must be the boot CPU.
335 * (see autoconf.c and cpuunit.c) 311 * (see autoconf.c and cpuunit.c)
336 */ 312 */
337 idx = cpu_attach_count++; 313 idx = cpu_attach_count++;
338 if (cpu_attach_count == 1) { 
339 getcpuinfo(&cpuinfo, node); 
340 
341#if defined(MULTIPROCESSOR) 
342 cpi = sc->sc_cpuinfo = cpus[idx]; 
343#else 
344 /* The `local' VA is global for uniprocessor. */ 
345 cpi = sc->sc_cpuinfo = (struct cpu_info *)CPUINFO_VA; 
346#endif 
347 cpi->master = 1; 
348 cpi->eintstack = eintstack; 
349 /* Note: `curpcb' is set to `proc0' in locore */ 
350 
351 /* 
352 * If we haven't been able to determine the Id of the 
353 * boot CPU, set it now. In this case we can only boot 
354 * from CPU #0 (see also the CPU attach code in autoconf.c) 
355 */ 
356 if (bootmid == 0) 
357 bootmid = mid; 
358 } else { 
359#if defined(MULTIPROCESSOR) 
360 int error; 
361 
362 /* 
363 * Initialise this cpu's cpu_info. 
364 */ 
365 cpi = sc->sc_cpuinfo = cpus[idx]; 
366 init_cpuinfo(cpi, node); 
367 314
368 /* 315#if !defined(MULTIPROCESSOR)
369 * Call the MI attach which creates an idle LWP for us. 316 if (cpu_attach_count > 1) {
370 */ 
371 error = mi_cpu_attach(cpi); 
372 if (error != 0) { 
373 aprint_normal("\n"); 
374 aprint_error("%s: mi_cpu_attach failed with %d\n", 
375 sc->sc_dev.dv_xname, error); 
376 return; 
377 } 
378 
379 /* 
380 * Note: `eintstack' is set in init_cpuinfo() above. 
381 * The %wim register will be initialized in cpu_hatch(). 
382 */ 
383 cpi->ci_curlwp = cpi->ci_data.cpu_idlelwp; 
384 cpi->curpcb = (struct pcb *)cpi->ci_curlwp->l_addr; 
385 cpi->curpcb->pcb_wim = 1; 
386 
387#else 
388 sc->sc_cpuinfo = NULL; 
389 printf(": no SMP support in kernel\n"); 317 printf(": no SMP support in kernel\n");
390 return; 318 return;
391#endif 
392 } 319 }
393 
394#ifdef DEBUG 
395 cpi->redzone = (void *)((long)cpi->eintstack + REDSIZE); 
396#endif 320#endif
397 321
 322 /*
 323 * Initialise this cpu's cpu_info.
 324 */
 325 cpi = sc->sc_cpuinfo = cpus[idx];
 326 getcpuinfo(cpi, node);
 327
398 cpi->ci_cpuid = idx; 328 cpi->ci_cpuid = idx;
399 cpi->mid = mid; 329 cpi->mid = mid;
400 cpi->node = node; 330 cpi->node = node;
 331#ifdef DEBUG
 332 cpi->redzone = (void *)((long)cpi->eintstack + REDSIZE);
 333#endif
401 334
402 if (sparc_ncpus > 1) { 335 if (sparc_ncpus > 1) {
403 printf(": mid %d", mid); 336 printf(": mid %d", mid);
404 if (mid == 0 && !CPU_ISSUN4D) 337 if (mid == 0 && !CPU_ISSUN4D)
405 printf(" [WARNING: mid should not be 0]"); 338 printf(" [WARNING: mid should not be 0]");
406 } 339 }
407 340
 341#if defined(MULTIPROCESSOR)
 342 if (cpu_attach_count > 1) {
 343 cpu_attach_non_boot(sc, cpi, node);
 344 return;
 345 }
 346#endif /* MULTIPROCESSOR */
 347
 348 /* Stuff to only run on the boot CPU */
 349 cpu_setup();
 350 snprintf(buf, sizeof buf, "%s @ %s MHz, %s FPU",
 351 cpi->cpu_name, clockfreq(cpi->hz), cpi->fpu_name);
 352 snprintf(cpu_model, sizeof cpu_model, "%s (%s)",
 353 machine_model, buf);
 354 printf(": %s\n", buf);
 355 cache_print(sc);
 356
 357 cpi->master = 1;
 358 cpi->eintstack = eintstack;
 359
 360 /*
 361 * If we haven't been able to determine the Id of the
 362 * boot CPU, set it now. In this case we can only boot
 363 * from CPU #0 (see also the CPU attach code in autoconf.c)
 364 */
 365 if (bootmid == 0)
 366 bootmid = mid;
 367}
 368
 369/*
 370 * Finish CPU attach.
 371 * Must be run by the CPU which is being attached.
 372 */
 373void
 374cpu_setup(void)
 375{
 376 if (cpuinfo.hotfix)
 377 (*cpuinfo.hotfix)(&cpuinfo);
 378
 379 /* Initialize FPU */
 380 fpu_init(&cpuinfo);
 381
 382 /* Enable the cache */
 383 cpuinfo.cache_enable();
 384
 385 cpuinfo.flags |= CPUFLG_HATCHED;
 386}
 387
 388#if defined(MULTIPROCESSOR)
 389/*
 390 * Perform most of the tasks needed for a non-boot CPU.
 391 */
 392static void
 393cpu_attach_non_boot(struct cpu_softc *sc, struct cpu_info *cpi, int node)
 394{
 395 vaddr_t intstack, va;
 396 int error;
 397
 398 /*
 399 * Arrange interrupt stack. This cpu will also abuse the bottom
 400 * half of the interrupt stack before it gets to run its idle LWP.
 401 */
 402 intstack = uvm_km_alloc(kernel_map, INT_STACK_SIZE, 0, UVM_KMF_WIRED);
 403 if (intstack == 0)
 404 panic("%s: no uspace/intstack", __func__);
 405 cpi->eintstack = (void*)(intstack + INT_STACK_SIZE);
408 406
409 if (cpi->master) { 407 /* Allocate virtual space for pmap page_copy/page_zero */
410 char buf[100]; 408 va = uvm_km_alloc(kernel_map, 2*PAGE_SIZE, 0, UVM_KMF_VAONLY);
 409 if (va == 0)
 410 panic("%s: no virtual space", __func__);
411 411
412 cpu_setup(); 412 cpi->vpage[0] = (void *)(va + 0);
413 snprintf(buf, sizeof buf, "%s @ %s MHz, %s FPU", 413 cpi->vpage[1] = (void *)(va + PAGE_SIZE);
414 cpi->cpu_name, clockfreq(cpi->hz), cpi->fpu_name); 414
415 snprintf(cpu_model, sizeof cpu_model, "%s (%s)", 415 /*
416 machine_model, buf); 416 * Call the MI attach which creates an idle LWP for us.
417 printf(": %s\n", buf); 417 */
418 cache_print(sc); 418 error = mi_cpu_attach(cpi);
 419 if (error != 0) {
 420 aprint_normal("\n");
 421 aprint_error("%s: mi_cpu_attach failed with %d\n",
 422 sc->sc_dev.dv_xname, error);
419 return; 423 return;
420 } 424 }
421 425
422#if defined(MULTIPROCESSOR) 426 /*
 427 * Note: `eintstack' is set in init_cpuinfo() above.
 428 * The %wim register will be initialized in cpu_hatch().
 429 */
 430 cpi->ci_curlwp = cpi->ci_data.cpu_idlelwp;
 431 cpi->curpcb = (struct pcb *)cpi->ci_curlwp->l_addr;
 432 cpi->curpcb->pcb_wim = 1;
 433
423 /* for now use the fixed virtual addresses setup in autoconf.c */ 434 /* for now use the fixed virtual addresses setup in autoconf.c */
424 cpi->intreg_4m = (struct icr_pi *) 435 cpi->intreg_4m = (struct icr_pi *)
425 (PI_INTR_VA + (_MAXNBPG * CPU_MID2CPUNO(mid))); 436 (PI_INTR_VA + (_MAXNBPG * CPU_MID2CPUNO(cpi->mid)));
426 437
427 /* Now start this CPU */ 438 /* Now start this CPU */
428 cpu_spinup(cpi); 439 cpu_spinup(cpi);
429 printf(": %s @ %s MHz, %s FPU\n", cpi->cpu_name, 440 printf(": %s @ %s MHz, %s FPU\n", cpi->cpu_name,
430 clockfreq(cpi->hz), cpi->fpu_name); 441 clockfreq(cpi->hz), cpi->fpu_name);
431 442
432 cache_print(sc); 443 cache_print(sc);
433 444
434 if (sparc_ncpus > 1 && idx == sparc_ncpus-1) { 445 /*
 446 * Now we're on the last CPU to be attaching.
 447 */
 448 if (sparc_ncpus > 1 && cpi->ci_cpuid == sparc_ncpus - 1) {
435 CPU_INFO_ITERATOR n; 449 CPU_INFO_ITERATOR n;
436 /* 450 /*
437 * Install MP cache flush functions, unless the 451 * Install MP cache flush functions, unless the
438 * single-processor versions are no-ops. 452 * single-processor versions are no-ops.
439 */ 453 */
440 for (CPU_INFO_FOREACH(n, cpi)) { 454 for (CPU_INFO_FOREACH(n, cpi)) {
441#define SET_CACHE_FUNC(x) \ 455#define SET_CACHE_FUNC(x) \
442 if (cpi->x != __CONCAT(noop_,x)) cpi->x = __CONCAT(smp_,x) 456 if (cpi->x != __CONCAT(noop_,x)) cpi->x = __CONCAT(smp_,x)
443 SET_CACHE_FUNC(vcache_flush_page); 457 SET_CACHE_FUNC(vcache_flush_page);
444 SET_CACHE_FUNC(vcache_flush_segment); 458 SET_CACHE_FUNC(vcache_flush_segment);
445 SET_CACHE_FUNC(vcache_flush_region); 459 SET_CACHE_FUNC(vcache_flush_region);
446 SET_CACHE_FUNC(vcache_flush_context); 460 SET_CACHE_FUNC(vcache_flush_context);
447 } 461 }
448 } 462 }
449#endif /* MULTIPROCESSOR */ 463#undef SET_CACHE_FUNC
450} 464}
451 465
452#if defined(MULTIPROCESSOR) 
453/* 466/*
454 * Start secondary processors in motion. 467 * Start secondary processors in motion.
455 */ 468 */
456void 469void
457cpu_boot_secondary_processors(void) 470cpu_boot_secondary_processors(void)
458{ 471{
459 CPU_INFO_ITERATOR n; 472 CPU_INFO_ITERATOR n;
460 struct cpu_info *cpi; 473 struct cpu_info *cpi;
461 474
462 printf("cpu0: booting secondary processors:"); 475 printf("cpu0: booting secondary processors:");
463 for (CPU_INFO_FOREACH(n, cpi)) { 476 for (CPU_INFO_FOREACH(n, cpi)) {
464 if (cpuinfo.mid == cpi->mid || 477 if (cpuinfo.mid == cpi->mid ||
465 (cpi->flags & CPUFLG_HATCHED) == 0) 478 (cpi->flags & CPUFLG_HATCHED) == 0)
@@ -469,61 +482,50 @@ cpu_boot_secondary_processors(void) @@ -469,61 +482,50 @@ cpu_boot_secondary_processors(void)
469 cpi->flags |= CPUFLG_READY; 482 cpi->flags |= CPUFLG_READY;
470 cpu_ready_mask |= (1 << n); 483 cpu_ready_mask |= (1 << n);
471 } 484 }
472 485
473 /* Mark the boot CPU as ready */ 486 /* Mark the boot CPU as ready */
474 cpuinfo.flags |= CPUFLG_READY; 487 cpuinfo.flags |= CPUFLG_READY;
475 cpu_ready_mask |= (1 << 0); 488 cpu_ready_mask |= (1 << 0);
476 489
477 /* Tell the other CPU's to start up. */ 490 /* Tell the other CPU's to start up. */
478 go_smp_cpus = 1; 491 go_smp_cpus = 1;
479 492
480 printf("\n"); 493 printf("\n");
481} 494}
482#endif /* MULTIPROCESSOR */ 
483 495
484/* 496/*
485 * Finish CPU attach. 497 * Early initialisation, before main().
486 * Must be run by the CPU which is being attached. 
487 */ 498 */
488void 499void
489cpu_setup(void) 500cpu_init_system(void)
490{ 501{
491 if (cpuinfo.hotfix) 
492 (*cpuinfo.hotfix)(&cpuinfo); 
493 
494 /* Initialize FPU */ 
495 fpu_init(&cpuinfo); 
496 502
497 /* Enable the cache */ 503 mutex_init(&xpmsg_mutex, MUTEX_SPIN, IPL_VM);
498 cpuinfo.cache_enable(); 
499 
500 cpuinfo.flags |= CPUFLG_HATCHED; 
501} 504}
502 505
503#if defined(MULTIPROCESSOR) 
504 
505extern void cpu_hatch(void); /* in locore.s */ 
506 
507/* 506/*
508 * Allocate per-CPU data, then start up this CPU using PROM. 507 * Allocate per-CPU data, then start up this CPU using PROM.
509 */ 508 */
510void 509void
511cpu_spinup(struct cpu_info *cpi) 510cpu_spinup(struct cpu_info *cpi)
512{ 511{
 512 extern void cpu_hatch(void); /* in locore.s */
513 struct openprom_addr oa; 513 struct openprom_addr oa;
514 void *pc = (void *)cpu_hatch; 514 void *pc;
515 int n; 515 int n;
516 516
 517 pc = (void *)cpu_hatch;
 518
517 /* Setup CPU-specific MMU tables */ 519 /* Setup CPU-specific MMU tables */
518 pmap_alloc_cpu(cpi); 520 pmap_alloc_cpu(cpi);
519 521
520 cpi->flags &= ~CPUFLG_HATCHED; 522 cpi->flags &= ~CPUFLG_HATCHED;
521 523
522 /* 524 /*
523 * The physical address of the context table is passed to 525 * The physical address of the context table is passed to
524 * the PROM in a "physical address descriptor". 526 * the PROM in a "physical address descriptor".
525 */ 527 */
526 oa.oa_space = 0; 528 oa.oa_space = 0;
527 oa.oa_base = (uint32_t)cpi->ctx_tbl_pa; 529 oa.oa_base = (uint32_t)cpi->ctx_tbl_pa;
528 oa.oa_size = cpi->mmu_ncontext * sizeof(cpi->ctx_tbl[0]); /*???*/ 530 oa.oa_size = cpi->mmu_ncontext * sizeof(cpi->ctx_tbl[0]); /*???*/
529 531
@@ -546,62 +548,40 @@ cpu_spinup(struct cpu_info *cpi) @@ -546,62 +548,40 @@ cpu_spinup(struct cpu_info *cpi)
546 } 548 }
547 printf("CPU did not spin up\n"); 549 printf("CPU did not spin up\n");
548} 550}
549 551
550/* 552/*
551 * Call a function on some CPUs. `cpuset' can be set to CPUSET_ALL 553 * Call a function on some CPUs. `cpuset' can be set to CPUSET_ALL
552 * to call every CPU, or `1 << cpi->ci_cpuid' for each CPU to call. 554 * to call every CPU, or `1 << cpi->ci_cpuid' for each CPU to call.
553 */ 555 */
554void 556void
555xcall(xcall_func_t func, xcall_trap_t trap, int arg0, int arg1, int arg2, 557xcall(xcall_func_t func, xcall_trap_t trap, int arg0, int arg1, int arg2,
556 u_int cpuset) 558 u_int cpuset)
557{ 559{
558 struct cpu_info *cpi; 560 struct cpu_info *cpi;
559 int s, n, i, done, callself, mybit; 561 int n, i, done, callself, mybit;
560 volatile struct xpmsg_func *p; 562 volatile struct xpmsg_func *p;
561 int fasttrap; 563 int fasttrap;
562 564 int is_noop = func == (xcall_func_t)sparc_noop;
563 /* XXX - note p->retval is probably no longer useful */ 
564 565
565 mybit = (1 << cpuinfo.ci_cpuid); 566 mybit = (1 << cpuinfo.ci_cpuid);
566 callself = func && (cpuset & mybit) != 0; 567 callself = func && (cpuset & mybit) != 0;
567 cpuset &= ~mybit; 568 cpuset &= ~mybit;
568 569
569 /* 
570 * If no cpus are configured yet, just call ourselves. 
571 */ 
572 if (cpus == NULL) { 
573 p = &cpuinfo.msg.u.xpmsg_func; 
574 if (callself) 
575 p->retval = (*func)(arg0, arg1, arg2); 
576 return; 
577 } 
578 
579 /* Mask any CPUs that are not ready */ 570 /* Mask any CPUs that are not ready */
580 cpuset &= cpu_ready_mask; 571 cpuset &= cpu_ready_mask;
581 572
582 /* prevent interrupts that grab the kernel lock */ 573 /* prevent interrupts that grab the kernel lock */
583 s = splsched(); 574 mutex_spin_enter(&xpmsg_mutex);
584#ifdef DEBUG 
585 if (!cold) { 
586 u_int pc, lvl = ((u_int)s & PSR_PIL) >> 8; 
587 if (lvl > IPL_SCHED) { 
588 __asm("mov %%i7, %0" : "=r" (pc) : ); 
589 printf_nolog("%d: xcall at lvl %u from 0x%x\n", 
590 cpu_number(), lvl, pc); 
591 } 
592 } 
593#endif 
594 LOCK_XPMSG(); 
595 575
596 /* 576 /*
597 * Firstly, call each CPU. We do this so that they might have 577 * Firstly, call each CPU. We do this so that they might have
598 * finished by the time we start looking. 578 * finished by the time we start looking.
599 */ 579 */
600 fasttrap = trap != NULL ? 1 : 0; 580 fasttrap = trap != NULL ? 1 : 0;
601 for (CPU_INFO_FOREACH(n, cpi)) { 581 for (CPU_INFO_FOREACH(n, cpi)) {
602 582
603 /* Note: n == cpi->ci_cpuid */ 583 /* Note: n == cpi->ci_cpuid */
604 if ((cpuset & (1 << n)) == 0) 584 if ((cpuset & (1 << n)) == 0)
605 continue; 585 continue;
606 586
607 cpi->msg.tag = XPMSG_FUNC; 587 cpi->msg.tag = XPMSG_FUNC;
@@ -611,174 +591,158 @@ xcall(xcall_func_t func, xcall_trap_t tr @@ -611,174 +591,158 @@ xcall(xcall_func_t func, xcall_trap_t tr
611 p->trap = trap; 591 p->trap = trap;
612 p->arg0 = arg0; 592 p->arg0 = arg0;
613 p->arg1 = arg1; 593 p->arg1 = arg1;
614 p->arg2 = arg2; 594 p->arg2 = arg2;
615 /* Fast cross calls use interrupt level 14 */ 595 /* Fast cross calls use interrupt level 14 */
616 raise_ipi(cpi,13+fasttrap);/*xcall_cookie->pil*/ 596 raise_ipi(cpi,13+fasttrap);/*xcall_cookie->pil*/
617 } 597 }
618 598
619 /* 599 /*
620 * Second, call ourselves. 600 * Second, call ourselves.
621 */ 601 */
622 p = &cpuinfo.msg.u.xpmsg_func; 602 p = &cpuinfo.msg.u.xpmsg_func;
623 if (callself) 603 if (callself)
624 p->retval = (*func)(arg0, arg1, arg2); 604 (*func)(arg0, arg1, arg2);
625 605
626 /* 606 /*
627 * Lastly, start looping, waiting for all CPUs to register that they 607 * Lastly, start looping, waiting for all CPUs to register that they
628 * have completed (bailing if it takes "too long", being loud about 608 * have completed (bailing if it takes "too long", being loud about
629 * this in the process). 609 * this in the process).
630 */ 610 */
631 done = 0; 611 done = is_noop;
632 i = 100000; /* time-out, not too long, but still an _AGE_ */ 612 i = 100000; /* time-out, not too long, but still an _AGE_ */
633 while (!done) { 613 while (!done) {
634 if (--i < 0) { 614 if (--i < 0) {
635 printf_nolog("xcall(cpu%d,%p): couldn't ping cpus:", 615 printf_nolog("xcall(cpu%d,%p): couldn't ping cpus:",
636 cpu_number(), func); 616 cpu_number(), func);
637 } 617 }
638 618
639 done = 1; 619 done = 1;
640 for (CPU_INFO_FOREACH(n, cpi)) { 620 for (CPU_INFO_FOREACH(n, cpi)) {
641 if ((cpuset & (1 << n)) == 0) 621 if ((cpuset & (1 << n)) == 0)
642 continue; 622 continue;
643 623
644 if (cpi->msg.complete == 0) { 624 if (cpi->msg.complete == 0) {
645 if (i < 0) { 625 if (i < 0) {
646 printf_nolog(" cpu%d", cpi->ci_cpuid); 626 printf_nolog(" cpu%d", cpi->ci_cpuid);
647 } else { 627 } else {
648 done = 0; 628 done = 0;
649 break; 629 break;
650 } 630 }
651 } 631 }
652 } 632 }
653 } 633 }
654 if (i < 0) 634 if (i < 0)
655 printf_nolog("\n"); 635 printf_nolog("\n");
656 636
657 UNLOCK_XPMSG(); 637 mutex_spin_exit(&xpmsg_mutex);
658 splx(s); 
659} 638}
660 639
661/* 640/*
662 * Tell all CPUs other than the current one to enter the PROM idle loop. 641 * Tell all CPUs other than the current one to enter the PROM idle loop.
663 */ 642 */
664void 643void
665mp_pause_cpus(void) 644mp_pause_cpus(void)
666{ 645{
667 CPU_INFO_ITERATOR n; 646 CPU_INFO_ITERATOR n;
668 struct cpu_info *cpi; 647 struct cpu_info *cpi;
669 648
670 if (cpus == NULL) 
671 return; 
672 
673 for (CPU_INFO_FOREACH(n, cpi)) { 649 for (CPU_INFO_FOREACH(n, cpi)) {
674 if (cpuinfo.mid == cpi->mid || 650 if (cpuinfo.mid == cpi->mid ||
675 (cpi->flags & CPUFLG_HATCHED) == 0) 651 (cpi->flags & CPUFLG_HATCHED) == 0)
676 continue; 652 continue;
677 653
678 /* 654 /*
679 * This PROM utility will put the OPENPROM_MBX_ABORT 655 * This PROM utility will put the OPENPROM_MBX_ABORT
680 * message (0xfc) in the target CPU's mailbox and then 656 * message (0xfc) in the target CPU's mailbox and then
681 * send it a level 15 soft interrupt. 657 * send it a level 15 soft interrupt.
682 */ 658 */
683 if (prom_cpuidle(cpi->node) != 0) 659 if (prom_cpuidle(cpi->node) != 0)
684 printf("cpu%d could not be paused\n", cpi->ci_cpuid); 660 printf("cpu%d could not be paused\n", cpi->ci_cpuid);
685 } 661 }
686} 662}
687 663
688/* 664/*
689 * Resume all idling CPUs. 665 * Resume all idling CPUs.
690 */ 666 */
691void 667void
692mp_resume_cpus(void) 668mp_resume_cpus(void)
693{ 669{
694 CPU_INFO_ITERATOR n; 670 CPU_INFO_ITERATOR n;
695 struct cpu_info *cpi; 671 struct cpu_info *cpi;
696 672
697 if (cpus == NULL) 
698 return; 
699 
700 for (CPU_INFO_FOREACH(n, cpi)) { 673 for (CPU_INFO_FOREACH(n, cpi)) {
701 if (cpuinfo.mid == cpi->mid || 674 if (cpuinfo.mid == cpi->mid ||
702 (cpi->flags & CPUFLG_HATCHED) == 0) 675 (cpi->flags & CPUFLG_HATCHED) == 0)
703 continue; 676 continue;
704 677
705 /* 678 /*
706 * This PROM utility makes the target CPU return 679 * This PROM utility makes the target CPU return
707 * from its prom_cpuidle(0) call (see intr.c:nmi_soft()). 680 * from its prom_cpuidle(0) call (see intr.c:nmi_soft()).
708 */ 681 */
709 if (prom_cpuresume(cpi->node) != 0) 682 if (prom_cpuresume(cpi->node) != 0)
710 printf("cpu%d could not be resumed\n", cpi->ci_cpuid); 683 printf("cpu%d could not be resumed\n", cpi->ci_cpuid);
711 } 684 }
712} 685}
713 686
714/* 687/*
715 * Tell all CPUs except the current one to hurry back into the prom 688 * Tell all CPUs except the current one to hurry back into the prom
716 */ 689 */
717void 690void
718mp_halt_cpus(void) 691mp_halt_cpus(void)
719{ 692{
720 CPU_INFO_ITERATOR n; 693 CPU_INFO_ITERATOR n;
721 struct cpu_info *cpi; 694 struct cpu_info *cpi;
722 695
723 if (cpus == NULL) 
724 return; 
725 
726 for (CPU_INFO_FOREACH(n, cpi)) { 696 for (CPU_INFO_FOREACH(n, cpi)) {
727 int r; 697 int r;
728 698
729 if (cpuinfo.mid == cpi->mid) 699 if (cpuinfo.mid == cpi->mid)
730 continue; 700 continue;
731 701
732 /* 702 /*
733 * This PROM utility will put the OPENPROM_MBX_STOP 703 * This PROM utility will put the OPENPROM_MBX_STOP
734 * message (0xfb) in the target CPU's mailbox and then 704 * message (0xfb) in the target CPU's mailbox and then
735 * send it a level 15 soft interrupt. 705 * send it a level 15 soft interrupt.
736 */ 706 */
737 r = prom_cpustop(cpi->node); 707 r = prom_cpustop(cpi->node);
738 printf("cpu%d %shalted\n", cpi->ci_cpuid, 708 printf("cpu%d %shalted\n", cpi->ci_cpuid,
739 r == 0 ? "" : "(boot CPU?) can not be "); 709 r == 0 ? "" : "(boot CPU?) can not be ");
740 } 710 }
741} 711}
742 712
743#if defined(DDB) 713#if defined(DDB)
744void 714void
745mp_pause_cpus_ddb(void) 715mp_pause_cpus_ddb(void)
746{ 716{
747 CPU_INFO_ITERATOR n; 717 CPU_INFO_ITERATOR n;
748 struct cpu_info *cpi; 718 struct cpu_info *cpi;
749 719
750 if (cpus == NULL) 
751 return; 
752 
753 for (CPU_INFO_FOREACH(n, cpi)) { 720 for (CPU_INFO_FOREACH(n, cpi)) {
754 if (cpi == NULL || cpi->mid == cpuinfo.mid || 721 if (cpi == NULL || cpi->mid == cpuinfo.mid ||
755 (cpi->flags & CPUFLG_HATCHED) == 0) 722 (cpi->flags & CPUFLG_HATCHED) == 0)
756 continue; 723 continue;
757 724
758 cpi->msg_lev15.tag = XPMSG15_PAUSECPU; 725 cpi->msg_lev15.tag = XPMSG15_PAUSECPU;
759 raise_ipi(cpi,15); /* high priority intr */ 726 raise_ipi(cpi,15); /* high priority intr */
760 } 727 }
761} 728}
762 729
763void 730void
764mp_resume_cpus_ddb(void) 731mp_resume_cpus_ddb(void)
765{ 732{
766 CPU_INFO_ITERATOR n; 733 CPU_INFO_ITERATOR n;
767 struct cpu_info *cpi; 734 struct cpu_info *cpi;
768 735
769 if (cpus == NULL) 
770 return; 
771 
772 for (CPU_INFO_FOREACH(n, cpi)) { 736 for (CPU_INFO_FOREACH(n, cpi)) {
773 if (cpi == NULL || cpuinfo.mid == cpi->mid || 737 if (cpi == NULL || cpuinfo.mid == cpi->mid ||
774 (cpi->flags & CPUFLG_PAUSED) == 0) 738 (cpi->flags & CPUFLG_PAUSED) == 0)
775 continue; 739 continue;
776 740
777 /* tell it to continue */ 741 /* tell it to continue */
778 cpi->flags &= ~CPUFLG_PAUSED; 742 cpi->flags &= ~CPUFLG_PAUSED;
779 } 743 }
780} 744}
781#endif /* DDB */ 745#endif /* DDB */
782#endif /* MULTIPROCESSOR */ 746#endif /* MULTIPROCESSOR */
783 747
784/* 748/*
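
with "retval" gone, a cross-call function returns void and callers go
through the XCALL* shorthands from cpuvar.h (next diff).  a usage sketch,
with a hypothetical handler name:

    /* runs on each target CPU from the level-13 IPI (level 14 for
     * fast traps); after this change it returns nothing. */
    static void
    example_flush(int va, int ctx, int unused)
    {
            /* hypothetical per-CPU work, e.g. invalidating a mapping */
    }

    /* run example_flush(va, ctx, 0) on every READY CPU.  xcall()
     * masks the set with cpu_ready_mask, calls the local CPU
     * directly rather than via IPI, and skips the completion wait
     * when the function is sparc_noop. */
    XCALL2(example_flush, va, ctx, CPUSET_ALL);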

cvs diff -r1.77 -r1.78 src/sys/arch/sparc/sparc/cpuvar.h

--- src/sys/arch/sparc/sparc/cpuvar.h 2009/05/18 01:36:11 1.77
+++ src/sys/arch/sparc/sparc/cpuvar.h 2009/05/27 02:19:49 1.78
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpuvar.h,v 1.77 2009/05/18 01:36:11 mrg Exp $ */ 1/* $NetBSD: cpuvar.h,v 1.78 2009/05/27 02:19:49 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996 The NetBSD Foundation, Inc. 4 * Copyright (c) 1996 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg. 8 * by Paul Kranenburg.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -27,26 +27,27 @@ @@ -27,26 +27,27 @@
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#ifndef _sparc_cpuvar_h 32#ifndef _sparc_cpuvar_h
33#define _sparc_cpuvar_h 33#define _sparc_cpuvar_h
34 34
35#if defined(_KERNEL_OPT) 35#if defined(_KERNEL_OPT)
36#include "opt_multiprocessor.h" 36#include "opt_multiprocessor.h"
37#include "opt_lockdebug.h" 37#include "opt_lockdebug.h"
38#include "opt_ddb.h" 38#include "opt_ddb.h"
39#include "opt_sparc_arch.h" 39#include "opt_sparc_arch.h"
 40#include "opt_modular.h"
40#endif 41#endif
41 42
42#include <sys/device.h> 43#include <sys/device.h>
43#include <sys/lock.h> 44#include <sys/lock.h>
44#include <sys/cpu_data.h> 45#include <sys/cpu_data.h>
45 46
46#include <sparc/include/reg.h> 47#include <sparc/include/reg.h>
47#include <sparc/sparc/cache.h> /* for cacheinfo */ 48#include <sparc/sparc/cache.h> /* for cacheinfo */
48 49
49/* 50/*
50 * CPU/MMU module information. 51 * CPU/MMU module information.
51 * There is one of these for each "mainline" CPU module we support. 52 * There is one of these for each "mainline" CPU module we support.
52 * The information contained in the structure is used only during 53 * The information contained in the structure is used only during
@@ -94,48 +95,38 @@ struct xpmsg { @@ -94,48 +95,38 @@ struct xpmsg {
94 volatile int tag; 95 volatile int tag;
95#define XPMSG15_PAUSECPU 1 96#define XPMSG15_PAUSECPU 1
96#define XPMSG_FUNC 4 97#define XPMSG_FUNC 4
97#define XPMSG_FTRP 5 98#define XPMSG_FTRP 5
98 99
99 volatile union { 100 volatile union {
100 /* 101 /*
101 * Cross call: ask to run (*func)(arg0,arg1,arg2) 102 * Cross call: ask to run (*func)(arg0,arg1,arg2)
102 * or (*trap)(arg0,arg1,arg2). `trap' should be the 103 * or (*trap)(arg0,arg1,arg2). `trap' should be the
103 * address of a `fast trap' handler that executes in 104 * address of a `fast trap' handler that executes in
104 * the trap window (see locore.s). 105 * the trap window (see locore.s).
105 */ 106 */
106 struct xpmsg_func { 107 struct xpmsg_func {
107 int (*func)(int, int, int); 108 void (*func)(int, int, int);
108 void (*trap)(int, int, int); 109 void (*trap)(int, int, int);
109 int arg0; 110 int arg0;
110 int arg1; 111 int arg1;
111 int arg2; 112 int arg2;
112 int retval; 
113 } xpmsg_func; 113 } xpmsg_func;
114 } u; 114 } u;
115 volatile int received; 115 volatile int received;
116 volatile int complete; 116 volatile int complete;
117}; 117};
118 118
119/* 119/*
120 * This must be locked around all message transactions to ensure only 
121 * one CPU is generating them. 
122 */ 
123extern struct simplelock xpmsg_lock; 
124 
125#define LOCK_XPMSG() simple_lock(&xpmsg_lock); 
126#define UNLOCK_XPMSG() simple_unlock(&xpmsg_lock); 
127 
128/* 
129 * The cpuinfo structure. This structure maintains information about one 120 * The cpuinfo structure. This structure maintains information about one
130 * currently installed CPU (there may be several of these if the machine 121 * currently installed CPU (there may be several of these if the machine
131 * supports multiple CPUs, as on some Sun4m architectures). The information 122 * supports multiple CPUs, as on some Sun4m architectures). The information
132 * in this structure supersedes the old "cpumod", "mmumod", and similar 123 * in this structure supersedes the old "cpumod", "mmumod", and similar
133 * fields. 124 * fields.
134 */ 125 */
135 126
136struct cpu_info { 127struct cpu_info {
137 struct cpu_data ci_data; /* MI per-cpu data */ 128 struct cpu_data ci_data; /* MI per-cpu data */
138 129
139 /* Scheduler flags */ 130 /* Scheduler flags */
140 int ci_want_ast; 131 int ci_want_ast;
141 int ci_want_resched; 132 int ci_want_resched;
@@ -405,49 +396,56 @@ struct cpu_info { @@ -405,49 +396,56 @@ struct cpu_info {
405#define CPUFLG_CACHEPAGETABLES 0x1 /* caching pagetables OK on Sun4m */ 396#define CPUFLG_CACHEPAGETABLES 0x1 /* caching pagetables OK on Sun4m */
406#define CPUFLG_CACHEIOMMUTABLES 0x2 /* caching IOMMU translations OK */ 397#define CPUFLG_CACHEIOMMUTABLES 0x2 /* caching IOMMU translations OK */
407#define CPUFLG_CACHEDVMA 0x4 /* DVMA goes through cache */ 398#define CPUFLG_CACHEDVMA 0x4 /* DVMA goes through cache */
408#define CPUFLG_SUN4CACHEBUG 0x8 /* trap page can't be cached */ 399#define CPUFLG_SUN4CACHEBUG 0x8 /* trap page can't be cached */
409#define CPUFLG_CACHE_MANDATORY 0x10 /* if cache is on, don't use 400#define CPUFLG_CACHE_MANDATORY 0x10 /* if cache is on, don't use
410 uncached access */ 401 uncached access */
411#define CPUFLG_HATCHED 0x1000 /* CPU is alive */ 402#define CPUFLG_HATCHED 0x1000 /* CPU is alive */
412#define CPUFLG_PAUSED 0x2000 /* CPU is paused */ 403#define CPUFLG_PAUSED 0x2000 /* CPU is paused */
413#define CPUFLG_GOTMSG 0x4000 /* CPU got an lev13 IPI */ 404#define CPUFLG_GOTMSG 0x4000 /* CPU got an lev13 IPI */
414#define CPUFLG_READY 0x8000 /* CPU available for IPI */ 405#define CPUFLG_READY 0x8000 /* CPU available for IPI */
415 406
416 407
417#define CPU_INFO_ITERATOR int 408#define CPU_INFO_ITERATOR int
418#ifdef MULTIPROCESSOR 409/*
419#define CPU_INFO_FOREACH(cii, cp) cii = 0; cp = cpus[cii], cii < sparc_ncpus; cii++ 410 * Provide two forms of CPU_INFO_FOREACH. One fast one for non-modular
 411 * non-SMP kernels, and the other for everyone else. Both work in the
 412 * non-SMP case, just involving an extra indirection through cpus[0] for
 413 * the portable version.
 414 */
 415#if defined(MULTIPROCESSOR) || defined(MODULAR) || defined(_MODULE)
 416#define CPU_INFO_FOREACH(cii, cp) cii = 0; (cp = cpus[cii]) && cp->eintstack && cii < sparc_ncpus; cii++
420#else 417#else
421#define CPU_INFO_FOREACH(cii, cp) (void)cii, cp = curcpu(); cp != NULL; cp = NULL 418#define CPU_INFO_FOREACH(cii, cp) (void)cii, cp = curcpu(); cp != NULL; cp = NULL
422#endif 419#endif
423 420
424/* 421/*
425 * Useful macros. 422 * Useful macros.
426 */ 423 */
427#define CPU_NOTREADY(cpi) ((cpi) == NULL || cpuinfo.mid == (cpi)->mid || \ 424#define CPU_NOTREADY(cpi) ((cpi) == NULL || cpuinfo.mid == (cpi)->mid || \
428 ((cpi)->flags & CPUFLG_READY) == 0) 425 ((cpi)->flags & CPUFLG_READY) == 0)
429 426
430/* 427/*
431 * Related function prototypes 428 * Related function prototypes
432 */ 429 */
433void getcpuinfo (struct cpu_info *sc, int node); 430void getcpuinfo (struct cpu_info *sc, int node);
434void mmu_install_tables (struct cpu_info *); 431void mmu_install_tables (struct cpu_info *);
435void pmap_alloc_cpu (struct cpu_info *); 432void pmap_alloc_cpu (struct cpu_info *);
436 433
437#define CPUSET_ALL 0xffffffffU /* xcall to all configured CPUs */ 434#define CPUSET_ALL 0xffffffffU /* xcall to all configured CPUs */
438 435
439#if defined(MULTIPROCESSOR) 436#if defined(MULTIPROCESSOR)
440typedef int (*xcall_func_t)(int, int, int); 437void cpu_init_system(void);
 438typedef void (*xcall_func_t)(int, int, int);
441typedef void (*xcall_trap_t)(int, int, int); 439typedef void (*xcall_trap_t)(int, int, int);
442void xcall(xcall_func_t, xcall_trap_t, int, int, int, u_int); 440void xcall(xcall_func_t, xcall_trap_t, int, int, int, u_int);
443/* Shorthand */ 441/* Shorthand */
444#define XCALL0(f,cpuset) \ 442#define XCALL0(f,cpuset) \
445 xcall((xcall_func_t)f, NULL, 0, 0, 0, cpuset) 443 xcall((xcall_func_t)f, NULL, 0, 0, 0, cpuset)
446#define XCALL1(f,a1,cpuset) \ 444#define XCALL1(f,a1,cpuset) \
447 xcall((xcall_func_t)f, NULL, (int)a1, 0, 0, cpuset) 445 xcall((xcall_func_t)f, NULL, (int)a1, 0, 0, cpuset)
448#define XCALL2(f,a1,a2,cpuset) \ 446#define XCALL2(f,a1,a2,cpuset) \
449 xcall((xcall_func_t)f, NULL, (int)a1, (int)a2, 0, cpuset) 447 xcall((xcall_func_t)f, NULL, (int)a1, (int)a2, 0, cpuset)
450#define XCALL3(f,a1,a2,a3,cpuset) \ 448#define XCALL3(f,a1,a2,a3,cpuset) \
451 xcall((xcall_func_t)f, NULL, (int)a1, (int)a2, (int)a3, cpuset) 449 xcall((xcall_func_t)f, NULL, (int)a1, (int)a2, (int)a3, cpuset)
452 450
453#define FXCALL0(f,tf,cpuset) \ 451#define FXCALL0(f,tf,cpuset) \
@@ -462,22 +460,22 @@ void xcall(xcall_func_t, xcall_trap_t, i @@ -462,22 +460,22 @@ void xcall(xcall_func_t, xcall_trap_t, i
462#define XCALL0(f,cpuset) /**/ 460#define XCALL0(f,cpuset) /**/
463#define XCALL1(f,a1,cpuset) /**/ 461#define XCALL1(f,a1,cpuset) /**/
464#define XCALL2(f,a1,a2,cpuset) /**/ 462#define XCALL2(f,a1,a2,cpuset) /**/
465#define XCALL3(f,a1,a2,a3,cpuset) /**/ 463#define XCALL3(f,a1,a2,a3,cpuset) /**/
466#define FXCALL0(f,tf,cpuset) /**/ 464#define FXCALL0(f,tf,cpuset) /**/
467#define FXCALL1(f,tf,a1,cpuset) /**/ 465#define FXCALL1(f,tf,a1,cpuset) /**/
468#define FXCALL2(f,tf,a1,a2,cpuset) /**/ 466#define FXCALL2(f,tf,a1,a2,cpuset) /**/
469#define FXCALL3(f,tf,a1,a2,a3,cpuset) /**/ 467#define FXCALL3(f,tf,a1,a2,a3,cpuset) /**/
470#endif /* MULTIPROCESSOR */ 468#endif /* MULTIPROCESSOR */
471 469
472extern int bootmid; /* Module ID of boot CPU */ 470extern int bootmid; /* Module ID of boot CPU */
473#define CPU_MID2CPUNO(mid) ((mid) != 0 ? (mid) - 8 : 0) 471#define CPU_MID2CPUNO(mid) ((mid) != 0 ? (mid) - 8 : 0)
474 472
475#ifdef MULTIPROCESSOR 
476extern struct cpu_info *cpus[]; 473extern struct cpu_info *cpus[];
 474#ifdef MULTIPROCESSOR
477extern u_int cpu_ready_mask; /* the set of CPUs marked as READY */ 475extern u_int cpu_ready_mask; /* the set of CPUs marked as READY */
478#endif 476#endif
479 477
480#define cpuinfo (*(struct cpu_info *)CPUINFO_VA) 478#define cpuinfo (*(struct cpu_info *)CPUINFO_VA)
481 479
482 480
483#endif /* _sparc_cpuvar_h */ 481#endif /* _sparc_cpuvar_h */
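
both variants of CPU_INFO_FOREACH above are used the same way; the
modular/SMP form just adds an indirection through cpus[] and stops at
any slot whose eintstack is still unset.  the canonical loop, as in
cpu_boot_secondary_processors():

    CPU_INFO_ITERATOR n;
    struct cpu_info *cpi;

    for (CPU_INFO_FOREACH(n, cpi)) {
            if ((cpi->flags & CPUFLG_READY) == 0)
                    continue;
            /* ... per-CPU work; note n == cpi->ci_cpuid ... */
    }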

cvs diff -r1.103 -r1.104 src/sys/arch/sparc/sparc/intr.c

--- src/sys/arch/sparc/sparc/intr.c 2009/05/18 00:25:15 1.103
+++ src/sys/arch/sparc/sparc/intr.c 2009/05/27 02:19:50 1.104
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: intr.c,v 1.103 2009/05/18 00:25:15 mrg Exp $ */ 1/* $NetBSD: intr.c,v 1.104 2009/05/27 02:19:50 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This software was developed by the Computer Systems Engineering group 7 * This software was developed by the Computer Systems Engineering group
8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 * contributed to Berkeley. 9 * contributed to Berkeley.
10 * 10 *
11 * All advertising materials mentioning features or use of this software 11 * All advertising materials mentioning features or use of this software
12 * must display the following acknowledgement: 12 * must display the following acknowledgement:
13 * This product includes software developed by the University of 13 * This product includes software developed by the University of
14 * California, Lawrence Berkeley Laboratory. 14 * California, Lawrence Berkeley Laboratory.
@@ -31,27 +31,27 @@ @@ -31,27 +31,27 @@
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE. 38 * SUCH DAMAGE.
39 * 39 *
40 * @(#)intr.c 8.3 (Berkeley) 11/11/93 40 * @(#)intr.c 8.3 (Berkeley) 11/11/93
41 */ 41 */
42 42
43#include <sys/cdefs.h> 43#include <sys/cdefs.h>
44__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.103 2009/05/18 00:25:15 mrg Exp $"); 44__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.104 2009/05/27 02:19:50 mrg Exp $");
45 45
46#include "opt_multiprocessor.h" 46#include "opt_multiprocessor.h"
47#include "opt_sparc_arch.h" 47#include "opt_sparc_arch.h"
48 48
49#include <sys/param.h> 49#include <sys/param.h>
50#include <sys/systm.h> 50#include <sys/systm.h>
51#include <sys/kernel.h> 51#include <sys/kernel.h>
52#include <sys/malloc.h> 52#include <sys/malloc.h>
53#include <sys/cpu.h> 53#include <sys/cpu.h>
54#include <sys/intr.h> 54#include <sys/intr.h>
55#include <sys/simplelock.h> 55#include <sys/simplelock.h>
56 56
57#include <uvm/uvm_extern.h> 57#include <uvm/uvm_extern.h>
@@ -334,27 +334,27 @@ static void @@ -334,27 +334,27 @@ static void
334xcallintr(void *v) 334xcallintr(void *v)
335{ 335{
336 336
337 /* Tally */ 337 /* Tally */
338 lev13_evcnt.ev_count++; 338 lev13_evcnt.ev_count++;
339 339
340 /* notyet - cpuinfo.msg.received = 1; */ 340 /* notyet - cpuinfo.msg.received = 1; */
341 switch (cpuinfo.msg.tag) { 341 switch (cpuinfo.msg.tag) {
342 case XPMSG_FUNC: 342 case XPMSG_FUNC:
343 { 343 {
344 volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func; 344 volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;
345 345
346 if (p->func) 346 if (p->func)
347 p->retval = (*p->func)(p->arg0, p->arg1, p->arg2); 347 (*p->func)(p->arg0, p->arg1, p->arg2);
348 break; 348 break;
349 } 349 }
350 } 350 }
351 cpuinfo.msg.tag = 0; 351 cpuinfo.msg.tag = 0;
352 cpuinfo.msg.complete = 1; 352 cpuinfo.msg.complete = 1;
353} 353}
354#endif /* MULTIPROCESSOR */ 354#endif /* MULTIPROCESSOR */
355#endif /* SUN4M || SUN4D */ 355#endif /* SUN4M || SUN4D */
356 356
357 357
358#ifdef MSIIEP 358#ifdef MSIIEP
359/* 359/*
360 * It's easier to make this separate so that not to further obscure 360 * It's easier to make this separate so that not to further obscure

cvs diff -r1.328 -r1.329 src/sys/arch/sparc/sparc/pmap.c (expand / switch to unified diff)

--- src/sys/arch/sparc/sparc/pmap.c 2009/05/18 02:28:35 1.328
+++ src/sys/arch/sparc/sparc/pmap.c 2009/05/27 02:19:50 1.329
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.328 2009/05/18 02:28:35 mrg Exp $ */ 1/* $NetBSD: pmap.c,v 1.329 2009/05/27 02:19:50 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996 4 * Copyright (c) 1996
5 * The President and Fellows of Harvard College. All rights reserved. 5 * The President and Fellows of Harvard College. All rights reserved.
6 * Copyright (c) 1992, 1993 6 * Copyright (c) 1992, 1993
7 * The Regents of the University of California. All rights reserved. 7 * The Regents of the University of California. All rights reserved.
8 * 8 *
9 * This software was developed by the Computer Systems Engineering group 9 * This software was developed by the Computer Systems Engineering group
10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11 * contributed to Berkeley. 11 * contributed to Berkeley.
12 * 12 *
13 * All advertising materials mentioning features or use of this software 13 * All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement: 14 * must display the following acknowledgement:
@@ -46,27 +46,27 @@ @@ -46,27 +46,27 @@
46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48 * SUCH DAMAGE. 48 * SUCH DAMAGE.
49 * 49 *
50 * @(#)pmap.c 8.4 (Berkeley) 2/5/94 50 * @(#)pmap.c 8.4 (Berkeley) 2/5/94
51 * 51 *
52 */ 52 */
53 53
54/* 54/*
55 * SPARC physical map management code. 55 * SPARC physical map management code.
56 */ 56 */
57 57
58#include <sys/cdefs.h> 58#include <sys/cdefs.h>
59__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.328 2009/05/18 02:28:35 mrg Exp $"); 59__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.329 2009/05/27 02:19:50 mrg Exp $");
60 60
61#include "opt_ddb.h" 61#include "opt_ddb.h"
62#include "opt_kgdb.h" 62#include "opt_kgdb.h"
63#include "opt_sparc_arch.h" 63#include "opt_sparc_arch.h"
64 64
65#include <sys/param.h> 65#include <sys/param.h>
66#include <sys/systm.h> 66#include <sys/systm.h>
67#include <sys/device.h> 67#include <sys/device.h>
68#include <sys/proc.h> 68#include <sys/proc.h>
69#include <sys/queue.h> 69#include <sys/queue.h>
70#include <sys/pool.h> 70#include <sys/pool.h>
71#include <sys/exec.h> 71#include <sys/exec.h>
72#include <sys/core.h> 72#include <sys/core.h>
@@ -3492,26 +3492,43 @@ pmap_bootstrap4m(void *top) @@ -3492,26 +3492,43 @@ pmap_bootstrap4m(void *top)
3492 pmap_kremove_p = pmap_kremove4m; 3492 pmap_kremove_p = pmap_kremove4m;
3493 pmap_kprotect_p = pmap_kprotect4m; 3493 pmap_kprotect_p = pmap_kprotect4m;
3494 pmap_page_protect_p = pmap_page_protect4m; 3494 pmap_page_protect_p = pmap_page_protect4m;
3495 pmap_protect_p = pmap_protect4m; 3495 pmap_protect_p = pmap_protect4m;
3496 pmap_rmk_p = pmap_rmk4m; 3496 pmap_rmk_p = pmap_rmk4m;
3497 pmap_rmu_p = pmap_rmu4m; 3497 pmap_rmu_p = pmap_rmu4m;
3498#endif /* defined SUN4/SUN4C */ 3498#endif /* defined SUN4/SUN4C */
3499 3499
3500 /* 3500 /*
3501 * p points to top of kernel mem 3501 * p points to top of kernel mem
3502 */ 3502 */
3503 p = (vaddr_t)top; 3503 p = (vaddr_t)top;
3504 3504
 3505#if defined(MULTIPROCESSOR)
 3506 /*
 3507 * allocate the rest of the cpu_info{} area. note we waste the
 3508 * first one to get a VA space.
 3509 */
 3510 cpuinfo_len = ((sizeof(struct cpu_info) + NBPG - 1) & ~PGOFSET);
 3511 if (sparc_ncpus > 1) {
 3512 p = (p + NBPG - 1) & ~PGOFSET;
 3513 cpuinfo_data = (uint8_t *)p;
 3514 p += (cpuinfo_len * sparc_ncpus);
 3515
 3516 /* XXX we waste the first one */
 3517 memset(cpuinfo_data + cpuinfo_len, 0, cpuinfo_len * (sparc_ncpus - 1));
 3518 } else
 3519 cpuinfo_data = (uint8_t *)CPUINFO_VA;
 3520#endif
 3521
3505 /* 3522 /*
3506 * Intialize the kernel pmap. 3523 * Intialize the kernel pmap.
3507 */ 3524 */
3508 /* kernel_pmap_store.pm_ctxnum = 0; */ 3525 /* kernel_pmap_store.pm_ctxnum = 0; */
3509 kernel_pmap_store.pm_refcount = 1; 3526 kernel_pmap_store.pm_refcount = 1;
3510 3527
3511 /* 3528 /*
3512 * Set up pm_regmap for kernel to point NUREG *below* the beginning 3529 * Set up pm_regmap for kernel to point NUREG *below* the beginning
3513 * of kernel regmap storage. Since the kernel only uses regions 3530 * of kernel regmap storage. Since the kernel only uses regions
3514 * above NUREG, we save storage space and can index kernel and 3531 * above NUREG, we save storage space and can index kernel and
3515 * user regions in the same way. 3532 * user regions in the same way.
3516 */ 3533 */
3517 kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG]; 3534 kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
@@ -3529,42 +3546,26 @@ pmap_bootstrap4m(void *top) @@ -3529,42 +3546,26 @@ pmap_bootstrap4m(void *top)
3529 pmap_kernel()->pm_reg_ptps = (int **)(q = p); 3546 pmap_kernel()->pm_reg_ptps = (int **)(q = p);
3530 p += sparc_ncpus * sizeof(int **); 3547 p += sparc_ncpus * sizeof(int **);
3531 memset((void *)q, 0, (u_int)p - (u_int)q); 3548 memset((void *)q, 0, (u_int)p - (u_int)q);
3532 3549
3533 pmap_kernel()->pm_reg_ptps_pa = (int *)(q = p); 3550 pmap_kernel()->pm_reg_ptps_pa = (int *)(q = p);
3534 p += sparc_ncpus * sizeof(int *); 3551 p += sparc_ncpus * sizeof(int *);
3535 memset((void *)q, 0, (u_int)p - (u_int)q); 3552 memset((void *)q, 0, (u_int)p - (u_int)q);
3536 3553
3537 /* Allocate context administration */ 3554 /* Allocate context administration */
3538 pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p; 3555 pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p;
3539 p += ncontext * sizeof *ci; 3556 p += ncontext * sizeof *ci;
3540 memset((void *)ci, 0, (u_int)p - (u_int)ci); 3557 memset((void *)ci, 0, (u_int)p - (u_int)ci);
3541 3558
3542#if defined(MULTIPROCESSOR) 
3543 /* 
3544 * allocate the rest of the cpu_info{} area. note we waste the 
3545 * first one to get a VA space. 
3546 */ 
3547 p = (p + NBPG - 1) & ~PGOFSET; 
3548 cpuinfo_data = (uint8_t *)p; 
3549 cpuinfo_len = ((sizeof(struct cpu_info) + NBPG - 1) & ~PGOFSET); 
3550 p += (cpuinfo_len * sparc_ncpus); 
3551 prom_printf("extra cpus: %p, p: %p, gap start: %p, gap end: %p\n", 
3552 cpuinfo_data, p, etext_gap_start, etext_gap_end); 
3553 
3554 /* XXX we waste the first one */ 
3555 memset(cpuinfo_data + cpuinfo_len, 0, cpuinfo_len * (sparc_ncpus - 1)); 
3556#endif 
3557 
3558 /* 3559 /*
3559 * Set up the `constants' for the call to vm_init() 3560 * Set up the `constants' for the call to vm_init()
3560 * in main(). All pages beginning at p (rounded up to 3561 * in main(). All pages beginning at p (rounded up to
3561 * the next whole page) and continuing through the number 3562 * the next whole page) and continuing through the number
3562 * of available pages are free. 3563 * of available pages are free.
3563 */ 3564 */
3564 p = (p + NBPG - 1) & ~PGOFSET; 3565 p = (p + NBPG - 1) & ~PGOFSET;
3565 3566
3566 /* 3567 /*
3567 * Reserve memory for MMU pagetables. Some of these have severe 3568 * Reserve memory for MMU pagetables. Some of these have severe
3568 * alignment restrictions. We allocate in a sequence that 3569 * alignment restrictions. We allocate in a sequence that
3569 * minimizes alignment gaps. 3570 * minimizes alignment gaps.
3570 */ 3571 */
@@ -3789,47 +3790,56 @@ pmap_bootstrap4m(void *top) @@ -3789,47 +3790,56 @@ pmap_bootstrap4m(void *top)
3789 pa += NBPG; 3790 pa += NBPG;
3790 size -= NBPG; 3791 size -= NBPG;
3791 } 3792 }
3792 } 3793 }
3793 } 3794 }
3794 3795
3795 /* 3796 /*
3796 * Now switch to kernel pagetables (finally!) 3797 * Now switch to kernel pagetables (finally!)
3797 */ 3798 */
3798 mmu_install_tables(&cpuinfo); 3799 mmu_install_tables(&cpuinfo);
3799 3800
3800#ifdef MULTIPROCESSOR 3801#ifdef MULTIPROCESSOR
3801 /* 3802 /*
 3803 * Initialise any cpu-specific data now.
 3804 */
 3805 cpu_init_system();
 3806
 3807 /*
3802 * Remap cpu0 from CPUINFO_VA to the new correct value, wasting the 3808 * Remap cpu0 from CPUINFO_VA to the new correct value, wasting the
3803 * backing pages we allocated above XXX. 3809 * backing page we allocated above XXX.
3804 */ 3810 */
3805 for (off = 0, va = (vaddr_t)cpuinfo_data; 3811 for (off = 0, va = (vaddr_t)cpuinfo_data;
3806 off < sizeof(struct cpu_info); 3812 sparc_ncpus > 1 && off < sizeof(struct cpu_info);
3807 va += NBPG, off += NBPG) { 3813 va += NBPG, off += NBPG) {
3808 paddr_t pa = PMAP_BOOTSTRAP_VA2PA(CPUINFO_VA + off); 3814 paddr_t pa = PMAP_BOOTSTRAP_VA2PA(CPUINFO_VA + off);
3809 prom_printf("going to pmap_kenter_pa(va=%p, pa=%p)\n", va, pa); 3815 prom_printf("going to pmap_kenter_pa(va=%p, pa=%p)\n", va, pa);
3810 pmap_kremove(va, NBPG); 3816 pmap_kremove(va, NBPG);
3811 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE); 3817 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
 3818 cache_flush_page(va, 0);
 3819 cache_flush_page(CPUINFO_VA, 0);
3812 } 3820 }
3813 3821
3814 /* 3822 /*
3815 * Setup the cpus[] array and the ci_self links. 3823 * Setup the cpus[] array and the ci_self links.
3816 */ 3824 */
3817 prom_printf("setting cpus self reference\n"); 3825 prom_printf("setting cpus self reference\n");
3818 for (i = 0; i < sparc_ncpus; i++) { 3826 for (i = 0; i < sparc_ncpus; i++) {
3819 cpus[i] = (struct cpu_info *)(cpuinfo_data + (cpuinfo_len * i)); 3827 cpus[i] = (struct cpu_info *)(cpuinfo_data + (cpuinfo_len * i));
3820 cpus[i]->ci_self = cpus[i]; 3828 cpus[i]->ci_self = cpus[i];
3821 prom_printf("set cpu%d ci_self address: %p\n", i, cpus[i]); 3829 prom_printf("set cpu%d ci_self address: %p\n", i, cpus[i]);
3822 } 3830 }
 3831#else
 3832 cpus[0] = (struct cpu_info *)CPUINFO_VA;
3823#endif 3833#endif
3824 3834
3825 pmap_update(pmap_kernel()); 3835 pmap_update(pmap_kernel());
3826 prom_printf("pmap_bootstrap4m done\n"); 3836 prom_printf("pmap_bootstrap4m done\n");
3827} 3837}
3828 3838
3829static u_long prom_ctxreg; 3839static u_long prom_ctxreg;
3830 3840
3831void 3841void
3832mmu_install_tables(struct cpu_info *sc) 3842mmu_install_tables(struct cpu_info *sc)
3833{ 3843{
3834 3844
3835#ifdef DEBUG 3845#ifdef DEBUG