Wed May 27 02:19:50 2009 UTC
- use _MAXNCPU instead of 4
- convert xpmsg_lock from a simplelock to a kmutex (see the sketch below these notes)
- don't wait for sparc_noop IPI calls
- remove xpmsg_func's "retval" parameter and usage
- remove the IPI at high IPL message
- rework cpu_attach() a bunch, refactoring calls to getcpuinfo() and setting
  of cpi, and split most of the non-boot CPU handling into a new function
- make CPU_INFO_FOREACH() work whether modular or not
- move the MP cpu_info pages earlier
- move a few things in cpu.c around to consolidate the MP code together
- remove useless if (cpus == NULL) tests -- cpus is an array now

With these changes, and an additional change to crazyintr() to not printf(),
I can get to a single-user shell on my SS20 again.  I can run a few commands,
but some of them cause hangs.  "ps auxw" works, but "top -b" does not.

Tested in a UP LOCKDEBUG/DEBUG/DIAGNOSTIC kernel as well.
An MP kernel with only cpu0 configured panics when starting /sbin/init.
Not yet tested on a real UP machine.
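
As a quick reference for the locking change noted above, here is a condensed
sketch of the simplelock-to-kmutex conversion as it appears in the cpu.c diff
below.  The names (xpmsg_mutex, cpu_init_system(), xcall()) come from the
diff, but the bodies are heavily abbreviated for illustration and are not the
complete code:

    /* Before: statically initialised simplelock, taken around cross-calls
     * at splsched(). */
    #include <sys/simplelock.h>
    struct simplelock xpmsg_lock = SIMPLELOCK_INITIALIZER;

    /* After: a spin mutex at IPL_VM, set up once during early system init. */
    #include <sys/mutex.h>
    static kmutex_t xpmsg_mutex;

    void
    cpu_init_system(void)
    {
            mutex_init(&xpmsg_mutex, MUTEX_SPIN, IPL_VM);
    }

    /* xcall() then brackets the whole cross-call message transaction with
     * the mutex, replacing the old splsched()/LOCK_XPMSG() pairing. */
    mutex_spin_enter(&xpmsg_mutex);
    /* ... post XPMSG_FUNC messages, raise IPIs, wait for completion ... */
    mutex_spin_exit(&xpmsg_mutex);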


(mrg)
diff -r1.214 -r1.215 src/sys/arch/sparc/sparc/cpu.c
diff -r1.77 -r1.78 src/sys/arch/sparc/sparc/cpuvar.h
diff -r1.103 -r1.104 src/sys/arch/sparc/sparc/intr.c
diff -r1.328 -r1.329 src/sys/arch/sparc/sparc/pmap.c

cvs diff -r1.214 -r1.215 src/sys/arch/sparc/sparc/cpu.c

--- src/sys/arch/sparc/sparc/cpu.c 2009/05/18 01:36:11 1.214
+++ src/sys/arch/sparc/sparc/cpu.c 2009/05/27 02:19:49 1.215
@@ -1,1770 +1,1734 @@
1/* $NetBSD: cpu.c,v 1.214 2009/05/18 01:36:11 mrg Exp $ */ 1/* $NetBSD: cpu.c,v 1.215 2009/05/27 02:19:49 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996 4 * Copyright (c) 1996
5 * The President and Fellows of Harvard College. All rights reserved. 5 * The President and Fellows of Harvard College. All rights reserved.
6 * Copyright (c) 1992, 1993 6 * Copyright (c) 1992, 1993
7 * The Regents of the University of California. All rights reserved. 7 * The Regents of the University of California. All rights reserved.
8 * 8 *
9 * This software was developed by the Computer Systems Engineering group 9 * This software was developed by the Computer Systems Engineering group
10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11 * contributed to Berkeley. 11 * contributed to Berkeley.
12 * 12 *
13 * All advertising materials mentioning features or use of this software 13 * All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement: 14 * must display the following acknowledgement:
15 * This product includes software developed by Harvard University. 15 * This product includes software developed by Harvard University.
16 * This product includes software developed by the University of 16 * This product includes software developed by the University of
17 * California, Lawrence Berkeley Laboratory. 17 * California, Lawrence Berkeley Laboratory.
18 * 18 *
19 * Redistribution and use in source and binary forms, with or without 19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions 20 * modification, are permitted provided that the following conditions
21 * are met: 21 * are met:
22 * 22 *
23 * 1. Redistributions of source code must retain the above copyright 23 * 1. Redistributions of source code must retain the above copyright
24 * notice, this list of conditions and the following disclaimer. 24 * notice, this list of conditions and the following disclaimer.
25 * 2. Redistributions in binary form must reproduce the above copyright 25 * 2. Redistributions in binary form must reproduce the above copyright
26 * notice, this list of conditions and the following disclaimer in the 26 * notice, this list of conditions and the following disclaimer in the
27 * documentation and/or other materials provided with the distribution. 27 * documentation and/or other materials provided with the distribution.
28 * 3. All advertising materials mentioning features or use of this software 28 * 3. All advertising materials mentioning features or use of this software
29 * must display the following acknowledgement: 29 * must display the following acknowledgement:
30 * This product includes software developed by Aaron Brown and 30 * This product includes software developed by Aaron Brown and
31 * Harvard University. 31 * Harvard University.
32 * This product includes software developed by the University of 32 * This product includes software developed by the University of
33 * California, Berkeley and its contributors. 33 * California, Berkeley and its contributors.
34 * 4. Neither the name of the University nor the names of its contributors 34 * 4. Neither the name of the University nor the names of its contributors
35 * may be used to endorse or promote products derived from this software 35 * may be used to endorse or promote products derived from this software
36 * without specific prior written permission. 36 * without specific prior written permission.
37 * 37 *
38 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 38 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 39 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 40 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 41 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48 * SUCH DAMAGE. 48 * SUCH DAMAGE.
49 * 49 *
50 * @(#)cpu.c 8.5 (Berkeley) 11/23/93 50 * @(#)cpu.c 8.5 (Berkeley) 11/23/93
51 * 51 *
52 */ 52 */
53 53
54#include <sys/cdefs.h> 54#include <sys/cdefs.h>
55__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.214 2009/05/18 01:36:11 mrg Exp $"); 55__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.215 2009/05/27 02:19:49 mrg Exp $");
56 56
57#include "opt_multiprocessor.h" 57#include "opt_multiprocessor.h"
58#include "opt_lockdebug.h" 58#include "opt_lockdebug.h"
59#include "opt_ddb.h" 59#include "opt_ddb.h"
60#include "opt_sparc_arch.h" 60#include "opt_sparc_arch.h"
61 61
62#include <sys/param.h> 62#include <sys/param.h>
63#include <sys/systm.h> 63#include <sys/systm.h>
64#include <sys/device.h> 64#include <sys/device.h>
65#include <sys/malloc.h> 65#include <sys/malloc.h>
66#include <sys/simplelock.h> 66#include <sys/simplelock.h>
67#include <sys/kernel.h> 67#include <sys/kernel.h>
68 68
69#include <uvm/uvm.h> 69#include <uvm/uvm.h>
70 70
71#include <machine/promlib.h> 71#include <machine/promlib.h>
72#include <machine/autoconf.h> 72#include <machine/autoconf.h>
73#include <machine/cpu.h> 73#include <machine/cpu.h>
74#include <machine/reg.h> 74#include <machine/reg.h>
75#include <machine/ctlreg.h> 75#include <machine/ctlreg.h>
76#include <machine/trap.h> 76#include <machine/trap.h>
77#include <machine/pcb.h> 77#include <machine/pcb.h>
78#include <machine/pmap.h> 78#include <machine/pmap.h>
79 79
80#if defined(MULTIPROCESSOR) && defined(DDB) 80#if defined(MULTIPROCESSOR) && defined(DDB)
81#include <machine/db_machdep.h> 81#include <machine/db_machdep.h>
82#endif 82#endif
83 83
84#include <sparc/sparc/cache.h> 84#include <sparc/sparc/cache.h>
85#include <sparc/sparc/asm.h> 85#include <sparc/sparc/asm.h>
86#include <sparc/sparc/cpuvar.h> 86#include <sparc/sparc/cpuvar.h>
87#include <sparc/sparc/memreg.h> 87#include <sparc/sparc/memreg.h>
88#if defined(SUN4D) 88#if defined(SUN4D)
89#include <sparc/sparc/cpuunitvar.h> 89#include <sparc/sparc/cpuunitvar.h>
90#endif 90#endif
91 91
92struct cpu_softc { 92struct cpu_softc {
93 struct device sc_dev; /* generic device info */ 93 struct device sc_dev; /* generic device info */
94 struct cpu_info *sc_cpuinfo; 94 struct cpu_info *sc_cpuinfo;
95}; 95};
96 96
97/* The following are used externally (sysctl_hw). */ 97/* The following are used externally (sysctl_hw). */
98char machine[] = MACHINE; /* from <machine/param.h> */ 98char machine[] = MACHINE; /* from <machine/param.h> */
99char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */ 99char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */
100int cpu_arch; /* sparc architecture version */ 100int cpu_arch; /* sparc architecture version */
101char cpu_model[100]; /* machine model (primary CPU) */ 101char cpu_model[100]; /* machine model (primary CPU) */
102extern char machine_model[]; 102extern char machine_model[];
103 103
104int sparc_ncpus; /* # of CPUs detected by PROM */ 104int sparc_ncpus; /* # of CPUs detected by PROM */
105#ifdef MULTIPROCESSOR 105struct cpu_info *cpus[_MAXNCPU]; /* we only support 4 CPUs. */
106struct cpu_info *cpus[4]; /* we only support 4 CPUs. */ 
107u_int cpu_ready_mask; /* the set of CPUs marked as READY */ 
108#endif 
109 106
110/* The CPU configuration driver. */ 107/* The CPU configuration driver. */
111static void cpu_mainbus_attach(struct device *, struct device *, void *); 108static void cpu_mainbus_attach(struct device *, struct device *, void *);
112int cpu_mainbus_match(struct device *, struct cfdata *, void *); 109int cpu_mainbus_match(struct device *, struct cfdata *, void *);
113 110
114CFATTACH_DECL(cpu_mainbus, sizeof(struct cpu_softc), 111CFATTACH_DECL(cpu_mainbus, sizeof(struct cpu_softc),
115 cpu_mainbus_match, cpu_mainbus_attach, NULL, NULL); 112 cpu_mainbus_match, cpu_mainbus_attach, NULL, NULL);
116 113
117#if defined(SUN4D) 114#if defined(SUN4D)
118static int cpu_cpuunit_match(struct device *, struct cfdata *, void *); 115static int cpu_cpuunit_match(struct device *, struct cfdata *, void *);
119static void cpu_cpuunit_attach(struct device *, struct device *, void *); 116static void cpu_cpuunit_attach(struct device *, struct device *, void *);
120 117
121CFATTACH_DECL(cpu_cpuunit, sizeof(struct cpu_softc), 118CFATTACH_DECL(cpu_cpuunit, sizeof(struct cpu_softc),
122 cpu_cpuunit_match, cpu_cpuunit_attach, NULL, NULL); 119 cpu_cpuunit_match, cpu_cpuunit_attach, NULL, NULL);
123#endif /* SUN4D */ 120#endif /* SUN4D */
124 121
125static void cpu_attach(struct cpu_softc *, int, int); 122static void cpu_attach(struct cpu_softc *, int, int);
126 123
127static const char *fsrtoname(int, int, int); 124static const char *fsrtoname(int, int, int);
128void cache_print(struct cpu_softc *); 125void cache_print(struct cpu_softc *);
129void cpu_setup(void); 126void cpu_setup(void);
130void fpu_init(struct cpu_info *); 127void fpu_init(struct cpu_info *);
131 128
132#define IU_IMPL(psr) ((u_int)(psr) >> 28) 129#define IU_IMPL(psr) ((u_int)(psr) >> 28)
133#define IU_VERS(psr) (((psr) >> 24) & 0xf) 130#define IU_VERS(psr) (((psr) >> 24) & 0xf)
134 131
135#define SRMMU_IMPL(mmusr) ((u_int)(mmusr) >> 28) 132#define SRMMU_IMPL(mmusr) ((u_int)(mmusr) >> 28)
136#define SRMMU_VERS(mmusr) (((mmusr) >> 24) & 0xf) 133#define SRMMU_VERS(mmusr) (((mmusr) >> 24) & 0xf)
137 134
138int bootmid; /* Module ID of boot CPU */ 135int bootmid; /* Module ID of boot CPU */
139#if defined(MULTIPROCESSOR) 
140void cpu_spinup(struct cpu_info *); 
141static void init_cpuinfo(struct cpu_info *, int); 
142 
143int go_smp_cpus = 0; /* non-primary CPUs wait for this to go */ 
144 
145/* lock this to send IPI's */ 
146struct simplelock xpmsg_lock = SIMPLELOCK_INITIALIZER; 
147 
148static void 
149init_cpuinfo(struct cpu_info *cpi, int node) 
150{ 
151 vaddr_t intstack, va; 
152 
153 /* 
154 * Finish initialising this cpu_info. 
155 */ 
156 getcpuinfo(cpi, node); 
157 
158 /* 
159 * Arrange interrupt stack. This cpu will also abuse the bottom 
160 * half of the interrupt stack before it gets to run its idle LWP. 
161 */ 
162 intstack = uvm_km_alloc(kernel_map, INT_STACK_SIZE, 0, UVM_KMF_WIRED); 
163 if (intstack == 0) 
164 panic("%s: no uspace/intstack", __func__); 
165 cpi->eintstack = (void*)(intstack + INT_STACK_SIZE); 
166 
167 /* Allocate virtual space for pmap page_copy/page_zero */ 
168 va = uvm_km_alloc(kernel_map, 2*PAGE_SIZE, 0, UVM_KMF_VAONLY); 
169 if (va == 0) 
170 panic("%s: no virtual space", __func__); 
171 
172 cpi->vpage[0] = (void *)(va + 0); 
173 cpi->vpage[1] = (void *)(va + PAGE_SIZE); 
174} 
175#endif /* MULTIPROCESSOR */ 
176 136
177#ifdef notdef 137#ifdef notdef
178/* 138/*
179 * IU implementations are parceled out to vendors (with some slight 139 * IU implementations are parceled out to vendors (with some slight
180 * glitches). Printing these is cute but takes too much space. 140 * glitches). Printing these is cute but takes too much space.
181 */ 141 */
182static char *iu_vendor[16] = { 142static char *iu_vendor[16] = {
183 "Fujitsu", /* and also LSI Logic */ 143 "Fujitsu", /* and also LSI Logic */
184 "ROSS", /* ROSS (ex-Cypress) */ 144 "ROSS", /* ROSS (ex-Cypress) */
185 "BIT", 145 "BIT",
186 "LSIL", /* LSI Logic finally got their own */ 146 "LSIL", /* LSI Logic finally got their own */
187 "TI", /* Texas Instruments */ 147 "TI", /* Texas Instruments */
188 "Matsushita", 148 "Matsushita",
189 "Philips", 149 "Philips",
190 "Harvest", /* Harvest VLSI Design Center */ 150 "Harvest", /* Harvest VLSI Design Center */
191 "SPEC", /* Systems and Processes Engineering Corporation */ 151 "SPEC", /* Systems and Processes Engineering Corporation */
192 "Weitek", 152 "Weitek",
193 "vendor#10", 153 "vendor#10",
194 "vendor#11", 154 "vendor#11",
195 "vendor#12", 155 "vendor#12",
196 "vendor#13", 156 "vendor#13",
197 "vendor#14", 157 "vendor#14",
198 "vendor#15" 158 "vendor#15"
199}; 159};
200#endif 160#endif
201 161
 162#if defined(MULTIPROCESSOR)
 163u_int cpu_ready_mask; /* the set of CPUs marked as READY */
 164void cpu_spinup(struct cpu_info *);
 165static void cpu_attach_non_boot(struct cpu_softc *, struct cpu_info *, int);
 166
 167int go_smp_cpus = 0; /* non-primary CPUs wait for this to go */
 168
 169/*
 170 * This must be locked around all message transactions to ensure only
 171 * one CPU is generating them.
 172 */
 173static kmutex_t xpmsg_mutex;
 174
 175#endif /* MULTIPROCESSOR */
 176
202/* 177/*
203 * 4/110 comment: the 4/110 chops off the top 4 bits of an OBIO address. 178 * 4/110 comment: the 4/110 chops off the top 4 bits of an OBIO address.
204 * this confuses autoconf. for example, if you try and map 179 * this confuses autoconf. for example, if you try and map
205 * 0xfe000000 in obio space on a 4/110 it actually maps 0x0e000000. 180 * 0xfe000000 in obio space on a 4/110 it actually maps 0x0e000000.
206 * this is easy to verify with the PROM. this causes problems 181 * this is easy to verify with the PROM. this causes problems
207 * with devices like "esp0 at obio0 addr 0xfa000000" because the 182 * with devices like "esp0 at obio0 addr 0xfa000000" because the
208 * 4/110 treats it as esp0 at obio0 addr 0x0a000000" which is the 183 * 4/110 treats it as esp0 at obio0 addr 0x0a000000" which is the
209 * address of the 4/110's "sw0" scsi chip. the same thing happens 184 * address of the 4/110's "sw0" scsi chip. the same thing happens
210 * between zs1 and zs2. since the sun4 line is "closed" and 185 * between zs1 and zs2. since the sun4 line is "closed" and
211 * we know all the "obio" devices that will ever be on it we just 186 * we know all the "obio" devices that will ever be on it we just
212 * put in some special case "if"'s in the match routines of esp, 187 * put in some special case "if"'s in the match routines of esp,
213 * dma, and zs. 188 * dma, and zs.
214 */ 189 */
215 190
216int 191int
217cpu_mainbus_match(struct device *parent, struct cfdata *cf, void *aux) 192cpu_mainbus_match(struct device *parent, struct cfdata *cf, void *aux)
218{ 193{
219 struct mainbus_attach_args *ma = aux; 194 struct mainbus_attach_args *ma = aux;
220 195
221 return (strcmp(cf->cf_name, ma->ma_name) == 0); 196 return (strcmp(cf->cf_name, ma->ma_name) == 0);
222} 197}
223 198
224static void 199static void
225cpu_mainbus_attach(struct device *parent, struct device *self, void *aux) 200cpu_mainbus_attach(struct device *parent, struct device *self, void *aux)
226{ 201{
227 struct mainbus_attach_args *ma = aux; 202 struct mainbus_attach_args *ma = aux;
228 struct { uint32_t va; uint32_t size; } *mbprop = NULL; 203 struct { uint32_t va; uint32_t size; } *mbprop = NULL;
229 struct openprom_addr *rrp = NULL; 204 struct openprom_addr *rrp = NULL;
230 struct cpu_info *cpi; 205 struct cpu_info *cpi;
231 int mid, node; 206 int mid, node;
232 int error, n; 207 int error, n;
233 208
234 node = ma->ma_node; 209 node = ma->ma_node;
235 mid = (node != 0) ? prom_getpropint(node, "mid", 0) : 0; 210 mid = (node != 0) ? prom_getpropint(node, "mid", 0) : 0;
236 cpu_attach((struct cpu_softc *)self, node, mid); 211 cpu_attach((struct cpu_softc *)self, node, mid);
237 212
238 cpi = ((struct cpu_softc *)self)->sc_cpuinfo; 213 cpi = ((struct cpu_softc *)self)->sc_cpuinfo;
239 if (cpi == NULL) 214 if (cpi == NULL)
240 return; 215 return;
241 216
242 /* 217 /*
243 * Map CPU mailbox if available 218 * Map CPU mailbox if available
244 */ 219 */
245 if (node != 0 && (error = prom_getprop(node, "mailbox-virtual", 220 if (node != 0 && (error = prom_getprop(node, "mailbox-virtual",
246 sizeof(*mbprop), 221 sizeof(*mbprop),
247 &n, &mbprop)) == 0) { 222 &n, &mbprop)) == 0) {
248 cpi->mailbox = mbprop->va; 223 cpi->mailbox = mbprop->va;
249 free(mbprop, M_DEVBUF); 224 free(mbprop, M_DEVBUF);
250 } else if (node != 0 && (error = prom_getprop(node, "mailbox", 225 } else if (node != 0 && (error = prom_getprop(node, "mailbox",
251 sizeof(struct openprom_addr), 226 sizeof(struct openprom_addr),
252 &n, &rrp)) == 0) { 227 &n, &rrp)) == 0) {
253 /* XXX - map cached/uncached? If cached, deal with 228 /* XXX - map cached/uncached? If cached, deal with
254 * cache congruency! 229 * cache congruency!
255 */ 230 */
256 if (rrp[0].oa_space == 0) 231 if (rrp[0].oa_space == 0)
257 printf("%s: mailbox in mem space\n", self->dv_xname); 232 printf("%s: mailbox in mem space\n", self->dv_xname);
258 233
259 if (bus_space_map(ma->ma_bustag, 234 if (bus_space_map(ma->ma_bustag,
260 BUS_ADDR(rrp[0].oa_space, rrp[0].oa_base), 235 BUS_ADDR(rrp[0].oa_space, rrp[0].oa_base),
261 rrp[0].oa_size, 236 rrp[0].oa_size,
262 BUS_SPACE_MAP_LINEAR, 237 BUS_SPACE_MAP_LINEAR,
263 &cpi->mailbox) != 0) 238 &cpi->mailbox) != 0)
264 panic("%s: can't map CPU mailbox", self->dv_xname); 239 panic("%s: can't map CPU mailbox", self->dv_xname);
265 free(rrp, M_DEVBUF); 240 free(rrp, M_DEVBUF);
266 } 241 }
267 242
268 /* 243 /*
269 * Map Module Control Space if available 244 * Map Module Control Space if available
270 */ 245 */
271 if (cpi->mxcc == 0) 246 if (cpi->mxcc == 0)
272 /* We only know what it means on MXCCs */ 247 /* We only know what it means on MXCCs */
273 return; 248 return;
274 249
275 rrp = NULL; 250 rrp = NULL;
276 if (node == 0 || (error = prom_getprop(node, "reg", 251 if (node == 0 || (error = prom_getprop(node, "reg",
277 sizeof(struct openprom_addr), 252 sizeof(struct openprom_addr),
278 &n, &rrp)) != 0) 253 &n, &rrp)) != 0)
279 return; 254 return;
280 255
281 /* register set #0 is the MBus port register */ 256 /* register set #0 is the MBus port register */
282 if (bus_space_map(ma->ma_bustag, 257 if (bus_space_map(ma->ma_bustag,
283 BUS_ADDR(rrp[0].oa_space, rrp[0].oa_base), 258 BUS_ADDR(rrp[0].oa_space, rrp[0].oa_base),
284 rrp[0].oa_size, 259 rrp[0].oa_size,
285 BUS_SPACE_MAP_LINEAR, 260 BUS_SPACE_MAP_LINEAR,
286 &cpi->ci_mbusport) != 0) { 261 &cpi->ci_mbusport) != 0) {
287 panic("%s: can't map CPU regs", self->dv_xname); 262 panic("%s: can't map CPU regs", self->dv_xname);
288 } 263 }
289 /* register set #1: MCXX control */ 264 /* register set #1: MCXX control */
290 if (bus_space_map(ma->ma_bustag, 265 if (bus_space_map(ma->ma_bustag,
291 BUS_ADDR(rrp[1].oa_space, rrp[1].oa_base), 266 BUS_ADDR(rrp[1].oa_space, rrp[1].oa_base),
292 rrp[1].oa_size, 267 rrp[1].oa_size,
293 BUS_SPACE_MAP_LINEAR, 268 BUS_SPACE_MAP_LINEAR,
294 &cpi->ci_mxccregs) != 0) { 269 &cpi->ci_mxccregs) != 0) {
295 panic("%s: can't map CPU regs", self->dv_xname); 270 panic("%s: can't map CPU regs", self->dv_xname);
296 } 271 }
297 /* register sets #3 and #4 are E$ cache data and tags */ 272 /* register sets #3 and #4 are E$ cache data and tags */
298 273
299 free(rrp, M_DEVBUF); 274 free(rrp, M_DEVBUF);
300} 275}
301 276
302#if defined(SUN4D) 277#if defined(SUN4D)
303static int 278static int
304cpu_cpuunit_match(struct device *parent, struct cfdata *cf, void *aux) 279cpu_cpuunit_match(struct device *parent, struct cfdata *cf, void *aux)
305{ 280{
306 struct cpuunit_attach_args *cpua = aux; 281 struct cpuunit_attach_args *cpua = aux;
307 282
308 return (strcmp(cf->cf_name, cpua->cpua_type) == 0); 283 return (strcmp(cf->cf_name, cpua->cpua_type) == 0);
309} 284}
310 285
311static void 286static void
312cpu_cpuunit_attach(struct device *parent, struct device *self, void *aux) 287cpu_cpuunit_attach(struct device *parent, struct device *self, void *aux)
313{ 288{
314 struct cpuunit_attach_args *cpua = aux; 289 struct cpuunit_attach_args *cpua = aux;
315 290
316 cpu_attach((struct cpu_softc *)self, cpua->cpua_node, 291 cpu_attach((struct cpu_softc *)self, cpua->cpua_node,
317 cpua->cpua_device_id); 292 cpua->cpua_device_id);
318} 293}
319#endif /* SUN4D */ 294#endif /* SUN4D */
320 295
321/* 296/*
322 * Attach the CPU. 297 * Attach the CPU.
323 * Discover interesting goop about the virtual address cache 298 * Discover interesting goop about the virtual address cache
324 * (slightly funny place to do it, but this is where it is to be found). 299 * (slightly funny place to do it, but this is where it is to be found).
325 */ 300 */
326static void 301static void
327cpu_attach(struct cpu_softc *sc, int node, int mid) 302cpu_attach(struct cpu_softc *sc, int node, int mid)
328{ 303{
 304 char buf[100];
329 struct cpu_info *cpi; 305 struct cpu_info *cpi;
330 int idx; 306 int idx;
331 static int cpu_attach_count = 0; 307 static int cpu_attach_count = 0;
332 308
333 /* 309 /*
334 * The first CPU we're attaching must be the boot CPU. 310 * The first CPU we're attaching must be the boot CPU.
335 * (see autoconf.c and cpuunit.c) 311 * (see autoconf.c and cpuunit.c)
336 */ 312 */
337 idx = cpu_attach_count++; 313 idx = cpu_attach_count++;
338 if (cpu_attach_count == 1) { 
339 getcpuinfo(&cpuinfo, node); 
340 
341#if defined(MULTIPROCESSOR) 
342 cpi = sc->sc_cpuinfo = cpus[idx]; 
343#else 
344 /* The `local' VA is global for uniprocessor. */ 
345 cpi = sc->sc_cpuinfo = (struct cpu_info *)CPUINFO_VA; 
346#endif 
347 cpi->master = 1; 
348 cpi->eintstack = eintstack; 
349 /* Note: `curpcb' is set to `proc0' in locore */ 
350 
351 /* 
352 * If we haven't been able to determine the Id of the 
353 * boot CPU, set it now. In this case we can only boot 
354 * from CPU #0 (see also the CPU attach code in autoconf.c) 
355 */ 
356 if (bootmid == 0) 
357 bootmid = mid; 
358 } else { 
359#if defined(MULTIPROCESSOR) 
360 int error; 
361 
362 /* 
363 * Initialise this cpu's cpu_info. 
364 */ 
365 cpi = sc->sc_cpuinfo = cpus[idx]; 
366 init_cpuinfo(cpi, node); 
367 314
368 /* 315#if !defined(MULTIPROCESSOR)
369 * Call the MI attach which creates an idle LWP for us. 316 if (cpu_attach_count > 1) {
370 */ 
371 error = mi_cpu_attach(cpi); 
372 if (error != 0) { 
373 aprint_normal("\n"); 
374 aprint_error("%s: mi_cpu_attach failed with %d\n", 
375 sc->sc_dev.dv_xname, error); 
376 return; 
377 } 
378 
379 /* 
380 * Note: `eintstack' is set in init_cpuinfo() above. 
381 * The %wim register will be initialized in cpu_hatch(). 
382 */ 
383 cpi->ci_curlwp = cpi->ci_data.cpu_idlelwp; 
384 cpi->curpcb = (struct pcb *)cpi->ci_curlwp->l_addr; 
385 cpi->curpcb->pcb_wim = 1; 
386 
387#else 
388 sc->sc_cpuinfo = NULL; 
389 printf(": no SMP support in kernel\n"); 317 printf(": no SMP support in kernel\n");
390 return; 318 return;
391#endif 
392 } 319 }
393 
394#ifdef DEBUG 
395 cpi->redzone = (void *)((long)cpi->eintstack + REDSIZE); 
396#endif 320#endif
397 321
 322 /*
 323 * Initialise this cpu's cpu_info.
 324 */
 325 cpi = sc->sc_cpuinfo = cpus[idx];
 326 getcpuinfo(cpi, node);
 327
398 cpi->ci_cpuid = idx; 328 cpi->ci_cpuid = idx;
399 cpi->mid = mid; 329 cpi->mid = mid;
400 cpi->node = node; 330 cpi->node = node;
 331#ifdef DEBUG
 332 cpi->redzone = (void *)((long)cpi->eintstack + REDSIZE);
 333#endif
401 334
402 if (sparc_ncpus > 1) { 335 if (sparc_ncpus > 1) {
403 printf(": mid %d", mid); 336 printf(": mid %d", mid);
404 if (mid == 0 && !CPU_ISSUN4D) 337 if (mid == 0 && !CPU_ISSUN4D)
405 printf(" [WARNING: mid should not be 0]"); 338 printf(" [WARNING: mid should not be 0]");
406 } 339 }
407 340
 341#if defined(MULTIPROCESSOR)
 342 if (cpu_attach_count > 1) {
 343 cpu_attach_non_boot(sc, cpi, node);
 344 return;
 345 }
 346#endif /* MULTIPROCESSOR */
 347
 348 /* Stuff to only run on the boot CPU */
 349 cpu_setup();
 350 snprintf(buf, sizeof buf, "%s @ %s MHz, %s FPU",
 351 cpi->cpu_name, clockfreq(cpi->hz), cpi->fpu_name);
 352 snprintf(cpu_model, sizeof cpu_model, "%s (%s)",
 353 machine_model, buf);
 354 printf(": %s\n", buf);
 355 cache_print(sc);
 356
 357 cpi->master = 1;
 358 cpi->eintstack = eintstack;
 359
 360 /*
 361 * If we haven't been able to determine the Id of the
 362 * boot CPU, set it now. In this case we can only boot
 363 * from CPU #0 (see also the CPU attach code in autoconf.c)
 364 */
 365 if (bootmid == 0)
 366 bootmid = mid;
 367}
 368
 369/*
 370 * Finish CPU attach.
 371 * Must be run by the CPU which is being attached.
 372 */
 373void
 374cpu_setup(void)
 375{
 376 if (cpuinfo.hotfix)
 377 (*cpuinfo.hotfix)(&cpuinfo);
 378
 379 /* Initialize FPU */
 380 fpu_init(&cpuinfo);
 381
 382 /* Enable the cache */
 383 cpuinfo.cache_enable();
 384
 385 cpuinfo.flags |= CPUFLG_HATCHED;
 386}
 387
 388#if defined(MULTIPROCESSOR)
 389/*
 390 * Perform most of the tasks needed for a non-boot CPU.
 391 */
 392static void
 393cpu_attach_non_boot(struct cpu_softc *sc, struct cpu_info *cpi, int node)
 394{
 395 vaddr_t intstack, va;
 396 int error;
 397
 398 /*
 399 * Arrange interrupt stack. This cpu will also abuse the bottom
 400 * half of the interrupt stack before it gets to run its idle LWP.
 401 */
 402 intstack = uvm_km_alloc(kernel_map, INT_STACK_SIZE, 0, UVM_KMF_WIRED);
 403 if (intstack == 0)
 404 panic("%s: no uspace/intstack", __func__);
 405 cpi->eintstack = (void*)(intstack + INT_STACK_SIZE);
408 406
409 if (cpi->master) { 407 /* Allocate virtual space for pmap page_copy/page_zero */
410 char buf[100]; 408 va = uvm_km_alloc(kernel_map, 2*PAGE_SIZE, 0, UVM_KMF_VAONLY);
 409 if (va == 0)
 410 panic("%s: no virtual space", __func__);
411 411
412 cpu_setup(); 412 cpi->vpage[0] = (void *)(va + 0);
413 snprintf(buf, sizeof buf, "%s @ %s MHz, %s FPU", 413 cpi->vpage[1] = (void *)(va + PAGE_SIZE);
414 cpi->cpu_name, clockfreq(cpi->hz), cpi->fpu_name); 414
415 snprintf(cpu_model, sizeof cpu_model, "%s (%s)", 415 /*
416 machine_model, buf); 416 * Call the MI attach which creates an idle LWP for us.
417 printf(": %s\n", buf); 417 */
418 cache_print(sc); 418 error = mi_cpu_attach(cpi);
 419 if (error != 0) {
 420 aprint_normal("\n");
 421 aprint_error("%s: mi_cpu_attach failed with %d\n",
 422 sc->sc_dev.dv_xname, error);
419 return; 423 return;
420 } 424 }
421 425
422#if defined(MULTIPROCESSOR) 426 /*
 427 * Note: `eintstack' is set in init_cpuinfo() above.
 428 * The %wim register will be initialized in cpu_hatch().
 429 */
 430 cpi->ci_curlwp = cpi->ci_data.cpu_idlelwp;
 431 cpi->curpcb = (struct pcb *)cpi->ci_curlwp->l_addr;
 432 cpi->curpcb->pcb_wim = 1;
 433
423 /* for now use the fixed virtual addresses setup in autoconf.c */ 434 /* for now use the fixed virtual addresses setup in autoconf.c */
424 cpi->intreg_4m = (struct icr_pi *) 435 cpi->intreg_4m = (struct icr_pi *)
425 (PI_INTR_VA + (_MAXNBPG * CPU_MID2CPUNO(mid))); 436 (PI_INTR_VA + (_MAXNBPG * CPU_MID2CPUNO(cpi->mid)));
426 437
427 /* Now start this CPU */ 438 /* Now start this CPU */
428 cpu_spinup(cpi); 439 cpu_spinup(cpi);
429 printf(": %s @ %s MHz, %s FPU\n", cpi->cpu_name, 440 printf(": %s @ %s MHz, %s FPU\n", cpi->cpu_name,
430 clockfreq(cpi->hz), cpi->fpu_name); 441 clockfreq(cpi->hz), cpi->fpu_name);
431 442
432 cache_print(sc); 443 cache_print(sc);
433 444
434 if (sparc_ncpus > 1 && idx == sparc_ncpus-1) { 445 /*
 446 * Now we're on the last CPU to be attaching.
 447 */
 448 if (sparc_ncpus > 1 && cpi->ci_cpuid == sparc_ncpus - 1) {
435 CPU_INFO_ITERATOR n; 449 CPU_INFO_ITERATOR n;
436 /* 450 /*
437 * Install MP cache flush functions, unless the 451 * Install MP cache flush functions, unless the
438 * single-processor versions are no-ops. 452 * single-processor versions are no-ops.
439 */ 453 */
440 for (CPU_INFO_FOREACH(n, cpi)) { 454 for (CPU_INFO_FOREACH(n, cpi)) {
441#define SET_CACHE_FUNC(x) \ 455#define SET_CACHE_FUNC(x) \
442 if (cpi->x != __CONCAT(noop_,x)) cpi->x = __CONCAT(smp_,x) 456 if (cpi->x != __CONCAT(noop_,x)) cpi->x = __CONCAT(smp_,x)
443 SET_CACHE_FUNC(vcache_flush_page); 457 SET_CACHE_FUNC(vcache_flush_page);
444 SET_CACHE_FUNC(vcache_flush_segment); 458 SET_CACHE_FUNC(vcache_flush_segment);
445 SET_CACHE_FUNC(vcache_flush_region); 459 SET_CACHE_FUNC(vcache_flush_region);
446 SET_CACHE_FUNC(vcache_flush_context); 460 SET_CACHE_FUNC(vcache_flush_context);
447 } 461 }
448 } 462 }
449#endif /* MULTIPROCESSOR */ 463#undef SET_CACHE_FUNC
450} 464}
451 465
452#if defined(MULTIPROCESSOR) 
453/* 466/*
454 * Start secondary processors in motion. 467 * Start secondary processors in motion.
455 */ 468 */
456void 469void
457cpu_boot_secondary_processors(void) 470cpu_boot_secondary_processors(void)
458{ 471{
459 CPU_INFO_ITERATOR n; 472 CPU_INFO_ITERATOR n;
460 struct cpu_info *cpi; 473 struct cpu_info *cpi;
461 474
462 printf("cpu0: booting secondary processors:"); 475 printf("cpu0: booting secondary processors:");
463 for (CPU_INFO_FOREACH(n, cpi)) { 476 for (CPU_INFO_FOREACH(n, cpi)) {
464 if (cpuinfo.mid == cpi->mid || 477 if (cpuinfo.mid == cpi->mid ||
465 (cpi->flags & CPUFLG_HATCHED) == 0) 478 (cpi->flags & CPUFLG_HATCHED) == 0)
466 continue; 479 continue;
467 480
468 printf(" cpu%d", cpi->ci_cpuid); 481 printf(" cpu%d", cpi->ci_cpuid);
469 cpi->flags |= CPUFLG_READY; 482 cpi->flags |= CPUFLG_READY;
470 cpu_ready_mask |= (1 << n); 483 cpu_ready_mask |= (1 << n);
471 } 484 }
472 485
473 /* Mark the boot CPU as ready */ 486 /* Mark the boot CPU as ready */
474 cpuinfo.flags |= CPUFLG_READY; 487 cpuinfo.flags |= CPUFLG_READY;
475 cpu_ready_mask |= (1 << 0); 488 cpu_ready_mask |= (1 << 0);
476 489
477 /* Tell the other CPU's to start up. */ 490 /* Tell the other CPU's to start up. */
478 go_smp_cpus = 1; 491 go_smp_cpus = 1;
479 492
480 printf("\n"); 493 printf("\n");
481} 494}
482#endif /* MULTIPROCESSOR */ 
483 495
484/* 496/*
485 * Finish CPU attach. 497 * Early initialisation, before main().
486 * Must be run by the CPU which is being attached. 
487 */ 498 */
488void 499void
489cpu_setup(void) 500cpu_init_system(void)
490{ 501{
491 if (cpuinfo.hotfix) 
492 (*cpuinfo.hotfix)(&cpuinfo); 
493 
494 /* Initialize FPU */ 
495 fpu_init(&cpuinfo); 
496 502
497 /* Enable the cache */ 503 mutex_init(&xpmsg_mutex, MUTEX_SPIN, IPL_VM);
498 cpuinfo.cache_enable(); 
499 
500 cpuinfo.flags |= CPUFLG_HATCHED; 
501} 504}
502 505
503#if defined(MULTIPROCESSOR) 
504 
505extern void cpu_hatch(void); /* in locore.s */ 
506 
507/* 506/*
508 * Allocate per-CPU data, then start up this CPU using PROM. 507 * Allocate per-CPU data, then start up this CPU using PROM.
509 */ 508 */
510void 509void
511cpu_spinup(struct cpu_info *cpi) 510cpu_spinup(struct cpu_info *cpi)
512{ 511{
 512 extern void cpu_hatch(void); /* in locore.s */
513 struct openprom_addr oa; 513 struct openprom_addr oa;
514 void *pc = (void *)cpu_hatch; 514 void *pc;
515 int n; 515 int n;
516 516
 517 pc = (void *)cpu_hatch;
 518
517 /* Setup CPU-specific MMU tables */ 519 /* Setup CPU-specific MMU tables */
518 pmap_alloc_cpu(cpi); 520 pmap_alloc_cpu(cpi);
519 521
520 cpi->flags &= ~CPUFLG_HATCHED; 522 cpi->flags &= ~CPUFLG_HATCHED;
521 523
522 /* 524 /*
523 * The physical address of the context table is passed to 525 * The physical address of the context table is passed to
524 * the PROM in a "physical address descriptor". 526 * the PROM in a "physical address descriptor".
525 */ 527 */
526 oa.oa_space = 0; 528 oa.oa_space = 0;
527 oa.oa_base = (uint32_t)cpi->ctx_tbl_pa; 529 oa.oa_base = (uint32_t)cpi->ctx_tbl_pa;
528 oa.oa_size = cpi->mmu_ncontext * sizeof(cpi->ctx_tbl[0]); /*???*/ 530 oa.oa_size = cpi->mmu_ncontext * sizeof(cpi->ctx_tbl[0]); /*???*/
529 531
530 /* 532 /*
531 * Flush entire cache here, since the CPU may start with 533 * Flush entire cache here, since the CPU may start with
532 * caches off, hence no cache-coherency may be assumed. 534 * caches off, hence no cache-coherency may be assumed.
533 */ 535 */
534 cpuinfo.cache_flush_all(); 536 cpuinfo.cache_flush_all();
535 prom_cpustart(cpi->node, &oa, 0, pc); 537 prom_cpustart(cpi->node, &oa, 0, pc);
536 538
537 /* 539 /*
538 * Wait for this CPU to spin up. 540 * Wait for this CPU to spin up.
539 */ 541 */
540 for (n = 10000; n != 0; n--) { 542 for (n = 10000; n != 0; n--) {
541 cache_flush((void *) __UNVOLATILE(&cpi->flags), 543 cache_flush((void *) __UNVOLATILE(&cpi->flags),
542 sizeof(cpi->flags)); 544 sizeof(cpi->flags));
543 if (cpi->flags & CPUFLG_HATCHED) 545 if (cpi->flags & CPUFLG_HATCHED)
544 return; 546 return;
545 delay(100); 547 delay(100);
546 } 548 }
547 printf("CPU did not spin up\n"); 549 printf("CPU did not spin up\n");
548} 550}
549 551
550/* 552/*
551 * Call a function on some CPUs. `cpuset' can be set to CPUSET_ALL 553 * Call a function on some CPUs. `cpuset' can be set to CPUSET_ALL
552 * to call every CPU, or `1 << cpi->ci_cpuid' for each CPU to call. 554 * to call every CPU, or `1 << cpi->ci_cpuid' for each CPU to call.
553 */ 555 */
554void 556void
555xcall(xcall_func_t func, xcall_trap_t trap, int arg0, int arg1, int arg2, 557xcall(xcall_func_t func, xcall_trap_t trap, int arg0, int arg1, int arg2,
556 u_int cpuset) 558 u_int cpuset)
557{ 559{
558 struct cpu_info *cpi; 560 struct cpu_info *cpi;
559 int s, n, i, done, callself, mybit; 561 int n, i, done, callself, mybit;
560 volatile struct xpmsg_func *p; 562 volatile struct xpmsg_func *p;
561 int fasttrap; 563 int fasttrap;
562 564 int is_noop = func == (xcall_func_t)sparc_noop;
563 /* XXX - note p->retval is probably no longer useful */ 
564 565
565 mybit = (1 << cpuinfo.ci_cpuid); 566 mybit = (1 << cpuinfo.ci_cpuid);
566 callself = func && (cpuset & mybit) != 0; 567 callself = func && (cpuset & mybit) != 0;
567 cpuset &= ~mybit; 568 cpuset &= ~mybit;
568 569
569 /* 
570 * If no cpus are configured yet, just call ourselves. 
571 */ 
572 if (cpus == NULL) { 
573 p = &cpuinfo.msg.u.xpmsg_func; 
574 if (callself) 
575 p->retval = (*func)(arg0, arg1, arg2); 
576 return; 
577 } 
578 
579 /* Mask any CPUs that are not ready */ 570 /* Mask any CPUs that are not ready */
580 cpuset &= cpu_ready_mask; 571 cpuset &= cpu_ready_mask;
581 572
582 /* prevent interrupts that grab the kernel lock */ 573 /* prevent interrupts that grab the kernel lock */
583 s = splsched(); 574 mutex_spin_enter(&xpmsg_mutex);
584#ifdef DEBUG 
585 if (!cold) { 
586 u_int pc, lvl = ((u_int)s & PSR_PIL) >> 8; 
587 if (lvl > IPL_SCHED) { 
588 __asm("mov %%i7, %0" : "=r" (pc) : ); 
589 printf_nolog("%d: xcall at lvl %u from 0x%x\n", 
590 cpu_number(), lvl, pc); 
591 } 
592 } 
593#endif 
594 LOCK_XPMSG(); 
595 575
596 /* 576 /*
597 * Firstly, call each CPU. We do this so that they might have 577 * Firstly, call each CPU. We do this so that they might have
598 * finished by the time we start looking. 578 * finished by the time we start looking.
599 */ 579 */
600 fasttrap = trap != NULL ? 1 : 0; 580 fasttrap = trap != NULL ? 1 : 0;
601 for (CPU_INFO_FOREACH(n, cpi)) { 581 for (CPU_INFO_FOREACH(n, cpi)) {
602 582
603 /* Note: n == cpi->ci_cpuid */ 583 /* Note: n == cpi->ci_cpuid */
604 if ((cpuset & (1 << n)) == 0) 584 if ((cpuset & (1 << n)) == 0)
605 continue; 585 continue;
606 586
607 cpi->msg.tag = XPMSG_FUNC; 587 cpi->msg.tag = XPMSG_FUNC;
608 cpi->msg.complete = 0; 588 cpi->msg.complete = 0;
609 p = &cpi->msg.u.xpmsg_func; 589 p = &cpi->msg.u.xpmsg_func;
610 p->func = func; 590 p->func = func;
611 p->trap = trap; 591 p->trap = trap;
612 p->arg0 = arg0; 592 p->arg0 = arg0;
613 p->arg1 = arg1; 593 p->arg1 = arg1;
614 p->arg2 = arg2; 594 p->arg2 = arg2;
615 /* Fast cross calls use interrupt level 14 */ 595 /* Fast cross calls use interrupt level 14 */
616 raise_ipi(cpi,13+fasttrap);/*xcall_cookie->pil*/ 596 raise_ipi(cpi,13+fasttrap);/*xcall_cookie->pil*/
617 } 597 }
618 598
619 /* 599 /*
620 * Second, call ourselves. 600 * Second, call ourselves.
621 */ 601 */
622 p = &cpuinfo.msg.u.xpmsg_func; 602 p = &cpuinfo.msg.u.xpmsg_func;
623 if (callself) 603 if (callself)
624 p->retval = (*func)(arg0, arg1, arg2); 604 (*func)(arg0, arg1, arg2);
625 605
626 /* 606 /*
627 * Lastly, start looping, waiting for all CPUs to register that they 607 * Lastly, start looping, waiting for all CPUs to register that they
628 * have completed (bailing if it takes "too long", being loud about 608 * have completed (bailing if it takes "too long", being loud about
629 * this in the process). 609 * this in the process).
630 */ 610 */
631 done = 0; 611 done = is_noop;
632 i = 100000; /* time-out, not too long, but still an _AGE_ */ 612 i = 100000; /* time-out, not too long, but still an _AGE_ */
633 while (!done) { 613 while (!done) {
634 if (--i < 0) { 614 if (--i < 0) {
635 printf_nolog("xcall(cpu%d,%p): couldn't ping cpus:", 615 printf_nolog("xcall(cpu%d,%p): couldn't ping cpus:",
636 cpu_number(), func); 616 cpu_number(), func);
637 } 617 }
638 618
639 done = 1; 619 done = 1;
640 for (CPU_INFO_FOREACH(n, cpi)) { 620 for (CPU_INFO_FOREACH(n, cpi)) {
641 if ((cpuset & (1 << n)) == 0) 621 if ((cpuset & (1 << n)) == 0)
642 continue; 622 continue;
643 623
644 if (cpi->msg.complete == 0) { 624 if (cpi->msg.complete == 0) {
645 if (i < 0) { 625 if (i < 0) {
646 printf_nolog(" cpu%d", cpi->ci_cpuid); 626 printf_nolog(" cpu%d", cpi->ci_cpuid);
647 } else { 627 } else {
648 done = 0; 628 done = 0;
649 break; 629 break;
650 } 630 }
651 } 631 }
652 } 632 }
653 } 633 }
654 if (i < 0) 634 if (i < 0)
655 printf_nolog("\n"); 635 printf_nolog("\n");
656 636
657 UNLOCK_XPMSG(); 637 mutex_spin_exit(&xpmsg_mutex);
658 splx(s); 
659} 638}
660 639
661/* 640/*
662 * Tell all CPUs other than the current one to enter the PROM idle loop. 641 * Tell all CPUs other than the current one to enter the PROM idle loop.
663 */ 642 */
664void 643void
665mp_pause_cpus(void) 644mp_pause_cpus(void)
666{ 645{
667 CPU_INFO_ITERATOR n; 646 CPU_INFO_ITERATOR n;
668 struct cpu_info *cpi; 647 struct cpu_info *cpi;
669 648
670 if (cpus == NULL) 
671 return; 
672 
673 for (CPU_INFO_FOREACH(n, cpi)) { 649 for (CPU_INFO_FOREACH(n, cpi)) {
674 if (cpuinfo.mid == cpi->mid || 650 if (cpuinfo.mid == cpi->mid ||
675 (cpi->flags & CPUFLG_HATCHED) == 0) 651 (cpi->flags & CPUFLG_HATCHED) == 0)
676 continue; 652 continue;
677 653
678 /* 654 /*
679 * This PROM utility will put the OPENPROM_MBX_ABORT 655 * This PROM utility will put the OPENPROM_MBX_ABORT
680 * message (0xfc) in the target CPU's mailbox and then 656 * message (0xfc) in the target CPU's mailbox and then
681 * send it a level 15 soft interrupt. 657 * send it a level 15 soft interrupt.
682 */ 658 */
683 if (prom_cpuidle(cpi->node) != 0) 659 if (prom_cpuidle(cpi->node) != 0)
684 printf("cpu%d could not be paused\n", cpi->ci_cpuid); 660 printf("cpu%d could not be paused\n", cpi->ci_cpuid);
685 } 661 }
686} 662}
687 663
688/* 664/*
689 * Resume all idling CPUs. 665 * Resume all idling CPUs.
690 */ 666 */
691void 667void
692mp_resume_cpus(void) 668mp_resume_cpus(void)
693{ 669{
694 CPU_INFO_ITERATOR n; 670 CPU_INFO_ITERATOR n;
695 struct cpu_info *cpi; 671 struct cpu_info *cpi;
696 672
697 if (cpus == NULL) 
698 return; 
699 
700 for (CPU_INFO_FOREACH(n, cpi)) { 673 for (CPU_INFO_FOREACH(n, cpi)) {
701 if (cpuinfo.mid == cpi->mid || 674 if (cpuinfo.mid == cpi->mid ||
702 (cpi->flags & CPUFLG_HATCHED) == 0) 675 (cpi->flags & CPUFLG_HATCHED) == 0)
703 continue; 676 continue;
704 677
705 /* 678 /*
706 * This PROM utility makes the target CPU return 679 * This PROM utility makes the target CPU return
707 * from its prom_cpuidle(0) call (see intr.c:nmi_soft()). 680 * from its prom_cpuidle(0) call (see intr.c:nmi_soft()).
708 */ 681 */
709 if (prom_cpuresume(cpi->node) != 0) 682 if (prom_cpuresume(cpi->node) != 0)
710 printf("cpu%d could not be resumed\n", cpi->ci_cpuid); 683 printf("cpu%d could not be resumed\n", cpi->ci_cpuid);
711 } 684 }
712} 685}
713 686
714/* 687/*
715 * Tell all CPUs except the current one to hurry back into the prom 688 * Tell all CPUs except the current one to hurry back into the prom
716 */ 689 */
717void 690void
718mp_halt_cpus(void) 691mp_halt_cpus(void)
719{ 692{
720 CPU_INFO_ITERATOR n; 693 CPU_INFO_ITERATOR n;
721 struct cpu_info *cpi; 694 struct cpu_info *cpi;
722 695
723 if (cpus == NULL) 
724 return; 
725 
726 for (CPU_INFO_FOREACH(n, cpi)) { 696 for (CPU_INFO_FOREACH(n, cpi)) {
727 int r; 697 int r;
728 698
729 if (cpuinfo.mid == cpi->mid) 699 if (cpuinfo.mid == cpi->mid)
730 continue; 700 continue;
731 701
732 /* 702 /*
733 * This PROM utility will put the OPENPROM_MBX_STOP 703 * This PROM utility will put the OPENPROM_MBX_STOP
734 * message (0xfb) in the target CPU's mailbox and then 704 * message (0xfb) in the target CPU's mailbox and then
735 * send it a level 15 soft interrupt. 705 * send it a level 15 soft interrupt.
736 */ 706 */
737 r = prom_cpustop(cpi->node); 707 r = prom_cpustop(cpi->node);
738 printf("cpu%d %shalted\n", cpi->ci_cpuid, 708 printf("cpu%d %shalted\n", cpi->ci_cpuid,
739 r == 0 ? "" : "(boot CPU?) can not be "); 709 r == 0 ? "" : "(boot CPU?) can not be ");
740 } 710 }
741} 711}
742 712
743#if defined(DDB) 713#if defined(DDB)
744void 714void
745mp_pause_cpus_ddb(void) 715mp_pause_cpus_ddb(void)
746{ 716{
747 CPU_INFO_ITERATOR n; 717 CPU_INFO_ITERATOR n;
748 struct cpu_info *cpi; 718 struct cpu_info *cpi;
749 719
750 if (cpus == NULL) 
751 return; 
752 
753 for (CPU_INFO_FOREACH(n, cpi)) { 720 for (CPU_INFO_FOREACH(n, cpi)) {
754 if (cpi == NULL || cpi->mid == cpuinfo.mid || 721 if (cpi == NULL || cpi->mid == cpuinfo.mid ||
755 (cpi->flags & CPUFLG_HATCHED) == 0) 722 (cpi->flags & CPUFLG_HATCHED) == 0)
756 continue; 723 continue;
757 724
758 cpi->msg_lev15.tag = XPMSG15_PAUSECPU; 725 cpi->msg_lev15.tag = XPMSG15_PAUSECPU;
759 raise_ipi(cpi,15); /* high priority intr */ 726 raise_ipi(cpi,15); /* high priority intr */
760 } 727 }
761} 728}
762 729
763void 730void
764mp_resume_cpus_ddb(void) 731mp_resume_cpus_ddb(void)
765{ 732{
766 CPU_INFO_ITERATOR n; 733 CPU_INFO_ITERATOR n;
767 struct cpu_info *cpi; 734 struct cpu_info *cpi;
768 735
769 if (cpus == NULL) 
770 return; 
771 
772 for (CPU_INFO_FOREACH(n, cpi)) { 736 for (CPU_INFO_FOREACH(n, cpi)) {
773 if (cpi == NULL || cpuinfo.mid == cpi->mid || 737 if (cpi == NULL || cpuinfo.mid == cpi->mid ||
774 (cpi->flags & CPUFLG_PAUSED) == 0) 738 (cpi->flags & CPUFLG_PAUSED) == 0)
775 continue; 739 continue;
776 740
777 /* tell it to continue */ 741 /* tell it to continue */
778 cpi->flags &= ~CPUFLG_PAUSED; 742 cpi->flags &= ~CPUFLG_PAUSED;
779 } 743 }
780} 744}
781#endif /* DDB */ 745#endif /* DDB */
782#endif /* MULTIPROCESSOR */ 746#endif /* MULTIPROCESSOR */
783 747
784/* 748/*
785 * fpu_init() must be run on associated CPU. 749 * fpu_init() must be run on associated CPU.
786 */ 750 */
787void 751void
788fpu_init(struct cpu_info *sc) 752fpu_init(struct cpu_info *sc)
789{ 753{
790 struct fpstate fpstate; 754 struct fpstate fpstate;
791 int fpuvers; 755 int fpuvers;
792 756
793 /* 757 /*
794 * Get the FSR and clear any exceptions. If we do not unload 758 * Get the FSR and clear any exceptions. If we do not unload
795 * the queue here and it is left over from a previous crash, we 759 * the queue here and it is left over from a previous crash, we
796 * will panic in the first loadfpstate(), due to a sequence 760 * will panic in the first loadfpstate(), due to a sequence
797 * error, so we need to dump the whole state anyway. 761 * error, so we need to dump the whole state anyway.
798 * 762 *
799 * If there is no FPU, trap.c will advance over all the stores, 763 * If there is no FPU, trap.c will advance over all the stores,
800 * so we initialize fs_fsr here. 764 * so we initialize fs_fsr here.
801 */ 765 */
802 766
803 /* 7 is reserved for "none" */ 767 /* 7 is reserved for "none" */
804 fpstate.fs_fsr = 7 << FSR_VER_SHIFT; 768 fpstate.fs_fsr = 7 << FSR_VER_SHIFT;
805 savefpstate(&fpstate); 769 savefpstate(&fpstate);
806 sc->fpuvers = fpuvers = 770 sc->fpuvers = fpuvers =
807 (fpstate.fs_fsr >> FSR_VER_SHIFT) & (FSR_VER >> FSR_VER_SHIFT); 771 (fpstate.fs_fsr >> FSR_VER_SHIFT) & (FSR_VER >> FSR_VER_SHIFT);
808 772
809 if (fpuvers == 7) { 773 if (fpuvers == 7) {
810 sc->fpu_name = "no"; 774 sc->fpu_name = "no";
811 return; 775 return;
812 } 776 }
813 777
814 sc->fpupresent = 1; 778 sc->fpupresent = 1;
815 sc->fpu_name = fsrtoname(sc->cpu_impl, sc->cpu_vers, fpuvers); 779 sc->fpu_name = fsrtoname(sc->cpu_impl, sc->cpu_vers, fpuvers);
816 if (sc->fpu_name == NULL) { 780 if (sc->fpu_name == NULL) {
817 sprintf(sc->fpu_namebuf, "version 0x%x", fpuvers); 781 sprintf(sc->fpu_namebuf, "version 0x%x", fpuvers);
818 sc->fpu_name = sc->fpu_namebuf; 782 sc->fpu_name = sc->fpu_namebuf;
819 } 783 }
820} 784}
821 785
822void 786void
823cache_print(struct cpu_softc *sc) 787cache_print(struct cpu_softc *sc)
824{ 788{
825 struct cacheinfo *ci = &sc->sc_cpuinfo->cacheinfo; 789 struct cacheinfo *ci = &sc->sc_cpuinfo->cacheinfo;
826 790
827 if (sc->sc_cpuinfo->flags & CPUFLG_SUN4CACHEBUG) 791 if (sc->sc_cpuinfo->flags & CPUFLG_SUN4CACHEBUG)
828 printf("%s: cache chip bug; trap page uncached\n", 792 printf("%s: cache chip bug; trap page uncached\n",
829 sc->sc_dev.dv_xname); 793 sc->sc_dev.dv_xname);
830 794
831 printf("%s: ", sc->sc_dev.dv_xname); 795 printf("%s: ", sc->sc_dev.dv_xname);
832 796
833 if (ci->c_totalsize == 0) { 797 if (ci->c_totalsize == 0) {
834 printf("no cache\n"); 798 printf("no cache\n");
835 return; 799 return;
836 } 800 }
837 801
838 if (ci->c_split) { 802 if (ci->c_split) {
839 const char *sep = ""; 803 const char *sep = "";
840 804
841 printf("%s", (ci->c_physical ? "physical " : "")); 805 printf("%s", (ci->c_physical ? "physical " : ""));
842 if (ci->ic_totalsize > 0) { 806 if (ci->ic_totalsize > 0) {
843 printf("%s%dK instruction (%d b/l)", sep, 807 printf("%s%dK instruction (%d b/l)", sep,
844 ci->ic_totalsize/1024, ci->ic_linesize); 808 ci->ic_totalsize/1024, ci->ic_linesize);
845 sep = ", "; 809 sep = ", ";
846 } 810 }
847 if (ci->dc_totalsize > 0) { 811 if (ci->dc_totalsize > 0) {
848 printf("%s%dK data (%d b/l)", sep, 812 printf("%s%dK data (%d b/l)", sep,
849 ci->dc_totalsize/1024, ci->dc_linesize); 813 ci->dc_totalsize/1024, ci->dc_linesize);
850 } 814 }
851 } else if (ci->c_physical) { 815 } else if (ci->c_physical) {
852 /* combined, physical */ 816 /* combined, physical */
853 printf("physical %dK combined cache (%d bytes/line)", 817 printf("physical %dK combined cache (%d bytes/line)",
854 ci->c_totalsize/1024, ci->c_linesize); 818 ci->c_totalsize/1024, ci->c_linesize);
855 } else { 819 } else {
856 /* combined, virtual */ 820 /* combined, virtual */
857 printf("%dK byte write-%s, %d bytes/line, %cw flush", 821 printf("%dK byte write-%s, %d bytes/line, %cw flush",
858 ci->c_totalsize/1024, 822 ci->c_totalsize/1024,
859 (ci->c_vactype == VAC_WRITETHROUGH) ? "through" : "back", 823 (ci->c_vactype == VAC_WRITETHROUGH) ? "through" : "back",
860 ci->c_linesize, 824 ci->c_linesize,
861 ci->c_hwflush ? 'h' : 's'); 825 ci->c_hwflush ? 'h' : 's');
862 } 826 }
863 827
864 if (ci->ec_totalsize > 0) { 828 if (ci->ec_totalsize > 0) {
865 printf(", %dK external (%d b/l)", 829 printf(", %dK external (%d b/l)",
866 ci->ec_totalsize/1024, ci->ec_linesize); 830 ci->ec_totalsize/1024, ci->ec_linesize);
867 } 831 }
868 printf(": "); 832 printf(": ");
869 if (ci->c_enabled) 833 if (ci->c_enabled)
870 printf("cache enabled"); 834 printf("cache enabled");
871 printf("\n"); 835 printf("\n");
872} 836}
873 837
874 838
875/*------------*/ 839/*------------*/
876 840
877 841
878void cpumatch_unknown(struct cpu_info *, struct module_info *, int); 842void cpumatch_unknown(struct cpu_info *, struct module_info *, int);
879void cpumatch_sun4(struct cpu_info *, struct module_info *, int); 843void cpumatch_sun4(struct cpu_info *, struct module_info *, int);
880void cpumatch_sun4c(struct cpu_info *, struct module_info *, int); 844void cpumatch_sun4c(struct cpu_info *, struct module_info *, int);
881void cpumatch_ms1(struct cpu_info *, struct module_info *, int); 845void cpumatch_ms1(struct cpu_info *, struct module_info *, int);
882void cpumatch_viking(struct cpu_info *, struct module_info *, int); 846void cpumatch_viking(struct cpu_info *, struct module_info *, int);
883void cpumatch_hypersparc(struct cpu_info *, struct module_info *, int); 847void cpumatch_hypersparc(struct cpu_info *, struct module_info *, int);
884void cpumatch_turbosparc(struct cpu_info *, struct module_info *, int); 848void cpumatch_turbosparc(struct cpu_info *, struct module_info *, int);
885 849
886void getcacheinfo_sun4(struct cpu_info *, int node); 850void getcacheinfo_sun4(struct cpu_info *, int node);
887void getcacheinfo_sun4c(struct cpu_info *, int node); 851void getcacheinfo_sun4c(struct cpu_info *, int node);
888void getcacheinfo_obp(struct cpu_info *, int node); 852void getcacheinfo_obp(struct cpu_info *, int node);
889void getcacheinfo_sun4d(struct cpu_info *, int node); 853void getcacheinfo_sun4d(struct cpu_info *, int node);
890 854
891void sun4_hotfix(struct cpu_info *); 855void sun4_hotfix(struct cpu_info *);
892void viking_hotfix(struct cpu_info *); 856void viking_hotfix(struct cpu_info *);
893void turbosparc_hotfix(struct cpu_info *); 857void turbosparc_hotfix(struct cpu_info *);
894void swift_hotfix(struct cpu_info *); 858void swift_hotfix(struct cpu_info *);
895 859
896void ms1_mmu_enable(void); 860void ms1_mmu_enable(void);
897void viking_mmu_enable(void); 861void viking_mmu_enable(void);
898void swift_mmu_enable(void); 862void swift_mmu_enable(void);
899void hypersparc_mmu_enable(void); 863void hypersparc_mmu_enable(void);
900 864
901void srmmu_get_syncflt(void); 865void srmmu_get_syncflt(void);
902void ms1_get_syncflt(void); 866void ms1_get_syncflt(void);
903void viking_get_syncflt(void); 867void viking_get_syncflt(void);
904void swift_get_syncflt(void); 868void swift_get_syncflt(void);
905void turbosparc_get_syncflt(void); 869void turbosparc_get_syncflt(void);
906void hypersparc_get_syncflt(void); 870void hypersparc_get_syncflt(void);
907void cypress_get_syncflt(void); 871void cypress_get_syncflt(void);
908 872
909int srmmu_get_asyncflt(u_int *, u_int *); 873int srmmu_get_asyncflt(u_int *, u_int *);
910int hypersparc_get_asyncflt(u_int *, u_int *); 874int hypersparc_get_asyncflt(u_int *, u_int *);
911int cypress_get_asyncflt(u_int *, u_int *); 875int cypress_get_asyncflt(u_int *, u_int *);
912int no_asyncflt_regs(u_int *, u_int *); 876int no_asyncflt_regs(u_int *, u_int *);
913 877
914int hypersparc_getmid(void); 878int hypersparc_getmid(void);
915/* cypress and hypersparc can share this function, see ctlreg.h */ 879/* cypress and hypersparc can share this function, see ctlreg.h */
916#define cypress_getmid hypersparc_getmid 880#define cypress_getmid hypersparc_getmid
917int viking_getmid(void); 881int viking_getmid(void);
918 882
919int (*moduleerr_handler)(void); 883int (*moduleerr_handler)(void);
920int viking_module_error(void); 884int viking_module_error(void);
921 885
922struct module_info module_unknown = { 886struct module_info module_unknown = {
923 CPUTYP_UNKNOWN, 887 CPUTYP_UNKNOWN,
924 VAC_UNKNOWN, 888 VAC_UNKNOWN,
925 cpumatch_unknown 889 cpumatch_unknown
926}; 890};
927 891
928 892
929void 893void
930cpumatch_unknown(struct cpu_info *sc, struct module_info *mp, int node) 894cpumatch_unknown(struct cpu_info *sc, struct module_info *mp, int node)
931{ 895{
932 896
933 panic("Unknown CPU type: " 897 panic("Unknown CPU type: "
934 "cpu: impl %d, vers %d; mmu: impl %d, vers %d", 898 "cpu: impl %d, vers %d; mmu: impl %d, vers %d",
935 sc->cpu_impl, sc->cpu_vers, 899 sc->cpu_impl, sc->cpu_vers,
936 sc->mmu_impl, sc->mmu_vers); 900 sc->mmu_impl, sc->mmu_vers);
937} 901}
938 902
939#if defined(SUN4) 903#if defined(SUN4)
940struct module_info module_sun4 = { 904struct module_info module_sun4 = {
941 CPUTYP_UNKNOWN, 905 CPUTYP_UNKNOWN,
942 VAC_WRITETHROUGH, 906 VAC_WRITETHROUGH,
943 cpumatch_sun4, 907 cpumatch_sun4,
944 getcacheinfo_sun4, 908 getcacheinfo_sun4,
945 sun4_hotfix, 909 sun4_hotfix,
946 0, 910 0,
947 sun4_cache_enable, 911 sun4_cache_enable,
948 0, 912 0,
949 0, /* ncontext set in `match' function */ 913 0, /* ncontext set in `match' function */
950 0, /* get_syncflt(); unused in sun4c */ 914 0, /* get_syncflt(); unused in sun4c */
951 0, /* get_asyncflt(); unused in sun4c */ 915 0, /* get_asyncflt(); unused in sun4c */
952 sun4_cache_flush, 916 sun4_cache_flush,
953 sun4_vcache_flush_page, NULL, 917 sun4_vcache_flush_page, NULL,
954 sun4_vcache_flush_segment, NULL, 918 sun4_vcache_flush_segment, NULL,
955 sun4_vcache_flush_region, NULL, 919 sun4_vcache_flush_region, NULL,
956 sun4_vcache_flush_context, NULL, 920 sun4_vcache_flush_context, NULL,
957 NULL, NULL, 921 NULL, NULL,
958 noop_pcache_flush_page, 922 noop_pcache_flush_page,
959 noop_pure_vcache_flush, 923 noop_pure_vcache_flush,
960 noop_cache_flush_all, 924 noop_cache_flush_all,
961 0, 925 0,
962 pmap_zero_page4_4c, 926 pmap_zero_page4_4c,
963 pmap_copy_page4_4c 927 pmap_copy_page4_4c
964}; 928};
965 929
966void 930void
967getcacheinfo_sun4(struct cpu_info *sc, int node) 931getcacheinfo_sun4(struct cpu_info *sc, int node)
968{ 932{
969 struct cacheinfo *ci = &sc->cacheinfo; 933 struct cacheinfo *ci = &sc->cacheinfo;
970 934
971 switch (sc->cpu_type) { 935 switch (sc->cpu_type) {
972 case CPUTYP_4_100: 936 case CPUTYP_4_100:
973 ci->c_vactype = VAC_NONE; 937 ci->c_vactype = VAC_NONE;
974 ci->c_totalsize = 0; 938 ci->c_totalsize = 0;
975 ci->c_hwflush = 0; 939 ci->c_hwflush = 0;
976 ci->c_linesize = 0; 940 ci->c_linesize = 0;
977 ci->c_l2linesize = 0; 941 ci->c_l2linesize = 0;
978 ci->c_split = 0; 942 ci->c_split = 0;
979 ci->c_nlines = 0; 943 ci->c_nlines = 0;
980 944
981 /* Override cache flush functions */ 945 /* Override cache flush functions */
982 sc->cache_flush = noop_cache_flush; 946 sc->cache_flush = noop_cache_flush;
983 sc->sp_vcache_flush_page = noop_vcache_flush_page; 947 sc->sp_vcache_flush_page = noop_vcache_flush_page;
984 sc->sp_vcache_flush_segment = noop_vcache_flush_segment; 948 sc->sp_vcache_flush_segment = noop_vcache_flush_segment;
985 sc->sp_vcache_flush_region = noop_vcache_flush_region; 949 sc->sp_vcache_flush_region = noop_vcache_flush_region;
986 sc->sp_vcache_flush_context = noop_vcache_flush_context; 950 sc->sp_vcache_flush_context = noop_vcache_flush_context;
987 break; 951 break;
988 case CPUTYP_4_200: 952 case CPUTYP_4_200:
989 ci->c_vactype = VAC_WRITEBACK; 953 ci->c_vactype = VAC_WRITEBACK;
990 ci->c_totalsize = 128*1024; 954 ci->c_totalsize = 128*1024;
991 ci->c_hwflush = 0; 955 ci->c_hwflush = 0;
992 ci->c_linesize = 16; 956 ci->c_linesize = 16;
993 ci->c_l2linesize = 4; 957 ci->c_l2linesize = 4;
994 ci->c_split = 0; 958 ci->c_split = 0;
995 ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize; 959 ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
996 break; 960 break;
997 case CPUTYP_4_300: 961 case CPUTYP_4_300:
998 ci->c_vactype = VAC_WRITEBACK; 962 ci->c_vactype = VAC_WRITEBACK;
999 ci->c_totalsize = 128*1024; 963 ci->c_totalsize = 128*1024;
1000 ci->c_hwflush = 0; 964 ci->c_hwflush = 0;
1001 ci->c_linesize = 16; 965 ci->c_linesize = 16;
1002 ci->c_l2linesize = 4; 966 ci->c_l2linesize = 4;
1003 ci->c_split = 0; 967 ci->c_split = 0;
1004 ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize; 968 ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
1005 sc->flags |= CPUFLG_SUN4CACHEBUG; 969 sc->flags |= CPUFLG_SUN4CACHEBUG;
1006 break; 970 break;
1007 case CPUTYP_4_400: 971 case CPUTYP_4_400:
1008 ci->c_vactype = VAC_WRITEBACK; 972 ci->c_vactype = VAC_WRITEBACK;
1009 ci->c_totalsize = 128 * 1024; 973 ci->c_totalsize = 128 * 1024;
1010 ci->c_hwflush = 0; 974 ci->c_hwflush = 0;
1011 ci->c_linesize = 32; 975 ci->c_linesize = 32;
1012 ci->c_l2linesize = 5; 976 ci->c_l2linesize = 5;
1013 ci->c_split = 0; 977 ci->c_split = 0;
1014 ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize; 978 ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
1015 break; 979 break;
1016 } 980 }
1017} 981}
1018 982
1019void 983void
1020cpumatch_sun4(struct cpu_info *sc, struct module_info *mp, int node) 984cpumatch_sun4(struct cpu_info *sc, struct module_info *mp, int node)
1021{ 985{
1022 struct idprom *idp = prom_getidprom(); 986 struct idprom *idp = prom_getidprom();
1023 987
1024 switch (idp->idp_machtype) { 988 switch (idp->idp_machtype) {
1025 case ID_SUN4_100: 989 case ID_SUN4_100:
1026 sc->cpu_type = CPUTYP_4_100; 990 sc->cpu_type = CPUTYP_4_100;
1027 sc->classlvl = 100; 991 sc->classlvl = 100;
1028 sc->mmu_ncontext = 8; 992 sc->mmu_ncontext = 8;
1029 sc->mmu_nsegment = 256; 993 sc->mmu_nsegment = 256;
1030/*XXX*/ sc->hz = 14280000; 994/*XXX*/ sc->hz = 14280000;
1031 break; 995 break;
1032 case ID_SUN4_200: 996 case ID_SUN4_200:
1033 sc->cpu_type = CPUTYP_4_200; 997 sc->cpu_type = CPUTYP_4_200;
1034 sc->classlvl = 200; 998 sc->classlvl = 200;
1035 sc->mmu_nsegment = 512; 999 sc->mmu_nsegment = 512;
1036 sc->mmu_ncontext = 16; 1000 sc->mmu_ncontext = 16;
1037/*XXX*/ sc->hz = 16670000; 1001/*XXX*/ sc->hz = 16670000;
1038 break; 1002 break;
1039 case ID_SUN4_300: 1003 case ID_SUN4_300:
1040 sc->cpu_type = CPUTYP_4_300; 1004 sc->cpu_type = CPUTYP_4_300;
1041 sc->classlvl = 300; 1005 sc->classlvl = 300;
1042 sc->mmu_nsegment = 256; 1006 sc->mmu_nsegment = 256;
1043 sc->mmu_ncontext = 16; 1007 sc->mmu_ncontext = 16;
1044/*XXX*/ sc->hz = 25000000; 1008/*XXX*/ sc->hz = 25000000;
1045 break; 1009 break;
1046 case ID_SUN4_400: 1010 case ID_SUN4_400:
1047 sc->cpu_type = CPUTYP_4_400; 1011 sc->cpu_type = CPUTYP_4_400;
1048 sc->classlvl = 400; 1012 sc->classlvl = 400;
1049 sc->mmu_nsegment = 1024; 1013 sc->mmu_nsegment = 1024;
1050 sc->mmu_ncontext = 64; 1014 sc->mmu_ncontext = 64;
1051 sc->mmu_nregion = 256; 1015 sc->mmu_nregion = 256;
1052/*XXX*/ sc->hz = 33000000; 1016/*XXX*/ sc->hz = 33000000;
1053 sc->sun4_mmu3l = 1; 1017 sc->sun4_mmu3l = 1;
1054 break; 1018 break;
1055 } 1019 }
1056 1020
1057} 1021}
1058#endif /* SUN4 */ 1022#endif /* SUN4 */
1059 1023
1060#if defined(SUN4C) 1024#if defined(SUN4C)
1061struct module_info module_sun4c = { 1025struct module_info module_sun4c = {
1062 CPUTYP_UNKNOWN, 1026 CPUTYP_UNKNOWN,
1063 VAC_WRITETHROUGH, 1027 VAC_WRITETHROUGH,
1064 cpumatch_sun4c, 1028 cpumatch_sun4c,
1065 getcacheinfo_sun4c, 1029 getcacheinfo_sun4c,
1066 sun4_hotfix, 1030 sun4_hotfix,
1067 0, 1031 0,
1068 sun4_cache_enable, 1032 sun4_cache_enable,
1069 0, 1033 0,
1070 0, /* ncontext set in `match' function */ 1034 0, /* ncontext set in `match' function */
1071 0, /* get_syncflt(); unused in sun4c */ 1035 0, /* get_syncflt(); unused in sun4c */
1072 0, /* get_asyncflt(); unused in sun4c */ 1036 0, /* get_asyncflt(); unused in sun4c */
1073 sun4_cache_flush, 1037 sun4_cache_flush,
1074 sun4_vcache_flush_page, NULL, 1038 sun4_vcache_flush_page, NULL,
1075 sun4_vcache_flush_segment, NULL, 1039 sun4_vcache_flush_segment, NULL,
1076 sun4_vcache_flush_region, NULL, 1040 sun4_vcache_flush_region, NULL,
1077 sun4_vcache_flush_context, NULL, 1041 sun4_vcache_flush_context, NULL,
1078 NULL, NULL, 1042 NULL, NULL,
1079 noop_pcache_flush_page, 1043 noop_pcache_flush_page,
1080 noop_pure_vcache_flush, 1044 noop_pure_vcache_flush,
1081 noop_cache_flush_all, 1045 noop_cache_flush_all,
1082 0, 1046 0,
1083 pmap_zero_page4_4c, 1047 pmap_zero_page4_4c,
1084 pmap_copy_page4_4c 1048 pmap_copy_page4_4c
1085}; 1049};
1086 1050
1087void 1051void
1088cpumatch_sun4c(struct cpu_info *sc, struct module_info *mp, int node) 1052cpumatch_sun4c(struct cpu_info *sc, struct module_info *mp, int node)
1089{ 1053{
1090 int rnode; 1054 int rnode;
1091 1055
1092 rnode = findroot(); 1056 rnode = findroot();
1093 sc->mmu_npmeg = sc->mmu_nsegment = 1057 sc->mmu_npmeg = sc->mmu_nsegment =
1094 prom_getpropint(rnode, "mmu-npmg", 128); 1058 prom_getpropint(rnode, "mmu-npmg", 128);
1095 sc->mmu_ncontext = prom_getpropint(rnode, "mmu-nctx", 8); 1059 sc->mmu_ncontext = prom_getpropint(rnode, "mmu-nctx", 8);
1096 1060
1097 /* Get clock frequency */ 1061 /* Get clock frequency */
1098 sc->hz = prom_getpropint(rnode, "clock-frequency", 0); 1062 sc->hz = prom_getpropint(rnode, "clock-frequency", 0);
1099} 1063}
1100 1064
1101void 1065void
1102getcacheinfo_sun4c(struct cpu_info *sc, int node) 1066getcacheinfo_sun4c(struct cpu_info *sc, int node)
1103{ 1067{
1104 struct cacheinfo *ci = &sc->cacheinfo; 1068 struct cacheinfo *ci = &sc->cacheinfo;
1105 int i, l; 1069 int i, l;
1106 1070
1107 if (node == 0) 1071 if (node == 0)
1108 /* Bootstrapping */ 1072 /* Bootstrapping */
1109 return; 1073 return;
1110 1074
 1111 /* Sun4c machines have only virtually-addressed caches */ 1075 /* Sun4c machines have only virtually-addressed caches */
1112 ci->c_physical = 0; 1076 ci->c_physical = 0;
1113 ci->c_totalsize = prom_getpropint(node, "vac-size", 65536); 1077 ci->c_totalsize = prom_getpropint(node, "vac-size", 65536);
1114 /* 1078 /*
1115 * Note: vac-hwflush is spelled with an underscore 1079 * Note: vac-hwflush is spelled with an underscore
1116 * on the 4/75s. 1080 * on the 4/75s.
1117 */ 1081 */
1118 ci->c_hwflush = 1082 ci->c_hwflush =
1119 prom_getpropint(node, "vac_hwflush", 0) | 1083 prom_getpropint(node, "vac_hwflush", 0) |
1120 prom_getpropint(node, "vac-hwflush", 0); 1084 prom_getpropint(node, "vac-hwflush", 0);
1121 1085
1122 ci->c_linesize = l = prom_getpropint(node, "vac-linesize", 16); 1086 ci->c_linesize = l = prom_getpropint(node, "vac-linesize", 16);
1123 for (i = 0; (1 << i) < l; i++) 1087 for (i = 0; (1 << i) < l; i++)
1124 /* void */; 1088 /* void */;
1125 if ((1 << i) != l) 1089 if ((1 << i) != l)
1126 panic("bad cache line size %d", l); 1090 panic("bad cache line size %d", l);
1127 ci->c_l2linesize = i; 1091 ci->c_l2linesize = i;
1128 ci->c_associativity = 1; 1092 ci->c_associativity = 1;
1129 ci->c_nlines = ci->c_totalsize >> i; 1093 ci->c_nlines = ci->c_totalsize >> i;
1130 1094
1131 ci->c_vactype = VAC_WRITETHROUGH; 1095 ci->c_vactype = VAC_WRITETHROUGH;
1132 1096
1133 /* 1097 /*
1134 * Machines with "buserr-type" 1 have a bug in the cache 1098 * Machines with "buserr-type" 1 have a bug in the cache
1135 * chip that affects traps. (I wish I knew more about this 1099 * chip that affects traps. (I wish I knew more about this
1136 * mysterious buserr-type variable....) 1100 * mysterious buserr-type variable....)
1137 */ 1101 */
1138 if (prom_getpropint(node, "buserr-type", 0) == 1) 1102 if (prom_getpropint(node, "buserr-type", 0) == 1)
1139 sc->flags |= CPUFLG_SUN4CACHEBUG; 1103 sc->flags |= CPUFLG_SUN4CACHEBUG;
1140} 1104}
1141#endif /* SUN4C */ 1105#endif /* SUN4C */
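The getcacheinfo routines above all derive c_l2linesize the same way: scan for the power of two matching the PROM-reported line size and panic if the value is not a power of two. A minimal stand-alone sketch of that idiom (the helper name is hypothetical, not part of this change):

	/* Hedged sketch: log2 of a power-of-two cache line size. */
	static int
	cache_log2(int l)
	{
		int i;

		for (i = 0; (1 << i) < l; i++)
			/* void */;
		if ((1 << i) != l)
			panic("bad cache line size %d", l);
		return (i);	/* e.g. l = 16 -> 4, l = 32 -> 5 */
	}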
1142 1106
1143void 1107void
1144sun4_hotfix(struct cpu_info *sc) 1108sun4_hotfix(struct cpu_info *sc)
1145{ 1109{
1146 1110
1147 if ((sc->flags & CPUFLG_SUN4CACHEBUG) != 0) 1111 if ((sc->flags & CPUFLG_SUN4CACHEBUG) != 0)
1148 kvm_uncache((char *)trapbase, 1); 1112 kvm_uncache((char *)trapbase, 1);
1149 1113
1150 /* Use the hardware-assisted page flush routine, if present */ 1114 /* Use the hardware-assisted page flush routine, if present */
1151 if (sc->cacheinfo.c_hwflush) 1115 if (sc->cacheinfo.c_hwflush)
1152 sc->vcache_flush_page = sun4_vcache_flush_page_hw; 1116 sc->vcache_flush_page = sun4_vcache_flush_page_hw;
1153} 1117}
1154 1118
1155#if defined(SUN4M) 1119#if defined(SUN4M)
1156void 1120void
1157getcacheinfo_obp(struct cpu_info *sc, int node) 1121getcacheinfo_obp(struct cpu_info *sc, int node)
1158{ 1122{
1159 struct cacheinfo *ci = &sc->cacheinfo; 1123 struct cacheinfo *ci = &sc->cacheinfo;
1160 int i, l; 1124 int i, l;
1161 1125
1162 if (node == 0) 1126 if (node == 0)
1163 /* Bootstrapping */ 1127 /* Bootstrapping */
1164 return; 1128 return;
1165 1129
1166 /* 1130 /*
1167 * Determine the Sun4m cache organization. 1131 * Determine the Sun4m cache organization.
1168 */ 1132 */
1169 ci->c_physical = node_has_property(node, "cache-physical?"); 1133 ci->c_physical = node_has_property(node, "cache-physical?");
1170 1134
1171 if (prom_getpropint(node, "ncaches", 1) == 2) 1135 if (prom_getpropint(node, "ncaches", 1) == 2)
1172 ci->c_split = 1; 1136 ci->c_split = 1;
1173 else 1137 else
1174 ci->c_split = 0; 1138 ci->c_split = 0;
1175 1139
1176 /* hwflush is used only by sun4/4c code */ 1140 /* hwflush is used only by sun4/4c code */
1177 ci->c_hwflush = 0; 1141 ci->c_hwflush = 0;
1178 1142
1179 if (node_has_property(node, "icache-nlines") && 1143 if (node_has_property(node, "icache-nlines") &&
1180 node_has_property(node, "dcache-nlines") && 1144 node_has_property(node, "dcache-nlines") &&
1181 ci->c_split) { 1145 ci->c_split) {
1182 /* Harvard architecture: get I and D cache sizes */ 1146 /* Harvard architecture: get I and D cache sizes */
1183 ci->ic_nlines = prom_getpropint(node, "icache-nlines", 0); 1147 ci->ic_nlines = prom_getpropint(node, "icache-nlines", 0);
1184 ci->ic_linesize = l = 1148 ci->ic_linesize = l =
1185 prom_getpropint(node, "icache-line-size", 0); 1149 prom_getpropint(node, "icache-line-size", 0);
1186 for (i = 0; (1 << i) < l && l; i++) 1150 for (i = 0; (1 << i) < l && l; i++)
1187 /* void */; 1151 /* void */;
1188 if ((1 << i) != l && l) 1152 if ((1 << i) != l && l)
1189 panic("bad icache line size %d", l); 1153 panic("bad icache line size %d", l);
1190 ci->ic_l2linesize = i; 1154 ci->ic_l2linesize = i;
1191 ci->ic_associativity = 1155 ci->ic_associativity =
1192 prom_getpropint(node, "icache-associativity", 1); 1156 prom_getpropint(node, "icache-associativity", 1);
1193 ci->ic_totalsize = l * ci->ic_nlines * ci->ic_associativity; 1157 ci->ic_totalsize = l * ci->ic_nlines * ci->ic_associativity;
1194 1158
1195 ci->dc_nlines = prom_getpropint(node, "dcache-nlines", 0); 1159 ci->dc_nlines = prom_getpropint(node, "dcache-nlines", 0);
1196 ci->dc_linesize = l = 1160 ci->dc_linesize = l =
1197 prom_getpropint(node, "dcache-line-size",0); 1161 prom_getpropint(node, "dcache-line-size",0);
1198 for (i = 0; (1 << i) < l && l; i++) 1162 for (i = 0; (1 << i) < l && l; i++)
1199 /* void */; 1163 /* void */;
1200 if ((1 << i) != l && l) 1164 if ((1 << i) != l && l)
1201 panic("bad dcache line size %d", l); 1165 panic("bad dcache line size %d", l);
1202 ci->dc_l2linesize = i; 1166 ci->dc_l2linesize = i;
1203 ci->dc_associativity = 1167 ci->dc_associativity =
1204 prom_getpropint(node, "dcache-associativity", 1); 1168 prom_getpropint(node, "dcache-associativity", 1);
1205 ci->dc_totalsize = l * ci->dc_nlines * ci->dc_associativity; 1169 ci->dc_totalsize = l * ci->dc_nlines * ci->dc_associativity;
1206 1170
1207 ci->c_l2linesize = min(ci->ic_l2linesize, ci->dc_l2linesize); 1171 ci->c_l2linesize = min(ci->ic_l2linesize, ci->dc_l2linesize);
1208 ci->c_linesize = min(ci->ic_linesize, ci->dc_linesize); 1172 ci->c_linesize = min(ci->ic_linesize, ci->dc_linesize);
1209 ci->c_totalsize = max(ci->ic_totalsize, ci->dc_totalsize); 1173 ci->c_totalsize = max(ci->ic_totalsize, ci->dc_totalsize);
1210 ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize; 1174 ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
1211 } else { 1175 } else {
1212 /* unified I/D cache */ 1176 /* unified I/D cache */
1213 ci->c_nlines = prom_getpropint(node, "cache-nlines", 128); 1177 ci->c_nlines = prom_getpropint(node, "cache-nlines", 128);
1214 ci->c_linesize = l = 1178 ci->c_linesize = l =
1215 prom_getpropint(node, "cache-line-size", 0); 1179 prom_getpropint(node, "cache-line-size", 0);
1216 for (i = 0; (1 << i) < l && l; i++) 1180 for (i = 0; (1 << i) < l && l; i++)
1217 /* void */; 1181 /* void */;
1218 if ((1 << i) != l && l) 1182 if ((1 << i) != l && l)
1219 panic("bad cache line size %d", l); 1183 panic("bad cache line size %d", l);
1220 ci->c_l2linesize = i; 1184 ci->c_l2linesize = i;
1221 ci->c_associativity = 1185 ci->c_associativity =
1222 prom_getpropint(node, "cache-associativity", 1); 1186 prom_getpropint(node, "cache-associativity", 1);
1223 ci->dc_associativity = ci->ic_associativity = 1187 ci->dc_associativity = ci->ic_associativity =
1224 ci->c_associativity; 1188 ci->c_associativity;
1225 ci->c_totalsize = l * ci->c_nlines * ci->c_associativity; 1189 ci->c_totalsize = l * ci->c_nlines * ci->c_associativity;
1226 } 1190 }
1227 1191
1228 if (node_has_property(node, "ecache-nlines")) { 1192 if (node_has_property(node, "ecache-nlines")) {
1229 /* we have a L2 "e"xternal cache */ 1193 /* we have a L2 "e"xternal cache */
1230 ci->ec_nlines = prom_getpropint(node, "ecache-nlines", 32768); 1194 ci->ec_nlines = prom_getpropint(node, "ecache-nlines", 32768);
1231 ci->ec_linesize = l = prom_getpropint(node, "ecache-line-size", 0); 1195 ci->ec_linesize = l = prom_getpropint(node, "ecache-line-size", 0);
1232 for (i = 0; (1 << i) < l && l; i++) 1196 for (i = 0; (1 << i) < l && l; i++)
1233 /* void */; 1197 /* void */;
1234 if ((1 << i) != l && l) 1198 if ((1 << i) != l && l)
1235 panic("bad ecache line size %d", l); 1199 panic("bad ecache line size %d", l);
1236 ci->ec_l2linesize = i; 1200 ci->ec_l2linesize = i;
1237 ci->ec_associativity = 1201 ci->ec_associativity =
1238 prom_getpropint(node, "ecache-associativity", 1); 1202 prom_getpropint(node, "ecache-associativity", 1);
1239 ci->ec_totalsize = l * ci->ec_nlines * ci->ec_associativity; 1203 ci->ec_totalsize = l * ci->ec_nlines * ci->ec_associativity;
1240 } 1204 }
1241 if (ci->c_totalsize == 0) 1205 if (ci->c_totalsize == 0)
1242 printf("warning: couldn't identify cache\n"); 1206 printf("warning: couldn't identify cache\n");
1243} 1207}
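For the split (Harvard) case, the code above sizes each side as linesize * nlines * associativity and then folds the two sides together with min/max. A rough worked example with made-up PROM values (assumptions only, not taken from any particular machine):

	/* Hedged example: icache-line-size=32, icache-nlines=512, 1-way;
	 * dcache-line-size=16, dcache-nlines=512, 1-way. */
	ic_totalsize = 32 * 512 * 1;		/* 16384: 16 KB I-cache */
	dc_totalsize = 16 * 512 * 1;		/*  8192:  8 KB D-cache */
	c_l2linesize = min(5, 4);		/* 4: 16-byte combined line */
	c_totalsize  = max(16384, 8192);	/* 16384 */
	c_nlines     = 16384 >> 4;		/* 1024 */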
1244 1208
1245/* 1209/*
1246 * We use the max. number of contexts on the micro and 1210 * We use the max. number of contexts on the micro and
1247 * hyper SPARCs. The SuperSPARC would let us use up to 65536 1211 * hyper SPARCs. The SuperSPARC would let us use up to 65536
1248 * contexts (by powers of 2), but we keep it at 4096 since 1212 * contexts (by powers of 2), but we keep it at 4096 since
1249 * the table must be aligned to #context*4. With 4K contexts, 1213 * the table must be aligned to #context*4. With 4K contexts,
1250 * we waste at most 16K of memory. Note that the context 1214 * we waste at most 16K of memory. Note that the context
1251 * table is *always* page-aligned, so there can always be 1215 * table is *always* page-aligned, so there can always be
1252 * 1024 contexts without sacrificing memory space (given 1216 * 1024 contexts without sacrificing memory space (given
1253 * that the chip supports 1024 contexts). 1217 * that the chip supports 1024 contexts).
1254 * 1218 *
1255 * Currently known limits: MS1=64, MS2=256, HS=4096, SS=65536 1219 * Currently known limits: MS1=64, MS2=256, HS=4096, SS=65536
1256 * some old SS's=4096 1220 * some old SS's=4096
1257 */ 1221 */
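Making the alignment arithmetic above concrete (a back-of-the-envelope check, not code from this change): each SRMMU context needs one 4-byte context-table entry, and the table must be aligned to ncontext * 4 bytes.

	/* 4096 contexts * 4 bytes = 16 KB table, 16 KB alignment,
	 *   so at most 16 KB is lost to padding.
	 * 1024 contexts * 4 bytes = 4 KB = one page, which the
	 *   page-aligned allocation already satisfies for free. */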
1258 1222
1259/* TI Microsparc I */ 1223/* TI Microsparc I */
1260struct module_info module_ms1 = { 1224struct module_info module_ms1 = {
1261 CPUTYP_MS1, 1225 CPUTYP_MS1,
1262 VAC_NONE, 1226 VAC_NONE,
1263 cpumatch_ms1, 1227 cpumatch_ms1,
1264 getcacheinfo_obp, 1228 getcacheinfo_obp,
1265 0, 1229 0,
1266 ms1_mmu_enable, 1230 ms1_mmu_enable,
1267 ms1_cache_enable, 1231 ms1_cache_enable,
1268 0, 1232 0,
1269 64, 1233 64,
1270 ms1_get_syncflt, 1234 ms1_get_syncflt,
1271 no_asyncflt_regs, 1235 no_asyncflt_regs,
1272 ms1_cache_flush, 1236 ms1_cache_flush,
1273 noop_vcache_flush_page, NULL, 1237 noop_vcache_flush_page, NULL,
1274 noop_vcache_flush_segment, NULL, 1238 noop_vcache_flush_segment, NULL,
1275 noop_vcache_flush_region, NULL, 1239 noop_vcache_flush_region, NULL,
1276 noop_vcache_flush_context, NULL, 1240 noop_vcache_flush_context, NULL,
1277 noop_vcache_flush_range, NULL, 1241 noop_vcache_flush_range, NULL,
1278 noop_pcache_flush_page, 1242 noop_pcache_flush_page,
1279 noop_pure_vcache_flush, 1243 noop_pure_vcache_flush,
1280 ms1_cache_flush_all, 1244 ms1_cache_flush_all,
1281 memerr4m, 1245 memerr4m,
1282 pmap_zero_page4m, 1246 pmap_zero_page4m,
1283 pmap_copy_page4m 1247 pmap_copy_page4m
1284}; 1248};
1285 1249
1286void 1250void
1287cpumatch_ms1(struct cpu_info *sc, struct module_info *mp, int node) 1251cpumatch_ms1(struct cpu_info *sc, struct module_info *mp, int node)
1288{ 1252{
1289 1253
1290 /* 1254 /*
1291 * Turn off page zeroing in the idle loop; an unidentified 1255 * Turn off page zeroing in the idle loop; an unidentified
1292 * bug causes (very sporadic) user process corruption. 1256 * bug causes (very sporadic) user process corruption.
1293 */ 1257 */
1294 vm_page_zero_enable = 0; 1258 vm_page_zero_enable = 0;
1295} 1259}
1296 1260
1297void 1261void
1298ms1_mmu_enable(void) 1262ms1_mmu_enable(void)
1299{ 1263{
1300} 1264}
1301 1265
1302/* TI Microsparc II */ 1266/* TI Microsparc II */
1303struct module_info module_ms2 = { /* UNTESTED */ 1267struct module_info module_ms2 = { /* UNTESTED */
1304 CPUTYP_MS2, 1268 CPUTYP_MS2,
1305 VAC_WRITETHROUGH, 1269 VAC_WRITETHROUGH,
1306 0, 1270 0,
1307 getcacheinfo_obp, 1271 getcacheinfo_obp,
1308 0, 1272 0,
1309 0, 1273 0,
1310 swift_cache_enable, 1274 swift_cache_enable,
1311 0, 1275 0,
1312 256, 1276 256,
1313 srmmu_get_syncflt, 1277 srmmu_get_syncflt,
1314 srmmu_get_asyncflt, 1278 srmmu_get_asyncflt,
1315 srmmu_cache_flush, 1279 srmmu_cache_flush,
1316 srmmu_vcache_flush_page, NULL, 1280 srmmu_vcache_flush_page, NULL,
1317 srmmu_vcache_flush_segment, NULL, 1281 srmmu_vcache_flush_segment, NULL,
1318 srmmu_vcache_flush_region, NULL, 1282 srmmu_vcache_flush_region, NULL,
1319 srmmu_vcache_flush_context, NULL, 1283 srmmu_vcache_flush_context, NULL,
1320 srmmu_vcache_flush_range, NULL, 1284 srmmu_vcache_flush_range, NULL,
1321 noop_pcache_flush_page, 1285 noop_pcache_flush_page,
1322 noop_pure_vcache_flush, 1286 noop_pure_vcache_flush,
1323 srmmu_cache_flush_all, 1287 srmmu_cache_flush_all,
1324 memerr4m, 1288 memerr4m,
1325 pmap_zero_page4m, 1289 pmap_zero_page4m,
1326 pmap_copy_page4m 1290 pmap_copy_page4m
1327}; 1291};
1328 1292
1329 1293
1330struct module_info module_swift = { 1294struct module_info module_swift = {
1331 CPUTYP_MS2, 1295 CPUTYP_MS2,
1332 VAC_WRITETHROUGH, 1296 VAC_WRITETHROUGH,
1333 0, 1297 0,
1334 getcacheinfo_obp, 1298 getcacheinfo_obp,
1335 swift_hotfix, 1299 swift_hotfix,
1336 0, 1300 0,
1337 swift_cache_enable, 1301 swift_cache_enable,
1338 0, 1302 0,
1339 256, 1303 256,
1340 swift_get_syncflt, 1304 swift_get_syncflt,
1341 no_asyncflt_regs, 1305 no_asyncflt_regs,
1342 srmmu_cache_flush, 1306 srmmu_cache_flush,
1343 srmmu_vcache_flush_page, NULL, 1307 srmmu_vcache_flush_page, NULL,
1344 srmmu_vcache_flush_segment, NULL, 1308 srmmu_vcache_flush_segment, NULL,
1345 srmmu_vcache_flush_region, NULL, 1309 srmmu_vcache_flush_region, NULL,
1346 srmmu_vcache_flush_context, NULL, 1310 srmmu_vcache_flush_context, NULL,
1347 srmmu_vcache_flush_range, NULL, 1311 srmmu_vcache_flush_range, NULL,
1348 noop_pcache_flush_page, 1312 noop_pcache_flush_page,
1349 noop_pure_vcache_flush, 1313 noop_pure_vcache_flush,
1350 srmmu_cache_flush_all, 1314 srmmu_cache_flush_all,
1351 memerr4m, 1315 memerr4m,
1352 pmap_zero_page4m, 1316 pmap_zero_page4m,
1353 pmap_copy_page4m 1317 pmap_copy_page4m
1354}; 1318};
1355 1319
1356void 1320void
1357swift_hotfix(struct cpu_info *sc) 1321swift_hotfix(struct cpu_info *sc)
1358{ 1322{
1359 int pcr = lda(SRMMU_PCR, ASI_SRMMU); 1323 int pcr = lda(SRMMU_PCR, ASI_SRMMU);
1360 1324
1361 /* Turn off branch prediction */ 1325 /* Turn off branch prediction */
1362 pcr &= ~SWIFT_PCR_BF; 1326 pcr &= ~SWIFT_PCR_BF;
1363 sta(SRMMU_PCR, ASI_SRMMU, pcr); 1327 sta(SRMMU_PCR, ASI_SRMMU, pcr);
1364} 1328}
1365 1329
1366void 1330void
1367swift_mmu_enable(void) 1331swift_mmu_enable(void)
1368{ 1332{
1369} 1333}
1370 1334
1371 1335
1372/* ROSS Hypersparc */ 1336/* ROSS Hypersparc */
1373struct module_info module_hypersparc = { 1337struct module_info module_hypersparc = {
1374 CPUTYP_UNKNOWN, 1338 CPUTYP_UNKNOWN,
1375 VAC_WRITEBACK, 1339 VAC_WRITEBACK,
1376 cpumatch_hypersparc, 1340 cpumatch_hypersparc,
1377 getcacheinfo_obp, 1341 getcacheinfo_obp,
1378 0, 1342 0,
1379 hypersparc_mmu_enable, 1343 hypersparc_mmu_enable,
1380 hypersparc_cache_enable, 1344 hypersparc_cache_enable,
1381 hypersparc_getmid, 1345 hypersparc_getmid,
1382 4096, 1346 4096,
1383 hypersparc_get_syncflt, 1347 hypersparc_get_syncflt,
1384 hypersparc_get_asyncflt, 1348 hypersparc_get_asyncflt,
1385 srmmu_cache_flush, 1349 srmmu_cache_flush,
1386 srmmu_vcache_flush_page, ft_srmmu_vcache_flush_page, 1350 srmmu_vcache_flush_page, ft_srmmu_vcache_flush_page,
1387 srmmu_vcache_flush_segment, ft_srmmu_vcache_flush_segment, 1351 srmmu_vcache_flush_segment, ft_srmmu_vcache_flush_segment,
1388 srmmu_vcache_flush_region, ft_srmmu_vcache_flush_region, 1352 srmmu_vcache_flush_region, ft_srmmu_vcache_flush_region,
1389 srmmu_vcache_flush_context, ft_srmmu_vcache_flush_context, 1353 srmmu_vcache_flush_context, ft_srmmu_vcache_flush_context,
1390 srmmu_vcache_flush_range, ft_srmmu_vcache_flush_range, 1354 srmmu_vcache_flush_range, ft_srmmu_vcache_flush_range,
1391 noop_pcache_flush_page, 1355 noop_pcache_flush_page,
1392 hypersparc_pure_vcache_flush, 1356 hypersparc_pure_vcache_flush,
1393 hypersparc_cache_flush_all, 1357 hypersparc_cache_flush_all,
1394 hypersparc_memerr, 1358 hypersparc_memerr,
1395 pmap_zero_page4m, 1359 pmap_zero_page4m,
1396 pmap_copy_page4m 1360 pmap_copy_page4m
1397}; 1361};
1398 1362
1399void 1363void
1400cpumatch_hypersparc(struct cpu_info *sc, struct module_info *mp, int node) 1364cpumatch_hypersparc(struct cpu_info *sc, struct module_info *mp, int node)
1401{ 1365{
1402 1366
1403 sc->cpu_type = CPUTYP_HS_MBUS;/*XXX*/ 1367 sc->cpu_type = CPUTYP_HS_MBUS;/*XXX*/
1404 1368
1405 if (node == 0) { 1369 if (node == 0) {
1406 /* Flush I-cache */ 1370 /* Flush I-cache */
1407 sta(0, ASI_HICACHECLR, 0); 1371 sta(0, ASI_HICACHECLR, 0);
1408 1372
1409 /* Disable `unimplemented flush' traps during boot-up */ 1373 /* Disable `unimplemented flush' traps during boot-up */
1410 wrasr(rdasr(HYPERSPARC_ASRNUM_ICCR) | HYPERSPARC_ICCR_FTD, 1374 wrasr(rdasr(HYPERSPARC_ASRNUM_ICCR) | HYPERSPARC_ICCR_FTD,
1411 HYPERSPARC_ASRNUM_ICCR); 1375 HYPERSPARC_ASRNUM_ICCR);
1412 } 1376 }
1413} 1377}
1414 1378
1415void 1379void
1416hypersparc_mmu_enable(void) 1380hypersparc_mmu_enable(void)
1417{ 1381{
1418#if 0 1382#if 0
1419 int pcr; 1383 int pcr;
1420 1384
1421 pcr = lda(SRMMU_PCR, ASI_SRMMU); 1385 pcr = lda(SRMMU_PCR, ASI_SRMMU);
1422 pcr |= HYPERSPARC_PCR_C; 1386 pcr |= HYPERSPARC_PCR_C;
1423 pcr &= ~HYPERSPARC_PCR_CE; 1387 pcr &= ~HYPERSPARC_PCR_CE;
1424 1388
1425 sta(SRMMU_PCR, ASI_SRMMU, pcr); 1389 sta(SRMMU_PCR, ASI_SRMMU, pcr);
1426#endif 1390#endif
1427} 1391}
1428 1392
1429int 1393int
1430hypersparc_getmid(void) 1394hypersparc_getmid(void)
1431{ 1395{
1432 u_int pcr = lda(SRMMU_PCR, ASI_SRMMU); 1396 u_int pcr = lda(SRMMU_PCR, ASI_SRMMU);
1433 return ((pcr & HYPERSPARC_PCR_MID) >> 15); 1397 return ((pcr & HYPERSPARC_PCR_MID) >> 15);
1434} 1398}
1435 1399
1436 1400
1437/* Cypress 605 */ 1401/* Cypress 605 */
1438struct module_info module_cypress = { 1402struct module_info module_cypress = {
1439 CPUTYP_CYPRESS, 1403 CPUTYP_CYPRESS,
1440 VAC_WRITEBACK, 1404 VAC_WRITEBACK,
1441 0, 1405 0,
1442 getcacheinfo_obp, 1406 getcacheinfo_obp,
1443 0, 1407 0,
1444 0, 1408 0,
1445 cypress_cache_enable, 1409 cypress_cache_enable,
1446 cypress_getmid, 1410 cypress_getmid,
1447 4096, 1411 4096,
1448 cypress_get_syncflt, 1412 cypress_get_syncflt,
1449 cypress_get_asyncflt, 1413 cypress_get_asyncflt,
1450 srmmu_cache_flush, 1414 srmmu_cache_flush,
1451 srmmu_vcache_flush_page, ft_srmmu_vcache_flush_page, 1415 srmmu_vcache_flush_page, ft_srmmu_vcache_flush_page,
1452 srmmu_vcache_flush_segment, ft_srmmu_vcache_flush_segment, 1416 srmmu_vcache_flush_segment, ft_srmmu_vcache_flush_segment,
1453 srmmu_vcache_flush_region, ft_srmmu_vcache_flush_region, 1417 srmmu_vcache_flush_region, ft_srmmu_vcache_flush_region,
1454 srmmu_vcache_flush_context, ft_srmmu_vcache_flush_context, 1418 srmmu_vcache_flush_context, ft_srmmu_vcache_flush_context,
1455 srmmu_vcache_flush_range, ft_srmmu_vcache_flush_range, 1419 srmmu_vcache_flush_range, ft_srmmu_vcache_flush_range,
1456 noop_pcache_flush_page, 1420 noop_pcache_flush_page,
1457 noop_pure_vcache_flush, 1421 noop_pure_vcache_flush,
1458 cypress_cache_flush_all, 1422 cypress_cache_flush_all,
1459 memerr4m, 1423 memerr4m,
1460 pmap_zero_page4m, 1424 pmap_zero_page4m,
1461 pmap_copy_page4m 1425 pmap_copy_page4m
1462}; 1426};
1463 1427
1464 1428
1465/* Fujitsu Turbosparc */ 1429/* Fujitsu Turbosparc */
1466struct module_info module_turbosparc = { 1430struct module_info module_turbosparc = {
1467 CPUTYP_MS2, 1431 CPUTYP_MS2,
1468 VAC_WRITEBACK, 1432 VAC_WRITEBACK,
1469 cpumatch_turbosparc, 1433 cpumatch_turbosparc,
1470 getcacheinfo_obp, 1434 getcacheinfo_obp,
1471 turbosparc_hotfix, 1435 turbosparc_hotfix,
1472 0, 1436 0,
1473 turbosparc_cache_enable, 1437 turbosparc_cache_enable,
1474 0, 1438 0,
1475 256, 1439 256,
1476 turbosparc_get_syncflt, 1440 turbosparc_get_syncflt,
1477 no_asyncflt_regs, 1441 no_asyncflt_regs,
1478 srmmu_cache_flush, 1442 srmmu_cache_flush,
1479 srmmu_vcache_flush_page, NULL, 1443 srmmu_vcache_flush_page, NULL,
1480 srmmu_vcache_flush_segment, NULL, 1444 srmmu_vcache_flush_segment, NULL,
1481 srmmu_vcache_flush_region, NULL, 1445 srmmu_vcache_flush_region, NULL,
1482 srmmu_vcache_flush_context, NULL, 1446 srmmu_vcache_flush_context, NULL,
1483 srmmu_vcache_flush_range, NULL, 1447 srmmu_vcache_flush_range, NULL,
1484 noop_pcache_flush_page, 1448 noop_pcache_flush_page,
1485 noop_pure_vcache_flush, 1449 noop_pure_vcache_flush,
1486 srmmu_cache_flush_all, 1450 srmmu_cache_flush_all,
1487 memerr4m, 1451 memerr4m,
1488 pmap_zero_page4m, 1452 pmap_zero_page4m,
1489 pmap_copy_page4m 1453 pmap_copy_page4m
1490}; 1454};
1491 1455
1492void 1456void
1493cpumatch_turbosparc(struct cpu_info *sc, struct module_info *mp, int node) 1457cpumatch_turbosparc(struct cpu_info *sc, struct module_info *mp, int node)
1494{ 1458{
1495 int i; 1459 int i;
1496 1460
1497 if (node == 0 || sc->master == 0) 1461 if (node == 0 || sc->master == 0)
1498 return; 1462 return;
1499 1463
1500 i = getpsr(); 1464 i = getpsr();
1501 if (sc->cpu_vers == IU_VERS(i)) 1465 if (sc->cpu_vers == IU_VERS(i))
1502 return; 1466 return;
1503 1467
1504 /* 1468 /*
1505 * A cloaked Turbosparc: clear any items in cpuinfo that 1469 * A cloaked Turbosparc: clear any items in cpuinfo that
1506 * might have been set to uS2 versions during bootstrap. 1470 * might have been set to uS2 versions during bootstrap.
1507 */ 1471 */
1508 sc->cpu_name = 0; 1472 sc->cpu_name = 0;
1509 sc->mmu_ncontext = 0; 1473 sc->mmu_ncontext = 0;
1510 sc->cpu_type = 0; 1474 sc->cpu_type = 0;
1511 sc->cacheinfo.c_vactype = 0; 1475 sc->cacheinfo.c_vactype = 0;
1512 sc->hotfix = 0; 1476 sc->hotfix = 0;
1513 sc->mmu_enable = 0; 1477 sc->mmu_enable = 0;
1514 sc->cache_enable = 0; 1478 sc->cache_enable = 0;
1515 sc->get_syncflt = 0; 1479 sc->get_syncflt = 0;
1516 sc->cache_flush = 0; 1480 sc->cache_flush = 0;
1517 sc->sp_vcache_flush_page = 0; 1481 sc->sp_vcache_flush_page = 0;
1518 sc->sp_vcache_flush_segment = 0; 1482 sc->sp_vcache_flush_segment = 0;
1519 sc->sp_vcache_flush_region = 0; 1483 sc->sp_vcache_flush_region = 0;
1520 sc->sp_vcache_flush_context = 0; 1484 sc->sp_vcache_flush_context = 0;
1521 sc->pcache_flush_page = 0; 1485 sc->pcache_flush_page = 0;
1522} 1486}
1523 1487
1524void 1488void
1525turbosparc_hotfix(struct cpu_info *sc) 1489turbosparc_hotfix(struct cpu_info *sc)
1526{ 1490{
1527 int pcf; 1491 int pcf;
1528 1492
1529 pcf = lda(SRMMU_PCFG, ASI_SRMMU); 1493 pcf = lda(SRMMU_PCFG, ASI_SRMMU);
1530 if (pcf & TURBOSPARC_PCFG_US2) { 1494 if (pcf & TURBOSPARC_PCFG_US2) {
1531 /* Turn off uS2 emulation bit */ 1495 /* Turn off uS2 emulation bit */
1532 pcf &= ~TURBOSPARC_PCFG_US2; 1496 pcf &= ~TURBOSPARC_PCFG_US2;
1533 sta(SRMMU_PCFG, ASI_SRMMU, pcf); 1497 sta(SRMMU_PCFG, ASI_SRMMU, pcf);
1534 } 1498 }
1535} 1499}
1536#endif /* SUN4M */ 1500#endif /* SUN4M */
1537 1501
1538#if defined(SUN4M) 1502#if defined(SUN4M)
1539struct module_info module_viking = { 1503struct module_info module_viking = {
1540 CPUTYP_UNKNOWN, /* set in cpumatch() */ 1504 CPUTYP_UNKNOWN, /* set in cpumatch() */
1541 VAC_NONE, 1505 VAC_NONE,
1542 cpumatch_viking, 1506 cpumatch_viking,
1543 getcacheinfo_obp, 1507 getcacheinfo_obp,
1544 viking_hotfix, 1508 viking_hotfix,
1545 viking_mmu_enable, 1509 viking_mmu_enable,
1546 viking_cache_enable, 1510 viking_cache_enable,
1547 viking_getmid, 1511 viking_getmid,
1548 4096, 1512 4096,
1549 viking_get_syncflt, 1513 viking_get_syncflt,
1550 no_asyncflt_regs, 1514 no_asyncflt_regs,
1551 /* supersparcs use cached DVMA, no need to flush */ 1515 /* supersparcs use cached DVMA, no need to flush */
1552 noop_cache_flush, 1516 noop_cache_flush,
1553 noop_vcache_flush_page, NULL, 1517 noop_vcache_flush_page, NULL,
1554 noop_vcache_flush_segment, NULL, 1518 noop_vcache_flush_segment, NULL,
1555 noop_vcache_flush_region, NULL, 1519 noop_vcache_flush_region, NULL,
1556 noop_vcache_flush_context, NULL, 1520 noop_vcache_flush_context, NULL,
1557 noop_vcache_flush_range, NULL, 1521 noop_vcache_flush_range, NULL,
1558 viking_pcache_flush_page, 1522 viking_pcache_flush_page,
1559 noop_pure_vcache_flush, 1523 noop_pure_vcache_flush,
1560 noop_cache_flush_all, 1524 noop_cache_flush_all,
1561 viking_memerr, 1525 viking_memerr,
1562 pmap_zero_page4m, 1526 pmap_zero_page4m,
1563 pmap_copy_page4m 1527 pmap_copy_page4m
1564}; 1528};
1565#endif /* SUN4M */ 1529#endif /* SUN4M */
1566 1530
1567#if defined(SUN4M) || defined(SUN4D) 1531#if defined(SUN4M) || defined(SUN4D)
1568void 1532void
1569cpumatch_viking(struct cpu_info *sc, struct module_info *mp, int node) 1533cpumatch_viking(struct cpu_info *sc, struct module_info *mp, int node)
1570{ 1534{
1571 1535
1572 if (node == 0) 1536 if (node == 0)
1573 viking_hotfix(sc); 1537 viking_hotfix(sc);
1574} 1538}
1575 1539
1576void 1540void
1577viking_hotfix(struct cpu_info *sc) 1541viking_hotfix(struct cpu_info *sc)
1578{ 1542{
1579static int mxcc = -1; 1543static int mxcc = -1;
1580 int pcr = lda(SRMMU_PCR, ASI_SRMMU); 1544 int pcr = lda(SRMMU_PCR, ASI_SRMMU);
1581 1545
1582 /* Test if we're directly on the MBus */ 1546 /* Test if we're directly on the MBus */
1583 if ((pcr & VIKING_PCR_MB) == 0) { 1547 if ((pcr & VIKING_PCR_MB) == 0) {
1584 sc->mxcc = 1; 1548 sc->mxcc = 1;
1585 sc->flags |= CPUFLG_CACHE_MANDATORY; 1549 sc->flags |= CPUFLG_CACHE_MANDATORY;
1586 sc->zero_page = pmap_zero_page_viking_mxcc; 1550 sc->zero_page = pmap_zero_page_viking_mxcc;
1587 sc->copy_page = pmap_copy_page_viking_mxcc; 1551 sc->copy_page = pmap_copy_page_viking_mxcc;
1588 moduleerr_handler = viking_module_error; 1552 moduleerr_handler = viking_module_error;
1589 1553
1590 /* 1554 /*
1591 * Ok to cache PTEs; set the flag here, so we don't 1555 * Ok to cache PTEs; set the flag here, so we don't
1592 * uncache in pmap_bootstrap(). 1556 * uncache in pmap_bootstrap().
1593 */ 1557 */
1594 if ((pcr & VIKING_PCR_TC) == 0) 1558 if ((pcr & VIKING_PCR_TC) == 0)
1595 printf("[viking: PCR_TC is off]"); 1559 printf("[viking: PCR_TC is off]");
1596 else 1560 else
1597 sc->flags |= CPUFLG_CACHEPAGETABLES; 1561 sc->flags |= CPUFLG_CACHEPAGETABLES;
1598 } else { 1562 } else {
1599#ifdef MULTIPROCESSOR 1563#ifdef MULTIPROCESSOR
1600 if ((sparc_ncpus > 1) && (sc->cacheinfo.ec_totalsize == 0)) 1564 if ((sparc_ncpus > 1) && (sc->cacheinfo.ec_totalsize == 0))
1601 sc->cache_flush = srmmu_cache_flush; 1565 sc->cache_flush = srmmu_cache_flush;
1602#endif 1566#endif
1603 } 1567 }
1604 /* Check all modules have the same MXCC configuration */ 1568 /* Check all modules have the same MXCC configuration */
1605 if (mxcc != -1 && sc->mxcc != mxcc) 1569 if (mxcc != -1 && sc->mxcc != mxcc)
1606 panic("MXCC module mismatch"); 1570 panic("MXCC module mismatch");
1607 1571
1608 mxcc = sc->mxcc; 1572 mxcc = sc->mxcc;
1609 1573
1610 /* XXX! */ 1574 /* XXX! */
1611 if (sc->mxcc) 1575 if (sc->mxcc)
1612 sc->cpu_type = CPUTYP_SS1_MBUS_MXCC; 1576 sc->cpu_type = CPUTYP_SS1_MBUS_MXCC;
1613 else 1577 else
1614 sc->cpu_type = CPUTYP_SS1_MBUS_NOMXCC; 1578 sc->cpu_type = CPUTYP_SS1_MBUS_NOMXCC;
1615} 1579}
1616 1580
1617void 1581void
1618viking_mmu_enable(void) 1582viking_mmu_enable(void)
1619{ 1583{
1620 int pcr; 1584 int pcr;
1621 1585
1622 pcr = lda(SRMMU_PCR, ASI_SRMMU); 1586 pcr = lda(SRMMU_PCR, ASI_SRMMU);
1623 1587
1624 if (cpuinfo.mxcc) { 1588 if (cpuinfo.mxcc) {
1625 if ((pcr & VIKING_PCR_TC) == 0) { 1589 if ((pcr & VIKING_PCR_TC) == 0) {
1626 printf("[viking: turn on PCR_TC]"); 1590 printf("[viking: turn on PCR_TC]");
1627 } 1591 }
1628 pcr |= VIKING_PCR_TC; 1592 pcr |= VIKING_PCR_TC;
1629 cpuinfo.flags |= CPUFLG_CACHEPAGETABLES; 1593 cpuinfo.flags |= CPUFLG_CACHEPAGETABLES;
1630 } else 1594 } else
1631 pcr &= ~VIKING_PCR_TC; 1595 pcr &= ~VIKING_PCR_TC;
1632 sta(SRMMU_PCR, ASI_SRMMU, pcr); 1596 sta(SRMMU_PCR, ASI_SRMMU, pcr);
1633} 1597}
1634 1598
1635int 1599int
1636viking_getmid(void) 1600viking_getmid(void)
1637{ 1601{
1638 1602
1639 if (cpuinfo.mxcc) { 1603 if (cpuinfo.mxcc) {
1640 u_int v = ldda(MXCC_MBUSPORT, ASI_CONTROL) & 0xffffffff; 1604 u_int v = ldda(MXCC_MBUSPORT, ASI_CONTROL) & 0xffffffff;
1641 return ((v >> 24) & 0xf); 1605 return ((v >> 24) & 0xf);
1642 } 1606 }
1643 return (0); 1607 return (0);
1644} 1608}
1645 1609
1646int 1610int
1647viking_module_error(void) 1611viking_module_error(void)
1648{ 1612{
1649 uint64_t v; 1613 uint64_t v;
1650 int n = 0, fatal = 0; 1614 int n = 0, fatal = 0;
1651 struct cpu_info *cpi; 1615 struct cpu_info *cpi;
1652 1616
1653 /* Report on MXCC error registers in each module */ 1617 /* Report on MXCC error registers in each module */
1654 for (CPU_INFO_FOREACH(n, cpi)) { 1618 for (CPU_INFO_FOREACH(n, cpi)) {
1655 if (cpi->ci_mxccregs == 0) { 1619 if (cpi->ci_mxccregs == 0) {
1656 printf("\tMXCC registers not mapped\n"); 1620 printf("\tMXCC registers not mapped\n");
1657 continue; 1621 continue;
1658 } 1622 }
1659 1623
1660 printf("module%d:\n", cpi->ci_cpuid); 1624 printf("module%d:\n", cpi->ci_cpuid);
1661 v = *((uint64_t *)(cpi->ci_mxccregs + 0xe00)); 1625 v = *((uint64_t *)(cpi->ci_mxccregs + 0xe00));
1662 printf("\tmxcc error 0x%llx\n", v); 1626 printf("\tmxcc error 0x%llx\n", v);
1663 v = *((uint64_t *)(cpi->ci_mxccregs + 0xb00)); 1627 v = *((uint64_t *)(cpi->ci_mxccregs + 0xb00));
1664 printf("\tmxcc status 0x%llx\n", v); 1628 printf("\tmxcc status 0x%llx\n", v);
1665 v = *((uint64_t *)(cpi->ci_mxccregs + 0xc00)); 1629 v = *((uint64_t *)(cpi->ci_mxccregs + 0xc00));
1666 printf("\tmxcc reset 0x%llx", v); 1630 printf("\tmxcc reset 0x%llx", v);
1667 if (v & MXCC_MRST_WD) 1631 if (v & MXCC_MRST_WD)
1668 printf(" (WATCHDOG RESET)"), fatal = 1; 1632 printf(" (WATCHDOG RESET)"), fatal = 1;
1669 if (v & MXCC_MRST_SI) 1633 if (v & MXCC_MRST_SI)
1670 printf(" (SOFTWARE RESET)"), fatal = 1; 1634 printf(" (SOFTWARE RESET)"), fatal = 1;
1671 printf("\n"); 1635 printf("\n");
1672 } 1636 }
1673 return (fatal); 1637 return (fatal);
1674} 1638}
1675#endif /* SUN4M || SUN4D */ 1639#endif /* SUN4M || SUN4D */
1676 1640
1677#if defined(SUN4D) 1641#if defined(SUN4D)
1678void 1642void
1679getcacheinfo_sun4d(struct cpu_info *sc, int node) 1643getcacheinfo_sun4d(struct cpu_info *sc, int node)
1680{ 1644{
1681 struct cacheinfo *ci = &sc->cacheinfo; 1645 struct cacheinfo *ci = &sc->cacheinfo;
1682 int i, l; 1646 int i, l;
1683 1647
1684 if (node == 0) 1648 if (node == 0)
1685 /* Bootstrapping */ 1649 /* Bootstrapping */
1686 return; 1650 return;
1687 1651
1688 /* 1652 /*
1689 * The Sun4d always has TI TMS390Z55 Viking CPUs; we hard-code 1653 * The Sun4d always has TI TMS390Z55 Viking CPUs; we hard-code
1690 * much of the cache information here. 1654 * much of the cache information here.
1691 */ 1655 */
1692 1656
1693 ci->c_physical = 1; 1657 ci->c_physical = 1;
1694 ci->c_split = 1; 1658 ci->c_split = 1;
1695 1659
1696 /* hwflush is used only by sun4/4c code */ 1660 /* hwflush is used only by sun4/4c code */
1697 ci->c_hwflush = 0; 1661 ci->c_hwflush = 0;
1698 1662
1699 ci->ic_nlines = 0x00000040; 1663 ci->ic_nlines = 0x00000040;
1700 ci->ic_linesize = 0x00000040; 1664 ci->ic_linesize = 0x00000040;
1701 ci->ic_l2linesize = 6; 1665 ci->ic_l2linesize = 6;
1702 ci->ic_associativity = 0x00000005; 1666 ci->ic_associativity = 0x00000005;
1703 ci->ic_totalsize = ci->ic_linesize * ci->ic_nlines * 1667 ci->ic_totalsize = ci->ic_linesize * ci->ic_nlines *
1704 ci->ic_associativity; 1668 ci->ic_associativity;
1705 1669
1706 ci->dc_nlines = 0x00000080; 1670 ci->dc_nlines = 0x00000080;
1707 ci->dc_linesize = 0x00000020; 1671 ci->dc_linesize = 0x00000020;
1708 ci->dc_l2linesize = 5; 1672 ci->dc_l2linesize = 5;
1709 ci->dc_associativity = 0x00000004; 1673 ci->dc_associativity = 0x00000004;
1710 ci->dc_totalsize = ci->dc_linesize * ci->dc_nlines * 1674 ci->dc_totalsize = ci->dc_linesize * ci->dc_nlines *
1711 ci->dc_associativity; 1675 ci->dc_associativity;
1712 1676
1713 ci->c_l2linesize = min(ci->ic_l2linesize, ci->dc_l2linesize); 1677 ci->c_l2linesize = min(ci->ic_l2linesize, ci->dc_l2linesize);
1714 ci->c_linesize = min(ci->ic_linesize, ci->dc_linesize); 1678 ci->c_linesize = min(ci->ic_linesize, ci->dc_linesize);
1715 ci->c_totalsize = max(ci->ic_totalsize, ci->dc_totalsize); 1679 ci->c_totalsize = max(ci->ic_totalsize, ci->dc_totalsize);
1716 ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize; 1680 ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
1717 1681
1718 if (node_has_property(node, "ecache-nlines")) { 1682 if (node_has_property(node, "ecache-nlines")) {
1719 /* we have a L2 "e"xternal cache */ 1683 /* we have a L2 "e"xternal cache */
1720 ci->ec_nlines = prom_getpropint(node, "ecache-nlines", 32768); 1684 ci->ec_nlines = prom_getpropint(node, "ecache-nlines", 32768);
1721 ci->ec_linesize = l = prom_getpropint(node, "ecache-line-size", 0); 1685 ci->ec_linesize = l = prom_getpropint(node, "ecache-line-size", 0);
1722 for (i = 0; (1 << i) < l && l; i++) 1686 for (i = 0; (1 << i) < l && l; i++)
1723 /* void */; 1687 /* void */;
1724 if ((1 << i) != l && l) 1688 if ((1 << i) != l && l)
1725 panic("bad ecache line size %d", l); 1689 panic("bad ecache line size %d", l);
1726 ci->ec_l2linesize = i; 1690 ci->ec_l2linesize = i;
1727 ci->ec_associativity = 1691 ci->ec_associativity =
1728 prom_getpropint(node, "ecache-associativity", 1); 1692 prom_getpropint(node, "ecache-associativity", 1);
1729 ci->ec_totalsize = l * ci->ec_nlines * ci->ec_associativity; 1693 ci->ec_totalsize = l * ci->ec_nlines * ci->ec_associativity;
1730 } 1694 }
1731} 1695}
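Plugging the hard-coded values above into the same formulas gives the expected TMS390Z55 on-chip cache sizes (arithmetic shown for reference, not generated by this code):

	ic_totalsize = 0x40 * 0x40 * 5;		/* 64 B x 64 lines x 5-way = 20 KB I-cache */
	dc_totalsize = 0x20 * 0x80 * 4;		/* 32 B x 128 lines x 4-way = 16 KB D-cache */
	c_l2linesize = min(6, 5);		/* 5: 32-byte combined line */
	c_totalsize  = max(20480, 16384);	/* 20480 */
	c_nlines     = 20480 >> 5;		/* 640 */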
1732 1696
1733struct module_info module_viking_sun4d = { 1697struct module_info module_viking_sun4d = {
1734 CPUTYP_UNKNOWN, /* set in cpumatch() */ 1698 CPUTYP_UNKNOWN, /* set in cpumatch() */
1735 VAC_NONE, 1699 VAC_NONE,
1736 cpumatch_viking, 1700 cpumatch_viking,
1737 getcacheinfo_sun4d, 1701 getcacheinfo_sun4d,
1738 viking_hotfix, 1702 viking_hotfix,
1739 viking_mmu_enable, 1703 viking_mmu_enable,
1740 viking_cache_enable, 1704 viking_cache_enable,
1741 viking_getmid, 1705 viking_getmid,
1742 4096, 1706 4096,
1743 viking_get_syncflt, 1707 viking_get_syncflt,
1744 no_asyncflt_regs, 1708 no_asyncflt_regs,
1745 /* supersparcs use cached DVMA, no need to flush */ 1709 /* supersparcs use cached DVMA, no need to flush */
1746 noop_cache_flush, 1710 noop_cache_flush,
1747 noop_vcache_flush_page, NULL, 1711 noop_vcache_flush_page, NULL,
1748 noop_vcache_flush_segment, NULL, 1712 noop_vcache_flush_segment, NULL,
1749 noop_vcache_flush_region, NULL, 1713 noop_vcache_flush_region, NULL,
1750 noop_vcache_flush_context, NULL, 1714 noop_vcache_flush_context, NULL,
1751 noop_vcache_flush_range, NULL, 1715 noop_vcache_flush_range, NULL,
1752 viking_pcache_flush_page, 1716 viking_pcache_flush_page,
1753 noop_pure_vcache_flush, 1717 noop_pure_vcache_flush,
1754 noop_cache_flush_all, 1718 noop_cache_flush_all,
1755 viking_memerr, 1719 viking_memerr,
1756 pmap_zero_page4m, 1720 pmap_zero_page4m,
1757 pmap_copy_page4m 1721 pmap_copy_page4m
1758}; 1722};
1759#endif /* SUN4D */ 1723#endif /* SUN4D */
1760 1724
1761#define ANY -1 /* match any version */ 1725#define ANY -1 /* match any version */
1762 1726
1763struct cpu_conf { 1727struct cpu_conf {
1764 int arch; 1728 int arch;
1765 int cpu_impl; 1729 int cpu_impl;
1766 int cpu_vers; 1730 int cpu_vers;
1767 int mmu_impl; 1731 int mmu_impl;
1768 int mmu_vers; 1732 int mmu_vers;
1769 const char *name; 1733 const char *name;
1770 struct module_info *minfo; 1734 struct module_info *minfo;

cvs diff -r1.77 -r1.78 src/sys/arch/sparc/sparc/cpuvar.h (switch to unified diff)

--- src/sys/arch/sparc/sparc/cpuvar.h 2009/05/18 01:36:11 1.77
+++ src/sys/arch/sparc/sparc/cpuvar.h 2009/05/27 02:19:49 1.78
@@ -1,483 +1,481 @@ @@ -1,483 +1,481 @@
1/* $NetBSD: cpuvar.h,v 1.77 2009/05/18 01:36:11 mrg Exp $ */ 1/* $NetBSD: cpuvar.h,v 1.78 2009/05/27 02:19:49 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996 The NetBSD Foundation, Inc. 4 * Copyright (c) 1996 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg. 8 * by Paul Kranenburg.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#ifndef _sparc_cpuvar_h 32#ifndef _sparc_cpuvar_h
33#define _sparc_cpuvar_h 33#define _sparc_cpuvar_h
34 34
35#if defined(_KERNEL_OPT) 35#if defined(_KERNEL_OPT)
36#include "opt_multiprocessor.h" 36#include "opt_multiprocessor.h"
37#include "opt_lockdebug.h" 37#include "opt_lockdebug.h"
38#include "opt_ddb.h" 38#include "opt_ddb.h"
39#include "opt_sparc_arch.h" 39#include "opt_sparc_arch.h"
 40#include "opt_modular.h"
40#endif 41#endif
41 42
42#include <sys/device.h> 43#include <sys/device.h>
43#include <sys/lock.h> 44#include <sys/lock.h>
44#include <sys/cpu_data.h> 45#include <sys/cpu_data.h>
45 46
46#include <sparc/include/reg.h> 47#include <sparc/include/reg.h>
47#include <sparc/sparc/cache.h> /* for cacheinfo */ 48#include <sparc/sparc/cache.h> /* for cacheinfo */
48 49
49/* 50/*
50 * CPU/MMU module information. 51 * CPU/MMU module information.
51 * There is one of these for each "mainline" CPU module we support. 52 * There is one of these for each "mainline" CPU module we support.
52 * The information contained in the structure is used only during 53 * The information contained in the structure is used only during
53 * auto-configuration of the CPUs; some fields are copied into the 54 * auto-configuration of the CPUs; some fields are copied into the
54 * per-cpu data structure (cpu_info) for easy access during normal 55 * per-cpu data structure (cpu_info) for easy access during normal
55 * operation. 56 * operation.
56 */ 57 */
57struct cpu_info; 58struct cpu_info;
58struct module_info { 59struct module_info {
59 int cpu_type; 60 int cpu_type;
60 enum vactype vactype; 61 enum vactype vactype;
61 void (*cpu_match)(struct cpu_info *, struct module_info *, int); 62 void (*cpu_match)(struct cpu_info *, struct module_info *, int);
62 void (*getcacheinfo)(struct cpu_info *sc, int node); 63 void (*getcacheinfo)(struct cpu_info *sc, int node);
63 void (*hotfix)(struct cpu_info *); 64 void (*hotfix)(struct cpu_info *);
64 void (*mmu_enable)(void); 65 void (*mmu_enable)(void);
65 void (*cache_enable)(void); 66 void (*cache_enable)(void);
66 int (*getmid)(void); /* Get MID of current CPU */ 67 int (*getmid)(void); /* Get MID of current CPU */
67 int ncontext; /* max. # of contexts (that we use) */ 68 int ncontext; /* max. # of contexts (that we use) */
68 69
69 void (*get_syncflt)(void); 70 void (*get_syncflt)(void);
70 int (*get_asyncflt)(u_int *, u_int *); 71 int (*get_asyncflt)(u_int *, u_int *);
71 void (*cache_flush)(void *, u_int); 72 void (*cache_flush)(void *, u_int);
72 void (*sp_vcache_flush_page)(int, int); 73 void (*sp_vcache_flush_page)(int, int);
73 void (*ft_vcache_flush_page)(int, int); 74 void (*ft_vcache_flush_page)(int, int);
74 void (*sp_vcache_flush_segment)(int, int, int); 75 void (*sp_vcache_flush_segment)(int, int, int);
75 void (*ft_vcache_flush_segment)(int, int, int); 76 void (*ft_vcache_flush_segment)(int, int, int);
76 void (*sp_vcache_flush_region)(int, int); 77 void (*sp_vcache_flush_region)(int, int);
77 void (*ft_vcache_flush_region)(int, int); 78 void (*ft_vcache_flush_region)(int, int);
78 void (*sp_vcache_flush_context)(int); 79 void (*sp_vcache_flush_context)(int);
79 void (*ft_vcache_flush_context)(int); 80 void (*ft_vcache_flush_context)(int);
80 void (*sp_vcache_flush_range)(int, int, int); 81 void (*sp_vcache_flush_range)(int, int, int);
81 void (*ft_vcache_flush_range)(int, int, int); 82 void (*ft_vcache_flush_range)(int, int, int);
82 void (*pcache_flush_page)(paddr_t, int); 83 void (*pcache_flush_page)(paddr_t, int);
83 void (*pure_vcache_flush)(void); 84 void (*pure_vcache_flush)(void);
84 void (*cache_flush_all)(void); 85 void (*cache_flush_all)(void);
85 void (*memerr)(unsigned, u_int, u_int, struct trapframe *); 86 void (*memerr)(unsigned, u_int, u_int, struct trapframe *);
86 void (*zero_page)(paddr_t); 87 void (*zero_page)(paddr_t);
87 void (*copy_page)(paddr_t, paddr_t); 88 void (*copy_page)(paddr_t, paddr_t);
88}; 89};
89 90
90/* 91/*
91 * Message structure for Inter Processor Communication in MP systems 92 * Message structure for Inter Processor Communication in MP systems
92 */ 93 */
93struct xpmsg { 94struct xpmsg {
94 volatile int tag; 95 volatile int tag;
95#define XPMSG15_PAUSECPU 1 96#define XPMSG15_PAUSECPU 1
96#define XPMSG_FUNC 4 97#define XPMSG_FUNC 4
97#define XPMSG_FTRP 5 98#define XPMSG_FTRP 5
98 99
99 volatile union { 100 volatile union {
100 /* 101 /*
101 * Cross call: ask to run (*func)(arg0,arg1,arg2) 102 * Cross call: ask to run (*func)(arg0,arg1,arg2)
102 * or (*trap)(arg0,arg1,arg2). `trap' should be the 103 * or (*trap)(arg0,arg1,arg2). `trap' should be the
103 * address of a `fast trap' handler that executes in 104 * address of a `fast trap' handler that executes in
104 * the trap window (see locore.s). 105 * the trap window (see locore.s).
105 */ 106 */
106 struct xpmsg_func { 107 struct xpmsg_func {
107 int (*func)(int, int, int); 108 void (*func)(int, int, int);
108 void (*trap)(int, int, int); 109 void (*trap)(int, int, int);
109 int arg0; 110 int arg0;
110 int arg1; 111 int arg1;
111 int arg2; 112 int arg2;
112 int retval; 
113 } xpmsg_func; 113 } xpmsg_func;
114 } u; 114 } u;
115 volatile int received; 115 volatile int received;
116 volatile int complete; 116 volatile int complete;
117}; 117};
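A hedged sketch of how a sender might drive an XPMSG_FUNC cross call against the layout above, now that func returns void and there is no retval slot. The interrupt level, the mutex type, and the surrounding protocol details are assumptions for illustration, not taken from this diff:

	/* Illustrative only: cpi points at the target CPU's cpu_info;
	 * xpmsg_lock is assumed to now be a kmutex declared elsewhere. */
	mutex_spin_enter(&xpmsg_lock);
	cpi->msg.tag = XPMSG_FUNC;
	cpi->msg.complete = 0;
	cpi->msg.u.xpmsg_func.func = func;	/* void (*)(int, int, int) */
	cpi->msg.u.xpmsg_func.trap = NULL;
	cpi->msg.u.xpmsg_func.arg0 = arg0;
	cpi->msg.u.xpmsg_func.arg1 = arg1;
	cpi->msg.u.xpmsg_func.arg2 = arg2;
	raise_ipi(cpi, 13);			/* level 13 is an assumption */
	while (cpi->msg.complete == 0)
		/* spin until the target marks the call done */;
	mutex_spin_exit(&xpmsg_lock);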
118 118
119/* 119/*
120 * This must be locked around all message transactions to ensure only 
121 * one CPU is generating them. 
122 */ 
123extern struct simplelock xpmsg_lock; 
124 
125#define LOCK_XPMSG() simple_lock(&xpmsg_lock); 
126#define UNLOCK_XPMSG() simple_unlock(&xpmsg_lock); 
127 
128/* 
129 * The cpuinfo structure. This structure maintains information about one 120 * The cpuinfo structure. This structure maintains information about one
130 * currently installed CPU (there may be several of these if the machine 121 * currently installed CPU (there may be several of these if the machine
131 * supports multiple CPUs, as on some Sun4m architectures). The information 122 * supports multiple CPUs, as on some Sun4m architectures). The information
132 * in this structure supersedes the old "cpumod", "mmumod", and similar 123 * in this structure supersedes the old "cpumod", "mmumod", and similar
133 * fields. 124 * fields.
134 */ 125 */
135 126
136struct cpu_info { 127struct cpu_info {
137 struct cpu_data ci_data; /* MI per-cpu data */ 128 struct cpu_data ci_data; /* MI per-cpu data */
138 129
139 /* Scheduler flags */ 130 /* Scheduler flags */
140 int ci_want_ast; 131 int ci_want_ast;
141 int ci_want_resched; 132 int ci_want_resched;
142 133
143 /* 134 /*
144 * SPARC cpu_info structures live at two VAs: one global 135 * SPARC cpu_info structures live at two VAs: one global
145 * VA (so each CPU can access any other CPU's cpu_info) 136 * VA (so each CPU can access any other CPU's cpu_info)
146 * and an alias VA CPUINFO_VA which is the same on each 137 * and an alias VA CPUINFO_VA which is the same on each
147 * CPU and maps to that CPU's cpu_info. Since the alias 138 * CPU and maps to that CPU's cpu_info. Since the alias
148 * CPUINFO_VA is how we locate our cpu_info, we have to 139 * CPUINFO_VA is how we locate our cpu_info, we have to
149 * self-reference the global VA so that we can return it 140 * self-reference the global VA so that we can return it
150 * in the curcpu() macro. 141 * in the curcpu() macro.
151 */ 142 */
152 struct cpu_info * volatile ci_self; 143 struct cpu_info * volatile ci_self;
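	/*
	 * Hedged illustration (not part of this diff): because CPUINFO_VA
	 * aliases the running CPU's own cpu_info, a curcpu()-style macro
	 * can be written roughly as
	 *	#define cpuinfo   (*(struct cpu_info *)CPUINFO_VA)
	 *	#define curcpu()  (cpuinfo.ci_self)
	 * i.e. go through the per-CPU alias, then return the globally
	 * mapped pointer stored in ci_self.
	 */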
153 144
154 /* Primary Inter-processor message area */ 145 /* Primary Inter-processor message area */
155 struct xpmsg msg; 146 struct xpmsg msg;
156 147
157 int ci_cpuid; /* CPU index (see cpus[] array) */ 148 int ci_cpuid; /* CPU index (see cpus[] array) */
158 149
159 /* Context administration */ 150 /* Context administration */
160 int *ctx_tbl; /* [4m] SRMMU-edible context table */ 151 int *ctx_tbl; /* [4m] SRMMU-edible context table */
161 paddr_t ctx_tbl_pa; /* [4m] ctx table physical address */ 152 paddr_t ctx_tbl_pa; /* [4m] ctx table physical address */
162 153
163 /* Cache information */ 154 /* Cache information */
164 struct cacheinfo cacheinfo; /* see cache.h */ 155 struct cacheinfo cacheinfo; /* see cache.h */
165 156
166 /* various flags to workaround anomalies in chips */ 157 /* various flags to workaround anomalies in chips */
167 volatile int flags; /* see CPUFLG_xxx, below */ 158 volatile int flags; /* see CPUFLG_xxx, below */
168 159
169 /* Per processor counter register (sun4m only) */ 160 /* Per processor counter register (sun4m only) */
170 volatile struct counter_4m *counterreg_4m; 161 volatile struct counter_4m *counterreg_4m;
171 162
172 /* Per processor interrupt mask register (sun4m only) */ 163 /* Per processor interrupt mask register (sun4m only) */
173 volatile struct icr_pi *intreg_4m; 164 volatile struct icr_pi *intreg_4m;
174 /* 165 /*
 175 * Send an IPI to (cpi). For Ross CPUs we need to read 166 * Send an IPI to (cpi). For Ross CPUs we need to read
176 * the pending register to avoid a hardware bug. 167 * the pending register to avoid a hardware bug.
177 */ 168 */
178#define raise_ipi(cpi,lvl) do { \ 169#define raise_ipi(cpi,lvl) do { \
179 volatile int x; \ 170 volatile int x; \
180 (cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl); \ 171 (cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl); \
181 x = (cpi)->intreg_4m->pi_pend; \ 172 x = (cpi)->intreg_4m->pi_pend; \
182} while (0) 173} while (0)
183 174
184 int sun4_mmu3l; /* [4]: 3-level MMU present */ 175 int sun4_mmu3l; /* [4]: 3-level MMU present */
185#if defined(SUN4_MMU3L) 176#if defined(SUN4_MMU3L)
186#define HASSUN4_MMU3L (cpuinfo.sun4_mmu3l) 177#define HASSUN4_MMU3L (cpuinfo.sun4_mmu3l)
187#else 178#else
188#define HASSUN4_MMU3L (0) 179#define HASSUN4_MMU3L (0)
189#endif 180#endif
190 int ci_idepth; /* Interrupt depth */ 181 int ci_idepth; /* Interrupt depth */
191 182
192 /* 183 /*
193 * The following pointers point to processes that are somehow 184 * The following pointers point to processes that are somehow
194 * associated with this CPU--running on it, using its FPU, 185 * associated with this CPU--running on it, using its FPU,
195 * etc. 186 * etc.
196 */ 187 */
197 struct lwp *ci_curlwp; /* CPU owner */ 188 struct lwp *ci_curlwp; /* CPU owner */
198 struct lwp *fplwp; /* FPU owner */ 189 struct lwp *fplwp; /* FPU owner */
199 190
200 int ci_mtx_count; 191 int ci_mtx_count;
201 int ci_mtx_oldspl; 192 int ci_mtx_oldspl;
202 193
203 /* 194 /*
204 * Idle PCB and Interrupt stack; 195 * Idle PCB and Interrupt stack;
205 */ 196 */
206 void *eintstack; /* End of interrupt stack */ 197 void *eintstack; /* End of interrupt stack */
207#define INT_STACK_SIZE (128 * 128) /* 128 128-byte stack frames */ 198#define INT_STACK_SIZE (128 * 128) /* 128 128-byte stack frames */
208 void *redzone; /* DEBUG: stack red zone */ 199 void *redzone; /* DEBUG: stack red zone */
209#define REDSIZE (8*96) /* some room for bouncing */ 200#define REDSIZE (8*96) /* some room for bouncing */
210 201
211 struct pcb *curpcb; /* CPU's PCB & kernel stack */ 202 struct pcb *curpcb; /* CPU's PCB & kernel stack */
212 203
213 /* locore defined: */ 204 /* locore defined: */
214 void (*get_syncflt)(void); /* Not C-callable */ 205 void (*get_syncflt)(void); /* Not C-callable */
215 int (*get_asyncflt)(u_int *, u_int *); 206 int (*get_asyncflt)(u_int *, u_int *);
216 207
217 /* Synchronous Fault Status; temporary storage */ 208 /* Synchronous Fault Status; temporary storage */
218 struct { 209 struct {
219 int sfsr; 210 int sfsr;
220 int sfva; 211 int sfva;
221 } syncfltdump; 212 } syncfltdump;
222 213
223 /* 214 /*
224 * Cache handling functions. 215 * Cache handling functions.
 225 * Most cache flush functions come in two flavours: one that 216 * Most cache flush functions come in two flavours: one that
226 * acts only on the CPU it executes on, and another that 217 * acts only on the CPU it executes on, and another that
227 * uses inter-processor signals to flush the cache on 218 * uses inter-processor signals to flush the cache on
228 * all processor modules. 219 * all processor modules.
229 * The `ft_' versions are fast trap cache flush handlers. 220 * The `ft_' versions are fast trap cache flush handlers.
230 */ 221 */
231 void (*cache_flush)(void *, u_int); 222 void (*cache_flush)(void *, u_int);
232 void (*vcache_flush_page)(int, int); 223 void (*vcache_flush_page)(int, int);
233 void (*sp_vcache_flush_page)(int, int); 224 void (*sp_vcache_flush_page)(int, int);
234 void (*ft_vcache_flush_page)(int, int); 225 void (*ft_vcache_flush_page)(int, int);
235 void (*vcache_flush_segment)(int, int, int); 226 void (*vcache_flush_segment)(int, int, int);
236 void (*sp_vcache_flush_segment)(int, int, int); 227 void (*sp_vcache_flush_segment)(int, int, int);
237 void (*ft_vcache_flush_segment)(int, int, int); 228 void (*ft_vcache_flush_segment)(int, int, int);
238 void (*vcache_flush_region)(int, int); 229 void (*vcache_flush_region)(int, int);
239 void (*sp_vcache_flush_region)(int, int); 230 void (*sp_vcache_flush_region)(int, int);
240 void (*ft_vcache_flush_region)(int, int); 231 void (*ft_vcache_flush_region)(int, int);
241 void (*vcache_flush_context)(int); 232 void (*vcache_flush_context)(int);
242 void (*sp_vcache_flush_context)(int); 233 void (*sp_vcache_flush_context)(int);
243 void (*ft_vcache_flush_context)(int); 234 void (*ft_vcache_flush_context)(int);
244 235
 245 /* These are helpers for (*cache_flush)() */ 236 /* These are helpers for (*cache_flush)() */
246 void (*sp_vcache_flush_range)(int, int, int); 237 void (*sp_vcache_flush_range)(int, int, int);
247 void (*ft_vcache_flush_range)(int, int, int); 238 void (*ft_vcache_flush_range)(int, int, int);
248 239
249 void (*pcache_flush_page)(paddr_t, int); 240 void (*pcache_flush_page)(paddr_t, int);
250 void (*pure_vcache_flush)(void); 241 void (*pure_vcache_flush)(void);
251 void (*cache_flush_all)(void); 242 void (*cache_flush_all)(void);
252 243
253 /* Support for hardware-assisted page clear/copy */ 244 /* Support for hardware-assisted page clear/copy */
254 void (*zero_page)(paddr_t); 245 void (*zero_page)(paddr_t);
255 void (*copy_page)(paddr_t, paddr_t); 246 void (*copy_page)(paddr_t, paddr_t);
256 247
257 /* Virtual addresses for use in pmap copy_page/zero_page */ 248 /* Virtual addresses for use in pmap copy_page/zero_page */
258 void * vpage[2]; 249 void * vpage[2];
259 int *vpage_pte[2]; /* pte location of vpage[] */ 250 int *vpage_pte[2]; /* pte location of vpage[] */
260 251
261 void (*cache_enable)(void); 252 void (*cache_enable)(void);
262 253
263 int cpu_type; /* Type: see CPUTYP_xxx below */ 254 int cpu_type; /* Type: see CPUTYP_xxx below */
264 255
265 /* Inter-processor message area (high priority but used infrequently) */ 256 /* Inter-processor message area (high priority but used infrequently) */
266 struct xpmsg msg_lev15; 257 struct xpmsg msg_lev15;
267 258
268 /* CPU information */ 259 /* CPU information */
269 int node; /* PROM node for this CPU */ 260 int node; /* PROM node for this CPU */
270 int mid; /* Module ID for MP systems */ 261 int mid; /* Module ID for MP systems */
271 int mbus; /* 1 if CPU is on MBus */ 262 int mbus; /* 1 if CPU is on MBus */
272 int mxcc; /* 1 if a MBus-level MXCC is present */ 263 int mxcc; /* 1 if a MBus-level MXCC is present */
273 const char *cpu_name; /* CPU model */ 264 const char *cpu_name; /* CPU model */
274 int cpu_impl; /* CPU implementation code */ 265 int cpu_impl; /* CPU implementation code */
275 int cpu_vers; /* CPU version code */ 266 int cpu_vers; /* CPU version code */
276 int mmu_impl; /* MMU implementation code */ 267 int mmu_impl; /* MMU implementation code */
277 int mmu_vers; /* MMU version code */ 268 int mmu_vers; /* MMU version code */
278 int master; /* 1 if this is bootup CPU */ 269 int master; /* 1 if this is bootup CPU */
279 270
280 vaddr_t mailbox; /* VA of CPU's mailbox */ 271 vaddr_t mailbox; /* VA of CPU's mailbox */
281 272
282 int mmu_ncontext; /* Number of contexts supported */ 273 int mmu_ncontext; /* Number of contexts supported */
283 int mmu_nregion; /* Number of regions supported */ 274 int mmu_nregion; /* Number of regions supported */
284 int mmu_nsegment; /* [4/4c] Segments */ 275 int mmu_nsegment; /* [4/4c] Segments */
285 int mmu_npmeg; /* [4/4c] Pmegs */ 276 int mmu_npmeg; /* [4/4c] Pmegs */
286 277
287/* XXX - we currently don't actually use the following */ 278/* XXX - we currently don't actually use the following */
288 int arch; /* Architecture: CPU_SUN4x */ 279 int arch; /* Architecture: CPU_SUN4x */
289 int class; /* Class: SuperSPARC, microSPARC... */ 280 int class; /* Class: SuperSPARC, microSPARC... */
290 int classlvl; /* Iteration in class: 1, 2, etc. */ 281 int classlvl; /* Iteration in class: 1, 2, etc. */
291 int classsublvl; /* stepping in class (version) */ 282 int classsublvl; /* stepping in class (version) */
292 283
293 int hz; /* Clock speed */ 284 int hz; /* Clock speed */
294 285
295 /* FPU information */ 286 /* FPU information */
296 int fpupresent; /* true if FPU is present */ 287 int fpupresent; /* true if FPU is present */
297 int fpuvers; /* FPU revision */ 288 int fpuvers; /* FPU revision */
298 const char *fpu_name; /* FPU model */ 289 const char *fpu_name; /* FPU model */
299 char fpu_namebuf[32];/* Buffer for FPU name, if necessary */ 290 char fpu_namebuf[32];/* Buffer for FPU name, if necessary */
300 291
301 /* XXX */ 292 /* XXX */
302 volatile void *ci_ddb_regs; /* DDB regs */ 293 volatile void *ci_ddb_regs; /* DDB regs */
303 294
304 /* 295 /*
305 * The following are function pointers to do interesting CPU-dependent 296 * The following are function pointers to do interesting CPU-dependent
306 * things without having to do type-tests all the time 297 * things without having to do type-tests all the time
307 */ 298 */
308 299
309 /* bootup things: access to physical memory */ 300 /* bootup things: access to physical memory */
310 u_int (*read_physmem)(u_int addr, int space); 301 u_int (*read_physmem)(u_int addr, int space);
311 void (*write_physmem)(u_int addr, u_int data); 302 void (*write_physmem)(u_int addr, u_int data);
312 void (*cache_tablewalks)(void); 303 void (*cache_tablewalks)(void);
313 void (*mmu_enable)(void); 304 void (*mmu_enable)(void);
314 void (*hotfix)(struct cpu_info *); 305 void (*hotfix)(struct cpu_info *);
315 306
316 307
317#if 0 308#if 0
318 /* hardware-assisted block operation routines */ 309 /* hardware-assisted block operation routines */
319 void (*hwbcopy)(const void *from, void *to, size_t len); 310 void (*hwbcopy)(const void *from, void *to, size_t len);
320 void (*hwbzero)(void *buf, size_t len); 311 void (*hwbzero)(void *buf, size_t len);
321 312
322 /* routine to clear mbus-sbus buffers */ 313 /* routine to clear mbus-sbus buffers */
323 void (*mbusflush)(void); 314 void (*mbusflush)(void);
324#endif 315#endif
325 316
326 /* 317 /*
327 * Memory error handler; parity errors, unhandled NMIs and other 318 * Memory error handler; parity errors, unhandled NMIs and other
328 * unrecoverable faults end up here. 319 * unrecoverable faults end up here.
329 */ 320 */
330 void (*memerr)(unsigned, u_int, u_int, struct trapframe *); 321 void (*memerr)(unsigned, u_int, u_int, struct trapframe *);
331 void (*idlespin)(struct cpu_info *); 322 void (*idlespin)(struct cpu_info *);
332 /* Module Control Registers */ 323 /* Module Control Registers */
333 /*bus_space_handle_t*/ long ci_mbusport; 324 /*bus_space_handle_t*/ long ci_mbusport;
334 /*bus_space_handle_t*/ long ci_mxccregs; 325 /*bus_space_handle_t*/ long ci_mxccregs;
335 326
336 u_int ci_tt; /* Last trap (if tracing) */ 327 u_int ci_tt; /* Last trap (if tracing) */
337}; 328};
338 329
339/* 330/*
340 * CPU architectures 331 * CPU architectures
341 */ 332 */
342#define CPUARCH_UNKNOWN 0 333#define CPUARCH_UNKNOWN 0
343#define CPUARCH_SUN4 1 334#define CPUARCH_SUN4 1
344#define CPUARCH_SUN4C 2 335#define CPUARCH_SUN4C 2
345#define CPUARCH_SUN4M 3 336#define CPUARCH_SUN4M 3
346#define CPUARCH_SUN4D 4 337#define CPUARCH_SUN4D 4
347#define CPUARCH_SUN4U 5 338#define CPUARCH_SUN4U 5
348 339
349/* 340/*
350 * CPU classes 341 * CPU classes
351 */ 342 */
352#define CPUCLS_UNKNOWN 0 343#define CPUCLS_UNKNOWN 0
353 344
354#if defined(SUN4) 345#if defined(SUN4)
355#define CPUCLS_SUN4 1 346#define CPUCLS_SUN4 1
356#endif 347#endif
357 348
358#if defined(SUN4C) 349#if defined(SUN4C)
359#define CPUCLS_SUN4C 5 350#define CPUCLS_SUN4C 5
360#endif 351#endif
361 352
362#if defined(SUN4M) || defined(SUN4D) 353#if defined(SUN4M) || defined(SUN4D)
363#define CPUCLS_MICROSPARC 10 /* MicroSPARC-II */ 354#define CPUCLS_MICROSPARC 10 /* MicroSPARC-II */
364#define CPUCLS_SUPERSPARC 11 /* Generic SuperSPARC */ 355#define CPUCLS_SUPERSPARC 11 /* Generic SuperSPARC */
365#define CPUCLS_HYPERSPARC 12 /* Ross HyperSPARC RT620 */ 356#define CPUCLS_HYPERSPARC 12 /* Ross HyperSPARC RT620 */
366#endif 357#endif
367 358
368/* 359/*
369 * CPU types. Each of these should uniquely identify one platform/type of 360 * CPU types. Each of these should uniquely identify one platform/type of
370 * system, i.e. "MBus-based 75 MHz SuperSPARC-II with ECache" is 361 * system, i.e. "MBus-based 75 MHz SuperSPARC-II with ECache" is
371 * CPUTYP_SS2_MBUS_MXCC. The general form is 362 * CPUTYP_SS2_MBUS_MXCC. The general form is
372 * CPUTYP_proctype_bustype_cachetype_etc_etc 363 * CPUTYP_proctype_bustype_cachetype_etc_etc
373 * 364 *
374 * XXX: This is far from complete/comprehensive 365 * XXX: This is far from complete/comprehensive
375 * XXX: ADD SUN4, SUN4C TYPES 366 * XXX: ADD SUN4, SUN4C TYPES
376 */ 367 */
377#define CPUTYP_UNKNOWN 0 368#define CPUTYP_UNKNOWN 0
378 369
379#define CPUTYP_4_100 1 /* Sun4/100 */ 370#define CPUTYP_4_100 1 /* Sun4/100 */
380#define CPUTYP_4_200 2 /* Sun4/200 */ 371#define CPUTYP_4_200 2 /* Sun4/200 */
381#define CPUTYP_4_300 3 /* Sun4/300 */ 372#define CPUTYP_4_300 3 /* Sun4/300 */
382#define CPUTYP_4_400 4 /* Sun4/400 */ 373#define CPUTYP_4_400 4 /* Sun4/400 */
383 374
384#define CPUTYP_SLC 10 /* SPARCstation SLC */ 375#define CPUTYP_SLC 10 /* SPARCstation SLC */
385#define CPUTYP_ELC 11 /* SPARCstation ELC */ 376#define CPUTYP_ELC 11 /* SPARCstation ELC */
386#define CPUTYP_IPX 12 /* SPARCstation IPX */ 377#define CPUTYP_IPX 12 /* SPARCstation IPX */
387#define CPUTYP_IPC 13 /* SPARCstation IPC */ 378#define CPUTYP_IPC 13 /* SPARCstation IPC */
388#define CPUTYP_1 14 /* SPARCstation 1 */ 379#define CPUTYP_1 14 /* SPARCstation 1 */
389#define CPUTYP_1P 15 /* SPARCstation 1+ */ 380#define CPUTYP_1P 15 /* SPARCstation 1+ */
390#define CPUTYP_2 16 /* SPARCstation 2 */ 381#define CPUTYP_2 16 /* SPARCstation 2 */
391 382
392/* We classify the Sun4m's by feature, not by model (XXX: do same for 4/4c) */ 383/* We classify the Sun4m's by feature, not by model (XXX: do same for 4/4c) */
393#define CPUTYP_SS2_MBUS_MXCC 20 /* SuperSPARC-II, Mbus, MXCC (SS20) */ 384#define CPUTYP_SS2_MBUS_MXCC 20 /* SuperSPARC-II, Mbus, MXCC (SS20) */
394#define CPUTYP_SS1_MBUS_MXCC 21 /* SuperSPARC-I, Mbus, MXCC (SS10) */ 385#define CPUTYP_SS1_MBUS_MXCC 21 /* SuperSPARC-I, Mbus, MXCC (SS10) */
395#define CPUTYP_SS2_MBUS_NOMXCC 22 /* SuperSPARC-II, on MBus w/o MXCC */ 386#define CPUTYP_SS2_MBUS_NOMXCC 22 /* SuperSPARC-II, on MBus w/o MXCC */
396#define CPUTYP_SS1_MBUS_NOMXCC 23 /* SuperSPARC-I, on MBus w/o MXCC */ 387#define CPUTYP_SS1_MBUS_NOMXCC 23 /* SuperSPARC-I, on MBus w/o MXCC */
397#define CPUTYP_MS2 24 /* MicroSPARC-2 */ 388#define CPUTYP_MS2 24 /* MicroSPARC-2 */
398#define CPUTYP_MS1 25 /* MicroSPARC-1 */ 389#define CPUTYP_MS1 25 /* MicroSPARC-1 */
399#define CPUTYP_HS_MBUS 26 /* MBus-based HyperSPARC */ 390#define CPUTYP_HS_MBUS 26 /* MBus-based HyperSPARC */
400#define CPUTYP_CYPRESS 27 /* MBus-based Cypress */ 391#define CPUTYP_CYPRESS 27 /* MBus-based Cypress */
401 392
402/* 393/*
403 * CPU flags 394 * CPU flags
404 */ 395 */
405#define CPUFLG_CACHEPAGETABLES 0x1 /* caching pagetables OK on Sun4m */ 396#define CPUFLG_CACHEPAGETABLES 0x1 /* caching pagetables OK on Sun4m */
406#define CPUFLG_CACHEIOMMUTABLES 0x2 /* caching IOMMU translations OK */ 397#define CPUFLG_CACHEIOMMUTABLES 0x2 /* caching IOMMU translations OK */
407#define CPUFLG_CACHEDVMA 0x4 /* DVMA goes through cache */ 398#define CPUFLG_CACHEDVMA 0x4 /* DVMA goes through cache */
408#define CPUFLG_SUN4CACHEBUG 0x8 /* trap page can't be cached */ 399#define CPUFLG_SUN4CACHEBUG 0x8 /* trap page can't be cached */
409#define CPUFLG_CACHE_MANDATORY 0x10 /* if cache is on, don't use 400#define CPUFLG_CACHE_MANDATORY 0x10 /* if cache is on, don't use
410 uncached access */ 401 uncached access */
411#define CPUFLG_HATCHED 0x1000 /* CPU is alive */ 402#define CPUFLG_HATCHED 0x1000 /* CPU is alive */
412#define CPUFLG_PAUSED 0x2000 /* CPU is paused */ 403#define CPUFLG_PAUSED 0x2000 /* CPU is paused */
413#define CPUFLG_GOTMSG 0x4000 /* CPU got an lev13 IPI */ 404#define CPUFLG_GOTMSG 0x4000 /* CPU got an lev13 IPI */
414#define CPUFLG_READY 0x8000 /* CPU available for IPI */ 405#define CPUFLG_READY 0x8000 /* CPU available for IPI */
415 406
416 407
417#define CPU_INFO_ITERATOR int 408#define CPU_INFO_ITERATOR int
418#ifdef MULTIPROCESSOR 409/*
419#define CPU_INFO_FOREACH(cii, cp) cii = 0; cp = cpus[cii], cii < sparc_ncpus; cii++ 410 * Provide two forms of CPU_INFO_FOREACH. One fast one for non-modular
 411 * non-SMP kernels, and the other for everyone else. Both work in the
 412 * non-SMP case, just involving an extra indirection through cpus[0] for
 413 * the portable version.
 414 */
 415#if defined(MULTIPROCESSOR) || defined(MODULAR) || defined(_MODULE)
 416#define CPU_INFO_FOREACH(cii, cp) cii = 0; (cp = cpus[cii]) && cp->eintstack && cii < sparc_ncpus; cii++
420#else 417#else
421#define CPU_INFO_FOREACH(cii, cp) (void)cii, cp = curcpu(); cp != NULL; cp = NULL 418#define CPU_INFO_FOREACH(cii, cp) (void)cii, cp = curcpu(); cp != NULL; cp = NULL
422#endif 419#endif
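
As a minimal usage sketch (not part of this commit), either form of
CPU_INFO_FOREACH expands to the three clauses of a for statement, so a
caller iterates over the attached CPUs like this; the flag test and the
printf are purely illustrative:

	CPU_INFO_ITERATOR cii;
	struct cpu_info *cp;

	for (CPU_INFO_FOREACH(cii, cp)) {
		/* cp walks every attached cpu_info */
		if (cp->flags & CPUFLG_READY)
			printf("cpu at mid %d is ready\n", cp->mid);
	}
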
423 420
424/* 421/*
425 * Useful macros. 422 * Useful macros.
426 */ 423 */
427#define CPU_NOTREADY(cpi) ((cpi) == NULL || cpuinfo.mid == (cpi)->mid || \ 424#define CPU_NOTREADY(cpi) ((cpi) == NULL || cpuinfo.mid == (cpi)->mid || \
428 ((cpi)->flags & CPUFLG_READY) == 0) 425 ((cpi)->flags & CPUFLG_READY) == 0)
429 426
430/* 427/*
431 * Related function prototypes 428 * Related function prototypes
432 */ 429 */
433void getcpuinfo (struct cpu_info *sc, int node); 430void getcpuinfo (struct cpu_info *sc, int node);
434void mmu_install_tables (struct cpu_info *); 431void mmu_install_tables (struct cpu_info *);
435void pmap_alloc_cpu (struct cpu_info *); 432void pmap_alloc_cpu (struct cpu_info *);
436 433
437#define CPUSET_ALL 0xffffffffU /* xcall to all configured CPUs */ 434#define CPUSET_ALL 0xffffffffU /* xcall to all configured CPUs */
438 435
439#if defined(MULTIPROCESSOR) 436#if defined(MULTIPROCESSOR)
440typedef int (*xcall_func_t)(int, int, int); 437void cpu_init_system(void);
 438typedef void (*xcall_func_t)(int, int, int);
441typedef void (*xcall_trap_t)(int, int, int); 439typedef void (*xcall_trap_t)(int, int, int);
442void xcall(xcall_func_t, xcall_trap_t, int, int, int, u_int); 440void xcall(xcall_func_t, xcall_trap_t, int, int, int, u_int);
443/* Shorthand */ 441/* Shorthand */
444#define XCALL0(f,cpuset) \ 442#define XCALL0(f,cpuset) \
445 xcall((xcall_func_t)f, NULL, 0, 0, 0, cpuset) 443 xcall((xcall_func_t)f, NULL, 0, 0, 0, cpuset)
446#define XCALL1(f,a1,cpuset) \ 444#define XCALL1(f,a1,cpuset) \
447 xcall((xcall_func_t)f, NULL, (int)a1, 0, 0, cpuset) 445 xcall((xcall_func_t)f, NULL, (int)a1, 0, 0, cpuset)
448#define XCALL2(f,a1,a2,cpuset) \ 446#define XCALL2(f,a1,a2,cpuset) \
449 xcall((xcall_func_t)f, NULL, (int)a1, (int)a2, 0, cpuset) 447 xcall((xcall_func_t)f, NULL, (int)a1, (int)a2, 0, cpuset)
450#define XCALL3(f,a1,a2,a3,cpuset) \ 448#define XCALL3(f,a1,a2,a3,cpuset) \
451 xcall((xcall_func_t)f, NULL, (int)a1, (int)a2, (int)a3, cpuset) 449 xcall((xcall_func_t)f, NULL, (int)a1, (int)a2, (int)a3, cpuset)
452 450
453#define FXCALL0(f,tf,cpuset) \ 451#define FXCALL0(f,tf,cpuset) \
454 xcall((xcall_func_t)f, (xcall_trap_t)tf, 0, 0, 0, cpuset) 452 xcall((xcall_func_t)f, (xcall_trap_t)tf, 0, 0, 0, cpuset)
455#define FXCALL1(f,tf,a1,cpuset) \ 453#define FXCALL1(f,tf,a1,cpuset) \
456 xcall((xcall_func_t)f, (xcall_trap_t)tf, (int)a1, 0, 0, cpuset) 454 xcall((xcall_func_t)f, (xcall_trap_t)tf, (int)a1, 0, 0, cpuset)
457#define FXCALL2(f,tf,a1,a2,cpuset) \ 455#define FXCALL2(f,tf,a1,a2,cpuset) \
458 xcall((xcall_func_t)f, (xcall_trap_t)tf, (int)a1, (int)a2, 0, cpuset) 456 xcall((xcall_func_t)f, (xcall_trap_t)tf, (int)a1, (int)a2, 0, cpuset)
459#define FXCALL3(f,tf,a1,a2,a3,cpuset) \ 457#define FXCALL3(f,tf,a1,a2,a3,cpuset) \
460 xcall((xcall_func_t)f, (xcall_trap_t)tf, (int)a1, (int)a2, (int)a3, cpuset) 458 xcall((xcall_func_t)f, (xcall_trap_t)tf, (int)a1, (int)a2, (int)a3, cpuset)
461#else 459#else
462#define XCALL0(f,cpuset) /**/ 460#define XCALL0(f,cpuset) /**/
463#define XCALL1(f,a1,cpuset) /**/ 461#define XCALL1(f,a1,cpuset) /**/
464#define XCALL2(f,a1,a2,cpuset) /**/ 462#define XCALL2(f,a1,a2,cpuset) /**/
465#define XCALL3(f,a1,a2,a3,cpuset) /**/ 463#define XCALL3(f,a1,a2,a3,cpuset) /**/
466#define FXCALL0(f,tf,cpuset) /**/ 464#define FXCALL0(f,tf,cpuset) /**/
467#define FXCALL1(f,tf,a1,cpuset) /**/ 465#define FXCALL1(f,tf,a1,cpuset) /**/
468#define FXCALL2(f,tf,a1,a2,cpuset) /**/ 466#define FXCALL2(f,tf,a1,a2,cpuset) /**/
469#define FXCALL3(f,tf,a1,a2,a3,cpuset) /**/ 467#define FXCALL3(f,tf,a1,a2,a3,cpuset) /**/
470#endif /* MULTIPROCESSOR */ 468#endif /* MULTIPROCESSOR */
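
A hedged sketch of how these wrappers are meant to be used on a
MULTIPROCESSOR kernel (the helper name below is hypothetical, not taken
from pmap.c): broadcast the single-processor page flush, together with
its fast-trap twin, to all CPUs via CPUSET_ALL.

	/* Hypothetical helper: flush one page on all CPUs. */
	static inline void
	smp_flush_page_sketch(int va, int ctx)
	{
		FXCALL2(cpuinfo.sp_vcache_flush_page,
		    cpuinfo.ft_vcache_flush_page, va, ctx, CPUSET_ALL);
	}
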
471 469
472extern int bootmid; /* Module ID of boot CPU */ 470extern int bootmid; /* Module ID of boot CPU */
473#define CPU_MID2CPUNO(mid) ((mid) != 0 ? (mid) - 8 : 0) 471#define CPU_MID2CPUNO(mid) ((mid) != 0 ? (mid) - 8 : 0)
474 472
475#ifdef MULTIPROCESSOR 
476extern struct cpu_info *cpus[]; 473extern struct cpu_info *cpus[];
 474#ifdef MULTIPROCESSOR
477extern u_int cpu_ready_mask; /* the set of CPUs marked as READY */ 475extern u_int cpu_ready_mask; /* the set of CPUs marked as READY */
478#endif 476#endif
479 477
480#define cpuinfo (*(struct cpu_info *)CPUINFO_VA) 478#define cpuinfo (*(struct cpu_info *)CPUINFO_VA)
481 479
482 480
483#endif /* _sparc_cpuvar_h */ 481#endif /* _sparc_cpuvar_h */

cvs diff -r1.103 -r1.104 src/sys/arch/sparc/sparc/intr.c

--- src/sys/arch/sparc/sparc/intr.c 2009/05/18 00:25:15 1.103
+++ src/sys/arch/sparc/sparc/intr.c 2009/05/27 02:19:50 1.104
@@ -1,796 +1,796 @@ @@ -1,796 +1,796 @@
1/* $NetBSD: intr.c,v 1.103 2009/05/18 00:25:15 mrg Exp $ */ 1/* $NetBSD: intr.c,v 1.104 2009/05/27 02:19:50 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This software was developed by the Computer Systems Engineering group 7 * This software was developed by the Computer Systems Engineering group
8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 * contributed to Berkeley. 9 * contributed to Berkeley.
10 * 10 *
11 * All advertising materials mentioning features or use of this software 11 * All advertising materials mentioning features or use of this software
12 * must display the following acknowledgement: 12 * must display the following acknowledgement:
13 * This product includes software developed by the University of 13 * This product includes software developed by the University of
14 * California, Lawrence Berkeley Laboratory. 14 * California, Lawrence Berkeley Laboratory.
15 * 15 *
16 * Redistribution and use in source and binary forms, with or without 16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions 17 * modification, are permitted provided that the following conditions
18 * are met: 18 * are met:
19 * 1. Redistributions of source code must retain the above copyright 19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer. 20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright 21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the 22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution. 23 * documentation and/or other materials provided with the distribution.
24 * 3. Neither the name of the University nor the names of its contributors 24 * 3. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software 25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission. 26 * without specific prior written permission.
27 * 27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE. 38 * SUCH DAMAGE.
39 * 39 *
40 * @(#)intr.c 8.3 (Berkeley) 11/11/93 40 * @(#)intr.c 8.3 (Berkeley) 11/11/93
41 */ 41 */
42 42
43#include <sys/cdefs.h> 43#include <sys/cdefs.h>
44__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.103 2009/05/18 00:25:15 mrg Exp $"); 44__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.104 2009/05/27 02:19:50 mrg Exp $");
45 45
46#include "opt_multiprocessor.h" 46#include "opt_multiprocessor.h"
47#include "opt_sparc_arch.h" 47#include "opt_sparc_arch.h"
48 48
49#include <sys/param.h> 49#include <sys/param.h>
50#include <sys/systm.h> 50#include <sys/systm.h>
51#include <sys/kernel.h> 51#include <sys/kernel.h>
52#include <sys/malloc.h> 52#include <sys/malloc.h>
53#include <sys/cpu.h> 53#include <sys/cpu.h>
54#include <sys/intr.h> 54#include <sys/intr.h>
55#include <sys/simplelock.h> 55#include <sys/simplelock.h>
56 56
57#include <uvm/uvm_extern.h> 57#include <uvm/uvm_extern.h>
58 58
59#include <dev/cons.h> 59#include <dev/cons.h>
60 60
61#include <machine/ctlreg.h> 61#include <machine/ctlreg.h>
62#include <machine/instr.h> 62#include <machine/instr.h>
63#include <machine/trap.h> 63#include <machine/trap.h>
64#include <machine/promlib.h> 64#include <machine/promlib.h>
65 65
66#include <sparc/sparc/asm.h> 66#include <sparc/sparc/asm.h>
67#include <sparc/sparc/cpuvar.h> 67#include <sparc/sparc/cpuvar.h>
68 68
69#if defined(MULTIPROCESSOR) && defined(DDB) 69#if defined(MULTIPROCESSOR) && defined(DDB)
70#include <machine/db_machdep.h> 70#include <machine/db_machdep.h>
71#endif 71#endif
72 72
73#if defined(MULTIPROCESSOR) 73#if defined(MULTIPROCESSOR)
74void *xcall_cookie; 74void *xcall_cookie;
75 75
76/* Stats */ 76/* Stats */
77struct evcnt lev13_evcnt = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"xcall","std"); 77struct evcnt lev13_evcnt = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"xcall","std");
78struct evcnt lev14_evcnt = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"xcall","fast"); 78struct evcnt lev14_evcnt = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"xcall","fast");
79EVCNT_ATTACH_STATIC(lev13_evcnt); 79EVCNT_ATTACH_STATIC(lev13_evcnt);
80EVCNT_ATTACH_STATIC(lev14_evcnt); 80EVCNT_ATTACH_STATIC(lev14_evcnt);
81#endif 81#endif
82 82
83 83
84void strayintr(struct clockframe *); 84void strayintr(struct clockframe *);
85#ifdef DIAGNOSTIC 85#ifdef DIAGNOSTIC
86void bogusintr(struct clockframe *); 86void bogusintr(struct clockframe *);
87#endif 87#endif
88 88
89/* 89/*
90 * Stray interrupt handler. Clear it if possible. 90 * Stray interrupt handler. Clear it if possible.
91 * If not, and if we get 10 interrupts in 10 seconds, panic. 91 * If not, and if we get 10 interrupts in 10 seconds, panic.
92 * XXXSMP: We are holding the kernel lock at entry & exit. 92 * XXXSMP: We are holding the kernel lock at entry & exit.
93 */ 93 */
94void 94void
95strayintr(struct clockframe *fp) 95strayintr(struct clockframe *fp)
96{ 96{
97 static int straytime, nstray; 97 static int straytime, nstray;
98 char bits[64]; 98 char bits[64];
99 int timesince; 99 int timesince;
100 100
101 snprintb(bits, sizeof(bits), PSR_BITS, fp->psr); 101 snprintb(bits, sizeof(bits), PSR_BITS, fp->psr);
102 printf("stray interrupt cpu%d ipl 0x%x pc=0x%x npc=0x%x psr=%s\n", 102 printf("stray interrupt cpu%d ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
103 cpu_number(), fp->ipl, fp->pc, fp->npc, bits); 103 cpu_number(), fp->ipl, fp->pc, fp->npc, bits);
104 104
105 timesince = time_uptime - straytime; 105 timesince = time_uptime - straytime;
106 if (timesince <= 10) { 106 if (timesince <= 10) {
107 if (++nstray > 10) 107 if (++nstray > 10)
108 panic("crazy interrupts"); 108 panic("crazy interrupts");
109 } else { 109 } else {
110 straytime = time_uptime; 110 straytime = time_uptime;
111 nstray = 1; 111 nstray = 1;
112 } 112 }
113} 113}
114 114
115 115
116#ifdef DIAGNOSTIC 116#ifdef DIAGNOSTIC
117/* 117/*
118 * Bogus interrupt for which neither hard nor soft interrupt bit in 118 * Bogus interrupt for which neither hard nor soft interrupt bit in
119 * the IPR was set. 119 * the IPR was set.
120 */ 120 */
121void 121void
122bogusintr(struct clockframe *fp) 122bogusintr(struct clockframe *fp)
123{ 123{
124 char bits[64]; 124 char bits[64];
125 125
126 snprintb(bits, sizeof(bits), PSR_BITS, fp->psr); 126 snprintb(bits, sizeof(bits), PSR_BITS, fp->psr);
127 printf("cpu%d: bogus interrupt ipl 0x%x pc=0x%x npc=0x%x psr=%s\n", 127 printf("cpu%d: bogus interrupt ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
128 cpu_number(), fp->ipl, fp->pc, fp->npc, bits); 128 cpu_number(), fp->ipl, fp->pc, fp->npc, bits);
129} 129}
130#endif /* DIAGNOSTIC */ 130#endif /* DIAGNOSTIC */
131 131
132/* 132/*
133 * Get module ID of interrupt target. 133 * Get module ID of interrupt target.
134 */ 134 */
135u_int 135u_int
136getitr(void) 136getitr(void)
137{ 137{
138#if defined(MULTIPROCESSOR) 138#if defined(MULTIPROCESSOR)
139 u_int v; 139 u_int v;
140 140
141 if (!CPU_ISSUN4M || sparc_ncpus <= 1) 141 if (!CPU_ISSUN4M || sparc_ncpus <= 1)
142 return (0); 142 return (0);
143 143
144 v = *((u_int *)ICR_ITR); 144 v = *((u_int *)ICR_ITR);
145 return (v + 8); 145 return (v + 8);
146#else 146#else
147 return (0); 147 return (0);
148#endif 148#endif
149} 149}
150 150
151/* 151/*
152 * Set interrupt target. 152 * Set interrupt target.
153 * Return previous value. 153 * Return previous value.
154 */ 154 */
155u_int 155u_int
156setitr(u_int mid) 156setitr(u_int mid)
157{ 157{
158#if defined(MULTIPROCESSOR) 158#if defined(MULTIPROCESSOR)
159 u_int v; 159 u_int v;
160 160
161 if (!CPU_ISSUN4M || sparc_ncpus <= 1) 161 if (!CPU_ISSUN4M || sparc_ncpus <= 1)
162 return (0); 162 return (0);
163 163
164 v = *((u_int *)ICR_ITR); 164 v = *((u_int *)ICR_ITR);
165 *((u_int *)ICR_ITR) = CPU_MID2CPUNO(mid); 165 *((u_int *)ICR_ITR) = CPU_MID2CPUNO(mid);
166 return (v + 8); 166 return (v + 8);
167#else 167#else
168 return (0); 168 return (0);
169#endif 169#endif
170} 170}
171 171
172#if (defined(SUN4M) && !defined(MSIIEP)) || defined(SUN4D) 172#if (defined(SUN4M) && !defined(MSIIEP)) || defined(SUN4D)
173void nmi_hard(void); 173void nmi_hard(void);
174void nmi_soft(struct trapframe *); 174void nmi_soft(struct trapframe *);
175 175
176int (*memerr_handler)(void); 176int (*memerr_handler)(void);
177int (*sbuserr_handler)(void); 177int (*sbuserr_handler)(void);
178int (*vmeerr_handler)(void); 178int (*vmeerr_handler)(void);
179int (*moduleerr_handler)(void); 179int (*moduleerr_handler)(void);
180 180
181#if defined(MULTIPROCESSOR) 181#if defined(MULTIPROCESSOR)
182volatile int nmi_hard_wait = 0; 182volatile int nmi_hard_wait = 0;
183struct simplelock nmihard_lock = SIMPLELOCK_INITIALIZER; 183struct simplelock nmihard_lock = SIMPLELOCK_INITIALIZER;
184int drop_into_rom_on_fatal = 1; 184int drop_into_rom_on_fatal = 1;
185#endif 185#endif
186 186
187void 187void
188nmi_hard(void) 188nmi_hard(void)
189{ 189{
190 /* 190 /*
191 * A level 15 hard interrupt. 191 * A level 15 hard interrupt.
192 */ 192 */
193 int fatal = 0; 193 int fatal = 0;
194 uint32_t si; 194 uint32_t si;
195 char bits[64]; 195 char bits[64];
196 u_int afsr, afva; 196 u_int afsr, afva;
197 197
198 afsr = afva = 0; 198 afsr = afva = 0;
199 if ((*cpuinfo.get_asyncflt)(&afsr, &afva) == 0) { 199 if ((*cpuinfo.get_asyncflt)(&afsr, &afva) == 0) {
200 snprintb(bits, sizeof(bits), AFSR_BITS, afsr); 200 snprintb(bits, sizeof(bits), AFSR_BITS, afsr);
201 printf("Async registers (mid %d): afsr=%s; afva=0x%x%x\n", 201 printf("Async registers (mid %d): afsr=%s; afva=0x%x%x\n",
202 cpuinfo.mid, bits, 202 cpuinfo.mid, bits,
203 (afsr & AFSR_AFA) >> AFSR_AFA_RSHIFT, afva); 203 (afsr & AFSR_AFA) >> AFSR_AFA_RSHIFT, afva);
204 } 204 }
205 205
206#if defined(MULTIPROCESSOR) 206#if defined(MULTIPROCESSOR)
207 /* 207 /*
208 * Increase nmi_hard_wait. If we aren't the master, loop while this 208 * Increase nmi_hard_wait. If we aren't the master, loop while this
209 * variable is non-zero. If we are the master, loop while this 209 * variable is non-zero. If we are the master, loop while this
210 * variable is less than the number of cpus. 210 * variable is less than the number of cpus.
211 */ 211 */
212 simple_lock(&nmihard_lock); 212 simple_lock(&nmihard_lock);
213 nmi_hard_wait++; 213 nmi_hard_wait++;
214 simple_unlock(&nmihard_lock); 214 simple_unlock(&nmihard_lock);
215 215
216 if (cpuinfo.master == 0) { 216 if (cpuinfo.master == 0) {
217 while (nmi_hard_wait) 217 while (nmi_hard_wait)
218 ; 218 ;
219 return; 219 return;
220 } else { 220 } else {
221 int n = 100000; 221 int n = 100000;
222 222
223 while (nmi_hard_wait < sparc_ncpus) { 223 while (nmi_hard_wait < sparc_ncpus) {
224 DELAY(1); 224 DELAY(1);
225 if (n-- > 0) 225 if (n-- > 0)
226 continue; 226 continue;
227 printf("nmi_hard: SMP botch."); 227 printf("nmi_hard: SMP botch.");
228 break; 228 break;
229 } 229 }
230 } 230 }
231#endif 231#endif
232 232
233 /* 233 /*
234 * Examine pending system interrupts. 234 * Examine pending system interrupts.
235 */ 235 */
236 si = *((uint32_t *)ICR_SI_PEND); 236 si = *((uint32_t *)ICR_SI_PEND);
237 snprintb(bits, sizeof(bits), SINTR_BITS, si); 237 snprintb(bits, sizeof(bits), SINTR_BITS, si);
238 printf("cpu%d: NMI: system interrupts: %s\n", cpu_number(), bits); 238 printf("cpu%d: NMI: system interrupts: %s\n", cpu_number(), bits);
239  239
240 240
241 if ((si & SINTR_M) != 0) { 241 if ((si & SINTR_M) != 0) {
242 /* ECC memory error */ 242 /* ECC memory error */
243 if (memerr_handler != NULL) 243 if (memerr_handler != NULL)
244 fatal |= (*memerr_handler)(); 244 fatal |= (*memerr_handler)();
245 } 245 }
246 if ((si & SINTR_I) != 0) { 246 if ((si & SINTR_I) != 0) {
247 /* MBus/SBus async error */ 247 /* MBus/SBus async error */
248 if (sbuserr_handler != NULL) 248 if (sbuserr_handler != NULL)
249 fatal |= (*sbuserr_handler)(); 249 fatal |= (*sbuserr_handler)();
250 } 250 }
251 if ((si & SINTR_V) != 0) { 251 if ((si & SINTR_V) != 0) {
252 /* VME async error */ 252 /* VME async error */
253 if (vmeerr_handler != NULL) 253 if (vmeerr_handler != NULL)
254 fatal |= (*vmeerr_handler)(); 254 fatal |= (*vmeerr_handler)();
255 } 255 }
256 if ((si & SINTR_ME) != 0) { 256 if ((si & SINTR_ME) != 0) {
257 /* Module async error */ 257 /* Module async error */
258 if (moduleerr_handler != NULL) 258 if (moduleerr_handler != NULL)
259 fatal |= (*moduleerr_handler)(); 259 fatal |= (*moduleerr_handler)();
260 } 260 }
261 261
262#if defined(MULTIPROCESSOR) 262#if defined(MULTIPROCESSOR)
263 /* 263 /*
264 * Tell everyone else we've finished dealing with the hard NMI. 264 * Tell everyone else we've finished dealing with the hard NMI.
265 */ 265 */
266 simple_lock(&nmihard_lock); 266 simple_lock(&nmihard_lock);
267 nmi_hard_wait = 0; 267 nmi_hard_wait = 0;
268 simple_unlock(&nmihard_lock); 268 simple_unlock(&nmihard_lock);
269 if (fatal && drop_into_rom_on_fatal) { 269 if (fatal && drop_into_rom_on_fatal) {
270 prom_abort(); 270 prom_abort();
271 return; 271 return;
272 } 272 }
273#endif 273#endif
274 274
275 if (fatal) 275 if (fatal)
276 panic("nmi"); 276 panic("nmi");
277} 277}
278 278
279/* 279/*
280 * Non-maskable soft interrupt level 15 handler 280 * Non-maskable soft interrupt level 15 handler
281 */ 281 */
282void 282void
283nmi_soft(struct trapframe *tf) 283nmi_soft(struct trapframe *tf)
284{ 284{
285 if (cpuinfo.mailbox) { 285 if (cpuinfo.mailbox) {
286 /* Check PROM messages */ 286 /* Check PROM messages */
287 uint8_t msg = *(uint8_t *)cpuinfo.mailbox; 287 uint8_t msg = *(uint8_t *)cpuinfo.mailbox;
288 switch (msg) { 288 switch (msg) {
289 case OPENPROM_MBX_STOP: 289 case OPENPROM_MBX_STOP:
290 case OPENPROM_MBX_WD: 290 case OPENPROM_MBX_WD:
291 /* In case there's an xcall in progress (unlikely) */ 291 /* In case there's an xcall in progress (unlikely) */
292 spl0(); 292 spl0();
293 cpuinfo.flags &= ~CPUFLG_READY; 293 cpuinfo.flags &= ~CPUFLG_READY;
294#ifdef MULTIPROCESSOR 294#ifdef MULTIPROCESSOR
295 cpu_ready_mask &= ~(1 << cpu_number()); 295 cpu_ready_mask &= ~(1 << cpu_number());
296#endif 296#endif
297 prom_cpustop(0); 297 prom_cpustop(0);
298 break; 298 break;
299 case OPENPROM_MBX_ABORT: 299 case OPENPROM_MBX_ABORT:
300 case OPENPROM_MBX_BPT: 300 case OPENPROM_MBX_BPT:
301 prom_cpuidle(0); 301 prom_cpuidle(0);
302 /* 302 /*
303 * We emerge here after someone does a 303 * We emerge here after someone does a
304 * prom_resumecpu(ournode). 304 * prom_resumecpu(ournode).
305 */ 305 */
306 return; 306 return;
307 default: 307 default:
308 break; 308 break;
309 } 309 }
310 } 310 }
311 311
312#if defined(MULTIPROCESSOR) 312#if defined(MULTIPROCESSOR)
313 switch (cpuinfo.msg_lev15.tag) { 313 switch (cpuinfo.msg_lev15.tag) {
314 case XPMSG15_PAUSECPU: 314 case XPMSG15_PAUSECPU:
315 /* XXX - assumes DDB is the only user of mp_pause_cpu() */ 315 /* XXX - assumes DDB is the only user of mp_pause_cpu() */
316 cpuinfo.flags |= CPUFLG_PAUSED; 316 cpuinfo.flags |= CPUFLG_PAUSED;
317#if defined(DDB) 317#if defined(DDB)
318 /* trap(T_DBPAUSE) */ 318 /* trap(T_DBPAUSE) */
319 __asm("ta 0x8b"); 319 __asm("ta 0x8b");
320#else 320#else
321 while (cpuinfo.flags & CPUFLG_PAUSED) 321 while (cpuinfo.flags & CPUFLG_PAUSED)
322 /* spin */; 322 /* spin */;
323#endif /* DDB */ 323#endif /* DDB */
324 } 324 }
325 cpuinfo.msg_lev15.tag = 0; 325 cpuinfo.msg_lev15.tag = 0;
326#endif /* MULTIPROCESSOR */ 326#endif /* MULTIPROCESSOR */
327} 327}
328 328
329#if defined(MULTIPROCESSOR) 329#if defined(MULTIPROCESSOR)
330/* 330/*
331 * Respond to an xcall() request from another CPU. 331 * Respond to an xcall() request from another CPU.
332 */ 332 */
333static void 333static void
334xcallintr(void *v) 334xcallintr(void *v)
335{ 335{
336 336
337 /* Tally */ 337 /* Tally */
338 lev13_evcnt.ev_count++; 338 lev13_evcnt.ev_count++;
339 339
340 /* notyet - cpuinfo.msg.received = 1; */ 340 /* notyet - cpuinfo.msg.received = 1; */
341 switch (cpuinfo.msg.tag) { 341 switch (cpuinfo.msg.tag) {
342 case XPMSG_FUNC: 342 case XPMSG_FUNC:
343 { 343 {
344 volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func; 344 volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;
345 345
346 if (p->func) 346 if (p->func)
347 p->retval = (*p->func)(p->arg0, p->arg1, p->arg2); 347 (*p->func)(p->arg0, p->arg1, p->arg2);
348 break; 348 break;
349 } 349 }
350 } 350 }
351 cpuinfo.msg.tag = 0; 351 cpuinfo.msg.tag = 0;
352 cpuinfo.msg.complete = 1; 352 cpuinfo.msg.complete = 1;
353} 353}
354#endif /* MULTIPROCESSOR */ 354#endif /* MULTIPROCESSOR */
355#endif /* SUN4M || SUN4D */ 355#endif /* SUN4M || SUN4D */
356 356
357 357
358#ifdef MSIIEP 358#ifdef MSIIEP
359/* 359/*
 360 * It's easier to make this separate so as not to further obscure the 360 * It's easier to make this separate so as not to further obscure the
361 * SUN4M case with more ifdefs. There's no common functionality 361 * SUN4M case with more ifdefs. There's no common functionality
362 * anyway. 362 * anyway.
363 */ 363 */
364 364
365#include <sparc/sparc/msiiepreg.h> 365#include <sparc/sparc/msiiepreg.h>
366 366
367void nmi_hard_msiiep(void); 367void nmi_hard_msiiep(void);
368void nmi_soft_msiiep(void); 368void nmi_soft_msiiep(void);
369 369
370 370
371void 371void
372nmi_hard_msiiep(void) 372nmi_hard_msiiep(void)
373{ 373{
374 uint32_t si; 374 uint32_t si;
375 char bits[128]; 375 char bits[128];
376 int fatal = 0; 376 int fatal = 0;
377 377
378 si = mspcic_read_4(pcic_sys_ipr); 378 si = mspcic_read_4(pcic_sys_ipr);
379 snprintb(bits, sizeof(bits), MSIIEP_SYS_IPR_BITS, si); 379 snprintb(bits, sizeof(bits), MSIIEP_SYS_IPR_BITS, si);
380 printf("NMI: system interrupts: %s\n", bits); 380 printf("NMI: system interrupts: %s\n", bits);
381  381
382 382
383 if (si & MSIIEP_SYS_IPR_MEM_FAULT) { 383 if (si & MSIIEP_SYS_IPR_MEM_FAULT) {
384 uint32_t afsr, afar, mfsr, mfar; 384 uint32_t afsr, afar, mfsr, mfar;
385 385
386 afar = *(volatile uint32_t *)MSIIEP_AFAR; 386 afar = *(volatile uint32_t *)MSIIEP_AFAR;
387 afsr = *(volatile uint32_t *)MSIIEP_AFSR; 387 afsr = *(volatile uint32_t *)MSIIEP_AFSR;
388 388
389 mfar = *(volatile uint32_t *)MSIIEP_MFAR; 389 mfar = *(volatile uint32_t *)MSIIEP_MFAR;
390 mfsr = *(volatile uint32_t *)MSIIEP_MFSR; 390 mfsr = *(volatile uint32_t *)MSIIEP_MFSR;
391 391
392 if (afsr & MSIIEP_AFSR_ERR) { 392 if (afsr & MSIIEP_AFSR_ERR) {
393 snprintb(bits, sizeof(bits), MSIIEP_AFSR_BITS, afsr); 393 snprintb(bits, sizeof(bits), MSIIEP_AFSR_BITS, afsr);
394 printf("async fault: afsr=%s; afar=%08x\n", bits, afsr); 394 printf("async fault: afsr=%s; afar=%08x\n", bits, afsr);
395 } 395 }
396 396
397 if (mfsr & MSIIEP_MFSR_ERR) { 397 if (mfsr & MSIIEP_MFSR_ERR) {
398 snprintb(bits, sizeof(bits), MSIIEP_MFSR_BITS, mfsr); 398 snprintb(bits, sizeof(bits), MSIIEP_MFSR_BITS, mfsr);
399 printf("mem fault: mfsr=%s; mfar=%08x\n", bits, mfsr); 399 printf("mem fault: mfsr=%s; mfar=%08x\n", bits, mfsr);
400 } 400 }
401 401
402 fatal = 0; 402 fatal = 0;
403 } 403 }
404 404
405 if (si & MSIIEP_SYS_IPR_SERR) { /* XXX */ 405 if (si & MSIIEP_SYS_IPR_SERR) { /* XXX */
406 printf("serr#\n"); 406 printf("serr#\n");
407 fatal = 0; 407 fatal = 0;
408 } 408 }
409 409
410 if (si & MSIIEP_SYS_IPR_DMA_ERR) { 410 if (si & MSIIEP_SYS_IPR_DMA_ERR) {
411 printf("dma: %08x\n", 411 printf("dma: %08x\n",
412 mspcic_read_stream_4(pcic_iotlb_err_addr)); 412 mspcic_read_stream_4(pcic_iotlb_err_addr));
413 fatal = 0; 413 fatal = 0;
414 } 414 }
415 415
416 if (si & MSIIEP_SYS_IPR_PIO_ERR) { 416 if (si & MSIIEP_SYS_IPR_PIO_ERR) {
417 printf("pio: addr=%08x, cmd=%x\n", 417 printf("pio: addr=%08x, cmd=%x\n",
418 mspcic_read_stream_4(pcic_pio_err_addr), 418 mspcic_read_stream_4(pcic_pio_err_addr),
419 mspcic_read_stream_1(pcic_pio_err_cmd)); 419 mspcic_read_stream_1(pcic_pio_err_cmd));
420 fatal = 0; 420 fatal = 0;
421 } 421 }
422 422
423 if (fatal) 423 if (fatal)
424 panic("nmi"); 424 panic("nmi");
425 425
426 /* Clear the NMI if it was PCIC related */ 426 /* Clear the NMI if it was PCIC related */
427 mspcic_write_1(pcic_sys_ipr_clr, MSIIEP_SYS_IPR_CLR_ALL); 427 mspcic_write_1(pcic_sys_ipr_clr, MSIIEP_SYS_IPR_CLR_ALL);
428} 428}
429 429
430 430
431void 431void
432nmi_soft_msiiep(void) 432nmi_soft_msiiep(void)
433{ 433{
434 434
435 panic("soft nmi"); 435 panic("soft nmi");
436} 436}
437 437
438#endif /* MSIIEP */ 438#endif /* MSIIEP */
439 439
440 440
441/* 441/*
442 * Level 15 interrupts are special, and not vectored here. 442 * Level 15 interrupts are special, and not vectored here.
443 * Only `prewired' interrupts appear here; boot-time configured devices 443 * Only `prewired' interrupts appear here; boot-time configured devices
444 * are attached via intr_establish() below. 444 * are attached via intr_establish() below.
445 */ 445 */
446struct intrhand *intrhand[15] = { 446struct intrhand *intrhand[15] = {
447 NULL, /* 0 = error */ 447 NULL, /* 0 = error */
448 NULL, /* 1 = software level 1 + Sbus */ 448 NULL, /* 1 = software level 1 + Sbus */
449 NULL, /* 2 = Sbus level 2 (4m: Sbus L1) */ 449 NULL, /* 2 = Sbus level 2 (4m: Sbus L1) */
450 NULL, /* 3 = SCSI + DMA + Sbus level 3 (4m: L2,lpt)*/ 450 NULL, /* 3 = SCSI + DMA + Sbus level 3 (4m: L2,lpt)*/
451 NULL, /* 4 = software level 4 (tty softint) (scsi) */ 451 NULL, /* 4 = software level 4 (tty softint) (scsi) */
452 NULL, /* 5 = Ethernet + Sbus level 4 (4m: Sbus L3) */ 452 NULL, /* 5 = Ethernet + Sbus level 4 (4m: Sbus L3) */
453 NULL, /* 6 = software level 6 (not used) (4m: enet)*/ 453 NULL, /* 6 = software level 6 (not used) (4m: enet)*/
454 NULL, /* 7 = video + Sbus level 5 */ 454 NULL, /* 7 = video + Sbus level 5 */
455 NULL, /* 8 = Sbus level 6 */ 455 NULL, /* 8 = Sbus level 6 */
456 NULL, /* 9 = Sbus level 7 */ 456 NULL, /* 9 = Sbus level 7 */
457 NULL, /* 10 = counter 0 = clock */ 457 NULL, /* 10 = counter 0 = clock */
458 NULL, /* 11 = floppy */ 458 NULL, /* 11 = floppy */
459 NULL, /* 12 = zs hardware interrupt */ 459 NULL, /* 12 = zs hardware interrupt */
460 NULL, /* 13 = audio chip */ 460 NULL, /* 13 = audio chip */
461 NULL, /* 14 = counter 1 = profiling timer */ 461 NULL, /* 14 = counter 1 = profiling timer */
462}; 462};
463 463
464/* 464/*
465 * Soft interrupts use a separate set of handler chains. 465 * Soft interrupts use a separate set of handler chains.
466 * This is necessary since soft interrupt handlers do not return a value 466 * This is necessary since soft interrupt handlers do not return a value
467 * and therefore cannot be mixed with hardware interrupt handlers on a 467 * and therefore cannot be mixed with hardware interrupt handlers on a
468 * shared handler chain. 468 * shared handler chain.
469 */ 469 */
470struct intrhand *sintrhand[15] = { NULL }; 470struct intrhand *sintrhand[15] = { NULL };
471 471
472static void 472static void
473ih_insert(struct intrhand **head, struct intrhand *ih) 473ih_insert(struct intrhand **head, struct intrhand *ih)
474{ 474{
475 struct intrhand **p, *q; 475 struct intrhand **p, *q;
476 /* 476 /*
477 * This is O(N^2) for long chains, but chains are never long 477 * This is O(N^2) for long chains, but chains are never long
478 * and we do want to preserve order. 478 * and we do want to preserve order.
479 */ 479 */
480 for (p = head; (q = *p) != NULL; p = &q->ih_next) 480 for (p = head; (q = *p) != NULL; p = &q->ih_next)
481 continue; 481 continue;
482 *p = ih; 482 *p = ih;
483 ih->ih_next = NULL; 483 ih->ih_next = NULL;
484} 484}
485 485
486static void 486static void
487ih_remove(struct intrhand **head, struct intrhand *ih) 487ih_remove(struct intrhand **head, struct intrhand *ih)
488{ 488{
489 struct intrhand **p, *q; 489 struct intrhand **p, *q;
490 490
491 for (p = head; (q = *p) != ih; p = &q->ih_next) 491 for (p = head; (q = *p) != ih; p = &q->ih_next)
492 continue; 492 continue;
493 if (q == NULL) 493 if (q == NULL)
494 panic("intr_remove: intrhand %p fun %p arg %p", 494 panic("intr_remove: intrhand %p fun %p arg %p",
495 ih, ih->ih_fun, ih->ih_arg); 495 ih, ih->ih_fun, ih->ih_arg);
496 496
497 *p = q->ih_next; 497 *p = q->ih_next;
498 q->ih_next = NULL; 498 q->ih_next = NULL;
499} 499}
500 500
501static int fastvec; /* marks fast vectors (see below) */ 501static int fastvec; /* marks fast vectors (see below) */
502extern int sparc_interrupt4m[]; 502extern int sparc_interrupt4m[];
503extern int sparc_interrupt44c[]; 503extern int sparc_interrupt44c[];
504 504
505#ifdef DIAGNOSTIC 505#ifdef DIAGNOSTIC
506static void 506static void
507check_tv(int level) 507check_tv(int level)
508{ 508{
509 struct trapvec *tv; 509 struct trapvec *tv;
510 int displ; 510 int displ;
511 511
512 /* double check for legal hardware interrupt */ 512 /* double check for legal hardware interrupt */
513 tv = &trapbase[T_L1INT - 1 + level]; 513 tv = &trapbase[T_L1INT - 1 + level];
514 displ = (CPU_ISSUN4M || CPU_ISSUN4D) 514 displ = (CPU_ISSUN4M || CPU_ISSUN4D)
515 ? &sparc_interrupt4m[0] - &tv->tv_instr[1] 515 ? &sparc_interrupt4m[0] - &tv->tv_instr[1]
516 : &sparc_interrupt44c[0] - &tv->tv_instr[1]; 516 : &sparc_interrupt44c[0] - &tv->tv_instr[1];
517 517
518 /* has to be `mov level,%l3; ba _sparc_interrupt; rdpsr %l0' */ 518 /* has to be `mov level,%l3; ba _sparc_interrupt; rdpsr %l0' */
519 if (tv->tv_instr[0] != I_MOVi(I_L3, level) || 519 if (tv->tv_instr[0] != I_MOVi(I_L3, level) ||
520 tv->tv_instr[1] != I_BA(0, displ) || 520 tv->tv_instr[1] != I_BA(0, displ) ||
521 tv->tv_instr[2] != I_RDPSR(I_L0)) 521 tv->tv_instr[2] != I_RDPSR(I_L0))
522 panic("intr_establish(%d)\n0x%x 0x%x 0x%x != 0x%x 0x%x 0x%x", 522 panic("intr_establish(%d)\n0x%x 0x%x 0x%x != 0x%x 0x%x 0x%x",
523 level, 523 level,
524 tv->tv_instr[0], tv->tv_instr[1], tv->tv_instr[2], 524 tv->tv_instr[0], tv->tv_instr[1], tv->tv_instr[2],
525 I_MOVi(I_L3, level), I_BA(0, displ), I_RDPSR(I_L0)); 525 I_MOVi(I_L3, level), I_BA(0, displ), I_RDPSR(I_L0));
526} 526}
527#endif 527#endif
528 528
529/* 529/*
530 * Wire a fast trap vector. Only one such fast trap is legal for any 530 * Wire a fast trap vector. Only one such fast trap is legal for any
531 * interrupt, and it must be a hardware interrupt. 531 * interrupt, and it must be a hardware interrupt.
532 */ 532 */
533static void 533static void
534inst_fasttrap(int level, void (*vec)(void)) 534inst_fasttrap(int level, void (*vec)(void))
535{ 535{
536 struct trapvec *tv; 536 struct trapvec *tv;
537 u_long hi22, lo10; 537 u_long hi22, lo10;
538 int s; 538 int s;
539 539
540 if (CPU_ISSUN4 || CPU_ISSUN4C) { 540 if (CPU_ISSUN4 || CPU_ISSUN4C) {
541 /* Can't wire to softintr slots */ 541 /* Can't wire to softintr slots */
542 if (level == 1 || level == 4 || level == 6) 542 if (level == 1 || level == 4 || level == 6)
543 return; 543 return;
544 } 544 }
545 545
546#ifdef DIAGNOSTIC 546#ifdef DIAGNOSTIC
547 check_tv(level); 547 check_tv(level);
548#endif 548#endif
549 549
550 tv = &trapbase[T_L1INT - 1 + level]; 550 tv = &trapbase[T_L1INT - 1 + level];
551 hi22 = ((u_long)vec) >> 10; 551 hi22 = ((u_long)vec) >> 10;
552 lo10 = ((u_long)vec) & 0x3ff; 552 lo10 = ((u_long)vec) & 0x3ff;
553 s = splhigh(); 553 s = splhigh();
554 554
555 /* kernel text is write protected -- let us in for a moment */ 555 /* kernel text is write protected -- let us in for a moment */
556 pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, 556 pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE,
557 VM_PROT_READ|VM_PROT_WRITE); 557 VM_PROT_READ|VM_PROT_WRITE);
558 cpuinfo.cache_flush_all(); 558 cpuinfo.cache_flush_all();
559 tv->tv_instr[0] = I_SETHI(I_L3, hi22); /* sethi %hi(vec),%l3 */ 559 tv->tv_instr[0] = I_SETHI(I_L3, hi22); /* sethi %hi(vec),%l3 */
560 tv->tv_instr[1] = I_JMPLri(I_G0, I_L3, lo10);/* jmpl %l3+%lo(vec),%g0 */ 560 tv->tv_instr[1] = I_JMPLri(I_G0, I_L3, lo10);/* jmpl %l3+%lo(vec),%g0 */
561 tv->tv_instr[2] = I_RDPSR(I_L0); /* mov %psr, %l0 */ 561 tv->tv_instr[2] = I_RDPSR(I_L0); /* mov %psr, %l0 */
562 pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ); 562 pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ);
563 cpuinfo.cache_flush_all(); 563 cpuinfo.cache_flush_all();
564 fastvec |= 1 << level; 564 fastvec |= 1 << level;
565 splx(s); 565 splx(s);
566} 566}
567 567
568/* 568/*
569 * Uninstall a fast trap handler. 569 * Uninstall a fast trap handler.
570 */ 570 */
571static void 571static void
572uninst_fasttrap(int level) 572uninst_fasttrap(int level)
573{ 573{
574 struct trapvec *tv; 574 struct trapvec *tv;
575 int displ; /* suspenders, belt, and buttons too */ 575 int displ; /* suspenders, belt, and buttons too */
576 int s; 576 int s;
577 577
578 tv = &trapbase[T_L1INT - 1 + level]; 578 tv = &trapbase[T_L1INT - 1 + level];
579 s = splhigh(); 579 s = splhigh();
580 displ = (CPU_ISSUN4M || CPU_ISSUN4D) 580 displ = (CPU_ISSUN4M || CPU_ISSUN4D)
581 ? &sparc_interrupt4m[0] - &tv->tv_instr[1] 581 ? &sparc_interrupt4m[0] - &tv->tv_instr[1]
582 : &sparc_interrupt44c[0] - &tv->tv_instr[1]; 582 : &sparc_interrupt44c[0] - &tv->tv_instr[1];
583 583
584 /* kernel text is write protected -- let us in for a moment */ 584 /* kernel text is write protected -- let us in for a moment */
585 pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, 585 pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE,
586 VM_PROT_READ|VM_PROT_WRITE); 586 VM_PROT_READ|VM_PROT_WRITE);
587 cpuinfo.cache_flush_all(); 587 cpuinfo.cache_flush_all();
588 tv->tv_instr[0] = I_MOVi(I_L3, level); 588 tv->tv_instr[0] = I_MOVi(I_L3, level);
589 tv->tv_instr[1] = I_BA(0, displ); 589 tv->tv_instr[1] = I_BA(0, displ);
590 tv->tv_instr[2] = I_RDPSR(I_L0); 590 tv->tv_instr[2] = I_RDPSR(I_L0);
591 pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ); 591 pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ);
592 cpuinfo.cache_flush_all(); 592 cpuinfo.cache_flush_all();
593 fastvec &= ~(1 << level); 593 fastvec &= ~(1 << level);
594 splx(s); 594 splx(s);
595} 595}
596 596
597/* 597/*
598 * Attach an interrupt handler to the vector chain for the given level. 598 * Attach an interrupt handler to the vector chain for the given level.
599 * This is not possible if it has been taken away as a fast vector. 599 * This is not possible if it has been taken away as a fast vector.
600 */ 600 */
601void 601void
602intr_establish(int level, int classipl, 602intr_establish(int level, int classipl,
603 struct intrhand *ih, void (*vec)(void)) 603 struct intrhand *ih, void (*vec)(void))
604{ 604{
605 int s = splhigh(); 605 int s = splhigh();
606 606
607#ifdef DIAGNOSTIC 607#ifdef DIAGNOSTIC
608 if (CPU_ISSUN4C) { 608 if (CPU_ISSUN4C) {
609 /* 609 /*
610 * Check reserved softintr slots on SUN4C only. 610 * Check reserved softintr slots on SUN4C only.
611 * No check for SUN4, as 4/300's have 611 * No check for SUN4, as 4/300's have
612 * esp0 at level 4 and le0 at level 6. 612 * esp0 at level 4 and le0 at level 6.
613 */ 613 */
614 if (level == 1 || level == 4 || level == 6) 614 if (level == 1 || level == 4 || level == 6)
615 panic("intr_establish: reserved softintr level"); 615 panic("intr_establish: reserved softintr level");
616 } 616 }
617#endif 617#endif
618 618
619 /* 619 /*
620 * If a `fast vector' is currently tied to this level, we must 620 * If a `fast vector' is currently tied to this level, we must
621 * first undo that. 621 * first undo that.
622 */ 622 */
623 if (fastvec & (1 << level)) { 623 if (fastvec & (1 << level)) {
624 printf("intr_establish: untie fast vector at level %d\n", 624 printf("intr_establish: untie fast vector at level %d\n",
625 level); 625 level);
626 uninst_fasttrap(level); 626 uninst_fasttrap(level);
627 } else if (vec != NULL && 627 } else if (vec != NULL &&
628 intrhand[level] == NULL && sintrhand[level] == NULL) { 628 intrhand[level] == NULL && sintrhand[level] == NULL) {
629 inst_fasttrap(level, vec); 629 inst_fasttrap(level, vec);
630 } 630 }
631 631
632 if (classipl == 0) 632 if (classipl == 0)
633 classipl = level; 633 classipl = level;
634 634
635 /* A requested IPL cannot exceed its device class level */ 635 /* A requested IPL cannot exceed its device class level */
636 if (classipl < level) 636 if (classipl < level)
637 panic("intr_establish: class lvl (%d) < pil (%d)\n", 637 panic("intr_establish: class lvl (%d) < pil (%d)\n",
638 classipl, level); 638 classipl, level);
639 639
640 /* pre-shift to PIL field in %psr */ 640 /* pre-shift to PIL field in %psr */
641 ih->ih_classipl = (classipl << 8) & PSR_PIL; 641 ih->ih_classipl = (classipl << 8) & PSR_PIL;
642 642
643 ih_insert(&intrhand[level], ih); 643 ih_insert(&intrhand[level], ih);
644 splx(s); 644 splx(s);
645} 645}
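
For illustration only (the device names here are hypothetical and not
part of this change), a driver typically fills in a struct intrhand and
hooks it at its hardware interrupt level, passing 0 so the class IPL
defaults to that level:

	static struct intrhand mydev_ih;	/* hypothetical handler record */

	static int
	mydev_intr(void *arg)
	{
		/* ... service the (hypothetical) device here ... */
		return (1);			/* interrupt was ours */
	}

	static void
	mydev_intr_attach(int level)
	{
		mydev_ih.ih_fun = mydev_intr;
		mydev_ih.ih_arg = NULL;
		intr_establish(level, 0, &mydev_ih, NULL);
	}
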
646 646
647void 647void
648intr_disestablish(int level, struct intrhand *ih) 648intr_disestablish(int level, struct intrhand *ih)
649{ 649{
650 650
651 ih_remove(&intrhand[level], ih); 651 ih_remove(&intrhand[level], ih);
652} 652}
653 653
654/* 654/*
655 * This is a softintr cookie. NB that sic_pilreq MUST be the 655 * This is a softintr cookie. NB that sic_pilreq MUST be the
656 * first element in the struct, because the softintr_schedule() 656 * first element in the struct, because the softintr_schedule()
657 * macro in intr.h casts cookies to int * to get it. On a 657 * macro in intr.h casts cookies to int * to get it. On a
658 * sun4m, sic_pilreq is an actual processor interrupt level that 658 * sun4m, sic_pilreq is an actual processor interrupt level that
659 * is passed to raise(), and on a sun4 or sun4c sic_pilreq is a 659 * is passed to raise(), and on a sun4 or sun4c sic_pilreq is a
660 * bit to set in the interrupt enable register with ienab_bis(). 660 * bit to set in the interrupt enable register with ienab_bis().
661 */ 661 */
662struct softintr_cookie { 662struct softintr_cookie {
663 int sic_pilreq; /* CPU-specific bits; MUST be first! */ 663 int sic_pilreq; /* CPU-specific bits; MUST be first! */
664 int sic_pil; /* Actual machine PIL that is used */ 664 int sic_pil; /* Actual machine PIL that is used */
665 struct intrhand sic_hand; 665 struct intrhand sic_hand;
666}; 666};
667 667
668/* 668/*
669 * softintr_init(): initialise the MI softintr system. 669 * softintr_init(): initialise the MI softintr system.
670 */ 670 */
671void 671void
672sparc_softintr_init(void) 672sparc_softintr_init(void)
673{ 673{
674 674
675#if defined(MULTIPROCESSOR) && (defined(SUN4M) || defined(SUN4D)) 675#if defined(MULTIPROCESSOR) && (defined(SUN4M) || defined(SUN4D))
676 /* Establish a standard soft interrupt handler for cross calls */ 676 /* Establish a standard soft interrupt handler for cross calls */
677 xcall_cookie = sparc_softintr_establish(13, xcallintr, NULL); 677 xcall_cookie = sparc_softintr_establish(13, xcallintr, NULL);
678#endif 678#endif
679} 679}
680 680
681/* 681/*
682 * softintr_establish(): MI interface. establish a func(arg) as a 682 * softintr_establish(): MI interface. establish a func(arg) as a
683 * software interrupt. 683 * software interrupt.
684 */ 684 */
685void * 685void *
686sparc_softintr_establish(int level, void (*fun)(void *), void *arg) 686sparc_softintr_establish(int level, void (*fun)(void *), void *arg)
687{ 687{
688 struct softintr_cookie *sic; 688 struct softintr_cookie *sic;
689 struct intrhand *ih; 689 struct intrhand *ih;
690 int pilreq; 690 int pilreq;
691 int pil; 691 int pil;
692 692
693 /* 693 /*
694 * On a sun4m, the processor interrupt level is stored 694 * On a sun4m, the processor interrupt level is stored
695 * in the softintr cookie to be passed to raise(). 695 * in the softintr cookie to be passed to raise().
696 * 696 *
697 * On a sun4 or sun4c the appropriate bit to set 697 * On a sun4 or sun4c the appropriate bit to set
698 * in the interrupt enable register is stored in 698 * in the interrupt enable register is stored in
699 * the softintr cookie to be passed to ienab_bis(). 699 * the softintr cookie to be passed to ienab_bis().
700 */ 700 */
701 pil = pilreq = level; 701 pil = pilreq = level;
702 if (CPU_ISSUN4 || CPU_ISSUN4C) { 702 if (CPU_ISSUN4 || CPU_ISSUN4C) {
703 /* Select the most suitable of three available softint levels */ 703 /* Select the most suitable of three available softint levels */
704 if (level >= 1 && level < 4) { 704 if (level >= 1 && level < 4) {
705 pil = 1; 705 pil = 1;
706 pilreq = IE_L1; 706 pilreq = IE_L1;
707 } else if (level >= 4 && level < 6) { 707 } else if (level >= 4 && level < 6) {
708 pil = 4; 708 pil = 4;
709 pilreq = IE_L4; 709 pilreq = IE_L4;
710 } else { 710 } else {
711 pil = 6; 711 pil = 6;
712 pilreq = IE_L6; 712 pilreq = IE_L6;
713 } 713 }
714 } 714 }
715 715
716 sic = malloc(sizeof(*sic), M_DEVBUF, 0); 716 sic = malloc(sizeof(*sic), M_DEVBUF, 0);
717 sic->sic_pil = pil; 717 sic->sic_pil = pil;
718 sic->sic_pilreq = pilreq; 718 sic->sic_pilreq = pilreq;
719 ih = &sic->sic_hand; 719 ih = &sic->sic_hand;
720 ih->ih_fun = (int (*)(void *))fun; 720 ih->ih_fun = (int (*)(void *))fun;
721 ih->ih_arg = arg; 721 ih->ih_arg = arg;
722 722
723 /* 723 /*
724 * Always run the handler at the requested level, which might 724 * Always run the handler at the requested level, which might
725 * be higher than the hardware can provide. 725 * be higher than the hardware can provide.
726 * 726 *
727 * pre-shift to PIL field in %psr 727 * pre-shift to PIL field in %psr
728 */ 728 */
729 ih->ih_classipl = (level << 8) & PSR_PIL; 729 ih->ih_classipl = (level << 8) & PSR_PIL;
730 730
731 if (fastvec & (1 << pil)) { 731 if (fastvec & (1 << pil)) {
732 printf("softintr_establish: untie fast vector at level %d\n", 732 printf("softintr_establish: untie fast vector at level %d\n",
733 pil); 733 pil);
734 uninst_fasttrap(level); 734 uninst_fasttrap(level);
735 } 735 }
736 736
737 ih_insert(&sintrhand[pil], ih); 737 ih_insert(&sintrhand[pil], ih);
738 return (void *)sic; 738 return (void *)sic;
739} 739}
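
/*
 * Standalone illustrative sketch: the sun4/sun4c branch above folds the
 * requested MI level onto one of the three available software interrupt
 * levels (1, 4 and 6) and remembers the matching enable-register bit.
 * demo_pick_softint and the DEMO_IE_* constants are hypothetical
 * stand-ins for IE_L1/IE_L4/IE_L6; only the mapping logic is the point.
 */
#include <assert.h>

enum { DEMO_IE_L1 = 0x02, DEMO_IE_L4 = 0x04, DEMO_IE_L6 = 0x08 };

static void
demo_pick_softint(int level, int *pil, int *pilreq)
{
	if (level >= 1 && level < 4) {
		*pil = 1;  *pilreq = DEMO_IE_L1;
	} else if (level >= 4 && level < 6) {
		*pil = 4;  *pilreq = DEMO_IE_L4;
	} else {
		*pil = 6;  *pilreq = DEMO_IE_L6;
	}
}

int
main(void)
{
	int pil, req;

	demo_pick_softint(3, &pil, &req);
	assert(pil == 1 && req == DEMO_IE_L1);
	demo_pick_softint(5, &pil, &req);
	assert(pil == 4 && req == DEMO_IE_L4);
	demo_pick_softint(9, &pil, &req);
	assert(pil == 6 && req == DEMO_IE_L6);
	return 0;
}
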
740 740
741/* 741/*
742 * softintr_disestablish(): MI interface. disestablish the specified 742 * softintr_disestablish(): MI interface. disestablish the specified
743 * software interrupt. 743 * software interrupt.
744 */ 744 */
745void 745void
746sparc_softintr_disestablish(void *cookie) 746sparc_softintr_disestablish(void *cookie)
747{ 747{
748 struct softintr_cookie *sic = cookie; 748 struct softintr_cookie *sic = cookie;
749 749
750 ih_remove(&sintrhand[sic->sic_pil], &sic->sic_hand); 750 ih_remove(&sintrhand[sic->sic_pil], &sic->sic_hand);
751 free(cookie, M_DEVBUF); 751 free(cookie, M_DEVBUF);
752} 752}
753 753
754#if 0 754#if 0
755void 755void
756sparc_softintr_schedule(void *cookie) 756sparc_softintr_schedule(void *cookie)
757{ 757{
758 struct softintr_cookie *sic = cookie; 758 struct softintr_cookie *sic = cookie;
759 if (CPU_ISSUN4M || CPU_ISSUN4D) { 759 if (CPU_ISSUN4M || CPU_ISSUN4D) {
760#if defined(SUN4M) || defined(SUN4D) 760#if defined(SUN4M) || defined(SUN4D)
761 extern void raise(int,int); 761 extern void raise(int,int);
762 raise(0, sic->sic_pilreq); 762 raise(0, sic->sic_pilreq);
763#endif 763#endif
764 } else { 764 } else {
765#if defined(SUN4) || defined(SUN4C) 765#if defined(SUN4) || defined(SUN4C)
766 ienab_bis(sic->sic_pilreq); 766 ienab_bis(sic->sic_pilreq);
767#endif 767#endif
768 } 768 }
769} 769}
770#endif 770#endif
771 771
772#ifdef MULTIPROCESSOR 772#ifdef MULTIPROCESSOR
773/* 773/*
774 * Called by interrupt stubs, etc., to lock/unlock the kernel. 774 * Called by interrupt stubs, etc., to lock/unlock the kernel.
775 */ 775 */
776void 776void
777intr_lock_kernel(void) 777intr_lock_kernel(void)
778{ 778{
779 779
780 KERNEL_LOCK(1, NULL); 780 KERNEL_LOCK(1, NULL);
781} 781}
782 782
783void 783void
784intr_unlock_kernel(void) 784intr_unlock_kernel(void)
785{ 785{
786 786
787 KERNEL_UNLOCK_ONE(NULL); 787 KERNEL_UNLOCK_ONE(NULL);
788} 788}
789#endif 789#endif
790 790
791bool 791bool
792cpu_intr_p(void) 792cpu_intr_p(void)
793{ 793{
794 794
795 return curcpu()->ci_idepth != 0; 795 return curcpu()->ci_idepth != 0;
796} 796}
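
/*
 * Standalone illustrative sketch: cpu_intr_p() above reports interrupt
 * context by checking the per-CPU interrupt nesting depth.  The sketch
 * models that with a single counter bumped on entry to a handler and
 * dropped on exit; demo_idepth and the other names are hypothetical
 * stand-ins for ci_idepth and the entry/exit stubs.
 */
#include <assert.h>
#include <stdbool.h>

static int demo_idepth;			/* models ci_idepth for one CPU */

static bool demo_intr_p(void) { return demo_idepth != 0; }

static void
demo_handler(void)
{
	demo_idepth++;			/* entry stub increments the depth */
	assert(demo_intr_p());		/* handlers run "in interrupt" */
	demo_idepth--;			/* exit stub decrements it again */
}

int
main(void)
{
	assert(!demo_intr_p());		/* thread context */
	demo_handler();
	assert(!demo_intr_p());		/* back to thread context */
	return 0;
}
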

cvs diff -r1.328 -r1.329 src/sys/arch/sparc/sparc/pmap.c (switch to unified diff)

--- src/sys/arch/sparc/sparc/pmap.c 2009/05/18 02:28:35 1.328
+++ src/sys/arch/sparc/sparc/pmap.c 2009/05/27 02:19:50 1.329
@@ -1,1058 +1,1058 @@ @@ -1,1058 +1,1058 @@
1/* $NetBSD: pmap.c,v 1.328 2009/05/18 02:28:35 mrg Exp $ */ 1/* $NetBSD: pmap.c,v 1.329 2009/05/27 02:19:50 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996 4 * Copyright (c) 1996
5 * The President and Fellows of Harvard College. All rights reserved. 5 * The President and Fellows of Harvard College. All rights reserved.
6 * Copyright (c) 1992, 1993 6 * Copyright (c) 1992, 1993
7 * The Regents of the University of California. All rights reserved. 7 * The Regents of the University of California. All rights reserved.
8 * 8 *
9 * This software was developed by the Computer Systems Engineering group 9 * This software was developed by the Computer Systems Engineering group
10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11 * contributed to Berkeley. 11 * contributed to Berkeley.
12 * 12 *
13 * All advertising materials mentioning features or use of this software 13 * All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement: 14 * must display the following acknowledgement:
15 * This product includes software developed by Harvard University. 15 * This product includes software developed by Harvard University.
16 * This product includes software developed by the University of 16 * This product includes software developed by the University of
17 * California, Lawrence Berkeley Laboratory. 17 * California, Lawrence Berkeley Laboratory.
18 * 18 *
19 * Redistribution and use in source and binary forms, with or without 19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions 20 * modification, are permitted provided that the following conditions
21 * are met: 21 * are met:
22 * 22 *
23 * 1. Redistributions of source code must retain the above copyright 23 * 1. Redistributions of source code must retain the above copyright
24 * notice, this list of conditions and the following disclaimer. 24 * notice, this list of conditions and the following disclaimer.
25 * 2. Redistributions in binary form must reproduce the above copyright 25 * 2. Redistributions in binary form must reproduce the above copyright
26 * notice, this list of conditions and the following disclaimer in the 26 * notice, this list of conditions and the following disclaimer in the
27 * documentation and/or other materials provided with the distribution. 27 * documentation and/or other materials provided with the distribution.
28 * 3. All advertising materials mentioning features or use of this software 28 * 3. All advertising materials mentioning features or use of this software
29 * must display the following acknowledgement: 29 * must display the following acknowledgement:
30 * This product includes software developed by Aaron Brown and 30 * This product includes software developed by Aaron Brown and
31 * Harvard University. 31 * Harvard University.
32 * This product includes software developed by the University of 32 * This product includes software developed by the University of
33 * California, Berkeley and its contributors. 33 * California, Berkeley and its contributors.
34 * 4. Neither the name of the University nor the names of its contributors 34 * 4. Neither the name of the University nor the names of its contributors
35 * may be used to endorse or promote products derived from this software 35 * may be used to endorse or promote products derived from this software
36 * without specific prior written permission. 36 * without specific prior written permission.
37 * 37 *
38 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 38 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 39 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 40 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 41 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48 * SUCH DAMAGE. 48 * SUCH DAMAGE.
49 * 49 *
50 * @(#)pmap.c 8.4 (Berkeley) 2/5/94 50 * @(#)pmap.c 8.4 (Berkeley) 2/5/94
51 * 51 *
52 */ 52 */
53 53
54/* 54/*
55 * SPARC physical map management code. 55 * SPARC physical map management code.
56 */ 56 */
57 57
58#include <sys/cdefs.h> 58#include <sys/cdefs.h>
59__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.328 2009/05/18 02:28:35 mrg Exp $"); 59__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.329 2009/05/27 02:19:50 mrg Exp $");
60 60
61#include "opt_ddb.h" 61#include "opt_ddb.h"
62#include "opt_kgdb.h" 62#include "opt_kgdb.h"
63#include "opt_sparc_arch.h" 63#include "opt_sparc_arch.h"
64 64
65#include <sys/param.h> 65#include <sys/param.h>
66#include <sys/systm.h> 66#include <sys/systm.h>
67#include <sys/device.h> 67#include <sys/device.h>
68#include <sys/proc.h> 68#include <sys/proc.h>
69#include <sys/queue.h> 69#include <sys/queue.h>
70#include <sys/pool.h> 70#include <sys/pool.h>
71#include <sys/exec.h> 71#include <sys/exec.h>
72#include <sys/core.h> 72#include <sys/core.h>
73#include <sys/kcore.h> 73#include <sys/kcore.h>
74#include <sys/kernel.h> 74#include <sys/kernel.h>
75#include <sys/atomic.h> 75#include <sys/atomic.h>
76 76
77#include <uvm/uvm.h> 77#include <uvm/uvm.h>
78 78
79#include <machine/autoconf.h> 79#include <machine/autoconf.h>
80#include <machine/bsd_openprom.h> 80#include <machine/bsd_openprom.h>
81#include <machine/oldmon.h> 81#include <machine/oldmon.h>
82#include <machine/cpu.h> 82#include <machine/cpu.h>
83#include <machine/ctlreg.h> 83#include <machine/ctlreg.h>
84#include <machine/kcore.h> 84#include <machine/kcore.h>
85 85
86#include <sparc/sparc/asm.h> 86#include <sparc/sparc/asm.h>
87#include <sparc/sparc/cache.h> 87#include <sparc/sparc/cache.h>
88#include <sparc/sparc/vaddrs.h> 88#include <sparc/sparc/vaddrs.h>
89#include <sparc/sparc/cpuvar.h> 89#include <sparc/sparc/cpuvar.h>
90 90
91/* 91/*
92 * The SPARCstation offers us the following challenges: 92 * The SPARCstation offers us the following challenges:
93 * 93 *
94 * 1. A virtual address cache. This is, strictly speaking, not 94 * 1. A virtual address cache. This is, strictly speaking, not
95 * part of the architecture, but the code below assumes one. 95 * part of the architecture, but the code below assumes one.
96 * This is a write-through cache on the 4c and a write-back cache 96 * This is a write-through cache on the 4c and a write-back cache
97 * on others. 97 * on others.
98 * 98 *
99 * 2. (4/4c only) An MMU that acts like a cache. There is not enough 99 * 2. (4/4c only) An MMU that acts like a cache. There is not enough
100 * space in the MMU to map everything all the time. Instead, we need 100 * space in the MMU to map everything all the time. Instead, we need
101 * to load MMU with the `working set' of translations for each 101 * to load MMU with the `working set' of translations for each
102 * process. The sun4m does not act like a cache; tables are maintained 102 * process. The sun4m does not act like a cache; tables are maintained
103 * in physical memory. 103 * in physical memory.
104 * 104 *
105 * 3. Segmented virtual and physical spaces. The upper 12 bits of 105 * 3. Segmented virtual and physical spaces. The upper 12 bits of
106 * a virtual address (the virtual segment) index a segment table, 106 * a virtual address (the virtual segment) index a segment table,
107 * giving a physical segment. The physical segment selects a 107 * giving a physical segment. The physical segment selects a
108 * `Page Map Entry Group' (PMEG) and the virtual page number---the 108 * `Page Map Entry Group' (PMEG) and the virtual page number---the
109 * next 5 or 6 bits of the virtual address---select the particular 109 * next 5 or 6 bits of the virtual address---select the particular
110 * `Page Map Entry' for the page. We call the latter a PTE and 110 * `Page Map Entry' for the page. We call the latter a PTE and
111 * call each Page Map Entry Group a pmeg (for want of a better name). 111 * call each Page Map Entry Group a pmeg (for want of a better name).
112 * Note that the sun4m has an unsegmented 36-bit physical space. 112 * Note that the sun4m has an unsegmented 36-bit physical space.
113 * 113 *
114 * Since there are no valid bits in the segment table, the only way 114 * Since there are no valid bits in the segment table, the only way
115 * to have an invalid segment is to make one full pmeg of invalid PTEs. 115 * to have an invalid segment is to make one full pmeg of invalid PTEs.
116 * We use the last one (since the ROM does as well) (sun4/4c only) 116 * We use the last one (since the ROM does as well) (sun4/4c only)
117 * 117 *
118 * 4. Discontiguous physical pages. The Mach VM expects physical pages 118 * 4. Discontiguous physical pages. The Mach VM expects physical pages
119 * to be in one sequential lump. 119 * to be in one sequential lump.
120 * 120 *
121 * 5. The MMU is always on: it is not possible to disable it. This is 121 * 5. The MMU is always on: it is not possible to disable it. This is
122 * mainly a startup hassle. 122 * mainly a startup hassle.
123 */ 123 */
124 124
125struct pmap_stats { 125struct pmap_stats {
126 int ps_unlink_pvfirst; /* # of pv_unlinks on head */ 126 int ps_unlink_pvfirst; /* # of pv_unlinks on head */
127 int ps_unlink_pvsearch; /* # of pv_unlink searches */ 127 int ps_unlink_pvsearch; /* # of pv_unlink searches */
128 int ps_changeprots; /* # of calls to changeprot */ 128 int ps_changeprots; /* # of calls to changeprot */
129 int ps_enter_firstpv; /* pv heads entered */ 129 int ps_enter_firstpv; /* pv heads entered */
130 int ps_enter_secondpv; /* pv nonheads entered */ 130 int ps_enter_secondpv; /* pv nonheads entered */
131 int ps_useless_changewire; /* useless wiring changes */ 131 int ps_useless_changewire; /* useless wiring changes */
132 int ps_npg_prot_all; /* # of active pages protected */ 132 int ps_npg_prot_all; /* # of active pages protected */
133 int ps_npg_prot_actual; /* # pages actually affected */ 133 int ps_npg_prot_actual; /* # pages actually affected */
134 int ps_npmeg_free; /* # of free pmegs */ 134 int ps_npmeg_free; /* # of free pmegs */
135 int ps_npmeg_locked; /* # of pmegs on locked list */ 135 int ps_npmeg_locked; /* # of pmegs on locked list */
136 int ps_npmeg_lru; /* # of pmegs on lru list */ 136 int ps_npmeg_lru; /* # of pmegs on lru list */
137} pmap_stats; 137} pmap_stats;
138 138
139#if defined(SUN4) || defined(SUN4C) 139#if defined(SUN4) || defined(SUN4C)
140struct evcnt mmu_stolenpmegs_evcnt = 140struct evcnt mmu_stolenpmegs_evcnt =
141 EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"mmu","stln pmgs"); 141 EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"mmu","stln pmgs");
142EVCNT_ATTACH_STATIC(mmu_stolenpmegs_evcnt); 142EVCNT_ATTACH_STATIC(mmu_stolenpmegs_evcnt);
143 143
144struct evcnt mmu_pagein_evcnt = 144struct evcnt mmu_pagein_evcnt =
145 EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"mmu","pagein"); 145 EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"mmu","pagein");
146EVCNT_ATTACH_STATIC(mmu_pagein_evcnt); 146EVCNT_ATTACH_STATIC(mmu_pagein_evcnt);
147#endif /* SUN4 || SUN4C */ 147#endif /* SUN4 || SUN4C */
148 148
149#ifdef DEBUG 149#ifdef DEBUG
150#define PDB_CREATE 0x0001 150#define PDB_CREATE 0x0001
151#define PDB_DESTROY 0x0002 151#define PDB_DESTROY 0x0002
152#define PDB_REMOVE 0x0004 152#define PDB_REMOVE 0x0004
153#define PDB_CHANGEPROT 0x0008 153#define PDB_CHANGEPROT 0x0008
154#define PDB_ENTER 0x0010 154#define PDB_ENTER 0x0010
155#define PDB_FOLLOW 0x0020 155#define PDB_FOLLOW 0x0020
156 156
157#define PDB_MMU_ALLOC 0x0100 157#define PDB_MMU_ALLOC 0x0100
158#define PDB_MMU_STEAL 0x0200 158#define PDB_MMU_STEAL 0x0200
159#define PDB_CTX_ALLOC 0x0400 159#define PDB_CTX_ALLOC 0x0400
160#define PDB_CTX_STEAL 0x0800 160#define PDB_CTX_STEAL 0x0800
161#define PDB_MMUREG_ALLOC 0x1000 161#define PDB_MMUREG_ALLOC 0x1000
162#define PDB_MMUREG_STEAL 0x2000 162#define PDB_MMUREG_STEAL 0x2000
163#define PDB_CACHESTUFF 0x4000 163#define PDB_CACHESTUFF 0x4000
164#define PDB_SWITCHMAP 0x8000 164#define PDB_SWITCHMAP 0x8000
165#define PDB_SANITYCHK 0x10000 165#define PDB_SANITYCHK 0x10000
166int pmapdebug = 0; 166int pmapdebug = 0;
167#endif 167#endif
168 168
169/* 169/*
170 * Bounds on managed physical addresses. Used by (MD) users 170 * Bounds on managed physical addresses. Used by (MD) users
171 * of uvm_pglistalloc() to provide search hints. 171 * of uvm_pglistalloc() to provide search hints.
172 */ 172 */
173paddr_t vm_first_phys = (paddr_t)-1; 173paddr_t vm_first_phys = (paddr_t)-1;
174paddr_t vm_last_phys = 0; 174paddr_t vm_last_phys = 0;
175psize_t vm_num_phys; 175psize_t vm_num_phys;
176 176
177#define PMAP_LOCK() KERNEL_LOCK(1, NULL) 177#define PMAP_LOCK() KERNEL_LOCK(1, NULL)
178#define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL) 178#define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL)
179 179
180/* 180/*
181 * Flags in pvlist.pv_flags. Note that PV_MOD must be 1 and PV_REF must be 2 181 * Flags in pvlist.pv_flags. Note that PV_MOD must be 1 and PV_REF must be 2
182 * since they must line up with the bits in the hardware PTEs (see pte.h). 182 * since they must line up with the bits in the hardware PTEs (see pte.h).
183 * SUN4M bits are at a slightly different location in the PTE. 183 * SUN4M bits are at a slightly different location in the PTE.
184 * 184 *
185 * Note: the REF, MOD and ANC flag bits occur only in the head of a pvlist. 185 * Note: the REF, MOD and ANC flag bits occur only in the head of a pvlist.
186 * The NC bit is meaningful in each individual pv entry and reflects the 186 * The NC bit is meaningful in each individual pv entry and reflects the
187 * requested non-cacheability at the time the entry was made through 187 * requested non-cacheability at the time the entry was made through
188 * pv_link() or when subsequently altered by kvm_uncache() (but the latter 188 * pv_link() or when subsequently altered by kvm_uncache() (but the latter
189 * does not happen in kernels as of the time of this writing (March 2001)). 189 * does not happen in kernels as of the time of this writing (March 2001)).
190 */ 190 */
191#define PV_MOD 1 /* page modified */ 191#define PV_MOD 1 /* page modified */
192#define PV_REF 2 /* page referenced */ 192#define PV_REF 2 /* page referenced */
193#define PV_NC 4 /* page cannot be cached */ 193#define PV_NC 4 /* page cannot be cached */
194#define PV_REF4M 1 /* page referenced (SRMMU) */ 194#define PV_REF4M 1 /* page referenced (SRMMU) */
195#define PV_MOD4M 2 /* page modified (SRMMU) */ 195#define PV_MOD4M 2 /* page modified (SRMMU) */
196#define PV_ANC 0x10 /* page has incongruent aliases */ 196#define PV_ANC 0x10 /* page has incongruent aliases */
197 197
198static struct pool pv_pool; 198static struct pool pv_pool;
199 199
200/* 200/*
201 * pvhead(pte): find a VM page given a PTE entry. 201 * pvhead(pte): find a VM page given a PTE entry.
202 */ 202 */
203#if defined(SUN4) || defined(SUN4C) 203#if defined(SUN4) || defined(SUN4C)
204static struct vm_page * 204static struct vm_page *
205pvhead4_4c(u_int pte) 205pvhead4_4c(u_int pte)
206{ 206{
207 paddr_t pa = (pte & PG_PFNUM) << PGSHIFT; 207 paddr_t pa = (pte & PG_PFNUM) << PGSHIFT;
208 208
209 return (PHYS_TO_VM_PAGE(pa)); 209 return (PHYS_TO_VM_PAGE(pa));
210} 210}
211#endif 211#endif
212 212
213#if defined(SUN4M) || defined(SUN4D) 213#if defined(SUN4M) || defined(SUN4D)
214static struct vm_page * 214static struct vm_page *
215pvhead4m(u_int pte) 215pvhead4m(u_int pte)
216{ 216{
217 paddr_t pa = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT; 217 paddr_t pa = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
218 218
219 return (PHYS_TO_VM_PAGE(pa)); 219 return (PHYS_TO_VM_PAGE(pa));
220} 220}
221#endif 221#endif
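
/*
 * Standalone illustrative sketch: pvhead4_4c()/pvhead4m() above recover a
 * physical address by masking the page-frame-number field out of a PTE
 * and shifting it up by the page shift, then handing the result to
 * PHYS_TO_VM_PAGE().  The mask and shift below are hypothetical example
 * values, not the real PG_PFNUM/PGSHIFT or SRMMU_PPNMASK definitions.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_PFN_MASK	0x000fffffu	/* hypothetical PFN field */
#define DEMO_PGSHIFT	12		/* hypothetical 4 KiB pages */

static uint64_t
demo_pte_to_pa(uint32_t pte)
{
	return (uint64_t)(pte & DEMO_PFN_MASK) << DEMO_PGSHIFT;
}

int
main(void)
{
	/* Frame 0x123 with some unrelated permission bits set above it. */
	uint32_t pte = 0x123 | 0xe0000000u;

	assert(demo_pte_to_pa(pte) == (uint64_t)0x123 << DEMO_PGSHIFT);
	return 0;
}
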
222 222
223/* 223/*
224 * Each virtual segment within each pmap is either valid or invalid. 224 * Each virtual segment within each pmap is either valid or invalid.
225 * It is valid if pm_npte[VA_VSEG(va)] is not 0. This does not mean 225 * It is valid if pm_npte[VA_VSEG(va)] is not 0. This does not mean
226 * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)] 226 * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
227 * does not point to the invalid PMEG. 227 * does not point to the invalid PMEG.
228 * 228 *
229 * In the older SPARC architectures (sun4/sun4c), page tables are cached in 229 * In the older SPARC architectures (sun4/sun4c), page tables are cached in
230 * the MMU. The following discussion applies to these architectures: 230 * the MMU. The following discussion applies to these architectures:
231 * 231 *
232 * If a virtual segment is valid and loaded, the correct PTEs appear 232 * If a virtual segment is valid and loaded, the correct PTEs appear
233 * in the MMU only. If it is valid and unloaded, the correct PTEs appear 233 * in the MMU only. If it is valid and unloaded, the correct PTEs appear
234 * in the pm_pte[VA_VSEG(va)] only. However, some effort is made to keep 234 * in the pm_pte[VA_VSEG(va)] only. However, some effort is made to keep
235 * the software copies consistent enough with the MMU so that libkvm can 235 * the software copies consistent enough with the MMU so that libkvm can
236 * do user address translations. In particular, pv_changepte() and 236 * do user address translations. In particular, pv_changepte() and
237 * pmap_enu() maintain consistency, while less critical changes are 237 * pmap_enu() maintain consistency, while less critical changes are
238 * not maintained. pm_pte[VA_VSEG(va)] always points to space for those 238 * not maintained. pm_pte[VA_VSEG(va)] always points to space for those
239 * PTEs. 239 * PTEs.
240 * 240 *
241 * Each PMEG in the MMU is either free or contains PTEs corresponding to 241 * Each PMEG in the MMU is either free or contains PTEs corresponding to
242 * some pmap and virtual segment. If it contains some PTEs, it also contains 242 * some pmap and virtual segment. If it contains some PTEs, it also contains
243 * reference and modify bits that belong in the pv_table. If we need 243 * reference and modify bits that belong in the pv_table. If we need
244 * to steal a PMEG from some process (if we need one and none are free) 244 * to steal a PMEG from some process (if we need one and none are free)
245 * we must copy the ref and mod bits, and update pm_segmap in the other 245 * we must copy the ref and mod bits, and update pm_segmap in the other
246 * pmap to show that its virtual segment is no longer in the MMU. 246 * pmap to show that its virtual segment is no longer in the MMU.
247 * 247 *
248 * There are 128 PMEGs in a small Sun-4, of which only a few dozen are 248 * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
249 * tied down permanently, leaving `about' 100 to be spread among 249 * tied down permanently, leaving `about' 100 to be spread among
250 * running processes. These are managed as an LRU cache. Before 250 * running processes. These are managed as an LRU cache. Before
251 * calling the VM paging code for a user page fault, the fault handler 251 * calling the VM paging code for a user page fault, the fault handler
252 * calls mmu_load(pmap, va) to try to get a set of PTEs put into the 252 * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
253 * MMU. mmu_load will check the validity of the segment and tell whether 253 * MMU. mmu_load will check the validity of the segment and tell whether
254 * it did something. 254 * it did something.
255 * 255 *
256 * Since I hate the name PMEG I call this data structure an `mmu entry'. 256 * Since I hate the name PMEG I call this data structure an `mmu entry'.
257 * Each mmuentry is on exactly one of three `usage' lists: free, LRU, 257 * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
258 * or locked. The locked list is only used for kernel mappings that need 258 * or locked. The locked list is only used for kernel mappings that need
259 * to be wired down. 259 * to be wired down.
260 * 260 *
261 * 261 *
262 * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three 262 * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three
263 * levels of page tables are maintained in physical memory. We use the same 263 * levels of page tables are maintained in physical memory. We use the same
264 * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap, 264 * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap,
265 * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also 265 * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also
266 * build a parallel set of physical tables that can be used by the MMU. 266 * build a parallel set of physical tables that can be used by the MMU.
267 * (XXX: This seems redundant, but is it necessary for the unified kernel?) 267 * (XXX: This seems redundant, but is it necessary for the unified kernel?)
268 * 268 *
269 * If a virtual segment is valid, its entries will be in both parallel lists. 269 * If a virtual segment is valid, its entries will be in both parallel lists.
270 * If it is not valid, then its entry in the kernel tables will be zero, and 270 * If it is not valid, then its entry in the kernel tables will be zero, and
271 * its entry in the MMU tables will either be nonexistent or zero as well. 271 * its entry in the MMU tables will either be nonexistent or zero as well.
272 * 272 *
273 * The Reference MMU generally uses a Translation Look-aside Buffer (TLB) 273 * The Reference MMU generally uses a Translation Look-aside Buffer (TLB)
274 * to cache the result of recently executed page table walks. When 274 * to cache the result of recently executed page table walks. When
275 * manipulating page tables, we need to ensure consistency of the 275 * manipulating page tables, we need to ensure consistency of the
276 * in-memory and TLB copies of the page table entries. This is handled 276 * in-memory and TLB copies of the page table entries. This is handled
277 * by flushing (and invalidating) a TLB entry when appropriate before 277 * by flushing (and invalidating) a TLB entry when appropriate before
278 * altering an in-memory page table entry. 278 * altering an in-memory page table entry.
279 */ 279 */
280struct mmuentry { 280struct mmuentry {
281 CIRCLEQ_ENTRY(mmuentry) me_list; /* usage list link */ 281 CIRCLEQ_ENTRY(mmuentry) me_list; /* usage list link */
282 TAILQ_ENTRY(mmuentry) me_pmchain; /* pmap owner link */ 282 TAILQ_ENTRY(mmuentry) me_pmchain; /* pmap owner link */
283 struct pmap *me_pmap; /* pmap, if in use */ 283 struct pmap *me_pmap; /* pmap, if in use */
284 u_short me_vreg; /* associated virtual region/segment */ 284 u_short me_vreg; /* associated virtual region/segment */
285 u_short me_vseg; /* associated virtual region/segment */ 285 u_short me_vseg; /* associated virtual region/segment */
286 u_short me_cookie; /* hardware SMEG/PMEG number */ 286 u_short me_cookie; /* hardware SMEG/PMEG number */
287#ifdef DIAGNOSTIC 287#ifdef DIAGNOSTIC
288 int *me_statp;/*XXX*/ 288 int *me_statp;/*XXX*/
289#endif 289#endif
290}; 290};
291struct mmuentry *mmusegments; /* allocated in pmap_bootstrap */ 291struct mmuentry *mmusegments; /* allocated in pmap_bootstrap */
292struct mmuentry *mmuregions; /* allocated in pmap_bootstrap */ 292struct mmuentry *mmuregions; /* allocated in pmap_bootstrap */
293 293
294CIRCLEQ_HEAD(mmuq, mmuentry); 294CIRCLEQ_HEAD(mmuq, mmuentry);
295struct mmuq segm_freelist, segm_lru, segm_locked; 295struct mmuq segm_freelist, segm_lru, segm_locked;
296struct mmuq region_freelist, region_lru, region_locked; 296struct mmuq region_freelist, region_lru, region_locked;
297/* 297/*
298 * We use a circular queue, since that allows us to remove an element 298 * We use a circular queue, since that allows us to remove an element
299 * from a list without knowing the list header. 299 * from a list without knowing the list header.
300 */ 300 */
301#define CIRCLEQ_REMOVE_NOH(elm, field) do { \ 301#define CIRCLEQ_REMOVE_NOH(elm, field) do { \
302 (elm)->field.cqe_next->field.cqe_prev = (elm)->field.cqe_prev; \ 302 (elm)->field.cqe_next->field.cqe_prev = (elm)->field.cqe_prev; \
303 (elm)->field.cqe_prev->field.cqe_next = (elm)->field.cqe_next; \ 303 (elm)->field.cqe_prev->field.cqe_next = (elm)->field.cqe_next; \
304} while (/*CONSTCOND*/0) 304} while (/*CONSTCOND*/0)
305 305
306#define MMUQ_INIT(head) CIRCLEQ_INIT(head) 306#define MMUQ_INIT(head) CIRCLEQ_INIT(head)
307#define MMUQ_REMOVE(elm,field) CIRCLEQ_REMOVE_NOH(elm,field) 307#define MMUQ_REMOVE(elm,field) CIRCLEQ_REMOVE_NOH(elm,field)
308#define MMUQ_INSERT_TAIL(head,elm,field)CIRCLEQ_INSERT_TAIL(head,elm,field) 308#define MMUQ_INSERT_TAIL(head,elm,field)CIRCLEQ_INSERT_TAIL(head,elm,field)
309#define MMUQ_EMPTY(head) CIRCLEQ_EMPTY(head) 309#define MMUQ_EMPTY(head) CIRCLEQ_EMPTY(head)
310#define MMUQ_FIRST(head) CIRCLEQ_FIRST(head) 310#define MMUQ_FIRST(head) CIRCLEQ_FIRST(head)
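
/*
 * Standalone illustrative sketch: the comment above explains why a
 * circular queue is used -- an element can be unlinked without knowing
 * which list head it is on, which is exactly what CIRCLEQ_REMOVE_NOH()
 * does.  The sketch uses a plain sentinel-based circular list rather than
 * the real <sys/queue.h> macros; all names are hypothetical.
 */
#include <assert.h>
#include <stddef.h>

struct demo_node {
	struct demo_node *next, *prev;
	int val;
};

/* Insert "n" at the tail, i.e. just before the sentinel "head". */
static void
demo_insert_tail(struct demo_node *head, struct demo_node *n)
{
	n->next = head;
	n->prev = head->prev;
	head->prev->next = n;
	head->prev = n;
}

/* Unlink "n" without being told which list it belongs to. */
static void
demo_remove_noh(struct demo_node *n)
{
	n->next->prev = n->prev;
	n->prev->next = n->next;
}

int
main(void)
{
	struct demo_node head = { &head, &head, 0 };
	struct demo_node a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };

	demo_insert_tail(&head, &a);
	demo_insert_tail(&head, &b);
	demo_remove_noh(&a);		/* no head argument needed */
	assert(head.next == &b && head.prev == &b);
	assert(b.next == &head && b.prev == &head);
	return 0;
}
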
311 311
312 312
313int seginval; /* [4/4c] the invalid segment number */ 313int seginval; /* [4/4c] the invalid segment number */
314int reginval; /* [4/3mmu] the invalid region number */ 314int reginval; /* [4/3mmu] the invalid region number */
315 315
316static kmutex_t demap_lock; 316static kmutex_t demap_lock;
317 317
318/* 318/*
319 * (sun4/4c) 319 * (sun4/4c)
320 * A context is simply a small number that dictates which set of 4096 320 * A context is simply a small number that dictates which set of 4096
321 * segment map entries the MMU uses. The Sun 4c has eight (SS1,IPC) or 321 * segment map entries the MMU uses. The Sun 4c has eight (SS1,IPC) or
 322 * sixteen (SS2,IPX) such sets. These are allotted in an `almost MRU' fashion. 322 * sixteen (SS2,IPX) such sets. These are allotted in an `almost MRU' fashion.
323 * (sun4m) 323 * (sun4m)
324 * A context is simply a small number that indexes the context table, the 324 * A context is simply a small number that indexes the context table, the
325 * root-level page table mapping 4G areas. Each entry in this table points 325 * root-level page table mapping 4G areas. Each entry in this table points
326 * to a 1st-level region table. A SPARC reference MMU will usually use 16 326 * to a 1st-level region table. A SPARC reference MMU will usually use 16
327 * such contexts, but some offer as many as 64k contexts; the theoretical 327 * such contexts, but some offer as many as 64k contexts; the theoretical
328 * maximum is 2^32 - 1, but this would create overlarge context tables. 328 * maximum is 2^32 - 1, but this would create overlarge context tables.
329 * 329 *
330 * Each context is either free or attached to a pmap. 330 * Each context is either free or attached to a pmap.
331 * 331 *
332 * Since the virtual address cache is tagged by context, when we steal 332 * Since the virtual address cache is tagged by context, when we steal
333 * a context we have to flush (that part of) the cache. 333 * a context we have to flush (that part of) the cache.
334 */ 334 */
335union ctxinfo { 335union ctxinfo {
336 union ctxinfo *c_nextfree; /* free list (if free) */ 336 union ctxinfo *c_nextfree; /* free list (if free) */
337 struct pmap *c_pmap; /* pmap (if busy) */ 337 struct pmap *c_pmap; /* pmap (if busy) */
338}; 338};
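
/*
 * Standalone illustrative sketch: "union ctxinfo" overlays the free-list
 * link with the owning pmap pointer, since a context is always in exactly
 * one of those two states.  The sketch shows the same space-saving idiom
 * with hypothetical names (demo_ctx, demo_owner).
 */
#include <assert.h>
#include <stddef.h>

struct demo_owner { int id; };

union demo_ctx {
	union demo_ctx *c_nextfree;	/* valid while on the free list */
	struct demo_owner *c_owner;	/* valid while allocated */
};

int
main(void)
{
	union demo_ctx ctxs[3];
	union demo_ctx *freelist = NULL;
	struct demo_owner owner = { 42 };

	/* Thread all contexts onto the free list. */
	for (int i = 0; i < 3; i++) {
		ctxs[i].c_nextfree = freelist;
		freelist = &ctxs[i];
	}

	/* Allocate one: pop it and repurpose the same storage. */
	union demo_ctx *c = freelist;
	freelist = c->c_nextfree;
	c->c_owner = &owner;

	assert(c == &ctxs[2] && c->c_owner->id == 42);
	return 0;
}
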
339 339
340static kmutex_t ctx_lock; /* lock for below */ 340static kmutex_t ctx_lock; /* lock for below */
 341union ctxinfo *ctxinfo; /* allocated in pmap_bootstrap */ 341union ctxinfo *ctxinfo; /* allocated in pmap_bootstrap */
342union ctxinfo *ctx_freelist; /* context free list */ 342union ctxinfo *ctx_freelist; /* context free list */
343int ctx_kick; /* allocation rover when none free */ 343int ctx_kick; /* allocation rover when none free */
344int ctx_kickdir; /* ctx_kick roves both directions */ 344int ctx_kickdir; /* ctx_kick roves both directions */
345int ncontext; /* sizeof ctx_freelist */ 345int ncontext; /* sizeof ctx_freelist */
346 346
347void ctx_alloc(struct pmap *); 347void ctx_alloc(struct pmap *);
348void ctx_free(struct pmap *); 348void ctx_free(struct pmap *);
349 349
350void * vmmap; /* one reserved MI vpage for /dev/mem */ 350void * vmmap; /* one reserved MI vpage for /dev/mem */
351/*void * vdumppages; -* 32KB worth of reserved dump pages */ 351/*void * vdumppages; -* 32KB worth of reserved dump pages */
352 352
353smeg_t tregion; /* [4/3mmu] Region for temporary mappings */ 353smeg_t tregion; /* [4/3mmu] Region for temporary mappings */
354 354
355static struct pmap kernel_pmap_store; /* the kernel's pmap */ 355static struct pmap kernel_pmap_store; /* the kernel's pmap */
356struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; /* pmap_kernel() */ 356struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; /* pmap_kernel() */
357struct regmap kernel_regmap_store[NKREG]; /* the kernel's regmap */ 357struct regmap kernel_regmap_store[NKREG]; /* the kernel's regmap */
358struct segmap kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */ 358struct segmap kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */
359 359
360#if defined(SUN4M) || defined(SUN4D) 360#if defined(SUN4M) || defined(SUN4D)
361u_int *kernel_regtable_store; /* 1k of storage to map the kernel */ 361u_int *kernel_regtable_store; /* 1k of storage to map the kernel */
362u_int *kernel_segtable_store; /* 2k of storage to map the kernel */ 362u_int *kernel_segtable_store; /* 2k of storage to map the kernel */
363u_int *kernel_pagtable_store; /* 128k of storage to map the kernel */ 363u_int *kernel_pagtable_store; /* 128k of storage to map the kernel */
364 364
365/* 365/*
366 * Memory pools and back-end supplier for SRMMU page tables. 366 * Memory pools and back-end supplier for SRMMU page tables.
367 * Share a pool between the level 2 and level 3 page tables, 367 * Share a pool between the level 2 and level 3 page tables,
368 * since these are equal in size. 368 * since these are equal in size.
369 */ 369 */
370static struct pool L1_pool; 370static struct pool L1_pool;
371static struct pool L23_pool; 371static struct pool L23_pool;
372 372
373static void *pgt_page_alloc(struct pool *, int); 373static void *pgt_page_alloc(struct pool *, int);
374static void pgt_page_free(struct pool *, void *); 374static void pgt_page_free(struct pool *, void *);
375 375
376static struct pool_allocator pgt_page_allocator = { 376static struct pool_allocator pgt_page_allocator = {
377 pgt_page_alloc, pgt_page_free, 0, 377 pgt_page_alloc, pgt_page_free, 0,
378}; 378};
379 379
380#endif /* SUN4M || SUN4D */ 380#endif /* SUN4M || SUN4D */
381 381
382#if defined(SUN4) || defined(SUN4C) 382#if defined(SUN4) || defined(SUN4C)
383/* 383/*
384 * Memory pool for user and kernel PTE tables. 384 * Memory pool for user and kernel PTE tables.
385 */ 385 */
386static struct pool pte_pool; 386static struct pool pte_pool;
387#endif 387#endif
388 388
389struct memarr *pmemarr; /* physical memory regions */ 389struct memarr *pmemarr; /* physical memory regions */
390int npmemarr; /* number of entries in pmemarr */ 390int npmemarr; /* number of entries in pmemarr */
391 391
392static paddr_t avail_start; /* first available physical page, other 392static paddr_t avail_start; /* first available physical page, other
393 than the `etext gap' defined below */ 393 than the `etext gap' defined below */
394static vaddr_t etext_gap_start;/* start of gap between text & data */ 394static vaddr_t etext_gap_start;/* start of gap between text & data */
395static vaddr_t etext_gap_end; /* end of gap between text & data */ 395static vaddr_t etext_gap_end; /* end of gap between text & data */
396static vaddr_t virtual_avail; /* first free kernel virtual address */ 396static vaddr_t virtual_avail; /* first free kernel virtual address */
397static vaddr_t virtual_end; /* last free kernel virtual address */ 397static vaddr_t virtual_end; /* last free kernel virtual address */
398 398
399static void pmap_page_upload(void); 399static void pmap_page_upload(void);
400 400
401int mmu_has_hole; 401int mmu_has_hole;
402 402
403vaddr_t prom_vstart; /* For /dev/kmem */ 403vaddr_t prom_vstart; /* For /dev/kmem */
404vaddr_t prom_vend; 404vaddr_t prom_vend;
405 405
406/* 406/*
407 * Memory pool for pmap structures. 407 * Memory pool for pmap structures.
408 */ 408 */
409static struct pool_cache pmap_cache; 409static struct pool_cache pmap_cache;
410static int pmap_pmap_pool_ctor(void *, void *, int); 410static int pmap_pmap_pool_ctor(void *, void *, int);
411static void pmap_pmap_pool_dtor(void *, void *); 411static void pmap_pmap_pool_dtor(void *, void *);
412static struct pool segmap_pool; 412static struct pool segmap_pool;
413 413
414#if defined(SUN4) 414#if defined(SUN4)
415/* 415/*
416 * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a 416 * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a
417 * partly invalid value. getsegmap returns a 16 bit value on the sun4, 417 * partly invalid value. getsegmap returns a 16 bit value on the sun4,
418 * but only the first 8 or so bits are valid (the rest are *supposed* to 418 * but only the first 8 or so bits are valid (the rest are *supposed* to
419 * be zero. On the 4/110 the bits that are supposed to be zero are 419 * be zero. On the 4/110 the bits that are supposed to be zero are
420 * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero. 420 * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero.
421 * On a 4/300 getsegmap(KERNBASE) == 0x0000, but 421 * On a 4/300 getsegmap(KERNBASE) == 0x0000, but
422 * on a 4/100 getsegmap(KERNBASE) == 0xff00 422 * on a 4/100 getsegmap(KERNBASE) == 0xff00
423 * 423 *
424 * This confuses mmu_reservemon() and causes it to not reserve the PROM's 424 * This confuses mmu_reservemon() and causes it to not reserve the PROM's
425 * pmegs. Then the PROM's pmegs get used during autoconfig and everything 425 * pmegs. Then the PROM's pmegs get used during autoconfig and everything
426 * falls apart! (not very fun to debug, BTW.) 426 * falls apart! (not very fun to debug, BTW.)
427 * 427 *
428 * solution: mask the invalid bits in the getsetmap macro. 428 * solution: mask the invalid bits in the getsetmap macro.
429 */ 429 */
430 430
431static u_int segfixmask = 0xffffffff; /* all bits valid to start */ 431static u_int segfixmask = 0xffffffff; /* all bits valid to start */
432#else 432#else
433#define segfixmask 0xffffffff /* It's in getsegmap's scope */ 433#define segfixmask 0xffffffff /* It's in getsegmap's scope */
434#endif 434#endif
435 435
436/* 436/*
437 * pseudo-functions for mnemonic value 437 * pseudo-functions for mnemonic value
438 */ 438 */
439#define getsegmap(va) (CPU_ISSUN4C \ 439#define getsegmap(va) (CPU_ISSUN4C \
440 ? lduba(va, ASI_SEGMAP) \ 440 ? lduba(va, ASI_SEGMAP) \
441 : (lduha(va, ASI_SEGMAP) & segfixmask)) 441 : (lduha(va, ASI_SEGMAP) & segfixmask))
442#define setsegmap(va, pmeg) (CPU_ISSUN4C \ 442#define setsegmap(va, pmeg) (CPU_ISSUN4C \
443 ? stba(va, ASI_SEGMAP, pmeg) \ 443 ? stba(va, ASI_SEGMAP, pmeg) \
444 : stha(va, ASI_SEGMAP, pmeg)) 444 : stha(va, ASI_SEGMAP, pmeg))
445 445
446/* 3-level sun4 MMU only: */ 446/* 3-level sun4 MMU only: */
447#define getregmap(va) ((unsigned)lduha((va)+2, ASI_REGMAP) >> 8) 447#define getregmap(va) ((unsigned)lduha((va)+2, ASI_REGMAP) >> 8)
448#define setregmap(va, smeg) stha((va)+2, ASI_REGMAP, (smeg << 8)) 448#define setregmap(va, smeg) stha((va)+2, ASI_REGMAP, (smeg << 8))
449 449
450 450
451#if defined(SUN4M) || defined(SUN4D) 451#if defined(SUN4M) || defined(SUN4D)
452#if 0 452#if 0
453#if VM_PROT_READ != 1 || VM_PROT_WRITE != 2 || VM_PROT_EXECUTE != 4 453#if VM_PROT_READ != 1 || VM_PROT_WRITE != 2 || VM_PROT_EXECUTE != 4
454#error fix protection code translation table 454#error fix protection code translation table
455#endif 455#endif
456#endif 456#endif
457/* 457/*
458 * Translation table for kernel vs. PTE protection bits. 458 * Translation table for kernel vs. PTE protection bits.
459 */ 459 */
460const u_int protection_codes[2][8] = { 460const u_int protection_codes[2][8] = {
461 /* kernel */ 461 /* kernel */
462 { 462 {
463 PPROT_N_RX, /* VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE */ 463 PPROT_N_RX, /* VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE */
464 PPROT_N_RX, /* VM_PROT_NONE | VM_PROT_NONE | VM_PROT_READ */ 464 PPROT_N_RX, /* VM_PROT_NONE | VM_PROT_NONE | VM_PROT_READ */
465 PPROT_N_RWX, /* VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE */ 465 PPROT_N_RWX, /* VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE */
466 PPROT_N_RWX, /* VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_READ */ 466 PPROT_N_RWX, /* VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_READ */
467 PPROT_N_RX, /* VM_PROT_EXECUTE | VM_PROT_NONE | VM_PROT_NONE */ 467 PPROT_N_RX, /* VM_PROT_EXECUTE | VM_PROT_NONE | VM_PROT_NONE */
468 PPROT_N_RX, /* VM_PROT_EXECUTE | VM_PROT_NONE | VM_PROT_READ */ 468 PPROT_N_RX, /* VM_PROT_EXECUTE | VM_PROT_NONE | VM_PROT_READ */
469 PPROT_N_RWX, /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_NONE */ 469 PPROT_N_RWX, /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_NONE */
470 PPROT_N_RWX, /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_READ */ 470 PPROT_N_RWX, /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_READ */
471 }, 471 },
472 472
473 /* user */ 473 /* user */
474 { 474 {
475 PPROT_N_RX, /* VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE */ 475 PPROT_N_RX, /* VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE */
476 PPROT_R_R, /* VM_PROT_NONE | VM_PROT_NONE | VM_PROT_READ */ 476 PPROT_R_R, /* VM_PROT_NONE | VM_PROT_NONE | VM_PROT_READ */
477 PPROT_RW_RW, /* VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE */ 477 PPROT_RW_RW, /* VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE */
478 PPROT_RW_RW, /* VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_READ */ 478 PPROT_RW_RW, /* VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_READ */
479 PPROT_X_X, /* VM_PROT_EXECUTE | VM_PROT_NONE | VM_PROT_NONE */ 479 PPROT_X_X, /* VM_PROT_EXECUTE | VM_PROT_NONE | VM_PROT_NONE */
480 PPROT_RX_RX, /* VM_PROT_EXECUTE | VM_PROT_NONE | VM_PROT_READ */ 480 PPROT_RX_RX, /* VM_PROT_EXECUTE | VM_PROT_NONE | VM_PROT_READ */
481 PPROT_RWX_RWX, /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_NONE */ 481 PPROT_RWX_RWX, /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_NONE */
482 PPROT_RWX_RWX, /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_READ */ 482 PPROT_RWX_RWX, /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_READ */
483 } 483 }
484}; 484};
485#define pte_kprot4m(prot) (protection_codes[0][(prot)]) 485#define pte_kprot4m(prot) (protection_codes[0][(prot)])
486#define pte_uprot4m(prot) (protection_codes[1][(prot)]) 486#define pte_uprot4m(prot) (protection_codes[1][(prot)])
487#define pte_prot4m(pm, prot) \ 487#define pte_prot4m(pm, prot) \
488 (protection_codes[(pm) == pmap_kernel() ? 0 : 1][(prot)]) 488 (protection_codes[(pm) == pmap_kernel() ? 0 : 1][(prot)])
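
/*
 * Standalone illustrative sketch: protection_codes[][] is indexed directly
 * by the VM_PROT_* bitmask (READ=1, WRITE=2, EXECUTE=4, as the #if 0 check
 * above assumes), with row 0 for the kernel pmap and row 1 for user pmaps.
 * The table contents below are dummy values and the names are
 * hypothetical; only the indexing scheme is the point.
 */
#include <assert.h>

enum { DEMO_PROT_READ = 1, DEMO_PROT_WRITE = 2, DEMO_PROT_EXEC = 4 };

static const int demo_codes[2][8] = {
	/* kernel */ { 10, 10, 11, 11, 10, 10, 11, 11 },
	/* user   */ { 20, 21, 22, 22, 23, 24, 25, 25 },
};

#define demo_pte_prot(is_kernel, prot) \
	(demo_codes[(is_kernel) ? 0 : 1][(prot)])

int
main(void)
{
	/* Kernel read/write maps to the same code as write-only. */
	assert(demo_pte_prot(1, DEMO_PROT_READ | DEMO_PROT_WRITE) == 11);
	/* User read+execute picks a different entry. */
	assert(demo_pte_prot(0, DEMO_PROT_READ | DEMO_PROT_EXEC) == 24);
	return 0;
}
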
489 489
490void setpte4m(vaddr_t va, int pte); 490void setpte4m(vaddr_t va, int pte);
491void setpgt4m(int *ptep, int pte); 491void setpgt4m(int *ptep, int pte);
492void setpgt4m_va(vaddr_t, int *, int, int, int, u_int); 492void setpgt4m_va(vaddr_t, int *, int, int, int, u_int);
493int updatepte4m(vaddr_t, int *, int, int, int, u_int); 493int updatepte4m(vaddr_t, int *, int, int, int, u_int);
494#endif /* SUN4M || SUN4D */ 494#endif /* SUN4M || SUN4D */
495 495
496#if defined(MULTIPROCESSOR) 496#if defined(MULTIPROCESSOR)
497#define PMAP_SET_CPUSET(pmap, cpi) \ 497#define PMAP_SET_CPUSET(pmap, cpi) \
498 (pmap->pm_cpuset |= (1 << (cpi)->ci_cpuid)) 498 (pmap->pm_cpuset |= (1 << (cpi)->ci_cpuid))
499#define PMAP_CLR_CPUSET(pmap, cpi) \ 499#define PMAP_CLR_CPUSET(pmap, cpi) \
500 (pmap->pm_cpuset &= ~(1 << (cpi)->ci_cpuid)) 500 (pmap->pm_cpuset &= ~(1 << (cpi)->ci_cpuid))
501#define PMAP_CPUSET(pmap) (pmap->pm_cpuset) 501#define PMAP_CPUSET(pmap) (pmap->pm_cpuset)
502#else 502#else
503#define PMAP_SET_CPUSET(pmap, cpi) /* nothing */ 503#define PMAP_SET_CPUSET(pmap, cpi) /* nothing */
504#define PMAP_CLR_CPUSET(pmap, cpi) /* nothing */ 504#define PMAP_CLR_CPUSET(pmap, cpi) /* nothing */
505#define PMAP_CPUSET(pmap) 1 /* XXX: 1 or 0? */ 505#define PMAP_CPUSET(pmap) 1 /* XXX: 1 or 0? */
506#endif /* MULTIPROCESSOR */ 506#endif /* MULTIPROCESSOR */
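
/*
 * Standalone illustrative sketch: pm_cpuset is a plain bitmask with one
 * bit per CPU, so the PMAP_*_CPUSET() macros above reduce to setting,
 * clearing and reading bits keyed by ci_cpuid.  The names below are
 * hypothetical; the real macros operate on struct pmap / cpu_info.
 */
#include <assert.h>

static unsigned int demo_cpuset;

static void demo_set_cpu(int cpuid)    { demo_cpuset |=  (1u << cpuid); }
static void demo_clr_cpu(int cpuid)    { demo_cpuset &= ~(1u << cpuid); }
static int  demo_cpu_in_set(int cpuid) { return (demo_cpuset >> cpuid) & 1; }

int
main(void)
{
	demo_set_cpu(0);
	demo_set_cpu(2);
	assert(demo_cpuset == 0x5);
	assert(demo_cpu_in_set(2) && !demo_cpu_in_set(1));
	demo_clr_cpu(0);
	assert(demo_cpuset == 0x4);
	return 0;
}
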
507 507
508 508
509/* Function pointer messiness for supporting multiple sparc architectures 509/* Function pointer messiness for supporting multiple sparc architectures
510 * within a single kernel: notice that there are two versions of many of the 510 * within a single kernel: notice that there are two versions of many of the
511 * functions within this file/module, one for the sun4/sun4c and the other 511 * functions within this file/module, one for the sun4/sun4c and the other
512 * for the sun4m. For performance reasons (since things like pte bits don't 512 * for the sun4m. For performance reasons (since things like pte bits don't
513 * map nicely between the two architectures), there are separate functions 513 * map nicely between the two architectures), there are separate functions
514 * rather than unified functions which test the cputyp variable. If only 514 * rather than unified functions which test the cputyp variable. If only
515 * one architecture is being used, then the non-suffixed function calls 515 * one architecture is being used, then the non-suffixed function calls
516 * are macro-translated into the appropriate xxx4_4c or xxx4m call. If 516 * are macro-translated into the appropriate xxx4_4c or xxx4m call. If
517 * multiple architectures are defined, the calls translate to (*xxx_p), 517 * multiple architectures are defined, the calls translate to (*xxx_p),
518 * i.e. they indirect through function pointers initialized as appropriate 518 * i.e. they indirect through function pointers initialized as appropriate
519 * to the run-time architecture in pmap_bootstrap. See also pmap.h. 519 * to the run-time architecture in pmap_bootstrap. See also pmap.h.
520 */ 520 */
521 521
522#if defined(SUN4M) || defined(SUN4D) 522#if defined(SUN4M) || defined(SUN4D)
523static void mmu_setup4m_L1(int, struct pmap *); 523static void mmu_setup4m_L1(int, struct pmap *);
524static void mmu_setup4m_L2(int, struct regmap *); 524static void mmu_setup4m_L2(int, struct regmap *);
525static void mmu_setup4m_L3(int, struct segmap *); 525static void mmu_setup4m_L3(int, struct segmap *);
526/*static*/ void mmu_reservemon4m(struct pmap *); 526/*static*/ void mmu_reservemon4m(struct pmap *);
527 527
528/*static*/ void pmap_changeprot4m(pmap_t, vaddr_t, vm_prot_t, int); 528/*static*/ void pmap_changeprot4m(pmap_t, vaddr_t, vm_prot_t, int);
529/*static*/ void pmap_rmk4m(struct pmap *, vaddr_t, vaddr_t, int, int); 529/*static*/ void pmap_rmk4m(struct pmap *, vaddr_t, vaddr_t, int, int);
530/*static*/ void pmap_rmu4m(struct pmap *, vaddr_t, vaddr_t, int, int); 530/*static*/ void pmap_rmu4m(struct pmap *, vaddr_t, vaddr_t, int, int);
531/*static*/ int pmap_enk4m(struct pmap *, vaddr_t, vm_prot_t, 531/*static*/ int pmap_enk4m(struct pmap *, vaddr_t, vm_prot_t,
532 int, struct vm_page *, int); 532 int, struct vm_page *, int);
533/*static*/ int pmap_enu4m(struct pmap *, vaddr_t, vm_prot_t, 533/*static*/ int pmap_enu4m(struct pmap *, vaddr_t, vm_prot_t,
534 int, struct vm_page *, int); 534 int, struct vm_page *, int);
535/*static*/ void pv_changepte4m(struct vm_page *, int, int); 535/*static*/ void pv_changepte4m(struct vm_page *, int, int);
536/*static*/ int pv_syncflags4m(struct vm_page *); 536/*static*/ int pv_syncflags4m(struct vm_page *);
537/*static*/ int pv_link4m(struct vm_page *, struct pmap *, vaddr_t, u_int *); 537/*static*/ int pv_link4m(struct vm_page *, struct pmap *, vaddr_t, u_int *);
538/*static*/ void pv_unlink4m(struct vm_page *, struct pmap *, vaddr_t); 538/*static*/ void pv_unlink4m(struct vm_page *, struct pmap *, vaddr_t);
539#endif 539#endif
540 540
541#if defined(SUN4) || defined(SUN4C) 541#if defined(SUN4) || defined(SUN4C)
542/*static*/ void mmu_reservemon4_4c(int *, int *); 542/*static*/ void mmu_reservemon4_4c(int *, int *);
543/*static*/ void pmap_changeprot4_4c(pmap_t, vaddr_t, vm_prot_t, int); 543/*static*/ void pmap_changeprot4_4c(pmap_t, vaddr_t, vm_prot_t, int);
544/*static*/ void pmap_rmk4_4c(struct pmap *, vaddr_t, vaddr_t, int, int); 544/*static*/ void pmap_rmk4_4c(struct pmap *, vaddr_t, vaddr_t, int, int);
545/*static*/ void pmap_rmu4_4c(struct pmap *, vaddr_t, vaddr_t, int, int); 545/*static*/ void pmap_rmu4_4c(struct pmap *, vaddr_t, vaddr_t, int, int);
546/*static*/ int pmap_enk4_4c(struct pmap *, vaddr_t, vm_prot_t, 546/*static*/ int pmap_enk4_4c(struct pmap *, vaddr_t, vm_prot_t,
547 int, struct vm_page *, int); 547 int, struct vm_page *, int);
548/*static*/ int pmap_enu4_4c(struct pmap *, vaddr_t, vm_prot_t, 548/*static*/ int pmap_enu4_4c(struct pmap *, vaddr_t, vm_prot_t,
549 int, struct vm_page *, int); 549 int, struct vm_page *, int);
550/*static*/ void pv_changepte4_4c(struct vm_page *, int, int); 550/*static*/ void pv_changepte4_4c(struct vm_page *, int, int);
551/*static*/ int pv_syncflags4_4c(struct vm_page *); 551/*static*/ int pv_syncflags4_4c(struct vm_page *);
552/*static*/ int pv_link4_4c(struct vm_page *, struct pmap *, vaddr_t, u_int *); 552/*static*/ int pv_link4_4c(struct vm_page *, struct pmap *, vaddr_t, u_int *);
553/*static*/ void pv_unlink4_4c(struct vm_page *, struct pmap *, vaddr_t); 553/*static*/ void pv_unlink4_4c(struct vm_page *, struct pmap *, vaddr_t);
554#endif 554#endif
555 555
556#if !(defined(SUN4M) || defined(SUN4D)) && (defined(SUN4) || defined(SUN4C)) 556#if !(defined(SUN4M) || defined(SUN4D)) && (defined(SUN4) || defined(SUN4C))
557#define pmap_rmk pmap_rmk4_4c 557#define pmap_rmk pmap_rmk4_4c
558#define pmap_rmu pmap_rmu4_4c 558#define pmap_rmu pmap_rmu4_4c
559 559
560#elif (defined(SUN4M) || defined(SUN4D)) && !(defined(SUN4) || defined(SUN4C)) 560#elif (defined(SUN4M) || defined(SUN4D)) && !(defined(SUN4) || defined(SUN4C))
561#define pmap_rmk pmap_rmk4m 561#define pmap_rmk pmap_rmk4m
562#define pmap_rmu pmap_rmu4m 562#define pmap_rmu pmap_rmu4m
563 563
564#else /* must use function pointers */ 564#else /* must use function pointers */
565 565
566/* function pointer declarations */ 566/* function pointer declarations */
567/* from pmap.h: */ 567/* from pmap.h: */
568bool (*pmap_clear_modify_p)(struct vm_page *); 568bool (*pmap_clear_modify_p)(struct vm_page *);
569bool (*pmap_clear_reference_p)(struct vm_page *); 569bool (*pmap_clear_reference_p)(struct vm_page *);
570int (*pmap_enter_p)(pmap_t, vaddr_t, paddr_t, vm_prot_t, int); 570int (*pmap_enter_p)(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
571bool (*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *); 571bool (*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *);
572bool (*pmap_is_modified_p)(struct vm_page *); 572bool (*pmap_is_modified_p)(struct vm_page *);
573bool (*pmap_is_referenced_p)(struct vm_page *); 573bool (*pmap_is_referenced_p)(struct vm_page *);
574void (*pmap_kenter_pa_p)(vaddr_t, paddr_t, vm_prot_t); 574void (*pmap_kenter_pa_p)(vaddr_t, paddr_t, vm_prot_t);
575void (*pmap_kremove_p)(vaddr_t, vsize_t); 575void (*pmap_kremove_p)(vaddr_t, vsize_t);
576void (*pmap_kprotect_p)(vaddr_t, vsize_t, vm_prot_t); 576void (*pmap_kprotect_p)(vaddr_t, vsize_t, vm_prot_t);
577void (*pmap_page_protect_p)(struct vm_page *, vm_prot_t); 577void (*pmap_page_protect_p)(struct vm_page *, vm_prot_t);
578void (*pmap_protect_p)(pmap_t, vaddr_t, vaddr_t, vm_prot_t); 578void (*pmap_protect_p)(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
579/* local: */ 579/* local: */
580void (*pmap_rmk_p)(struct pmap *, vaddr_t, vaddr_t, int, int); 580void (*pmap_rmk_p)(struct pmap *, vaddr_t, vaddr_t, int, int);
581void (*pmap_rmu_p)(struct pmap *, vaddr_t, vaddr_t, int, int); 581void (*pmap_rmu_p)(struct pmap *, vaddr_t, vaddr_t, int, int);
582 582
583#define pmap_rmk (*pmap_rmk_p) 583#define pmap_rmk (*pmap_rmk_p)
584#define pmap_rmu (*pmap_rmu_p) 584#define pmap_rmu (*pmap_rmu_p)
585 585
586#endif 586#endif
587 587
588/* --------------------------------------------------------------*/ 588/* --------------------------------------------------------------*/
589 589
590/* 590/*
591 * Next we have some sun4m/4d-specific routines which have no 4/4c 591 * Next we have some sun4m/4d-specific routines which have no 4/4c
592 * counterparts, or which are 4/4c macros. 592 * counterparts, or which are 4/4c macros.
593 */ 593 */
594 594
595#if defined(SUN4M) || defined(SUN4D) 595#if defined(SUN4M) || defined(SUN4D)
596/* 596/*
597 * SP versions of the tlb flush operations. 597 * SP versions of the tlb flush operations.
598 * 598 *
599 * Turn off traps to prevent register window overflows 599 * Turn off traps to prevent register window overflows
600 * from writing user windows to the wrong stack. 600 * from writing user windows to the wrong stack.
601 */ 601 */
602static void 602static void
603sp_tlb_flush(int va, int ctx, int lvl) 603sp_tlb_flush(int va, int ctx, int lvl)
604{ 604{
605 605
606 /* Traps off */ 606 /* Traps off */
607 __asm("rd %psr, %o3"); 607 __asm("rd %psr, %o3");
608 __asm("wr %%o3, %0, %%psr" :: "n" (PSR_ET)); 608 __asm("wr %%o3, %0, %%psr" :: "n" (PSR_ET));
609 609
610 /* Save context */ 610 /* Save context */
611 __asm("mov %0, %%o4" :: "n"(SRMMU_CXR)); 611 __asm("mov %0, %%o4" :: "n"(SRMMU_CXR));
612 __asm("lda [%%o4]%0, %%o5" :: "n"(ASI_SRMMU)); 612 __asm("lda [%%o4]%0, %%o5" :: "n"(ASI_SRMMU));
613 613
614 /* Set new context and flush type bits */ 614 /* Set new context and flush type bits */
615 __asm("andn %o0, 0xfff, %o0"); 615 __asm("andn %o0, 0xfff, %o0");
616 __asm("sta %%o1, [%%o4]%0" :: "n"(ASI_SRMMU)); 616 __asm("sta %%o1, [%%o4]%0" :: "n"(ASI_SRMMU));
617 __asm("or %o0, %o2, %o0"); 617 __asm("or %o0, %o2, %o0");
618 618
619 /* Do the TLB flush */ 619 /* Do the TLB flush */
620 __asm("sta %%g0, [%%o0]%0" :: "n"(ASI_SRMMUFP)); 620 __asm("sta %%g0, [%%o0]%0" :: "n"(ASI_SRMMUFP));
621 621
622 /* Restore context */ 622 /* Restore context */
623 __asm("sta %%o5, [%%o4]%0" :: "n"(ASI_SRMMU)); 623 __asm("sta %%o5, [%%o4]%0" :: "n"(ASI_SRMMU));
624 624
625 /* and turn traps on again */ 625 /* and turn traps on again */
626 __asm("wr %o3, 0, %psr"); 626 __asm("wr %o3, 0, %psr");
627 __asm("nop"); 627 __asm("nop");
628 __asm("nop"); 628 __asm("nop");
629 __asm("nop"); 629 __asm("nop");
630} 630}
631 631
632static inline void 632static inline void
633sp_tlb_flush_all(void) 633sp_tlb_flush_all(void)
634{ 634{
635 635
636 sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0); 636 sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0);
637} 637}
638 638
639#if defined(MULTIPROCESSOR) 639#if defined(MULTIPROCESSOR)
640/* 640/*
641 * The SMP versions of the tlb flush routines. We only need to 641 * The SMP versions of the tlb flush routines. We only need to
642 * do a cross call for these on sun4m (Mbus) systems. sun4d systems 642 * do a cross call for these on sun4m (Mbus) systems. sun4d systems
643 * have an Xbus which broadcasts TLB demaps in hardware. 643 * have an Xbus which broadcasts TLB demaps in hardware.
644 */ 644 */
645 645
646static inline void smp_tlb_flush_page (int va, int ctx, u_int cpuset); 646static inline void smp_tlb_flush_page (int va, int ctx, u_int cpuset);
647static inline void smp_tlb_flush_segment (int va, int ctx, u_int cpuset); 647static inline void smp_tlb_flush_segment (int va, int ctx, u_int cpuset);
648static inline void smp_tlb_flush_region (int va, int ctx, u_int cpuset); 648static inline void smp_tlb_flush_region (int va, int ctx, u_int cpuset);
649static inline void smp_tlb_flush_context (int ctx, u_int cpuset); 649static inline void smp_tlb_flush_context (int ctx, u_int cpuset);
650static inline void smp_tlb_flush_all (void); 650static inline void smp_tlb_flush_all (void);
651 651
652/* From locore: */ 652/* From locore: */
653extern void ft_tlb_flush(int va, int ctx, int lvl); 653extern void ft_tlb_flush(int va, int ctx, int lvl);
654 654
655static inline void 655static inline void
656smp_tlb_flush_page(int va, int ctx, u_int cpuset) 656smp_tlb_flush_page(int va, int ctx, u_int cpuset)
657{ 657{
658 658
659 if (CPU_ISSUN4D) { 659 if (CPU_ISSUN4D) {
660 sp_tlb_flush(va, ctx, ASI_SRMMUFP_L3); 660 sp_tlb_flush(va, ctx, ASI_SRMMUFP_L3);
661 } else 661 } else
662 FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L3, cpuset); 662 FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L3, cpuset);
663} 663}
664 664
665static inline void 665static inline void
666smp_tlb_flush_segment(int va, int ctx, u_int cpuset) 666smp_tlb_flush_segment(int va, int ctx, u_int cpuset)
667{ 667{
668 668
669 if (CPU_ISSUN4D) { 669 if (CPU_ISSUN4D) {
670 sp_tlb_flush(va, ctx, ASI_SRMMUFP_L2); 670 sp_tlb_flush(va, ctx, ASI_SRMMUFP_L2);
671 } else 671 } else
672 FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L2, cpuset); 672 FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L2, cpuset);
673} 673}
674 674
675static inline void 675static inline void
676smp_tlb_flush_region(int va, int ctx, u_int cpuset) 676smp_tlb_flush_region(int va, int ctx, u_int cpuset)
677{ 677{
678 678
679 if (CPU_ISSUN4D) { 679 if (CPU_ISSUN4D) {
680 sp_tlb_flush(va, ctx, ASI_SRMMUFP_L1); 680 sp_tlb_flush(va, ctx, ASI_SRMMUFP_L1);
681 } else 681 } else
682 FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L1, cpuset); 682 FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L1, cpuset);
683} 683}
684 684
685static inline void 685static inline void
686smp_tlb_flush_context(int ctx, u_int cpuset) 686smp_tlb_flush_context(int ctx, u_int cpuset)
687{ 687{
688 688
689 if (CPU_ISSUN4D) { 689 if (CPU_ISSUN4D) {
690 sp_tlb_flush(ctx, 0, ASI_SRMMUFP_L0); 690 sp_tlb_flush(ctx, 0, ASI_SRMMUFP_L0);
691 } else 691 } else
692 FXCALL3(sp_tlb_flush, ft_tlb_flush, 0, ctx, ASI_SRMMUFP_L0, cpuset); 692 FXCALL3(sp_tlb_flush, ft_tlb_flush, 0, ctx, ASI_SRMMUFP_L0, cpuset);
693} 693}
694 694
695static inline void 695static inline void
696smp_tlb_flush_all(void) 696smp_tlb_flush_all(void)
697{ 697{
698 698
699 if (CPU_ISSUN4D) { 699 if (CPU_ISSUN4D) {
700 sp_tlb_flush_all(); 700 sp_tlb_flush_all();
701 } else 701 } else
702 XCALL0(sp_tlb_flush_all, CPUSET_ALL); 702 XCALL0(sp_tlb_flush_all, CPUSET_ALL);
703} 703}
704#endif /* MULTIPROCESSOR */ 704#endif /* MULTIPROCESSOR */
705 705
706#if defined(MULTIPROCESSOR) 706#if defined(MULTIPROCESSOR)
707#define tlb_flush_page(va,ctx,s) smp_tlb_flush_page(va,ctx,s) 707#define tlb_flush_page(va,ctx,s) smp_tlb_flush_page(va,ctx,s)
708#define tlb_flush_segment(va,ctx,s) smp_tlb_flush_segment(va,ctx,s) 708#define tlb_flush_segment(va,ctx,s) smp_tlb_flush_segment(va,ctx,s)
709#define tlb_flush_region(va,ctx,s) smp_tlb_flush_region(va,ctx,s) 709#define tlb_flush_region(va,ctx,s) smp_tlb_flush_region(va,ctx,s)
710#define tlb_flush_context(ctx,s) smp_tlb_flush_context(ctx,s) 710#define tlb_flush_context(ctx,s) smp_tlb_flush_context(ctx,s)
711#define tlb_flush_all() smp_tlb_flush_all() 711#define tlb_flush_all() smp_tlb_flush_all()
712#else 712#else
713#define tlb_flush_page(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L3) 713#define tlb_flush_page(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L3)
714#define tlb_flush_segment(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L2) 714#define tlb_flush_segment(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L2)
715#define tlb_flush_region(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L1) 715#define tlb_flush_region(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L1)
716#define tlb_flush_context(ctx,s) sp_tlb_flush(ctx,0,ASI_SRMMUFP_L0) 716#define tlb_flush_context(ctx,s) sp_tlb_flush(ctx,0,ASI_SRMMUFP_L0)
717#define tlb_flush_all() sp_tlb_flush_all() 717#define tlb_flush_all() sp_tlb_flush_all()
718#endif /* MULTIPROCESSOR */ 718#endif /* MULTIPROCESSOR */
719 719
720static u_int VA2PA(void *); 720static u_int VA2PA(void *);
721static u_long srmmu_bypass_read(u_long); 721static u_long srmmu_bypass_read(u_long);
722 722
723/* 723/*
724 * VA2PA(addr) -- converts a virtual address to a physical address using 724 * VA2PA(addr) -- converts a virtual address to a physical address using
725 * the MMU's currently-installed page tables. As a side effect, the address 725 * the MMU's currently-installed page tables. As a side effect, the address
726 * translation used may cause the associated pte to be encached. The correct 726 * translation used may cause the associated pte to be encached. The correct
727 * context for VA must be set before this is called. 727 * context for VA must be set before this is called.
728 * 728 *
729 * This routine should work with any level of mapping, as it is used 729 * This routine should work with any level of mapping, as it is used
730 * during bootup to interact with the ROM's initial L1 mapping of the kernel. 730 * during bootup to interact with the ROM's initial L1 mapping of the kernel.
731 */ 731 */
732static u_int 732static u_int
733VA2PA(void *addr) 733VA2PA(void *addr)
734{ 734{
735 u_int pte; 735 u_int pte;
736 736
737 /* 737 /*
738 * We'll use that handy SRMMU flush/probe. 738 * We'll use that handy SRMMU flush/probe.
739 * Try each level in turn until we find a valid pte. Otherwise panic. 739 * Try each level in turn until we find a valid pte. Otherwise panic.
740 */ 740 */
741 741
742 pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP); 742 pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
743 /* Unlock fault status; required on Hypersparc modules */ 743 /* Unlock fault status; required on Hypersparc modules */
744 (void)lda(SRMMU_SFSR, ASI_SRMMU); 744 (void)lda(SRMMU_SFSR, ASI_SRMMU);
745 if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) 745 if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
746 return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | 746 return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
747 ((u_int)addr & 0xfff)); 747 ((u_int)addr & 0xfff));
748 748
749 /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */ 749 /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */
750 tlb_flush_all_real(); 750 tlb_flush_all_real();
751 751
752 pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP); 752 pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP);
753 if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) 753 if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
754 return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | 754 return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
755 ((u_int)addr & 0x3ffff)); 755 ((u_int)addr & 0x3ffff));
756 pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP); 756 pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP);
757 if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) 757 if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
758 return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | 758 return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
759 ((u_int)addr & 0xffffff)); 759 ((u_int)addr & 0xffffff));
760 pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP); 760 pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP);
761 if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) 761 if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
762 return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | 762 return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
763 ((u_int)addr & 0xffffffff)); 763 ((u_int)addr & 0xffffffff));
764 764
765#ifdef DIAGNOSTIC 765#ifdef DIAGNOSTIC
766 panic("VA2PA: Asked to translate unmapped VA %p", addr); 766 panic("VA2PA: Asked to translate unmapped VA %p", addr);
767#else 767#else
768 return (0); 768 return (0);
769#endif 769#endif
770} 770}
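A minimal stand-alone sketch of the address assembly VA2PA performs, assuming the usual SRMMU PTE layout (PPN in bits 8..31, physical address = PPN << 12, i.e. mask then shift left by 4); the offset masks mirror the ones above: 4KB page at L3, 256KB segment at L2, 16MB region at L1. The constants here are illustrative, not taken from the headers.

/* Illustrative only: combine a probe result with the per-level offset. */
static unsigned int
probe_to_pa(unsigned int pte, unsigned int va, unsigned int offmask)
{
	return ((pte & 0xffffff00U) << 4) | (va & offmask);
}
/* e.g. probe_to_pa(l3pte, va, 0xfff) for a page-level match,
 *      probe_to_pa(l1pte, va, 0xffffff) for a region-level match. */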
771 771
772/* 772/*
773 * Atomically update a PTE entry, coping with hardware updating the 773 * Atomically update a PTE entry, coping with hardware updating the
774 * PTE at the same time we are. This is the procedure that is 774 * PTE at the same time we are. This is the procedure that is
775 * recommended in the SuperSPARC user's manual. 775 * recommended in the SuperSPARC user's manual.
776 */ 776 */
777int 777int
778updatepte4m(vaddr_t va, int *pte, int bic, int bis, int ctx, u_int cpuset) 778updatepte4m(vaddr_t va, int *pte, int bic, int bis, int ctx, u_int cpuset)
779{ 779{
780 int oldval, swapval; 780 int oldval, swapval;
781 volatile int *vpte = (volatile int *)pte; 781 volatile int *vpte = (volatile int *)pte;
782 782
783 /* 783 /*
784 * Only one of these can be happening in the system 784 * Only one of these can be happening in the system
785 * at any one time. 785 * at any one time.
786 */ 786 */
787 mutex_spin_enter(&demap_lock); 787 mutex_spin_enter(&demap_lock);
788 788
789 /* 789 /*
790 * The idea is to loop swapping zero into the pte, flushing 790 * The idea is to loop swapping zero into the pte, flushing
791 * it, and repeating until it stays zero. At this point, 791 * it, and repeating until it stays zero. At this point,
792 * there should be no more hardware accesses to this PTE 792 * there should be no more hardware accesses to this PTE
793 * so we can modify it without losing any mod/ref info. 793 * so we can modify it without losing any mod/ref info.
794 */ 794 */
795 oldval = 0; 795 oldval = 0;
796 do { 796 do {
797 swapval = 0; 797 swapval = 0;
798 swap(vpte, swapval); 798 swap(vpte, swapval);
799 tlb_flush_page(va, ctx, cpuset); 799 tlb_flush_page(va, ctx, cpuset);
800 oldval |= swapval; 800 oldval |= swapval;
801 } while (__predict_false(*vpte != 0)); 801 } while (__predict_false(*vpte != 0));
802 802
803 swapval = (oldval & ~bic) | bis; 803 swapval = (oldval & ~bic) | bis;
804 swap(vpte, swapval); 804 swap(vpte, swapval);
805 805
806 mutex_spin_exit(&demap_lock); 806 mutex_spin_exit(&demap_lock);
807 807
808 return (oldval); 808 return (oldval);
809} 809}
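The same swap-zero/flush idea, as a hedged stand-alone sketch in portable C: __atomic_exchange_n stands in for the SPARC swap instruction, tlb_flush_page_stub() is a hypothetical placeholder for the real flush, and context/cpuset handling is omitted.

extern void tlb_flush_page_stub(void);	/* hypothetical stand-in */

static int
update_pte_sketch(volatile int *vpte, int bic, int bis)
{
	int oldval = 0;

	do {
		/* Atomically pull the current PTE out, leaving 0 behind. */
		oldval |= __atomic_exchange_n(vpte, 0, __ATOMIC_SEQ_CST);
		/* Hardware may write the old PTE back from its TLB copy
		 * with updated R/M bits; flushing removes that TLB entry. */
		tlb_flush_page_stub();
	} while (*vpte != 0);

	/* No more hardware updates possible; install the new value. */
	__atomic_exchange_n(vpte, (oldval & ~bic) | bis, __ATOMIC_SEQ_CST);
	return oldval;
}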
810 810
811inline void 811inline void
812setpgt4m(int *ptep, int pte) 812setpgt4m(int *ptep, int pte)
813{ 813{
814 814
815 swap(ptep, pte); 815 swap(ptep, pte);
816} 816}
817 817
818inline void 818inline void
819setpgt4m_va(vaddr_t va, int *ptep, int pte, int pageflush, int ctx, 819setpgt4m_va(vaddr_t va, int *ptep, int pte, int pageflush, int ctx,
820 u_int cpuset) 820 u_int cpuset)
821{ 821{
822 822
823#if defined(MULTIPROCESSOR) 823#if defined(MULTIPROCESSOR)
824 updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0, cpuset); 824 updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0, cpuset);
825#else 825#else
826 if (__predict_true(pageflush)) 826 if (__predict_true(pageflush))
827 tlb_flush_page(va, ctx, 0); 827 tlb_flush_page(va, ctx, 0);
828 setpgt4m(ptep, pte); 828 setpgt4m(ptep, pte);
829#endif /* MULTIPROCESSOR */ 829#endif /* MULTIPROCESSOR */
830} 830}
831 831
832/* Set the page table entry for va to pte. */ 832/* Set the page table entry for va to pte. */
833void 833void
834setpte4m(vaddr_t va, int pte) 834setpte4m(vaddr_t va, int pte)
835{ 835{
836 struct pmap *pm; 836 struct pmap *pm;
837 struct regmap *rp; 837 struct regmap *rp;
838 struct segmap *sp; 838 struct segmap *sp;
839 839
840#ifdef DEBUG 840#ifdef DEBUG
841 if (getcontext4m() != 0) 841 if (getcontext4m() != 0)
842 panic("setpte4m: user context"); 842 panic("setpte4m: user context");
843#endif 843#endif
844 844
845 pm = pmap_kernel(); 845 pm = pmap_kernel();
846 rp = &pm->pm_regmap[VA_VREG(va)]; 846 rp = &pm->pm_regmap[VA_VREG(va)];
847 sp = &rp->rg_segmap[VA_VSEG(va)]; 847 sp = &rp->rg_segmap[VA_VSEG(va)];
848 848
849 tlb_flush_page(va, 0, CPUSET_ALL); 849 tlb_flush_page(va, 0, CPUSET_ALL);
850 setpgt4m(sp->sg_pte + VA_SUN4M_VPG(va), pte); 850 setpgt4m(sp->sg_pte + VA_SUN4M_VPG(va), pte);
851} 851}
852 852
853/* 853/*
854 * Page table pool back-end. 854 * Page table pool back-end.
855 */ 855 */
856void * 856void *
857pgt_page_alloc(struct pool *pp, int flags) 857pgt_page_alloc(struct pool *pp, int flags)
858{ 858{
859 int cacheit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0; 859 int cacheit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0;
860 struct vm_page *pg; 860 struct vm_page *pg;
861 vaddr_t va; 861 vaddr_t va;
862 paddr_t pa; 862 paddr_t pa;
863 863
864 /* Allocate a page of physical memory */ 864 /* Allocate a page of physical memory */
865 if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL) 865 if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL)
866 return (NULL); 866 return (NULL);
867 867
868 /* Allocate virtual memory */ 868 /* Allocate virtual memory */
869 va = uvm_km_alloc(kmem_map, PAGE_SIZE, 0, UVM_KMF_VAONLY | 869 va = uvm_km_alloc(kmem_map, PAGE_SIZE, 0, UVM_KMF_VAONLY |
870 ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)); 870 ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK));
871 if (va == 0) { 871 if (va == 0) {
872 uvm_pagefree(pg); 872 uvm_pagefree(pg);
873 return (NULL); 873 return (NULL);
874 } 874 }
875 875
876 /* 876 /*
877 * On systems with a physical data cache we need to flush this page 877 * On systems with a physical data cache we need to flush this page
878 * from the cache if the pagetables cannot be cached. 878 * from the cache if the pagetables cannot be cached.
879 * On systems with a virtually indexed data cache, we only need 879 * On systems with a virtually indexed data cache, we only need
880 * to map it non-cacheable, since the page is not currently mapped. 880 * to map it non-cacheable, since the page is not currently mapped.
881 */ 881 */
882 pa = VM_PAGE_TO_PHYS(pg); 882 pa = VM_PAGE_TO_PHYS(pg);
883 if (cacheit == 0) 883 if (cacheit == 0)
884 pcache_flush_page(pa, 1); 884 pcache_flush_page(pa, 1);
885 885
886 /* Map the page */ 886 /* Map the page */
887 pmap_kenter_pa(va, pa | (cacheit ? 0 : PMAP_NC), 887 pmap_kenter_pa(va, pa | (cacheit ? 0 : PMAP_NC),
888 VM_PROT_READ | VM_PROT_WRITE); 888 VM_PROT_READ | VM_PROT_WRITE);
889 pmap_update(pmap_kernel()); 889 pmap_update(pmap_kernel());
890 890
891 return ((void *)va); 891 return ((void *)va);
892} 892}
893 893
894void 894void
895pgt_page_free(struct pool *pp, void *v) 895pgt_page_free(struct pool *pp, void *v)
896{ 896{
897 vaddr_t va; 897 vaddr_t va;
898 paddr_t pa; 898 paddr_t pa;
899 bool rv; 899 bool rv;
900 900
901 va = (vaddr_t)v; 901 va = (vaddr_t)v;
902 rv = pmap_extract(pmap_kernel(), va, &pa); 902 rv = pmap_extract(pmap_kernel(), va, &pa);
903 KASSERT(rv); 903 KASSERT(rv);
904 uvm_pagefree(PHYS_TO_VM_PAGE(pa)); 904 uvm_pagefree(PHYS_TO_VM_PAGE(pa));
905 pmap_kremove(va, PAGE_SIZE); 905 pmap_kremove(va, PAGE_SIZE);
906 uvm_km_free(kmem_map, va, PAGE_SIZE, UVM_KMF_VAONLY); 906 uvm_km_free(kmem_map, va, PAGE_SIZE, UVM_KMF_VAONLY);
907} 907}
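These two routines would typically be wired into the pool layer through a struct pool_allocator, so that the page-table pools allocate (and, when necessary, uncache) whole pages; a likely shape, hedged, with the variable name only illustrative:

static struct pool_allocator pgt_page_allocator = {
	pgt_page_alloc, pgt_page_free, 0,
};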
908#endif /* SUN4M || SUN4D */ 908#endif /* SUN4M || SUN4D */
909 909
910/*----------------------------------------------------------------*/ 910/*----------------------------------------------------------------*/
911 911
912/* 912/*
913 * The following three macros are to be used in sun4/sun4c code only. 913 * The following three macros are to be used in sun4/sun4c code only.
914 */ 914 */
915#if defined(SUN4_MMU3L) 915#if defined(SUN4_MMU3L)
916#define CTX_USABLE(pm,rp) ( \ 916#define CTX_USABLE(pm,rp) ( \
917 ((pm)->pm_ctx != NULL && \ 917 ((pm)->pm_ctx != NULL && \
918 (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \ 918 (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \
919) 919)
920#else 920#else
921#define CTX_USABLE(pm,rp) ((pm)->pm_ctx != NULL ) 921#define CTX_USABLE(pm,rp) ((pm)->pm_ctx != NULL )
922#endif 922#endif
923 923
924#define GAP_WIDEN(pm,vr) do if (CPU_HAS_SUNMMU) { \ 924#define GAP_WIDEN(pm,vr) do if (CPU_HAS_SUNMMU) { \
925 if (vr + 1 == pm->pm_gap_start) \ 925 if (vr + 1 == pm->pm_gap_start) \
926 pm->pm_gap_start = vr; \ 926 pm->pm_gap_start = vr; \
927 if (vr == pm->pm_gap_end) \ 927 if (vr == pm->pm_gap_end) \
928 pm->pm_gap_end = vr + 1; \ 928 pm->pm_gap_end = vr + 1; \
929} while (0) 929} while (0)
930 930
931#define GAP_SHRINK(pm,vr) do if (CPU_HAS_SUNMMU) { \ 931#define GAP_SHRINK(pm,vr) do if (CPU_HAS_SUNMMU) { \
932 int x; \ 932 int x; \
933 x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \ 933 x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \
934 if (vr > x) { \ 934 if (vr > x) { \
935 if (vr < pm->pm_gap_end) \ 935 if (vr < pm->pm_gap_end) \
936 pm->pm_gap_end = vr; \ 936 pm->pm_gap_end = vr; \
937 } else { \ 937 } else { \
938 if (vr >= pm->pm_gap_start && x != pm->pm_gap_start) \ 938 if (vr >= pm->pm_gap_start && x != pm->pm_gap_start) \
939 pm->pm_gap_start = vr + 1; \ 939 pm->pm_gap_start = vr + 1; \
940 } \ 940 } \
941} while (0) 941} while (0)
942 942
943 943
944static void get_phys_mem(void **); 944static void get_phys_mem(void **);
945#if 0 /* not used */ 945#if 0 /* not used */
946void kvm_iocache(char *, int); 946void kvm_iocache(char *, int);
947#endif 947#endif
948 948
949#ifdef DEBUG 949#ifdef DEBUG
950void pm_check(char *, struct pmap *); 950void pm_check(char *, struct pmap *);
951void pm_check_k(char *, struct pmap *); 951void pm_check_k(char *, struct pmap *);
952void pm_check_u(char *, struct pmap *); 952void pm_check_u(char *, struct pmap *);
953#endif 953#endif
954 954
955/* 955/*
956 * During the PMAP bootstrap, we can use a simple translation to map a 956 * During the PMAP bootstrap, we can use a simple translation to map a
957 * kernel virtual address to a physical memory address (this is arranged 957 * kernel virtual address to a physical memory address (this is arranged
958 * in locore). Usually, KERNBASE maps to physical address 0. This is always 958 * in locore). Usually, KERNBASE maps to physical address 0. This is always
959 * the case on sun4 and sun4c machines. On sun4m machines -- if no memory is 959 * the case on sun4 and sun4c machines. On sun4m machines -- if no memory is
960 * installed in the bank corresponding to physical address 0 -- the PROM may 960 * installed in the bank corresponding to physical address 0 -- the PROM may
961 * elect to load us at some other address, presumably at the start of 961 * elect to load us at some other address, presumably at the start of
962 * the first memory bank that is available. We set up the variable 962 * the first memory bank that is available. We set up the variable
963 * `va2pa_offset' to hold the physical address corresponding to KERNBASE. 963 * `va2pa_offset' to hold the physical address corresponding to KERNBASE.
964 */ 964 */
965 965
966static u_long va2pa_offset; 966static u_long va2pa_offset;
967#define PMAP_BOOTSTRAP_VA2PA(v) ((paddr_t)((u_long)(v) - va2pa_offset)) 967#define PMAP_BOOTSTRAP_VA2PA(v) ((paddr_t)((u_long)(v) - va2pa_offset))
968#define PMAP_BOOTSTRAP_PA2VA(p) ((vaddr_t)((u_long)(p) + va2pa_offset)) 968#define PMAP_BOOTSTRAP_PA2VA(p) ((vaddr_t)((u_long)(p) + va2pa_offset))
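A worked example with purely illustrative numbers: if the PROM loads the kernel at physical 0x02000000 while KERNBASE is 0xf0000000, then va2pa_offset is 0xf0000000 - 0x02000000 = 0xee000000, so

	PMAP_BOOTSTRAP_VA2PA(0xf0004000) == 0x02004000
	PMAP_BOOTSTRAP_PA2VA(0x02004000) == 0xf0004000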
969 969
970/* 970/*
971 * Grab physical memory list. 971 * Grab physical memory list.
972 * While here, compute `physmem'. 972 * While here, compute `physmem'.
973 */ 973 */
974void 974void
975get_phys_mem(void **top) 975get_phys_mem(void **top)
976{ 976{
977 struct memarr *mp; 977 struct memarr *mp;
978 char *p; 978 char *p;
979 int i; 979 int i;
980 980
981 /* Load the memory descriptor array at the current kernel top */ 981 /* Load the memory descriptor array at the current kernel top */
982 p = (void *)ALIGN(*top); 982 p = (void *)ALIGN(*top);
983 pmemarr = (struct memarr *)p; 983 pmemarr = (struct memarr *)p;
984 npmemarr = prom_makememarr(pmemarr, 1000, MEMARR_AVAILPHYS); 984 npmemarr = prom_makememarr(pmemarr, 1000, MEMARR_AVAILPHYS);
985 985
986 /* Update kernel top */ 986 /* Update kernel top */
987 p += npmemarr * sizeof(struct memarr); 987 p += npmemarr * sizeof(struct memarr);
988 *top = p; 988 *top = p;
989 989
990 for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++) 990 for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++)
991 physmem += btoc(mp->len); 991 physmem += btoc(mp->len);
992} 992}
993 993
994 994
995/* 995/*
996 * Support functions for vm_page_bootstrap(). 996 * Support functions for vm_page_bootstrap().
997 */ 997 */
998 998
999/* 999/*
1000 * How much virtual space does this kernel have? 1000 * How much virtual space does this kernel have?
1001 * (After mapping kernel text, data, etc.) 1001 * (After mapping kernel text, data, etc.)
1002 */ 1002 */
1003void 1003void
1004pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end) 1004pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end)
1005{ 1005{
1006 1006
1007 *v_start = virtual_avail; 1007 *v_start = virtual_avail;
1008 *v_end = virtual_end; 1008 *v_end = virtual_end;
1009} 1009}
1010 1010
1011#ifdef PMAP_GROWKERNEL 1011#ifdef PMAP_GROWKERNEL
1012vaddr_t 1012vaddr_t
1013pmap_growkernel(vaddr_t eva) 1013pmap_growkernel(vaddr_t eva)
1014{ 1014{
1015 struct regmap *rp; 1015 struct regmap *rp;
1016 struct segmap *sp; 1016 struct segmap *sp;
1017 int vr, evr, M, N, i; 1017 int vr, evr, M, N, i;
1018 struct vm_page *pg; 1018 struct vm_page *pg;
1019 vaddr_t va; 1019 vaddr_t va;
1020 1020
1021 if (eva <= virtual_end) 1021 if (eva <= virtual_end)
1022 return (virtual_end); 1022 return (virtual_end);
1023 1023
1024 /* For now, only implemented for sun4/sun4c */ 1024 /* For now, only implemented for sun4/sun4c */
1025 KASSERT(CPU_HAS_SUNMMU); 1025 KASSERT(CPU_HAS_SUNMMU);
1026 1026
1027 /* 1027 /*
1028 * Map in the next region(s) 1028 * Map in the next region(s)
1029 */ 1029 */
1030 1030
1031 /* Get current end-of-kernel */ 1031 /* Get current end-of-kernel */
1032 vr = virtual_end >> RGSHIFT; 1032 vr = virtual_end >> RGSHIFT;
1033 evr = (eva + NBPRG - 1) >> RGSHIFT; 1033 evr = (eva + NBPRG - 1) >> RGSHIFT;
1034 eva = evr << RGSHIFT; 1034 eva = evr << RGSHIFT;
1035 1035
1036 if (eva > VM_MAX_KERNEL_ADDRESS) 1036 if (eva > VM_MAX_KERNEL_ADDRESS)
1037 panic("growkernel: grown too large: %lx", eva); 1037 panic("growkernel: grown too large: %lx", eva);
1038 1038
1039 /* 1039 /*
1040 * Divide a region in N blocks of M segments, where each segment 1040 * Divide a region in N blocks of M segments, where each segment
1041 * block can have its PTEs mapped by one page. 1041 * block can have its PTEs mapped by one page.
1042 * N should come out to 1 for 8K pages and to 4 for 4K pages. 1042 * N should come out to 1 for 8K pages and to 4 for 4K pages.
1043 */ 1043 */
1044 M = NBPG / (NPTESG * sizeof(int)); 1044 M = NBPG / (NPTESG * sizeof(int));
1045 N = (NBPRG/NBPSG) / M; 1045 N = (NBPRG/NBPSG) / M;
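	/*
	 * Worked through (follows directly from the expressions above):
	 * one page holds NBPG/sizeof(int) PTEs and a segment needs NPTESG
	 * of them, so a single page maps M = NBPG / (NPTESG * sizeof(int))
	 * segments' worth of PTEs.  A region contains NBPRG/NBPSG segments,
	 * hence N = (NBPRG/NBPSG) / M pages per region, and N * M equals
	 * the number of segments per region.
	 */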
1046 1046
1047 while (vr < evr) { 1047 while (vr < evr) {
1048 rp = &pmap_kernel()->pm_regmap[vr]; 1048 rp = &pmap_kernel()->pm_regmap[vr];
1049 for (i = 0; i < N; i++) { 1049 for (i = 0; i < N; i++) {
1050 sp = &rp->rg_segmap[i * M]; 1050 sp = &rp->rg_segmap[i * M];
1051 va = (vaddr_t)sp->sg_pte; 1051 va = (vaddr_t)sp->sg_pte;
1052 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); 1052 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
1053 if (pg == NULL) 1053 if (pg == NULL)
1054 panic("growkernel: out of memory"); 1054 panic("growkernel: out of memory");
1055 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), 1055 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
1056 VM_PROT_READ | VM_PROT_WRITE); 1056 VM_PROT_READ | VM_PROT_WRITE);
1057 } 1057 }
1058 } 1058 }
@@ -2506,2316 +2506,2326 @@ pv_link4_4c(struct vm_page *pg, struct p @@ -2506,2316 +2506,2326 @@ pv_link4_4c(struct vm_page *pg, struct p
2506 pv0->pv_pmap = pm; 2506 pv0->pv_pmap = pm;
2507 pv0->pv_va = va; 2507 pv0->pv_va = va;
2508 pv0->pv_flags |= nc; 2508 pv0->pv_flags |= nc;
2509 return (0); 2509 return (0);
2510 } 2510 }
2511 2511
2512 /* 2512 /*
2513 * Allocate the new PV entry now, and, if that fails, bail out 2513 * Allocate the new PV entry now, and, if that fails, bail out
2514 * before changing the cacheable state of the existing mappings. 2514 * before changing the cacheable state of the existing mappings.
2515 */ 2515 */
2516 npv = pool_get(&pv_pool, PR_NOWAIT); 2516 npv = pool_get(&pv_pool, PR_NOWAIT);
2517 if (npv == NULL) 2517 if (npv == NULL)
2518 return (ENOMEM); 2518 return (ENOMEM);
2519 2519
2520 pmap_stats.ps_enter_secondpv++; 2520 pmap_stats.ps_enter_secondpv++;
2521 2521
2522 /* 2522 /*
2523 * Before entering the new mapping, see if 2523 * Before entering the new mapping, see if
2524 * it will cause old mappings to become aliased 2524 * it will cause old mappings to become aliased
2525 * and thus need to be `discached'. 2525 * and thus need to be `discached'.
2526 */ 2526 */
2527 if (pv0->pv_flags & PV_ANC) { 2527 if (pv0->pv_flags & PV_ANC) {
2528 /* already uncached, just stay that way */ 2528 /* already uncached, just stay that way */
2529 *pteprotop |= PG_NC; 2529 *pteprotop |= PG_NC;
2530 goto link_npv; 2530 goto link_npv;
2531 } 2531 }
2532 2532
2533 for (pv = pv0; pv != NULL; pv = pv->pv_next) { 2533 for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2534 if ((pv->pv_flags & PV_NC) != 0) { 2534 if ((pv->pv_flags & PV_NC) != 0) {
2535 *pteprotop |= PG_NC; 2535 *pteprotop |= PG_NC;
2536#ifdef DEBUG 2536#ifdef DEBUG
2537 /* Check currently illegal condition */ 2537 /* Check currently illegal condition */
2538 if (nc == 0) 2538 if (nc == 0)
2539 printf("pv_link: proc %s, va=0x%lx: " 2539 printf("pv_link: proc %s, va=0x%lx: "
2540 "unexpected uncached mapping at 0x%lx\n", 2540 "unexpected uncached mapping at 0x%lx\n",
2541 curproc ? curproc->p_comm : "--", 2541 curproc ? curproc->p_comm : "--",
2542 va, pv->pv_va); 2542 va, pv->pv_va);
2543#endif 2543#endif
2544 } 2544 }
2545 if (BADALIAS(va, pv->pv_va)) { 2545 if (BADALIAS(va, pv->pv_va)) {
2546#ifdef DEBUG 2546#ifdef DEBUG
2547 if (pmapdebug & PDB_CACHESTUFF) 2547 if (pmapdebug & PDB_CACHESTUFF)
2548 printf( 2548 printf(
2549 "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n", 2549 "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n",
2550 curproc ? curproc->p_comm : "--", 2550 curproc ? curproc->p_comm : "--",
2551 va, pv->pv_va, pg); 2551 va, pv->pv_va, pg);
2552#endif 2552#endif
2553 /* Mark list head `uncached due to aliases' */ 2553 /* Mark list head `uncached due to aliases' */
2554 pv0->pv_flags |= PV_ANC; 2554 pv0->pv_flags |= PV_ANC;
2555 pv_changepte4_4c(pg, PG_NC, 0); 2555 pv_changepte4_4c(pg, PG_NC, 0);
2556 *pteprotop |= PG_NC; 2556 *pteprotop |= PG_NC;
2557 break; 2557 break;
2558 } 2558 }
2559 } 2559 }
2560 2560
2561link_npv: 2561link_npv:
2562 npv->pv_next = pv0->pv_next; 2562 npv->pv_next = pv0->pv_next;
2563 npv->pv_pmap = pm; 2563 npv->pv_pmap = pm;
2564 npv->pv_va = va; 2564 npv->pv_va = va;
2565 npv->pv_flags = nc; 2565 npv->pv_flags = nc;
2566 pv0->pv_next = npv; 2566 pv0->pv_next = npv;
2567 return (0); 2567 return (0);
2568} 2568}
2569 2569
2570#endif /* SUN4 || SUN4C */ 2570#endif /* SUN4 || SUN4C */
2571 2571
2572#if defined(SUN4M) || defined(SUN4D) /* SRMMU versions of above */ 2572#if defined(SUN4M) || defined(SUN4D) /* SRMMU versions of above */
2573/* 2573/*
2574 * Walk the given pv list, and for each PTE, set or clear some bits 2574 * Walk the given pv list, and for each PTE, set or clear some bits
2575 * (e.g., PG_W or PG_NC). 2575 * (e.g., PG_W or PG_NC).
2576 * 2576 *
2577 * This routine flushes the cache for any page whose PTE changes, 2577 * This routine flushes the cache for any page whose PTE changes,
2578 * as long as the process has a context; this is overly conservative. 2578 * as long as the process has a context; this is overly conservative.
2579 * It also copies ref and mod bits to the pvlist, on the theory that 2579 * It also copies ref and mod bits to the pvlist, on the theory that
2580 * this might save work later. (XXX should test this theory) 2580 * this might save work later. (XXX should test this theory)
2581 * 2581 *
2582 * Called with PV lock and pmap main lock held. 2582 * Called with PV lock and pmap main lock held.
2583 */ 2583 */
2584void 2584void
2585pv_changepte4m(struct vm_page *pg, int bis, int bic) 2585pv_changepte4m(struct vm_page *pg, int bis, int bic)
2586{ 2586{
2587 struct pvlist *pv; 2587 struct pvlist *pv;
2588 struct pmap *pm; 2588 struct pmap *pm;
2589 vaddr_t va; 2589 vaddr_t va;
2590 struct regmap *rp; 2590 struct regmap *rp;
2591 struct segmap *sp; 2591 struct segmap *sp;
2592 2592
2593 pv = VM_MDPAGE_PVHEAD(pg); 2593 pv = VM_MDPAGE_PVHEAD(pg);
2594 if (pv->pv_pmap == NULL) 2594 if (pv->pv_pmap == NULL)
2595 return; 2595 return;
2596 2596
2597 for (; pv != NULL; pv = pv->pv_next) { 2597 for (; pv != NULL; pv = pv->pv_next) {
2598 int tpte; 2598 int tpte;
2599 pm = pv->pv_pmap; 2599 pm = pv->pv_pmap;
2600 /* XXXSMP: should lock pm */ 2600 /* XXXSMP: should lock pm */
2601 va = pv->pv_va; 2601 va = pv->pv_va;
2602 rp = &pm->pm_regmap[VA_VREG(va)]; 2602 rp = &pm->pm_regmap[VA_VREG(va)];
2603 sp = &rp->rg_segmap[VA_VSEG(va)]; 2603 sp = &rp->rg_segmap[VA_VSEG(va)];
2604 2604
2605 if (pm->pm_ctx) { 2605 if (pm->pm_ctx) {
2606 /* 2606 /*
2607 * XXX: always flush cache; conservative, but 2607 * XXX: always flush cache; conservative, but
2608 * needed to invalidate cache tag protection 2608 * needed to invalidate cache tag protection
2609 * bits and when disabling caching. 2609 * bits and when disabling caching.
2610 */ 2610 */
2611 cache_flush_page(va, pm->pm_ctxnum); 2611 cache_flush_page(va, pm->pm_ctxnum);
2612 } 2612 }
2613 2613
2614 tpte = sp->sg_pte[VA_SUN4M_VPG(va)]; 2614 tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
2615 KASSERT((tpte & SRMMU_TETYPE) == SRMMU_TEPTE); 2615 KASSERT((tpte & SRMMU_TETYPE) == SRMMU_TEPTE);
2616 VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4M(updatepte4m(va, 2616 VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4M(updatepte4m(va,
2617 &sp->sg_pte[VA_SUN4M_VPG(va)], bic, bis, pm->pm_ctxnum, 2617 &sp->sg_pte[VA_SUN4M_VPG(va)], bic, bis, pm->pm_ctxnum,
2618 PMAP_CPUSET(pm))); 2618 PMAP_CPUSET(pm)));
2619 } 2619 }
2620} 2620}
2621 2621
2622/* 2622/*
2623 * Sync ref and mod bits in pvlist. If page has been ref'd or modified, 2623 * Sync ref and mod bits in pvlist. If page has been ref'd or modified,
2624 * update ref/mod bits in pvlist, and clear the hardware bits. 2624 * update ref/mod bits in pvlist, and clear the hardware bits.
2625 * 2625 *
2626 * Return the new flags. 2626 * Return the new flags.
2627 */ 2627 */
2628int 2628int
2629pv_syncflags4m(struct vm_page *pg) 2629pv_syncflags4m(struct vm_page *pg)
2630{ 2630{
2631 struct pvlist *pv; 2631 struct pvlist *pv;
2632 struct pmap *pm; 2632 struct pmap *pm;
2633 int va, flags; 2633 int va, flags;
2634 int s; 2634 int s;
2635 struct regmap *rp; 2635 struct regmap *rp;
2636 struct segmap *sp; 2636 struct segmap *sp;
2637 int tpte; 2637 int tpte;
2638 2638
2639 s = splvm(); 2639 s = splvm();
2640 PMAP_LOCK(); 2640 PMAP_LOCK();
2641 pv = VM_MDPAGE_PVHEAD(pg); 2641 pv = VM_MDPAGE_PVHEAD(pg);
2642 if (pv->pv_pmap == NULL) { 2642 if (pv->pv_pmap == NULL) {
2643 /* Page not mapped; pv_flags is already up to date */ 2643 /* Page not mapped; pv_flags is already up to date */
2644 flags = 0; 2644 flags = 0;
2645 goto out; 2645 goto out;
2646 } 2646 }
2647 2647
2648 flags = pv->pv_flags; 2648 flags = pv->pv_flags;
2649 for (; pv != NULL; pv = pv->pv_next) { 2649 for (; pv != NULL; pv = pv->pv_next) {
2650 pm = pv->pv_pmap; 2650 pm = pv->pv_pmap;
2651 va = pv->pv_va; 2651 va = pv->pv_va;
2652 rp = &pm->pm_regmap[VA_VREG(va)]; 2652 rp = &pm->pm_regmap[VA_VREG(va)];
2653 sp = &rp->rg_segmap[VA_VSEG(va)]; 2653 sp = &rp->rg_segmap[VA_VSEG(va)];
2654 2654
2655 tpte = sp->sg_pte[VA_SUN4M_VPG(va)]; 2655 tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
2656 if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && 2656 if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE &&
2657 (tpte & (SRMMU_PG_R|SRMMU_PG_M)) != 0) { 2657 (tpte & (SRMMU_PG_R|SRMMU_PG_M)) != 0) {
2658 /* 2658 /*
2659 * Flush cache if modified to make sure the PTE 2659 * Flush cache if modified to make sure the PTE
2660 * M bit will be set again on the next write access. 2660 * M bit will be set again on the next write access.
2661 */ 2661 */
2662 if (pm->pm_ctx && (tpte & SRMMU_PG_M) == SRMMU_PG_M) 2662 if (pm->pm_ctx && (tpte & SRMMU_PG_M) == SRMMU_PG_M)
2663 cache_flush_page(va, pm->pm_ctxnum); 2663 cache_flush_page(va, pm->pm_ctxnum);
2664 2664
2665 flags |= MR4M(updatepte4m(va, 2665 flags |= MR4M(updatepte4m(va,
2666 &sp->sg_pte[VA_SUN4M_VPG(va)], 2666 &sp->sg_pte[VA_SUN4M_VPG(va)],
2667 SRMMU_PG_M | SRMMU_PG_R, 2667 SRMMU_PG_M | SRMMU_PG_R,
2668 0, pm->pm_ctxnum, PMAP_CPUSET(pm))); 2668 0, pm->pm_ctxnum, PMAP_CPUSET(pm)));
2669 } 2669 }
2670 } 2670 }
2671 2671
2672 VM_MDPAGE_PVHEAD(pg)->pv_flags = flags; 2672 VM_MDPAGE_PVHEAD(pg)->pv_flags = flags;
2673out: 2673out:
2674 PMAP_UNLOCK(); 2674 PMAP_UNLOCK();
2675 splx(s); 2675 splx(s);
2676 return (flags); 2676 return (flags);
2677} 2677}
2678 2678
2679/* 2679/*
2680 * Should be called with pmap already locked. 2680 * Should be called with pmap already locked.
2681 */ 2681 */
2682void 2682void
2683pv_unlink4m(struct vm_page *pg, struct pmap *pm, vaddr_t va) 2683pv_unlink4m(struct vm_page *pg, struct pmap *pm, vaddr_t va)
2684{ 2684{
2685 struct pvlist *pv0, *npv; 2685 struct pvlist *pv0, *npv;
2686 2686
2687 pv0 = VM_MDPAGE_PVHEAD(pg); 2687 pv0 = VM_MDPAGE_PVHEAD(pg);
2688 2688
2689 npv = pv0->pv_next; 2689 npv = pv0->pv_next;
2690 /* 2690 /*
2691 * First entry is special (sigh). 2691 * First entry is special (sigh).
2692 */ 2692 */
2693 if (pv0->pv_pmap == pm && pv0->pv_va == va) { 2693 if (pv0->pv_pmap == pm && pv0->pv_va == va) {
2694 pmap_stats.ps_unlink_pvfirst++; 2694 pmap_stats.ps_unlink_pvfirst++;
2695 if (npv != NULL) { 2695 if (npv != NULL) {
2696 /* 2696 /*
2697 * Shift next entry into the head. 2697 * Shift next entry into the head.
2698 * Make sure to retain the REF, MOD and ANC flags 2698 * Make sure to retain the REF, MOD and ANC flags
2699 * on the list head. 2699 * on the list head.
2700 */ 2700 */
2701 pv0->pv_next = npv->pv_next; 2701 pv0->pv_next = npv->pv_next;
2702 pv0->pv_pmap = npv->pv_pmap; 2702 pv0->pv_pmap = npv->pv_pmap;
2703 pv0->pv_va = npv->pv_va; 2703 pv0->pv_va = npv->pv_va;
2704 pv0->pv_flags &= ~PV_NC; 2704 pv0->pv_flags &= ~PV_NC;
2705 pv0->pv_flags |= (npv->pv_flags & PV_NC); 2705 pv0->pv_flags |= (npv->pv_flags & PV_NC);
2706 pool_put(&pv_pool, npv); 2706 pool_put(&pv_pool, npv);
2707 } else { 2707 } else {
2708 /* 2708 /*
2709 * No mappings left; we need to maintain 2709 * No mappings left; we need to maintain
2710 * the REF and MOD flags, since pmap_is_modified() 2710 * the REF and MOD flags, since pmap_is_modified()
2711 * can still be called for this page. 2711 * can still be called for this page.
2712 */ 2712 */
2713 pv0->pv_pmap = NULL; 2713 pv0->pv_pmap = NULL;
2714 pv0->pv_flags &= ~(PV_NC|PV_ANC); 2714 pv0->pv_flags &= ~(PV_NC|PV_ANC);
2715 return; 2715 return;
2716 } 2716 }
2717 } else { 2717 } else {
2718 struct pvlist *prev; 2718 struct pvlist *prev;
2719 2719
2720 pmap_stats.ps_unlink_pvsearch++; 2720 pmap_stats.ps_unlink_pvsearch++;
2721 for (prev = pv0;; prev = npv, npv = npv->pv_next) { 2721 for (prev = pv0;; prev = npv, npv = npv->pv_next) {
2722 if (npv == NULL) { 2722 if (npv == NULL) {
2723 panic("pv_unlink: pm %p is missing on pg %p", 2723 panic("pv_unlink: pm %p is missing on pg %p",
2724 pm, pg); 2724 pm, pg);
2725 return; 2725 return;
2726 } 2726 }
2727 if (npv->pv_pmap == pm && npv->pv_va == va) 2727 if (npv->pv_pmap == pm && npv->pv_va == va)
2728 break; 2728 break;
2729 } 2729 }
2730 prev->pv_next = npv->pv_next; 2730 prev->pv_next = npv->pv_next;
2731 pool_put(&pv_pool, npv); 2731 pool_put(&pv_pool, npv);
2732 } 2732 }
2733 2733
2734 if ((pv0->pv_flags & (PV_NC|PV_ANC)) == PV_ANC) { 2734 if ((pv0->pv_flags & (PV_NC|PV_ANC)) == PV_ANC) {
2735 2735
2736 /* 2736 /*
2737 * Not cached: check whether we can fix that now. 2737 * Not cached: check whether we can fix that now.
2738 */ 2738 */
2739 va = pv0->pv_va; 2739 va = pv0->pv_va;
2740 for (npv = pv0->pv_next; npv != NULL; npv = npv->pv_next) 2740 for (npv = pv0->pv_next; npv != NULL; npv = npv->pv_next)
2741 if (BADALIAS(va, npv->pv_va) || 2741 if (BADALIAS(va, npv->pv_va) ||
2742 (npv->pv_flags & PV_NC) != 0) 2742 (npv->pv_flags & PV_NC) != 0)
2743 return; 2743 return;
2744#ifdef DEBUG 2744#ifdef DEBUG
2745 if (pmapdebug & PDB_CACHESTUFF) 2745 if (pmapdebug & PDB_CACHESTUFF)
2746 printf( 2746 printf(
2747 "pv_unlink: alias ok: proc %s, va 0x%lx, pg %p\n", 2747 "pv_unlink: alias ok: proc %s, va 0x%lx, pg %p\n",
2748 curproc ? curproc->p_comm : "--", 2748 curproc ? curproc->p_comm : "--",
2749 va, pg); 2749 va, pg);
2750#endif 2750#endif
2751 pv0->pv_flags &= ~PV_ANC; 2751 pv0->pv_flags &= ~PV_ANC;
2752 pv_changepte4m(pg, SRMMU_PG_C, 0); 2752 pv_changepte4m(pg, SRMMU_PG_C, 0);
2753 } 2753 }
2754} 2754}
2755 2755
2756/* 2756/*
2757 * pv_link is the inverse of pv_unlink, and is used in pmap_enter. 2757 * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
2758 * May turn off the cacheable bit in the pte prototype for the new mapping. 2758 * May turn off the cacheable bit in the pte prototype for the new mapping.
2759 * Called with pm locked. 2759 * Called with pm locked.
2760 */ 2760 */
2761/*static*/ int 2761/*static*/ int
2762pv_link4m(struct vm_page *pg, struct pmap *pm, vaddr_t va, 2762pv_link4m(struct vm_page *pg, struct pmap *pm, vaddr_t va,
2763 unsigned int *pteprotop) 2763 unsigned int *pteprotop)
2764{ 2764{
2765 struct pvlist *pv0, *pv, *npv; 2765 struct pvlist *pv0, *pv, *npv;
2766 int nc = (*pteprotop & SRMMU_PG_C) == 0 ? PV_NC : 0; 2766 int nc = (*pteprotop & SRMMU_PG_C) == 0 ? PV_NC : 0;
2767 int error = 0; 2767 int error = 0;
2768 2768
2769 pv0 = VM_MDPAGE_PVHEAD(pg); 2769 pv0 = VM_MDPAGE_PVHEAD(pg);
2770 2770
2771 if (pv0->pv_pmap == NULL) { 2771 if (pv0->pv_pmap == NULL) {
2772 /* no pvlist entries yet */ 2772 /* no pvlist entries yet */
2773 pmap_stats.ps_enter_firstpv++; 2773 pmap_stats.ps_enter_firstpv++;
2774 pv0->pv_next = NULL; 2774 pv0->pv_next = NULL;
2775 pv0->pv_pmap = pm; 2775 pv0->pv_pmap = pm;
2776 pv0->pv_va = va; 2776 pv0->pv_va = va;
2777 pv0->pv_flags |= nc; 2777 pv0->pv_flags |= nc;
2778 goto out; 2778 goto out;
2779 } 2779 }
2780 2780
2781 /* 2781 /*
2782 * Allocate the new PV entry now, and, if that fails, bail out 2782 * Allocate the new PV entry now, and, if that fails, bail out
2783 * before changing the cacheable state of the existing mappings. 2783 * before changing the cacheable state of the existing mappings.
2784 */ 2784 */
2785 npv = pool_get(&pv_pool, PR_NOWAIT); 2785 npv = pool_get(&pv_pool, PR_NOWAIT);
2786 if (npv == NULL) { 2786 if (npv == NULL) {
2787 error = ENOMEM; 2787 error = ENOMEM;
2788 goto out; 2788 goto out;
2789 } 2789 }
2790 2790
2791 pmap_stats.ps_enter_secondpv++; 2791 pmap_stats.ps_enter_secondpv++;
2792 2792
2793 /* 2793 /*
2794 * See if the new mapping will cause old mappings to 2794 * See if the new mapping will cause old mappings to
2795 * become aliased and thus need to be `discached'. 2795 * become aliased and thus need to be `discached'.
2796 */ 2796 */
2797 if ((pv0->pv_flags & PV_ANC) != 0) { 2797 if ((pv0->pv_flags & PV_ANC) != 0) {
2798 /* already uncached, just stay that way */ 2798 /* already uncached, just stay that way */
2799 *pteprotop &= ~SRMMU_PG_C; 2799 *pteprotop &= ~SRMMU_PG_C;
2800 goto link_npv; 2800 goto link_npv;
2801 } 2801 }
2802 2802
2803 for (pv = pv0; pv != NULL; pv = pv->pv_next) { 2803 for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2804 if ((pv->pv_flags & PV_NC) != 0) { 2804 if ((pv->pv_flags & PV_NC) != 0) {
2805 *pteprotop &= ~SRMMU_PG_C; 2805 *pteprotop &= ~SRMMU_PG_C;
2806#ifdef DEBUG 2806#ifdef DEBUG
2807 /* Check currently illegal condition */ 2807 /* Check currently illegal condition */
2808 if (nc == 0) 2808 if (nc == 0)
2809 printf("pv_link: proc %s, va=0x%lx: " 2809 printf("pv_link: proc %s, va=0x%lx: "
2810 "unexpected uncached mapping at 0x%lx\n", 2810 "unexpected uncached mapping at 0x%lx\n",
2811 curproc ? curproc->p_comm : "--", 2811 curproc ? curproc->p_comm : "--",
2812 va, pv->pv_va); 2812 va, pv->pv_va);
2813#endif 2813#endif
2814 } 2814 }
2815 if (BADALIAS(va, pv->pv_va)) { 2815 if (BADALIAS(va, pv->pv_va)) {
2816#ifdef DEBUG 2816#ifdef DEBUG
2817 if (pmapdebug & PDB_CACHESTUFF) 2817 if (pmapdebug & PDB_CACHESTUFF)
2818 printf( 2818 printf(
2819 "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n", 2819 "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n",
2820 curproc ? curproc->p_comm : "--", 2820 curproc ? curproc->p_comm : "--",
2821 va, pv->pv_va, pg); 2821 va, pv->pv_va, pg);
2822#endif 2822#endif
2823 /* Mark list head `uncached due to aliases' */ 2823 /* Mark list head `uncached due to aliases' */
2824 pv0->pv_flags |= PV_ANC; 2824 pv0->pv_flags |= PV_ANC;
2825 pv_changepte4m(pg, 0, SRMMU_PG_C); 2825 pv_changepte4m(pg, 0, SRMMU_PG_C);
2826 *pteprotop &= ~SRMMU_PG_C; 2826 *pteprotop &= ~SRMMU_PG_C;
2827 break; 2827 break;
2828 } 2828 }
2829 } 2829 }
2830 2830
2831link_npv: 2831link_npv:
2832 /* Now link in the new PV entry */ 2832 /* Now link in the new PV entry */
2833 npv->pv_next = pv0->pv_next; 2833 npv->pv_next = pv0->pv_next;
2834 npv->pv_pmap = pm; 2834 npv->pv_pmap = pm;
2835 npv->pv_va = va; 2835 npv->pv_va = va;
2836 npv->pv_flags = nc; 2836 npv->pv_flags = nc;
2837 pv0->pv_next = npv; 2837 pv0->pv_next = npv;
2838 2838
2839out: 2839out:
2840 return (error); 2840 return (error);
2841} 2841}
2842#endif 2842#endif
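BADALIAS() itself is not in this hunk; on a virtually indexed cache it typically reduces to checking whether two virtual addresses differ in the cache-index bits above the page offset. A hedged sketch with hypothetical numbers (16KB direct-mapped cache, 4KB pages, so index bits 12..13):

/* Illustrative only -- not the tree's definition of BADALIAS(). */
#define CACHE_ALIAS_BITS_SKETCH	0x3000
#define BADALIAS_SKETCH(va1, va2) \
	((((unsigned long)(va1) ^ (unsigned long)(va2)) & \
	    CACHE_ALIAS_BITS_SKETCH) != 0)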
2843 2843
2844/* 2844/*
2845 * Uncache all entries on behalf of kvm_uncache(). In addition to 2845 * Uncache all entries on behalf of kvm_uncache(). In addition to
2846 * removing the cache bit from the PTE, we are also setting PV_NC 2846 * removing the cache bit from the PTE, we are also setting PV_NC
2847 * in each entry to stop pv_unlink() from re-caching (i.e. when a 2847 * in each entry to stop pv_unlink() from re-caching (i.e. when a
2848 * a bad alias is going away). 2848 * a bad alias is going away).
2849 */ 2849 */
2850static void 2850static void
2851pv_uncache(struct vm_page *pg) 2851pv_uncache(struct vm_page *pg)
2852{ 2852{
2853 struct pvlist *pv; 2853 struct pvlist *pv;
2854 int s; 2854 int s;
2855 2855
2856 s = splvm(); 2856 s = splvm();
2857 PMAP_LOCK(); 2857 PMAP_LOCK();
2858 2858
2859 for (pv = VM_MDPAGE_PVHEAD(pg); pv != NULL; pv = pv->pv_next) 2859 for (pv = VM_MDPAGE_PVHEAD(pg); pv != NULL; pv = pv->pv_next)
2860 pv->pv_flags |= PV_NC; 2860 pv->pv_flags |= PV_NC;
2861 2861
2862#if defined(SUN4M) || defined(SUN4D) 2862#if defined(SUN4M) || defined(SUN4D)
2863 if (CPU_HAS_SRMMU) 2863 if (CPU_HAS_SRMMU)
2864 pv_changepte4m(pg, 0, SRMMU_PG_C); 2864 pv_changepte4m(pg, 0, SRMMU_PG_C);
2865#endif 2865#endif
2866#if defined(SUN4) || defined(SUN4C) 2866#if defined(SUN4) || defined(SUN4C)
2867 if (CPU_HAS_SUNMMU) 2867 if (CPU_HAS_SUNMMU)
2868 pv_changepte4_4c(pg, PG_NC, 0); 2868 pv_changepte4_4c(pg, PG_NC, 0);
2869#endif 2869#endif
2870 PMAP_UNLOCK(); 2870 PMAP_UNLOCK();
2871 splx(s); 2871 splx(s);
2872} 2872}
2873 2873
2874/* 2874/*
2875 * Walk the given list and flush the cache for each (MI) page that is 2875 * Walk the given list and flush the cache for each (MI) page that is
2876 * potentially in the cache. Called only if vactype != VAC_NONE. 2876 * potentially in the cache. Called only if vactype != VAC_NONE.
2877 */ 2877 */
2878#if defined(SUN4) || defined(SUN4C) 2878#if defined(SUN4) || defined(SUN4C)
2879static void 2879static void
2880pv_flushcache4_4c(struct vm_page *pg) 2880pv_flushcache4_4c(struct vm_page *pg)
2881{ 2881{
2882 struct pvlist *pv; 2882 struct pvlist *pv;
2883 struct pmap *pm; 2883 struct pmap *pm;
2884 int s, ctx; 2884 int s, ctx;
2885 2885
2886 pv = VM_MDPAGE_PVHEAD(pg); 2886 pv = VM_MDPAGE_PVHEAD(pg);
2887 2887
2888 write_user_windows(); /* paranoia? */ 2888 write_user_windows(); /* paranoia? */
2889 s = splvm(); /* XXX extreme paranoia */ 2889 s = splvm(); /* XXX extreme paranoia */
2890 if ((pm = pv->pv_pmap) != NULL) { 2890 if ((pm = pv->pv_pmap) != NULL) {
2891 ctx = getcontext4(); 2891 ctx = getcontext4();
2892 for (;;) { 2892 for (;;) {
2893 if (pm->pm_ctx) { 2893 if (pm->pm_ctx) {
2894 setcontext4(pm->pm_ctxnum); 2894 setcontext4(pm->pm_ctxnum);
2895 cache_flush_page(pv->pv_va, pm->pm_ctxnum); 2895 cache_flush_page(pv->pv_va, pm->pm_ctxnum);
2896 } 2896 }
2897 pv = pv->pv_next; 2897 pv = pv->pv_next;
2898 if (pv == NULL) 2898 if (pv == NULL)
2899 break; 2899 break;
2900 pm = pv->pv_pmap; 2900 pm = pv->pv_pmap;
2901 } 2901 }
2902 setcontext4(ctx); 2902 setcontext4(ctx);
2903 } 2903 }
2904 splx(s); 2904 splx(s);
2905} 2905}
2906#endif /* SUN4 || SUN4C */ 2906#endif /* SUN4 || SUN4C */
2907 2907
2908#if defined(SUN4M) || defined(SUN4D) 2908#if defined(SUN4M) || defined(SUN4D)
2909static void 2909static void
2910pv_flushcache4m(struct vm_page *pg) 2910pv_flushcache4m(struct vm_page *pg)
2911{ 2911{
2912 struct pvlist *pv; 2912 struct pvlist *pv;
2913 struct pmap *pm; 2913 struct pmap *pm;
2914 int s; 2914 int s;
2915 2915
2916 pv = VM_MDPAGE_PVHEAD(pg); 2916 pv = VM_MDPAGE_PVHEAD(pg);
2917 2917
2918 s = splvm(); /* XXX extreme paranoia */ 2918 s = splvm(); /* XXX extreme paranoia */
2919 if ((pm = pv->pv_pmap) != NULL) { 2919 if ((pm = pv->pv_pmap) != NULL) {
2920 for (;;) { 2920 for (;;) {
2921 if (pm->pm_ctx) { 2921 if (pm->pm_ctx) {
2922 cache_flush_page(pv->pv_va, pm->pm_ctxnum); 2922 cache_flush_page(pv->pv_va, pm->pm_ctxnum);
2923 } 2923 }
2924 pv = pv->pv_next; 2924 pv = pv->pv_next;
2925 if (pv == NULL) 2925 if (pv == NULL)
2926 break; 2926 break;
2927 pm = pv->pv_pmap; 2927 pm = pv->pv_pmap;
2928 } 2928 }
2929 } 2929 }
2930 splx(s); 2930 splx(s);
2931} 2931}
2932#endif /* SUN4M || SUN4D */ 2932#endif /* SUN4M || SUN4D */
2933 2933
2934/*----------------------------------------------------------------*/ 2934/*----------------------------------------------------------------*/
2935 2935
2936/* 2936/*
2937 * At last, pmap code. 2937 * At last, pmap code.
2938 */ 2938 */
2939 2939
2940#if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D)) 2940#if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D))
2941int nptesg; 2941int nptesg;
2942#endif 2942#endif
2943 2943
2944#if defined(SUN4M) || defined(SUN4D) 2944#if defined(SUN4M) || defined(SUN4D)
2945static void pmap_bootstrap4m(void *); 2945static void pmap_bootstrap4m(void *);
2946#endif 2946#endif
2947#if defined(SUN4) || defined(SUN4C) 2947#if defined(SUN4) || defined(SUN4C)
2948static void pmap_bootstrap4_4c(void *, int, int, int); 2948static void pmap_bootstrap4_4c(void *, int, int, int);
2949#endif 2949#endif
2950 2950
2951/* 2951/*
2952 * Bootstrap the system enough to run with VM enabled. 2952 * Bootstrap the system enough to run with VM enabled.
2953 * 2953 *
2954 * nsegment is the number of mmu segment entries (``PMEGs''); 2954 * nsegment is the number of mmu segment entries (``PMEGs'');
2955 * nregion is the number of mmu region entries (``SMEGs''); 2955 * nregion is the number of mmu region entries (``SMEGs'');
2956 * nctx is the number of contexts. 2956 * nctx is the number of contexts.
2957 */ 2957 */
2958void 2958void
2959pmap_bootstrap(int nctx, int nregion, int nsegment) 2959pmap_bootstrap(int nctx, int nregion, int nsegment)
2960{ 2960{
2961 void *p; 2961 void *p;
2962 extern char etext[], kernel_data_start[]; 2962 extern char etext[], kernel_data_start[];
2963 extern char *kernel_top; 2963 extern char *kernel_top;
2964 2964
2965 uvmexp.pagesize = NBPG; 2965 uvmexp.pagesize = NBPG;
2966 uvm_setpagesize(); 2966 uvm_setpagesize();
2967 2967
2968#if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D)) 2968#if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D))
2969 /* In this case NPTESG is a variable */ 2969 /* In this case NPTESG is a variable */
2970 nptesg = (NBPSG >> pgshift); 2970 nptesg = (NBPSG >> pgshift);
2971#endif 2971#endif
2972 2972
2973 /* 2973 /*
2974 * Grab physical memory list. 2974 * Grab physical memory list.
2975 */ 2975 */
2976 p = kernel_top; 2976 p = kernel_top;
2977 get_phys_mem(&p); 2977 get_phys_mem(&p);
2978 2978
2979 /* 2979 /*
2980 * The data segment in sparc ELF images is aligned to a 64KB 2980 * The data segment in sparc ELF images is aligned to a 64KB
2981 * (the maximum page size defined by the ELF/sparc ABI) boundary. 2981 * (the maximum page size defined by the ELF/sparc ABI) boundary.
2982 * This results in an unused portion of physical memory in between 2982 * This results in an unused portion of physical memory in between
2983 * the text/rodata and the data segment. We pick up that gap 2983 * the text/rodata and the data segment. We pick up that gap
2984 * here to remove it from the kernel map and give it to the 2984 * here to remove it from the kernel map and give it to the
2985 * VM manager later. 2985 * VM manager later.
2986 */ 2986 */
2987 etext_gap_start = (vaddr_t)(etext + NBPG - 1) & ~PGOFSET; 2987 etext_gap_start = (vaddr_t)(etext + NBPG - 1) & ~PGOFSET;
2988 etext_gap_end = (vaddr_t)kernel_data_start & ~PGOFSET; 2988 etext_gap_end = (vaddr_t)kernel_data_start & ~PGOFSET;
2989 2989
2990 if (CPU_HAS_SRMMU) { 2990 if (CPU_HAS_SRMMU) {
2991#if defined(SUN4M) || defined(SUN4D) 2991#if defined(SUN4M) || defined(SUN4D)
2992 pmap_bootstrap4m(p); 2992 pmap_bootstrap4m(p);
2993#endif 2993#endif
2994 } else if (CPU_HAS_SUNMMU) { 2994 } else if (CPU_HAS_SUNMMU) {
2995#if defined(SUN4) || defined(SUN4C) 2995#if defined(SUN4) || defined(SUN4C)
2996 pmap_bootstrap4_4c(p, nctx, nregion, nsegment); 2996 pmap_bootstrap4_4c(p, nctx, nregion, nsegment);
2997#endif 2997#endif
2998 } 2998 }
2999 2999
3000 pmap_page_upload(); 3000 pmap_page_upload();
3001 mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM); 3001 mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM);
3002 mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED); 3002 mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED);
3003} 3003}
3004 3004
3005#if defined(SUN4) || defined(SUN4C) 3005#if defined(SUN4) || defined(SUN4C)
3006void 3006void
3007pmap_bootstrap4_4c(void *top, int nctx, int nregion, int nsegment) 3007pmap_bootstrap4_4c(void *top, int nctx, int nregion, int nsegment)
3008{ 3008{
3009 union ctxinfo *ci; 3009 union ctxinfo *ci;
3010 struct mmuentry *mmuseg; 3010 struct mmuentry *mmuseg;
3011#if defined(SUN4_MMU3L) 3011#if defined(SUN4_MMU3L)
3012 struct mmuentry *mmureg; 3012 struct mmuentry *mmureg;
3013#endif 3013#endif
3014 struct regmap *rp; 3014 struct regmap *rp;
3015 struct segmap *sp; 3015 struct segmap *sp;
3016 int i, j; 3016 int i, j;
3017 int npte, zseg, vr, vs; 3017 int npte, zseg, vr, vs;
3018 int startscookie, scookie; 3018 int startscookie, scookie;
3019#if defined(SUN4_MMU3L) 3019#if defined(SUN4_MMU3L)
3020 int startrcookie = 0, rcookie = 0; 3020 int startrcookie = 0, rcookie = 0;
3021#endif 3021#endif
3022 int *kptes; 3022 int *kptes;
3023 int lastpage; 3023 int lastpage;
3024 vaddr_t va; 3024 vaddr_t va;
3025 vaddr_t p; 3025 vaddr_t p;
3026 extern char kernel_text[]; 3026 extern char kernel_text[];
3027 3027
3028 /* 3028 /*
3029 * Compute `va2pa_offset'. 3029 * Compute `va2pa_offset'.
3030 * Use `kernel_text' to probe the MMU translation since 3030 * Use `kernel_text' to probe the MMU translation since
3031 * the pages at KERNBASE might not be mapped. 3031 * the pages at KERNBASE might not be mapped.
3032 */ 3032 */
3033 va2pa_offset = (vaddr_t)kernel_text - 3033 va2pa_offset = (vaddr_t)kernel_text -
3034 ((getpte4(kernel_text) & PG_PFNUM) << PGSHIFT); 3034 ((getpte4(kernel_text) & PG_PFNUM) << PGSHIFT);
3035 3035
3036 ncontext = nctx; 3036 ncontext = nctx;
3037 3037
3038 switch (cputyp) { 3038 switch (cputyp) {
3039 case CPU_SUN4C: 3039 case CPU_SUN4C:
3040 mmu_has_hole = 1; 3040 mmu_has_hole = 1;
3041 break; 3041 break;
3042 case CPU_SUN4: 3042 case CPU_SUN4:
3043 if (cpuinfo.cpu_type != CPUTYP_4_400) { 3043 if (cpuinfo.cpu_type != CPUTYP_4_400) {
3044 mmu_has_hole = 1; 3044 mmu_has_hole = 1;
3045 break; 3045 break;
3046 } 3046 }
3047 } 3047 }
3048 3048
3049#if defined(SUN4) 3049#if defined(SUN4)
3050 /* 3050 /*
3051 * set up the segfixmask to mask off invalid bits 3051 * set up the segfixmask to mask off invalid bits
3052 */ 3052 */
3053 segfixmask = nsegment - 1; /* assume nsegment is a power of 2 */ 3053 segfixmask = nsegment - 1; /* assume nsegment is a power of 2 */
3054#ifdef DIAGNOSTIC 3054#ifdef DIAGNOSTIC
3055 if (((nsegment & segfixmask) | (nsegment & ~segfixmask)) != nsegment) { 3055 if (((nsegment & segfixmask) | (nsegment & ~segfixmask)) != nsegment) {
3056 printf("pmap_bootstrap: unsuitable number of segments (%d)\n", 3056 printf("pmap_bootstrap: unsuitable number of segments (%d)\n",
3057 nsegment); 3057 nsegment);
3058 callrom(); 3058 callrom();
3059 } 3059 }
3060#endif 3060#endif
3061#endif 3061#endif
3062 3062
3063#if defined(SUN4M) || defined(SUN4D) /* We're in a dual-arch kernel. 3063#if defined(SUN4M) || defined(SUN4D) /* We're in a dual-arch kernel.
3064 Setup 4/4c fn. ptrs */ 3064 Setup 4/4c fn. ptrs */
3065 pmap_clear_modify_p = pmap_clear_modify4_4c; 3065 pmap_clear_modify_p = pmap_clear_modify4_4c;
3066 pmap_clear_reference_p = pmap_clear_reference4_4c; 3066 pmap_clear_reference_p = pmap_clear_reference4_4c;
3067 pmap_enter_p = pmap_enter4_4c; 3067 pmap_enter_p = pmap_enter4_4c;
3068 pmap_extract_p = pmap_extract4_4c; 3068 pmap_extract_p = pmap_extract4_4c;
3069 pmap_is_modified_p = pmap_is_modified4_4c; 3069 pmap_is_modified_p = pmap_is_modified4_4c;
3070 pmap_is_referenced_p = pmap_is_referenced4_4c; 3070 pmap_is_referenced_p = pmap_is_referenced4_4c;
3071 pmap_kenter_pa_p = pmap_kenter_pa4_4c; 3071 pmap_kenter_pa_p = pmap_kenter_pa4_4c;
3072 pmap_kremove_p = pmap_kremove4_4c; 3072 pmap_kremove_p = pmap_kremove4_4c;
3073 pmap_kprotect_p = pmap_kprotect4_4c; 3073 pmap_kprotect_p = pmap_kprotect4_4c;
3074 pmap_page_protect_p = pmap_page_protect4_4c; 3074 pmap_page_protect_p = pmap_page_protect4_4c;
3075 pmap_protect_p = pmap_protect4_4c; 3075 pmap_protect_p = pmap_protect4_4c;
3076 pmap_rmk_p = pmap_rmk4_4c; 3076 pmap_rmk_p = pmap_rmk4_4c;
3077 pmap_rmu_p = pmap_rmu4_4c; 3077 pmap_rmu_p = pmap_rmu4_4c;
3078#endif /* defined SUN4M || defined SUN4D */ 3078#endif /* defined SUN4M || defined SUN4D */
3079 3079
3080 p = (vaddr_t)top; 3080 p = (vaddr_t)top;
3081 3081
3082 /* 3082 /*
3083 * Last segment is the `invalid' one (one PMEG of pte's with !pg_v). 3083 * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
3084 * It will never be used for anything else. 3084 * It will never be used for anything else.
3085 */ 3085 */
3086 seginval = --nsegment; 3086 seginval = --nsegment;
3087 3087
3088#if defined(SUN4_MMU3L) 3088#if defined(SUN4_MMU3L)
3089 if (HASSUN4_MMU3L) 3089 if (HASSUN4_MMU3L)
3090 reginval = --nregion; 3090 reginval = --nregion;
3091#endif 3091#endif
3092 3092
3093 /* 3093 /*
3094 * Allocate and initialise mmu entries and context structures. 3094 * Allocate and initialise mmu entries and context structures.
3095 */ 3095 */
3096#if defined(SUN4_MMU3L) 3096#if defined(SUN4_MMU3L)
3097 mmuregions = mmureg = (struct mmuentry *)p; 3097 mmuregions = mmureg = (struct mmuentry *)p;
3098 p += nregion * sizeof(struct mmuentry); 3098 p += nregion * sizeof(struct mmuentry);
3099 memset(mmuregions, 0, nregion * sizeof(struct mmuentry)); 3099 memset(mmuregions, 0, nregion * sizeof(struct mmuentry));
3100#endif 3100#endif
3101 mmusegments = mmuseg = (struct mmuentry *)p; 3101 mmusegments = mmuseg = (struct mmuentry *)p;
3102 p += nsegment * sizeof(struct mmuentry); 3102 p += nsegment * sizeof(struct mmuentry);
3103 memset(mmusegments, 0, nsegment * sizeof(struct mmuentry)); 3103 memset(mmusegments, 0, nsegment * sizeof(struct mmuentry));
3104 3104
3105 pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p; 3105 pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p;
3106 p += nctx * sizeof *ci; 3106 p += nctx * sizeof *ci;
3107 3107
3108 /* Initialize MMU resource queues */ 3108 /* Initialize MMU resource queues */
3109#if defined(SUN4_MMU3L) 3109#if defined(SUN4_MMU3L)
3110 MMUQ_INIT(&region_freelist); 3110 MMUQ_INIT(&region_freelist);
3111 MMUQ_INIT(&region_lru); 3111 MMUQ_INIT(&region_lru);
3112 MMUQ_INIT(&region_locked); 3112 MMUQ_INIT(&region_locked);
3113#endif 3113#endif
3114 MMUQ_INIT(&segm_freelist); 3114 MMUQ_INIT(&segm_freelist);
3115 MMUQ_INIT(&segm_lru); 3115 MMUQ_INIT(&segm_lru);
3116 MMUQ_INIT(&segm_locked); 3116 MMUQ_INIT(&segm_locked);
3117 3117
3118 3118
3119 /* 3119 /*
3120 * Initialize the kernel pmap. 3120 * Initialize the kernel pmap.
3121 */ 3121 */
3122 /* kernel_pmap_store.pm_ctxnum = 0; */ 3122 /* kernel_pmap_store.pm_ctxnum = 0; */
3123 kernel_pmap_store.pm_refcount = 1; 3123 kernel_pmap_store.pm_refcount = 1;
3124#if defined(SUN4_MMU3L) 3124#if defined(SUN4_MMU3L)
3125 TAILQ_INIT(&kernel_pmap_store.pm_reglist); 3125 TAILQ_INIT(&kernel_pmap_store.pm_reglist);
3126#endif 3126#endif
3127 TAILQ_INIT(&kernel_pmap_store.pm_seglist); 3127 TAILQ_INIT(&kernel_pmap_store.pm_seglist);
3128 3128
3129 /* 3129 /*
3130 * Allocate memory for kernel PTEs 3130 * Allocate memory for kernel PTEs
3131 * XXX Consider allocating memory for only a few regions 3131 * XXX Consider allocating memory for only a few regions
3132 * and use growkernel() to allocate more as needed. 3132 * and use growkernel() to allocate more as needed.
3133 */ 3133 */
3134 kptes = (int *)p; 3134 kptes = (int *)p;
3135 p += NKREG * NSEGRG * NPTESG * sizeof(int); 3135 p += NKREG * NSEGRG * NPTESG * sizeof(int);
3136 memset(kptes, 0, NKREG * NSEGRG * NPTESG * sizeof(int)); 3136 memset(kptes, 0, NKREG * NSEGRG * NPTESG * sizeof(int));
3137 3137
3138 /* 3138 /*
3139 * Set up pm_regmap for kernel to point NUREG *below* the beginning 3139 * Set up pm_regmap for kernel to point NUREG *below* the beginning
3140 * of kernel regmap storage. Since the kernel only uses regions 3140 * of kernel regmap storage. Since the kernel only uses regions
3141 * above NUREG, we save storage space and can index kernel and 3141 * above NUREG, we save storage space and can index kernel and
3142 * user regions in the same way. 3142 * user regions in the same way.
3143 */ 3143 */
3144 kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG]; 3144 kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
3145 for (i = NKREG; --i >= 0;) { 3145 for (i = NKREG; --i >= 0;) {
3146#if defined(SUN4_MMU3L) 3146#if defined(SUN4_MMU3L)
3147 kernel_regmap_store[i].rg_smeg = reginval; 3147 kernel_regmap_store[i].rg_smeg = reginval;
3148#endif 3148#endif
3149 kernel_regmap_store[i].rg_segmap = 3149 kernel_regmap_store[i].rg_segmap =
3150 &kernel_segmap_store[i * NSEGRG]; 3150 &kernel_segmap_store[i * NSEGRG];
3151 for (j = NSEGRG; --j >= 0;) { 3151 for (j = NSEGRG; --j >= 0;) {
3152 sp = &kernel_segmap_store[i * NSEGRG + j]; 3152 sp = &kernel_segmap_store[i * NSEGRG + j];
3153 sp->sg_pmeg = seginval; 3153 sp->sg_pmeg = seginval;
3154 sp->sg_pte = &kptes[(i * NSEGRG + j) * NPTESG]; 3154 sp->sg_pte = &kptes[(i * NSEGRG + j) * NPTESG];
3155 } 3155 }
3156 } 3156 }
3157 3157
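
The pm_regmap assignment above points NUREG entries below kernel_regmap_store, so that VA_VREG() of a kernel address indexes straight into the kernel's storage while user pmaps index their own arrays the same way. A minimal standalone sketch of the idea follows; the NUREG/NKREG values and the regmap layout are invented, and the out-of-bounds pointer arithmetic leans on the same assumption the kernel itself makes:

	#include <stdio.h>

	#define NUREG	240	/* hypothetical number of user regions   */
	#define NKREG	16	/* hypothetical number of kernel regions */

	struct regmap { int rg_nsegmap; };
	static struct regmap kernel_regmap_store[NKREG];

	int
	main(void)
	{
		/* kernel region numbers start at NUREG */
		struct regmap *pm_regmap = &kernel_regmap_store[-NUREG];
		int vr = NUREG + 3;	/* stand-in for VA_VREG() of a kernel address */

		/* &pm_regmap[vr] is &kernel_regmap_store[vr - NUREG], i.e. slot 3 */
		printf("slot %d\n", (int)(&pm_regmap[vr] - kernel_regmap_store));
		return 0;
	}
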
3158 /* 3158 /*
3159 * Preserve the monitor ROM's reserved VM region, so that 3159 * Preserve the monitor ROM's reserved VM region, so that
3160 * we can use L1-A or the monitor's debugger. As a side 3160 * we can use L1-A or the monitor's debugger. As a side
3161 * effect we map the ROM's reserved VM into all contexts 3161 * effect we map the ROM's reserved VM into all contexts
3162 * (otherwise L1-A crashes the machine!). 3162 * (otherwise L1-A crashes the machine!).
3163 */ 3163 */
3164 3164
3165 mmu_reservemon4_4c(&nregion, &nsegment); 3165 mmu_reservemon4_4c(&nregion, &nsegment);
3166 3166
3167#if defined(SUN4_MMU3L) 3167#if defined(SUN4_MMU3L)
3168 /* Reserve one region for temporary mappings */ 3168 /* Reserve one region for temporary mappings */
3169 if (HASSUN4_MMU3L) 3169 if (HASSUN4_MMU3L)
3170 tregion = --nregion; 3170 tregion = --nregion;
3171#endif 3171#endif
3172 3172
3173 /* 3173 /*
3174 * Set up the `constants' for the call to vm_init() 3174 * Set up the `constants' for the call to vm_init()
3175 * in main(). All pages beginning at p (rounded up to 3175 * in main(). All pages beginning at p (rounded up to
3176 * the next whole page) and continuing through the number 3176 * the next whole page) and continuing through the number
3177 * of available pages are free, but they start at a higher 3177 * of available pages are free, but they start at a higher
3178 * virtual address. This gives us two mappable MD pages 3178 * virtual address. This gives us two mappable MD pages
3179 * for pmap_zero_page and pmap_copy_page, and one MI page 3179 * for pmap_zero_page and pmap_copy_page, and one MI page
3180 * for /dev/mem, all with no associated physical memory. 3180 * for /dev/mem, all with no associated physical memory.
3181 */ 3181 */
3182 p = (p + NBPG - 1) & ~PGOFSET; 3182 p = (p + NBPG - 1) & ~PGOFSET;
3183 3183
3184 avail_start = PMAP_BOOTSTRAP_VA2PA(p); 3184 avail_start = PMAP_BOOTSTRAP_VA2PA(p);
3185 3185
3186 i = p; 3186 i = p;
3187 cpuinfo.vpage[0] = (void *)p, p += NBPG; 3187 cpuinfo.vpage[0] = (void *)p, p += NBPG;
3188 cpuinfo.vpage[1] = (void *)p, p += NBPG; 3188 cpuinfo.vpage[1] = (void *)p, p += NBPG;
3189 vmmap = (void *)p, p += NBPG; 3189 vmmap = (void *)p, p += NBPG;
3190 p = (vaddr_t)reserve_dumppages((void *)p); 3190 p = (vaddr_t)reserve_dumppages((void *)p);
3191 3191
3192 virtual_avail = p; 3192 virtual_avail = p;
3193 virtual_end = VM_MAX_KERNEL_ADDRESS; 3193 virtual_end = VM_MAX_KERNEL_ADDRESS;
3194 3194
3195 p = i; /* retract to first free phys */ 3195 p = i; /* retract to first free phys */
3196 3196
3197 3197
3198 /* 3198 /*
3199 * All contexts are free except the kernel's. 3199 * All contexts are free except the kernel's.
3200 * 3200 *
3201 * XXX sun4c could use context 0 for users? 3201 * XXX sun4c could use context 0 for users?
3202 */ 3202 */
3203 ci->c_pmap = pmap_kernel(); 3203 ci->c_pmap = pmap_kernel();
3204 ctx_freelist = ci + 1; 3204 ctx_freelist = ci + 1;
3205 for (i = 1; i < ncontext; i++) { 3205 for (i = 1; i < ncontext; i++) {
3206 ci++; 3206 ci++;
3207 ci->c_nextfree = ci + 1; 3207 ci->c_nextfree = ci + 1;
3208 } 3208 }
3209 ci->c_nextfree = NULL; 3209 ci->c_nextfree = NULL;
3210 ctx_kick = 0; 3210 ctx_kick = 0;
3211 ctx_kickdir = -1; 3211 ctx_kickdir = -1;
3212 3212
3213 /* 3213 /*
3214 * Init mmu entries that map the kernel physical addresses. 3214 * Init mmu entries that map the kernel physical addresses.
3215 * 3215 *
3216 * All the other MMU entries are free. 3216 * All the other MMU entries are free.
3217 * 3217 *
3218 * THIS ASSUMES THE KERNEL IS MAPPED BY A CONTIGUOUS RANGE OF 3218 * THIS ASSUMES THE KERNEL IS MAPPED BY A CONTIGUOUS RANGE OF
3219 * MMU SEGMENTS/REGIONS DURING THE BOOT PROCESS 3219 * MMU SEGMENTS/REGIONS DURING THE BOOT PROCESS
3220 */ 3220 */
3221 3221
3222 /* Compute the number of segments used by the kernel */ 3222 /* Compute the number of segments used by the kernel */
3223 zseg = (((p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT; 3223 zseg = (((p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
3224 lastpage = VA_VPG(p); 3224 lastpage = VA_VPG(p);
3225 if (lastpage == 0) 3225 if (lastpage == 0)
3226 /* 3226 /*
3227 * If the page bits in p are 0, we filled the last segment 3227 * If the page bits in p are 0, we filled the last segment
3228 * exactly; if not, it is the last page filled in the 3228 * exactly; if not, it is the last page filled in the
3229 * last segment. 3229 * last segment.
3230 */ 3230 */
3231 lastpage = NPTESG; 3231 lastpage = NPTESG;
3232 3232
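
The zseg/lastpage computation above counts the MMU segments occupied by the loaded kernel image and the number of PTEs used in the final, possibly partial, segment. A standalone worked example follows; the page and segment sizes are only illustrative:

	#include <stdio.h>

	#define NBPG	0x1000				/* assumed 4 KB pages         */
	#define PGSHIFT	12
	#define NBPSG	0x40000				/* assumed 256 KB MMU segment */
	#define SGOFSET	(NBPSG - 1)
	#define SGSHIFT	18
	#define NPTESG	(NBPSG / NBPG)			/* 64 PTEs per segment        */
	#define KERNBASE 0xf0000000UL

	int
	main(void)
	{
		unsigned long p = KERNBASE + 0x123000;	/* pretend end of loaded kernel */
		unsigned long zseg, lastpage;

		zseg = (((p + NBPSG - 1) & ~(unsigned long)SGOFSET) - KERNBASE) >> SGSHIFT;
		lastpage = (p >> PGSHIFT) & (NPTESG - 1);	/* stand-in for VA_VPG(p) */
		if (lastpage == 0)
			lastpage = NPTESG;	/* image ended exactly on a segment boundary */

		printf("segments: %lu, ptes in last segment: %lu\n", zseg, lastpage);
		return 0;
	}
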
3233 p = KERNBASE; /* first va */ 3233 p = KERNBASE; /* first va */
3234 vs = VA_VSEG(KERNBASE); /* first virtual segment */ 3234 vs = VA_VSEG(KERNBASE); /* first virtual segment */
3235 vr = VA_VREG(KERNBASE); /* first virtual region */ 3235 vr = VA_VREG(KERNBASE); /* first virtual region */
3236 rp = &pmap_kernel()->pm_regmap[vr]; 3236 rp = &pmap_kernel()->pm_regmap[vr];
3237 3237
3238 /* Get region/segment where kernel addresses start */ 3238 /* Get region/segment where kernel addresses start */
3239#if defined(SUN4_MMU3L) 3239#if defined(SUN4_MMU3L)
3240 if (HASSUN4_MMU3L) 3240 if (HASSUN4_MMU3L)
3241 startrcookie = rcookie = getregmap(p); 3241 startrcookie = rcookie = getregmap(p);
3242 mmureg = &mmuregions[rcookie]; 3242 mmureg = &mmuregions[rcookie];
3243#endif 3243#endif
3244 3244
3245 startscookie = scookie = getsegmap(p); 3245 startscookie = scookie = getsegmap(p);
3246 mmuseg = &mmusegments[scookie]; 3246 mmuseg = &mmusegments[scookie];
3247 zseg += scookie; /* First free segment */ 3247 zseg += scookie; /* First free segment */
3248 3248
3249 for (;;) { 3249 for (;;) {
3250 3250
3251 /* 3251 /*
3252 * Distribute each kernel region/segment into all contexts. 3252 * Distribute each kernel region/segment into all contexts.
3253 * This is done through the monitor ROM, rather than 3253 * This is done through the monitor ROM, rather than
3254 * directly here: if we do a setcontext we will fault, 3254 * directly here: if we do a setcontext we will fault,
3255 * as we are not (yet) mapped in any other context. 3255 * as we are not (yet) mapped in any other context.
3256 */ 3256 */
3257 3257
3258 if ((vs % NSEGRG) == 0) { 3258 if ((vs % NSEGRG) == 0) {
3259 /* Entering a new region */ 3259 /* Entering a new region */
3260 if (VA_VREG(p) > vr) { 3260 if (VA_VREG(p) > vr) {
3261#ifdef DEBUG 3261#ifdef DEBUG
3262 printf("note: giant kernel!\n"); 3262 printf("note: giant kernel!\n");
3263#endif 3263#endif
3264 vr++, rp++; 3264 vr++, rp++;
3265 } 3265 }
3266#if defined(SUN4_MMU3L) 3266#if defined(SUN4_MMU3L)
3267 if (HASSUN4_MMU3L) { 3267 if (HASSUN4_MMU3L) {
3268 for (i = 1; i < nctx; i++) 3268 for (i = 1; i < nctx; i++)
3269 prom_setcontext(i, (void *)p, rcookie); 3269 prom_setcontext(i, (void *)p, rcookie);
3270 3270
3271 MMUQ_INSERT_TAIL(&region_locked, 3271 MMUQ_INSERT_TAIL(&region_locked,
3272 mmureg, me_list); 3272 mmureg, me_list);
3273 TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist, 3273 TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist,
3274 mmureg, me_pmchain); 3274 mmureg, me_pmchain);
3275#ifdef DIAGNOSTIC 3275#ifdef DIAGNOSTIC
3276 mmuseg->me_statp = NULL; 3276 mmuseg->me_statp = NULL;
3277#endif 3277#endif
3278 mmureg->me_cookie = rcookie; 3278 mmureg->me_cookie = rcookie;
3279 mmureg->me_pmap = pmap_kernel(); 3279 mmureg->me_pmap = pmap_kernel();
3280 mmureg->me_vreg = vr; 3280 mmureg->me_vreg = vr;
3281 rp->rg_smeg = rcookie; 3281 rp->rg_smeg = rcookie;
3282 mmureg++; 3282 mmureg++;
3283 rcookie++; 3283 rcookie++;
3284 } 3284 }
3285#endif /* SUN4_MMU3L */ 3285#endif /* SUN4_MMU3L */
3286 } 3286 }
3287 3287
3288#if defined(SUN4_MMU3L) 3288#if defined(SUN4_MMU3L)
3289 if (!HASSUN4_MMU3L) 3289 if (!HASSUN4_MMU3L)
3290#endif 3290#endif
3291 for (i = 1; i < nctx; i++) 3291 for (i = 1; i < nctx; i++)
3292 prom_setcontext(i, (void *)p, scookie); 3292 prom_setcontext(i, (void *)p, scookie);
3293 3293
3294 /* set up the mmu entry */ 3294 /* set up the mmu entry */
3295 MMUQ_INSERT_TAIL(&segm_locked, mmuseg, me_list); 3295 MMUQ_INSERT_TAIL(&segm_locked, mmuseg, me_list);
3296#ifdef DIAGNOSTIC 3296#ifdef DIAGNOSTIC
3297 mmuseg->me_statp = &pmap_stats.ps_npmeg_locked; 3297 mmuseg->me_statp = &pmap_stats.ps_npmeg_locked;
3298#endif 3298#endif
3299 TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain); 3299 TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain);
3300 pmap_stats.ps_npmeg_locked++; 3300 pmap_stats.ps_npmeg_locked++;
3301 mmuseg->me_cookie = scookie; 3301 mmuseg->me_cookie = scookie;
3302 mmuseg->me_pmap = pmap_kernel(); 3302 mmuseg->me_pmap = pmap_kernel();
3303 mmuseg->me_vreg = vr; 3303 mmuseg->me_vreg = vr;
3304 mmuseg->me_vseg = vs % NSEGRG; 3304 mmuseg->me_vseg = vs % NSEGRG;
3305 sp = &rp->rg_segmap[vs % NSEGRG]; 3305 sp = &rp->rg_segmap[vs % NSEGRG];
3306 sp->sg_pmeg = scookie; 3306 sp->sg_pmeg = scookie;
3307 npte = ++scookie < zseg ? NPTESG : lastpage; 3307 npte = ++scookie < zseg ? NPTESG : lastpage;
3308 sp->sg_npte = npte; 3308 sp->sg_npte = npte;
3309 sp->sg_nwired = npte; 3309 sp->sg_nwired = npte;
3310 pmap_kernel()->pm_stats.resident_count += npte; 3310 pmap_kernel()->pm_stats.resident_count += npte;
3311 rp->rg_nsegmap += 1; 3311 rp->rg_nsegmap += 1;
3312 for (i = 0; i < npte; i++) 3312 for (i = 0; i < npte; i++)
3313 sp->sg_pte[i] = getpte4(p + i * NBPG) | PG_WIRED; 3313 sp->sg_pte[i] = getpte4(p + i * NBPG) | PG_WIRED;
3314 mmuseg++; 3314 mmuseg++;
3315 vs++; 3315 vs++;
3316 if (scookie < zseg) { 3316 if (scookie < zseg) {
3317 p += NBPSG; 3317 p += NBPSG;
3318 continue; 3318 continue;
3319 } 3319 }
3320 3320
3321 /* 3321 /*
3322 * Unmap the pages, if any, that are not part of 3322 * Unmap the pages, if any, that are not part of
3323 * the final segment. 3323 * the final segment.
3324 */ 3324 */
3325 for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG) 3325 for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG)
3326 setpte4(p, 0); 3326 setpte4(p, 0);
3327 3327
3328#if defined(SUN4_MMU3L) 3328#if defined(SUN4_MMU3L)
3329 if (HASSUN4_MMU3L) { 3329 if (HASSUN4_MMU3L) {
3330 /* 3330 /*
3331 * Unmap the segments, if any, that are not part of 3331 * Unmap the segments, if any, that are not part of
3332 * the final region. 3332 * the final region.
3333 */ 3333 */
3334 for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG) 3334 for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
3335 setsegmap(p, seginval); 3335 setsegmap(p, seginval);
3336 3336
3337 /* 3337 /*
3338 * Unmap any kernel regions that we aren't using. 3338 * Unmap any kernel regions that we aren't using.
3339 */ 3339 */
3340 for (i = 0; i < nctx; i++) { 3340 for (i = 0; i < nctx; i++) {
3341 setcontext4(i); 3341 setcontext4(i);
3342 for (va = p; 3342 for (va = p;
3343 va < (OPENPROM_STARTVADDR & ~(NBPRG - 1)); 3343 va < (OPENPROM_STARTVADDR & ~(NBPRG - 1));
3344 va += NBPRG) 3344 va += NBPRG)
3345 setregmap(va, reginval); 3345 setregmap(va, reginval);
3346 } 3346 }
3347 3347
3348 } else 3348 } else
3349#endif 3349#endif
3350 { 3350 {
3351 /* 3351 /*
3352 * Unmap any kernel segments that we aren't using. 3352 * Unmap any kernel segments that we aren't using.
3353 */ 3353 */
3354 for (i = 0; i < nctx; i++) { 3354 for (i = 0; i < nctx; i++) {
3355 setcontext4(i); 3355 setcontext4(i);
3356 for (va = p; 3356 for (va = p;
3357 va < (OPENPROM_STARTVADDR & ~(NBPSG - 1)); 3357 va < (OPENPROM_STARTVADDR & ~(NBPSG - 1));
3358 va += NBPSG) 3358 va += NBPSG)
3359 setsegmap(va, seginval); 3359 setsegmap(va, seginval);
3360 } 3360 }
3361 } 3361 }
3362 break; 3362 break;
3363 } 3363 }
3364 3364
3365#if defined(SUN4_MMU3L) 3365#if defined(SUN4_MMU3L)
3366 if (HASSUN4_MMU3L) 3366 if (HASSUN4_MMU3L)
3367 for (rcookie = 0; rcookie < nregion; rcookie++) { 3367 for (rcookie = 0; rcookie < nregion; rcookie++) {
3368 if (rcookie == startrcookie) 3368 if (rcookie == startrcookie)
3369 /* Kernel must fit in one region! */ 3369 /* Kernel must fit in one region! */
3370 rcookie++; 3370 rcookie++;
3371 mmureg = &mmuregions[rcookie]; 3371 mmureg = &mmuregions[rcookie];
3372 mmureg->me_cookie = rcookie; 3372 mmureg->me_cookie = rcookie;
3373 MMUQ_INSERT_TAIL(&region_freelist, mmureg, me_list); 3373 MMUQ_INSERT_TAIL(&region_freelist, mmureg, me_list);
3374#ifdef DIAGNOSTIC 3374#ifdef DIAGNOSTIC
3375 mmuseg->me_statp = NULL; 3375 mmuseg->me_statp = NULL;
3376#endif 3376#endif
3377 } 3377 }
3378#endif /* SUN4_MMU3L */ 3378#endif /* SUN4_MMU3L */
3379 3379
3380 for (scookie = 0; scookie < nsegment; scookie++) { 3380 for (scookie = 0; scookie < nsegment; scookie++) {
3381 if (scookie == startscookie) 3381 if (scookie == startscookie)
3382 /* Skip static kernel image */ 3382 /* Skip static kernel image */
3383 scookie = zseg; 3383 scookie = zseg;
3384 mmuseg = &mmusegments[scookie]; 3384 mmuseg = &mmusegments[scookie];
3385 mmuseg->me_cookie = scookie; 3385 mmuseg->me_cookie = scookie;
3386 MMUQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list); 3386 MMUQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list);
3387 pmap_stats.ps_npmeg_free++; 3387 pmap_stats.ps_npmeg_free++;
3388#ifdef DIAGNOSTIC 3388#ifdef DIAGNOSTIC
3389 mmuseg->me_statp = NULL; 3389 mmuseg->me_statp = NULL;
3390#endif 3390#endif
3391 } 3391 }
3392 3392
3393 /* Erase all spurious user-space segmaps */ 3393 /* Erase all spurious user-space segmaps */
3394 for (i = 1; i < ncontext; i++) { 3394 for (i = 1; i < ncontext; i++) {
3395 setcontext4(i); 3395 setcontext4(i);
3396 if (HASSUN4_MMU3L) 3396 if (HASSUN4_MMU3L)
3397 for (p = 0, j = NUREG; --j >= 0; p += NBPRG) 3397 for (p = 0, j = NUREG; --j >= 0; p += NBPRG)
3398 setregmap(p, reginval); 3398 setregmap(p, reginval);
3399 else 3399 else
3400 for (p = 0, vr = 0; vr < NUREG; vr++) { 3400 for (p = 0, vr = 0; vr < NUREG; vr++) {
3401 if (VA_INHOLE(p)) { 3401 if (VA_INHOLE(p)) {
3402 p = MMU_HOLE_END; 3402 p = MMU_HOLE_END;
3403 vr = VA_VREG(p); 3403 vr = VA_VREG(p);
3404 } 3404 }
3405 for (j = NSEGRG; --j >= 0; p += NBPSG) 3405 for (j = NSEGRG; --j >= 0; p += NBPSG)
3406 setsegmap(p, seginval); 3406 setsegmap(p, seginval);
3407 } 3407 }
3408 } 3408 }
3409 setcontext4(0); 3409 setcontext4(0);
3410 3410
3411 /* 3411 /*
3412 * write protect & encache kernel text; 3412 * write protect & encache kernel text;
3413 * set red zone at kernel base; 3413 * set red zone at kernel base;
3414 * enable cache on message buffer and cpuinfo. 3414 * enable cache on message buffer and cpuinfo.
3415 */ 3415 */
3416 { 3416 {
3417 extern char etext[]; 3417 extern char etext[];
3418 3418
3419 /* Enable cache on message buffer and cpuinfo */ 3419 /* Enable cache on message buffer and cpuinfo */
3420 for (p = KERNBASE; p < (vaddr_t)trapbase; p += NBPG) 3420 for (p = KERNBASE; p < (vaddr_t)trapbase; p += NBPG)
3421 setpte4(p, getpte4(p) & ~PG_NC); 3421 setpte4(p, getpte4(p) & ~PG_NC);
3422 3422
 3423 /* Enable cache and write-protect kernel text */ 3423 /* Enable cache and write-protect kernel text */
3424 for (p = (vaddr_t)trapbase; p < (vaddr_t)etext; p += NBPG) 3424 for (p = (vaddr_t)trapbase; p < (vaddr_t)etext; p += NBPG)
3425 setpte4(p, getpte4(p) & ~(PG_NC|PG_W)); 3425 setpte4(p, getpte4(p) & ~(PG_NC|PG_W));
3426 3426
3427 /* 3427 /*
3428 * Unmap the `etext gap'; it'll be made available 3428 * Unmap the `etext gap'; it'll be made available
3429 * to the VM manager. 3429 * to the VM manager.
3430 */ 3430 */
3431 for (p = etext_gap_start; p < etext_gap_end; p += NBPG) { 3431 for (p = etext_gap_start; p < etext_gap_end; p += NBPG) {
3432 rp = &pmap_kernel()->pm_regmap[VA_VREG(p)]; 3432 rp = &pmap_kernel()->pm_regmap[VA_VREG(p)];
3433 sp = &rp->rg_segmap[VA_VSEG(p)]; 3433 sp = &rp->rg_segmap[VA_VSEG(p)];
3434 sp->sg_nwired--; 3434 sp->sg_nwired--;
3435 sp->sg_npte--; 3435 sp->sg_npte--;
3436 pmap_kernel()->pm_stats.resident_count--; 3436 pmap_kernel()->pm_stats.resident_count--;
3437 sp->sg_pte[VA_VPG(p)] = 0; 3437 sp->sg_pte[VA_VPG(p)] = 0;
3438 setpte4(p, 0); 3438 setpte4(p, 0);
3439 } 3439 }
3440 3440
3441 /* Enable cache on data & bss */ 3441 /* Enable cache on data & bss */
3442 for (p = etext_gap_end; p < virtual_avail; p += NBPG) 3442 for (p = etext_gap_end; p < virtual_avail; p += NBPG)
3443 setpte4(p, getpte4(p) & ~PG_NC); 3443 setpte4(p, getpte4(p) & ~PG_NC);
3444 3444
3445 } 3445 }
3446} 3446}
3447#endif 3447#endif
3448 3448
3449#if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_bootstrap */ 3449#if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_bootstrap */
3450/* 3450/*
3451 * Bootstrap the system enough to run with VM enabled on a sun4m machine. 3451 * Bootstrap the system enough to run with VM enabled on a sun4m machine.
3452 * 3452 *
3453 * Switches from ROM to kernel page tables, and sets up initial mappings. 3453 * Switches from ROM to kernel page tables, and sets up initial mappings.
3454 */ 3454 */
3455static void 3455static void
3456pmap_bootstrap4m(void *top) 3456pmap_bootstrap4m(void *top)
3457{ 3457{
3458 int i, j; 3458 int i, j;
3459 vaddr_t p, q; 3459 vaddr_t p, q;
3460 union ctxinfo *ci; 3460 union ctxinfo *ci;
3461 int reg, seg; 3461 int reg, seg;
3462 unsigned int ctxtblsize; 3462 unsigned int ctxtblsize;
3463 vaddr_t pagetables_start, pagetables_end; 3463 vaddr_t pagetables_start, pagetables_end;
3464 paddr_t pagetables_start_pa; 3464 paddr_t pagetables_start_pa;
3465 extern char etext[]; 3465 extern char etext[];
3466 extern char kernel_text[]; 3466 extern char kernel_text[];
3467 vaddr_t va; 3467 vaddr_t va;
3468#ifdef MULTIPROCESSOR 3468#ifdef MULTIPROCESSOR
3469 vsize_t off; 3469 vsize_t off;
3470 size_t cpuinfo_len; 3470 size_t cpuinfo_len;
3471 uint8_t *cpuinfo_data; 3471 uint8_t *cpuinfo_data;
3472#endif 3472#endif
3473 3473
3474 /* 3474 /*
3475 * Compute `va2pa_offset'. 3475 * Compute `va2pa_offset'.
3476 * Use `kernel_text' to probe the MMU translation since 3476 * Use `kernel_text' to probe the MMU translation since
3477 * the pages at KERNBASE might not be mapped. 3477 * the pages at KERNBASE might not be mapped.
3478 */ 3478 */
3479 va2pa_offset = (vaddr_t)kernel_text - VA2PA(kernel_text); 3479 va2pa_offset = (vaddr_t)kernel_text - VA2PA(kernel_text);
3480 3480
3481 ncontext = cpuinfo.mmu_ncontext; 3481 ncontext = cpuinfo.mmu_ncontext;
3482 3482
3483#if defined(SUN4) || defined(SUN4C) /* setup SRMMU fn. ptrs for dual-arch 3483#if defined(SUN4) || defined(SUN4C) /* setup SRMMU fn. ptrs for dual-arch
3484 kernel */ 3484 kernel */
3485 pmap_clear_modify_p = pmap_clear_modify4m; 3485 pmap_clear_modify_p = pmap_clear_modify4m;
3486 pmap_clear_reference_p = pmap_clear_reference4m; 3486 pmap_clear_reference_p = pmap_clear_reference4m;
3487 pmap_enter_p = pmap_enter4m; 3487 pmap_enter_p = pmap_enter4m;
3488 pmap_extract_p = pmap_extract4m; 3488 pmap_extract_p = pmap_extract4m;
3489 pmap_is_modified_p = pmap_is_modified4m; 3489 pmap_is_modified_p = pmap_is_modified4m;
3490 pmap_is_referenced_p = pmap_is_referenced4m; 3490 pmap_is_referenced_p = pmap_is_referenced4m;
3491 pmap_kenter_pa_p = pmap_kenter_pa4m; 3491 pmap_kenter_pa_p = pmap_kenter_pa4m;
3492 pmap_kremove_p = pmap_kremove4m; 3492 pmap_kremove_p = pmap_kremove4m;
3493 pmap_kprotect_p = pmap_kprotect4m; 3493 pmap_kprotect_p = pmap_kprotect4m;
3494 pmap_page_protect_p = pmap_page_protect4m; 3494 pmap_page_protect_p = pmap_page_protect4m;
3495 pmap_protect_p = pmap_protect4m; 3495 pmap_protect_p = pmap_protect4m;
3496 pmap_rmk_p = pmap_rmk4m; 3496 pmap_rmk_p = pmap_rmk4m;
3497 pmap_rmu_p = pmap_rmu4m; 3497 pmap_rmu_p = pmap_rmu4m;
3498#endif /* defined SUN4/SUN4C */ 3498#endif /* defined SUN4/SUN4C */
3499 3499
3500 /* 3500 /*
3501 * p points to top of kernel mem 3501 * p points to top of kernel mem
3502 */ 3502 */
3503 p = (vaddr_t)top; 3503 p = (vaddr_t)top;
3504 3504
 3505#if defined(MULTIPROCESSOR)
 3506 /*
 3507 * allocate the rest of the cpu_info{} area. note we waste the
 3508 * first one to get a VA space.
 3509 */
 3510 cpuinfo_len = ((sizeof(struct cpu_info) + NBPG - 1) & ~PGOFSET);
 3511 if (sparc_ncpus > 1) {
 3512 p = (p + NBPG - 1) & ~PGOFSET;
 3513 cpuinfo_data = (uint8_t *)p;
 3514 p += (cpuinfo_len * sparc_ncpus);
 3515
 3516 /* XXX we waste the first one */
 3517 memset(cpuinfo_data + cpuinfo_len, 0, cpuinfo_len * (sparc_ncpus - 1));
 3518 } else
 3519 cpuinfo_data = (uint8_t *)CPUINFO_VA;
 3520#endif
 3521
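
The block added above reserves one page-rounded cpu_info slot per CPU when more than one CPU is present (the cpu0 slot is kept only to preserve the VA layout). A small sketch of the size arithmetic follows; the structure size and CPU count are hypothetical:

	#include <stdio.h>

	#define NBPG	4096
	#define PGOFSET	(NBPG - 1)

	int
	main(void)
	{
		size_t cpu_info_size = 5000;	/* hypothetical sizeof(struct cpu_info) */
		int sparc_ncpus = 4;		/* hypothetical CPU count */
		size_t cpuinfo_len = (cpu_info_size + NBPG - 1) & ~(size_t)PGOFSET;

		/* one page-rounded slot per CPU; the cpu0 slot is only a placeholder */
		printf("slot %zu bytes, total %zu bytes\n",
		    cpuinfo_len, cpuinfo_len * sparc_ncpus);
		return 0;
	}
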
3505 /* 3522 /*
 3506 * Initialize the kernel pmap. 3523 * Initialize the kernel pmap.
3507 */ 3524 */
3508 /* kernel_pmap_store.pm_ctxnum = 0; */ 3525 /* kernel_pmap_store.pm_ctxnum = 0; */
3509 kernel_pmap_store.pm_refcount = 1; 3526 kernel_pmap_store.pm_refcount = 1;
3510 3527
3511 /* 3528 /*
3512 * Set up pm_regmap for kernel to point NUREG *below* the beginning 3529 * Set up pm_regmap for kernel to point NUREG *below* the beginning
3513 * of kernel regmap storage. Since the kernel only uses regions 3530 * of kernel regmap storage. Since the kernel only uses regions
3514 * above NUREG, we save storage space and can index kernel and 3531 * above NUREG, we save storage space and can index kernel and
3515 * user regions in the same way. 3532 * user regions in the same way.
3516 */ 3533 */
3517 kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG]; 3534 kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
3518 memset(kernel_regmap_store, 0, NKREG * sizeof(struct regmap)); 3535 memset(kernel_regmap_store, 0, NKREG * sizeof(struct regmap));
3519 memset(kernel_segmap_store, 0, NKREG * NSEGRG * sizeof(struct segmap)); 3536 memset(kernel_segmap_store, 0, NKREG * NSEGRG * sizeof(struct segmap));
3520 for (i = NKREG; --i >= 0;) { 3537 for (i = NKREG; --i >= 0;) {
3521 kernel_regmap_store[i].rg_segmap = 3538 kernel_regmap_store[i].rg_segmap =
3522 &kernel_segmap_store[i * NSEGRG]; 3539 &kernel_segmap_store[i * NSEGRG];
3523 kernel_regmap_store[i].rg_seg_ptps = NULL; 3540 kernel_regmap_store[i].rg_seg_ptps = NULL;
3524 for (j = NSEGRG; --j >= 0;) 3541 for (j = NSEGRG; --j >= 0;)
3525 kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL; 3542 kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL;
3526 } 3543 }
3527 3544
3528 /* Allocate kernel region pointer tables */ 3545 /* Allocate kernel region pointer tables */
3529 pmap_kernel()->pm_reg_ptps = (int **)(q = p); 3546 pmap_kernel()->pm_reg_ptps = (int **)(q = p);
3530 p += sparc_ncpus * sizeof(int **); 3547 p += sparc_ncpus * sizeof(int **);
3531 memset((void *)q, 0, (u_int)p - (u_int)q); 3548 memset((void *)q, 0, (u_int)p - (u_int)q);
3532 3549
3533 pmap_kernel()->pm_reg_ptps_pa = (int *)(q = p); 3550 pmap_kernel()->pm_reg_ptps_pa = (int *)(q = p);
3534 p += sparc_ncpus * sizeof(int *); 3551 p += sparc_ncpus * sizeof(int *);
3535 memset((void *)q, 0, (u_int)p - (u_int)q); 3552 memset((void *)q, 0, (u_int)p - (u_int)q);
3536 3553
3537 /* Allocate context administration */ 3554 /* Allocate context administration */
3538 pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p; 3555 pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p;
3539 p += ncontext * sizeof *ci; 3556 p += ncontext * sizeof *ci;
3540 memset((void *)ci, 0, (u_int)p - (u_int)ci); 3557 memset((void *)ci, 0, (u_int)p - (u_int)ci);
3541 3558
3542#if defined(MULTIPROCESSOR) 
3543 /* 
3544 * allocate the rest of the cpu_info{} area. note we waste the 
3545 * first one to get a VA space. 
3546 */ 
3547 p = (p + NBPG - 1) & ~PGOFSET; 
3548 cpuinfo_data = (uint8_t *)p; 
3549 cpuinfo_len = ((sizeof(struct cpu_info) + NBPG - 1) & ~PGOFSET); 
3550 p += (cpuinfo_len * sparc_ncpus); 
3551 prom_printf("extra cpus: %p, p: %p, gap start: %p, gap end: %p\n", 
3552 cpuinfo_data, p, etext_gap_start, etext_gap_end); 
3553 
3554 /* XXX we waste the first one */ 
3555 memset(cpuinfo_data + cpuinfo_len, 0, cpuinfo_len * (sparc_ncpus - 1)); 
3556#endif 
3557 
3558 /* 3559 /*
3559 * Set up the `constants' for the call to vm_init() 3560 * Set up the `constants' for the call to vm_init()
3560 * in main(). All pages beginning at p (rounded up to 3561 * in main(). All pages beginning at p (rounded up to
3561 * the next whole page) and continuing through the number 3562 * the next whole page) and continuing through the number
3562 * of available pages are free. 3563 * of available pages are free.
3563 */ 3564 */
3564 p = (p + NBPG - 1) & ~PGOFSET; 3565 p = (p + NBPG - 1) & ~PGOFSET;
3565 3566
3566 /* 3567 /*
3567 * Reserve memory for MMU pagetables. Some of these have severe 3568 * Reserve memory for MMU pagetables. Some of these have severe
3568 * alignment restrictions. We allocate in a sequence that 3569 * alignment restrictions. We allocate in a sequence that
3569 * minimizes alignment gaps. 3570 * minimizes alignment gaps.
3570 */ 3571 */
3571 3572
3572 pagetables_start = p; 3573 pagetables_start = p;
3573 pagetables_start_pa = PMAP_BOOTSTRAP_VA2PA(p); 3574 pagetables_start_pa = PMAP_BOOTSTRAP_VA2PA(p);
3574 3575
3575 /* 3576 /*
3576 * Allocate context table. 3577 * Allocate context table.
 3577 * To keep supersparc happy, minimum alignment is on a 4K boundary. 3578 * To keep supersparc happy, minimum alignment is on a 4K boundary.
3578 */ 3579 */
3579 ctxtblsize = max(ncontext,1024) * sizeof(int); 3580 ctxtblsize = max(ncontext,1024) * sizeof(int);
3580 cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize); 3581 cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize);
3581 cpuinfo.ctx_tbl_pa = PMAP_BOOTSTRAP_VA2PA(cpuinfo.ctx_tbl); 3582 cpuinfo.ctx_tbl_pa = PMAP_BOOTSTRAP_VA2PA(cpuinfo.ctx_tbl);
3582 p = (u_int)cpuinfo.ctx_tbl + ctxtblsize; 3583 p = (u_int)cpuinfo.ctx_tbl + ctxtblsize;
3583 3584
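
The context table set up above is aligned to its own size (at least 4 K, per the supersparc note). A standalone illustration of the roundup() arithmetic follows, using an invented allocation cursor and context count; roundup() is written out as in <sys/param.h>:

	#include <stdio.h>

	#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

	int
	main(void)
	{
		unsigned int ncontext = 256;		/* hypothetical MMU context count */
		unsigned int ctxtblsize = (ncontext > 1024 ? ncontext : 1024) *
		    (unsigned int)sizeof(int);
		unsigned int p = 0x00123456;		/* pretend allocation cursor */

		/* the table is placed at the next multiple of its own size */
		printf("ctxtblsize=%u, table va=0x%08x\n", ctxtblsize, roundup(p, ctxtblsize));
		return 0;
	}
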
3584#if defined(MULTIPROCESSOR) 3585#if defined(MULTIPROCESSOR)
3585 /* 3586 /*
3586 * Make sure all smp_tlb_flush*() routines for kernel pmap are 3587 * Make sure all smp_tlb_flush*() routines for kernel pmap are
3587 * broadcast to all CPU's. 3588 * broadcast to all CPU's.
3588 */ 3589 */
3589 pmap_kernel()->pm_cpuset = CPUSET_ALL; 3590 pmap_kernel()->pm_cpuset = CPUSET_ALL;
3590#endif 3591#endif
3591 3592
3592 /* 3593 /*
3593 * Reserve memory for segment and page tables needed to map the entire 3594 * Reserve memory for segment and page tables needed to map the entire
3594 * kernel. This takes (2K + NKREG * 16K) of space, but unfortunately 3595 * kernel. This takes (2K + NKREG * 16K) of space, but unfortunately
3595 * is necessary since pmap_enter() *must* be able to enter a kernel 3596 * is necessary since pmap_enter() *must* be able to enter a kernel
3596 * mapping without delay. 3597 * mapping without delay.
3597 */ 3598 */
3598 p = (vaddr_t) roundup(p, SRMMU_L1SIZE * sizeof(u_int)); 3599 p = (vaddr_t) roundup(p, SRMMU_L1SIZE * sizeof(u_int));
3599 qzero((void *)p, SRMMU_L1SIZE * sizeof(u_int)); 3600 qzero((void *)p, SRMMU_L1SIZE * sizeof(u_int));
3600 kernel_regtable_store = (u_int *)p; 3601 kernel_regtable_store = (u_int *)p;
3601 p += SRMMU_L1SIZE * sizeof(u_int); 3602 p += SRMMU_L1SIZE * sizeof(u_int);
3602 3603
3603 p = (vaddr_t) roundup(p, SRMMU_L2SIZE * sizeof(u_int)); 3604 p = (vaddr_t) roundup(p, SRMMU_L2SIZE * sizeof(u_int));
3604 qzero((void *)p, (SRMMU_L2SIZE * sizeof(u_int)) * NKREG); 3605 qzero((void *)p, (SRMMU_L2SIZE * sizeof(u_int)) * NKREG);
3605 kernel_segtable_store = (u_int *)p; 3606 kernel_segtable_store = (u_int *)p;
3606 p += (SRMMU_L2SIZE * sizeof(u_int)) * NKREG; 3607 p += (SRMMU_L2SIZE * sizeof(u_int)) * NKREG;
3607 3608
3608 p = (vaddr_t) roundup(p, SRMMU_L3SIZE * sizeof(u_int)); 3609 p = (vaddr_t) roundup(p, SRMMU_L3SIZE * sizeof(u_int));
3609 /* zero it: all will be SRMMU_TEINVALID */ 3610 /* zero it: all will be SRMMU_TEINVALID */
3610 qzero((void *)p, ((SRMMU_L3SIZE * sizeof(u_int)) * NSEGRG) * NKREG); 3611 qzero((void *)p, ((SRMMU_L3SIZE * sizeof(u_int)) * NSEGRG) * NKREG);
3611 kernel_pagtable_store = (u_int *)p; 3612 kernel_pagtable_store = (u_int *)p;
3612 p += ((SRMMU_L3SIZE * sizeof(u_int)) * NSEGRG) * NKREG; 3613 p += ((SRMMU_L3SIZE * sizeof(u_int)) * NSEGRG) * NKREG;
3613 3614
3614 /* Round to next page and mark end of pre-wired kernel space */ 3615 /* Round to next page and mark end of pre-wired kernel space */
3615 p = (p + NBPG - 1) & ~PGOFSET; 3616 p = (p + NBPG - 1) & ~PGOFSET;
3616 pagetables_end = p; 3617 pagetables_end = p;
3617 3618
3618 avail_start = PMAP_BOOTSTRAP_VA2PA(p); 3619 avail_start = PMAP_BOOTSTRAP_VA2PA(p);
3619 3620
3620 /* 3621 /*
3621 * Now wire the region and segment tables of the kernel map. 3622 * Now wire the region and segment tables of the kernel map.
3622 */ 3623 */
3623 pmap_kernel()->pm_reg_ptps[0] = (int *) kernel_regtable_store; 3624 pmap_kernel()->pm_reg_ptps[0] = (int *) kernel_regtable_store;
3624 pmap_kernel()->pm_reg_ptps_pa[0] = 3625 pmap_kernel()->pm_reg_ptps_pa[0] =
3625 PMAP_BOOTSTRAP_VA2PA(kernel_regtable_store); 3626 PMAP_BOOTSTRAP_VA2PA(kernel_regtable_store);
3626 3627
3627 /* Install L1 table in context 0 */ 3628 /* Install L1 table in context 0 */
3628 setpgt4m(&cpuinfo.ctx_tbl[0], 3629 setpgt4m(&cpuinfo.ctx_tbl[0],
3629 (pmap_kernel()->pm_reg_ptps_pa[0] >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); 3630 (pmap_kernel()->pm_reg_ptps_pa[0] >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3630 3631
3631 for (reg = 0; reg < NKREG; reg++) { 3632 for (reg = 0; reg < NKREG; reg++) {
3632 struct regmap *rp; 3633 struct regmap *rp;
3633 void *kphyssegtbl; 3634 void *kphyssegtbl;
3634 3635
3635 /* 3636 /*
3636 * Entering new region; install & build segtbl 3637 * Entering new region; install & build segtbl
3637 */ 3638 */
3638 3639
3639 rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(KERNBASE)]; 3640 rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(KERNBASE)];
3640 3641
3641 kphyssegtbl = (void *) 3642 kphyssegtbl = (void *)
3642 &kernel_segtable_store[reg * SRMMU_L2SIZE]; 3643 &kernel_segtable_store[reg * SRMMU_L2SIZE];
3643 3644
3644 setpgt4m(&pmap_kernel()->pm_reg_ptps[0][reg + VA_VREG(KERNBASE)], 3645 setpgt4m(&pmap_kernel()->pm_reg_ptps[0][reg + VA_VREG(KERNBASE)],
3645 (PMAP_BOOTSTRAP_VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | 3646 (PMAP_BOOTSTRAP_VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) |
3646 SRMMU_TEPTD); 3647 SRMMU_TEPTD);
3647 3648
3648 rp->rg_seg_ptps = (int *)kphyssegtbl; 3649 rp->rg_seg_ptps = (int *)kphyssegtbl;
3649 3650
3650 for (seg = 0; seg < NSEGRG; seg++) { 3651 for (seg = 0; seg < NSEGRG; seg++) {
3651 struct segmap *sp; 3652 struct segmap *sp;
3652 void *kphyspagtbl; 3653 void *kphyspagtbl;
3653 3654
3654 rp->rg_nsegmap++; 3655 rp->rg_nsegmap++;
3655 3656
3656 sp = &rp->rg_segmap[seg]; 3657 sp = &rp->rg_segmap[seg];
3657 kphyspagtbl = (void *) 3658 kphyspagtbl = (void *)
3658 &kernel_pagtable_store 3659 &kernel_pagtable_store
3659 [((reg * NSEGRG) + seg) * SRMMU_L3SIZE]; 3660 [((reg * NSEGRG) + seg) * SRMMU_L3SIZE];
3660 3661
3661 setpgt4m(&rp->rg_seg_ptps[seg], 3662 setpgt4m(&rp->rg_seg_ptps[seg],
3662 (PMAP_BOOTSTRAP_VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) | 3663 (PMAP_BOOTSTRAP_VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
3663 SRMMU_TEPTD); 3664 SRMMU_TEPTD);
3664 sp->sg_pte = (int *) kphyspagtbl; 3665 sp->sg_pte = (int *) kphyspagtbl;
3665 } 3666 }
3666 } 3667 }
3667 3668
3668 /* 3669 /*
3669 * Preserve the monitor ROM's reserved VM region, so that 3670 * Preserve the monitor ROM's reserved VM region, so that
3670 * we can use L1-A or the monitor's debugger. 3671 * we can use L1-A or the monitor's debugger.
3671 */ 3672 */
3672 mmu_reservemon4m(&kernel_pmap_store); 3673 mmu_reservemon4m(&kernel_pmap_store);
3673 3674
3674 /* 3675 /*
3675 * Reserve virtual address space for two mappable MD pages 3676 * Reserve virtual address space for two mappable MD pages
3676 * for pmap_zero_page and pmap_copy_page, one MI page 3677 * for pmap_zero_page and pmap_copy_page, one MI page
3677 * for /dev/mem, and some more for dumpsys(). 3678 * for /dev/mem, and some more for dumpsys().
3678 */ 3679 */
3679 q = p; 3680 q = p;
3680 cpuinfo.vpage[0] = (void *)p, p += NBPG; 3681 cpuinfo.vpage[0] = (void *)p, p += NBPG;
3681 cpuinfo.vpage[1] = (void *)p, p += NBPG; 3682 cpuinfo.vpage[1] = (void *)p, p += NBPG;
3682 vmmap = (void *)p, p += NBPG; 3683 vmmap = (void *)p, p += NBPG;
3683 p = (vaddr_t)reserve_dumppages((void *)p); 3684 p = (vaddr_t)reserve_dumppages((void *)p);
3684 3685
3685 /* Find PTE locations of vpage[] to optimize zero_fill() et.al. */ 3686 /* Find PTE locations of vpage[] to optimize zero_fill() et.al. */
3686 for (i = 0; i < 2; i++) { 3687 for (i = 0; i < 2; i++) {
3687 struct regmap *rp; 3688 struct regmap *rp;
3688 struct segmap *sp; 3689 struct segmap *sp;
3689 rp = &pmap_kernel()->pm_regmap[VA_VREG(cpuinfo.vpage[i])]; 3690 rp = &pmap_kernel()->pm_regmap[VA_VREG(cpuinfo.vpage[i])];
3690 sp = &rp->rg_segmap[VA_VSEG(cpuinfo.vpage[i])]; 3691 sp = &rp->rg_segmap[VA_VSEG(cpuinfo.vpage[i])];
3691 cpuinfo.vpage_pte[i] = 3692 cpuinfo.vpage_pte[i] =
3692 &sp->sg_pte[VA_SUN4M_VPG(cpuinfo.vpage[i])]; 3693 &sp->sg_pte[VA_SUN4M_VPG(cpuinfo.vpage[i])];
3693 } 3694 }
3694 3695
3695#if !(defined(PROM_AT_F0) || defined(MSIIEP)) 3696#if !(defined(PROM_AT_F0) || defined(MSIIEP))
3696 virtual_avail = p; 3697 virtual_avail = p;
3697#elif defined(MSIIEP) 3698#elif defined(MSIIEP)
3698 virtual_avail = (vaddr_t)0xf0800000; /* Krups */ 3699 virtual_avail = (vaddr_t)0xf0800000; /* Krups */
3699#else 3700#else
3700 virtual_avail = (vaddr_t)0xf0080000; /* Mr.Coffee/OFW */ 3701 virtual_avail = (vaddr_t)0xf0080000; /* Mr.Coffee/OFW */
3701#endif 3702#endif
3702 virtual_end = VM_MAX_KERNEL_ADDRESS; 3703 virtual_end = VM_MAX_KERNEL_ADDRESS;
3703 3704
3704 p = q; /* retract to first free phys */ 3705 p = q; /* retract to first free phys */
3705 3706
3706 /* 3707 /*
3707 * Set up the ctxinfo structures (freelist of contexts) 3708 * Set up the ctxinfo structures (freelist of contexts)
3708 */ 3709 */
3709 ci->c_pmap = pmap_kernel(); 3710 ci->c_pmap = pmap_kernel();
3710 ctx_freelist = ci + 1; 3711 ctx_freelist = ci + 1;
3711 for (i = 1; i < ncontext; i++) { 3712 for (i = 1; i < ncontext; i++) {
3712 ci++; 3713 ci++;
3713 ci->c_nextfree = ci + 1; 3714 ci->c_nextfree = ci + 1;
3714 } 3715 }
3715 ci->c_nextfree = NULL; 3716 ci->c_nextfree = NULL;
3716 ctx_kick = 0; 3717 ctx_kick = 0;
3717 ctx_kickdir = -1; 3718 ctx_kickdir = -1;
3718 3719
3719 /* 3720 /*
3720 * Now map the kernel into our new set of page tables, then 3721 * Now map the kernel into our new set of page tables, then
3721 * (finally) switch over to our running page tables. 3722 * (finally) switch over to our running page tables.
3722 * We map from KERNBASE to p into context 0's page tables (and 3723 * We map from KERNBASE to p into context 0's page tables (and
3723 * the kernel pmap). 3724 * the kernel pmap).
3724 */ 3725 */
3725#ifdef DEBUG /* Sanity checks */ 3726#ifdef DEBUG /* Sanity checks */
3726 if (p % NBPG != 0) 3727 if (p % NBPG != 0)
3727 panic("pmap_bootstrap4m: p misaligned?!?"); 3728 panic("pmap_bootstrap4m: p misaligned?!?");
3728 if (KERNBASE % NBPRG != 0) 3729 if (KERNBASE % NBPRG != 0)
3729 panic("pmap_bootstrap4m: KERNBASE not region-aligned"); 3730 panic("pmap_bootstrap4m: KERNBASE not region-aligned");
3730#endif 3731#endif
3731 3732
3732 for (q = KERNBASE; q < p; q += NBPG) { 3733 for (q = KERNBASE; q < p; q += NBPG) {
3733 struct regmap *rp; 3734 struct regmap *rp;
3734 struct segmap *sp; 3735 struct segmap *sp;
3735 int pte, *ptep; 3736 int pte, *ptep;
3736 3737
3737 /* 3738 /*
3738 * Now install entry for current page. 3739 * Now install entry for current page.
3739 */ 3740 */
3740 rp = &pmap_kernel()->pm_regmap[VA_VREG(q)]; 3741 rp = &pmap_kernel()->pm_regmap[VA_VREG(q)];
3741 sp = &rp->rg_segmap[VA_VSEG(q)]; 3742 sp = &rp->rg_segmap[VA_VSEG(q)];
3742 ptep = &sp->sg_pte[VA_VPG(q)]; 3743 ptep = &sp->sg_pte[VA_VPG(q)];
3743 3744
3744 /* 3745 /*
3745 * Unmap the `etext gap'; it'll be made available 3746 * Unmap the `etext gap'; it'll be made available
3746 * to the VM manager. 3747 * to the VM manager.
3747 */ 3748 */
3748 if (q >= etext_gap_start && q < etext_gap_end) { 3749 if (q >= etext_gap_start && q < etext_gap_end) {
3749 setpgt4m(ptep, 0); 3750 setpgt4m(ptep, 0);
3750 continue; 3751 continue;
3751 } 3752 }
3752 3753
3753 pte = PMAP_BOOTSTRAP_VA2PA(q) >> SRMMU_PPNPASHIFT; 3754 pte = PMAP_BOOTSTRAP_VA2PA(q) >> SRMMU_PPNPASHIFT;
3754 pte |= PPROT_N_RX | SRMMU_TEPTE; 3755 pte |= PPROT_N_RX | SRMMU_TEPTE;
3755 3756
3756 /* Deal with the cacheable bit for pagetable memory */ 3757 /* Deal with the cacheable bit for pagetable memory */
3757 if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 || 3758 if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 ||
3758 q < pagetables_start || q >= pagetables_end) 3759 q < pagetables_start || q >= pagetables_end)
3759 pte |= SRMMU_PG_C; 3760 pte |= SRMMU_PG_C;
3760 3761
3761 /* write-protect kernel text */ 3762 /* write-protect kernel text */
3762 if (q < (vaddr_t)trapbase || q >= (vaddr_t)etext) 3763 if (q < (vaddr_t)trapbase || q >= (vaddr_t)etext)
3763 pte |= PPROT_WRITE; 3764 pte |= PPROT_WRITE;
3764 3765
3765 setpgt4m(ptep, pte); 3766 setpgt4m(ptep, pte);
3766 pmap_kernel()->pm_stats.resident_count++; 3767 pmap_kernel()->pm_stats.resident_count++;
3767 } 3768 }
3768 3769
3769 if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) { 3770 if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) {
3770 /* 3771 /*
 3771 * The page tables have been set up. Since we're still 3772 * The page tables have been set up. Since we're still
3772 * running on the PROM's memory map, the memory we 3773 * running on the PROM's memory map, the memory we
3773 * allocated for our page tables might still be cached. 3774 * allocated for our page tables might still be cached.
3774 * Flush it now, and don't touch it again until we 3775 * Flush it now, and don't touch it again until we
3775 * switch to our own tables (will be done immediately below). 3776 * switch to our own tables (will be done immediately below).
3776 */ 3777 */
3777 int size = pagetables_end - pagetables_start; 3778 int size = pagetables_end - pagetables_start;
3778 if (CACHEINFO.c_vactype != VAC_NONE) { 3779 if (CACHEINFO.c_vactype != VAC_NONE) {
3779 va = (vaddr_t)pagetables_start; 3780 va = (vaddr_t)pagetables_start;
3780 while (size > 0) { 3781 while (size > 0) {
3781 cache_flush_page(va, 0); 3782 cache_flush_page(va, 0);
3782 va += NBPG; 3783 va += NBPG;
3783 size -= NBPG; 3784 size -= NBPG;
3784 } 3785 }
3785 } else if (cpuinfo.pcache_flush_page != NULL) { 3786 } else if (cpuinfo.pcache_flush_page != NULL) {
3786 paddr_t pa = pagetables_start_pa; 3787 paddr_t pa = pagetables_start_pa;
3787 while (size > 0) { 3788 while (size > 0) {
3788 pcache_flush_page(pa, 0); 3789 pcache_flush_page(pa, 0);
3789 pa += NBPG; 3790 pa += NBPG;
3790 size -= NBPG; 3791 size -= NBPG;
3791 } 3792 }
3792 } 3793 }
3793 } 3794 }
3794 3795
3795 /* 3796 /*
3796 * Now switch to kernel pagetables (finally!) 3797 * Now switch to kernel pagetables (finally!)
3797 */ 3798 */
3798 mmu_install_tables(&cpuinfo); 3799 mmu_install_tables(&cpuinfo);
3799 3800
3800#ifdef MULTIPROCESSOR 3801#ifdef MULTIPROCESSOR
3801 /* 3802 /*
 3803 * Initialise any cpu-specific data now.
 3804 */
 3805 cpu_init_system();
 3806
 3807 /*
3802 * Remap cpu0 from CPUINFO_VA to the new correct value, wasting the 3808 * Remap cpu0 from CPUINFO_VA to the new correct value, wasting the
3803 * backing pages we allocated above XXX. 3809 * backing page we allocated above XXX.
3804 */ 3810 */
3805 for (off = 0, va = (vaddr_t)cpuinfo_data; 3811 for (off = 0, va = (vaddr_t)cpuinfo_data;
3806 off < sizeof(struct cpu_info); 3812 sparc_ncpus > 1 && off < sizeof(struct cpu_info);
3807 va += NBPG, off += NBPG) { 3813 va += NBPG, off += NBPG) {
3808 paddr_t pa = PMAP_BOOTSTRAP_VA2PA(CPUINFO_VA + off); 3814 paddr_t pa = PMAP_BOOTSTRAP_VA2PA(CPUINFO_VA + off);
3809 prom_printf("going to pmap_kenter_pa(va=%p, pa=%p)\n", va, pa); 3815 prom_printf("going to pmap_kenter_pa(va=%p, pa=%p)\n", va, pa);
3810 pmap_kremove(va, NBPG); 3816 pmap_kremove(va, NBPG);
3811 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE); 3817 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
 3818 cache_flush_page(va, 0);
 3819 cache_flush_page(CPUINFO_VA, 0);
3812 } 3820 }
3813 3821
3814 /* 3822 /*
3815 * Setup the cpus[] array and the ci_self links. 3823 * Setup the cpus[] array and the ci_self links.
3816 */ 3824 */
3817 prom_printf("setting cpus self reference\n"); 3825 prom_printf("setting cpus self reference\n");
3818 for (i = 0; i < sparc_ncpus; i++) { 3826 for (i = 0; i < sparc_ncpus; i++) {
3819 cpus[i] = (struct cpu_info *)(cpuinfo_data + (cpuinfo_len * i)); 3827 cpus[i] = (struct cpu_info *)(cpuinfo_data + (cpuinfo_len * i));
3820 cpus[i]->ci_self = cpus[i]; 3828 cpus[i]->ci_self = cpus[i];
3821 prom_printf("set cpu%d ci_self address: %p\n", i, cpus[i]); 3829 prom_printf("set cpu%d ci_self address: %p\n", i, cpus[i]);
3822 } 3830 }
 3831#else
 3832 cpus[0] = (struct cpu_info *)CPUINFO_VA;
3823#endif 3833#endif
3824 3834
3825 pmap_update(pmap_kernel()); 3835 pmap_update(pmap_kernel());
3826 prom_printf("pmap_bootstrap4m done\n"); 3836 prom_printf("pmap_bootstrap4m done\n");
3827} 3837}
3828 3838
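
pmap_bootstrap4m() above repeatedly builds table-descriptor words of the form (pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD. A tiny standalone sketch of that encoding follows; the constant values are my recollection of the SRMMU layout (PPN field starting at bit 8, 4 KB pages) and should be treated as assumptions, not as taken from this diff:

	#include <stdio.h>

	#define SRMMU_TEPTD		0x1	/* assumed: entry type "page table descriptor" */
	#define SRMMU_PPNPASHIFT	4	/* assumed: net shift from pa to descriptor     */

	int
	main(void)
	{
		unsigned int table_pa = 0x07f04000;	/* invented physical address of a table */
		unsigned int ptd = (table_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;

		printf("PTD word: 0x%08x\n", ptd);	/* 0x007f0401 here */
		return 0;
	}
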
3829static u_long prom_ctxreg; 3839static u_long prom_ctxreg;
3830 3840
3831void 3841void
3832mmu_install_tables(struct cpu_info *sc) 3842mmu_install_tables(struct cpu_info *sc)
3833{ 3843{
3834 3844
3835#ifdef DEBUG 3845#ifdef DEBUG
3836 prom_printf("pmap_bootstrap: installing kernel page tables..."); 3846 prom_printf("pmap_bootstrap: installing kernel page tables...");
3837#endif 3847#endif
3838 setcontext4m(0); /* paranoia? %%%: Make 0x3 a define! below */ 3848 setcontext4m(0); /* paranoia? %%%: Make 0x3 a define! below */
3839 3849
3840 /* Enable MMU tablewalk caching, flush TLB */ 3850 /* Enable MMU tablewalk caching, flush TLB */
3841 if (sc->mmu_enable != 0) 3851 if (sc->mmu_enable != 0)
3842 sc->mmu_enable(); 3852 sc->mmu_enable();
3843 3853
3844 tlb_flush_all_real(); 3854 tlb_flush_all_real();
3845 prom_ctxreg = lda(SRMMU_CXTPTR, ASI_SRMMU); 3855 prom_ctxreg = lda(SRMMU_CXTPTR, ASI_SRMMU);
3846 3856
3847 sta(SRMMU_CXTPTR, ASI_SRMMU, 3857 sta(SRMMU_CXTPTR, ASI_SRMMU,
3848 (sc->ctx_tbl_pa >> SRMMU_PPNPASHIFT) & ~0x3); 3858 (sc->ctx_tbl_pa >> SRMMU_PPNPASHIFT) & ~0x3);
3849 3859
3850 tlb_flush_all_real(); 3860 tlb_flush_all_real();
3851 3861
3852#ifdef DEBUG 3862#ifdef DEBUG
3853 prom_printf("done.\n"); 3863 prom_printf("done.\n");
3854#endif 3864#endif
3855} 3865}
3856 3866
3857void srmmu_restore_prom_ctx(void); 3867void srmmu_restore_prom_ctx(void);
3858 3868
3859void 3869void
3860srmmu_restore_prom_ctx(void) 3870srmmu_restore_prom_ctx(void)
3861{ 3871{
3862 3872
3863 tlb_flush_all(); 3873 tlb_flush_all();
3864 sta(SRMMU_CXTPTR, ASI_SRMMU, prom_ctxreg); 3874 sta(SRMMU_CXTPTR, ASI_SRMMU, prom_ctxreg);
3865 tlb_flush_all(); 3875 tlb_flush_all();
3866} 3876}
3867#endif /* SUN4M || SUN4D */ 3877#endif /* SUN4M || SUN4D */
3868 3878
3869#if defined(MULTIPROCESSOR) 3879#if defined(MULTIPROCESSOR)
3870/* 3880/*
3871 * Allocate per-CPU page tables. One region, segment and page table 3881 * Allocate per-CPU page tables. One region, segment and page table
3872 * is needed to map CPUINFO_VA to different physical addresses on 3882 * is needed to map CPUINFO_VA to different physical addresses on
3873 * each CPU. Since the kernel region and segment tables are all 3883 * each CPU. Since the kernel region and segment tables are all
3874 * pre-wired (in bootstrap() above) and we also assume that the 3884 * pre-wired (in bootstrap() above) and we also assume that the
3875 * first segment (256K) of kernel space is fully populated with 3885 * first segment (256K) of kernel space is fully populated with
3876 * pages from the start, these per-CPU tables will never need 3886 * pages from the start, these per-CPU tables will never need
3877 * to be updated when mapping kernel virtual memory. 3887 * to be updated when mapping kernel virtual memory.
3878 * 3888 *
3879 * Note: this routine is called in the context of the boot CPU 3889 * Note: this routine is called in the context of the boot CPU
3880 * during autoconfig. 3890 * during autoconfig.
3881 */ 3891 */
3882void 3892void
3883pmap_alloc_cpu(struct cpu_info *sc) 3893pmap_alloc_cpu(struct cpu_info *sc)
3884{ 3894{
3885#if defined(SUN4M) || defined(SUN4D) /* Only implemented for SUN4M/D */ 3895#if defined(SUN4M) || defined(SUN4D) /* Only implemented for SUN4M/D */
3886 vaddr_t va; 3896 vaddr_t va;
3887 paddr_t pa; 3897 paddr_t pa;
3888 paddr_t alignment; 3898 paddr_t alignment;
3889 u_int *ctxtable, *regtable, *segtable, *pagtable; 3899 u_int *ctxtable, *regtable, *segtable, *pagtable;
3890 u_int *ctxtable_pa, *regtable_pa, *segtable_pa, *pagtable_pa; 3900 u_int *ctxtable_pa, *regtable_pa, *segtable_pa, *pagtable_pa;
3891 psize_t ctxsize, size; 3901 psize_t ctxsize, size;
3892 int vr, vs, vpg; 3902 int vr, vs, vpg;
3893 struct regmap *rp; 3903 struct regmap *rp;
3894 struct segmap *sp; 3904 struct segmap *sp;
3895 struct pglist mlist; 3905 struct pglist mlist;
3896 int cachebit; 3906 int cachebit;
3897 int pagesz = NBPG; 3907 int pagesz = NBPG;
3898 int i; 3908 int i;
3899 3909
3900 cachebit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0; 3910 cachebit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0;
3901 3911
3902 /* 3912 /*
 3903 * Allocate properly aligned and contiguous physical memory 3913 * Allocate properly aligned and contiguous physical memory
3904 * for the PTE tables. 3914 * for the PTE tables.
3905 */ 3915 */
3906 ctxsize = (sc->mmu_ncontext * sizeof(int) + pagesz - 1) & -pagesz; 3916 ctxsize = (sc->mmu_ncontext * sizeof(int) + pagesz - 1) & -pagesz;
3907 alignment = ctxsize; 3917 alignment = ctxsize;
3908 3918
3909 /* The region, segment and page table we need fit in one page */ 3919 /* The region, segment and page table we need fit in one page */
3910 size = ctxsize + pagesz; 3920 size = ctxsize + pagesz;
3911 3921
3912 if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys, 3922 if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys,
3913 alignment, 0, &mlist, 1, 0) != 0) 3923 alignment, 0, &mlist, 1, 0) != 0)
3914 panic("pmap_alloc_cpu: no memory"); 3924 panic("pmap_alloc_cpu: no memory");
3915 3925
3916 pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist)); 3926 pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist));
3917 3927
3918 /* Allocate virtual memory */ 3928 /* Allocate virtual memory */
3919 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY); 3929 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
3920 if (va == 0) 3930 if (va == 0)
3921 panic("pmap_alloc_cpu: no memory"); 3931 panic("pmap_alloc_cpu: no memory");
3922 3932
3923 /* 3933 /*
3924 * Layout the page tables in our chunk of memory 3934 * Layout the page tables in our chunk of memory
3925 */ 3935 */
3926 ctxtable = (u_int *)va; 3936 ctxtable = (u_int *)va;
3927 regtable = (u_int *)(va + ctxsize); 3937 regtable = (u_int *)(va + ctxsize);
3928 segtable = regtable + SRMMU_L1SIZE; 3938 segtable = regtable + SRMMU_L1SIZE;
3929 pagtable = segtable + SRMMU_L2SIZE; 3939 pagtable = segtable + SRMMU_L2SIZE;
3930 3940
3931 ctxtable_pa = (u_int *)pa; 3941 ctxtable_pa = (u_int *)pa;
3932 regtable_pa = (u_int *)(pa + ctxsize); 3942 regtable_pa = (u_int *)(pa + ctxsize);
3933 segtable_pa = regtable_pa + SRMMU_L1SIZE; 3943 segtable_pa = regtable_pa + SRMMU_L1SIZE;
3934 pagtable_pa = segtable_pa + SRMMU_L2SIZE; 3944 pagtable_pa = segtable_pa + SRMMU_L2SIZE;
3935 3945
3936 /* Map the pages */ 3946 /* Map the pages */
3937 while (size != 0) { 3947 while (size != 0) {
3938 pmap_kenter_pa(va, pa | (cachebit ? 0 : PMAP_NC), 3948 pmap_kenter_pa(va, pa | (cachebit ? 0 : PMAP_NC),
3939 VM_PROT_READ | VM_PROT_WRITE); 3949 VM_PROT_READ | VM_PROT_WRITE);
3940 va += pagesz; 3950 va += pagesz;
3941 pa += pagesz; 3951 pa += pagesz;
3942 size -= pagesz; 3952 size -= pagesz;
3943 } 3953 }
3944 pmap_update(pmap_kernel()); 3954 pmap_update(pmap_kernel());
3945 3955
3946 /* 3956 /*
3947 * Store the region table pointer (and its corresponding physical 3957 * Store the region table pointer (and its corresponding physical
3948 * address) in the CPU's slot in the kernel pmap region table 3958 * address) in the CPU's slot in the kernel pmap region table
3949 * pointer table. 3959 * pointer table.
3950 */ 3960 */
3951 pmap_kernel()->pm_reg_ptps[sc->ci_cpuid] = regtable; 3961 pmap_kernel()->pm_reg_ptps[sc->ci_cpuid] = regtable;
3952 pmap_kernel()->pm_reg_ptps_pa[sc->ci_cpuid] = (paddr_t)regtable_pa; 3962 pmap_kernel()->pm_reg_ptps_pa[sc->ci_cpuid] = (paddr_t)regtable_pa;
3953 3963
3954 vr = VA_VREG(CPUINFO_VA); 3964 vr = VA_VREG(CPUINFO_VA);
3955 vs = VA_VSEG(CPUINFO_VA); 3965 vs = VA_VSEG(CPUINFO_VA);
3956 vpg = VA_VPG(CPUINFO_VA); 3966 vpg = VA_VPG(CPUINFO_VA);
3957 rp = &pmap_kernel()->pm_regmap[vr]; 3967 rp = &pmap_kernel()->pm_regmap[vr];
3958 sp = &rp->rg_segmap[vs]; 3968 sp = &rp->rg_segmap[vs];
3959 3969
3960 /* 3970 /*
3961 * Copy page tables from CPU #0, then modify entry for CPUINFO_VA 3971 * Copy page tables from CPU #0, then modify entry for CPUINFO_VA
3962 * so that it points at the per-CPU pages. 3972 * so that it points at the per-CPU pages.
3963 */ 3973 */
3964 qcopy(pmap_kernel()->pm_reg_ptps[0], regtable, 3974 qcopy(pmap_kernel()->pm_reg_ptps[0], regtable,
3965 SRMMU_L1SIZE * sizeof(int)); 3975 SRMMU_L1SIZE * sizeof(int));
3966 qcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int)); 3976 qcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int));
3967 qcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int)); 3977 qcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int));
3968 3978
3969 setpgt4m(&ctxtable[0], 3979 setpgt4m(&ctxtable[0],
3970 ((u_long)regtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); 3980 ((u_long)regtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3971 setpgt4m(&regtable[vr], 3981 setpgt4m(&regtable[vr],
3972 ((u_long)segtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); 3982 ((u_long)segtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3973 setpgt4m(&segtable[vs], 3983 setpgt4m(&segtable[vs],
3974 ((u_long)pagtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); 3984 ((u_long)pagtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3975 setpgt4m(&pagtable[vpg], 3985 setpgt4m(&pagtable[vpg],
3976 (VA2PA((void *)sc) >> SRMMU_PPNPASHIFT) | 3986 (VA2PA((void *)sc) >> SRMMU_PPNPASHIFT) |
3977 (SRMMU_TEPTE | PPROT_N_RWX | SRMMU_PG_C)); 3987 (SRMMU_TEPTE | PPROT_N_RWX | SRMMU_PG_C));
3978 3988
3979 /* Install this CPU's context table */ 3989 /* Install this CPU's context table */
3980 sc->ctx_tbl = ctxtable; 3990 sc->ctx_tbl = ctxtable;
3981 sc->ctx_tbl_pa = (paddr_t)ctxtable_pa; 3991 sc->ctx_tbl_pa = (paddr_t)ctxtable_pa;
3982 3992
3983 /* Pre-compute this CPU's vpage[] PTEs */ 3993 /* Pre-compute this CPU's vpage[] PTEs */
3984 for (i = 0; i < 2; i++) { 3994 for (i = 0; i < 2; i++) {
3985 rp = &pmap_kernel()->pm_regmap[VA_VREG(sc->vpage[i])]; 3995 rp = &pmap_kernel()->pm_regmap[VA_VREG(sc->vpage[i])];
3986 sp = &rp->rg_segmap[VA_VSEG(sc->vpage[i])]; 3996 sp = &rp->rg_segmap[VA_VSEG(sc->vpage[i])];
3987 sc->vpage_pte[i] = &sp->sg_pte[VA_SUN4M_VPG(sc->vpage[i])]; 3997 sc->vpage_pte[i] = &sp->sg_pte[VA_SUN4M_VPG(sc->vpage[i])];
3988 } 3998 }
3989#endif /* SUN4M || SUN4D */ 3999#endif /* SUN4M || SUN4D */
3990} 4000}
3991#endif /* MULTIPROCESSOR */ 4001#endif /* MULTIPROCESSOR */
3992 4002
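
pmap_alloc_cpu() above rounds the per-CPU context table size with "& -pagesz" before carving one contiguous chunk into context, region, segment and page tables. A standalone sketch of that rounding, with a hypothetical context count:

	#include <stdio.h>

	int
	main(void)
	{
		int pagesz = 4096;
		int ncontext = 256;		/* hypothetical, standing in for sc->mmu_ncontext */
		int ctxsize = (ncontext * (int)sizeof(int) + pagesz - 1) & -pagesz;

		/* "& -pagesz" clears the low bits: 1024 bytes round up to one page */
		printf("ctxsize=%d, chunk=%d\n", ctxsize, ctxsize + pagesz);
		return 0;
	}
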
3993 4003
3994void 4004void
3995pmap_init(void) 4005pmap_init(void)
3996{ 4006{
3997 u_int sz; 4007 u_int sz;
3998 4008
3999 if (PAGE_SIZE != NBPG) 4009 if (PAGE_SIZE != NBPG)
4000 panic("pmap_init: PAGE_SIZE!=NBPG"); 4010 panic("pmap_init: PAGE_SIZE!=NBPG");
4001 4011
4002 vm_num_phys = vm_last_phys - vm_first_phys; 4012 vm_num_phys = vm_last_phys - vm_first_phys;
4003 4013
4004 /* Setup a pool for additional pvlist structures */ 4014 /* Setup a pool for additional pvlist structures */
4005 pool_init(&pv_pool, sizeof(struct pvlist), 0, 0, 0, "pvtable", NULL, 4015 pool_init(&pv_pool, sizeof(struct pvlist), 0, 0, 0, "pvtable", NULL,
4006 IPL_NONE); 4016 IPL_NONE);
4007 4017
4008 /* 4018 /*
4009 * Setup a pool for pmap structures. 4019 * Setup a pool for pmap structures.
4010 * The pool size includes space for an array of per-CPU 4020 * The pool size includes space for an array of per-CPU
4011 * region table pointers & physical addresses 4021 * region table pointers & physical addresses
4012 */ 4022 */
4013 sz = ALIGN(sizeof(struct pmap)) + 4023 sz = ALIGN(sizeof(struct pmap)) +
4014 ALIGN(NUREG * sizeof(struct regmap)) + 4024 ALIGN(NUREG * sizeof(struct regmap)) +
4015 sparc_ncpus * sizeof(int *) + /* pm_reg_ptps */ 4025 sparc_ncpus * sizeof(int *) + /* pm_reg_ptps */
4016 sparc_ncpus * sizeof(int); /* pm_reg_ptps_pa */ 4026 sparc_ncpus * sizeof(int); /* pm_reg_ptps_pa */
4017 pool_cache_bootstrap(&pmap_cache, sz, 0, 0, 0, "pmappl", NULL, 4027 pool_cache_bootstrap(&pmap_cache, sz, 0, 0, 0, "pmappl", NULL,
4018 IPL_NONE, pmap_pmap_pool_ctor, pmap_pmap_pool_dtor, NULL); 4028 IPL_NONE, pmap_pmap_pool_ctor, pmap_pmap_pool_dtor, NULL);
4019 4029
4020 sz = NSEGRG * sizeof (struct segmap); 4030 sz = NSEGRG * sizeof (struct segmap);
4021 pool_init(&segmap_pool, sz, 0, 0, 0, "segmap", NULL, IPL_NONE); 4031 pool_init(&segmap_pool, sz, 0, 0, 0, "segmap", NULL, IPL_NONE);
4022 4032
4023#if defined(SUN4M) || defined(SUN4D) 4033#if defined(SUN4M) || defined(SUN4D)
4024 if (CPU_HAS_SRMMU) { 4034 if (CPU_HAS_SRMMU) {
4025 /* 4035 /*
4026 * The SRMMU only ever needs chunks in one of two sizes: 4036 * The SRMMU only ever needs chunks in one of two sizes:
4027 * 1024 (for region level tables) and 256 (for segment 4037 * 1024 (for region level tables) and 256 (for segment
4028 * and page level tables). 4038 * and page level tables).
4029 */ 4039 */
4030 sz = SRMMU_L1SIZE * sizeof(int); 4040 sz = SRMMU_L1SIZE * sizeof(int);
4031 pool_init(&L1_pool, sz, sz, 0, 0, "L1 pagetable", 4041 pool_init(&L1_pool, sz, sz, 0, 0, "L1 pagetable",
4032 &pgt_page_allocator, IPL_NONE); 4042 &pgt_page_allocator, IPL_NONE);
4033 4043
4034 sz = SRMMU_L2SIZE * sizeof(int); 4044 sz = SRMMU_L2SIZE * sizeof(int);
4035 pool_init(&L23_pool, sz, sz, 0, 0, "L2/L3 pagetable", 4045 pool_init(&L23_pool, sz, sz, 0, 0, "L2/L3 pagetable",
4036 &pgt_page_allocator, IPL_NONE); 4046 &pgt_page_allocator, IPL_NONE);
4037 } 4047 }
4038#endif /* SUN4M || SUN4D */ 4048#endif /* SUN4M || SUN4D */
4039#if defined(SUN4) || defined(SUN4C) 4049#if defined(SUN4) || defined(SUN4C)
4040 if (CPU_HAS_SUNMMU) { 4050 if (CPU_HAS_SUNMMU) {
4041 sz = NPTESG * sizeof(int); 4051 sz = NPTESG * sizeof(int);
4042 pool_init(&pte_pool, sz, 0, 0, 0, "ptemap", NULL, 4052 pool_init(&pte_pool, sz, 0, 0, 0, "ptemap", NULL,
4043 IPL_NONE); 4053 IPL_NONE);
4044 } 4054 }
4045#endif /* SUN4 || SUN4C */ 4055#endif /* SUN4 || SUN4C */
4046} 4056}
4047 4057
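
The pmap pool item sized in pmap_init() above bundles the pmap itself, its NUREG regmap array, and the per-CPU region-table pointer arrays into one allocation. A standalone sketch of the size computation follows; all structure sizes and counts are hypothetical:

	#include <stdio.h>

	#define ALIGNBYTES	7				/* assumed 8-byte alignment */
	#define ALIGN(x)	(((x) + ALIGNBYTES) & ~(size_t)ALIGNBYTES)

	int
	main(void)
	{
		size_t pmap_size = 200, regmap_size = 24;	/* hypothetical struct sizes */
		int nureg = 240, ncpus = 2;			/* hypothetical */
		size_t sz;

		sz = ALIGN(pmap_size) +
		    ALIGN((size_t)nureg * regmap_size) +
		    ncpus * sizeof(int *) +			/* pm_reg_ptps    */
		    ncpus * sizeof(int);			/* pm_reg_ptps_pa */

		printf("pmap pool item: %zu bytes\n", sz);
		return 0;
	}
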
4048 4058
4049/* 4059/*
4050 * Map physical addresses into kernel VM. 4060 * Map physical addresses into kernel VM.
4051 */ 4061 */
4052vaddr_t 4062vaddr_t
4053pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot) 4063pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
4054{ 4064{
4055 int pgsize = PAGE_SIZE; 4065 int pgsize = PAGE_SIZE;
4056 4066
4057 while (pa < endpa) { 4067 while (pa < endpa) {
4058 pmap_kenter_pa(va, pa, prot); 4068 pmap_kenter_pa(va, pa, prot);
4059 va += pgsize; 4069 va += pgsize;
4060 pa += pgsize; 4070 pa += pgsize;
4061 } 4071 }
4062 pmap_update(pmap_kernel()); 4072 pmap_update(pmap_kernel());
4063 return (va); 4073 return (va);
4064} 4074}
4065 4075
4066#ifdef DEBUG 4076#ifdef DEBUG
4067/* 4077/*
4068 * Check a pmap for spuriously lingering mappings 4078 * Check a pmap for spuriously lingering mappings
4069 */ 4079 */
4070static inline void 4080static inline void
4071pmap_quiet_check(struct pmap *pm) 4081pmap_quiet_check(struct pmap *pm)
4072{ 4082{
4073 int vs, vr; 4083 int vs, vr;
4074 4084
4075 if (CPU_HAS_SUNMMU) { 4085 if (CPU_HAS_SUNMMU) {
4076#if defined(SUN4_MMU3L) 4086#if defined(SUN4_MMU3L)
4077 if (TAILQ_FIRST(&pm->pm_reglist)) 4087 if (TAILQ_FIRST(&pm->pm_reglist))
4078 panic("pmap_destroy: region list not empty"); 4088 panic("pmap_destroy: region list not empty");
4079#endif 4089#endif
4080 if (TAILQ_FIRST(&pm->pm_seglist)) 4090 if (TAILQ_FIRST(&pm->pm_seglist))
4081 panic("pmap_destroy: segment list not empty"); 4091 panic("pmap_destroy: segment list not empty");
4082 } 4092 }
4083 4093
4084 for (vr = 0; vr < NUREG; vr++) { 4094 for (vr = 0; vr < NUREG; vr++) {
4085 struct regmap *rp = &pm->pm_regmap[vr]; 4095 struct regmap *rp = &pm->pm_regmap[vr];
4086 4096
4087 if (HASSUN4_MMU3L) { 4097 if (HASSUN4_MMU3L) {
4088 if (rp->rg_smeg != reginval) 4098 if (rp->rg_smeg != reginval)
4089 printf("pmap_chk: spurious smeg in " 4099 printf("pmap_chk: spurious smeg in "
4090 "user region %d\n", vr); 4100 "user region %d\n", vr);
4091 } 4101 }
4092 if (CPU_HAS_SRMMU) { 4102 if (CPU_HAS_SRMMU) {
4093 int n; 4103 int n;
4094#if defined(MULTIPROCESSOR) 4104#if defined(MULTIPROCESSOR)
4095 for (n = 0; n < sparc_ncpus; n++) 4105 for (n = 0; n < sparc_ncpus; n++)
4096#else 4106#else
4097 n = 0; 4107 n = 0;
4098#endif 4108#endif
4099 { 4109 {
4100 if (pm->pm_reg_ptps[n][vr] != SRMMU_TEINVALID) 4110 if (pm->pm_reg_ptps[n][vr] != SRMMU_TEINVALID)
4101 printf("pmap_chk: spurious PTP in user " 4111 printf("pmap_chk: spurious PTP in user "
4102 "region %d on CPU %d\n", vr, n); 4112 "region %d on CPU %d\n", vr, n);
4103 } 4113 }
4104 } 4114 }
4105 if (rp->rg_nsegmap != 0) 4115 if (rp->rg_nsegmap != 0)
4106 printf("pmap_chk: %d segments remain in " 4116 printf("pmap_chk: %d segments remain in "
4107 "region %d\n", rp->rg_nsegmap, vr); 4117 "region %d\n", rp->rg_nsegmap, vr);
4108 if (rp->rg_segmap != NULL) { 4118 if (rp->rg_segmap != NULL) {
4109 printf("pmap_chk: segments still " 4119 printf("pmap_chk: segments still "
4110 "allocated in region %d\n", vr); 4120 "allocated in region %d\n", vr);
4111 for (vs = 0; vs < NSEGRG; vs++) { 4121 for (vs = 0; vs < NSEGRG; vs++) {
4112 struct segmap *sp = &rp->rg_segmap[vs]; 4122 struct segmap *sp = &rp->rg_segmap[vs];
4113 if (sp->sg_npte != 0) 4123 if (sp->sg_npte != 0)
4114 printf("pmap_chk: %d ptes " 4124 printf("pmap_chk: %d ptes "
4115 "remain in segment %d\n", 4125 "remain in segment %d\n",
4116 sp->sg_npte, vs); 4126 sp->sg_npte, vs);
4117 if (sp->sg_pte != NULL) { 4127 if (sp->sg_pte != NULL) {
4118 printf("pmap_chk: ptes still " 4128 printf("pmap_chk: ptes still "
4119 "allocated in segment %d\n", vs); 4129 "allocated in segment %d\n", vs);
4120 } 4130 }
4121 if (CPU_HAS_SUNMMU) { 4131 if (CPU_HAS_SUNMMU) {
4122 if (sp->sg_pmeg != seginval) 4132 if (sp->sg_pmeg != seginval)
4123 printf("pmap_chk: pm %p(%d,%d) " 4133 printf("pmap_chk: pm %p(%d,%d) "
4124 "spurious soft pmeg %d\n", 4134 "spurious soft pmeg %d\n",
4125 pm, vr, vs, sp->sg_pmeg); 4135 pm, vr, vs, sp->sg_pmeg);
4126 } 4136 }
4127 } 4137 }
4128 } 4138 }
4129 4139
4130 /* Check for spurious pmeg entries in the MMU */ 4140 /* Check for spurious pmeg entries in the MMU */
4131 if (pm->pm_ctx == NULL) 4141 if (pm->pm_ctx == NULL)
4132 continue; 4142 continue;
4133 if (CPU_HAS_SUNMMU) { 4143 if (CPU_HAS_SUNMMU) {
4134 int ctx; 4144 int ctx;
4135 if (mmu_has_hole && (vr >= 32 || vr < (256 - 32))) 4145 if (mmu_has_hole && (vr >= 32 || vr < (256 - 32)))
4136 continue; 4146 continue;
4137 ctx = getcontext4(); 4147 ctx = getcontext4();
4138 setcontext4(pm->pm_ctxnum); 4148 setcontext4(pm->pm_ctxnum);
4139 for (vs = 0; vs < NSEGRG; vs++) { 4149 for (vs = 0; vs < NSEGRG; vs++) {
4140 vaddr_t va = VSTOVA(vr,vs); 4150 vaddr_t va = VSTOVA(vr,vs);
4141 int pmeg = getsegmap(va); 4151 int pmeg = getsegmap(va);
4142 if (pmeg != seginval) 4152 if (pmeg != seginval)
4143 printf("pmap_chk: pm %p(%d,%d:%x): " 4153 printf("pmap_chk: pm %p(%d,%d:%x): "
4144 "spurious pmeg %d\n", 4154 "spurious pmeg %d\n",
4145 pm, vr, vs, (u_int)va, pmeg); 4155 pm, vr, vs, (u_int)va, pmeg);
4146 } 4156 }
4147 setcontext4(ctx); 4157 setcontext4(ctx);
4148 } 4158 }
4149 } 4159 }
4150 if (pm->pm_stats.resident_count) { 4160 if (pm->pm_stats.resident_count) {
4151 printf("pmap_chk: res count %ld\n", 4161 printf("pmap_chk: res count %ld\n",
4152 pm->pm_stats.resident_count); 4162 pm->pm_stats.resident_count);
4153 } 4163 }
4154 if (pm->pm_stats.wired_count) { 4164 if (pm->pm_stats.wired_count) {
4155 printf("pmap_chk: wired count %ld\n", 4165 printf("pmap_chk: wired count %ld\n",
4156 pm->pm_stats.wired_count); 4166 pm->pm_stats.wired_count);
4157 } 4167 }
4158} 4168}
4159#endif /* DEBUG */ 4169#endif /* DEBUG */
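
pmap_quiet_check above (like the pool ctor and dtor below) iterates over CPUs with a small preprocessor idiom: the braced statement is the body of a for-loop on MULTIPROCESSOR kernels and an ordinary block with n fixed at 0 on uniprocessor kernels. A standalone sketch of the idiom, with ncpus standing in for sparc_ncpus:

	/* Sketch of the UP/MP per-CPU loop idiom used in this file. */
	#include <stdio.h>

	#define MULTIPROCESSOR		/* remove to see the UP expansion */

	static int ncpus = 2;		/* stand-in for sparc_ncpus */

	int
	main(void)
	{
		int n;

	#if defined(MULTIPROCESSOR)
		for (n = 0; n < ncpus; n++)
	#else
		n = 0;
	#endif
		{
			/* Same braced body serves both configurations. */
			printf("per-CPU work for cpu %d\n", n);
		}
		return 0;
	}
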
4160 4170
4161int 4171int
4162pmap_pmap_pool_ctor(void *arg, void *object, int flags) 4172pmap_pmap_pool_ctor(void *arg, void *object, int flags)
4163{ 4173{
4164 struct pmap *pm = object; 4174 struct pmap *pm = object;
4165 u_long addr; 4175 u_long addr;
4166 4176
4167 memset(pm, 0, sizeof *pm); 4177 memset(pm, 0, sizeof *pm);
4168 4178
4169 /* 4179 /*
4170 * `pmap_pool' entries include space for the per-CPU 4180 * `pmap_pool' entries include space for the per-CPU
4171 * region table pointer arrays. 4181 * region table pointer arrays.
4172 */ 4182 */
4173 addr = (u_long)pm + ALIGN(sizeof(struct pmap)); 4183 addr = (u_long)pm + ALIGN(sizeof(struct pmap));
4174 pm->pm_regmap = (void *)addr; 4184 pm->pm_regmap = (void *)addr;
4175 addr += ALIGN(NUREG * sizeof(struct regmap)); 4185 addr += ALIGN(NUREG * sizeof(struct regmap));
4176 pm->pm_reg_ptps = (int **)addr; 4186 pm->pm_reg_ptps = (int **)addr;
4177 addr += sparc_ncpus * sizeof(int *); 4187 addr += sparc_ncpus * sizeof(int *);
4178 pm->pm_reg_ptps_pa = (int *)addr; 4188 pm->pm_reg_ptps_pa = (int *)addr;
4179 4189
4180 qzero((void *)pm->pm_regmap, NUREG * sizeof(struct regmap)); 4190 qzero((void *)pm->pm_regmap, NUREG * sizeof(struct regmap));
4181 4191
4182 /* pm->pm_ctx = NULL; // already done */ 4192 /* pm->pm_ctx = NULL; // already done */
4183 4193
4184 if (CPU_HAS_SUNMMU) { 4194 if (CPU_HAS_SUNMMU) {
4185 TAILQ_INIT(&pm->pm_seglist); 4195 TAILQ_INIT(&pm->pm_seglist);
4186#if defined(SUN4_MMU3L) 4196#if defined(SUN4_MMU3L)
4187 TAILQ_INIT(&pm->pm_reglist); 4197 TAILQ_INIT(&pm->pm_reglist);
4188 if (HASSUN4_MMU3L) { 4198 if (HASSUN4_MMU3L) {
4189 int i; 4199 int i;
4190 for (i = NUREG; --i >= 0;) 4200 for (i = NUREG; --i >= 0;)
4191 pm->pm_regmap[i].rg_smeg = reginval; 4201 pm->pm_regmap[i].rg_smeg = reginval;
4192 } 4202 }
4193#endif 4203#endif
4194 } 4204 }
4195#if defined(SUN4M) || defined(SUN4D) 4205#if defined(SUN4M) || defined(SUN4D)
4196 else { 4206 else {
4197 int i, n; 4207 int i, n;
4198 4208
4199 /* 4209 /*
4200 * We must allocate and initialize hardware-readable (MMU) 4210 * We must allocate and initialize hardware-readable (MMU)
4201 * pagetables. We must also map the kernel regions into this 4211 * pagetables. We must also map the kernel regions into this
4202 * pmap's pagetables, so that we can access the kernel from 4212 * pmap's pagetables, so that we can access the kernel from
4203 * this user context. 4213 * this user context.
4204 */ 4214 */
4205#if defined(MULTIPROCESSOR) 4215#if defined(MULTIPROCESSOR)
4206 for (n = 0; n < sparc_ncpus; n++) 4216 for (n = 0; n < sparc_ncpus; n++)
4207#else 4217#else
4208 n = 0; 4218 n = 0;
4209#endif 4219#endif
4210 { 4220 {
4211 int *upt, *kpt; 4221 int *upt, *kpt;
4212 4222
4213 upt = pool_get(&L1_pool, flags); 4223 upt = pool_get(&L1_pool, flags);
4214 pm->pm_reg_ptps[n] = upt; 4224 pm->pm_reg_ptps[n] = upt;
4215 pm->pm_reg_ptps_pa[n] = VA2PA((char *)upt); 4225 pm->pm_reg_ptps_pa[n] = VA2PA((char *)upt);
4216 4226
4217 /* Invalidate user space regions */ 4227 /* Invalidate user space regions */
4218 for (i = 0; i < NUREG; i++) 4228 for (i = 0; i < NUREG; i++)
4219 setpgt4m(upt++, SRMMU_TEINVALID); 4229 setpgt4m(upt++, SRMMU_TEINVALID);
4220 4230
4221 /* Copy kernel regions */ 4231 /* Copy kernel regions */
4222 kpt = &pmap_kernel()->pm_reg_ptps[n][VA_VREG(KERNBASE)]; 4232 kpt = &pmap_kernel()->pm_reg_ptps[n][VA_VREG(KERNBASE)];
4223 for (i = 0; i < NKREG; i++) 4233 for (i = 0; i < NKREG; i++)
4224 setpgt4m(upt++, kpt[i]); 4234 setpgt4m(upt++, kpt[i]);
4225 } 4235 }
4226 } 4236 }
4227#endif /* SUN4M || SUN4D */ 4237#endif /* SUN4M || SUN4D */
4228 4238
4229 /* XXX - a peculiar place to do this, but we can't do it in pmap_init 4239 /* XXX - a peculiar place to do this, but we can't do it in pmap_init
4230 * and here at least it's off the beaten code track. 4240 * and here at least it's off the beaten code track.
4231 */ 4241 */
4232{static int x; if (x == 0) pool_setlowat(&pv_pool, 512), x = 1; } 4242{static int x; if (x == 0) pool_setlowat(&pv_pool, 512), x = 1; }
4233 4243
4234 return (0); 4244 return (0);
4235} 4245}
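
The ctor above carves a single pool item into the pmap header, the regmap array, and the per-CPU region-PTP pointer and PA arrays. A self-contained sketch of that carving, with illustrative names and counts and a simple round-up in place of the kernel's ALIGN(); calloc stands in for the pool allocation:

	/* Sketch: one allocation split into a header plus trailing arrays. */
	#include <stdlib.h>
	#include <stdint.h>

	#define NREGIONS	8	/* stand-in for NUREG */
	#define NCPUS		4	/* stand-in for sparc_ncpus */

	#define ROUNDUP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

	struct fake_regmap { int rg_nsegmap; };

	struct fake_pmap {
		struct fake_regmap *pm_regmap;
		int **pm_reg_ptps;
		int *pm_reg_ptps_pa;
	};

	static struct fake_pmap *
	fake_pmap_ctor(void)
	{
		struct fake_pmap *pm;
		uintptr_t addr;
		size_t sz;

		/* Header + regmap array + per-CPU PTP pointer and PA arrays. */
		sz = ROUNDUP(sizeof(struct fake_pmap), sizeof(void *)) +
		    ROUNDUP(NREGIONS * sizeof(struct fake_regmap), sizeof(void *)) +
		    NCPUS * sizeof(int *) + NCPUS * sizeof(int);
		pm = calloc(1, sz);
		if (pm == NULL)
			return NULL;

		/* Carve the trailing space into the sub-arrays, as the ctor does. */
		addr = (uintptr_t)pm + ROUNDUP(sizeof(struct fake_pmap), sizeof(void *));
		pm->pm_regmap = (struct fake_regmap *)addr;
		addr += ROUNDUP(NREGIONS * sizeof(struct fake_regmap), sizeof(void *));
		pm->pm_reg_ptps = (int **)addr;
		addr += NCPUS * sizeof(int *);
		pm->pm_reg_ptps_pa = (int *)addr;
		return pm;
	}

	int
	main(void)
	{
		struct fake_pmap *pm = fake_pmap_ctor();

		free(pm);
		return 0;
	}
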
4236 4246
4237void 4247void
4238pmap_pmap_pool_dtor(void *arg, void *object) 4248pmap_pmap_pool_dtor(void *arg, void *object)
4239{ 4249{
4240 struct pmap *pm = object; 4250 struct pmap *pm = object;
4241 union ctxinfo *c; 4251 union ctxinfo *c;
4242 int s = splvm(); /* paranoia */ 4252 int s = splvm(); /* paranoia */
4243 4253
4244#ifdef DEBUG 4254#ifdef DEBUG
4245 if (pmapdebug & PDB_DESTROY) 4255 if (pmapdebug & PDB_DESTROY)
4246 printf("pmap_pmap_pool_dtor(%p)\n", pm); 4256 printf("pmap_pmap_pool_dtor(%p)\n", pm);
4247#endif 4257#endif
4248 4258
4249 if ((c = pm->pm_ctx) != NULL) { 4259 if ((c = pm->pm_ctx) != NULL) {
4250 ctx_free(pm); 4260 ctx_free(pm);
4251 } 4261 }
4252 4262
4253#if defined(SUN4M) || defined(SUN4D) 4263#if defined(SUN4M) || defined(SUN4D)
4254 if (CPU_HAS_SRMMU) { 4264 if (CPU_HAS_SRMMU) {
4255 int n; 4265 int n;
4256 4266
4257#if defined(MULTIPROCESSOR) 4267#if defined(MULTIPROCESSOR)
4258 for (n = 0; n < sparc_ncpus; n++) 4268 for (n = 0; n < sparc_ncpus; n++)
4259#else 4269#else
4260 n = 0; 4270 n = 0;
4261#endif 4271#endif
4262 { 4272 {
4263 int *pt = pm->pm_reg_ptps[n]; 4273 int *pt = pm->pm_reg_ptps[n];
4264 pm->pm_reg_ptps[n] = NULL; 4274 pm->pm_reg_ptps[n] = NULL;
4265 pm->pm_reg_ptps_pa[n] = 0; 4275 pm->pm_reg_ptps_pa[n] = 0;
4266 pool_put(&L1_pool, pt); 4276 pool_put(&L1_pool, pt);
4267 } 4277 }
4268 } 4278 }
4269#endif /* SUN4M || SUN4D */ 4279#endif /* SUN4M || SUN4D */
4270 splx(s); 4280 splx(s);
4271} 4281}
4272 4282
4273/* 4283/*
4274 * Create and return a physical map. 4284 * Create and return a physical map.
4275 */ 4285 */
4276struct pmap * 4286struct pmap *
4277pmap_create(void) 4287pmap_create(void)
4278{ 4288{
4279 struct pmap *pm; 4289 struct pmap *pm;
4280 4290
4281 pm = pool_cache_get(&pmap_cache, PR_WAITOK); 4291 pm = pool_cache_get(&pmap_cache, PR_WAITOK);
4282 4292
4283 /* 4293 /*
4284 * Reset fields that are not preserved in the pmap cache pool. 4294 * Reset fields that are not preserved in the pmap cache pool.
4285 */ 4295 */
4286 pm->pm_refcount = 1; 4296 pm->pm_refcount = 1;
4287#if defined(MULTIPROCESSOR) 4297#if defined(MULTIPROCESSOR)
4288 /* reset active CPU set */ 4298 /* reset active CPU set */
4289 pm->pm_cpuset = 0; 4299 pm->pm_cpuset = 0;
4290#endif 4300#endif
4291 if (CPU_HAS_SUNMMU) { 4301 if (CPU_HAS_SUNMMU) {
4292 /* reset the region gap */ 4302 /* reset the region gap */
4293 pm->pm_gap_start = 0; 4303 pm->pm_gap_start = 0;
4294 pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS); 4304 pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
4295 } 4305 }
4296 4306
4297#ifdef DEBUG 4307#ifdef DEBUG
4298 if (pmapdebug & PDB_CREATE) 4308 if (pmapdebug & PDB_CREATE)
4299 printf("pmap_create[%d]: created %p\n", cpu_number(), pm); 4309 printf("pmap_create[%d]: created %p\n", cpu_number(), pm);
4300 pmap_quiet_check(pm); 4310 pmap_quiet_check(pm);
4301#endif 4311#endif
4302 return (pm); 4312 return (pm);
4303} 4313}
4304 4314
4305/* 4315/*
4306 * Retire the given pmap from service. 4316 * Retire the given pmap from service.
4307 * Should only be called if the map contains no valid mappings. 4317 * Should only be called if the map contains no valid mappings.
4308 */ 4318 */
4309void 4319void
4310pmap_destroy(struct pmap *pm) 4320pmap_destroy(struct pmap *pm)
4311{ 4321{
4312 4322
4313#ifdef DEBUG 4323#ifdef DEBUG
4314 if (pmapdebug & PDB_DESTROY) 4324 if (pmapdebug & PDB_DESTROY)
4315 printf("pmap_destroy[%d](%p)\n", cpu_number(), pm); 4325 printf("pmap_destroy[%d](%p)\n", cpu_number(), pm);
4316#endif 4326#endif
4317 if (atomic_dec_uint_nv(&pm->pm_refcount) == 0) { 4327 if (atomic_dec_uint_nv(&pm->pm_refcount) == 0) {
4318#ifdef DEBUG 4328#ifdef DEBUG
4319 pmap_quiet_check(pm); 4329 pmap_quiet_check(pm);
4320#endif 4330#endif
4321 pool_cache_put(&pmap_cache, pm); 4331 pool_cache_put(&pmap_cache, pm);
4322 } 4332 }
4323} 4333}
4324 4334
4325/* 4335/*
4326 * Add a reference to the given pmap. 4336 * Add a reference to the given pmap.
4327 */ 4337 */
4328void 4338void
4329pmap_reference(struct pmap *pm) 4339pmap_reference(struct pmap *pm)
4330{ 4340{
4331 4341
4332 atomic_inc_uint(&pm->pm_refcount); 4342 atomic_inc_uint(&pm->pm_refcount);
4333} 4343}
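
pmap_create, pmap_destroy and pmap_reference form a conventional atomic reference-count lifecycle: create at 1, bump on reference, release the object when the count drops to 0. A minimal sketch of the same pattern using C11 atomics in place of the kernel's atomic_inc_uint/atomic_dec_uint_nv, and malloc/free standing in for the pool cache:

	/* Sketch of the refcount lifecycle; "obj" is an illustrative type. */
	#include <stdatomic.h>
	#include <stdlib.h>

	struct obj {
		atomic_uint refcount;
	};

	static struct obj *
	obj_create(void)
	{
		struct obj *o = calloc(1, sizeof(*o));

		if (o != NULL)
			atomic_store(&o->refcount, 1);
		return o;
	}

	static void
	obj_reference(struct obj *o)
	{
		atomic_fetch_add(&o->refcount, 1);
	}

	static void
	obj_destroy(struct obj *o)
	{
		/* Free only when the last reference goes away. */
		if (atomic_fetch_sub(&o->refcount, 1) == 1)
			free(o);
	}

	int
	main(void)
	{
		struct obj *o = obj_create();

		obj_reference(o);
		obj_destroy(o);	/* count drops to 1 */
		obj_destroy(o);	/* count drops to 0, freed */
		return 0;
	}
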
4334 4344
4335#if defined(SUN4) || defined(SUN4C) 4345#if defined(SUN4) || defined(SUN4C)
4336/* 4346/*
4337 * helper to deallocate level 2 & 3 page tables. 4347 * helper to deallocate level 2 & 3 page tables.
4338 */ 4348 */
4339static void 4349static void
4340pgt_lvl23_remove4_4c(struct pmap *pm, struct regmap *rp, struct segmap *sp, 4350pgt_lvl23_remove4_4c(struct pmap *pm, struct regmap *rp, struct segmap *sp,
4341 int vr, int vs) 4351 int vr, int vs)
4342{ 4352{
4343 vaddr_t va, tva; 4353 vaddr_t va, tva;
4344 int i, pmeg; 4354 int i, pmeg;
4345 4355
4346 va = VSTOVA(vr,vs); 4356 va = VSTOVA(vr,vs);
4347 if ((pmeg = sp->sg_pmeg) != seginval) { 4357 if ((pmeg = sp->sg_pmeg) != seginval) {
4348 if (CTX_USABLE(pm,rp)) { 4358 if (CTX_USABLE(pm,rp)) {
4349 setcontext4(pm->pm_ctxnum); 4359 setcontext4(pm->pm_ctxnum);
4350 setsegmap(va, seginval); 4360 setsegmap(va, seginval);
4351 } else { 4361 } else {
4352 /* no context, use context 0 */ 4362 /* no context, use context 0 */
4353 setcontext4(0); 4363 setcontext4(0);
4354 if (HASSUN4_MMU3L && rp->rg_smeg != reginval) { 4364 if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
4355 setregmap(0, rp->rg_smeg); 4365 setregmap(0, rp->rg_smeg);
4356 tva = vs << SGSHIFT; 4366 tva = vs << SGSHIFT;
4357 setsegmap(tva, seginval); 4367 setsegmap(tva, seginval);
4358 } 4368 }
4359 } 4369 }
4360 if (!HASSUN4_MMU3L) { 4370 if (!HASSUN4_MMU3L) {
4361 if (pm == pmap_kernel()) { 4371 if (pm == pmap_kernel()) {
4362 /* Unmap segment from all contexts */ 4372 /* Unmap segment from all contexts */
4363 for (i = ncontext; --i >= 0;) { 4373 for (i = ncontext; --i >= 0;) {
4364 setcontext4(i); 4374 setcontext4(i);
4365 setsegmap(va, seginval); 4375 setsegmap(va, seginval);
4366 } 4376 }
4367 } 4377 }
4368 } 4378 }
4369 me_free(pm, pmeg); 4379 me_free(pm, pmeg);
4370 sp->sg_pmeg = seginval; 4380 sp->sg_pmeg = seginval;
4371 } 4381 }
4372 /* Free software tables for non-kernel maps */ 4382 /* Free software tables for non-kernel maps */
4373 if (pm != pmap_kernel()) { 4383 if (pm != pmap_kernel()) {
4374 pool_put(&pte_pool, sp->sg_pte); 4384 pool_put(&pte_pool, sp->sg_pte);
4375 sp->sg_pte = NULL; 4385 sp->sg_pte = NULL;
4376 } 4386 }
4377 4387
4378 if (rp->rg_nsegmap <= 0) 4388 if (rp->rg_nsegmap <= 0)
4379 panic("pgt_rm: pm %p: nsegmap = %d\n", pm, rp->rg_nsegmap); 4389 panic("pgt_rm: pm %p: nsegmap = %d\n", pm, rp->rg_nsegmap);
4380 4390
4381 if (--rp->rg_nsegmap == 0) { 4391 if (--rp->rg_nsegmap == 0) {
4382#if defined(SUN4_MMU3L) 4392#if defined(SUN4_MMU3L)
4383 if (HASSUN4_MMU3L) { 4393 if (HASSUN4_MMU3L) {
4384 if (rp->rg_smeg != reginval) { 4394 if (rp->rg_smeg != reginval) {
4385 if (pm == pmap_kernel()) { 4395 if (pm == pmap_kernel()) {
4386 /* Unmap from all contexts */ 4396 /* Unmap from all contexts */
4387 for (i = ncontext; --i >= 0;) { 4397 for (i = ncontext; --i >= 0;) {
4388 setcontext4(i); 4398 setcontext4(i);
4389 setregmap(va, reginval); 4399 setregmap(va, reginval);
4390 } 4400 }
4391 } else if (pm->pm_ctx) { 4401 } else if (pm->pm_ctx) {
4392 setcontext4(pm->pm_ctxnum); 4402 setcontext4(pm->pm_ctxnum);
4393 setregmap(va, reginval); 4403 setregmap(va, reginval);
4394 } 4404 }
4395 4405
4396 /* Release MMU resource */ 4406 /* Release MMU resource */
4397 region_free(pm, rp->rg_smeg); 4407 region_free(pm, rp->rg_smeg);
4398 rp->rg_smeg = reginval; 4408 rp->rg_smeg = reginval;
4399 } 4409 }
4400 } 4410 }
4401#endif /* SUN4_MMU3L */ 4411#endif /* SUN4_MMU3L */
4402 /* Free software tables for non-kernel maps */ 4412 /* Free software tables for non-kernel maps */
4403 if (pm != pmap_kernel()) { 4413 if (pm != pmap_kernel()) {
4404 GAP_WIDEN(pm,vr); 4414 GAP_WIDEN(pm,vr);
4405 pool_put(&segmap_pool, rp->rg_segmap); 4415 pool_put(&segmap_pool, rp->rg_segmap);
4406 rp->rg_segmap = NULL; 4416 rp->rg_segmap = NULL;
4407 } 4417 }
4408 } 4418 }
4409} 4419}
4410#endif /* SUN4 || SUN4C */ 4420#endif /* SUN4 || SUN4C */
4411 4421
4412#if defined(SUN4M) || defined(SUN4D) 4422#if defined(SUN4M) || defined(SUN4D)
4413/* 4423/*
4414 * SRMMU helper to deallocate level 2 & 3 page tables. 4424 * SRMMU helper to deallocate level 2 & 3 page tables.
4415 */ 4425 */
4416static void 4426static void
4417pgt_lvl23_remove4m(struct pmap *pm, struct regmap *rp, struct segmap *sp, 4427pgt_lvl23_remove4m(struct pmap *pm, struct regmap *rp, struct segmap *sp,
4418 int vr, int vs) 4428 int vr, int vs)
4419{ 4429{
4420 4430
4421 /* Invalidate level 2 PTP entry */ 4431 /* Invalidate level 2 PTP entry */
4422 if (pm->pm_ctx) 4432 if (pm->pm_ctx)
4423 tlb_flush_segment(VSTOVA(vr,vs), pm->pm_ctxnum, 4433 tlb_flush_segment(VSTOVA(vr,vs), pm->pm_ctxnum,
4424 PMAP_CPUSET(pm)); 4434 PMAP_CPUSET(pm));
4425 setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID); 4435 setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
4426 pool_put(&L23_pool, sp->sg_pte); 4436 pool_put(&L23_pool, sp->sg_pte);
4427 sp->sg_pte = NULL; 4437 sp->sg_pte = NULL;
4428 4438
4429 /* If region is now empty, remove level 2 pagetable as well */ 4439 /* If region is now empty, remove level 2 pagetable as well */
4430 if (--rp->rg_nsegmap == 0) { 4440 if (--rp->rg_nsegmap == 0) {
4431 int n = 0; 4441 int n = 0;
4432 if (pm->pm_ctx) 4442 if (pm->pm_ctx)
4433 tlb_flush_region(VRTOVA(vr), pm->pm_ctxnum, 4443 tlb_flush_region(VRTOVA(vr), pm->pm_ctxnum,
4434 PMAP_CPUSET(pm)); 4444 PMAP_CPUSET(pm));
4435#ifdef MULTIPROCESSOR 4445#ifdef MULTIPROCESSOR
4436 /* Invalidate level 1 PTP entries on all CPUs */ 4446 /* Invalidate level 1 PTP entries on all CPUs */
4437 for (; n < sparc_ncpus; n++) { 4447 for (; n < sparc_ncpus; n++) {
4438 if ((cpus[n]->flags & CPUFLG_HATCHED) == 0) 4448 if ((cpus[n]->flags & CPUFLG_HATCHED) == 0)
4439 continue; 4449 continue;
4440#endif 4450#endif
4441 setpgt4m(&pm->pm_reg_ptps[n][vr], SRMMU_TEINVALID); 4451 setpgt4m(&pm->pm_reg_ptps[n][vr], SRMMU_TEINVALID);
4442#ifdef MULTIPROCESSOR 4452#ifdef MULTIPROCESSOR
4443 } 4453 }
4444#endif 4454#endif
4445 4455
4446 pool_put(&segmap_pool, rp->rg_segmap); 4456 pool_put(&segmap_pool, rp->rg_segmap);
4447 rp->rg_segmap = NULL; 4457 rp->rg_segmap = NULL;
4448 pool_put(&L23_pool, rp->rg_seg_ptps); 4458 pool_put(&L23_pool, rp->rg_seg_ptps);
4449 } 4459 }
4450} 4460}
4451#endif /* SUN4M || SUN4D */ 4461#endif /* SUN4M || SUN4D */
4452 4462
4453void 4463void
4454pmap_remove_all(struct pmap *pm) 4464pmap_remove_all(struct pmap *pm)
4455{ 4465{
4456 if (pm->pm_ctx == NULL) 4466 if (pm->pm_ctx == NULL)
4457 return; 4467 return;
4458 4468
4459#if defined(SUN4) || defined(SUN4C) 4469#if defined(SUN4) || defined(SUN4C)
4460 if (CPU_HAS_SUNMMU) { 4470 if (CPU_HAS_SUNMMU) {
4461 int ctx = getcontext4(); 4471 int ctx = getcontext4();
4462 setcontext4(pm->pm_ctxnum); 4472 setcontext4(pm->pm_ctxnum);
4463 cache_flush_context(pm->pm_ctxnum); 4473 cache_flush_context(pm->pm_ctxnum);
4464 setcontext4(ctx); 4474 setcontext4(ctx);
4465 } 4475 }
4466#endif 4476#endif
4467 4477
4468#if defined(SUN4M) || defined(SUN4D) 4478#if defined(SUN4M) || defined(SUN4D)
4469 if (CPU_HAS_SRMMU) { 4479 if (CPU_HAS_SRMMU) {
4470 cache_flush_context(pm->pm_ctxnum); 4480 cache_flush_context(pm->pm_ctxnum);
4471 } 4481 }
4472#endif 4482#endif
4473 4483
4474 pm->pm_flags |= PMAP_USERCACHECLEAN; 4484 pm->pm_flags |= PMAP_USERCACHECLEAN;
4475} 4485}
4476 4486
4477/* 4487/*
4478 * Remove the given range of mapping entries. 4488 * Remove the given range of mapping entries.
4479 * The starting and ending addresses are already rounded to pages. 4489 * The starting and ending addresses are already rounded to pages.
4480 * Sheer lunacy: pmap_remove is often asked to remove nonexistent 4490 * Sheer lunacy: pmap_remove is often asked to remove nonexistent
4481 * mappings. 4491 * mappings.
4482 */ 4492 */
4483void 4493void
4484pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva) 4494pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
4485{ 4495{
4486 vaddr_t nva; 4496 vaddr_t nva;
4487 int vr, vs, s, ctx; 4497 int vr, vs, s, ctx;
4488 void (*rm)(struct pmap *, vaddr_t, vaddr_t, int, int); 4498 void (*rm)(struct pmap *, vaddr_t, vaddr_t, int, int);
4489 4499
4490#ifdef DEBUG 4500#ifdef DEBUG
4491 if (pmapdebug & PDB_REMOVE) 4501 if (pmapdebug & PDB_REMOVE)
4492 printf("pmap_remove[%d](%p, 0x%lx, 0x%lx)\n", 4502 printf("pmap_remove[%d](%p, 0x%lx, 0x%lx)\n",
4493 cpu_number(), pm, va, endva); 4503 cpu_number(), pm, va, endva);
4494#endif 4504#endif
4495 4505
4496 if (!CPU_HAS_SRMMU) 4506 if (!CPU_HAS_SRMMU)
4497 write_user_windows(); 4507 write_user_windows();
4498 4508
4499 if (pm == pmap_kernel()) { 4509 if (pm == pmap_kernel()) {
4500 /* 4510 /*
4501 * Removing from kernel address space. 4511 * Removing from kernel address space.
4502 */ 4512 */
4503 rm = pmap_rmk; 4513 rm = pmap_rmk;
4504 } else { 4514 } else {
4505 /* 4515 /*
4506 * Removing from user address space. 4516 * Removing from user address space.
4507 */ 4517 */
4508 rm = pmap_rmu; 4518 rm = pmap_rmu;
4509 } 4519 }
4510 4520
4511 ctx = getcontext(); 4521 ctx = getcontext();
4512 s = splvm(); /* XXX conservative */ 4522 s = splvm(); /* XXX conservative */
4513 PMAP_LOCK(); 4523 PMAP_LOCK();
4514 for (; va < endva; va = nva) { 4524 for (; va < endva; va = nva) {
4515 /* do one virtual segment at a time */ 4525 /* do one virtual segment at a time */
4516 vr = VA_VREG(va); 4526 vr = VA_VREG(va);
4517 vs = VA_VSEG(va); 4527 vs = VA_VSEG(va);
4518 nva = VSTOVA(vr, vs + 1); 4528 nva = VSTOVA(vr, vs + 1);
4519 if (nva == 0 || nva > endva) 4529 if (nva == 0 || nva > endva)
4520 nva = endva; 4530 nva = endva;
4521 if (pm->pm_regmap[vr].rg_nsegmap != 0) 4531 if (pm->pm_regmap[vr].rg_nsegmap != 0)
4522 (*rm)(pm, va, nva, vr, vs); 4532 (*rm)(pm, va, nva, vr, vs);
4523 } 4533 }
4524 PMAP_UNLOCK(); 4534 PMAP_UNLOCK();
4525 splx(s); 4535 splx(s);
4526 setcontext(ctx); 4536 setcontext(ctx);
4527} 4537}
4528 4538
4529/* 4539/*
4530 * It is the same amount of work to cache_flush_page 16 pages 4540 * It is the same amount of work to cache_flush_page 16 pages
4531 * as to cache_flush_segment 1 segment, assuming a 64K cache size 4541 * as to cache_flush_segment 1 segment, assuming a 64K cache size
4532 * and a 4K page size or a 128K cache size and 8K page size. 4542 * and a 4K page size or a 128K cache size and 8K page size.
4533 */ 4543 */
4534#define PMAP_SFL_THRESHOLD 16 /* if > magic, use cache_flush_segment */ 4544#define PMAP_SFL_THRESHOLD 16 /* if > magic, use cache_flush_segment */
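
The break-even point in the comment above is simply cache size divided by page size: a segment flush walks a cache-sized range of lines while a page flush walks one page's worth, so 64K/4K and 128K/8K both give 16. A trivial sketch of that arithmetic, with the sizes as illustrative constants:

	/* Sketch: where the PMAP_SFL_THRESHOLD value of 16 comes from. */
	#include <stdio.h>

	int
	main(void)
	{
		const int cache_sizes[] = { 64 * 1024, 128 * 1024 };
		const int page_sizes[]  = {  4 * 1024,   8 * 1024 };
		int i;

		for (i = 0; i < 2; i++)
			printf("cache %dK / page %dK -> threshold %d pages\n",
			    cache_sizes[i] / 1024, page_sizes[i] / 1024,
			    cache_sizes[i] / page_sizes[i]);	/* 16 both times */
		return 0;
	}
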
4535 4545
4536/* 4546/*
4537 * Remove a range contained within a single segment. 4547 * Remove a range contained within a single segment.
4538 * These are egregiously complicated routines. 4548 * These are egregiously complicated routines.
4539 */ 4549 */
4540 4550
4541#if defined(SUN4) || defined(SUN4C) 4551#if defined(SUN4) || defined(SUN4C)
4542 4552
4543/* remove from kernel */ 4553/* remove from kernel */
4544/*static*/ void 4554/*static*/ void
4545pmap_rmk4_4c(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs) 4555pmap_rmk4_4c(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
4546{ 4556{
4547 int pte, mmupte, *ptep, perpage, npg; 4557 int pte, mmupte, *ptep, perpage, npg;
4548 struct vm_page *pg; 4558 struct vm_page *pg;
4549 int nleft, pmeg, inmmu; 4559 int nleft, pmeg, inmmu;
4550 struct regmap *rp; 4560 struct regmap *rp;
4551 struct segmap *sp; 4561 struct segmap *sp;
4552 4562
4553 rp = &pm->pm_regmap[vr]; 4563 rp = &pm->pm_regmap[vr];
4554 sp = &rp->rg_segmap[vs]; 4564 sp = &rp->rg_segmap[vs];
4555 4565
4556 if (rp->rg_nsegmap == 0) 4566 if (rp->rg_nsegmap == 0)
4557 return; 4567 return;
4558 if ((nleft = sp->sg_npte) == 0) 4568 if ((nleft = sp->sg_npte) == 0)
4559 return; 4569 return;
4560 pmeg = sp->sg_pmeg; 4570 pmeg = sp->sg_pmeg;
4561 inmmu = pmeg != seginval; 4571 inmmu = pmeg != seginval;
4562 ptep = &sp->sg_pte[VA_VPG(va)]; 4572 ptep = &sp->sg_pte[VA_VPG(va)];
4563 4573
4564 /* decide how to flush cache */ 4574 /* decide how to flush cache */
4565 npg = (endva - va) >> PGSHIFT; 4575 npg = (endva - va) >> PGSHIFT;
4566 if (!inmmu) { 4576 if (!inmmu) {
4567 perpage = 0; 4577 perpage = 0;
4568 } else if (npg > PMAP_SFL_THRESHOLD) { 4578 } else if (npg > PMAP_SFL_THRESHOLD) {
4569 /* flush the whole segment */ 4579 /* flush the whole segment */
4570 perpage = 0; 4580 perpage = 0;
4571 cache_flush_segment(vr, vs, 0); 4581 cache_flush_segment(vr, vs, 0);
4572 } else { 4582 } else {
4573 /* flush each page individually; some never need flushing */ 4583 /* flush each page individually; some never need flushing */
4574 perpage = (CACHEINFO.c_vactype != VAC_NONE); 4584 perpage = (CACHEINFO.c_vactype != VAC_NONE);
4575 } 4585 }
4576 4586
4577 for (; va < endva; va += NBPG, ptep++) { 4587 for (; va < endva; va += NBPG, ptep++) {
4578 pte = *ptep; 4588 pte = *ptep;
4579 mmupte = inmmu ? getpte4(va) : 0; 4589 mmupte = inmmu ? getpte4(va) : 0;
4580 if ((pte & PG_V) == 0) { 4590 if ((pte & PG_V) == 0) {
4581#ifdef DIAGNOSTIC 4591#ifdef DIAGNOSTIC
4582 if (inmmu && (mmupte & PG_V) != 0) 4592 if (inmmu && (mmupte & PG_V) != 0)
4583 printf("rmk: inconsistent ptes va=%lx\n", va); 4593 printf("rmk: inconsistent ptes va=%lx\n", va);
4584#endif 4594#endif
4585 continue; 4595 continue;
4586 } 4596 }
4587 if ((pte & PG_TYPE) == PG_OBMEM) { 4597 if ((pte & PG_TYPE) == PG_OBMEM) {
4588 /* if cacheable, flush page as needed */ 4598 /* if cacheable, flush page as needed */
4589 if (perpage && (mmupte & PG_NC) == 0) 4599 if (perpage && (mmupte & PG_NC) == 0)
4590 cache_flush_page(va, 0); 4600 cache_flush_page(va, 0);
4591 if ((pg = pvhead4_4c(pte)) != NULL) { 4601 if ((pg = pvhead4_4c(pte)) != NULL) {
4592 if (inmmu) 4602 if (inmmu)
4593 VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(mmupte); 4603 VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(mmupte);
4594 pv_unlink4_4c(pg, pm, va); 4604 pv_unlink4_4c(pg, pm, va);
4595 } 4605 }
4596 } 4606 }
4597 nleft--; 4607 nleft--;
4598#ifdef DIAGNOSTIC 4608#ifdef DIAGNOSTIC
4599 if (nleft < 0) 4609 if (nleft < 0)
4600 panic("pmap_rmk: too many PTEs in segment; " 4610 panic("pmap_rmk: too many PTEs in segment; "
4601 "va 0x%lx; endva 0x%lx", va, endva); 4611 "va 0x%lx; endva 0x%lx", va, endva);
4602#endif 4612#endif
4603 if (pte & PG_WIRED) { 4613 if (pte & PG_WIRED) {
4604 sp->sg_nwired--; 4614 sp->sg_nwired--;
4605 pm->pm_stats.wired_count--; 4615 pm->pm_stats.wired_count--;
4606 } 4616 }
4607 4617
4608 if (inmmu) 4618 if (inmmu)
4609 setpte4(va, 0); 4619 setpte4(va, 0);
4610 *ptep = 0; 4620 *ptep = 0;
4611 pm->pm_stats.resident_count--; 4621 pm->pm_stats.resident_count--;
4612 } 4622 }
4613 4623
4614#ifdef DIAGNOSTIC 4624#ifdef DIAGNOSTIC
4615 if (sp->sg_nwired > nleft || sp->sg_nwired < 0) 4625 if (sp->sg_nwired > nleft || sp->sg_nwired < 0)
4616 panic("pmap_rmk: pm %p, va %lx: nleft=%d, nwired=%d", 4626 panic("pmap_rmk: pm %p, va %lx: nleft=%d, nwired=%d",
4617 pm, va, nleft, sp->sg_nwired); 4627 pm, va, nleft, sp->sg_nwired);
4618#endif 4628#endif
4619 if ((sp->sg_npte = nleft) == 0) 4629 if ((sp->sg_npte = nleft) == 0)
4620 pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs); 4630 pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
4621 else if (sp->sg_nwired == 0) { 4631 else if (sp->sg_nwired == 0) {
4622 if (sp->sg_pmeg != seginval) 4632 if (sp->sg_pmeg != seginval)
4623 mmu_pmeg_unlock(sp->sg_pmeg); 4633 mmu_pmeg_unlock(sp->sg_pmeg);
4624 } 4634 }
4625} 4635}
4626 4636
4627#endif /* SUN4 || SUN4C */ 4637#endif /* SUN4 || SUN4C */
4628 4638
4629#if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_rmk */ 4639#if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_rmk */
4630 4640
4631/* remove from kernel (4m)*/ 4641/* remove from kernel (4m)*/
4632/* pm is already locked */ 4642/* pm is already locked */
4633/*static*/ void 4643/*static*/ void
4634pmap_rmk4m(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs) 4644pmap_rmk4m(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
4635{ 4645{
4636 int tpte, perpage, npg; 4646 int tpte, perpage, npg;
4637 struct vm_page *pg; 4647 struct vm_page *pg;
4638 struct regmap *rp; 4648 struct regmap *rp;
4639 struct segmap *sp; 4649 struct segmap *sp;
4640 4650
4641 rp = &pm->pm_regmap[vr]; 4651 rp = &pm->pm_regmap[vr];
4642 sp = &rp->rg_segmap[vs]; 4652 sp = &rp->rg_segmap[vs];
4643 if (rp->rg_nsegmap == 0) 4653 if (rp->rg_nsegmap == 0)
4644 return; 4654 return;
4645 4655
4646 /* decide how to flush cache */ 4656 /* decide how to flush cache */
4647 npg = (endva - va) >> PGSHIFT; 4657 npg = (endva - va) >> PGSHIFT;
4648 if (npg > PMAP_SFL_THRESHOLD) { 4658 if (npg > PMAP_SFL_THRESHOLD) {
4649 /* flush the whole segment */ 4659 /* flush the whole segment */
4650 perpage = 0; 4660 perpage = 0;
4651 if (CACHEINFO.c_vactype != VAC_NONE) 4661 if (CACHEINFO.c_vactype != VAC_NONE)
4652 cache_flush_segment(vr, vs, 0); 4662 cache_flush_segment(vr, vs, 0);
4653 } else { 4663 } else {
4654 /* flush each page individually; some never need flushing */ 4664 /* flush each page individually; some never need flushing */
4655 perpage = (CACHEINFO.c_vactype != VAC_NONE); 4665 perpage = (CACHEINFO.c_vactype != VAC_NONE);
4656 } 4666 }
4657 while (va < endva) { 4667 while (va < endva) {
4658 tpte = sp->sg_pte[VA_SUN4M_VPG(va)]; 4668 tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
4659 if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) { 4669 if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
4660#ifdef DEBUG 4670#ifdef DEBUG
4661 if ((pmapdebug & PDB_SANITYCHK) && 4671 if ((pmapdebug & PDB_SANITYCHK) &&
4662 (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE) 4672 (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
4663 panic("pmap_rmk: Spurious kTLB entry for 0x%lx", 4673 panic("pmap_rmk: Spurious kTLB entry for 0x%lx",
4664 va); 4674 va);
4665#endif 4675#endif
4666 va += NBPG; 4676 va += NBPG;
4667 continue; 4677 continue;
4668 } 4678 }
4669 if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) { 4679 if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
4670 /* if cacheable, flush page as needed */ 4680 /* if cacheable, flush page as needed */
4671 if (perpage && (tpte & SRMMU_PG_C)) 4681 if (perpage && (tpte & SRMMU_PG_C))
4672 cache_flush_page(va, 0); 4682 cache_flush_page(va, 0);
4673 if ((pg = pvhead4m(tpte)) != NULL) { 4683 if ((pg = pvhead4m(tpte)) != NULL) {
4674 VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4M(tpte); 4684 VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4M(tpte);
4675 pv_unlink4m(pg, pm, va); 4685 pv_unlink4m(pg, pm, va);
4676 } 4686 }
4677 } 4687 }
4678 setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], 4688 setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
4679 SRMMU_TEINVALID, 1, 0, CPUSET_ALL); 4689 SRMMU_TEINVALID, 1, 0, CPUSET_ALL);
4680 pm->pm_stats.resident_count--; 4690 pm->pm_stats.resident_count--;
4681 va += NBPG; 4691 va += NBPG;
4682 } 4692 }
4683} 4693}
4684#endif /* SUN4M || SUN4D */ 4694#endif /* SUN4M || SUN4D */
4685 4695
4686#if defined(SUN4) || defined(SUN4C) 4696#if defined(SUN4) || defined(SUN4C)
4687 4697
4688/* remove from user */ 4698/* remove from user */
4689/*static*/ void 4699/*static*/ void
4690pmap_rmu4_4c(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs) 4700pmap_rmu4_4c(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
4691{ 4701{
4692 int *ptep, pteva, pte, perpage, npg; 4702 int *ptep, pteva, pte, perpage, npg;
4693 struct vm_page *pg; 4703 struct vm_page *pg;
4694 int nleft, pmeg, inmmu; 4704 int nleft, pmeg, inmmu;
4695 struct regmap *rp; 4705 struct regmap *rp;
4696 struct segmap *sp; 4706 struct segmap *sp;
4697 4707
4698 rp = &pm->pm_regmap[vr]; 4708 rp = &pm->pm_regmap[vr];
4699 if (rp->rg_nsegmap == 0) 4709 if (rp->rg_nsegmap == 0)
4700 return; 4710 return;
4701 sp = &rp->rg_segmap[vs]; 4711 sp = &rp->rg_segmap[vs];
4702 if ((nleft = sp->sg_npte) == 0) 4712 if ((nleft = sp->sg_npte) == 0)
4703 return; 4713 return;
4704 pmeg = sp->sg_pmeg; 4714 pmeg = sp->sg_pmeg;
4705 inmmu = pmeg != seginval; 4715 inmmu = pmeg != seginval;
4706 4716
4707 /* 4717 /*
4708 * PTEs are in MMU. Invalidate in hardware, update ref & 4718 * PTEs are in MMU. Invalidate in hardware, update ref &
4709 * mod bits, and flush cache if required. 4719 * mod bits, and flush cache if required.
4710 */ 4720 */
4711 if (!inmmu) { 4721 if (!inmmu) {
4712 perpage = 0; 4722 perpage = 0;
4713 pteva = 0; 4723 pteva = 0;
4714 } else if (CTX_USABLE(pm,rp)) { 4724 } else if (CTX_USABLE(pm,rp)) {
4715 /* process has a context, must flush cache */ 4725 /* process has a context, must flush cache */
4716 npg = (endva - va) >> PGSHIFT; 4726 npg = (endva - va) >> PGSHIFT;
4717 setcontext4(pm->pm_ctxnum); 4727 setcontext4(pm->pm_ctxnum);
4718 if ((pm->pm_flags & PMAP_USERCACHECLEAN) != 0) 4728 if ((pm->pm_flags & PMAP_USERCACHECLEAN) != 0)
4719 perpage = 0; 4729 perpage = 0;
4720 else if (npg > PMAP_SFL_THRESHOLD) { 4730 else if (npg > PMAP_SFL_THRESHOLD) {
4721 perpage = 0; /* flush the whole segment */ 4731 perpage = 0; /* flush the whole segment */
4722 cache_flush_segment(vr, vs, pm->pm_ctxnum); 4732 cache_flush_segment(vr, vs, pm->pm_ctxnum);
4723 } else 4733 } else
4724 perpage = (CACHEINFO.c_vactype != VAC_NONE); 4734 perpage = (CACHEINFO.c_vactype != VAC_NONE);
4725 pteva = va; 4735 pteva = va;
4726 } else { 4736 } else {
4727 /* no context, use context 0; cache flush unnecessary */ 4737 /* no context, use context 0; cache flush unnecessary */
4728 setcontext4(0); 4738 setcontext4(0);
4729 if (HASSUN4_MMU3L) 4739 if (HASSUN4_MMU3L)
4730 setregmap(0, tregion); 4740 setregmap(0, tregion);
4731 /* XXX use per-CPU pteva? */ 4741 /* XXX use per-CPU pteva? */
4732 setsegmap(0, pmeg); 4742 setsegmap(0, pmeg);
4733 pteva = VA_VPG(va) << PGSHIFT; 4743 pteva = VA_VPG(va) << PGSHIFT;
4734 perpage = 0; 4744 perpage = 0;
4735 } 4745 }
4736 4746
4737 ptep = sp->sg_pte + VA_VPG(va); 4747 ptep = sp->sg_pte + VA_VPG(va);
4738 for (; va < endva; ptep++, pteva += NBPG, va += NBPG) { 4748 for (; va < endva; ptep++, pteva += NBPG, va += NBPG) {
4739 int mmupte; 4749 int mmupte;
4740 pte = *ptep; 4750 pte = *ptep;
4741 mmupte = inmmu ? getpte4(pteva) : 0; 4751 mmupte = inmmu ? getpte4(pteva) : 0;
4742 4752
4743 if ((pte & PG_V) == 0) { 4753 if ((pte & PG_V) == 0) {
4744#ifdef DIAGNOSTIC 4754#ifdef DIAGNOSTIC
4745 if (inmmu && (mmupte & PG_V) != 0) 4755 if (inmmu && (mmupte & PG_V) != 0)
4746 printf("pmap_rmu: pte=%x, mmupte=%x\n", 4756 printf("pmap_rmu: pte=%x, mmupte=%x\n",
4747 pte, getpte4(pteva)); 4757 pte, getpte4(pteva));
4748#endif 4758#endif
4749 continue; 4759 continue;
4750 } 4760 }
4751 if ((pte & PG_TYPE) == PG_OBMEM) { 4761 if ((pte & PG_TYPE) == PG_OBMEM) {
4752 /* if cacheable, flush page as needed */ 4762 /* if cacheable, flush page as needed */
4753 if (perpage && (mmupte & PG_NC) == 0) 4763 if (perpage && (mmupte & PG_NC) == 0)
4754 cache_flush_page(va, pm->pm_ctxnum); 4764 cache_flush_page(va, pm->pm_ctxnum);
4755 if ((pg = pvhead4_4c(pte)) != NULL) { 4765 if ((pg = pvhead4_4c(pte)) != NULL) {
4756 if (inmmu) 4766 if (inmmu)
4757 VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(mmupte); 4767 VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(mmupte);
4758 pv_unlink4_4c(pg, pm, va); 4768 pv_unlink4_4c(pg, pm, va);
4759 } 4769 }
4760 } 4770 }
4761 nleft--; 4771 nleft--;
4762#ifdef DIAGNOSTIC 4772#ifdef DIAGNOSTIC
4763 if (nleft < 0) 4773 if (nleft < 0)
4764 panic("pmap_rmu: too many PTEs in segment; " 4774 panic("pmap_rmu: too many PTEs in segment; "
4765 "va 0x%lx; endva 0x%lx", va, endva); 4775 "va 0x%lx; endva 0x%lx", va, endva);
4766#endif 4776#endif
4767 if (inmmu) 4777 if (inmmu)
4768 setpte4(pteva, 0); 4778 setpte4(pteva, 0);
4769 4779
4770 if (pte & PG_WIRED) { 4780 if (pte & PG_WIRED) {
4771 sp->sg_nwired--; 4781 sp->sg_nwired--;
4772 pm->pm_stats.wired_count--; 4782 pm->pm_stats.wired_count--;
4773 } 4783 }
4774 *ptep = 0; 4784 *ptep = 0;
4775 pm->pm_stats.resident_count--; 4785 pm->pm_stats.resident_count--;
4776 } 4786 }
4777 4787
4778#ifdef DIAGNOSTIC 4788#ifdef DIAGNOSTIC
4779 if (sp->sg_nwired > nleft || sp->sg_nwired < 0) 4789 if (sp->sg_nwired > nleft || sp->sg_nwired < 0)
4780 panic("pmap_rmu: pm %p, va %lx: nleft=%d, nwired=%d", 4790 panic("pmap_rmu: pm %p, va %lx: nleft=%d, nwired=%d",
4781 pm, va, nleft, sp->sg_nwired); 4791 pm, va, nleft, sp->sg_nwired);
4782#endif 4792#endif
4783 if ((sp->sg_npte = nleft) == 0) 4793 if ((sp->sg_npte = nleft) == 0)
4784 pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs); 4794 pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
4785 else if (sp->sg_nwired == 0) { 4795 else if (sp->sg_nwired == 0) {
4786 if (sp->sg_pmeg != seginval) 4796 if (sp->sg_pmeg != seginval)
4787 mmu_pmeg_unlock(sp->sg_pmeg); 4797 mmu_pmeg_unlock(sp->sg_pmeg);
4788 } 4798 }
4789} 4799}
4790 4800
4791#endif /* SUN4 || SUN4C */ 4801#endif /* SUN4 || SUN4C */
4792 4802
4793#if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_rmu */ 4803#if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_rmu */
4794/* remove from user */ 4804/* remove from user */
4795/* Note: pm is already locked */ 4805/* Note: pm is already locked */
4796/*static*/ void 4806/*static*/ void
4797pmap_rmu4m(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs) 4807pmap_rmu4m(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
4798{ 4808{
4799 int *pte0, perpage, npg; 4809 int *pte0, perpage, npg;
4800 struct vm_page *pg; 4810 struct vm_page *pg;
4801 int nleft; 4811 int nleft;
4802 struct regmap *rp; 4812 struct regmap *rp;
4803 struct segmap *sp; 4813 struct segmap *sp;
4804 4814
4805 rp = &pm->pm_regmap[vr]; 4815 rp = &pm->pm_regmap[vr];
4806 if (rp->rg_nsegmap == 0) 4816 if (rp->rg_nsegmap == 0)
4807 return; 4817 return;
4808 sp = &rp->rg_segmap[vs]; 4818 sp = &rp->rg_segmap[vs];
4809 if ((nleft = sp->sg_npte) == 0) 4819 if ((nleft = sp->sg_npte) == 0)
4810 return; 4820 return;
4811 pte0 = sp->sg_pte; 4821 pte0 = sp->sg_pte;
4812 4822
4813 /* 4823 /*
4814 * Invalidate PTE in MMU pagetables. Flush cache if necessary. 4824 * Invalidate PTE in MMU pagetables. Flush cache if necessary.
4815 */ 4825 */
4816 if (pm->pm_ctx && (pm->pm_flags & PMAP_USERCACHECLEAN) == 0) { 4826 if (pm->pm_ctx && (pm->pm_flags & PMAP_USERCACHECLEAN) == 0) {
4817 /* process has a context, must flush cache */ 4827 /* process has a context, must flush cache */
4818 if (CACHEINFO.c_vactype != VAC_NONE) { 4828 if (CACHEINFO.c_vactype != VAC_NONE) {
4819 npg = (endva - va) >> PGSHIFT; 4829 npg = (endva - va) >> PGSHIFT;
4820 if (npg > PMAP_SFL_THRESHOLD) { 4830 if (npg > PMAP_SFL_THRESHOLD) {
4821 perpage = 0; /* flush the whole segment */ 4831 perpage = 0; /* flush the whole segment */