Thu Jan 9 20:13:54 2014 UTC (palle)

Coding style fixes (whitespace) - no functional changes
diff -r1.107 -r1.108 src/sys/arch/sparc64/sparc64/cpu.c
diff -r1.285 -r1.286 src/sys/arch/sparc64/sparc64/pmap.c

cvs diff -r1.107 -r1.108 src/sys/arch/sparc64/sparc64/cpu.c

--- src/sys/arch/sparc64/sparc64/cpu.c 2014/01/07 20:11:35 1.107
+++ src/sys/arch/sparc64/sparc64/cpu.c 2014/01/09 20:13:54 1.108
@@ -1,540 +1,540 @@
1/* $NetBSD: cpu.c,v 1.107 2014/01/07 20:11:35 palle Exp $ */ 1/* $NetBSD: cpu.c,v 1.108 2014/01/09 20:13:54 palle Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996 4 * Copyright (c) 1996
5 * The President and Fellows of Harvard College. All rights reserved. 5 * The President and Fellows of Harvard College. All rights reserved.
6 * Copyright (c) 1992, 1993 6 * Copyright (c) 1992, 1993
7 * The Regents of the University of California. All rights reserved. 7 * The Regents of the University of California. All rights reserved.
8 * 8 *
9 * This software was developed by the Computer Systems Engineering group 9 * This software was developed by the Computer Systems Engineering group
10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11 * contributed to Berkeley. 11 * contributed to Berkeley.
12 * 12 *
13 * All advertising materials mentioning features or use of this software 13 * All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement: 14 * must display the following acknowledgement:
15 * This product includes software developed by Harvard University. 15 * This product includes software developed by Harvard University.
16 * This product includes software developed by the University of 16 * This product includes software developed by the University of
17 * California, Lawrence Berkeley Laboratory. 17 * California, Lawrence Berkeley Laboratory.
18 * 18 *
19 * Redistribution and use in source and binary forms, with or without 19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions 20 * modification, are permitted provided that the following conditions
21 * are met: 21 * are met:
22 * 22 *
23 * 1. Redistributions of source code must retain the above copyright 23 * 1. Redistributions of source code must retain the above copyright
24 * notice, this list of conditions and the following disclaimer. 24 * notice, this list of conditions and the following disclaimer.
25 * 2. Redistributions in binary form must reproduce the above copyright 25 * 2. Redistributions in binary form must reproduce the above copyright
26 * notice, this list of conditions and the following disclaimer in the 26 * notice, this list of conditions and the following disclaimer in the
27 * documentation and/or other materials provided with the distribution. 27 * documentation and/or other materials provided with the distribution.
28 * 3. All advertising materials mentioning features or use of this software 28 * 3. All advertising materials mentioning features or use of this software
29 * must display the following acknowledgement: 29 * must display the following acknowledgement:
30 * This product includes software developed by Aaron Brown and 30 * This product includes software developed by Aaron Brown and
31 * Harvard University. 31 * Harvard University.
32 * This product includes software developed by the University of 32 * This product includes software developed by the University of
33 * California, Berkeley and its contributors. 33 * California, Berkeley and its contributors.
34 * 4. Neither the name of the University nor the names of its contributors 34 * 4. Neither the name of the University nor the names of its contributors
35 * may be used to endorse or promote products derived from this software 35 * may be used to endorse or promote products derived from this software
36 * without specific prior written permission. 36 * without specific prior written permission.
37 * 37 *
38 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 38 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 39 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 40 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 41 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48 * SUCH DAMAGE. 48 * SUCH DAMAGE.
49 * 49 *
50 * @(#)cpu.c 8.5 (Berkeley) 11/23/93 50 * @(#)cpu.c 8.5 (Berkeley) 11/23/93
51 * 51 *
52 */ 52 */
53 53
54#include <sys/cdefs.h> 54#include <sys/cdefs.h>
55__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.107 2014/01/07 20:11:35 palle Exp $"); 55__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.108 2014/01/09 20:13:54 palle Exp $");
56 56
57#include "opt_multiprocessor.h" 57#include "opt_multiprocessor.h"
58 58
59#include <sys/param.h> 59#include <sys/param.h>
60#include <sys/systm.h> 60#include <sys/systm.h>
61#include <sys/device.h> 61#include <sys/device.h>
62#include <sys/kernel.h> 62#include <sys/kernel.h>
63#include <sys/reboot.h> 63#include <sys/reboot.h>
64 64
65#include <uvm/uvm.h> 65#include <uvm/uvm.h>
66 66
67#include <machine/autoconf.h> 67#include <machine/autoconf.h>
68#include <machine/cpu.h> 68#include <machine/cpu.h>
69#include <machine/reg.h> 69#include <machine/reg.h>
70#include <machine/trap.h> 70#include <machine/trap.h>
71#include <machine/pmap.h> 71#include <machine/pmap.h>
72#include <machine/sparc64.h> 72#include <machine/sparc64.h>
73#include <machine/openfirm.h> 73#include <machine/openfirm.h>
74 74
75#include <sparc64/sparc64/cache.h> 75#include <sparc64/sparc64/cache.h>
76#ifdef SUN4V 76#ifdef SUN4V
77#include <sparc64/hypervisor.h> 77#include <sparc64/hypervisor.h>
78#endif 78#endif
79 79
80int ecache_min_line_size; 80int ecache_min_line_size;
81 81
82/* Linked list of all CPUs in system. */ 82/* Linked list of all CPUs in system. */
83#if defined(MULTIPROCESSOR) 83#if defined(MULTIPROCESSOR)
84int sparc_ncpus = 0; 84int sparc_ncpus = 0;
85#endif 85#endif
86struct cpu_info *cpus = NULL; 86struct cpu_info *cpus = NULL;
87 87
88volatile sparc64_cpuset_t cpus_active;/* set of active cpus */ 88volatile sparc64_cpuset_t cpus_active;/* set of active cpus */
89struct cpu_bootargs *cpu_args; /* allocated very early in pmap_bootstrap. */ 89struct cpu_bootargs *cpu_args; /* allocated very early in pmap_bootstrap. */
90struct pool_cache *fpstate_cache; 90struct pool_cache *fpstate_cache;
91 91
92static struct cpu_info *alloc_cpuinfo(u_int); 92static struct cpu_info *alloc_cpuinfo(u_int);
93 93
94/* The following are used externally (sysctl_hw). */ 94/* The following are used externally (sysctl_hw). */
95char machine[] = MACHINE; /* from <machine/param.h> */ 95char machine[] = MACHINE; /* from <machine/param.h> */
96char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */ 96char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */
97char cpu_model[100]; /* machine model (primary CPU) */ 97char cpu_model[100]; /* machine model (primary CPU) */
98 98
99/* These are used in locore.s, and are maximums */ 99/* These are used in locore.s, and are maximums */
100int dcache_line_size; 100int dcache_line_size;
101int dcache_size; 101int dcache_size;
102int icache_line_size; 102int icache_line_size;
103int icache_size; 103int icache_size;
104 104
105#ifdef MULTIPROCESSOR 105#ifdef MULTIPROCESSOR
106static const char *ipi_evcnt_names[IPI_EVCNT_NUM] = IPI_EVCNT_NAMES; 106static const char *ipi_evcnt_names[IPI_EVCNT_NUM] = IPI_EVCNT_NAMES;
107#endif 107#endif
108 108
109static void cpu_reset_fpustate(void); 109static void cpu_reset_fpustate(void);
110 110
111volatile int sync_tick = 0; 111volatile int sync_tick = 0;
112 112
113/* The CPU configuration driver. */ 113/* The CPU configuration driver. */
114void cpu_attach(device_t, device_t, void *); 114void cpu_attach(device_t, device_t, void *);
115int cpu_match(device_t, cfdata_t, void *); 115int cpu_match(device_t, cfdata_t, void *);
116 116
117CFATTACH_DECL_NEW(cpu, 0, cpu_match, cpu_attach, NULL, NULL); 117CFATTACH_DECL_NEW(cpu, 0, cpu_match, cpu_attach, NULL, NULL);
118 118
119static int 119static int
120upaid_from_node(u_int cpu_node) 120upaid_from_node(u_int cpu_node)
121{ 121{
122 int portid; 122 int portid;
123 123
124 if (OF_getprop(cpu_node, "upa-portid", &portid, sizeof(portid)) <= 0 && 124 if (OF_getprop(cpu_node, "upa-portid", &portid, sizeof(portid)) <= 0 &&
125 OF_getprop(cpu_node, "portid", &portid, sizeof(portid)) <= 0) 125 OF_getprop(cpu_node, "portid", &portid, sizeof(portid)) <= 0)
126 panic("cpu node w/o upa-portid"); 126 panic("cpu node w/o upa-portid");
127 127
128 return portid; 128 return portid;
129} 129}
130 130
131struct cpu_info * 131struct cpu_info *
132alloc_cpuinfo(u_int cpu_node) 132alloc_cpuinfo(u_int cpu_node)
133{ 133{
134 paddr_t pa0, pa; 134 paddr_t pa0, pa;
135 vaddr_t va, va0; 135 vaddr_t va, va0;
136 vsize_t sz = 8 * PAGE_SIZE; 136 vsize_t sz = 8 * PAGE_SIZE;
137 int portid; 137 int portid;
138 struct cpu_info *cpi, *ci; 138 struct cpu_info *cpi, *ci;
139 extern paddr_t cpu0paddr; 139 extern paddr_t cpu0paddr;
140 140
141 /* 141 /*
142 * Check for UPAID in the cpus list. 142 * Check for UPAID in the cpus list.
143 */ 143 */
144 portid = upaid_from_node(cpu_node); 144 portid = upaid_from_node(cpu_node);
145 145
146 for (cpi = cpus; cpi != NULL; cpi = cpi->ci_next) 146 for (cpi = cpus; cpi != NULL; cpi = cpi->ci_next)
147 if (cpi->ci_cpuid == portid) 147 if (cpi->ci_cpuid == portid)
148 return cpi; 148 return cpi;
149 149
150 /* Allocate the aligned VA and determine the size. */ 150 /* Allocate the aligned VA and determine the size. */
151 va = uvm_km_alloc(kernel_map, sz, 8 * PAGE_SIZE, UVM_KMF_VAONLY); 151 va = uvm_km_alloc(kernel_map, sz, 8 * PAGE_SIZE, UVM_KMF_VAONLY);
152 if (!va) 152 if (!va)
153 panic("alloc_cpuinfo: no virtual space"); 153 panic("alloc_cpuinfo: no virtual space");
154 va0 = va; 154 va0 = va;
155 155
156 pa0 = cpu0paddr; 156 pa0 = cpu0paddr;
157 cpu0paddr += sz; 157 cpu0paddr += sz;
158 158
159 for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE) 159 for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE)
160 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0); 160 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
161 161
162 pmap_update(pmap_kernel()); 162 pmap_update(pmap_kernel());
163 163
164 cpi = (struct cpu_info *)(va0 + CPUINFO_VA - INTSTACK); 164 cpi = (struct cpu_info *)(va0 + CPUINFO_VA - INTSTACK);
165 165
166 memset((void *)va0, 0, sz); 166 memset((void *)va0, 0, sz);
167 167
168 /* 168 /*
169 * Initialize cpuinfo structure. 169 * Initialize cpuinfo structure.
170 * 170 *
171 * Arrange pcb, idle stack and interrupt stack in the same 171 * Arrange pcb, idle stack and interrupt stack in the same
172 * way as is done for the boot CPU in pmap.c. 172 * way as is done for the boot CPU in pmap.c.
173 */ 173 */
174 cpi->ci_next = NULL; 174 cpi->ci_next = NULL;
175 cpi->ci_curlwp = NULL; 175 cpi->ci_curlwp = NULL;
176 cpi->ci_cpuid = portid; 176 cpi->ci_cpuid = portid;
177 cpi->ci_fplwp = NULL; 177 cpi->ci_fplwp = NULL;
178 cpi->ci_eintstack = NULL; 178 cpi->ci_eintstack = NULL;
179 cpi->ci_spinup = NULL; 179 cpi->ci_spinup = NULL;
180 cpi->ci_paddr = pa0; 180 cpi->ci_paddr = pa0;
181 cpi->ci_self = cpi; 181 cpi->ci_self = cpi;
182#ifdef SUN4V 182#ifdef SUN4V
183 if ( CPU_ISSUN4V ) 183 if (CPU_ISSUN4V)
184 cpi->ci_mmfsa = pa0; 184 cpi->ci_mmfsa = pa0;
185#endif 185#endif
186 cpi->ci_node = cpu_node; 186 cpi->ci_node = cpu_node;
187 cpi->ci_idepth = -1; 187 cpi->ci_idepth = -1;
188 memset(cpi->ci_intrpending, -1, sizeof(cpi->ci_intrpending)); 188 memset(cpi->ci_intrpending, -1, sizeof(cpi->ci_intrpending));
189 189
190 /* 190 /*
191 * Finally, add itself to the list of active cpus. 191 * Finally, add itself to the list of active cpus.
192 */ 192 */
193 for (ci = cpus; ci->ci_next != NULL; ci = ci->ci_next) 193 for (ci = cpus; ci->ci_next != NULL; ci = ci->ci_next)
194 ; 194 ;
195#ifdef MULTIPROCESSOR 195#ifdef MULTIPROCESSOR
196 ci->ci_next = cpi; 196 ci->ci_next = cpi;
197#endif 197#endif
198 return (cpi); 198 return (cpi);
199} 199}
200 200
201int 201int
202cpu_match(device_t parent, cfdata_t cf, void *aux) 202cpu_match(device_t parent, cfdata_t cf, void *aux)
203{ 203{
204 struct mainbus_attach_args *ma = aux; 204 struct mainbus_attach_args *ma = aux;
205 205
206 if (strcmp(cf->cf_name, ma->ma_name) != 0) 206 if (strcmp(cf->cf_name, ma->ma_name) != 0)
207 return 0; 207 return 0;
208 208
209 /* 209 /*
210 * If we are going to only attach a single cpu, make sure 210 * If we are going to only attach a single cpu, make sure
211 * to pick the one we are running on right now. 211 * to pick the one we are running on right now.
212 */ 212 */
213 if (upaid_from_node(ma->ma_node) != CPU_UPAID) { 213 if (upaid_from_node(ma->ma_node) != CPU_UPAID) {
214#ifdef MULTIPROCESSOR 214#ifdef MULTIPROCESSOR
215 if (boothowto & RB_MD1) 215 if (boothowto & RB_MD1)
216#endif 216#endif
217 return 0; 217 return 0;
218 } 218 }
219 219
220 return 1; 220 return 1;
221} 221}
222 222
223static void 223static void
224cpu_reset_fpustate(void) 224cpu_reset_fpustate(void)
225{ 225{
226 struct fpstate64 *fpstate; 226 struct fpstate64 *fpstate;
227 struct fpstate64 fps[2]; 227 struct fpstate64 fps[2];
228 228
229 /* This needs to be 64-byte aligned */ 229 /* This needs to be 64-byte aligned */
230 fpstate = ALIGNFPSTATE(&fps[1]); 230 fpstate = ALIGNFPSTATE(&fps[1]);
231 231
232 /* 232 /*
233 * Get the FSR and clear any exceptions. If we do not unload 233 * Get the FSR and clear any exceptions. If we do not unload
234 * the queue here and it is left over from a previous crash, we 234 * the queue here and it is left over from a previous crash, we
235 * will panic in the first loadfpstate(), due to a sequence error, 235 * will panic in the first loadfpstate(), due to a sequence error,
236 * so we need to dump the whole state anyway. 236 * so we need to dump the whole state anyway.
237 */ 237 */
238 fpstate->fs_fsr = 7 << FSR_VER_SHIFT; /* 7 is reserved for "none" */ 238 fpstate->fs_fsr = 7 << FSR_VER_SHIFT; /* 7 is reserved for "none" */
239 savefpstate(fpstate); 239 savefpstate(fpstate);
240} 240}
241 241
242/* 242/*
243 * Attach the CPU. 243 * Attach the CPU.
244 * Discover interesting goop about the virtual address cache 244 * Discover interesting goop about the virtual address cache
245 * (slightly funny place to do it, but this is where it is to be found). 245 * (slightly funny place to do it, but this is where it is to be found).
246 */ 246 */
247void 247void
248cpu_attach(device_t parent, device_t dev, void *aux) 248cpu_attach(device_t parent, device_t dev, void *aux)
249{ 249{
250 int node; 250 int node;
251 long clk, sclk = 0; 251 long clk, sclk = 0;
252 struct mainbus_attach_args *ma = aux; 252 struct mainbus_attach_args *ma = aux;
253 struct cpu_info *ci; 253 struct cpu_info *ci;
254 const char *sep; 254 const char *sep;
255 register int i, l; 255 register int i, l;
256 int bigcache, cachesize; 256 int bigcache, cachesize;
257 char buf[100]; 257 char buf[100];
258 int totalsize = 0; 258 int totalsize = 0;
259 int linesize, dcachesize, icachesize; 259 int linesize, dcachesize, icachesize;
260 260
261 /* tell them what we have */ 261 /* tell them what we have */
262 node = ma->ma_node; 262 node = ma->ma_node;
263 263
264 /* 264 /*
265 * Allocate cpu_info structure if needed. 265 * Allocate cpu_info structure if needed.
266 */ 266 */
267 ci = alloc_cpuinfo((u_int)node); 267 ci = alloc_cpuinfo((u_int)node);
268 268
269 /* 269 /*
270 * Only do this on the boot cpu. Other cpu's call 270 * Only do this on the boot cpu. Other cpu's call
271 * cpu_reset_fpustate() from cpu_hatch() before they 271 * cpu_reset_fpustate() from cpu_hatch() before they
272 * call into the idle loop. 272 * call into the idle loop.
273 * For other cpus, we need to call mi_cpu_attach() 273 * For other cpus, we need to call mi_cpu_attach()
274 * and complete setting up cpcb. 274 * and complete setting up cpcb.
275 */ 275 */
276 if (ci->ci_flags & CPUF_PRIMARY) { 276 if (ci->ci_flags & CPUF_PRIMARY) {
277 fpstate_cache = pool_cache_init(sizeof(struct fpstate64), 277 fpstate_cache = pool_cache_init(sizeof(struct fpstate64),
278 SPARC64_BLOCK_SIZE, 0, 0, "fpstate", 278 SPARC64_BLOCK_SIZE, 0, 0, "fpstate",
279 NULL, IPL_NONE, NULL, NULL, NULL); 279 NULL, IPL_NONE, NULL, NULL, NULL);
280 cpu_reset_fpustate(); 280 cpu_reset_fpustate();
281 } 281 }
282#ifdef MULTIPROCESSOR 282#ifdef MULTIPROCESSOR
283 else { 283 else {
284 mi_cpu_attach(ci); 284 mi_cpu_attach(ci);
285 ci->ci_cpcb = lwp_getpcb(ci->ci_data.cpu_idlelwp); 285 ci->ci_cpcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
286 } 286 }
287 for (i = 0; i < IPI_EVCNT_NUM; ++i) 287 for (i = 0; i < IPI_EVCNT_NUM; ++i)
288 evcnt_attach_dynamic(&ci->ci_ipi_evcnt[i], EVCNT_TYPE_INTR, 288 evcnt_attach_dynamic(&ci->ci_ipi_evcnt[i], EVCNT_TYPE_INTR,
289 NULL, device_xname(dev), ipi_evcnt_names[i]); 289 NULL, device_xname(dev), ipi_evcnt_names[i]);
290#endif 290#endif
291 evcnt_attach_dynamic(&ci->ci_tick_evcnt, EVCNT_TYPE_INTR, NULL, 291 evcnt_attach_dynamic(&ci->ci_tick_evcnt, EVCNT_TYPE_INTR, NULL,
292 device_xname(dev), "timer"); 292 device_xname(dev), "timer");
293 mutex_init(&ci->ci_ctx_lock, MUTEX_SPIN, IPL_VM); 293 mutex_init(&ci->ci_ctx_lock, MUTEX_SPIN, IPL_VM);
294 294
295 clk = prom_getpropint(node, "clock-frequency", 0); 295 clk = prom_getpropint(node, "clock-frequency", 0);
296 if (clk == 0) { 296 if (clk == 0) {
297 /* 297 /*
298 * Try to find it in the OpenPROM root... 298 * Try to find it in the OpenPROM root...
299 */ 299 */
300 clk = prom_getpropint(findroot(), "clock-frequency", 0); 300 clk = prom_getpropint(findroot(), "clock-frequency", 0);
301 } 301 }
302 if (clk) { 302 if (clk) {
303 /* Tell OS what frequency we run on */ 303 /* Tell OS what frequency we run on */
304 ci->ci_cpu_clockrate[0] = clk; 304 ci->ci_cpu_clockrate[0] = clk;
305 ci->ci_cpu_clockrate[1] = clk / 1000000; 305 ci->ci_cpu_clockrate[1] = clk / 1000000;
306 } 306 }
307 307
308 sclk = prom_getpropint(findroot(), "stick-frequency", 0); 308 sclk = prom_getpropint(findroot(), "stick-frequency", 0);
309 309
310 ci->ci_system_clockrate[0] = sclk; 310 ci->ci_system_clockrate[0] = sclk;
311 ci->ci_system_clockrate[1] = sclk / 1000000; 311 ci->ci_system_clockrate[1] = sclk / 1000000;
312 312
313 snprintf(buf, sizeof buf, "%s @ %s MHz", 313 snprintf(buf, sizeof buf, "%s @ %s MHz",
314 prom_getpropstring(node, "name"), clockfreq(clk)); 314 prom_getpropstring(node, "name"), clockfreq(clk));
315 snprintf(cpu_model, sizeof cpu_model, "%s (%s)", machine_model, buf); 315 snprintf(cpu_model, sizeof cpu_model, "%s (%s)", machine_model, buf);
316 316
317 aprint_normal(": %s, UPA id %d\n", buf, ci->ci_cpuid); 317 aprint_normal(": %s, UPA id %d\n", buf, ci->ci_cpuid);
318 aprint_naive("\n"); 318 aprint_naive("\n");
319 319
320 if (ci->ci_system_clockrate[0] != 0) { 320 if (ci->ci_system_clockrate[0] != 0) {
321 aprint_normal_dev(dev, "system tick frequency %d MHz\n",  321 aprint_normal_dev(dev, "system tick frequency %d MHz\n",
322 (int)ci->ci_system_clockrate[1]); 322 (int)ci->ci_system_clockrate[1]);
323 } 323 }
324 aprint_normal_dev(dev, ""); 324 aprint_normal_dev(dev, "");
325 325
326 bigcache = 0; 326 bigcache = 0;
327 327
328 icachesize = prom_getpropint(node, "icache-size", 0); 328 icachesize = prom_getpropint(node, "icache-size", 0);
329 if (icachesize > icache_size) 329 if (icachesize > icache_size)
330 icache_size = icachesize; 330 icache_size = icachesize;
331 linesize = l = prom_getpropint(node, "icache-line-size", 0); 331 linesize = l = prom_getpropint(node, "icache-line-size", 0);
332 if (linesize > icache_line_size) 332 if (linesize > icache_line_size)
333 icache_line_size = linesize; 333 icache_line_size = linesize;
334 334
335 for (i = 0; (1 << i) < l && l; i++) 335 for (i = 0; (1 << i) < l && l; i++)
336 /* void */; 336 /* void */;
337 if ((1 << i) != l && l) 337 if ((1 << i) != l && l)
338 panic("bad icache line size %d", l); 338 panic("bad icache line size %d", l);
339 totalsize = icachesize; 339 totalsize = icachesize;
340 if (totalsize == 0) 340 if (totalsize == 0)
341 totalsize = l * 341 totalsize = l *
342 prom_getpropint(node, "icache-nlines", 64) * 342 prom_getpropint(node, "icache-nlines", 64) *
343 prom_getpropint(node, "icache-associativity", 1); 343 prom_getpropint(node, "icache-associativity", 1);
344 344
345 cachesize = totalsize / 345 cachesize = totalsize /
346 prom_getpropint(node, "icache-associativity", 1); 346 prom_getpropint(node, "icache-associativity", 1);
347 bigcache = cachesize; 347 bigcache = cachesize;
348 348
349 sep = ""; 349 sep = "";
350 if (totalsize > 0) { 350 if (totalsize > 0) {
351 aprint_normal("%s%ldK instruction (%ld b/l)", sep, 351 aprint_normal("%s%ldK instruction (%ld b/l)", sep,
352 (long)totalsize/1024, 352 (long)totalsize/1024,
353 (long)linesize); 353 (long)linesize);
354 sep = ", "; 354 sep = ", ";
355 } 355 }
356 356
357 dcachesize = prom_getpropint(node, "dcache-size", 0); 357 dcachesize = prom_getpropint(node, "dcache-size", 0);
358 if (dcachesize > dcache_size) 358 if (dcachesize > dcache_size)
359 dcache_size = dcachesize; 359 dcache_size = dcachesize;
360 linesize = l = prom_getpropint(node, "dcache-line-size", 0); 360 linesize = l = prom_getpropint(node, "dcache-line-size", 0);
361 if (linesize > dcache_line_size) 361 if (linesize > dcache_line_size)
362 dcache_line_size = linesize; 362 dcache_line_size = linesize;
363 363
364 for (i = 0; (1 << i) < l && l; i++) 364 for (i = 0; (1 << i) < l && l; i++)
365 /* void */; 365 /* void */;
366 if ((1 << i) != l && l) 366 if ((1 << i) != l && l)
367 panic("bad dcache line size %d", l); 367 panic("bad dcache line size %d", l);
368 totalsize = dcachesize; 368 totalsize = dcachesize;
369 if (totalsize == 0) 369 if (totalsize == 0)
370 totalsize = l * 370 totalsize = l *
371 prom_getpropint(node, "dcache-nlines", 128) * 371 prom_getpropint(node, "dcache-nlines", 128) *
372 prom_getpropint(node, "dcache-associativity", 1); 372 prom_getpropint(node, "dcache-associativity", 1);
373 373
374 cachesize = totalsize / 374 cachesize = totalsize /
375 prom_getpropint(node, "dcache-associativity", 1); 375 prom_getpropint(node, "dcache-associativity", 1);
376 if (cachesize > bigcache) 376 if (cachesize > bigcache)
377 bigcache = cachesize; 377 bigcache = cachesize;
378 378
379 if (totalsize > 0) { 379 if (totalsize > 0) {
380 aprint_normal("%s%ldK data (%ld b/l)", sep, 380 aprint_normal("%s%ldK data (%ld b/l)", sep,
381 (long)totalsize/1024, 381 (long)totalsize/1024,
382 (long)linesize); 382 (long)linesize);
383 sep = ", "; 383 sep = ", ";
384 } 384 }
385 385
386 linesize = l = 386 linesize = l =
387 prom_getpropint(node, "ecache-line-size", 0); 387 prom_getpropint(node, "ecache-line-size", 0);
388 for (i = 0; (1 << i) < l && l; i++) 388 for (i = 0; (1 << i) < l && l; i++)
389 /* void */; 389 /* void */;
390 if ((1 << i) != l && l) 390 if ((1 << i) != l && l)
391 panic("bad ecache line size %d", l); 391 panic("bad ecache line size %d", l);
392 totalsize = prom_getpropint(node, "ecache-size", 0); 392 totalsize = prom_getpropint(node, "ecache-size", 0);
393 if (totalsize == 0) 393 if (totalsize == 0)
394 totalsize = l * 394 totalsize = l *
395 prom_getpropint(node, "ecache-nlines", 32768) * 395 prom_getpropint(node, "ecache-nlines", 32768) *
396 prom_getpropint(node, "ecache-associativity", 1); 396 prom_getpropint(node, "ecache-associativity", 1);
397 397
398 cachesize = totalsize / 398 cachesize = totalsize /
399 prom_getpropint(node, "ecache-associativity", 1); 399 prom_getpropint(node, "ecache-associativity", 1);
400 if (cachesize > bigcache) 400 if (cachesize > bigcache)
401 bigcache = cachesize; 401 bigcache = cachesize;
402 402
403 if (totalsize > 0) { 403 if (totalsize > 0) {
404 aprint_normal("%s%ldK external (%ld b/l)", sep, 404 aprint_normal("%s%ldK external (%ld b/l)", sep,
405 (long)totalsize/1024, 405 (long)totalsize/1024,
406 (long)linesize); 406 (long)linesize);
407 } 407 }
408 aprint_normal("\n"); 408 aprint_normal("\n");
409 409
410 if (ecache_min_line_size == 0 || 410 if (ecache_min_line_size == 0 ||
411 linesize < ecache_min_line_size) 411 linesize < ecache_min_line_size)
412 ecache_min_line_size = linesize; 412 ecache_min_line_size = linesize;
413 413
414 /* 414 /*
415 * Now that we know the size of the largest cache on this CPU, 415 * Now that we know the size of the largest cache on this CPU,
416 * re-color our pages. 416 * re-color our pages.
417 */ 417 */
418 uvm_page_recolor(atop(bigcache)); /* XXX */ 418 uvm_page_recolor(atop(bigcache)); /* XXX */
419 419
420} 420}
421 421
422int 422int
423cpu_myid(void) 423cpu_myid(void)
424{ 424{
425 char buf[32]; 425 char buf[32];
426 int impl; 426 int impl;
427 427
428#ifdef SUN4V 428#ifdef SUN4V
429 if (CPU_ISSUN4V) { 429 if (CPU_ISSUN4V) {
430 uint64_t myid; 430 uint64_t myid;
431 hv_cpu_myid(&myid); 431 hv_cpu_myid(&myid);
432 return myid; 432 return myid;
433 } 433 }
434#endif 434#endif
435 if (OF_getprop(findroot(), "name", buf, sizeof(buf)) > 0 && 435 if (OF_getprop(findroot(), "name", buf, sizeof(buf)) > 0 &&
436 strcmp(buf, "SUNW,Ultra-Enterprise-10000") == 0) 436 strcmp(buf, "SUNW,Ultra-Enterprise-10000") == 0)
437 return lduwa(0x1fff40000d0UL, ASI_PHYS_NON_CACHED); 437 return lduwa(0x1fff40000d0UL, ASI_PHYS_NON_CACHED);
438 impl = (getver() & VER_IMPL) >> VER_IMPL_SHIFT; 438 impl = (getver() & VER_IMPL) >> VER_IMPL_SHIFT;
439 switch (impl) { 439 switch (impl) {
440 case IMPL_OLYMPUS_C: 440 case IMPL_OLYMPUS_C:
441 case IMPL_JUPITER: 441 case IMPL_JUPITER:
442 return CPU_JUPITERID; 442 return CPU_JUPITERID;
443 case IMPL_CHEETAH: 443 case IMPL_CHEETAH:
444 case IMPL_CHEETAH_PLUS: 444 case IMPL_CHEETAH_PLUS:
445 case IMPL_JAGUAR: 445 case IMPL_JAGUAR:
446 case IMPL_PANTHER: 446 case IMPL_PANTHER:
447 return CPU_FIREPLANEID; 447 return CPU_FIREPLANEID;
448 default: 448 default:
449 return CPU_UPAID; 449 return CPU_UPAID;
450 } 450 }
451} 451}
452 452
453#if defined(MULTIPROCESSOR) 453#if defined(MULTIPROCESSOR)
454vaddr_t cpu_spinup_trampoline; 454vaddr_t cpu_spinup_trampoline;
455 455
456/* 456/*
457 * Start secondary processors in motion. 457 * Start secondary processors in motion.
458 */ 458 */
459void 459void
460cpu_boot_secondary_processors(void) 460cpu_boot_secondary_processors(void)
461{ 461{
462 int i, pstate; 462 int i, pstate;
463 struct cpu_info *ci; 463 struct cpu_info *ci;
464 464
465 sync_tick = 0; 465 sync_tick = 0;
466 466
467 sparc64_ipi_init(); 467 sparc64_ipi_init();
468 468
469 if (boothowto & RB_MD1) { 469 if (boothowto & RB_MD1) {
470 cpus[0].ci_next = NULL; 470 cpus[0].ci_next = NULL;
471 sparc_ncpus = ncpu = ncpuonline = 1; 471 sparc_ncpus = ncpu = ncpuonline = 1;
472 return; 472 return;
473 } 473 }
474 474
475 for (ci = cpus; ci != NULL; ci = ci->ci_next) { 475 for (ci = cpus; ci != NULL; ci = ci->ci_next) {
476 if (ci->ci_cpuid == CPU_UPAID) 476 if (ci->ci_cpuid == CPU_UPAID)
477 continue; 477 continue;
478 478
479 cpu_pmap_prepare(ci, false); 479 cpu_pmap_prepare(ci, false);
480 cpu_args->cb_node = ci->ci_node; 480 cpu_args->cb_node = ci->ci_node;
481 cpu_args->cb_cpuinfo = ci->ci_paddr; 481 cpu_args->cb_cpuinfo = ci->ci_paddr;
482 membar_Sync(); 482 membar_Sync();
483 483
484 /* Disable interrupts and start another CPU. */ 484 /* Disable interrupts and start another CPU. */
485 pstate = getpstate(); 485 pstate = getpstate();
486 setpstate(PSTATE_KERN); 486 setpstate(PSTATE_KERN);
487 487
488 prom_startcpu(ci->ci_node, (void *)cpu_spinup_trampoline, 0); 488 prom_startcpu(ci->ci_node, (void *)cpu_spinup_trampoline, 0);
489 489
490 for (i = 0; i < 2000; i++) { 490 for (i = 0; i < 2000; i++) {
491 membar_Sync(); 491 membar_Sync();
492 if (CPUSET_HAS(cpus_active, ci->ci_index)) 492 if (CPUSET_HAS(cpus_active, ci->ci_index))
493 break; 493 break;
494 delay(10000); 494 delay(10000);
495 } 495 }
496 496
497 /* synchronize %tick ( to some degree at least ) */ 497 /* synchronize %tick ( to some degree at least ) */
498 delay(1000); 498 delay(1000);
499 sync_tick = 1; 499 sync_tick = 1;
500 membar_Sync(); 500 membar_Sync();
501 settick(0); 501 settick(0);
502 if (ci->ci_system_clockrate[0] != 0) 502 if (ci->ci_system_clockrate[0] != 0)
503 setstick(0); 503 setstick(0);
504 504
505 setpstate(pstate); 505 setpstate(pstate);
506 506
507 if (!CPUSET_HAS(cpus_active, ci->ci_index)) 507 if (!CPUSET_HAS(cpus_active, ci->ci_index))
508 printf("cpu%d: startup failed\n", ci->ci_cpuid); 508 printf("cpu%d: startup failed\n", ci->ci_cpuid);
509 } 509 }
510} 510}
511 511
512void 512void
513cpu_hatch(void) 513cpu_hatch(void)
514{ 514{
515 char *v = (char*)CPUINFO_VA; 515 char *v = (char*)CPUINFO_VA;
516 int i; 516 int i;
517 517
518 for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long)) 518 for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
519 flush(v + i); 519 flush(v + i);
520 520
521 cpu_pmap_init(curcpu()); 521 cpu_pmap_init(curcpu());
522 CPUSET_ADD(cpus_active, cpu_number()); 522 CPUSET_ADD(cpus_active, cpu_number());
523 cpu_reset_fpustate(); 523 cpu_reset_fpustate();
524 curlwp = curcpu()->ci_data.cpu_idlelwp; 524 curlwp = curcpu()->ci_data.cpu_idlelwp;
525 membar_Sync(); 525 membar_Sync();
526 526
527 /* wait for the boot CPU to flip the switch */ 527 /* wait for the boot CPU to flip the switch */
528 while (sync_tick == 0) { 528 while (sync_tick == 0) {
529 /* we do nothing here */ 529 /* we do nothing here */
530 } 530 }
531 settick(0); 531 settick(0);
532 if (curcpu()->ci_system_clockrate[0] != 0) { 532 if (curcpu()->ci_system_clockrate[0] != 0) {
533 setstick(0); 533 setstick(0);
534 stickintr_establish(PIL_CLOCK, stickintr); 534 stickintr_establish(PIL_CLOCK, stickintr);
535 } else { 535 } else {
536 tickintr_establish(PIL_CLOCK, tickintr); 536 tickintr_establish(PIL_CLOCK, tickintr);
537 } 537 }
538 spl0(); 538 spl0();
539} 539}
540#endif /* MULTIPROCESSOR */ 540#endif /* MULTIPROCESSOR */

cvs diff -r1.285 -r1.286 src/sys/arch/sparc64/sparc64/pmap.c

--- src/sys/arch/sparc64/sparc64/pmap.c 2014/01/07 20:11:35 1.285
+++ src/sys/arch/sparc64/sparc64/pmap.c 2014/01/09 20:13:54 1.286
@@ -1,2160 +1,2160 @@
1/* $NetBSD: pmap.c,v 1.285 2014/01/07 20:11:35 palle Exp $ */ 1/* $NetBSD: pmap.c,v 1.286 2014/01/09 20:13:54 palle Exp $ */
2/* 2/*
3 * 3 *
4 * Copyright (C) 1996-1999 Eduardo Horvath. 4 * Copyright (C) 1996-1999 Eduardo Horvath.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE. 24 * SUCH DAMAGE.
25 * 25 *
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.285 2014/01/07 20:11:35 palle Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.286 2014/01/09 20:13:54 palle Exp $");
30 30
31#undef NO_VCACHE /* Don't forget the locked TLB in dostart */ 31#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
32#define HWREF 32#define HWREF
33 33
34#include "opt_ddb.h" 34#include "opt_ddb.h"
35#include "opt_multiprocessor.h" 35#include "opt_multiprocessor.h"
36#include "opt_modular.h" 36#include "opt_modular.h"
37 37
38#include <sys/param.h> 38#include <sys/param.h>
39#include <sys/malloc.h> 39#include <sys/malloc.h>
40#include <sys/queue.h> 40#include <sys/queue.h>
41#include <sys/systm.h> 41#include <sys/systm.h>
42#include <sys/msgbuf.h> 42#include <sys/msgbuf.h>
43#include <sys/pool.h> 43#include <sys/pool.h>
44#include <sys/exec.h> 44#include <sys/exec.h>
45#include <sys/core.h> 45#include <sys/core.h>
46#include <sys/kcore.h> 46#include <sys/kcore.h>
47#include <sys/proc.h> 47#include <sys/proc.h>
48#include <sys/atomic.h> 48#include <sys/atomic.h>
49#include <sys/cpu.h> 49#include <sys/cpu.h>
50 50
51#include <sys/exec_aout.h> /* for MID_* */ 51#include <sys/exec_aout.h> /* for MID_* */
52 52
53#include <uvm/uvm.h> 53#include <uvm/uvm.h>
54 54
55#include <machine/pcb.h> 55#include <machine/pcb.h>
56#include <machine/sparc64.h> 56#include <machine/sparc64.h>
57#include <machine/ctlreg.h> 57#include <machine/ctlreg.h>
58#include <machine/promlib.h> 58#include <machine/promlib.h>
59#include <machine/kcore.h> 59#include <machine/kcore.h>
60#include <machine/bootinfo.h> 60#include <machine/bootinfo.h>
61 61
62#include <sparc64/sparc64/cache.h> 62#include <sparc64/sparc64/cache.h>
63#ifdef SUN4V 63#ifdef SUN4V
64#include <sparc64/hypervisor.h> 64#include <sparc64/hypervisor.h>
65#endif 65#endif
66 66
67#ifdef DDB 67#ifdef DDB
68#include <machine/db_machdep.h> 68#include <machine/db_machdep.h>
69#include <ddb/db_command.h> 69#include <ddb/db_command.h>
70#include <ddb/db_sym.h> 70#include <ddb/db_sym.h>
71#include <ddb/db_variables.h> 71#include <ddb/db_variables.h>
72#include <ddb/db_extern.h> 72#include <ddb/db_extern.h>
73#include <ddb/db_access.h> 73#include <ddb/db_access.h>
74#include <ddb/db_output.h> 74#include <ddb/db_output.h>
75#else 75#else
76#define Debugger() 76#define Debugger()
77#define db_printf printf 77#define db_printf printf
78#endif 78#endif
79 79
80#define MEG (1<<20) /* 1MB */ 80#define MEG (1<<20) /* 1MB */
81#define KB (1<<10) /* 1KB */ 81#define KB (1<<10) /* 1KB */
82 82
83paddr_t cpu0paddr; /* contigious phys memory preallocated for cpus */ 83paddr_t cpu0paddr; /* contigious phys memory preallocated for cpus */
84 84
85/* These routines are in assembly to allow access thru physical mappings */ 85/* These routines are in assembly to allow access thru physical mappings */
86extern int64_t pseg_get_real(struct pmap *, vaddr_t); 86extern int64_t pseg_get_real(struct pmap *, vaddr_t);
87extern int pseg_set_real(struct pmap *, vaddr_t, int64_t, paddr_t); 87extern int pseg_set_real(struct pmap *, vaddr_t, int64_t, paddr_t);
88 88
89/* 89/*
90 * Diatribe on ref/mod counting: 90 * Diatribe on ref/mod counting:
91 * 91 *
92 * First of all, ref/mod info must be non-volatile. Hence we need to keep it 92 * First of all, ref/mod info must be non-volatile. Hence we need to keep it
93 * in the pv_entry structure for each page. (We could bypass this for the 93 * in the pv_entry structure for each page. (We could bypass this for the
94 * vm_page, but that's a long story....) 94 * vm_page, but that's a long story....)
95 * 95 *
96 * This architecture has nice, fast traps with lots of space for software bits 96 * This architecture has nice, fast traps with lots of space for software bits
97 * in the TTE. To accelerate ref/mod counts we make use of these features. 97 * in the TTE. To accelerate ref/mod counts we make use of these features.
98 * 98 *
99 * When we map a page initially, we place a TTE in the page table. It's 99 * When we map a page initially, we place a TTE in the page table. It's
100 * inserted with the TLB_W and TLB_ACCESS bits cleared. If a page is really 100 * inserted with the TLB_W and TLB_ACCESS bits cleared. If a page is really
101 * writable we set the TLB_REAL_W bit for the trap handler. 101 * writable we set the TLB_REAL_W bit for the trap handler.
102 * 102 *
103 * Whenever we take a TLB miss trap, the trap handler will set the TLB_ACCESS 103 * Whenever we take a TLB miss trap, the trap handler will set the TLB_ACCESS
104 * bit in the approprate TTE in the page table. Whenever we take a protection 104 * bit in the approprate TTE in the page table. Whenever we take a protection
105 * fault, if the TLB_REAL_W bit is set then we flip both the TLB_W and TLB_MOD 105 * fault, if the TLB_REAL_W bit is set then we flip both the TLB_W and TLB_MOD
106 * bits to enable writing and mark the page as modified. 106 * bits to enable writing and mark the page as modified.
107 * 107 *
108 * This means that we may have ref/mod information all over the place. The 108 * This means that we may have ref/mod information all over the place. The
109 * pmap routines must traverse the page tables of all pmaps with a given page 109 * pmap routines must traverse the page tables of all pmaps with a given page
110 * and collect/clear all the ref/mod information and copy it into the pv_entry. 110 * and collect/clear all the ref/mod information and copy it into the pv_entry.
111 */ 111 */
112 112
113#ifdef NO_VCACHE 113#ifdef NO_VCACHE
114#define FORCE_ALIAS 1 114#define FORCE_ALIAS 1
115#else 115#else
116#define FORCE_ALIAS 0 116#define FORCE_ALIAS 0
117#endif 117#endif
118 118
119#define PV_ALIAS 0x1LL 119#define PV_ALIAS 0x1LL
120#define PV_REF 0x2LL 120#define PV_REF 0x2LL
121#define PV_MOD 0x4LL 121#define PV_MOD 0x4LL
122#define PV_NVC 0x8LL 122#define PV_NVC 0x8LL
123#define PV_NC 0x10LL 123#define PV_NC 0x10LL
124#define PV_WE 0x20LL /* Debug -- this page was writable somtime */ 124#define PV_WE 0x20LL /* Debug -- this page was writable somtime */
125#define PV_MASK (0x03fLL) 125#define PV_MASK (0x03fLL)
126#define PV_VAMASK (~(PAGE_SIZE - 1)) 126#define PV_VAMASK (~(PAGE_SIZE - 1))
127#define PV_MATCH(pv,va) (!(((pv)->pv_va ^ (va)) & PV_VAMASK)) 127#define PV_MATCH(pv,va) (!(((pv)->pv_va ^ (va)) & PV_VAMASK))
128#define PV_SETVA(pv,va) ((pv)->pv_va = (((va) & PV_VAMASK) | \ 128#define PV_SETVA(pv,va) ((pv)->pv_va = (((va) & PV_VAMASK) | \
129 (((pv)->pv_va) & PV_MASK))) 129 (((pv)->pv_va) & PV_MASK)))
130 130
131struct pool_cache pmap_cache; 131struct pool_cache pmap_cache;
132struct pool_cache pmap_pv_cache; 132struct pool_cache pmap_pv_cache;
133 133
134pv_entry_t pmap_remove_pv(struct pmap *, vaddr_t, struct vm_page *); 134pv_entry_t pmap_remove_pv(struct pmap *, vaddr_t, struct vm_page *);
135void pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, struct vm_page *, 135void pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, struct vm_page *,
136 pv_entry_t); 136 pv_entry_t);
137void pmap_page_cache(struct pmap *, paddr_t, int); 137void pmap_page_cache(struct pmap *, paddr_t, int);
138 138
139/* 139/*
140 * First and last managed physical addresses. 140 * First and last managed physical addresses.
141 * XXX only used for dumping the system. 141 * XXX only used for dumping the system.
142 */ 142 */
143paddr_t vm_first_phys, vm_num_phys; 143paddr_t vm_first_phys, vm_num_phys;
144 144
145/* 145/*
146 * Here's the CPU TSB stuff. It's allocated in pmap_bootstrap. 146 * Here's the CPU TSB stuff. It's allocated in pmap_bootstrap.
147 */ 147 */
148int tsbsize; /* tsbents = 512 * 2^^tsbsize */ 148int tsbsize; /* tsbents = 512 * 2^^tsbsize */
149#define TSBENTS (512<<tsbsize) 149#define TSBENTS (512<<tsbsize)
150#define TSBSIZE (TSBENTS * 16) 150#define TSBSIZE (TSBENTS * 16)
151 151
152#ifdef SUN4V 152#ifdef SUN4V
153struct tsb_desc *tsb_desc; 153struct tsb_desc *tsb_desc;
154#endif 154#endif
155 155
156static struct pmap kernel_pmap_; 156static struct pmap kernel_pmap_;
157struct pmap *const kernel_pmap_ptr = &kernel_pmap_; 157struct pmap *const kernel_pmap_ptr = &kernel_pmap_;
158 158
159static int ctx_alloc(struct pmap *); 159static int ctx_alloc(struct pmap *);
160static bool pmap_is_referenced_locked(struct vm_page *); 160static bool pmap_is_referenced_locked(struct vm_page *);
161 161
162static void ctx_free(struct pmap *, struct cpu_info *); 162static void ctx_free(struct pmap *, struct cpu_info *);
163 163
164/* 164/*
165 * Check if any MMU has a non-zero context 165 * Check if any MMU has a non-zero context
166 */ 166 */
167static inline bool 167static inline bool
168pmap_has_ctx(struct pmap *p) 168pmap_has_ctx(struct pmap *p)
169{ 169{
170 int i; 170 int i;
171 171
172 /* any context on any cpu? */ 172 /* any context on any cpu? */
173 for (i = 0; i < sparc_ncpus; i++) 173 for (i = 0; i < sparc_ncpus; i++)
174 if (p->pm_ctx[i] > 0) 174 if (p->pm_ctx[i] > 0)
175 return true; 175 return true;
176 176
177 return false;  177 return false;
178} 178}
179 179
180#ifdef MULTIPROCESSOR 180#ifdef MULTIPROCESSOR
181#define pmap_ctx(PM) ((PM)->pm_ctx[cpu_number()]) 181#define pmap_ctx(PM) ((PM)->pm_ctx[cpu_number()])
182#else 182#else
183#define pmap_ctx(PM) ((PM)->pm_ctx[0]) 183#define pmap_ctx(PM) ((PM)->pm_ctx[0])
184#endif 184#endif
185 185
186/* 186/*
187 * Check if this pmap has a live mapping on some MMU. 187 * Check if this pmap has a live mapping on some MMU.
188 */ 188 */
189static inline bool 189static inline bool
190pmap_is_on_mmu(struct pmap *p) 190pmap_is_on_mmu(struct pmap *p)
191{ 191{
192 /* The kernel pmap is always on all MMUs */ 192 /* The kernel pmap is always on all MMUs */
193 if (p == pmap_kernel()) 193 if (p == pmap_kernel())
194 return true; 194 return true;
195 195
196 return pmap_has_ctx(p); 196 return pmap_has_ctx(p);
197} 197}
198 198
199/* 199/*
200 * Virtual and physical addresses of the start and end of kernel text 200 * Virtual and physical addresses of the start and end of kernel text
201 * and data segments. 201 * and data segments.
202 */ 202 */
203vaddr_t ktext; 203vaddr_t ktext;
204paddr_t ktextp; 204paddr_t ktextp;
205vaddr_t ektext; 205vaddr_t ektext;
206paddr_t ektextp; 206paddr_t ektextp;
207vaddr_t kdata; 207vaddr_t kdata;
208paddr_t kdatap; 208paddr_t kdatap;
209vaddr_t ekdata; 209vaddr_t ekdata;
210paddr_t ekdatap; 210paddr_t ekdatap;
211 211
212/* 212/*
213 * Kernel 4MB pages. 213 * Kernel 4MB pages.
214 */ 214 */
215extern struct tlb_entry *kernel_tlbs; 215extern struct tlb_entry *kernel_tlbs;
216extern int kernel_tlb_slots; 216extern int kernel_tlb_slots;
217 217
218static int npgs; 218static int npgs;
219 219
220vaddr_t vmmap; /* one reserved MI vpage for /dev/mem */ 220vaddr_t vmmap; /* one reserved MI vpage for /dev/mem */
221 221
222int phys_installed_size; /* Installed physical memory */ 222int phys_installed_size; /* Installed physical memory */
223struct mem_region *phys_installed; 223struct mem_region *phys_installed;
224 224
225paddr_t avail_start, avail_end; /* These are used by ps & family */ 225paddr_t avail_start, avail_end; /* These are used by ps & family */
226 226
227static int ptelookup_va(vaddr_t va); 227static int ptelookup_va(vaddr_t va);
228 228
229static inline void 229static inline void
230clrx(void *addr) 230clrx(void *addr)
231{ 231{
232 __asm volatile("clrx [%0]" : : "r" (addr) : "memory"); 232 __asm volatile("clrx [%0]" : : "r" (addr) : "memory");
233} 233}
234 234
235static void 235static void
236tsb_invalidate(vaddr_t va, pmap_t pm) 236tsb_invalidate(vaddr_t va, pmap_t pm)
237{ 237{
238 struct cpu_info *ci; 238 struct cpu_info *ci;
239 int ctx; 239 int ctx;
240 bool kpm = (pm == pmap_kernel()); 240 bool kpm = (pm == pmap_kernel());
241 int i; 241 int i;
242 int64_t tag; 242 int64_t tag;
243 243
244 i = ptelookup_va(va); 244 i = ptelookup_va(va);
245#ifdef MULTIPROCESSOR 245#ifdef MULTIPROCESSOR
246 for (ci = cpus; ci != NULL; ci = ci->ci_next) { 246 for (ci = cpus; ci != NULL; ci = ci->ci_next) {
247 if (!CPUSET_HAS(cpus_active, ci->ci_index)) 247 if (!CPUSET_HAS(cpus_active, ci->ci_index))
248 continue; 248 continue;
249#else 249#else
250 ci = curcpu(); 250 ci = curcpu();
251#endif 251#endif
252 ctx = pm->pm_ctx[ci->ci_index]; 252 ctx = pm->pm_ctx[ci->ci_index];
253 if (kpm || ctx > 0) { 253 if (kpm || ctx > 0) {
254 tag = TSB_TAG(0, ctx, va); 254 tag = TSB_TAG(0, ctx, va);
255 if (ci->ci_tsb_dmmu[i].tag == tag) { 255 if (ci->ci_tsb_dmmu[i].tag == tag) {
256 clrx(&ci->ci_tsb_dmmu[i].data); 256 clrx(&ci->ci_tsb_dmmu[i].data);
257 } 257 }
258 if (ci->ci_tsb_immu[i].tag == tag) { 258 if (ci->ci_tsb_immu[i].tag == tag) {
259 clrx(&ci->ci_tsb_immu[i].data); 259 clrx(&ci->ci_tsb_immu[i].data);
260 } 260 }
261 } 261 }
262#ifdef MULTIPROCESSOR 262#ifdef MULTIPROCESSOR
263 } 263 }
264#endif 264#endif
265} 265}
266 266
267struct prom_map *prom_map; 267struct prom_map *prom_map;
268int prom_map_size; 268int prom_map_size;
269 269
270#define PDB_CREATE 0x000001 270#define PDB_CREATE 0x000001
271#define PDB_DESTROY 0x000002 271#define PDB_DESTROY 0x000002
272#define PDB_REMOVE 0x000004 272#define PDB_REMOVE 0x000004
273#define PDB_CHANGEPROT 0x000008 273#define PDB_CHANGEPROT 0x000008
274#define PDB_ENTER 0x000010 274#define PDB_ENTER 0x000010
275#define PDB_DEMAP 0x000020 /* used in locore */ 275#define PDB_DEMAP 0x000020 /* used in locore */
276#define PDB_REF 0x000040 276#define PDB_REF 0x000040
277#define PDB_COPY 0x000080 277#define PDB_COPY 0x000080
278#define PDB_MMU_ALLOC 0x000100 278#define PDB_MMU_ALLOC 0x000100
279#define PDB_MMU_STEAL 0x000200 279#define PDB_MMU_STEAL 0x000200
280#define PDB_CTX_ALLOC 0x000400 280#define PDB_CTX_ALLOC 0x000400
281#define PDB_CTX_STEAL 0x000800 281#define PDB_CTX_STEAL 0x000800
282#define PDB_MMUREG_ALLOC 0x001000 282#define PDB_MMUREG_ALLOC 0x001000
283#define PDB_MMUREG_STEAL 0x002000 283#define PDB_MMUREG_STEAL 0x002000
284#define PDB_CACHESTUFF 0x004000 284#define PDB_CACHESTUFF 0x004000
285#define PDB_ALIAS 0x008000 285#define PDB_ALIAS 0x008000
286#define PDB_EXTRACT 0x010000 286#define PDB_EXTRACT 0x010000
287#define PDB_BOOT 0x020000 287#define PDB_BOOT 0x020000
288#define PDB_BOOT1 0x040000 288#define PDB_BOOT1 0x040000
289#define PDB_GROW 0x080000 289#define PDB_GROW 0x080000
290#define PDB_CTX_FLUSHALL 0x100000 290#define PDB_CTX_FLUSHALL 0x100000
291#define PDB_ACTIVATE 0x200000 291#define PDB_ACTIVATE 0x200000
292 292
293#if defined(DEBUG) && !defined(PMAP_DEBUG) 293#if defined(DEBUG) && !defined(PMAP_DEBUG)
294#define PMAP_DEBUG 294#define PMAP_DEBUG
295#endif 295#endif
296 296
297#ifdef PMAP_DEBUG 297#ifdef PMAP_DEBUG
298struct { 298struct {
299 int kernel; /* entering kernel mapping */ 299 int kernel; /* entering kernel mapping */
300 int user; /* entering user mapping */ 300 int user; /* entering user mapping */
301 int ptpneeded; /* needed to allocate a PT page */ 301 int ptpneeded; /* needed to allocate a PT page */
302 int pwchange; /* no mapping change, just wiring or protection */ 302 int pwchange; /* no mapping change, just wiring or protection */
303 int wchange; /* no mapping change, just wiring */ 303 int wchange; /* no mapping change, just wiring */
304 int mchange; /* was mapped but mapping to different page */ 304 int mchange; /* was mapped but mapping to different page */
305 int managed; /* a managed page */ 305 int managed; /* a managed page */
306 int firstpv; /* first mapping for this PA */ 306 int firstpv; /* first mapping for this PA */
307 int secondpv; /* second mapping for this PA */ 307 int secondpv; /* second mapping for this PA */
308 int ci; /* cache inhibited */ 308 int ci; /* cache inhibited */
309 int unmanaged; /* not a managed page */ 309 int unmanaged; /* not a managed page */
310 int flushes; /* cache flushes */ 310 int flushes; /* cache flushes */
311 int cachehit; /* new entry forced valid entry out */ 311 int cachehit; /* new entry forced valid entry out */
312} enter_stats; 312} enter_stats;
313struct { 313struct {
314 int calls; 314 int calls;
315 int removes; 315 int removes;
316 int flushes; 316 int flushes;
317 int tflushes; /* TLB flushes */ 317 int tflushes; /* TLB flushes */
318 int pidflushes; /* HW pid stolen */ 318 int pidflushes; /* HW pid stolen */
319 int pvfirst; 319 int pvfirst;
320 int pvsearch; 320 int pvsearch;
321} remove_stats; 321} remove_stats;
322#define ENTER_STAT(x) do { enter_stats.x ++; } while (0) 322#define ENTER_STAT(x) do { enter_stats.x ++; } while (0)
323#define REMOVE_STAT(x) do { remove_stats.x ++; } while (0) 323#define REMOVE_STAT(x) do { remove_stats.x ++; } while (0)
324 324
325int pmapdebug = 0; 325int pmapdebug = 0;
326//int pmapdebug = 0 | PDB_CTX_ALLOC | PDB_ACTIVATE; 326//int pmapdebug = 0 | PDB_CTX_ALLOC | PDB_ACTIVATE;
327/* Number of H/W pages stolen for page tables */ 327/* Number of H/W pages stolen for page tables */
328int pmap_pages_stolen = 0; 328int pmap_pages_stolen = 0;
329 329
330#define BDPRINTF(n, f) if (pmapdebug & (n)) prom_printf f 330#define BDPRINTF(n, f) if (pmapdebug & (n)) prom_printf f
331#define DPRINTF(n, f) if (pmapdebug & (n)) printf f 331#define DPRINTF(n, f) if (pmapdebug & (n)) printf f
332#else 332#else
333#define ENTER_STAT(x) do { /* nothing */ } while (0) 333#define ENTER_STAT(x) do { /* nothing */ } while (0)
334#define REMOVE_STAT(x) do { /* nothing */ } while (0) 334#define REMOVE_STAT(x) do { /* nothing */ } while (0)
335#define BDPRINTF(n, f) 335#define BDPRINTF(n, f)
336#define DPRINTF(n, f) 336#define DPRINTF(n, f)
337#define pmapdebug 0 337#define pmapdebug 0
338#endif 338#endif
339 339
340#define pv_check() 340#define pv_check()
341 341
342static int pmap_get_page(paddr_t *); 342static int pmap_get_page(paddr_t *);
343static void pmap_free_page(paddr_t, sparc64_cpuset_t); 343static void pmap_free_page(paddr_t, sparc64_cpuset_t);
344static void pmap_free_page_noflush(paddr_t); 344static void pmap_free_page_noflush(paddr_t);
345 345
346/* 346/*
347 * Global pmap locks. 347 * Global pmap locks.
348 */ 348 */
349static kmutex_t pmap_lock; 349static kmutex_t pmap_lock;
350static bool lock_available = false; 350static bool lock_available = false;
351 351
352/* 352/*
353 * Support for big page sizes. This maps the page size to the 353 * Support for big page sizes. This maps the page size to the
354 * page bits. That is: these are the bits between 8K pages and 354 * page bits. That is: these are the bits between 8K pages and
355 * larger page sizes that cause aliasing. 355 * larger page sizes that cause aliasing.
356 */ 356 */
357#define PSMAP_ENTRY(MASK, CODE) { .mask = MASK, .code = CODE } 357#define PSMAP_ENTRY(MASK, CODE) { .mask = MASK, .code = CODE }
358struct page_size_map page_size_map[] = { 358struct page_size_map page_size_map[] = {
359#ifdef DEBUG 359#ifdef DEBUG
360 PSMAP_ENTRY(0, PGSZ_8K & 0), /* Disable large pages */ 360 PSMAP_ENTRY(0, PGSZ_8K & 0), /* Disable large pages */
361#endif 361#endif
362 PSMAP_ENTRY((4 * 1024 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_4M), 362 PSMAP_ENTRY((4 * 1024 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_4M),
363 PSMAP_ENTRY((512 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_512K), 363 PSMAP_ENTRY((512 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_512K),
364 PSMAP_ENTRY((64 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_64K), 364 PSMAP_ENTRY((64 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_64K),
365 PSMAP_ENTRY((8 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_8K), 365 PSMAP_ENTRY((8 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_8K),
366 PSMAP_ENTRY(0, 0), 366 PSMAP_ENTRY(0, 0),
367}; 367};
368 368
369/* 369/*
370 * This probably shouldn't be necessary, but it stops USIII machines from 370 * This probably shouldn't be necessary, but it stops USIII machines from
371 * breaking in general, and not just for MULTIPROCESSOR. 371 * breaking in general, and not just for MULTIPROCESSOR.
372 */ 372 */
373#define USE_LOCKSAFE_PSEG_GETSET 373#define USE_LOCKSAFE_PSEG_GETSET
374#if defined(USE_LOCKSAFE_PSEG_GETSET) 374#if defined(USE_LOCKSAFE_PSEG_GETSET)
375 375
376static kmutex_t pseg_lock; 376static kmutex_t pseg_lock;
377 377
378static __inline__ int64_t 378static __inline__ int64_t
379pseg_get_locksafe(struct pmap *pm, vaddr_t va) 379pseg_get_locksafe(struct pmap *pm, vaddr_t va)
380{ 380{
381 int64_t rv; 381 int64_t rv;
382 bool took_lock = lock_available /*&& pm == pmap_kernel()*/; 382 bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
383 383
384 if (__predict_true(took_lock)) 384 if (__predict_true(took_lock))
385 mutex_enter(&pseg_lock); 385 mutex_enter(&pseg_lock);
386 rv = pseg_get_real(pm, va); 386 rv = pseg_get_real(pm, va);
387 if (__predict_true(took_lock)) 387 if (__predict_true(took_lock))
388 mutex_exit(&pseg_lock); 388 mutex_exit(&pseg_lock);
389 return rv; 389 return rv;
390} 390}
391 391
392static __inline__ int 392static __inline__ int
393pseg_set_locksafe(struct pmap *pm, vaddr_t va, int64_t data, paddr_t ptp) 393pseg_set_locksafe(struct pmap *pm, vaddr_t va, int64_t data, paddr_t ptp)
394{ 394{
395 int rv; 395 int rv;
396 bool took_lock = lock_available /*&& pm == pmap_kernel()*/; 396 bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
397 397
398 if (__predict_true(took_lock)) 398 if (__predict_true(took_lock))
399 mutex_enter(&pseg_lock); 399 mutex_enter(&pseg_lock);
400 rv = pseg_set_real(pm, va, data, ptp); 400 rv = pseg_set_real(pm, va, data, ptp);
401 if (__predict_true(took_lock)) 401 if (__predict_true(took_lock))
402 mutex_exit(&pseg_lock); 402 mutex_exit(&pseg_lock);
403 return rv; 403 return rv;
404} 404}
405 405
406#define pseg_get(pm, va) pseg_get_locksafe(pm, va) 406#define pseg_get(pm, va) pseg_get_locksafe(pm, va)
407#define pseg_set(pm, va, data, ptp) pseg_set_locksafe(pm, va, data, ptp) 407#define pseg_set(pm, va, data, ptp) pseg_set_locksafe(pm, va, data, ptp)
408 408
409#else /* USE_LOCKSAFE_PSEG_GETSET */ 409#else /* USE_LOCKSAFE_PSEG_GETSET */
410 410
411#define pseg_get(pm, va) pseg_get_real(pm, va) 411#define pseg_get(pm, va) pseg_get_real(pm, va)
412#define pseg_set(pm, va, data, ptp) pseg_set_real(pm, va, data, ptp) 412#define pseg_set(pm, va, data, ptp) pseg_set_real(pm, va, data, ptp)
413 413
414#endif /* USE_LOCKSAFE_PSEG_GETSET */ 414#endif /* USE_LOCKSAFE_PSEG_GETSET */
415 415
416/* 416/*
417 * Enter a TTE into the kernel pmap only. Don't do anything else. 417 * Enter a TTE into the kernel pmap only. Don't do anything else.
418 * 418 *
419 * Use only during bootstrapping since it does no locking and 419 * Use only during bootstrapping since it does no locking and
420 * can lose ref/mod info!!!! 420 * can lose ref/mod info!!!!
421 * 421 *
422 */ 422 */
423static void pmap_enter_kpage(vaddr_t va, int64_t data) 423static void pmap_enter_kpage(vaddr_t va, int64_t data)
424{ 424{
425 paddr_t newp; 425 paddr_t newp;
426 426
427 newp = 0UL; 427 newp = 0UL;
428 while (pseg_set(pmap_kernel(), va, data, newp) & 1) { 428 while (pseg_set(pmap_kernel(), va, data, newp) & 1) {
429 if (!pmap_get_page(&newp)) { 429 if (!pmap_get_page(&newp)) {
430 prom_printf("pmap_enter_kpage: out of pages\n"); 430 prom_printf("pmap_enter_kpage: out of pages\n");
431 panic("pmap_enter_kpage"); 431 panic("pmap_enter_kpage");
432 } 432 }
433 433
434 ENTER_STAT(ptpneeded); 434 ENTER_STAT(ptpneeded);
435 BDPRINTF(PDB_BOOT1, 435 BDPRINTF(PDB_BOOT1,
436 ("pseg_set: pm=%p va=%p data=%lx newp %lx\n", 436 ("pseg_set: pm=%p va=%p data=%lx newp %lx\n",
437 pmap_kernel(), va, (long)data, (long)newp)); 437 pmap_kernel(), va, (long)data, (long)newp));
438 if (pmapdebug & PDB_BOOT1) 438 if (pmapdebug & PDB_BOOT1)
439 {int i; for (i=0; i<140000000; i++) ;} 439 {int i; for (i=0; i<140000000; i++) ;}
440 } 440 }
441} 441}
442 442
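pmap_enter_kpage() leans on the convention, visible at its call sites, that pseg_set() returns a value with bit 0 set when it needs the caller to donate a physical page for a page-table level; the caller fetches one with pmap_get_page() and retries. pmap_growkernel() further down uses the same loop. A toy, self-contained illustration of that retry convention (toy_pseg_set() is invented for this sketch and is not the kernel routine):

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative stand-in: report "need a page-table page" (bit 0 set)
 * until the caller supplies one.
 */
static int
toy_pseg_set(int64_t data, uint64_t ptp)
{
	(void)data;                     /* unused in this toy */
	return (ptp == 0) ? 1 : 0;
}

int
main(void)
{
	uint64_t newp = 0;
	int tries = 0;

	while (toy_pseg_set(0x1234, newp) & 1) {
		newp = 0x40000;         /* pretend pmap_get_page() filled this */
		tries++;
	}
	printf("mapping installed after %d extra page(s)\n", tries);
	return 0;
}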
443/* 443/*
444 * Check the bootargs to see if we need to enable bootdebug. 444 * Check the bootargs to see if we need to enable bootdebug.
445 */ 445 */
446#ifdef DEBUG 446#ifdef DEBUG
447static void pmap_bootdebug(void) 447static void pmap_bootdebug(void)
448{ 448{
449 const char *cp = prom_getbootargs(); 449 const char *cp = prom_getbootargs();
450 450
451 for (;;) 451 for (;;)
452 switch (*++cp) { 452 switch (*++cp) {
453 case '\0': 453 case '\0':
454 return; 454 return;
455 case 'V': 455 case 'V':
456 pmapdebug |= PDB_BOOT|PDB_BOOT1; 456 pmapdebug |= PDB_BOOT|PDB_BOOT1;
457 break; 457 break;
458 case 'D': 458 case 'D':
459 pmapdebug |= PDB_BOOT1; 459 pmapdebug |= PDB_BOOT1;
460 break; 460 break;
461 } 461 }
462} 462}
463#else 463#else
464#define pmap_bootdebug() /* nothing */ 464#define pmap_bootdebug() /* nothing */
465#endif 465#endif
466 466
467 467
468/* 468/*
469 * Calculate the correct number of page colors to use. This should be the 469 * Calculate the correct number of page colors to use. This should be the
470 * size of the E$/PAGE_SIZE. However, different CPUs can have different sized 470 * size of the E$/PAGE_SIZE. However, different CPUs can have different sized
471 * E$, so we take the largest color count found. 471 * E$, so we take the largest color count found.
472 */ 472 */
473static int pmap_calculate_colors(void) 473static int pmap_calculate_colors(void)
474{ 474{
475 int node; 475 int node;
476 int size, assoc, color, maxcolor = 1; 476 int size, assoc, color, maxcolor = 1;
477 477
478 for (node = prom_firstchild(prom_findroot()); node != 0; 478 for (node = prom_firstchild(prom_findroot()); node != 0;
479 node = prom_nextsibling(node)) { 479 node = prom_nextsibling(node)) {
480 char *name = prom_getpropstring(node, "device_type"); 480 char *name = prom_getpropstring(node, "device_type");
481 if (strcmp("cpu", name) != 0) 481 if (strcmp("cpu", name) != 0)
482 continue; 482 continue;
483 483
484 /* Found a CPU, get the E$ info. */ 484 /* Found a CPU, get the E$ info. */
485 size = prom_getpropint(node, "ecache-size", -1); 485 size = prom_getpropint(node, "ecache-size", -1);
486 if (size == -1) { 486 if (size == -1) {
487 prom_printf("pmap_calculate_colors: node %x has " 487 prom_printf("pmap_calculate_colors: node %x has "
488 "no ecache-size\n", node); 488 "no ecache-size\n", node);
489 /* If we can't get the E$ size, skip the node */ 489 /* If we can't get the E$ size, skip the node */
490 continue; 490 continue;
491 } 491 }
492 492
493 assoc = prom_getpropint(node, "ecache-associativity", 1); 493 assoc = prom_getpropint(node, "ecache-associativity", 1);
494 color = size/assoc/PAGE_SIZE; 494 color = size/assoc/PAGE_SIZE;
495 if (color > maxcolor) 495 if (color > maxcolor)
496 maxcolor = color; 496 maxcolor = color;
497 } 497 }
498 return (maxcolor); 498 return (maxcolor);
499} 499}
500 500
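The color count is just the number of page-sized slots in one way of the external cache. As a purely illustrative worked example (the numbers are assumptions, not measurements from any machine): an 8 MB E$ with 2-way associativity and 8 KB pages yields 8 MB / 2 / 8 KB = 512 colors, while a direct-mapped 1 MB E$ yields 1 MB / 1 / 8 KB = 128; the loop above keeps the larger of the two, 512, so coloring stays correct for every CPU present.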
501static void pmap_alloc_bootargs(void) 501static void pmap_alloc_bootargs(void)
502{ 502{
503 char *v; 503 char *v;
504 504
505 v = OF_claim(NULL, 2*PAGE_SIZE, PAGE_SIZE); 505 v = OF_claim(NULL, 2*PAGE_SIZE, PAGE_SIZE);
506 if ((v == NULL) || (v == (void*)-1)) 506 if ((v == NULL) || (v == (void*)-1))
507 panic("Can't claim two pages of memory."); 507 panic("Can't claim two pages of memory.");
508 508
509 memset(v, 0, 2*PAGE_SIZE); 509 memset(v, 0, 2*PAGE_SIZE);
510 510
511 cpu_args = (struct cpu_bootargs*)v; 511 cpu_args = (struct cpu_bootargs*)v;
512} 512}
513 513
514#if defined(MULTIPROCESSOR) 514#if defined(MULTIPROCESSOR)
515static void pmap_mp_init(void); 515static void pmap_mp_init(void);
516 516
517static void 517static void
518pmap_mp_init(void) 518pmap_mp_init(void)
519{ 519{
520 pte_t *tp; 520 pte_t *tp;
521 char *v; 521 char *v;
522 int i; 522 int i;
523 523
524 extern void cpu_mp_startup(void); 524 extern void cpu_mp_startup(void);
525 525
526 if ((v = OF_claim(NULL, PAGE_SIZE, PAGE_SIZE)) == NULL) { 526 if ((v = OF_claim(NULL, PAGE_SIZE, PAGE_SIZE)) == NULL) {
527 panic("pmap_mp_init: Cannot claim a page."); 527 panic("pmap_mp_init: Cannot claim a page.");
528 } 528 }
529 529
530 memcpy(v, mp_tramp_code, mp_tramp_code_len); 530 memcpy(v, mp_tramp_code, mp_tramp_code_len);
531 *(u_long *)(v + mp_tramp_tlb_slots) = kernel_tlb_slots; 531 *(u_long *)(v + mp_tramp_tlb_slots) = kernel_tlb_slots;
532 *(u_long *)(v + mp_tramp_func) = (u_long)cpu_mp_startup; 532 *(u_long *)(v + mp_tramp_func) = (u_long)cpu_mp_startup;
533 *(u_long *)(v + mp_tramp_ci) = (u_long)cpu_args; 533 *(u_long *)(v + mp_tramp_ci) = (u_long)cpu_args;
534 tp = (pte_t *)(v + mp_tramp_code_len); 534 tp = (pte_t *)(v + mp_tramp_code_len);
535 for (i = 0; i < kernel_tlb_slots; i++) { 535 for (i = 0; i < kernel_tlb_slots; i++) {
536 tp[i].tag = kernel_tlbs[i].te_va; 536 tp[i].tag = kernel_tlbs[i].te_va;
537 tp[i].data = TSB_DATA(0, /* g */ 537 tp[i].data = TSB_DATA(0, /* g */
538 PGSZ_4M, /* sz */ 538 PGSZ_4M, /* sz */
539 kernel_tlbs[i].te_pa, /* pa */ 539 kernel_tlbs[i].te_pa, /* pa */
540 1, /* priv */ 540 1, /* priv */
541 1, /* write */ 541 1, /* write */
542 1, /* cache */ 542 1, /* cache */
543 1, /* aliased */ 543 1, /* aliased */
544 1, /* valid */ 544 1, /* valid */
545 0 /* ie */); 545 0 /* ie */);
546 tp[i].data |= TLB_L | TLB_CV; 546 tp[i].data |= TLB_L | TLB_CV;
547 DPRINTF(PDB_BOOT1, ("xtlb[%d]: Tag: %" PRIx64 " Data: %" 547 DPRINTF(PDB_BOOT1, ("xtlb[%d]: Tag: %" PRIx64 " Data: %"
548 PRIx64 "\n", i, tp[i].tag, tp[i].data)); 548 PRIx64 "\n", i, tp[i].tag, tp[i].data));
549 } 549 }
550 550
551 for (i = 0; i < PAGE_SIZE; i += sizeof(long)) 551 for (i = 0; i < PAGE_SIZE; i += sizeof(long))
552 flush(v + i); 552 flush(v + i);
553 553
554 cpu_spinup_trampoline = (vaddr_t)v; 554 cpu_spinup_trampoline = (vaddr_t)v;
555} 555}
556#else 556#else
557#define pmap_mp_init() ((void)0) 557#define pmap_mp_init() ((void)0)
558#endif 558#endif
559 559
560paddr_t pmap_kextract(vaddr_t va); 560paddr_t pmap_kextract(vaddr_t va);
561 561
562paddr_t 562paddr_t
563pmap_kextract(vaddr_t va) 563pmap_kextract(vaddr_t va)
564{ 564{
565 int i; 565 int i;
566 paddr_t paddr = (paddr_t)-1; 566 paddr_t paddr = (paddr_t)-1;
567 567
568 for (i = 0; i < kernel_tlb_slots; i++) { 568 for (i = 0; i < kernel_tlb_slots; i++) {
569 if ((va & ~PAGE_MASK_4M) == kernel_tlbs[i].te_va) { 569 if ((va & ~PAGE_MASK_4M) == kernel_tlbs[i].te_va) {
570 paddr = kernel_tlbs[i].te_pa + 570 paddr = kernel_tlbs[i].te_pa +
571 (paddr_t)(va & PAGE_MASK_4M); 571 (paddr_t)(va & PAGE_MASK_4M);
572 break; 572 break;
573 } 573 }
574 } 574 }
575 575
576 if (i == kernel_tlb_slots) { 576 if (i == kernel_tlb_slots) {
577 panic("pmap_kextract: Address %p is not from kernel space.\n" 577 panic("pmap_kextract: Address %p is not from kernel space.\n"
578 "Data segment is too small?\n", (void*)va); 578 "Data segment is too small?\n", (void*)va);
579 } 579 }
580 580
581 return (paddr); 581 return (paddr);
582} 582}
583 583
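pmap_kextract() can only translate addresses that fall inside one of the locked 4 MB kernel mappings: the 4 MB-aligned part of the VA must match a slot's te_va, and the low 22 bits carry over as the offset into that slot. A small self-contained sketch of the mask arithmetic (the slot addresses are invented for the example, and PAGE_MASK_4M is redefined locally for clarity):

#include <stdio.h>
#include <stdint.h>

#define PAGE_MASK_4M	((1UL << 22) - 1)	/* low 22 bits = offset in 4 MB */

int
main(void)
{
	uint64_t te_va = 0x01000000;	/* hypothetical locked slot VA */
	uint64_t te_pa = 0x40000000;	/* hypothetical slot PA */
	uint64_t va    = 0x012345f8;	/* address we want to translate */

	if ((va & ~PAGE_MASK_4M) == te_va) {
		uint64_t pa = te_pa + (va & PAGE_MASK_4M);
		printf("va %#llx -> pa %#llx\n",
		    (unsigned long long)va, (unsigned long long)pa);
	}
	return 0;
}

Run, this prints va 0x12345f8 -> pa 0x402345f8: the 4 MB base is swapped for the slot's physical base and the offset is kept.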
584/* 584/*
585 * Bootstrap kernel allocator, allocates from unused space in 4MB kernel 585 * Bootstrap kernel allocator, allocates from unused space in 4MB kernel
586 * data segment, meaning that: 586 * data segment, meaning that:
587 * 587 *
588 * - Access to allocated memory will never generate a trap 588 * - Access to allocated memory will never generate a trap
589 * - Allocated chunks are never reclaimed or freed 589 * - Allocated chunks are never reclaimed or freed
590 * - Allocation calls do not change PROM memlists 590 * - Allocation calls do not change PROM memlists
591 */ 591 */
592static struct mem_region kdata_mem_pool; 592static struct mem_region kdata_mem_pool;
593 593
594static void 594static void
595kdata_alloc_init(vaddr_t va_start, vaddr_t va_end) 595kdata_alloc_init(vaddr_t va_start, vaddr_t va_end)
596{ 596{
597 vsize_t va_size = va_end - va_start; 597 vsize_t va_size = va_end - va_start;
598 598
599 kdata_mem_pool.start = va_start; 599 kdata_mem_pool.start = va_start;
600 kdata_mem_pool.size = va_size; 600 kdata_mem_pool.size = va_size;
601 601
602 BDPRINTF(PDB_BOOT, ("kdata_alloc_init(): %d bytes @%p.\n", va_size, 602 BDPRINTF(PDB_BOOT, ("kdata_alloc_init(): %d bytes @%p.\n", va_size,
603 va_start)); 603 va_start));
604} 604}
605 605
606static vaddr_t 606static vaddr_t
607kdata_alloc(vsize_t size, vsize_t align) 607kdata_alloc(vsize_t size, vsize_t align)
608{ 608{
609 vaddr_t va; 609 vaddr_t va;
610 vsize_t asize; 610 vsize_t asize;
611 611
612 asize = roundup(kdata_mem_pool.start, align) - kdata_mem_pool.start; 612 asize = roundup(kdata_mem_pool.start, align) - kdata_mem_pool.start;
613 613
614 kdata_mem_pool.start += asize; 614 kdata_mem_pool.start += asize;
615 kdata_mem_pool.size -= asize; 615 kdata_mem_pool.size -= asize;
616 616
617 if (kdata_mem_pool.size < size) { 617 if (kdata_mem_pool.size < size) {
618 panic("kdata_alloc(): Data segment is too small.\n"); 618 panic("kdata_alloc(): Data segment is too small.\n");
619 } 619 }
620 620
621 va = kdata_mem_pool.start; 621 va = kdata_mem_pool.start;
622 kdata_mem_pool.start += size; 622 kdata_mem_pool.start += size;
623 kdata_mem_pool.size -= size; 623 kdata_mem_pool.size -= size;
624 624
625 BDPRINTF(PDB_BOOT, ("kdata_alloc(): Allocated %d@%p, %d free.\n", 625 BDPRINTF(PDB_BOOT, ("kdata_alloc(): Allocated %d@%p, %d free.\n",
626 size, (void*)va, kdata_mem_pool.size)); 626 size, (void*)va, kdata_mem_pool.size));
627 627
628 return (va); 628 return (va);
629} 629}
630 630
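kdata_alloc() is a bump allocator: it first advances the pool start up to the requested alignment (the gap is simply lost), then carves the chunk off the front, and nothing is ever freed. A minimal userland imitation with made-up pool numbers (it also assumes a power-of-two alignment, which the kernel's roundup() does not need):

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

static struct {
	uintptr_t start;
	size_t    size;
} pool = { 0x10010, 0x10000 };		/* illustrative pool bounds */

static uintptr_t
bump_alloc(size_t size, size_t align)
{
	/* Round the pool start up to 'align', losing the gap for good. */
	size_t skip = ((pool.start + align - 1) & ~(align - 1)) - pool.start;
	uintptr_t va;

	pool.start += skip;
	pool.size  -= skip;
	assert(pool.size >= size);

	va = pool.start;
	pool.start += size;
	pool.size  -= size;
	return va;
}

int
main(void)
{
	printf("%#lx\n", (unsigned long)bump_alloc(0x2000, 0x2000));
	printf("%#lx\n", (unsigned long)bump_alloc(0x100, 8));
	return 0;
}

Here the first call returns 0x12000 (the pool start 0x10010 rounded up to 0x2000 alignment) and the second returns 0x14000.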
631/* 631/*
632 * Unified routine for reading PROM properties. 632 * Unified routine for reading PROM properties.
633 */ 633 */
634static void 634static void
635pmap_read_memlist(const char *device, const char *property, void **ml, 635pmap_read_memlist(const char *device, const char *property, void **ml,
636 int *ml_size, vaddr_t (* ml_alloc)(vsize_t, vsize_t)) 636 int *ml_size, vaddr_t (* ml_alloc)(vsize_t, vsize_t))
637{ 637{
638 void *va; 638 void *va;
639 int size, handle; 639 int size, handle;
640 640
641 if ( (handle = prom_finddevice(device)) == 0) { 641 if ( (handle = prom_finddevice(device)) == 0) {
642 prom_printf("pmap_read_memlist(): No %s device found.\n", 642 prom_printf("pmap_read_memlist(): No %s device found.\n",
643 device); 643 device);
644 prom_halt(); 644 prom_halt();
645 } 645 }
646 if ( (size = OF_getproplen(handle, property)) < 0) { 646 if ( (size = OF_getproplen(handle, property)) < 0) {
647 prom_printf("pmap_read_memlist(): %s/%s has no length.\n", 647 prom_printf("pmap_read_memlist(): %s/%s has no length.\n",
648 device, property); 648 device, property);
649 prom_halt(); 649 prom_halt();
650 } 650 }
651 if ( (va = (void*)(* ml_alloc)(size, sizeof(uint64_t))) == NULL) { 651 if ( (va = (void*)(* ml_alloc)(size, sizeof(uint64_t))) == NULL) {
652 prom_printf("pmap_read_memlist(): Cannot allocate memlist.\n"); 652 prom_printf("pmap_read_memlist(): Cannot allocate memlist.\n");
653 prom_halt(); 653 prom_halt();
654 } 654 }
655 if (OF_getprop(handle, property, va, size) <= 0) { 655 if (OF_getprop(handle, property, va, size) <= 0) {
656 prom_printf("pmap_read_memlist(): Cannot read %s/%s.\n", 656 prom_printf("pmap_read_memlist(): Cannot read %s/%s.\n",
657 device, property); 657 device, property);
658 prom_halt(); 658 prom_halt();
659 } 659 }
660 660
661 *ml = va; 661 *ml = va;
662 *ml_size = size; 662 *ml_size = size;
663} 663}
664 664
665/* 665/*
666 * This is called during bootstrap, before the system is really initialized. 666 * This is called during bootstrap, before the system is really initialized.
667 * 667 *
668 * It's called with the start and end virtual addresses of the kernel. We 668 * It's called with the start and end virtual addresses of the kernel. We
669 * bootstrap the pmap allocator now. We will allocate the basic structures we 669 * bootstrap the pmap allocator now. We will allocate the basic structures we
670 * need to bootstrap the VM system here: the page frame tables, the TSB, and 670 * need to bootstrap the VM system here: the page frame tables, the TSB, and
671 * the free memory lists. 671 * the free memory lists.
672 * 672 *
673 * Now all this is becoming a bit obsolete. maxctx is still important, but by 673 * Now all this is becoming a bit obsolete. maxctx is still important, but by
674 * separating the kernel text and data segments we really would need to 674 * separating the kernel text and data segments we really would need to
675 * provide the start and end of each segment. But we can't. The rodata 675 * provide the start and end of each segment. But we can't. The rodata
676 * segment is attached to the end of the kernel segment and has nothing to 676 * segment is attached to the end of the kernel segment and has nothing to
677 * delimit its end. We could still pass in the beginning of the kernel and 677 * delimit its end. We could still pass in the beginning of the kernel and
678 * the beginning and end of the data segment but we could also just as easily 678 * the beginning and end of the data segment but we could also just as easily
679 * calculate that all in here. 679 * calculate that all in here.
680 * 680 *
681 * To handle the kernel text, we need to do a reverse mapping of the start of 681 * To handle the kernel text, we need to do a reverse mapping of the start of
682 * the kernel, then traverse the free memory lists to find out how big it is. 682 * the kernel, then traverse the free memory lists to find out how big it is.
683 */ 683 */
684 684
685void 685void
686pmap_bootstrap(u_long kernelstart, u_long kernelend) 686pmap_bootstrap(u_long kernelstart, u_long kernelend)
687{ 687{
688#ifdef MODULAR 688#ifdef MODULAR
689 extern vaddr_t module_start, module_end; 689 extern vaddr_t module_start, module_end;
690#endif 690#endif
691 extern char etext[], data_start[]; /* start of data segment */ 691 extern char etext[], data_start[]; /* start of data segment */
692 extern int msgbufmapped; 692 extern int msgbufmapped;
693 struct mem_region *mp, *mp1, *avail, *orig; 693 struct mem_region *mp, *mp1, *avail, *orig;
694 int i, j, pcnt, msgbufsiz; 694 int i, j, pcnt, msgbufsiz;
695 size_t s, sz; 695 size_t s, sz;
696 int64_t data; 696 int64_t data;
697 vaddr_t va, intstk; 697 vaddr_t va, intstk;
698 uint64_t phys_msgbuf; 698 uint64_t phys_msgbuf;
699 paddr_t newp = 0; 699 paddr_t newp = 0;
700 700
701 void *prom_memlist; 701 void *prom_memlist;
702 int prom_memlist_size; 702 int prom_memlist_size;
703 703
704 BDPRINTF(PDB_BOOT, ("Entered pmap_bootstrap.\n")); 704 BDPRINTF(PDB_BOOT, ("Entered pmap_bootstrap.\n"));
705 705
706 cache_setup_funcs(); 706 cache_setup_funcs();
707 707
708 /* 708 /*
709 * Calculate kernel size. 709 * Calculate kernel size.
710 */ 710 */
711 ktext = kernelstart; 711 ktext = kernelstart;
712 ktextp = pmap_kextract(ktext); 712 ktextp = pmap_kextract(ktext);
713 ektext = roundup((vaddr_t)etext, PAGE_SIZE_4M); 713 ektext = roundup((vaddr_t)etext, PAGE_SIZE_4M);
714 ektextp = roundup(pmap_kextract((vaddr_t)etext), PAGE_SIZE_4M); 714 ektextp = roundup(pmap_kextract((vaddr_t)etext), PAGE_SIZE_4M);
715 715
716 kdata = (vaddr_t)data_start; 716 kdata = (vaddr_t)data_start;
717 kdatap = pmap_kextract(kdata); 717 kdatap = pmap_kextract(kdata);
718 ekdata = roundup(kernelend, PAGE_SIZE_4M); 718 ekdata = roundup(kernelend, PAGE_SIZE_4M);
719 ekdatap = roundup(pmap_kextract(kernelend), PAGE_SIZE_4M); 719 ekdatap = roundup(pmap_kextract(kernelend), PAGE_SIZE_4M);
720 720
721 BDPRINTF(PDB_BOOT, ("Virtual layout: text %lx-%lx, data %lx-%lx.\n", 721 BDPRINTF(PDB_BOOT, ("Virtual layout: text %lx-%lx, data %lx-%lx.\n",
722 ktext, ektext, kdata, ekdata)); 722 ktext, ektext, kdata, ekdata));
723 BDPRINTF(PDB_BOOT, ("Physical layout: text %lx-%lx, data %lx-%lx.\n", 723 BDPRINTF(PDB_BOOT, ("Physical layout: text %lx-%lx, data %lx-%lx.\n",
724 ktextp, ektextp, kdatap, ekdatap)); 724 ktextp, ektextp, kdatap, ekdatap));
725 725
726 /* Initialize bootstrap allocator. */ 726 /* Initialize bootstrap allocator. */
727 kdata_alloc_init(kernelend + 1 * 1024 * 1024, ekdata); 727 kdata_alloc_init(kernelend + 1 * 1024 * 1024, ekdata);
728 728
729 pmap_bootdebug(); 729 pmap_bootdebug();
730 pmap_alloc_bootargs(); 730 pmap_alloc_bootargs();
731 pmap_mp_init(); 731 pmap_mp_init();
732 732
733 /* 733 /*
734 * set machine page size 734 * set machine page size
735 */ 735 */
736 uvmexp.pagesize = NBPG; 736 uvmexp.pagesize = NBPG;
737 uvmexp.ncolors = pmap_calculate_colors(); 737 uvmexp.ncolors = pmap_calculate_colors();
738 uvm_setpagesize(); 738 uvm_setpagesize();
739 739
740 /* 740 /*
741 * Get hold of the message buffer. 741 * Get hold of the message buffer.
742 */ 742 */
743 msgbufp = (struct kern_msgbuf *)(vaddr_t)MSGBUF_VA; 743 msgbufp = (struct kern_msgbuf *)(vaddr_t)MSGBUF_VA;
744/* XXXXX -- increase msgbufsiz for uvmhist printing */ 744/* XXXXX -- increase msgbufsiz for uvmhist printing */
745 msgbufsiz = 4*PAGE_SIZE /* round_page(sizeof(struct msgbuf)) */; 745 msgbufsiz = 4*PAGE_SIZE /* round_page(sizeof(struct msgbuf)) */;
746 BDPRINTF(PDB_BOOT, ("Trying to allocate msgbuf at %lx, size %lx\n", 746 BDPRINTF(PDB_BOOT, ("Trying to allocate msgbuf at %lx, size %lx\n",
747 (long)msgbufp, (long)msgbufsiz)); 747 (long)msgbufp, (long)msgbufsiz));
748 if ((long)msgbufp != 748 if ((long)msgbufp !=
749 (long)(phys_msgbuf = prom_claim_virt((vaddr_t)msgbufp, msgbufsiz))) 749 (long)(phys_msgbuf = prom_claim_virt((vaddr_t)msgbufp, msgbufsiz)))
750 prom_printf( 750 prom_printf(
751 "cannot get msgbuf VA, msgbufp=%p, phys_msgbuf=%lx\n", 751 "cannot get msgbuf VA, msgbufp=%p, phys_msgbuf=%lx\n",
752 (void *)msgbufp, (long)phys_msgbuf); 752 (void *)msgbufp, (long)phys_msgbuf);
753 phys_msgbuf = prom_get_msgbuf(msgbufsiz, MMU_PAGE_ALIGN); 753 phys_msgbuf = prom_get_msgbuf(msgbufsiz, MMU_PAGE_ALIGN);
754 BDPRINTF(PDB_BOOT, 754 BDPRINTF(PDB_BOOT,
755 ("We should have the memory at %lx, let's map it in\n", 755 ("We should have the memory at %lx, let's map it in\n",
756 phys_msgbuf)); 756 phys_msgbuf));
757 if (prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp, 757 if (prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp,
758 -1/* sunos does this */) == -1) { 758 -1/* sunos does this */) == -1) {
759 prom_printf("Failed to map msgbuf\n"); 759 prom_printf("Failed to map msgbuf\n");
760 } else { 760 } else {
761 BDPRINTF(PDB_BOOT, ("msgbuf mapped at %p\n", 761 BDPRINTF(PDB_BOOT, ("msgbuf mapped at %p\n",
762 (void *)msgbufp)); 762 (void *)msgbufp));
763 } 763 }
764 msgbufmapped = 1; /* enable message buffer */ 764 msgbufmapped = 1; /* enable message buffer */
765 initmsgbuf((void *)msgbufp, msgbufsiz); 765 initmsgbuf((void *)msgbufp, msgbufsiz);
766 766
767 /* 767 /*
768 * Find out how much RAM we have installed. 768 * Find out how much RAM we have installed.
769 */ 769 */
770 BDPRINTF(PDB_BOOT, ("pmap_bootstrap: getting phys installed\n")); 770 BDPRINTF(PDB_BOOT, ("pmap_bootstrap: getting phys installed\n"));
771 pmap_read_memlist("/memory", "reg", &prom_memlist, &prom_memlist_size, 771 pmap_read_memlist("/memory", "reg", &prom_memlist, &prom_memlist_size,
772 kdata_alloc); 772 kdata_alloc);
773 phys_installed = prom_memlist; 773 phys_installed = prom_memlist;
774 phys_installed_size = prom_memlist_size / sizeof(*phys_installed); 774 phys_installed_size = prom_memlist_size / sizeof(*phys_installed);
775 775
776 if (pmapdebug & PDB_BOOT1) { 776 if (pmapdebug & PDB_BOOT1) {
777 /* print out mem list */ 777 /* print out mem list */
778 prom_printf("Installed physical memory:\n"); 778 prom_printf("Installed physical memory:\n");
779 for (i = 0; i < phys_installed_size; i++) { 779 for (i = 0; i < phys_installed_size; i++) {
780 prom_printf("memlist start %lx size %lx\n", 780 prom_printf("memlist start %lx size %lx\n",
781 (u_long)phys_installed[i].start, 781 (u_long)phys_installed[i].start,
782 (u_long)phys_installed[i].size); 782 (u_long)phys_installed[i].size);
783 } 783 }
784 } 784 }
785 785
786 BDPRINTF(PDB_BOOT1, ("Calculating physmem:")); 786 BDPRINTF(PDB_BOOT1, ("Calculating physmem:"));
787 for (i = 0; i < phys_installed_size; i++) 787 for (i = 0; i < phys_installed_size; i++)
788 physmem += btoc(phys_installed[i].size); 788 physmem += btoc(phys_installed[i].size);
789 BDPRINTF(PDB_BOOT1, (" result %x or %d pages\n", 789 BDPRINTF(PDB_BOOT1, (" result %x or %d pages\n",
790 (int)physmem, (int)physmem)); 790 (int)physmem, (int)physmem));
791 791
792 /* 792 /*
793 * Calculate approx TSB size. This probably needs tweaking. 793 * Calculate approx TSB size. This probably needs tweaking.
794 */ 794 */
795 if (physmem < btoc(64 * 1024 * 1024)) 795 if (physmem < btoc(64 * 1024 * 1024))
796 tsbsize = 0; 796 tsbsize = 0;
797 else if (physmem < btoc(512 * 1024 * 1024)) 797 else if (physmem < btoc(512 * 1024 * 1024))
798 tsbsize = 1; 798 tsbsize = 1;
799 else 799 else
800 tsbsize = 2; 800 tsbsize = 2;
801 801
802 /* 802 /*
803 * Save the prom translations 803 * Save the prom translations
804 */ 804 */
805 pmap_read_memlist("/virtual-memory", "translations", &prom_memlist, 805 pmap_read_memlist("/virtual-memory", "translations", &prom_memlist,
806 &prom_memlist_size, kdata_alloc); 806 &prom_memlist_size, kdata_alloc);
807 prom_map = prom_memlist; 807 prom_map = prom_memlist;
808 prom_map_size = prom_memlist_size / sizeof(struct prom_map); 808 prom_map_size = prom_memlist_size / sizeof(struct prom_map);
809 809
810 if (pmapdebug & PDB_BOOT) { 810 if (pmapdebug & PDB_BOOT) {
811 /* print out mem list */ 811 /* print out mem list */
812 prom_printf("Prom xlations:\n"); 812 prom_printf("Prom xlations:\n");
813 for (i = 0; i < prom_map_size; i++) { 813 for (i = 0; i < prom_map_size; i++) {
814 prom_printf("start %016lx size %016lx tte %016lx\n", 814 prom_printf("start %016lx size %016lx tte %016lx\n",
815 (u_long)prom_map[i].vstart, 815 (u_long)prom_map[i].vstart,
816 (u_long)prom_map[i].vsize, 816 (u_long)prom_map[i].vsize,
817 (u_long)prom_map[i].tte); 817 (u_long)prom_map[i].tte);
818 } 818 }
819 prom_printf("End of prom xlations\n"); 819 prom_printf("End of prom xlations\n");
820 } 820 }
821 821
822 /* 822 /*
823 * Here's a quick in-lined reverse bubble sort. It gets rid of 823 * Here's a quick in-lined reverse bubble sort. It gets rid of
824 * any translations inside the kernel data VA range. 824 * any translations inside the kernel data VA range.
825 */ 825 */
826 for (i = 0; i < prom_map_size; i++) { 826 for (i = 0; i < prom_map_size; i++) {
827 for (j = i; j < prom_map_size; j++) { 827 for (j = i; j < prom_map_size; j++) {
828 if (prom_map[j].vstart > prom_map[i].vstart) { 828 if (prom_map[j].vstart > prom_map[i].vstart) {
829 struct prom_map tmp; 829 struct prom_map tmp;
830 830
831 tmp = prom_map[i]; 831 tmp = prom_map[i];
832 prom_map[i] = prom_map[j]; 832 prom_map[i] = prom_map[j];
833 prom_map[j] = tmp; 833 prom_map[j] = tmp;
834 } 834 }
835 } 835 }
836 } 836 }
837 if (pmapdebug & PDB_BOOT) { 837 if (pmapdebug & PDB_BOOT) {
838 /* print out mem list */ 838 /* print out mem list */
839 prom_printf("Prom xlations:\n"); 839 prom_printf("Prom xlations:\n");
840 for (i = 0; i < prom_map_size; i++) { 840 for (i = 0; i < prom_map_size; i++) {
841 prom_printf("start %016lx size %016lx tte %016lx\n", 841 prom_printf("start %016lx size %016lx tte %016lx\n",
842 (u_long)prom_map[i].vstart, 842 (u_long)prom_map[i].vstart,
843 (u_long)prom_map[i].vsize, 843 (u_long)prom_map[i].vsize,
844 (u_long)prom_map[i].tte); 844 (u_long)prom_map[i].tte);
845 } 845 }
846 prom_printf("End of prom xlations\n"); 846 prom_printf("End of prom xlations\n");
847 } 847 }
848 848
849 /* 849 /*
850 * Allocate ncpu*64KB for the cpu_info & stack structures now. 850 * Allocate ncpu*64KB for the cpu_info & stack structures now.
851 */ 851 */
852 cpu0paddr = prom_alloc_phys(8 * PAGE_SIZE * sparc_ncpus, 8 * PAGE_SIZE); 852 cpu0paddr = prom_alloc_phys(8 * PAGE_SIZE * sparc_ncpus, 8 * PAGE_SIZE);
853 if (cpu0paddr == 0) { 853 if (cpu0paddr == 0) {
854 prom_printf("Cannot allocate cpu_infos\n"); 854 prom_printf("Cannot allocate cpu_infos\n");
855 prom_halt(); 855 prom_halt();
856 } 856 }
857 857
858 /* 858 /*
859 * Now that the kernel text segment is in its final location, we can try to 859 * Now that the kernel text segment is in its final location, we can try to
860 * find out how much memory really is free. 860 * find out how much memory really is free.
861 */ 861 */
862 pmap_read_memlist("/memory", "available", &prom_memlist, 862 pmap_read_memlist("/memory", "available", &prom_memlist,
863 &prom_memlist_size, kdata_alloc); 863 &prom_memlist_size, kdata_alloc);
864 orig = prom_memlist; 864 orig = prom_memlist;
865 sz = prom_memlist_size; 865 sz = prom_memlist_size;
866 pcnt = prom_memlist_size / sizeof(*orig); 866 pcnt = prom_memlist_size / sizeof(*orig);
867 867
868 BDPRINTF(PDB_BOOT1, ("Available physical memory:\n")); 868 BDPRINTF(PDB_BOOT1, ("Available physical memory:\n"));
869 avail = (struct mem_region*)kdata_alloc(sz, sizeof(uint64_t)); 869 avail = (struct mem_region*)kdata_alloc(sz, sizeof(uint64_t));
870 for (i = 0; i < pcnt; i++) { 870 for (i = 0; i < pcnt; i++) {
871 avail[i] = orig[i]; 871 avail[i] = orig[i];
872 BDPRINTF(PDB_BOOT1, ("memlist start %lx size %lx\n", 872 BDPRINTF(PDB_BOOT1, ("memlist start %lx size %lx\n",
873 (u_long)orig[i].start, 873 (u_long)orig[i].start,
874 (u_long)orig[i].size)); 874 (u_long)orig[i].size));
875 } 875 }
876 BDPRINTF(PDB_BOOT1, ("End of available physical memory\n")); 876 BDPRINTF(PDB_BOOT1, ("End of available physical memory\n"));
877 877
878 BDPRINTF(PDB_BOOT, ("ktext %08lx[%08lx] - %08lx[%08lx] : " 878 BDPRINTF(PDB_BOOT, ("ktext %08lx[%08lx] - %08lx[%08lx] : "
879 "kdata %08lx[%08lx] - %08lx[%08lx]\n", 879 "kdata %08lx[%08lx] - %08lx[%08lx]\n",
880 (u_long)ktext, (u_long)ktextp, 880 (u_long)ktext, (u_long)ktextp,
881 (u_long)ektext, (u_long)ektextp, 881 (u_long)ektext, (u_long)ektextp,
882 (u_long)kdata, (u_long)kdatap, 882 (u_long)kdata, (u_long)kdatap,
883 (u_long)ekdata, (u_long)ekdatap)); 883 (u_long)ekdata, (u_long)ekdatap));
884 if (pmapdebug & PDB_BOOT1) { 884 if (pmapdebug & PDB_BOOT1) {
885 /* print out mem list */ 885 /* print out mem list */
886 prom_printf("Available %lx physical memory before cleanup:\n", 886 prom_printf("Available %lx physical memory before cleanup:\n",
887 (u_long)avail); 887 (u_long)avail);
888 for (i = 0; i < pcnt; i++) { 888 for (i = 0; i < pcnt; i++) {
889 prom_printf("memlist start %lx size %lx\n", 889 prom_printf("memlist start %lx size %lx\n",
890 (u_long)avail[i].start, 890 (u_long)avail[i].start,
891 (u_long)avail[i].size); 891 (u_long)avail[i].size);
892 } 892 }
893 prom_printf("End of available physical memory before cleanup\n"); 893 prom_printf("End of available physical memory before cleanup\n");
894 prom_printf("kernel physical text size %08lx - %08lx\n", 894 prom_printf("kernel physical text size %08lx - %08lx\n",
895 (u_long)ktextp, (u_long)ektextp); 895 (u_long)ktextp, (u_long)ektextp);
896 prom_printf("kernel physical data size %08lx - %08lx\n", 896 prom_printf("kernel physical data size %08lx - %08lx\n",
897 (u_long)kdatap, (u_long)ekdatap); 897 (u_long)kdatap, (u_long)ekdatap);
898 } 898 }
899 899
900 /* 900 /*
901 * Here's another quick in-lined bubble sort. 901 * Here's another quick in-lined bubble sort.
902 */ 902 */
903 for (i = 0; i < pcnt; i++) { 903 for (i = 0; i < pcnt; i++) {
904 for (j = i; j < pcnt; j++) { 904 for (j = i; j < pcnt; j++) {
905 if (avail[j].start < avail[i].start) { 905 if (avail[j].start < avail[i].start) {
906 struct mem_region tmp; 906 struct mem_region tmp;
907 tmp = avail[i]; 907 tmp = avail[i];
908 avail[i] = avail[j]; 908 avail[i] = avail[j];
909 avail[j] = tmp; 909 avail[j] = tmp;
910 } 910 }
911 } 911 }
912 } 912 }
913 913
914 /* Throw away page zero if we have it. */ 914 /* Throw away page zero if we have it. */
915 if (avail->start == 0) { 915 if (avail->start == 0) {
916 avail->start += PAGE_SIZE; 916 avail->start += PAGE_SIZE;
917 avail->size -= PAGE_SIZE; 917 avail->size -= PAGE_SIZE;
918 } 918 }
919 919
920 /* 920 /*
921 * Now we need to remove the area we valloc'ed from the available 921 * Now we need to remove the area we valloc'ed from the available
922 * memory lists. (NB: we may have already alloc'ed the entire space). 922 * memory lists. (NB: we may have already alloc'ed the entire space).
923 */ 923 */
924 npgs = 0; 924 npgs = 0;
925 for (mp = avail, i = 0; i < pcnt; i++, mp = &avail[i]) { 925 for (mp = avail, i = 0; i < pcnt; i++, mp = &avail[i]) {
926 /* 926 /*
927 * Now page align the start of the region. 927 * Now page align the start of the region.
928 */ 928 */
929 s = mp->start % PAGE_SIZE; 929 s = mp->start % PAGE_SIZE;
930 if (mp->size >= s) { 930 if (mp->size >= s) {
931 mp->size -= s; 931 mp->size -= s;
932 mp->start += s; 932 mp->start += s;
933 } 933 }
934 /* 934 /*
935 * And now align the size of the region. 935 * And now align the size of the region.
936 */ 936 */
937 mp->size -= mp->size % PAGE_SIZE; 937 mp->size -= mp->size % PAGE_SIZE;
938 /* 938 /*
939 * Check whether some memory is left here. 939 * Check whether some memory is left here.
940 */ 940 */
941 if (mp->size == 0) { 941 if (mp->size == 0) {
942 memcpy(mp, mp + 1, 942 memcpy(mp, mp + 1,
943 (pcnt - (mp - avail)) * sizeof *mp); 943 (pcnt - (mp - avail)) * sizeof *mp);
944 pcnt--; 944 pcnt--;
945 mp--; 945 mp--;
946 continue; 946 continue;
947 } 947 }
948 s = mp->start; 948 s = mp->start;
949 sz = mp->size; 949 sz = mp->size;
950 npgs += btoc(sz); 950 npgs += btoc(sz);
951 for (mp1 = avail; mp1 < mp; mp1++) 951 for (mp1 = avail; mp1 < mp; mp1++)
952 if (s < mp1->start) 952 if (s < mp1->start)
953 break; 953 break;
954 if (mp1 < mp) { 954 if (mp1 < mp) {
955 memcpy(mp1 + 1, mp1, (char *)mp - (char *)mp1); 955 memcpy(mp1 + 1, mp1, (char *)mp - (char *)mp1);
956 mp1->start = s; 956 mp1->start = s;
957 mp1->size = sz; 957 mp1->size = sz;
958 } 958 }
959#ifdef DEBUG 959#ifdef DEBUG
960/* Clear all memory we give to the VM system. I want to make sure 960/* Clear all memory we give to the VM system. I want to make sure
961 * the PROM isn't using it for something, so this should break the PROM. 961 * the PROM isn't using it for something, so this should break the PROM.
962 */ 962 */
963 963
964/* Calling pmap_zero_page() at this point also hangs some machines 964/* Calling pmap_zero_page() at this point also hangs some machines
965 * so don't do it at all. -- pk 26/02/2002 965 * so don't do it at all. -- pk 26/02/2002
966 */ 966 */
967#if 0 967#if 0
968 { 968 {
969 paddr_t p; 969 paddr_t p;
970 for (p = mp->start; p < mp->start+mp->size; 970 for (p = mp->start; p < mp->start+mp->size;
971 p += PAGE_SIZE) 971 p += PAGE_SIZE)
972 pmap_zero_page(p); 972 pmap_zero_page(p);
973 } 973 }
974#endif 974#endif
975#endif /* DEBUG */ 975#endif /* DEBUG */
976 /* 976 /*
977 * In future we should be able to specify both allocated 977 * In future we should be able to specify both allocated
978 * and free. 978 * and free.
979 */ 979 */
980 BDPRINTF(PDB_BOOT1, ("uvm_page_physload(%lx, %lx)\n", 980 BDPRINTF(PDB_BOOT1, ("uvm_page_physload(%lx, %lx)\n",
981 (long)mp->start, 981 (long)mp->start,
982 (long)(mp->start + mp->size))); 982 (long)(mp->start + mp->size)));
983 uvm_page_physload( 983 uvm_page_physload(
984 atop(mp->start), 984 atop(mp->start),
985 atop(mp->start+mp->size), 985 atop(mp->start+mp->size),
986 atop(mp->start), 986 atop(mp->start),
987 atop(mp->start+mp->size), 987 atop(mp->start+mp->size),
988 VM_FREELIST_DEFAULT); 988 VM_FREELIST_DEFAULT);
989 } 989 }
990 990
991 if (pmapdebug & PDB_BOOT) { 991 if (pmapdebug & PDB_BOOT) {
992 /* print out mem list */ 992 /* print out mem list */
993 prom_printf("Available physical memory after cleanup:\n"); 993 prom_printf("Available physical memory after cleanup:\n");
994 for (i = 0; i < pcnt; i++) { 994 for (i = 0; i < pcnt; i++) {
995 prom_printf("avail start %lx size %lx\n", 995 prom_printf("avail start %lx size %lx\n",
996 (long)avail[i].start, (long)avail[i].size); 996 (long)avail[i].start, (long)avail[i].size);
997 } 997 }
998 prom_printf("End of available physical memory after cleanup\n"); 998 prom_printf("End of available physical memory after cleanup\n");
999 } 999 }
1000 1000
1001 /* 1001 /*
1002 * Allocate and clear out pmap_kernel()->pm_segs[] 1002 * Allocate and clear out pmap_kernel()->pm_segs[]
1003 */ 1003 */
1004 pmap_kernel()->pm_refs = 1; 1004 pmap_kernel()->pm_refs = 1;
1005 memset(&pmap_kernel()->pm_ctx, 0, sizeof(pmap_kernel()->pm_ctx)); 1005 memset(&pmap_kernel()->pm_ctx, 0, sizeof(pmap_kernel()->pm_ctx));
1006 1006
1007 /* Throw away page zero */ 1007 /* Throw away page zero */
1008 do { 1008 do {
1009 pmap_get_page(&newp); 1009 pmap_get_page(&newp);
1010 } while (!newp); 1010 } while (!newp);
1011 pmap_kernel()->pm_segs=(paddr_t *)(u_long)newp; 1011 pmap_kernel()->pm_segs=(paddr_t *)(u_long)newp;
1012 pmap_kernel()->pm_physaddr = newp; 1012 pmap_kernel()->pm_physaddr = newp;
1013 1013
1014 /* 1014 /*
1015 * finish filling out kernel pmap. 1015 * finish filling out kernel pmap.
1016 */ 1016 */
1017 1017
1018 BDPRINTF(PDB_BOOT, ("pmap_kernel()->pm_physaddr = %lx\n", 1018 BDPRINTF(PDB_BOOT, ("pmap_kernel()->pm_physaddr = %lx\n",
1019 (long)pmap_kernel()->pm_physaddr)); 1019 (long)pmap_kernel()->pm_physaddr));
1020 /* 1020 /*
1021 * Tell pmap about our mesgbuf -- Hope this works already 1021 * Tell pmap about our mesgbuf -- Hope this works already
1022 */ 1022 */
1023 BDPRINTF(PDB_BOOT1, ("Calling consinit()\n")); 1023 BDPRINTF(PDB_BOOT1, ("Calling consinit()\n"));
1024 if (pmapdebug & PDB_BOOT1) 1024 if (pmapdebug & PDB_BOOT1)
1025 consinit(); 1025 consinit();
1026 BDPRINTF(PDB_BOOT1, ("Inserting mesgbuf into pmap_kernel()\n")); 1026 BDPRINTF(PDB_BOOT1, ("Inserting mesgbuf into pmap_kernel()\n"));
1027 /* it's not safe to call pmap_enter so we need to do this ourselves */ 1027 /* it's not safe to call pmap_enter so we need to do this ourselves */
1028 va = (vaddr_t)msgbufp; 1028 va = (vaddr_t)msgbufp;
1029 prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp, -1); 1029 prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp, -1);
1030 while (msgbufsiz) { 1030 while (msgbufsiz) {
1031 data = TSB_DATA(0 /* global */, 1031 data = TSB_DATA(0 /* global */,
1032 PGSZ_8K, 1032 PGSZ_8K,
1033 phys_msgbuf, 1033 phys_msgbuf,
1034 1 /* priv */, 1034 1 /* priv */,
1035 1 /* Write */, 1035 1 /* Write */,
1036 1 /* Cacheable */, 1036 1 /* Cacheable */,
1037 FORCE_ALIAS /* ALIAS -- Disable D$ */, 1037 FORCE_ALIAS /* ALIAS -- Disable D$ */,
1038 1 /* valid */, 1038 1 /* valid */,
1039 0 /* IE */); 1039 0 /* IE */);
1040 pmap_enter_kpage(va, data); 1040 pmap_enter_kpage(va, data);
1041 va += PAGE_SIZE; 1041 va += PAGE_SIZE;
1042 msgbufsiz -= PAGE_SIZE; 1042 msgbufsiz -= PAGE_SIZE;
1043 phys_msgbuf += PAGE_SIZE; 1043 phys_msgbuf += PAGE_SIZE;
1044 } 1044 }
1045 BDPRINTF(PDB_BOOT1, ("Done inserting mesgbuf into pmap_kernel()\n")); 1045 BDPRINTF(PDB_BOOT1, ("Done inserting mesgbuf into pmap_kernel()\n"));
1046 1046
1047 BDPRINTF(PDB_BOOT1, ("Inserting PROM mappings into pmap_kernel()\n")); 1047 BDPRINTF(PDB_BOOT1, ("Inserting PROM mappings into pmap_kernel()\n"));
1048 for (i = 0; i < prom_map_size; i++) 1048 for (i = 0; i < prom_map_size; i++)
1049 if (prom_map[i].vstart && ((prom_map[i].vstart >> 32) == 0)) 1049 if (prom_map[i].vstart && ((prom_map[i].vstart >> 32) == 0))
1050 for (j = 0; j < prom_map[i].vsize; j += PAGE_SIZE) { 1050 for (j = 0; j < prom_map[i].vsize; j += PAGE_SIZE) {
1051 int k; 1051 int k;
1052 1052
1053 for (k = 0; page_size_map[k].mask; k++) { 1053 for (k = 0; page_size_map[k].mask; k++) {
1054 if (((prom_map[i].vstart | 1054 if (((prom_map[i].vstart |
1055 prom_map[i].tte) & 1055 prom_map[i].tte) &
1056 page_size_map[k].mask) == 0 && 1056 page_size_map[k].mask) == 0 &&
1057 page_size_map[k].mask < 1057 page_size_map[k].mask <
1058 prom_map[i].vsize) 1058 prom_map[i].vsize)
1059 break; 1059 break;
1060 } 1060 }
1061 page_size_map[k].use++; 1061 page_size_map[k].use++;
1062 /* Enter PROM map into pmap_kernel() */ 1062 /* Enter PROM map into pmap_kernel() */
1063 pmap_enter_kpage(prom_map[i].vstart + j, 1063 pmap_enter_kpage(prom_map[i].vstart + j,
1064 (prom_map[i].tte + j) | TLB_EXEC | 1064 (prom_map[i].tte + j) | TLB_EXEC |
1065 page_size_map[k].code); 1065 page_size_map[k].code);
1066 } 1066 }
1067 BDPRINTF(PDB_BOOT1, ("Done inserting PROM mappings into pmap_kernel()\n")); 1067 BDPRINTF(PDB_BOOT1, ("Done inserting PROM mappings into pmap_kernel()\n"));
1068 1068
1069 /* 1069 /*
1070 * Fix up start of kernel heap. 1070 * Fix up start of kernel heap.
1071 */ 1071 */
1072 vmmap = (vaddr_t)roundup(ekdata, 4*MEG); 1072 vmmap = (vaddr_t)roundup(ekdata, 4*MEG);
1073 /* Let's keep 1 page of redzone after the kernel */ 1073 /* Let's keep 1 page of redzone after the kernel */
1074 vmmap += PAGE_SIZE; 1074 vmmap += PAGE_SIZE;
1075 { 1075 {
1076 extern void main(void); 1076 extern void main(void);
1077 vaddr_t u0va; 1077 vaddr_t u0va;
1078 paddr_t pa; 1078 paddr_t pa;
1079 1079
1080 u0va = vmmap; 1080 u0va = vmmap;
1081 1081
1082 BDPRINTF(PDB_BOOT1, 1082 BDPRINTF(PDB_BOOT1,
1083 ("Inserting lwp0 USPACE into pmap_kernel() at %p\n", 1083 ("Inserting lwp0 USPACE into pmap_kernel() at %p\n",
1084 vmmap)); 1084 vmmap));
1085 1085
1086 while (vmmap < u0va + 2*USPACE) { 1086 while (vmmap < u0va + 2*USPACE) {
1087 int64_t data1; 1087 int64_t data1;
1088 1088
1089 if (!pmap_get_page(&pa)) 1089 if (!pmap_get_page(&pa))
1090 panic("pmap_bootstrap: no pages"); 1090 panic("pmap_bootstrap: no pages");
1091 prom_map_phys(pa, PAGE_SIZE, vmmap, -1); 1091 prom_map_phys(pa, PAGE_SIZE, vmmap, -1);
1092 data1 = TSB_DATA(0 /* global */, 1092 data1 = TSB_DATA(0 /* global */,
1093 PGSZ_8K, 1093 PGSZ_8K,
1094 pa, 1094 pa,
1095 1 /* priv */, 1095 1 /* priv */,
1096 1 /* Write */, 1096 1 /* Write */,
1097 1 /* Cacheable */, 1097 1 /* Cacheable */,
1098 FORCE_ALIAS /* ALIAS -- Disable D$ */, 1098 FORCE_ALIAS /* ALIAS -- Disable D$ */,
1099 1 /* valid */, 1099 1 /* valid */,
1100 0 /* IE */); 1100 0 /* IE */);
1101 pmap_enter_kpage(vmmap, data1); 1101 pmap_enter_kpage(vmmap, data1);
1102 vmmap += PAGE_SIZE; 1102 vmmap += PAGE_SIZE;
1103 } 1103 }
1104 BDPRINTF(PDB_BOOT1, 1104 BDPRINTF(PDB_BOOT1,
1105 ("Done inserting stack 0 into pmap_kernel()\n")); 1105 ("Done inserting stack 0 into pmap_kernel()\n"));
1106 1106
1107 /* Now map in and initialize our cpu_info structure */ 1107 /* Now map in and initialize our cpu_info structure */
1108#ifdef DIAGNOSTIC 1108#ifdef DIAGNOSTIC
1109 vmmap += PAGE_SIZE; /* redzone -- XXXX do we need one? */ 1109 vmmap += PAGE_SIZE; /* redzone -- XXXX do we need one? */
1110#endif 1110#endif
1111 if ((vmmap ^ INTSTACK) & VA_ALIAS_MASK) 1111 if ((vmmap ^ INTSTACK) & VA_ALIAS_MASK)
1112 vmmap += PAGE_SIZE; /* Matchup virtual color for D$ */ 1112 vmmap += PAGE_SIZE; /* Matchup virtual color for D$ */
1113 intstk = vmmap; 1113 intstk = vmmap;
1114 cpus = (struct cpu_info *)(intstk + CPUINFO_VA - INTSTACK); 1114 cpus = (struct cpu_info *)(intstk + CPUINFO_VA - INTSTACK);
1115 1115
1116 BDPRINTF(PDB_BOOT1, 1116 BDPRINTF(PDB_BOOT1,
1117 ("Inserting cpu_info into pmap_kernel() at %p\n", 1117 ("Inserting cpu_info into pmap_kernel() at %p\n",
1118 cpus)); 1118 cpus));
1119 /* Now map in all 8 pages of interrupt stack/cpu_info */ 1119 /* Now map in all 8 pages of interrupt stack/cpu_info */
1120 pa = cpu0paddr; 1120 pa = cpu0paddr;
1121 prom_map_phys(pa, 64*KB, vmmap, -1); 1121 prom_map_phys(pa, 64*KB, vmmap, -1);
1122 1122
1123 /* 1123 /*
1124 * Also map it in as the interrupt stack. 1124 * Also map it in as the interrupt stack.
1125 * This lets the PROM see this if needed. 1125 * This lets the PROM see this if needed.
1126 * 1126 *
1127 * XXXX locore.s does not flush these mappings 1127 * XXXX locore.s does not flush these mappings
1128 * before installing the locked TTE. 1128 * before installing the locked TTE.
1129 */ 1129 */
1130 prom_map_phys(pa, 64*KB, INTSTACK, -1); 1130 prom_map_phys(pa, 64*KB, INTSTACK, -1);
1131 for (i = 0; i < 8; i++) { 1131 for (i = 0; i < 8; i++) {
1132 int64_t data1; 1132 int64_t data1;
1133 1133
1134 data1 = TSB_DATA(0 /* global */, 1134 data1 = TSB_DATA(0 /* global */,
1135 PGSZ_8K, 1135 PGSZ_8K,
1136 pa, 1136 pa,
1137 1 /* priv */, 1137 1 /* priv */,
1138 1 /* Write */, 1138 1 /* Write */,
1139 1 /* Cacheable */, 1139 1 /* Cacheable */,
1140 FORCE_ALIAS /* ALIAS -- Disable D$ */, 1140 FORCE_ALIAS /* ALIAS -- Disable D$ */,
1141 1 /* valid */, 1141 1 /* valid */,
1142 0 /* IE */); 1142 0 /* IE */);
1143 pmap_enter_kpage(vmmap, data1); 1143 pmap_enter_kpage(vmmap, data1);
1144 vmmap += PAGE_SIZE; 1144 vmmap += PAGE_SIZE;
1145 pa += PAGE_SIZE; 1145 pa += PAGE_SIZE;
1146 } 1146 }
1147 BDPRINTF(PDB_BOOT1, ("Initializing cpu_info\n")); 1147 BDPRINTF(PDB_BOOT1, ("Initializing cpu_info\n"));
1148 1148
1149 /* Initialize our cpu_info structure */ 1149 /* Initialize our cpu_info structure */
1150 memset((void *)intstk, 0, 64 * KB); 1150 memset((void *)intstk, 0, 64 * KB);
1151 cpus->ci_self = cpus; 1151 cpus->ci_self = cpus;
1152 cpus->ci_next = NULL; 1152 cpus->ci_next = NULL;
1153 cpus->ci_curlwp = &lwp0; 1153 cpus->ci_curlwp = &lwp0;
1154 cpus->ci_flags = CPUF_PRIMARY; 1154 cpus->ci_flags = CPUF_PRIMARY;
1155 cpus->ci_cpuid = cpu_myid(); 1155 cpus->ci_cpuid = cpu_myid();
1156 cpus->ci_fplwp = NULL; 1156 cpus->ci_fplwp = NULL;
1157 cpus->ci_eintstack = NULL; 1157 cpus->ci_eintstack = NULL;
1158 cpus->ci_spinup = main; /* Call main when we're running. */ 1158 cpus->ci_spinup = main; /* Call main when we're running. */
1159 cpus->ci_paddr = cpu0paddr; 1159 cpus->ci_paddr = cpu0paddr;
1160#ifdef SUN4V 1160#ifdef SUN4V
1161 if ( CPU_ISSUN4V ) 1161 if (CPU_ISSUN4V)
1162 cpus->ci_mmfsa = cpu0paddr; 1162 cpus->ci_mmfsa = cpu0paddr;
1163#endif 1163#endif
1164 cpus->ci_cpcb = (struct pcb *)u0va; 1164 cpus->ci_cpcb = (struct pcb *)u0va;
1165 cpus->ci_idepth = -1; 1165 cpus->ci_idepth = -1;
1166 memset(cpus->ci_intrpending, -1, sizeof(cpus->ci_intrpending)); 1166 memset(cpus->ci_intrpending, -1, sizeof(cpus->ci_intrpending));
1167 1167
1168 uvm_lwp_setuarea(&lwp0, u0va); 1168 uvm_lwp_setuarea(&lwp0, u0va);
1169 lwp0.l_md.md_tf = (struct trapframe64*)(u0va + USPACE 1169 lwp0.l_md.md_tf = (struct trapframe64*)(u0va + USPACE
1170 - sizeof(struct trapframe64)); 1170 - sizeof(struct trapframe64));
1171 1171
1172 cpu0paddr += 64 * KB; 1172 cpu0paddr += 64 * KB;
1173 1173
1174 CPUSET_CLEAR(cpus_active); 1174 CPUSET_CLEAR(cpus_active);
1175 CPUSET_ADD(cpus_active, 0); 1175 CPUSET_ADD(cpus_active, 0);
1176 1176
1177 cpu_pmap_prepare(cpus, true); 1177 cpu_pmap_prepare(cpus, true);
1178 cpu_pmap_init(cpus); 1178 cpu_pmap_init(cpus);
1179 1179
1180 /* The rest will be done at CPU attach time. */ 1180 /* The rest will be done at CPU attach time. */
1181 BDPRINTF(PDB_BOOT1, 1181 BDPRINTF(PDB_BOOT1,
1182 ("Done inserting cpu_info into pmap_kernel()\n")); 1182 ("Done inserting cpu_info into pmap_kernel()\n"));
1183 } 1183 }
1184 1184
1185 vmmap = (vaddr_t)reserve_dumppages((void *)(u_long)vmmap); 1185 vmmap = (vaddr_t)reserve_dumppages((void *)(u_long)vmmap);
1186 1186
1187#ifdef MODULAR 1187#ifdef MODULAR
1188 /* 1188 /*
1189 * For 32bit kernels: 1189 * For 32bit kernels:
1190 * Reserve 16 MB of VA for module loading. Right now our full 1190 * Reserve 16 MB of VA for module loading. Right now our full
1191 * GENERIC kernel is about 13 MB, so this looks good enough. 1191 * GENERIC kernel is about 13 MB, so this looks good enough.
1192 * For 64bit kernels: 1192 * For 64bit kernels:
1193 * We can use all the space left before the special addresses, 1193 * We can use all the space left before the special addresses,
1194 * but leave 2 pages at vmmap alone (see pmap_virtual_space) 1194 * but leave 2 pages at vmmap alone (see pmap_virtual_space)
1195 * and another red zone page. 1195 * and another red zone page.
1196 */ 1196 */
1197#ifdef __arch64__ 1197#ifdef __arch64__
1198 module_start = vmmap + 3*PAGE_SIZE; 1198 module_start = vmmap + 3*PAGE_SIZE;
1199 module_end = 0x08000000; /* keep all modules within 2GB */ 1199 module_end = 0x08000000; /* keep all modules within 2GB */
1200 KASSERT(module_end < KERNEND); /* of kernel text */ 1200 KASSERT(module_end < KERNEND); /* of kernel text */
1201#else 1201#else
1202 module_start = vmmap; 1202 module_start = vmmap;
1203 vmmap += 16 * 1024*1024; 1203 vmmap += 16 * 1024*1024;
1204 module_end = vmmap; 1204 module_end = vmmap;
1205#endif 1205#endif
1206#endif 1206#endif
1207 1207
1208 /* 1208 /*
1209 * Set up bounds of allocatable memory for vmstat et al. 1209 * Set up bounds of allocatable memory for vmstat et al.
1210 */ 1210 */
1211 avail_start = avail->start; 1211 avail_start = avail->start;
1212 for (mp = avail; mp->size; mp++) 1212 for (mp = avail; mp->size; mp++)
1213 avail_end = mp->start+mp->size; 1213 avail_end = mp->start+mp->size;
1214 1214
1215 BDPRINTF(PDB_BOOT1, ("Finished pmap_bootstrap()\n")); 1215 BDPRINTF(PDB_BOOT1, ("Finished pmap_bootstrap()\n"));
1216 1216
1217 BDPRINTF(PDB_BOOT, ("left kdata: %" PRId64 " @%" PRIx64 ".\n", 1217 BDPRINTF(PDB_BOOT, ("left kdata: %" PRId64 " @%" PRIx64 ".\n",
1218 kdata_mem_pool.size, kdata_mem_pool.start)); 1218 kdata_mem_pool.size, kdata_mem_pool.start));
1219} 1219}
1220 1220
1221/* 1221/*
1222 * Allocate TSBs for both mmus from the locked kernel data segment page. 1222 * Allocate TSBs for both mmus from the locked kernel data segment page.
1223 * This is run before the cpu itself is activated (or by the first cpu 1223 * This is run before the cpu itself is activated (or by the first cpu
1224 * itself) 1224 * itself)
1225 */ 1225 */
1226void 1226void
1227cpu_pmap_prepare(struct cpu_info *ci, bool initial) 1227cpu_pmap_prepare(struct cpu_info *ci, bool initial)
1228{ 1228{
1229 /* allocate our TSBs */ 1229 /* allocate our TSBs */
1230 ci->ci_tsb_dmmu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE); 1230 ci->ci_tsb_dmmu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE);
1231 ci->ci_tsb_immu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE); 1231 ci->ci_tsb_immu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE);
1232 memset(ci->ci_tsb_dmmu, 0, TSBSIZE); 1232 memset(ci->ci_tsb_dmmu, 0, TSBSIZE);
1233 memset(ci->ci_tsb_immu, 0, TSBSIZE); 1233 memset(ci->ci_tsb_immu, 0, TSBSIZE);
1234 if (!initial) { 1234 if (!initial) {
1235 KASSERT(ci != curcpu()); 1235 KASSERT(ci != curcpu());
1236 /* 1236 /*
1237 * Initially share ctxbusy with the boot cpu, the 1237 * Initially share ctxbusy with the boot cpu, the
1238 * cpu will replace it as soon as it runs (and can 1238 * cpu will replace it as soon as it runs (and can
1239 * probe the number of available contexts itself). 1239 * probe the number of available contexts itself).
1240 * Until then only context 0 (aka kernel) will be 1240 * Until then only context 0 (aka kernel) will be
1241 * referenced anyway. 1241 * referenced anyway.
1242 */ 1242 */
1243 ci->ci_numctx = curcpu()->ci_numctx; 1243 ci->ci_numctx = curcpu()->ci_numctx;
1244 ci->ci_ctxbusy = curcpu()->ci_ctxbusy; 1244 ci->ci_ctxbusy = curcpu()->ci_ctxbusy;
1245 } 1245 }
1246 1246
1247#ifdef SUN4V 1247#ifdef SUN4V
1248 if (initial && CPU_ISSUN4V) { 1248 if (initial && CPU_ISSUN4V) {
1249 tsb_desc = (struct tsb_desc *)kdata_alloc( 1249 tsb_desc = (struct tsb_desc *)kdata_alloc(
1250 sizeof(struct tsb_desc), 16); 1250 sizeof(struct tsb_desc), 16);
1251 memset(tsb_desc, 0, sizeof(struct tsb_desc)); 1251 memset(tsb_desc, 0, sizeof(struct tsb_desc));
1252 /* 8K page size used for TSB index computation */ 1252 /* 8K page size used for TSB index computation */
1253 tsb_desc->td_idxpgsz = 0; 1253 tsb_desc->td_idxpgsz = 0;
1254 tsb_desc->td_assoc = 1; 1254 tsb_desc->td_assoc = 1;
1255 tsb_desc->td_size = TSBENTS; 1255 tsb_desc->td_size = TSBENTS;
1256 tsb_desc->td_ctxidx = -1; 1256 tsb_desc->td_ctxidx = -1;
1257 tsb_desc->td_pgsz = 0xf; 1257 tsb_desc->td_pgsz = 0xf;
1258 tsb_desc->td_pa = pmap_kextract((vaddr_t)ci->ci_tsb_dmmu); 1258 tsb_desc->td_pa = pmap_kextract((vaddr_t)ci->ci_tsb_dmmu);
1259 BDPRINTF(PDB_BOOT1, ("cpu %d: TSB descriptor allocated at %p " 1259 BDPRINTF(PDB_BOOT1, ("cpu %d: TSB descriptor allocated at %p "
1260 "size %08x - td_pa at %p\n", 1260 "size %08x - td_pa at %p\n",
1261 ci->ci_index, tsb_desc, sizeof(struct tsb_desc), 1261 ci->ci_index, tsb_desc, sizeof(struct tsb_desc),
1262 tsb_desc->td_pa)); 1262 tsb_desc->td_pa));
1263  1263
1264 } 1264 }
1265#endif 1265#endif
1266 1266
1267 BDPRINTF(PDB_BOOT1, ("cpu %d: TSB allocated at %p/%p size %08x\n", 1267 BDPRINTF(PDB_BOOT1, ("cpu %d: TSB allocated at %p/%p size %08x\n",
1268 ci->ci_index, ci->ci_tsb_dmmu, ci->ci_tsb_immu, TSBSIZE)); 1268 ci->ci_index, ci->ci_tsb_dmmu, ci->ci_tsb_immu, TSBSIZE));
1269} 1269}
1270 1270
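Note that each TSB is requested with align equal to TSBSIZE, so kdata_alloc() hands back a table aligned to its own size; UltraSPARC TSB base registers generally require that natural alignment, which is presumably why the alignment argument matters here. As an illustrative back-of-the-envelope only: if TSBENTS were 512 and each TSB entry took 16 bytes (tag plus data), TSBSIZE would be 8 KB and the table would land on an 8 KB boundary.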
1271/* 1271/*
1272 * Initialize the per CPU parts for the cpu running this code. 1272 * Initialize the per CPU parts for the cpu running this code.
1273 */ 1273 */
1274void 1274void
1275cpu_pmap_init(struct cpu_info *ci) 1275cpu_pmap_init(struct cpu_info *ci)
1276{ 1276{
1277 size_t ctxsize; 1277 size_t ctxsize;
1278 1278
1279 /* 1279 /*
1280 * We delay initialising ci_ctx_lock here as LOCKDEBUG isn't 1280 * We delay initialising ci_ctx_lock here as LOCKDEBUG isn't
1281 * running for cpu0 yet.. 1281 * running for cpu0 yet..
1282 */ 1282 */
1283 ci->ci_pmap_next_ctx = 1; 1283 ci->ci_pmap_next_ctx = 1;
1284 /* all SUN4U use 13 bit contexts - SUN4V use at least 13 bit contexts */ 1284 /* all SUN4U use 13 bit contexts - SUN4V use at least 13 bit contexts */
1285 ci->ci_numctx = 0x2000;  1285 ci->ci_numctx = 0x2000;
1286 ctxsize = sizeof(paddr_t)*ci->ci_numctx; 1286 ctxsize = sizeof(paddr_t)*ci->ci_numctx;
1287 ci->ci_ctxbusy = (paddr_t *)kdata_alloc(ctxsize, sizeof(uint64_t)); 1287 ci->ci_ctxbusy = (paddr_t *)kdata_alloc(ctxsize, sizeof(uint64_t));
1288 memset(ci->ci_ctxbusy, 0, ctxsize); 1288 memset(ci->ci_ctxbusy, 0, ctxsize);
1289 LIST_INIT(&ci->ci_pmap_ctxlist); 1289 LIST_INIT(&ci->ci_pmap_ctxlist);
1290 1290
1291 /* mark kernel context as busy */ 1291 /* mark kernel context as busy */
1292 ci->ci_ctxbusy[0] = pmap_kernel()->pm_physaddr; 1292 ci->ci_ctxbusy[0] = pmap_kernel()->pm_physaddr;
1293} 1293}
1294 1294
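With ci_numctx hard-wired to 0x2000, the per-CPU ctxbusy array costs sizeof(paddr_t) * 8192 bytes, which is 64 KB assuming an 8-byte paddr_t; slot 0 is immediately claimed for the kernel pmap, and ci_pmap_next_ctx starts at 1 so user contexts are handed out from there.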
1295/* 1295/*
1296 * Initialize anything else for pmap handling. 1296 * Initialize anything else for pmap handling.
1297 * Called during vm_init(). 1297 * Called during vm_init().
1298 */ 1298 */
1299void 1299void
1300pmap_init(void) 1300pmap_init(void)
1301{ 1301{
1302 struct vm_page *pg; 1302 struct vm_page *pg;
1303 struct pglist pglist; 1303 struct pglist pglist;
1304 uint64_t data; 1304 uint64_t data;
1305 paddr_t pa; 1305 paddr_t pa;
1306 psize_t size; 1306 psize_t size;
1307 vaddr_t va; 1307 vaddr_t va;
1308 1308
1309 BDPRINTF(PDB_BOOT1, ("pmap_init()\n")); 1309 BDPRINTF(PDB_BOOT1, ("pmap_init()\n"));
1310 1310
1311 size = sizeof(struct pv_entry) * physmem; 1311 size = sizeof(struct pv_entry) * physmem;
1312 if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1, 1312 if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
1313 (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0) 1313 (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
1314 panic("pmap_init: no memory"); 1314 panic("pmap_init: no memory");
1315 1315
1316 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY); 1316 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
1317 if (va == 0) 1317 if (va == 0)
1318 panic("pmap_init: no memory"); 1318 panic("pmap_init: no memory");
1319 1319
1320 /* Map the pages */ 1320 /* Map the pages */
1321 TAILQ_FOREACH(pg, &pglist, pageq.queue) { 1321 TAILQ_FOREACH(pg, &pglist, pageq.queue) {
1322 pa = VM_PAGE_TO_PHYS(pg); 1322 pa = VM_PAGE_TO_PHYS(pg);
1323 pmap_zero_page(pa); 1323 pmap_zero_page(pa);
1324 data = TSB_DATA(0 /* global */, 1324 data = TSB_DATA(0 /* global */,
1325 PGSZ_8K, 1325 PGSZ_8K,
1326 pa, 1326 pa,
1327 1 /* priv */, 1327 1 /* priv */,
1328 1 /* Write */, 1328 1 /* Write */,
1329 1 /* Cacheable */, 1329 1 /* Cacheable */,
1330 FORCE_ALIAS /* ALIAS -- Disable D$ */, 1330 FORCE_ALIAS /* ALIAS -- Disable D$ */,
1331 1 /* valid */, 1331 1 /* valid */,
1332 0 /* IE */); 1332 0 /* IE */);
1333 pmap_enter_kpage(va, data); 1333 pmap_enter_kpage(va, data);
1334 va += PAGE_SIZE; 1334 va += PAGE_SIZE;
1335 } 1335 }
1336 1336
1337 /* 1337 /*
1338 * initialize the pmap pools. 1338 * initialize the pmap pools.
1339 */ 1339 */
1340 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 1340 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap),
1341 SPARC64_BLOCK_SIZE, 0, 0, "pmappl", NULL, IPL_NONE, NULL, NULL, 1341 SPARC64_BLOCK_SIZE, 0, 0, "pmappl", NULL, IPL_NONE, NULL, NULL,
1342 NULL); 1342 NULL);
1343 pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0, 1343 pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0,
1344 PR_LARGECACHE, "pv_entry", NULL, IPL_NONE, NULL, NULL, NULL); 1344 PR_LARGECACHE, "pv_entry", NULL, IPL_NONE, NULL, NULL, NULL);
1345 1345
1346 vm_first_phys = avail_start; 1346 vm_first_phys = avail_start;
1347 vm_num_phys = avail_end - avail_start; 1347 vm_num_phys = avail_end - avail_start;
1348 1348
1349 mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE); 1349 mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE);
1350#if defined(USE_LOCKSAFE_PSEG_GETSET) 1350#if defined(USE_LOCKSAFE_PSEG_GETSET)
1351 mutex_init(&pseg_lock, MUTEX_SPIN, IPL_VM); 1351 mutex_init(&pseg_lock, MUTEX_SPIN, IPL_VM);
1352#endif 1352#endif
1353 lock_available = true; 1353 lock_available = true;
1354} 1354}
1355 1355
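The pv_entry table is sized as one entry per physical page: sizeof(struct pv_entry) * physmem, with physmem counted in pages. As an illustrative example only (the structure size below is a stand-in, not the real one): 512 MB of RAM in 8 KB pages is 65536 pages, so a hypothetical 32-byte pv_entry would pin about 2 MB of wired kernel memory for this table.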
1356/* 1356/*
1357 * How much virtual space is available to the kernel? 1357 * How much virtual space is available to the kernel?
1358 */ 1358 */
1359static vaddr_t kbreak; /* End of kernel VA */ 1359static vaddr_t kbreak; /* End of kernel VA */
1360void 1360void
1361pmap_virtual_space(vaddr_t *start, vaddr_t *end) 1361pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1362{ 1362{
1363 1363
1364 /* 1364 /*
1365 * Reserve one segment for kernel virtual memory. 1365 * Reserve one segment for kernel virtual memory.
1366 */ 1366 */
1367#ifdef __arch64__ 1367#ifdef __arch64__
1368 /* 1368 /*
1369 * On 64 bit kernels, start it beyond firmware, so 1369 * On 64 bit kernels, start it beyond firmware, so
1370 * we are basically unrestricted. 1370 * we are basically unrestricted.
1371 */ 1371 */
1372 *start = kbreak = VM_KERNEL_MEM_VA_START; 1372 *start = kbreak = VM_KERNEL_MEM_VA_START;
1373 *end = VM_MAX_KERNEL_ADDRESS; 1373 *end = VM_MAX_KERNEL_ADDRESS;
1374#else 1374#else
1375 /* 1375 /*
1376 * Reserve two pages for pmap_copy_page && /dev/mem, but otherwise 1376 * Reserve two pages for pmap_copy_page && /dev/mem, but otherwise
1377 * end it beyond the iospace and other special fixed addresses. 1377 * end it beyond the iospace and other special fixed addresses.
1378 */ 1378 */
1379 *start = kbreak = (vaddr_t)(vmmap + 2*PAGE_SIZE); 1379 *start = kbreak = (vaddr_t)(vmmap + 2*PAGE_SIZE);
1380 *end = VM_MAX_KERNEL_ADDRESS; 1380 *end = VM_MAX_KERNEL_ADDRESS;
1381#endif 1381#endif
1382 BDPRINTF(PDB_BOOT1, ("pmap_virtual_space: %x-%x\n", *start, *end)); 1382 BDPRINTF(PDB_BOOT1, ("pmap_virtual_space: %x-%x\n", *start, *end));
1383} 1383}
1384 1384
1385/* 1385/*
1386 * Preallocate kernel page tables to a specified VA. 1386 * Preallocate kernel page tables to a specified VA.
1387 * This simply loops through the first TTE for each 1387 * This simply loops through the first TTE for each
1388 * page table from the beginning of the kernel pmap, 1388 * page table from the beginning of the kernel pmap,
1389 * reads the entry, and if the result is 1389 * reads the entry, and if the result is
1390 * zero (either invalid entry or no page table) it stores 1390 * zero (either invalid entry or no page table) it stores
1391 * a zero there, populating page tables in the process. 1391 * a zero there, populating page tables in the process.
1392 * This is not the most efficient technique but I don't 1392 * This is not the most efficient technique but I don't
1393 * expect it to be called that often. 1393 * expect it to be called that often.
1394 */ 1394 */
1395vaddr_t 1395vaddr_t
1396pmap_growkernel(vaddr_t maxkvaddr) 1396pmap_growkernel(vaddr_t maxkvaddr)
1397{ 1397{
1398 struct pmap *pm = pmap_kernel(); 1398 struct pmap *pm = pmap_kernel();
1399 paddr_t pa; 1399 paddr_t pa;
1400 1400
1401 if (maxkvaddr >= VM_MAX_KERNEL_ADDRESS) { 1401 if (maxkvaddr >= VM_MAX_KERNEL_ADDRESS) {
1402 printf("WARNING: cannot extend kernel pmap beyond %p to %p\n", 1402 printf("WARNING: cannot extend kernel pmap beyond %p to %p\n",
1403 (void *)VM_MAX_KERNEL_ADDRESS, (void *)maxkvaddr); 1403 (void *)VM_MAX_KERNEL_ADDRESS, (void *)maxkvaddr);
1404 return (kbreak); 1404 return (kbreak);
1405 } 1405 }
1406 DPRINTF(PDB_GROW, ("pmap_growkernel(%lx...%lx)\n", kbreak, maxkvaddr)); 1406 DPRINTF(PDB_GROW, ("pmap_growkernel(%lx...%lx)\n", kbreak, maxkvaddr));
1407 /* Align with the start of a page table */ 1407 /* Align with the start of a page table */
1408 for (kbreak &= (-1 << PDSHIFT); kbreak < maxkvaddr; 1408 for (kbreak &= (-1 << PDSHIFT); kbreak < maxkvaddr;
1409 kbreak += (1 << PDSHIFT)) { 1409 kbreak += (1 << PDSHIFT)) {
1410 if (pseg_get(pm, kbreak) & TLB_V) 1410 if (pseg_get(pm, kbreak) & TLB_V)
1411 continue; 1411 continue;
1412 1412
1413 pa = 0; 1413 pa = 0;
1414 while (pseg_set(pm, kbreak, 0, pa) & 1) { 1414 while (pseg_set(pm, kbreak, 0, pa) & 1) {
1415 DPRINTF(PDB_GROW, 1415 DPRINTF(PDB_GROW,
1416 ("pmap_growkernel: extending %lx\n", kbreak)); 1416 ("pmap_growkernel: extending %lx\n", kbreak));
1417 pa = 0; 1417 pa = 0;
1418 if (!pmap_get_page(&pa)) 1418 if (!pmap_get_page(&pa))
1419 panic("pmap_growkernel: no pages"); 1419 panic("pmap_growkernel: no pages");
1420 ENTER_STAT(ptpneeded); 1420 ENTER_STAT(ptpneeded);
1421 } 1421 }
1422 } 1422 }
1423 return (kbreak); 1423 return (kbreak);
1424} 1424}
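
The loop in pmap_growkernel() above walks kernel VA one page table at a time: it aligns kbreak down to a page-table boundary and then steps in page-table-sized increments. Below is a minimal user-space sketch of just that arithmetic; the PDSHIFT value is made up for illustration (the real one comes from the sparc64 pmap headers), and an unsigned mask stands in for the signed shift used in the kernel source.

#include <stdio.h>

/* Hypothetical span covered by one page table; the kernel's PDSHIFT differs. */
#define PDSHIFT 23

int
main(void)
{
	unsigned long kbreak = 0x12345678UL;
	unsigned long maxkvaddr = kbreak + (3UL << PDSHIFT);

	/* Align down to the start of a page table, then step one table at a time. */
	for (kbreak &= ~((1UL << PDSHIFT) - 1); kbreak < maxkvaddr;
	    kbreak += (1UL << PDSHIFT))
		printf("would ensure a page table covering %#lx\n", kbreak);
	return 0;
}

Each iteration corresponds to one pseg_get()/pseg_set() probe in the real function.
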
1425 1425
1426/* 1426/*
1427 * Create and return a physical map. 1427 * Create and return a physical map.
1428 */ 1428 */
1429struct pmap * 1429struct pmap *
1430pmap_create(void) 1430pmap_create(void)
1431{ 1431{
1432 struct pmap *pm; 1432 struct pmap *pm;
1433 1433
1434 DPRINTF(PDB_CREATE, ("pmap_create()\n")); 1434 DPRINTF(PDB_CREATE, ("pmap_create()\n"));
1435 1435
1436 pm = pool_cache_get(&pmap_cache, PR_WAITOK); 1436 pm = pool_cache_get(&pmap_cache, PR_WAITOK);
1437 memset(pm, 0, sizeof *pm); 1437 memset(pm, 0, sizeof *pm);
1438 DPRINTF(PDB_CREATE, ("pmap_create(): created %p\n", pm)); 1438 DPRINTF(PDB_CREATE, ("pmap_create(): created %p\n", pm));
1439 1439
1440 mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE); 1440 mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
1441 uvm_obj_init(&pm->pm_obj, NULL, false, 1); 1441 uvm_obj_init(&pm->pm_obj, NULL, false, 1);
1442 uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock); 1442 uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
1443 1443
1444 if (pm != pmap_kernel()) { 1444 if (pm != pmap_kernel()) {
1445 while (!pmap_get_page(&pm->pm_physaddr)) { 1445 while (!pmap_get_page(&pm->pm_physaddr)) {
1446 uvm_wait("pmap_create"); 1446 uvm_wait("pmap_create");
1447 } 1447 }
1448 pm->pm_segs = (paddr_t *)(u_long)pm->pm_physaddr; 1448 pm->pm_segs = (paddr_t *)(u_long)pm->pm_physaddr;
1449 } 1449 }
1450 DPRINTF(PDB_CREATE, ("pmap_create(%p): ctx %d\n", pm, pmap_ctx(pm))); 1450 DPRINTF(PDB_CREATE, ("pmap_create(%p): ctx %d\n", pm, pmap_ctx(pm)));
1451 return pm; 1451 return pm;
1452} 1452}
1453 1453
1454/* 1454/*
1455 * Add a reference to the given pmap. 1455 * Add a reference to the given pmap.
1456 */ 1456 */
1457void 1457void
1458pmap_reference(struct pmap *pm) 1458pmap_reference(struct pmap *pm)
1459{ 1459{
1460 1460
1461 atomic_inc_uint(&pm->pm_refs); 1461 atomic_inc_uint(&pm->pm_refs);
1462} 1462}
1463 1463
1464/* 1464/*
1465 * Retire the given pmap from service. 1465 * Retire the given pmap from service.
1466 * Should only be called if the map contains no valid mappings. 1466 * Should only be called if the map contains no valid mappings.
1467 */ 1467 */
1468void 1468void
1469pmap_destroy(struct pmap *pm) 1469pmap_destroy(struct pmap *pm)
1470{ 1470{
1471#ifdef MULTIPROCESSOR 1471#ifdef MULTIPROCESSOR
1472 struct cpu_info *ci; 1472 struct cpu_info *ci;
1473 sparc64_cpuset_t pmap_cpus_active; 1473 sparc64_cpuset_t pmap_cpus_active;
1474#else 1474#else
1475#define pmap_cpus_active 0 1475#define pmap_cpus_active 0
1476#endif 1476#endif
1477 struct vm_page *pg, *nextpg; 1477 struct vm_page *pg, *nextpg;
1478 1478
1479 if ((int)atomic_dec_uint_nv(&pm->pm_refs) > 0) { 1479 if ((int)atomic_dec_uint_nv(&pm->pm_refs) > 0) {
1480 return; 1480 return;
1481 } 1481 }
1482 DPRINTF(PDB_DESTROY, ("pmap_destroy: freeing pmap %p\n", pm)); 1482 DPRINTF(PDB_DESTROY, ("pmap_destroy: freeing pmap %p\n", pm));
1483#ifdef MULTIPROCESSOR 1483#ifdef MULTIPROCESSOR
1484 CPUSET_CLEAR(pmap_cpus_active); 1484 CPUSET_CLEAR(pmap_cpus_active);
1485 for (ci = cpus; ci != NULL; ci = ci->ci_next) { 1485 for (ci = cpus; ci != NULL; ci = ci->ci_next) {
1486 /* XXXMRG: Move the lock inside one or both tests? */ 1486 /* XXXMRG: Move the lock inside one or both tests? */
1487 mutex_enter(&ci->ci_ctx_lock); 1487 mutex_enter(&ci->ci_ctx_lock);
1488 if (CPUSET_HAS(cpus_active, ci->ci_index)) { 1488 if (CPUSET_HAS(cpus_active, ci->ci_index)) {
1489 if (pm->pm_ctx[ci->ci_index] > 0) { 1489 if (pm->pm_ctx[ci->ci_index] > 0) {
1490 CPUSET_ADD(pmap_cpus_active, ci->ci_index); 1490 CPUSET_ADD(pmap_cpus_active, ci->ci_index);
1491 ctx_free(pm, ci); 1491 ctx_free(pm, ci);
1492 } 1492 }
1493 } 1493 }
1494 mutex_exit(&ci->ci_ctx_lock); 1494 mutex_exit(&ci->ci_ctx_lock);
1495 } 1495 }
1496#else 1496#else
1497 if (pmap_ctx(pm)) { 1497 if (pmap_ctx(pm)) {
1498 mutex_enter(&curcpu()->ci_ctx_lock); 1498 mutex_enter(&curcpu()->ci_ctx_lock);
1499 ctx_free(pm, curcpu()); 1499 ctx_free(pm, curcpu());
1500 mutex_exit(&curcpu()->ci_ctx_lock); 1500 mutex_exit(&curcpu()->ci_ctx_lock);
1501 } 1501 }
1502#endif 1502#endif
1503 1503
1504 /* we could be a little smarter and leave pages zeroed */ 1504 /* we could be a little smarter and leave pages zeroed */
1505 for (pg = TAILQ_FIRST(&pm->pm_obj.memq); pg != NULL; pg = nextpg) { 1505 for (pg = TAILQ_FIRST(&pm->pm_obj.memq); pg != NULL; pg = nextpg) {
1506#ifdef DIAGNOSTIC 1506#ifdef DIAGNOSTIC
1507 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 1507 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
1508#endif 1508#endif
1509 1509
1510 KASSERT((pg->flags & PG_MARKER) == 0); 1510 KASSERT((pg->flags & PG_MARKER) == 0);
1511 nextpg = TAILQ_NEXT(pg, listq.queue); 1511 nextpg = TAILQ_NEXT(pg, listq.queue);
1512 TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue); 1512 TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue);
1513 KASSERT(md->mdpg_pvh.pv_pmap == NULL); 1513 KASSERT(md->mdpg_pvh.pv_pmap == NULL);
1514 dcache_flush_page_cpuset(VM_PAGE_TO_PHYS(pg), pmap_cpus_active); 1514 dcache_flush_page_cpuset(VM_PAGE_TO_PHYS(pg), pmap_cpus_active);
1515 uvm_pagefree(pg); 1515 uvm_pagefree(pg);
1516 } 1516 }
1517 pmap_free_page((paddr_t)(u_long)pm->pm_segs, pmap_cpus_active); 1517 pmap_free_page((paddr_t)(u_long)pm->pm_segs, pmap_cpus_active);
1518 1518
1519 uvm_obj_destroy(&pm->pm_obj, false); 1519 uvm_obj_destroy(&pm->pm_obj, false);
1520 mutex_destroy(&pm->pm_obj_lock); 1520 mutex_destroy(&pm->pm_obj_lock);
1521 pool_cache_put(&pmap_cache, pm); 1521 pool_cache_put(&pmap_cache, pm);
1522} 1522}
1523 1523
1524/* 1524/*
1525 * Copy the range specified by src_addr/len 1525 * Copy the range specified by src_addr/len
1526 * from the source map to the range dst_addr/len 1526 * from the source map to the range dst_addr/len
1527 * in the destination map. 1527 * in the destination map.
1528 * 1528 *
1529 * This routine is only advisory and need not do anything. 1529 * This routine is only advisory and need not do anything.
1530 */ 1530 */
1531void 1531void
1532pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr, vsize_t len, vaddr_t src_addr) 1532pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr, vsize_t len, vaddr_t src_addr)
1533{ 1533{
1534 1534
1535 DPRINTF(PDB_CREATE, ("pmap_copy(%p, %p, %p, %lx, %p)\n", 1535 DPRINTF(PDB_CREATE, ("pmap_copy(%p, %p, %p, %lx, %p)\n",
1536 dst_pmap, src_pmap, (void *)(u_long)dst_addr, 1536 dst_pmap, src_pmap, (void *)(u_long)dst_addr,
1537 (u_long)len, (void *)(u_long)src_addr)); 1537 (u_long)len, (void *)(u_long)src_addr));
1538} 1538}
1539 1539
1540/* 1540/*
1541 * Activate the address space for the specified process. If the 1541 * Activate the address space for the specified process. If the
1542 * process is the current process, load the new MMU context. 1542 * process is the current process, load the new MMU context.
1543 */ 1543 */
1544void 1544void
1545pmap_activate(struct lwp *l) 1545pmap_activate(struct lwp *l)
1546{ 1546{
1547 struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap; 1547 struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
1548 1548
1549 if (pmap == pmap_kernel()) { 1549 if (pmap == pmap_kernel()) {
1550 return; 1550 return;
1551 } 1551 }
1552 1552
1553 /* 1553 /*
1554 * This is essentially the same thing that happens in cpu_switchto() 1554 * This is essentially the same thing that happens in cpu_switchto()
1555 * when the newly selected process is about to run, except that we 1555 * when the newly selected process is about to run, except that we
1556 * have to make sure to clean the register windows before we set 1556 * have to make sure to clean the register windows before we set
1557 * the new context. 1557 * the new context.
1558 */ 1558 */
1559 1559
1560 if (l != curlwp) { 1560 if (l != curlwp) {
1561 return; 1561 return;
1562 } 1562 }
1563 write_user_windows(); 1563 write_user_windows();
1564 pmap_activate_pmap(pmap); 1564 pmap_activate_pmap(pmap);
1565} 1565}
1566 1566
1567void 1567void
1568pmap_activate_pmap(struct pmap *pmap) 1568pmap_activate_pmap(struct pmap *pmap)
1569{ 1569{
1570 1570
1571 if (pmap_ctx(pmap) == 0) { 1571 if (pmap_ctx(pmap) == 0) {
1572 (void) ctx_alloc(pmap); 1572 (void) ctx_alloc(pmap);
1573 } 1573 }
1574 DPRINTF(PDB_ACTIVATE, 1574 DPRINTF(PDB_ACTIVATE,
1575 ("%s: cpu%d activating ctx %d\n", __func__, 1575 ("%s: cpu%d activating ctx %d\n", __func__,
1576 cpu_number(), pmap_ctx(pmap))); 1576 cpu_number(), pmap_ctx(pmap)));
1577 dmmu_set_secondary_context(pmap_ctx(pmap)); 1577 dmmu_set_secondary_context(pmap_ctx(pmap));
1578} 1578}
1579 1579
1580/* 1580/*
1581 * Deactivate the address space of the specified process. 1581 * Deactivate the address space of the specified process.
1582 */ 1582 */
1583void 1583void
1584pmap_deactivate(struct lwp *l) 1584pmap_deactivate(struct lwp *l)
1585{ 1585{
1586 1586
1587 DPRINTF(PDB_ACTIVATE, 1587 DPRINTF(PDB_ACTIVATE,
1588 ("%s: cpu%d deactivating ctx %d\n", __func__, 1588 ("%s: cpu%d deactivating ctx %d\n", __func__,
1589 cpu_number(), pmap_ctx(l->l_proc->p_vmspace->vm_map.pmap))); 1589 cpu_number(), pmap_ctx(l->l_proc->p_vmspace->vm_map.pmap)));
1590} 1590}
1591 1591
1592/* 1592/*
1593 * pmap_kenter_pa: [ INTERFACE ] 1593 * pmap_kenter_pa: [ INTERFACE ]
1594 * 1594 *
1595 * Enter a va -> pa mapping into the kernel pmap without any 1595 * Enter a va -> pa mapping into the kernel pmap without any
1596 * physical->virtual tracking. 1596 * physical->virtual tracking.
1597 * 1597 *
1598 * Note: no locking is necessary in this function. 1598 * Note: no locking is necessary in this function.
1599 */ 1599 */
1600void 1600void
1601pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1601pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1602{ 1602{
1603 pte_t tte; 1603 pte_t tte;
1604 paddr_t ptp; 1604 paddr_t ptp;
1605 struct pmap *pm = pmap_kernel(); 1605 struct pmap *pm = pmap_kernel();
1606 int i; 1606 int i;
1607 1607
1608 KASSERT(va < INTSTACK || va > EINTSTACK); 1608 KASSERT(va < INTSTACK || va > EINTSTACK);
1609 KASSERT(va < kdata || va > ekdata); 1609 KASSERT(va < kdata || va > ekdata);
1610 1610
1611 /* 1611 /*
1612 * Construct the TTE. 1612 * Construct the TTE.
1613 */ 1613 */
1614 1614
1615 ENTER_STAT(unmanaged); 1615 ENTER_STAT(unmanaged);
1616 if (pa & (PMAP_NVC|PMAP_NC)) { 1616 if (pa & (PMAP_NVC|PMAP_NC)) {
1617 ENTER_STAT(ci); 1617 ENTER_STAT(ci);
1618 } 1618 }
1619 1619
1620 tte.data = TSB_DATA(0, PGSZ_8K, pa, 1 /* Privileged */, 1620 tte.data = TSB_DATA(0, PGSZ_8K, pa, 1 /* Privileged */,
1621 (VM_PROT_WRITE & prot), 1621 (VM_PROT_WRITE & prot),
1622 !(pa & PMAP_NC), pa & (PMAP_NVC), 1, 0); 1622 !(pa & PMAP_NC), pa & (PMAP_NVC), 1, 0);
1623 /* We don't track mod/ref here. */ 1623 /* We don't track mod/ref here. */
1624 if (prot & VM_PROT_WRITE) 1624 if (prot & VM_PROT_WRITE)
1625 tte.data |= TLB_REAL_W|TLB_W; 1625 tte.data |= TLB_REAL_W|TLB_W;
1626 if (prot & VM_PROT_EXECUTE) 1626 if (prot & VM_PROT_EXECUTE)
1627 tte.data |= TLB_EXEC; 1627 tte.data |= TLB_EXEC;
1628 tte.data |= TLB_TSB_LOCK; /* wired */ 1628 tte.data |= TLB_TSB_LOCK; /* wired */
1629 ptp = 0; 1629 ptp = 0;
1630 1630
1631 retry: 1631 retry:
1632 i = pseg_set(pm, va, tte.data, ptp); 1632 i = pseg_set(pm, va, tte.data, ptp);
1633 if (i & 1) { 1633 if (i & 1) {
1634 KASSERT((i & 4) == 0); 1634 KASSERT((i & 4) == 0);
1635 ptp = 0; 1635 ptp = 0;
1636 if (!pmap_get_page(&ptp)) 1636 if (!pmap_get_page(&ptp))
1637 panic("pmap_kenter_pa: no pages"); 1637 panic("pmap_kenter_pa: no pages");
1638 ENTER_STAT(ptpneeded); 1638 ENTER_STAT(ptpneeded);
1639 goto retry; 1639 goto retry;
1640 } 1640 }
1641 if (ptp && i == 0) { 1641 if (ptp && i == 0) {
1642 /* We allocated a spare page but didn't use it. Free it. */ 1642 /* We allocated a spare page but didn't use it. Free it. */
1643 printf("pmap_kenter_pa: freeing unused page %llx\n", 1643 printf("pmap_kenter_pa: freeing unused page %llx\n",
1644 (long long)ptp); 1644 (long long)ptp);
1645 pmap_free_page_noflush(ptp); 1645 pmap_free_page_noflush(ptp);
1646 } 1646 }
1647#ifdef PMAP_DEBUG 1647#ifdef PMAP_DEBUG
1648 i = ptelookup_va(va); 1648 i = ptelookup_va(va);
1649 if (pmapdebug & PDB_ENTER) 1649 if (pmapdebug & PDB_ENTER)
1650 prom_printf("pmap_kenter_pa: va=%08x data=%08x:%08x " 1650 prom_printf("pmap_kenter_pa: va=%08x data=%08x:%08x "
1651 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32), 1651 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32),
1652 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]); 1652 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]);
1653 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) { 1653 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) {
1654 prom_printf("pmap_kenter_pa: evicting entry tag=%x:%08x " 1654 prom_printf("pmap_kenter_pa: evicting entry tag=%x:%08x "
1655 "data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1655 "data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1656 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag, 1656 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag,
1657 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, 1657 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data,
1658 i, &curcpu()->ci_tsb_dmmu[i]); 1658 i, &curcpu()->ci_tsb_dmmu[i]);
1659 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1659 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1660 va, (int)(tte.data>>32), (int)tte.data, i, 1660 va, (int)(tte.data>>32), (int)tte.data, i,
1661 &curcpu()->ci_tsb_dmmu[i]); 1661 &curcpu()->ci_tsb_dmmu[i]);
1662 } 1662 }
1663#endif 1663#endif
1664} 1664}
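
pmap_kenter_pa() (and pmap_enter() below) relies on bit 0 of the pseg_set() return value meaning "a page-table page is needed": the caller allocates one and retries, and frees the spare if the retry ends up not consuming it. The toy model below only exercises that control flow; stub_pseg_set() and its return values are invented for illustration and are not the kernel function.

#include <stdbool.h>
#include <stdio.h>

static bool have_ptp;			/* pretend state: does the page table exist yet? */

/* Invented stand-in for pseg_set(): bit 0 = "need a spare page". */
static int
stub_pseg_set(unsigned long ptp)
{
	if (!have_ptp) {
		if (ptp == 0)
			return 1;	/* ask the caller for a spare page */
		have_ptp = true;	/* consume the spare as a page table */
		return 2;
	}
	return 0;			/* entry stored; spare (if any) unused */
}

int
main(void)
{
	unsigned long ptp = 0;
	int i;

retry:
	i = stub_pseg_set(ptp);
	if (i & 1) {
		ptp = 0x40000UL;	/* pretend pmap_get_page() gave us a page */
		printf("allocated a spare ptp, retrying\n");
		goto retry;
	}
	if (ptp && i == 0)
		printf("spare ptp was not needed, freeing it\n");
	printf("mapping installed, pseg_set returned %d\n", i);
	return 0;
}
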
1665 1665
1666/* 1666/*
1667 * pmap_kremove: [ INTERFACE ] 1667 * pmap_kremove: [ INTERFACE ]
1668 * 1668 *
1669 * Remove a mapping entered with pmap_kenter_pa() starting at va, 1669 * Remove a mapping entered with pmap_kenter_pa() starting at va,
1670 * for size bytes (assumed to be page rounded). 1670 * for size bytes (assumed to be page rounded).
1671 */ 1671 */
1672void 1672void
1673pmap_kremove(vaddr_t va, vsize_t size) 1673pmap_kremove(vaddr_t va, vsize_t size)
1674{ 1674{
1675 struct pmap *pm = pmap_kernel(); 1675 struct pmap *pm = pmap_kernel();
1676 int64_t data; 1676 int64_t data;
1677 paddr_t pa; 1677 paddr_t pa;
1678 int rv; 1678 int rv;
1679 bool flush = FALSE; 1679 bool flush = FALSE;
1680 1680
1681 KASSERT(va < INTSTACK || va > EINTSTACK); 1681 KASSERT(va < INTSTACK || va > EINTSTACK);
1682 KASSERT(va < kdata || va > ekdata); 1682 KASSERT(va < kdata || va > ekdata);
1683 1683
1684 DPRINTF(PDB_DEMAP, ("pmap_kremove: start 0x%lx size %lx\n", va, size)); 1684 DPRINTF(PDB_DEMAP, ("pmap_kremove: start 0x%lx size %lx\n", va, size));
1685 for (; size >= PAGE_SIZE; va += PAGE_SIZE, size -= PAGE_SIZE) { 1685 for (; size >= PAGE_SIZE; va += PAGE_SIZE, size -= PAGE_SIZE) {
1686 1686
1687#ifdef DIAGNOSTIC 1687#ifdef DIAGNOSTIC
1688 /* 1688 /*
1689 * Is this part of the permanent 4MB mapping? 1689 * Is this part of the permanent 4MB mapping?
1690 */ 1690 */
1691 if (va >= ktext && va < roundup(ekdata, 4*MEG)) 1691 if (va >= ktext && va < roundup(ekdata, 4*MEG))
1692 panic("pmap_kremove: va=%08x in locked TLB", (u_int)va); 1692 panic("pmap_kremove: va=%08x in locked TLB", (u_int)va);
1693#endif 1693#endif
1694 1694
1695 data = pseg_get(pm, va); 1695 data = pseg_get(pm, va);
1696 if ((data & TLB_V) == 0) { 1696 if ((data & TLB_V) == 0) {
1697 continue; 1697 continue;
1698 } 1698 }
1699 1699
1700 flush = TRUE; 1700 flush = TRUE;
1701 pa = data & TLB_PA_MASK; 1701 pa = data & TLB_PA_MASK;
1702 1702
1703 /* 1703 /*
1704 * We need to flip the valid bit and 1704 * We need to flip the valid bit and
1705 * clear the access statistics. 1705 * clear the access statistics.
1706 */ 1706 */
1707 1707
1708 rv = pseg_set(pm, va, 0, 0); 1708 rv = pseg_set(pm, va, 0, 0);
1709 if (rv & 1) 1709 if (rv & 1)
1710 panic("pmap_kremove: pseg_set needs spare, rv=%d\n", 1710 panic("pmap_kremove: pseg_set needs spare, rv=%d\n",
1711 rv); 1711 rv);
1712 DPRINTF(PDB_DEMAP, ("pmap_kremove: seg %x pdir %x pte %x\n", 1712 DPRINTF(PDB_DEMAP, ("pmap_kremove: seg %x pdir %x pte %x\n",
1713 (int)va_to_seg(va), (int)va_to_dir(va), 1713 (int)va_to_seg(va), (int)va_to_dir(va),
1714 (int)va_to_pte(va))); 1714 (int)va_to_pte(va)));
1715 REMOVE_STAT(removes); 1715 REMOVE_STAT(removes);
1716 1716
1717 tsb_invalidate(va, pm); 1717 tsb_invalidate(va, pm);
1718 REMOVE_STAT(tflushes); 1718 REMOVE_STAT(tflushes);
1719 1719
1720 /* 1720 /*
1721 * Here we assume nothing can get into the TLB 1721 * Here we assume nothing can get into the TLB
1722 * unless it has a PTE. 1722 * unless it has a PTE.
1723 */ 1723 */
1724 1724
1725 tlb_flush_pte(va, pm); 1725 tlb_flush_pte(va, pm);
1726 dcache_flush_page_all(pa); 1726 dcache_flush_page_all(pa);
1727 } 1727 }
1728 if (flush) 1728 if (flush)
1729 REMOVE_STAT(flushes); 1729 REMOVE_STAT(flushes);
1730} 1730}
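
The per-page teardown above always runs in the same order: clear the PTE first, then invalidate the cached translations that could only have been built from it (the assumption stated in the comment). The skeleton below just names the steps; step() is a placeholder, not a kernel routine.

#include <stdio.h>

static void
step(const char *what)
{
	printf("%s\n", what);
}

int
main(void)
{
	/* Order used by pmap_kremove()/pmap_remove() for each valid page. */
	step("pseg_set(pm, va, 0, 0)      - clear the PTE in the page tables");
	step("tsb_invalidate(va, pm)      - drop any TSB entry built from it");
	step("tlb_flush_pte(va, pm)       - demap the TLB entry");
	step("dcache_flush_page_all(pa)   - flush the data cache for the page");
	return 0;
}
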
1731 1731
1732/* 1732/*
1733 * Insert physical page at pa into the given pmap at virtual address va. 1733 * Insert physical page at pa into the given pmap at virtual address va.
1734 * Supports 64-bit pa so we can map I/O space. 1734 * Supports 64-bit pa so we can map I/O space.
1735 */ 1735 */
1736 1736
1737int 1737int
1738pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1738pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1739{ 1739{
1740 pte_t tte; 1740 pte_t tte;
1741 int64_t data; 1741 int64_t data;
1742 paddr_t opa = 0, ptp; /* XXX: gcc */ 1742 paddr_t opa = 0, ptp; /* XXX: gcc */
1743 pv_entry_t pvh, npv = NULL, freepv; 1743 pv_entry_t pvh, npv = NULL, freepv;
1744 struct vm_page *pg, *opg, *ptpg; 1744 struct vm_page *pg, *opg, *ptpg;
1745 int s, i, uncached = 0, error = 0; 1745 int s, i, uncached = 0, error = 0;
1746 int size = PGSZ_8K; /* PMAP_SZ_TO_TTE(pa); */ 1746 int size = PGSZ_8K; /* PMAP_SZ_TO_TTE(pa); */
1747 bool wired = (flags & PMAP_WIRED) != 0; 1747 bool wired = (flags & PMAP_WIRED) != 0;
1748 bool wasmapped = FALSE; 1748 bool wasmapped = FALSE;
1749 bool dopv = TRUE; 1749 bool dopv = TRUE;
1750 1750
1751 /* 1751 /*
1752 * Is this part of the permanent mappings? 1752 * Is this part of the permanent mappings?
1753 */ 1753 */
1754 KASSERT(pm != pmap_kernel() || va < INTSTACK || va > EINTSTACK); 1754 KASSERT(pm != pmap_kernel() || va < INTSTACK || va > EINTSTACK);
1755 KASSERT(pm != pmap_kernel() || va < kdata || va > ekdata); 1755 KASSERT(pm != pmap_kernel() || va < kdata || va > ekdata);
1756 1756
1757 /* Grab a spare PV. */ 1757 /* Grab a spare PV. */
1758 freepv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT); 1758 freepv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
1759 if (__predict_false(freepv == NULL)) { 1759 if (__predict_false(freepv == NULL)) {
1760 if (flags & PMAP_CANFAIL) 1760 if (flags & PMAP_CANFAIL)
1761 return (ENOMEM); 1761 return (ENOMEM);
1762 panic("pmap_enter: no pv entries available"); 1762 panic("pmap_enter: no pv entries available");
1763 } 1763 }
1764 freepv->pv_next = NULL; 1764 freepv->pv_next = NULL;
1765 1765
1766 /* 1766 /*
1767 * If a mapping at this address already exists, check if we're 1767 * If a mapping at this address already exists, check if we're
1768 * entering the same PA again. If it's different, remove it. 1768 * entering the same PA again. If it's different, remove it.
1769 */ 1769 */
1770 1770
1771 mutex_enter(&pmap_lock); 1771 mutex_enter(&pmap_lock);
1772 data = pseg_get(pm, va); 1772 data = pseg_get(pm, va);
1773 if (data & TLB_V) { 1773 if (data & TLB_V) {
1774 wasmapped = TRUE; 1774 wasmapped = TRUE;
1775 opa = data & TLB_PA_MASK; 1775 opa = data & TLB_PA_MASK;
1776 if (opa != pa) { 1776 if (opa != pa) {
1777 opg = PHYS_TO_VM_PAGE(opa); 1777 opg = PHYS_TO_VM_PAGE(opa);
1778 if (opg != NULL) { 1778 if (opg != NULL) {
1779 npv = pmap_remove_pv(pm, va, opg); 1779 npv = pmap_remove_pv(pm, va, opg);
1780 } 1780 }
1781 } 1781 }
1782 } 1782 }
1783 1783
1784 /* 1784 /*
1785 * Construct the TTE. 1785 * Construct the TTE.
1786 */ 1786 */
1787 pg = PHYS_TO_VM_PAGE(pa); 1787 pg = PHYS_TO_VM_PAGE(pa);
1788 if (pg) { 1788 if (pg) {
1789 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1789 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1790 1790
1791 pvh = &md->mdpg_pvh; 1791 pvh = &md->mdpg_pvh;
1792 uncached = (pvh->pv_va & (PV_ALIAS|PV_NVC)); 1792 uncached = (pvh->pv_va & (PV_ALIAS|PV_NVC));
1793#ifdef DIAGNOSTIC 1793#ifdef DIAGNOSTIC
1794 if ((flags & VM_PROT_ALL) & ~prot) 1794 if ((flags & VM_PROT_ALL) & ~prot)
1795 panic("pmap_enter: access_type exceeds prot"); 1795 panic("pmap_enter: access_type exceeds prot");
1796#endif 1796#endif
1797 /* 1797 /*
1798 * If we don't have the traphandler do it, 1798 * If we don't have the traphandler do it,
1799 * set the ref/mod bits now. 1799 * set the ref/mod bits now.
1800 */ 1800 */
1801 if (flags & VM_PROT_ALL) 1801 if (flags & VM_PROT_ALL)
1802 pvh->pv_va |= PV_REF; 1802 pvh->pv_va |= PV_REF;
1803 if (flags & VM_PROT_WRITE) 1803 if (flags & VM_PROT_WRITE)
1804 pvh->pv_va |= PV_MOD; 1804 pvh->pv_va |= PV_MOD;
1805 1805
1806 /* 1806 /*
1807 * make sure we have a pv entry ready if we need one. 1807 * make sure we have a pv entry ready if we need one.
1808 */ 1808 */
1809 if (pvh->pv_pmap == NULL || (wasmapped && opa == pa)) { 1809 if (pvh->pv_pmap == NULL || (wasmapped && opa == pa)) {
1810 if (npv != NULL) { 1810 if (npv != NULL) {
1811 /* free it */ 1811 /* free it */
1812 npv->pv_next = freepv; 1812 npv->pv_next = freepv;
1813 freepv = npv; 1813 freepv = npv;
1814 npv = NULL; 1814 npv = NULL;
1815 } 1815 }
1816 if (wasmapped && opa == pa) { 1816 if (wasmapped && opa == pa) {
1817 dopv = FALSE; 1817 dopv = FALSE;
1818 } 1818 }
1819 } else if (npv == NULL) { 1819 } else if (npv == NULL) {
1820 /* use the pre-allocated pv */ 1820 /* use the pre-allocated pv */
1821 npv = freepv; 1821 npv = freepv;
1822 freepv = freepv->pv_next; 1822 freepv = freepv->pv_next;
1823 } 1823 }
1824 ENTER_STAT(managed); 1824 ENTER_STAT(managed);
1825 } else { 1825 } else {
1826 ENTER_STAT(unmanaged); 1826 ENTER_STAT(unmanaged);
1827 dopv = FALSE; 1827 dopv = FALSE;
1828 if (npv != NULL) { 1828 if (npv != NULL) {
1829 /* free it */ 1829 /* free it */
1830 npv->pv_next = freepv; 1830 npv->pv_next = freepv;
1831 freepv = npv; 1831 freepv = npv;
1832 npv = NULL; 1832 npv = NULL;
1833 } 1833 }
1834 } 1834 }
1835 1835
1836#ifndef NO_VCACHE 1836#ifndef NO_VCACHE
1837 if (pa & PMAP_NVC) 1837 if (pa & PMAP_NVC)
1838#endif 1838#endif
1839 uncached = 1; 1839 uncached = 1;
1840 if (uncached) { 1840 if (uncached) {
1841 ENTER_STAT(ci); 1841 ENTER_STAT(ci);
1842 } 1842 }
1843 tte.data = TSB_DATA(0, size, pa, pm == pmap_kernel(), 1843 tte.data = TSB_DATA(0, size, pa, pm == pmap_kernel(),
1844 flags & VM_PROT_WRITE, !(pa & PMAP_NC), 1844 flags & VM_PROT_WRITE, !(pa & PMAP_NC),
1845 uncached, 1, pa & PMAP_LITTLE); 1845 uncached, 1, pa & PMAP_LITTLE);
1846#ifdef HWREF 1846#ifdef HWREF
1847 if (prot & VM_PROT_WRITE) 1847 if (prot & VM_PROT_WRITE)
1848 tte.data |= TLB_REAL_W; 1848 tte.data |= TLB_REAL_W;
1849 if (prot & VM_PROT_EXECUTE) 1849 if (prot & VM_PROT_EXECUTE)
1850 tte.data |= TLB_EXEC; 1850 tte.data |= TLB_EXEC;
1851#else 1851#else
1852 /* If it needs ref accounting, do nothing. */ 1852 /* If it needs ref accounting, do nothing. */
1853 if (!(flags & VM_PROT_READ)) { 1853 if (!(flags & VM_PROT_READ)) {
1854 mutex_exit(&pmap_lock); 1854 mutex_exit(&pmap_lock);
1855 goto out; 1855 goto out;
1856 } 1856 }
1857#endif 1857#endif
1858 if (flags & VM_PROT_EXECUTE) { 1858 if (flags & VM_PROT_EXECUTE) {
1859 if ((flags & (VM_PROT_READ|VM_PROT_WRITE)) == 0) 1859 if ((flags & (VM_PROT_READ|VM_PROT_WRITE)) == 0)
1860 tte.data |= TLB_EXEC_ONLY|TLB_EXEC; 1860 tte.data |= TLB_EXEC_ONLY|TLB_EXEC;
1861 else 1861 else
1862 tte.data |= TLB_EXEC; 1862 tte.data |= TLB_EXEC;
1863 } 1863 }
1864 if (wired) 1864 if (wired)
1865 tte.data |= TLB_TSB_LOCK; 1865 tte.data |= TLB_TSB_LOCK;
1866 ptp = 0; 1866 ptp = 0;
1867 1867
1868 retry: 1868 retry:
1869 i = pseg_set(pm, va, tte.data, ptp); 1869 i = pseg_set(pm, va, tte.data, ptp);
1870 if (i == -2) { 1870 if (i == -2) {
1871 if (flags & PMAP_CANFAIL) 1871 if (flags & PMAP_CANFAIL)
1872 return (ENOMEM); 1872 return (ENOMEM);
1873 panic("pmap_enter: invalid VA (inside hole)"); 1873 panic("pmap_enter: invalid VA (inside hole)");
1874 } 1874 }
1875 if (i & 4) { 1875 if (i & 4) {
1876 /* ptp used as L3 */ 1876 /* ptp used as L3 */
1877 KASSERT(ptp != 0); 1877 KASSERT(ptp != 0);
1878 KASSERT((i & 3) == 0); 1878 KASSERT((i & 3) == 0);
1879 ptpg = PHYS_TO_VM_PAGE(ptp); 1879 ptpg = PHYS_TO_VM_PAGE(ptp);
1880 if (ptpg) { 1880 if (ptpg) {
1881 ptpg->offset = (uint64_t)va & (0xfffffLL << 23); 1881 ptpg->offset = (uint64_t)va & (0xfffffLL << 23);
1882 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue); 1882 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
1883 } else { 1883 } else {
1884 KASSERT(pm == pmap_kernel()); 1884 KASSERT(pm == pmap_kernel());
1885 } 1885 }
1886 } 1886 }
1887 if (i & 2) { 1887 if (i & 2) {
1888 /* ptp used as L2 */ 1888 /* ptp used as L2 */
1889 KASSERT(ptp != 0); 1889 KASSERT(ptp != 0);
1890 KASSERT((i & 4) == 0); 1890 KASSERT((i & 4) == 0);
1891 ptpg = PHYS_TO_VM_PAGE(ptp); 1891 ptpg = PHYS_TO_VM_PAGE(ptp);
1892 if (ptpg) { 1892 if (ptpg) {
1893 ptpg->offset = (((uint64_t)va >> 43) & 0x3ffLL) << 13; 1893 ptpg->offset = (((uint64_t)va >> 43) & 0x3ffLL) << 13;
1894 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue); 1894 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
1895 } else { 1895 } else {
1896 KASSERT(pm == pmap_kernel()); 1896 KASSERT(pm == pmap_kernel());
1897 } 1897 }
1898 } 1898 }
1899 if (i & 1) { 1899 if (i & 1) {
1900 KASSERT((i & 4) == 0); 1900 KASSERT((i & 4) == 0);
1901 ptp = 0; 1901 ptp = 0;
1902 if (!pmap_get_page(&ptp)) { 1902 if (!pmap_get_page(&ptp)) {
1903 mutex_exit(&pmap_lock); 1903 mutex_exit(&pmap_lock);
1904 if (flags & PMAP_CANFAIL) { 1904 if (flags & PMAP_CANFAIL) {
1905 if (npv != NULL) { 1905 if (npv != NULL) {
1906 /* free it */ 1906 /* free it */
1907 npv->pv_next = freepv; 1907 npv->pv_next = freepv;
1908 freepv = npv; 1908 freepv = npv;
1909 } 1909 }
1910 error = ENOMEM; 1910 error = ENOMEM;
1911 goto out; 1911 goto out;
1912 } else { 1912 } else {
1913 panic("pmap_enter: no pages"); 1913 panic("pmap_enter: no pages");
1914 } 1914 }
1915 } 1915 }
1916 ENTER_STAT(ptpneeded); 1916 ENTER_STAT(ptpneeded);
1917 goto retry; 1917 goto retry;
1918 } 1918 }
1919 if (ptp && i == 0) { 1919 if (ptp && i == 0) {
1920 /* We allocated a spare page but didn't use it. Free it. */ 1920 /* We allocated a spare page but didn't use it. Free it. */
1921 printf("pmap_enter: freeing unused page %llx\n", 1921 printf("pmap_enter: freeing unused page %llx\n",
1922 (long long)ptp); 1922 (long long)ptp);
1923 pmap_free_page_noflush(ptp); 1923 pmap_free_page_noflush(ptp);
1924 } 1924 }
1925 if (dopv) { 1925 if (dopv) {
1926 pmap_enter_pv(pm, va, pa, pg, npv); 1926 pmap_enter_pv(pm, va, pa, pg, npv);
1927 } 1927 }
1928 1928
1929 mutex_exit(&pmap_lock); 1929 mutex_exit(&pmap_lock);
1930#ifdef PMAP_DEBUG 1930#ifdef PMAP_DEBUG
1931 i = ptelookup_va(va); 1931 i = ptelookup_va(va);
1932 if (pmapdebug & PDB_ENTER) 1932 if (pmapdebug & PDB_ENTER)
1933 prom_printf("pmap_enter: va=%08x data=%08x:%08x " 1933 prom_printf("pmap_enter: va=%08x data=%08x:%08x "
1934 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32), 1934 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32),
1935 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]); 1935 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]);
1936 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) { 1936 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) {
1937 prom_printf("pmap_enter: evicting entry tag=%x:%08x " 1937 prom_printf("pmap_enter: evicting entry tag=%x:%08x "
1938 "data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1938 "data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1939 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag, 1939 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag,
1940 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, i, 1940 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, i,
1941 &curcpu()->ci_tsb_dmmu[i]); 1941 &curcpu()->ci_tsb_dmmu[i]);
1942 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1942 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1943 va, (int)(tte.data>>32), (int)tte.data, i, 1943 va, (int)(tte.data>>32), (int)tte.data, i,
1944 &curcpu()->ci_tsb_dmmu[i]); 1944 &curcpu()->ci_tsb_dmmu[i]);
1945 } 1945 }
1946#endif 1946#endif
1947 1947
1948 if (flags & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) { 1948 if (flags & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) {
1949 1949
1950 /* 1950 /*
1951 * preload the TSB with the new entry, 1951 * preload the TSB with the new entry,
1952 * since we're going to need it immediately anyway. 1952 * since we're going to need it immediately anyway.
1953 */ 1953 */
1954 1954
1955 KASSERT(pmap_ctx(pm)>=0); 1955 KASSERT(pmap_ctx(pm)>=0);
1956 i = ptelookup_va(va); 1956 i = ptelookup_va(va);
1957 tte.tag = TSB_TAG(0, pmap_ctx(pm), va); 1957 tte.tag = TSB_TAG(0, pmap_ctx(pm), va);
1958 s = splhigh(); 1958 s = splhigh();
1959 if (wasmapped && pmap_is_on_mmu(pm)) { 1959 if (wasmapped && pmap_is_on_mmu(pm)) {
1960 tsb_invalidate(va, pm); 1960 tsb_invalidate(va, pm);
1961 } 1961 }
1962 if (flags & (VM_PROT_READ | VM_PROT_WRITE)) { 1962 if (flags & (VM_PROT_READ | VM_PROT_WRITE)) {
1963 curcpu()->ci_tsb_dmmu[i].tag = tte.tag; 1963 curcpu()->ci_tsb_dmmu[i].tag = tte.tag;
1964 __asm volatile("" : : : "memory"); 1964 __asm volatile("" : : : "memory");
1965 curcpu()->ci_tsb_dmmu[i].data = tte.data; 1965 curcpu()->ci_tsb_dmmu[i].data = tte.data;
1966 } 1966 }
1967 if (flags & VM_PROT_EXECUTE) { 1967 if (flags & VM_PROT_EXECUTE) {
1968 curcpu()->ci_tsb_immu[i].tag = tte.tag; 1968 curcpu()->ci_tsb_immu[i].tag = tte.tag;
1969 __asm volatile("" : : : "memory"); 1969 __asm volatile("" : : : "memory");
1970 curcpu()->ci_tsb_immu[i].data = tte.data; 1970 curcpu()->ci_tsb_immu[i].data = tte.data;
1971 } 1971 }
1972 1972
1973 /* 1973 /*
1974 * it's only necessary to flush the TLB if this page was 1974 * it's only necessary to flush the TLB if this page was
1975 * previously mapped, but for some reason it's a lot faster 1975 * previously mapped, but for some reason it's a lot faster
1976 * for the fork+exit microbenchmark if we always do it. 1976 * for the fork+exit microbenchmark if we always do it.
1977 */ 1977 */
1978 1978
1979 KASSERT(pmap_ctx(pm)>=0); 1979 KASSERT(pmap_ctx(pm)>=0);
1980#ifdef MULTIPROCESSOR 1980#ifdef MULTIPROCESSOR
1981 if (wasmapped && pmap_is_on_mmu(pm)) 1981 if (wasmapped && pmap_is_on_mmu(pm))
1982 tlb_flush_pte(va, pm); 1982 tlb_flush_pte(va, pm);
1983 else 1983 else
1984 sp_tlb_flush_pte(va, pmap_ctx(pm)); 1984 sp_tlb_flush_pte(va, pmap_ctx(pm));
1985#else 1985#else
1986 tlb_flush_pte(va, pm); 1986 tlb_flush_pte(va, pm);
1987#endif 1987#endif
1988 splx(s); 1988 splx(s);
1989 } else if (wasmapped && pmap_is_on_mmu(pm)) { 1989 } else if (wasmapped && pmap_is_on_mmu(pm)) {
1990 /* Force reload -- protections may be changed */ 1990 /* Force reload -- protections may be changed */
1991 KASSERT(pmap_ctx(pm)>=0); 1991 KASSERT(pmap_ctx(pm)>=0);
1992 tsb_invalidate(va, pm); 1992 tsb_invalidate(va, pm);
1993 tlb_flush_pte(va, pm); 1993 tlb_flush_pte(va, pm);
1994 } 1994 }
1995 1995
1996 /* We will let the fast mmu miss interrupt load the new translation */ 1996 /* We will let the fast mmu miss interrupt load the new translation */
1997 pv_check(); 1997 pv_check();
1998 out: 1998 out:
1999 /* Catch up on deferred frees. */ 1999 /* Catch up on deferred frees. */
2000 for (; freepv != NULL; freepv = npv) { 2000 for (; freepv != NULL; freepv = npv) {
2001 npv = freepv->pv_next; 2001 npv = freepv->pv_next;
2002 pool_cache_put(&pmap_pv_cache, freepv); 2002 pool_cache_put(&pmap_pv_cache, freepv);
2003 } 2003 }
2004 return error; 2004 return error;
2005} 2005}
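
When pmap_enter() preloads the TSB it stores the tag, then the data, with an empty asm in between acting as a compiler barrier so the two stores cannot be reordered at compile time. A stripped-down user-space version of that store pattern is sketched below; the struct layout is invented for illustration and is not the real TSB entry.

#include <stdint.h>
#include <stdio.h>

struct fake_tsb_entry {			/* layout invented for illustration */
	uint64_t tag;
	uint64_t data;
};

static void
preload(struct fake_tsb_entry *e, uint64_t tag, uint64_t data)
{
	e->tag = tag;
	__asm volatile("" : : : "memory");	/* keep the two stores in this order */
	e->data = data;
}

int
main(void)
{
	struct fake_tsb_entry e = { 0, 0 };

	preload(&e, 0x123, 0x456);
	printf("tag=%#lx data=%#lx\n", (unsigned long)e.tag, (unsigned long)e.data);
	return 0;
}
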
2006 2006
2007void 2007void
2008pmap_remove_all(struct pmap *pm) 2008pmap_remove_all(struct pmap *pm)
2009{ 2009{
2010#ifdef MULTIPROCESSOR 2010#ifdef MULTIPROCESSOR
2011 struct cpu_info *ci; 2011 struct cpu_info *ci;
2012 sparc64_cpuset_t pmap_cpus_active; 2012 sparc64_cpuset_t pmap_cpus_active;
2013#endif 2013#endif
2014 2014
2015 if (pm == pmap_kernel()) { 2015 if (pm == pmap_kernel()) {
2016 return; 2016 return;
2017 } 2017 }
2018 write_user_windows(); 2018 write_user_windows();
2019 pm->pm_refs = 0; 2019 pm->pm_refs = 0;
2020 2020
2021 /* 2021 /*
2022 * XXXMRG: pmap_destroy() does exactly the same dance here. 2022 * XXXMRG: pmap_destroy() does exactly the same dance here.
2023 * Surely one of them isn't necessary? 2023 * Surely one of them isn't necessary?
2024 */ 2024 */
2025#ifdef MULTIPROCESSOR 2025#ifdef MULTIPROCESSOR
2026 CPUSET_CLEAR(pmap_cpus_active); 2026 CPUSET_CLEAR(pmap_cpus_active);
2027 for (ci = cpus; ci != NULL; ci = ci->ci_next) { 2027 for (ci = cpus; ci != NULL; ci = ci->ci_next) {
2028 /* XXXMRG: Move the lock inside one or both tests? */ 2028 /* XXXMRG: Move the lock inside one or both tests? */
2029 mutex_enter(&ci->ci_ctx_lock); 2029 mutex_enter(&ci->ci_ctx_lock);
2030 if (CPUSET_HAS(cpus_active, ci->ci_index)) { 2030 if (CPUSET_HAS(cpus_active, ci->ci_index)) {
2031 if (pm->pm_ctx[ci->ci_index] > 0) { 2031 if (pm->pm_ctx[ci->ci_index] > 0) {
2032 CPUSET_ADD(pmap_cpus_active, ci->ci_index); 2032 CPUSET_ADD(pmap_cpus_active, ci->ci_index);
2033 ctx_free(pm, ci); 2033 ctx_free(pm, ci);
2034 } 2034 }
2035 } 2035 }
2036 mutex_exit(&ci->ci_ctx_lock); 2036 mutex_exit(&ci->ci_ctx_lock);
2037 } 2037 }
2038#else 2038#else
2039 if (pmap_ctx(pm)) { 2039 if (pmap_ctx(pm)) {
2040 mutex_enter(&curcpu()->ci_ctx_lock); 2040 mutex_enter(&curcpu()->ci_ctx_lock);
2041 ctx_free(pm, curcpu()); 2041 ctx_free(pm, curcpu());
2042 mutex_exit(&curcpu()->ci_ctx_lock); 2042 mutex_exit(&curcpu()->ci_ctx_lock);
2043 } 2043 }
2044#endif 2044#endif
2045 2045
2046 REMOVE_STAT(flushes); 2046 REMOVE_STAT(flushes);
2047 /* 2047 /*
2048 * XXXMRG: couldn't we do something less severe here, and 2048 * XXXMRG: couldn't we do something less severe here, and
2049 * only flush the right context on each CPU? 2049 * only flush the right context on each CPU?
2050 */ 2050 */
2051 blast_dcache(); 2051 blast_dcache();
2052} 2052}
2053 2053
2054/* 2054/*
2055 * Remove the given range of mapping entries. 2055 * Remove the given range of mapping entries.
2056 */ 2056 */
2057void 2057void
2058pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva) 2058pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
2059{ 2059{
2060 int64_t data; 2060 int64_t data;
2061 paddr_t pa; 2061 paddr_t pa;
2062 struct vm_page *pg; 2062 struct vm_page *pg;
2063 pv_entry_t pv, freepv = NULL; 2063 pv_entry_t pv, freepv = NULL;
2064 int rv; 2064 int rv;
2065 bool flush = FALSE; 2065 bool flush = FALSE;
2066 2066
2067 /* 2067 /*
2068 * In here we should check each pseg and if there are no more entries, 2068 * In here we should check each pseg and if there are no more entries,
2069 * free it. It's just that linear scans of 8K pages get expensive. 2069 * free it. It's just that linear scans of 8K pages get expensive.
2070 */ 2070 */
2071 2071
2072 KASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK); 2072 KASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK);
2073 KASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata); 2073 KASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata);
2074 2074
2075 mutex_enter(&pmap_lock); 2075 mutex_enter(&pmap_lock);
2076 DPRINTF(PDB_REMOVE, ("pmap_remove(pm=%p, va=%p, endva=%p):", pm, 2076 DPRINTF(PDB_REMOVE, ("pmap_remove(pm=%p, va=%p, endva=%p):", pm,
2077 (void *)(u_long)va, (void *)(u_long)endva)); 2077 (void *)(u_long)va, (void *)(u_long)endva));
2078 REMOVE_STAT(calls); 2078 REMOVE_STAT(calls);
2079 2079
2080 /* Now do the real work */ 2080 /* Now do the real work */
2081 for (; va < endva; va += PAGE_SIZE) { 2081 for (; va < endva; va += PAGE_SIZE) {
2082#ifdef DIAGNOSTIC 2082#ifdef DIAGNOSTIC
2083 /* 2083 /*
2084 * Is this part of the permanent 4MB mapping? 2084 * Is this part of the permanent 4MB mapping?
2085 */ 2085 */
2086 if (pm == pmap_kernel() && va >= ktext && 2086 if (pm == pmap_kernel() && va >= ktext &&
2087 va < roundup(ekdata, 4*MEG)) 2087 va < roundup(ekdata, 4*MEG))
2088 panic("pmap_remove: va=%08llx in locked TLB", 2088 panic("pmap_remove: va=%08llx in locked TLB",
2089 (long long)va); 2089 (long long)va);
2090#endif 2090#endif
2091 2091
2092 data = pseg_get(pm, va); 2092 data = pseg_get(pm, va);
2093 if ((data & TLB_V) == 0) { 2093 if ((data & TLB_V) == 0) {
2094 continue; 2094 continue;
2095 } 2095 }
2096 2096
2097 flush = TRUE; 2097 flush = TRUE;
2098 /* First remove the pv entry, if there is one */ 2098 /* First remove the pv entry, if there is one */
2099 pa = data & TLB_PA_MASK; 2099 pa = data & TLB_PA_MASK;
2100 pg = PHYS_TO_VM_PAGE(pa); 2100 pg = PHYS_TO_VM_PAGE(pa);
2101 if (pg) { 2101 if (pg) {
2102 pv = pmap_remove_pv(pm, va, pg); 2102 pv = pmap_remove_pv(pm, va, pg);
2103 if (pv != NULL) { 2103 if (pv != NULL) {
2104 /* free it */ 2104 /* free it */
2105 pv->pv_next = freepv; 2105 pv->pv_next = freepv;
2106 freepv = pv; 2106 freepv = pv;
2107 } 2107 }
2108 } 2108 }
2109 2109
2110 /* 2110 /*
2111 * We need to flip the valid bit and 2111 * We need to flip the valid bit and
2112 * clear the access statistics. 2112 * clear the access statistics.
2113 */ 2113 */
2114 2114
2115 rv = pseg_set(pm, va, 0, 0); 2115 rv = pseg_set(pm, va, 0, 0);
2116 if (rv & 1) 2116 if (rv & 1)
2117 panic("pmap_remove: pseg_set needed spare, rv=%d!\n", 2117 panic("pmap_remove: pseg_set needed spare, rv=%d!\n",
2118 rv); 2118 rv);
2119 2119
2120 DPRINTF(PDB_REMOVE, (" clearing seg %x pte %x\n", 2120 DPRINTF(PDB_REMOVE, (" clearing seg %x pte %x\n",
2121 (int)va_to_seg(va), (int)va_to_pte(va))); 2121 (int)va_to_seg(va), (int)va_to_pte(va)));
2122 REMOVE_STAT(removes); 2122 REMOVE_STAT(removes);
2123 2123
2124 if (pm != pmap_kernel() && !pmap_has_ctx(pm)) 2124 if (pm != pmap_kernel() && !pmap_has_ctx(pm))
2125 continue; 2125 continue;
2126 2126
2127 /* 2127 /*
2128 * if the pmap is being torn down, don't bother flushing; 2128 * if the pmap is being torn down, don't bother flushing;
2129 * we have already done so. 2129 * we have already done so.
2130 */ 2130 */
2131 2131
2132 if (!pm->pm_refs) 2132 if (!pm->pm_refs)
2133 continue; 2133 continue;
2134 2134
2135 /* 2135 /*
2136 * Here we assume nothing can get into the TLB 2136 * Here we assume nothing can get into the TLB
2137 * unless it has a PTE. 2137 * unless it has a PTE.
2138 */ 2138 */
2139 2139
2140 KASSERT(pmap_ctx(pm)>=0); 2140 KASSERT(pmap_ctx(pm)>=0);
2141 tsb_invalidate(va, pm); 2141 tsb_invalidate(va, pm);
2142 REMOVE_STAT(tflushes); 2142 REMOVE_STAT(tflushes);
2143 tlb_flush_pte(va, pm); 2143 tlb_flush_pte(va, pm);
2144 dcache_flush_page_all(pa); 2144 dcache_flush_page_all(pa);
2145 } 2145 }
2146 if (flush && pm->pm_refs) 2146 if (flush && pm->pm_refs)
2147 REMOVE_STAT(flushes); 2147 REMOVE_STAT(flushes);
2148 DPRINTF(PDB_REMOVE, ("\n")); 2148 DPRINTF(PDB_REMOVE, ("\n"));
2149 pv_check(); 2149 pv_check();
2150 mutex_exit(&pmap_lock); 2150 mutex_exit(&pmap_lock);
2151 2151
2152 /* Catch up on deferred frees. */ 2152 /* Catch up on deferred frees. */
2153 for (; freepv != NULL; freepv = pv) { 2153 for (; freepv != NULL; freepv = pv) {
2154 pv = freepv->pv_next; 2154 pv = freepv->pv_next;
2155 pool_cache_put(&pmap_pv_cache, freepv); 2155 pool_cache_put(&pmap_pv_cache, freepv);
2156 } 2156 }
2157} 2157}
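
Both pmap_enter() and pmap_remove() use the same deferred-free idiom for pv entries: while pmap_lock is held they only chain released entries onto a local freepv list, and hand them back to the pool once the lock has been dropped. The sketch below models that idiom with malloc/free standing in for the pool cache; the types and helper are placeholders, not the kernel's.

#include <stdlib.h>

struct pv_entry {
	struct pv_entry *pv_next;
};

/* Placeholder for pool_cache_put(&pmap_pv_cache, pv). */
static void
fake_pool_put(struct pv_entry *pv)
{
	free(pv);
}

int
main(void)
{
	struct pv_entry *pv, *freepv = NULL;
	int i;

	/* "Lock held": entries removed from a page are only chained locally. */
	for (i = 0; i < 3; i++) {
		pv = malloc(sizeof(*pv));
		if (pv == NULL)
			return 1;
		pv->pv_next = freepv;
		freepv = pv;
	}

	/* "Lock dropped": catch up on deferred frees. */
	for (; freepv != NULL; freepv = pv) {
		pv = freepv->pv_next;
		fake_pool_put(freepv);
	}
	return 0;
}

Deferring the pool_cache_put() calls keeps the pool cache from being touched while the pmap lock is held.
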
2158 2158
2159/* 2159/*
2160 * Change the protection on the specified range of this pmap. 2160 * Change the protection on the specified range of this pmap.