Sun Oct 5 02:07:39 2008 UTC
pool_cache: use BLOCK_SIZE as alignment.
pmap_pv_cache: use large size groups (PR_LARGECACHE) like x86.


(nakayama)
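
This change adjusts the pool_cache_bootstrap() calls for the two caches declared near the top of the file (pmap_cache and pmap_pv_cache); the call sites themselves live in pmap_init(), beyond the portion of the diff excerpted below. For orientation, here is a minimal sketch of the setup the log message describes. It assumes the NetBSD 5-era pool_cache_bootstrap(9) argument order (cache, size, align, align_offset, flags, wchan, allocator, ipl, ctor, dtor, arg); the function name, wait-channel strings and IPL are illustrative placeholders, and only pmap_cache, pmap_pv_cache, BLOCK_SIZE and PR_LARGECACHE are taken from the log and the file itself.

#include <sys/param.h>
#include <sys/pool.h>

/*
 * Sketch only -- not the literal r1.222 hunk.  It shows the two changes
 * named in the log: BLOCK_SIZE alignment for the pmap cache, and
 * PR_LARGECACHE (larger per-CPU object groups, as the x86 pmap already
 * uses) for the pv_entry cache.
 */
static void
pmap_cache_setup_sketch(void)
{
	/* Align struct pmap objects to the sparc64 cache block size. */
	pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap),
	    BLOCK_SIZE, 0, 0, "pmappl", NULL, IPL_NONE,
	    NULL, NULL, NULL);

	/*
	 * pv_entry objects are tiny and allocated constantly; larger
	 * per-CPU cache groups cut down on trips to the global pool.
	 */
	pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry),
	    0, 0, PR_LARGECACHE, "pv_entry", NULL, IPL_NONE,
	    NULL, NULL, NULL);
}
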
diff -r1.221 -r1.222 src/sys/arch/sparc64/sparc64/pmap.c

cvs diff -r1.221 -r1.222 src/sys/arch/sparc64/sparc64/pmap.c

--- src/sys/arch/sparc64/sparc64/pmap.c 2008/09/23 21:30:11 1.221
+++ src/sys/arch/sparc64/sparc64/pmap.c 2008/10/05 02:07:39 1.222
@@ -1,2264 +1,2264 @@
1/* $NetBSD: pmap.c,v 1.221 2008/09/23 21:30:11 martin Exp $ */ 1/* $NetBSD: pmap.c,v 1.222 2008/10/05 02:07:39 nakayama Exp $ */
2/* 2/*
3 * 3 *
4 * Copyright (C) 1996-1999 Eduardo Horvath. 4 * Copyright (C) 1996-1999 Eduardo Horvath.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE. 24 * SUCH DAMAGE.
25 * 25 *
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.221 2008/09/23 21:30:11 martin Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.222 2008/10/05 02:07:39 nakayama Exp $");
30 30
31#undef NO_VCACHE /* Don't forget the locked TLB in dostart */ 31#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
32#define HWREF 32#define HWREF
33 33
34#include "opt_ddb.h" 34#include "opt_ddb.h"
35#include "opt_multiprocessor.h" 35#include "opt_multiprocessor.h"
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/malloc.h> 38#include <sys/malloc.h>
39#include <sys/queue.h> 39#include <sys/queue.h>
40#include <sys/systm.h> 40#include <sys/systm.h>
41#include <sys/msgbuf.h> 41#include <sys/msgbuf.h>
42#include <sys/pool.h> 42#include <sys/pool.h>
43#include <sys/exec.h> 43#include <sys/exec.h>
44#include <sys/core.h> 44#include <sys/core.h>
45#include <sys/kcore.h> 45#include <sys/kcore.h>
46#include <sys/proc.h> 46#include <sys/proc.h>
47#include <sys/atomic.h> 47#include <sys/atomic.h>
48#include <sys/cpu.h> 48#include <sys/cpu.h>
49 49
50#include <uvm/uvm.h> 50#include <uvm/uvm.h>
51 51
52#include <machine/pcb.h> 52#include <machine/pcb.h>
53#include <machine/sparc64.h> 53#include <machine/sparc64.h>
54#include <machine/ctlreg.h> 54#include <machine/ctlreg.h>
55#include <machine/promlib.h> 55#include <machine/promlib.h>
56#include <machine/kcore.h> 56#include <machine/kcore.h>
57#include <machine/bootinfo.h> 57#include <machine/bootinfo.h>
58 58
59#include "cache.h" 59#include "cache.h"
60 60
61#ifdef DDB 61#ifdef DDB
62#include <machine/db_machdep.h> 62#include <machine/db_machdep.h>
63#include <ddb/db_command.h> 63#include <ddb/db_command.h>
64#include <ddb/db_sym.h> 64#include <ddb/db_sym.h>
65#include <ddb/db_variables.h> 65#include <ddb/db_variables.h>
66#include <ddb/db_extern.h> 66#include <ddb/db_extern.h>
67#include <ddb/db_access.h> 67#include <ddb/db_access.h>
68#include <ddb/db_output.h> 68#include <ddb/db_output.h>
69#else 69#else
70#define Debugger() 70#define Debugger()
71#define db_printf printf 71#define db_printf printf
72#endif 72#endif
73 73
74#define MEG (1<<20) /* 1MB */ 74#define MEG (1<<20) /* 1MB */
75#define KB (1<<10) /* 1KB */ 75#define KB (1<<10) /* 1KB */
76 76
 77paddr_t cpu0paddr; /* contiguous phys memory preallocated for cpus */ 77paddr_t cpu0paddr; /* contiguous phys memory preallocated for cpus */
78 78
79/* These routines are in assembly to allow access thru physical mappings */ 79/* These routines are in assembly to allow access thru physical mappings */
80extern int64_t pseg_get(struct pmap *, vaddr_t); 80extern int64_t pseg_get(struct pmap *, vaddr_t);
81extern int pseg_set(struct pmap *, vaddr_t, int64_t, paddr_t); 81extern int pseg_set(struct pmap *, vaddr_t, int64_t, paddr_t);
82 82
83/* 83/*
84 * Diatribe on ref/mod counting: 84 * Diatribe on ref/mod counting:
85 * 85 *
86 * First of all, ref/mod info must be non-volatile. Hence we need to keep it 86 * First of all, ref/mod info must be non-volatile. Hence we need to keep it
87 * in the pv_entry structure for each page. (We could bypass this for the 87 * in the pv_entry structure for each page. (We could bypass this for the
88 * vm_page, but that's a long story....) 88 * vm_page, but that's a long story....)
89 * 89 *
90 * This architecture has nice, fast traps with lots of space for software bits 90 * This architecture has nice, fast traps with lots of space for software bits
91 * in the TTE. To accelerate ref/mod counts we make use of these features. 91 * in the TTE. To accelerate ref/mod counts we make use of these features.
92 * 92 *
93 * When we map a page initially, we place a TTE in the page table. It's 93 * When we map a page initially, we place a TTE in the page table. It's
94 * inserted with the TLB_W and TLB_ACCESS bits cleared. If a page is really 94 * inserted with the TLB_W and TLB_ACCESS bits cleared. If a page is really
95 * writable we set the TLB_REAL_W bit for the trap handler. 95 * writable we set the TLB_REAL_W bit for the trap handler.
96 * 96 *
97 * Whenever we take a TLB miss trap, the trap handler will set the TLB_ACCESS 97 * Whenever we take a TLB miss trap, the trap handler will set the TLB_ACCESS
 98 * bit in the appropriate TTE in the page table. Whenever we take a protection 98 * bit in the appropriate TTE in the page table. Whenever we take a protection
99 * fault, if the TLB_REAL_W bit is set then we flip both the TLB_W and TLB_MOD 99 * fault, if the TLB_REAL_W bit is set then we flip both the TLB_W and TLB_MOD
100 * bits to enable writing and mark the page as modified. 100 * bits to enable writing and mark the page as modified.
101 * 101 *
102 * This means that we may have ref/mod information all over the place. The 102 * This means that we may have ref/mod information all over the place. The
103 * pmap routines must traverse the page tables of all pmaps with a given page 103 * pmap routines must traverse the page tables of all pmaps with a given page
104 * and collect/clear all the ref/mod information and copy it into the pv_entry. 104 * and collect/clear all the ref/mod information and copy it into the pv_entry.
105 */ 105 */
106 106
107#ifdef NO_VCACHE 107#ifdef NO_VCACHE
108#define FORCE_ALIAS 1 108#define FORCE_ALIAS 1
109#else 109#else
110#define FORCE_ALIAS 0 110#define FORCE_ALIAS 0
111#endif 111#endif
112 112
113#define PV_ALIAS 0x1LL 113#define PV_ALIAS 0x1LL
114#define PV_REF 0x2LL 114#define PV_REF 0x2LL
115#define PV_MOD 0x4LL 115#define PV_MOD 0x4LL
116#define PV_NVC 0x8LL 116#define PV_NVC 0x8LL
117#define PV_NC 0x10LL 117#define PV_NC 0x10LL
 118#define PV_WE 0x20LL /* Debug -- this page was writable sometime */ 118#define PV_WE 0x20LL /* Debug -- this page was writable sometime */
119#define PV_MASK (0x03fLL) 119#define PV_MASK (0x03fLL)
120#define PV_VAMASK (~(PAGE_SIZE - 1)) 120#define PV_VAMASK (~(PAGE_SIZE - 1))
121#define PV_MATCH(pv,va) (!(((pv)->pv_va ^ (va)) & PV_VAMASK)) 121#define PV_MATCH(pv,va) (!(((pv)->pv_va ^ (va)) & PV_VAMASK))
122#define PV_SETVA(pv,va) ((pv)->pv_va = (((va) & PV_VAMASK) | \ 122#define PV_SETVA(pv,va) ((pv)->pv_va = (((va) & PV_VAMASK) | \
123 (((pv)->pv_va) & PV_MASK))) 123 (((pv)->pv_va) & PV_MASK)))
124 124
125struct pool_cache pmap_cache; 125struct pool_cache pmap_cache;
126struct pool_cache pmap_pv_cache; 126struct pool_cache pmap_pv_cache;
127 127
128pv_entry_t pmap_remove_pv(struct pmap *, vaddr_t, struct vm_page *); 128pv_entry_t pmap_remove_pv(struct pmap *, vaddr_t, struct vm_page *);
129void pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, struct vm_page *, 129void pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, struct vm_page *,
130 pv_entry_t); 130 pv_entry_t);
131void pmap_page_cache(struct pmap *, paddr_t, int); 131void pmap_page_cache(struct pmap *, paddr_t, int);
132 132
133/* 133/*
134 * First and last managed physical addresses. 134 * First and last managed physical addresses.
135 * XXX only used for dumping the system. 135 * XXX only used for dumping the system.
136 */ 136 */
137paddr_t vm_first_phys, vm_num_phys; 137paddr_t vm_first_phys, vm_num_phys;
138 138
139/* 139/*
140 * Here's the CPU TSB stuff. It's allocated in pmap_bootstrap. 140 * Here's the CPU TSB stuff. It's allocated in pmap_bootstrap.
141 */ 141 */
142int tsbsize; /* tsbents = 512 * 2^^tsbsize */ 142int tsbsize; /* tsbents = 512 * 2^^tsbsize */
143#define TSBENTS (512<<tsbsize) 143#define TSBENTS (512<<tsbsize)
144#define TSBSIZE (TSBENTS * 16) 144#define TSBSIZE (TSBENTS * 16)
145 145
146struct pmap kernel_pmap_; 146struct pmap kernel_pmap_;
147 147
148static int ctx_alloc(struct pmap *); 148static int ctx_alloc(struct pmap *);
149#ifdef MULTIPROCESSOR 149#ifdef MULTIPROCESSOR
150static void ctx_free(struct pmap *, struct cpu_info *); 150static void ctx_free(struct pmap *, struct cpu_info *);
151#define pmap_ctx(PM) ((PM)->pm_ctx[cpu_number()]) 151#define pmap_ctx(PM) ((PM)->pm_ctx[cpu_number()])
152 152
153static bool pmap_is_referenced_locked(struct vm_page *); 153static bool pmap_is_referenced_locked(struct vm_page *);
154 154
155/* 155/*
156 * Check if any MMU has a non-zero context 156 * Check if any MMU has a non-zero context
157 */ 157 */
158static inline bool 158static inline bool
159pmap_has_ctx(struct pmap *p) 159pmap_has_ctx(struct pmap *p)
160{ 160{
161 int i; 161 int i;
162 162
163 /* any context on any cpu? */ 163 /* any context on any cpu? */
164 for (i = 0; i < sparc_ncpus; i++) 164 for (i = 0; i < sparc_ncpus; i++)
165 if (p->pm_ctx[i] > 0) 165 if (p->pm_ctx[i] > 0)
166 return true; 166 return true;
167 167
168 return false;  168 return false;
169} 169}
170 170
171/* 171/*
172 * Check if this pmap has a live mapping on some MMU. 172 * Check if this pmap has a live mapping on some MMU.
173 */ 173 */
174static inline bool 174static inline bool
175pmap_is_on_mmu(struct pmap *p) 175pmap_is_on_mmu(struct pmap *p)
176{ 176{
177 /* The kernel pmap is always on all MMUs */ 177 /* The kernel pmap is always on all MMUs */
178 if (p == pmap_kernel()) 178 if (p == pmap_kernel())
179 return true; 179 return true;
180 180
181 return pmap_has_ctx(p); 181 return pmap_has_ctx(p);
182} 182}
183#else 183#else
184static void ctx_free(struct pmap *); 184static void ctx_free(struct pmap *);
185#define pmap_ctx(PM) ((PM)->pm_ctx) 185#define pmap_ctx(PM) ((PM)->pm_ctx)
186 186
187static inline bool 187static inline bool
188pmap_has_ctx(struct pmap *p) 188pmap_has_ctx(struct pmap *p)
189{ 189{
190 return pmap_ctx(p) > 0; 190 return pmap_ctx(p) > 0;
191} 191}
192 192
193static inline bool 193static inline bool
194pmap_is_on_mmu(struct pmap *p) 194pmap_is_on_mmu(struct pmap *p)
195{ 195{
196 return p == pmap_kernel() || pmap_ctx(p) > 0; 196 return p == pmap_kernel() || pmap_ctx(p) > 0;
197} 197}
198#endif 198#endif
199 199
200/* 200/*
201 * Virtual and physical addresses of the start and end of kernel text 201 * Virtual and physical addresses of the start and end of kernel text
202 * and data segments. 202 * and data segments.
203 */ 203 */
204vaddr_t ktext; 204vaddr_t ktext;
205paddr_t ktextp; 205paddr_t ktextp;
206vaddr_t ektext; 206vaddr_t ektext;
207paddr_t ektextp; 207paddr_t ektextp;
208vaddr_t kdata; 208vaddr_t kdata;
209paddr_t kdatap; 209paddr_t kdatap;
210vaddr_t ekdata; 210vaddr_t ekdata;
211paddr_t ekdatap; 211paddr_t ekdatap;
212 212
213/* 213/*
214 * Kernel 4MB pages. 214 * Kernel 4MB pages.
215 */ 215 */
216extern struct tlb_entry *kernel_tlbs; 216extern struct tlb_entry *kernel_tlbs;
217extern int kernel_tlb_slots; 217extern int kernel_tlb_slots;
218 218
219static int npgs; 219static int npgs;
220static u_int nextavail; 220static u_int nextavail;
221 221
222vaddr_t vmmap; /* one reserved MI vpage for /dev/mem */ 222vaddr_t vmmap; /* one reserved MI vpage for /dev/mem */
223 223
224int phys_installed_size; /* Installed physical memory */ 224int phys_installed_size; /* Installed physical memory */
225struct mem_region *phys_installed; 225struct mem_region *phys_installed;
226 226
227paddr_t avail_start, avail_end; /* These are used by ps & family */ 227paddr_t avail_start, avail_end; /* These are used by ps & family */
228 228
229static int ptelookup_va(vaddr_t va); 229static int ptelookup_va(vaddr_t va);
230 230
231static inline void 231static inline void
232clrx(void *addr) 232clrx(void *addr)
233{ 233{
234 __asm volatile("clrx [%0]" : : "r" (addr) : "memory"); 234 __asm volatile("clrx [%0]" : : "r" (addr) : "memory");
235} 235}
236 236
237#ifdef MULTIPROCESSOR 237#ifdef MULTIPROCESSOR
238static void 238static void
239tsb_invalidate(vaddr_t va, pmap_t pm) 239tsb_invalidate(vaddr_t va, pmap_t pm)
240{ 240{
241 struct cpu_info *ci; 241 struct cpu_info *ci;
242 int ctx; 242 int ctx;
243 bool kpm = (pm == pmap_kernel()); 243 bool kpm = (pm == pmap_kernel());
244 int i; 244 int i;
245 int64_t tag; 245 int64_t tag;
246 246
247 i = ptelookup_va(va); 247 i = ptelookup_va(va);
248 for (ci = cpus; ci != NULL; ci = ci->ci_next) { 248 for (ci = cpus; ci != NULL; ci = ci->ci_next) {
249 if (!CPUSET_HAS(cpus_active, ci->ci_index)) 249 if (!CPUSET_HAS(cpus_active, ci->ci_index))
250 continue; 250 continue;
251 ctx = pm->pm_ctx[ci->ci_index]; 251 ctx = pm->pm_ctx[ci->ci_index];
252 if (kpm || ctx > 0) { 252 if (kpm || ctx > 0) {
253 tag = TSB_TAG(0, ctx, va); 253 tag = TSB_TAG(0, ctx, va);
254 if (ci->ci_tsb_dmmu[i].tag == tag) { 254 if (ci->ci_tsb_dmmu[i].tag == tag) {
255 clrx(&ci->ci_tsb_dmmu[i].data); 255 clrx(&ci->ci_tsb_dmmu[i].data);
256 } 256 }
257 if (ci->ci_tsb_immu[i].tag == tag) { 257 if (ci->ci_tsb_immu[i].tag == tag) {
258 clrx(&ci->ci_tsb_immu[i].data); 258 clrx(&ci->ci_tsb_immu[i].data);
259 } 259 }
260 } 260 }
261 } 261 }
262} 262}
263#else 263#else
264static inline void 264static inline void
265tsb_invalidate(vaddr_t va, pmap_t pm) 265tsb_invalidate(vaddr_t va, pmap_t pm)
266{ 266{
267 int i; 267 int i;
268 int64_t tag; 268 int64_t tag;
269 269
270 i = ptelookup_va(va); 270 i = ptelookup_va(va);
271 tag = TSB_TAG(0, pmap_ctx(pm), va); 271 tag = TSB_TAG(0, pmap_ctx(pm), va);
272 if (curcpu()->ci_tsb_dmmu[i].tag == tag) { 272 if (curcpu()->ci_tsb_dmmu[i].tag == tag) {
273 clrx(&curcpu()->ci_tsb_dmmu[i].data); 273 clrx(&curcpu()->ci_tsb_dmmu[i].data);
274 } 274 }
275 if (curcpu()->ci_tsb_immu[i].tag == tag) { 275 if (curcpu()->ci_tsb_immu[i].tag == tag) {
276 clrx(&curcpu()->ci_tsb_immu[i].data); 276 clrx(&curcpu()->ci_tsb_immu[i].data);
277 } 277 }
278} 278}
279#endif 279#endif
280 280
281struct prom_map *prom_map; 281struct prom_map *prom_map;
282int prom_map_size; 282int prom_map_size;
283 283
284#ifdef DEBUG 284#ifdef DEBUG
285struct { 285struct {
286 int kernel; /* entering kernel mapping */ 286 int kernel; /* entering kernel mapping */
287 int user; /* entering user mapping */ 287 int user; /* entering user mapping */
288 int ptpneeded; /* needed to allocate a PT page */ 288 int ptpneeded; /* needed to allocate a PT page */
289 int pwchange; /* no mapping change, just wiring or protection */ 289 int pwchange; /* no mapping change, just wiring or protection */
290 int wchange; /* no mapping change, just wiring */ 290 int wchange; /* no mapping change, just wiring */
291 int mchange; /* was mapped but mapping to different page */ 291 int mchange; /* was mapped but mapping to different page */
292 int managed; /* a managed page */ 292 int managed; /* a managed page */
293 int firstpv; /* first mapping for this PA */ 293 int firstpv; /* first mapping for this PA */
294 int secondpv; /* second mapping for this PA */ 294 int secondpv; /* second mapping for this PA */
295 int ci; /* cache inhibited */ 295 int ci; /* cache inhibited */
296 int unmanaged; /* not a managed page */ 296 int unmanaged; /* not a managed page */
297 int flushes; /* cache flushes */ 297 int flushes; /* cache flushes */
298 int cachehit; /* new entry forced valid entry out */ 298 int cachehit; /* new entry forced valid entry out */
299} enter_stats; 299} enter_stats;
300struct { 300struct {
301 int calls; 301 int calls;
302 int removes; 302 int removes;
303 int flushes; 303 int flushes;
304 int tflushes; /* TLB flushes */ 304 int tflushes; /* TLB flushes */
305 int pidflushes; /* HW pid stolen */ 305 int pidflushes; /* HW pid stolen */
306 int pvfirst; 306 int pvfirst;
307 int pvsearch; 307 int pvsearch;
308} remove_stats; 308} remove_stats;
309#define ENTER_STAT(x) enter_stats.x ++ 309#define ENTER_STAT(x) enter_stats.x ++
310#define REMOVE_STAT(x) remove_stats.x ++ 310#define REMOVE_STAT(x) remove_stats.x ++
311 311
312#define PDB_CREATE 0x0001 312#define PDB_CREATE 0x0001
313#define PDB_DESTROY 0x0002 313#define PDB_DESTROY 0x0002
314#define PDB_REMOVE 0x0004 314#define PDB_REMOVE 0x0004
315#define PDB_CHANGEPROT 0x0008 315#define PDB_CHANGEPROT 0x0008
316#define PDB_ENTER 0x0010 316#define PDB_ENTER 0x0010
317#define PDB_DEMAP 0x0020 317#define PDB_DEMAP 0x0020
318#define PDB_REF 0x0040 318#define PDB_REF 0x0040
319#define PDB_COPY 0x0080 319#define PDB_COPY 0x0080
320 320
321#define PDB_MMU_ALLOC 0x0100 321#define PDB_MMU_ALLOC 0x0100
322#define PDB_MMU_STEAL 0x0200 322#define PDB_MMU_STEAL 0x0200
323#define PDB_CTX_ALLOC 0x0400 323#define PDB_CTX_ALLOC 0x0400
324#define PDB_CTX_STEAL 0x0800 324#define PDB_CTX_STEAL 0x0800
325#define PDB_MMUREG_ALLOC 0x1000 325#define PDB_MMUREG_ALLOC 0x1000
326#define PDB_MMUREG_STEAL 0x2000 326#define PDB_MMUREG_STEAL 0x2000
327#define PDB_CACHESTUFF 0x4000 327#define PDB_CACHESTUFF 0x4000
328#define PDB_ALIAS 0x8000 328#define PDB_ALIAS 0x8000
329#define PDB_EXTRACT 0x10000 329#define PDB_EXTRACT 0x10000
330#define PDB_BOOT 0x20000 330#define PDB_BOOT 0x20000
331#define PDB_BOOT1 0x40000 331#define PDB_BOOT1 0x40000
332#define PDB_GROW 0x80000 332#define PDB_GROW 0x80000
333int pmapdebug = 0; 333int pmapdebug = 0;
334/* Number of H/W pages stolen for page tables */ 334/* Number of H/W pages stolen for page tables */
335int pmap_pages_stolen = 0; 335int pmap_pages_stolen = 0;
336 336
337#define BDPRINTF(n, f) if (pmapdebug & (n)) prom_printf f 337#define BDPRINTF(n, f) if (pmapdebug & (n)) prom_printf f
338#define DPRINTF(n, f) if (pmapdebug & (n)) printf f 338#define DPRINTF(n, f) if (pmapdebug & (n)) printf f
339#else 339#else
340#define ENTER_STAT(x) 340#define ENTER_STAT(x)
341#define REMOVE_STAT(x) 341#define REMOVE_STAT(x)
342#define BDPRINTF(n, f) 342#define BDPRINTF(n, f)
343#define DPRINTF(n, f) 343#define DPRINTF(n, f)
344#endif 344#endif
345 345
346#define pv_check() 346#define pv_check()
347 347
348static int pmap_get_page(paddr_t *p); 348static int pmap_get_page(paddr_t *p);
349static void pmap_free_page(paddr_t pa); 349static void pmap_free_page(paddr_t pa);
350 350
351/* 351/*
352 * Global pmap lock. 352 * Global pmap lock.
353 */ 353 */
354static kmutex_t pmap_lock; 354static kmutex_t pmap_lock;
355 355
356/* 356/*
357 * Support for big page sizes. This maps the page size to the 357 * Support for big page sizes. This maps the page size to the
358 * page bits. That is: these are the bits between 8K pages and 358 * page bits. That is: these are the bits between 8K pages and
359 * larger page sizes that cause aliasing. 359 * larger page sizes that cause aliasing.
360 */ 360 */
361#define PSMAP_ENTRY(MASK, CODE) { .mask = MASK, .code = CODE } 361#define PSMAP_ENTRY(MASK, CODE) { .mask = MASK, .code = CODE }
362struct page_size_map page_size_map[] = { 362struct page_size_map page_size_map[] = {
363#ifdef DEBUG 363#ifdef DEBUG
364 PSMAP_ENTRY(0, PGSZ_8K & 0), /* Disable large pages */ 364 PSMAP_ENTRY(0, PGSZ_8K & 0), /* Disable large pages */
365#endif 365#endif
366 PSMAP_ENTRY((4 * 1024 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_4M), 366 PSMAP_ENTRY((4 * 1024 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_4M),
367 PSMAP_ENTRY((512 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_512K), 367 PSMAP_ENTRY((512 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_512K),
368 PSMAP_ENTRY((64 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_64K), 368 PSMAP_ENTRY((64 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_64K),
369 PSMAP_ENTRY((8 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_8K), 369 PSMAP_ENTRY((8 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_8K),
370 PSMAP_ENTRY(0, 0), 370 PSMAP_ENTRY(0, 0),
371}; 371};
372 372
373/* 373/*
374 * Enter a TTE into the kernel pmap only. Don't do anything else. 374 * Enter a TTE into the kernel pmap only. Don't do anything else.
375 * 375 *
376 * Use only during bootstrapping since it does no locking and 376 * Use only during bootstrapping since it does no locking and
377 * can lose ref/mod info!!!! 377 * can lose ref/mod info!!!!
378 * 378 *
379 */ 379 */
380static void pmap_enter_kpage(vaddr_t va, int64_t data) 380static void pmap_enter_kpage(vaddr_t va, int64_t data)
381{ 381{
382 paddr_t newp; 382 paddr_t newp;
383 383
384 newp = 0UL; 384 newp = 0UL;
385 while (pseg_set(pmap_kernel(), va, data, newp) & 1) { 385 while (pseg_set(pmap_kernel(), va, data, newp) & 1) {
386 if (!pmap_get_page(&newp)) { 386 if (!pmap_get_page(&newp)) {
387 prom_printf("pmap_enter_kpage: out of pages\n"); 387 prom_printf("pmap_enter_kpage: out of pages\n");
388 panic("pmap_enter_kpage"); 388 panic("pmap_enter_kpage");
389 } 389 }
390 390
391 ENTER_STAT(ptpneeded); 391 ENTER_STAT(ptpneeded);
392 BDPRINTF(PDB_BOOT1, 392 BDPRINTF(PDB_BOOT1,
393 ("pseg_set: pm=%p va=%p data=%lx newp %lx\n", 393 ("pseg_set: pm=%p va=%p data=%lx newp %lx\n",
394 pmap_kernel(), va, (long)data, (long)newp)); 394 pmap_kernel(), va, (long)data, (long)newp));
395#ifdef DEBUG 395#ifdef DEBUG
396 if (pmapdebug & PDB_BOOT1) 396 if (pmapdebug & PDB_BOOT1)
397 {int i; for (i=0; i<140000000; i++) ;} 397 {int i; for (i=0; i<140000000; i++) ;}
398#endif 398#endif
399 } 399 }
400} 400}
401 401
402/* 402/*
403 * Check the bootargs to see if we need to enable bootdebug. 403 * Check the bootargs to see if we need to enable bootdebug.
404 */ 404 */
405#ifdef DEBUG 405#ifdef DEBUG
406static void pmap_bootdebug(void) 406static void pmap_bootdebug(void)
407{ 407{
408 const char *cp = prom_getbootargs(); 408 const char *cp = prom_getbootargs();
409 409
410 for (;;) 410 for (;;)
411 switch (*++cp) { 411 switch (*++cp) {
412 case '\0': 412 case '\0':
413 return; 413 return;
414 case 'V': 414 case 'V':
415 pmapdebug |= PDB_BOOT|PDB_BOOT1; 415 pmapdebug |= PDB_BOOT|PDB_BOOT1;
416 break; 416 break;
417 case 'D': 417 case 'D':
418 pmapdebug |= PDB_BOOT1; 418 pmapdebug |= PDB_BOOT1;
419 break; 419 break;
420 } 420 }
421} 421}
422#endif 422#endif
423 423
424 424
425/* 425/*
426 * Calculate the correct number of page colors to use. This should be the 426 * Calculate the correct number of page colors to use. This should be the
427 * size of the E$/PAGE_SIZE. However, different CPUs can have different sized 427 * size of the E$/PAGE_SIZE. However, different CPUs can have different sized
428 * E$, so we need to take the GCM of the E$ size. 428 * E$, so we need to take the GCM of the E$ size.
429 */ 429 */
430static int pmap_calculate_colors(void) 430static int pmap_calculate_colors(void)
431{ 431{
432 int node; 432 int node;
433 int size, assoc, color, maxcolor = 1; 433 int size, assoc, color, maxcolor = 1;
434 434
435 for (node = prom_firstchild(prom_findroot()); node != 0; 435 for (node = prom_firstchild(prom_findroot()); node != 0;
436 node = prom_nextsibling(node)) { 436 node = prom_nextsibling(node)) {
437 char *name = prom_getpropstring(node, "device_type"); 437 char *name = prom_getpropstring(node, "device_type");
438 if (strcmp("cpu", name) != 0) 438 if (strcmp("cpu", name) != 0)
439 continue; 439 continue;
440 440
441 /* Found a CPU, get the E$ info. */ 441 /* Found a CPU, get the E$ info. */
442 size = prom_getpropint(node, "ecache-size", -1); 442 size = prom_getpropint(node, "ecache-size", -1);
443 if (size == -1) { 443 if (size == -1) {
444 prom_printf("pmap_calculate_colors: node %x has " 444 prom_printf("pmap_calculate_colors: node %x has "
445 "no ecache-size\n", node); 445 "no ecache-size\n", node);
446 /* If we can't get the E$ size, skip the node */ 446 /* If we can't get the E$ size, skip the node */
447 continue; 447 continue;
448 } 448 }
449 449
450 assoc = prom_getpropint(node, "ecache-associativity", 1); 450 assoc = prom_getpropint(node, "ecache-associativity", 1);
451 color = size/assoc/PAGE_SIZE; 451 color = size/assoc/PAGE_SIZE;
452 if (color > maxcolor) 452 if (color > maxcolor)
453 maxcolor = color; 453 maxcolor = color;
454 } 454 }
455 return (maxcolor); 455 return (maxcolor);
456} 456}
457 457
458static void pmap_alloc_bootargs(void) 458static void pmap_alloc_bootargs(void)
459{ 459{
460/* extern struct cpu_bootargs *cpu_args; */ 460/* extern struct cpu_bootargs *cpu_args; */
461 char *v; 461 char *v;
462 462
463 v = OF_claim(NULL, 2*PAGE_SIZE, PAGE_SIZE); 463 v = OF_claim(NULL, 2*PAGE_SIZE, PAGE_SIZE);
464 if ((v == NULL) || (v == (void*)-1)) 464 if ((v == NULL) || (v == (void*)-1))
465 panic("Can't claim a page of memory."); 465 panic("Can't claim a page of memory.");
466 466
467 memset(v, 0, 2*PAGE_SIZE); 467 memset(v, 0, 2*PAGE_SIZE);
468 468
469 cpu_args = (struct cpu_bootargs*)v; 469 cpu_args = (struct cpu_bootargs*)v;
470} 470}
471 471
472#if defined(MULTIPROCESSOR) 472#if defined(MULTIPROCESSOR)
473static void pmap_mp_init(void); 473static void pmap_mp_init(void);
474 474
475static void 475static void
476pmap_mp_init(void) 476pmap_mp_init(void)
477{ 477{
478 pte_t *tp; 478 pte_t *tp;
479 char *v; 479 char *v;
480 int i; 480 int i;
481 481
482 extern void cpu_mp_startup(void); 482 extern void cpu_mp_startup(void);
483 483
484 if ((v = OF_claim(NULL, PAGE_SIZE, PAGE_SIZE)) == NULL) { 484 if ((v = OF_claim(NULL, PAGE_SIZE, PAGE_SIZE)) == NULL) {
485 panic("pmap_mp_init: Cannot claim a page."); 485 panic("pmap_mp_init: Cannot claim a page.");
486 } 486 }
487 487
488 bcopy(mp_tramp_code, v, mp_tramp_code_len); 488 bcopy(mp_tramp_code, v, mp_tramp_code_len);
489 *(u_long *)(v + mp_tramp_tlb_slots) = kernel_tlb_slots; 489 *(u_long *)(v + mp_tramp_tlb_slots) = kernel_tlb_slots;
490 *(u_long *)(v + mp_tramp_func) = (u_long)cpu_mp_startup; 490 *(u_long *)(v + mp_tramp_func) = (u_long)cpu_mp_startup;
491 *(u_long *)(v + mp_tramp_ci) = (u_long)cpu_args; 491 *(u_long *)(v + mp_tramp_ci) = (u_long)cpu_args;
492 tp = (pte_t *)(v + mp_tramp_code_len); 492 tp = (pte_t *)(v + mp_tramp_code_len);
493 for (i = 0; i < kernel_tlb_slots; i++) { 493 for (i = 0; i < kernel_tlb_slots; i++) {
494 tp[i].tag = kernel_tlbs[i].te_va; 494 tp[i].tag = kernel_tlbs[i].te_va;
495 tp[i].data = TSB_DATA(0, /* g */ 495 tp[i].data = TSB_DATA(0, /* g */
496 PGSZ_4M, /* sz */ 496 PGSZ_4M, /* sz */
497 kernel_tlbs[i].te_pa, /* pa */ 497 kernel_tlbs[i].te_pa, /* pa */
498 1, /* priv */ 498 1, /* priv */
499 1, /* write */ 499 1, /* write */
500 1, /* cache */ 500 1, /* cache */
501 1, /* aliased */ 501 1, /* aliased */
502 1, /* valid */ 502 1, /* valid */
503 0 /* ie */); 503 0 /* ie */);
504 tp[i].data |= TLB_L | TLB_CV; 504 tp[i].data |= TLB_L | TLB_CV;
505 DPRINTF(PDB_BOOT1, ("xtlb[%d]: Tag: %" PRIx64 " Data: %" 505 DPRINTF(PDB_BOOT1, ("xtlb[%d]: Tag: %" PRIx64 " Data: %"
506 PRIx64 "\n", i, tp[i].tag, tp[i].data)); 506 PRIx64 "\n", i, tp[i].tag, tp[i].data));
507 } 507 }
508 508
509 for (i = 0; i < PAGE_SIZE; i += sizeof(long)) 509 for (i = 0; i < PAGE_SIZE; i += sizeof(long))
510 flush(v + i); 510 flush(v + i);
511 511
512 cpu_spinup_trampoline = (vaddr_t)v; 512 cpu_spinup_trampoline = (vaddr_t)v;
513} 513}
514#else 514#else
515#define pmap_mp_init() ((void)0) 515#define pmap_mp_init() ((void)0)
516#endif 516#endif
517 517
518paddr_t pmap_kextract(vaddr_t va); 518paddr_t pmap_kextract(vaddr_t va);
519 519
520paddr_t 520paddr_t
521pmap_kextract(vaddr_t va) 521pmap_kextract(vaddr_t va)
522{ 522{
523 int i; 523 int i;
524 paddr_t paddr = (paddr_t)-1; 524 paddr_t paddr = (paddr_t)-1;
525 525
526 for (i = 0; i < kernel_tlb_slots; i++) { 526 for (i = 0; i < kernel_tlb_slots; i++) {
527 if ((va & ~PAGE_MASK_4M) == kernel_tlbs[i].te_va) { 527 if ((va & ~PAGE_MASK_4M) == kernel_tlbs[i].te_va) {
528 paddr = kernel_tlbs[i].te_pa + 528 paddr = kernel_tlbs[i].te_pa +
529 (paddr_t)(va & PAGE_MASK_4M); 529 (paddr_t)(va & PAGE_MASK_4M);
530 break; 530 break;
531 } 531 }
532 } 532 }
533 533
534 if (i == kernel_tlb_slots) { 534 if (i == kernel_tlb_slots) {
535 panic("pmap_kextract: Address %p is not from kernel space.\n" 535 panic("pmap_kextract: Address %p is not from kernel space.\n"
536 "Data segment is too small?\n", (void*)va); 536 "Data segment is too small?\n", (void*)va);
537 } 537 }
538 538
539 return (paddr); 539 return (paddr);
540} 540}
541 541
542/* 542/*
543 * Bootstrap kernel allocator, allocates from unused space in 4MB kernel 543 * Bootstrap kernel allocator, allocates from unused space in 4MB kernel
544 * data segment meaning that 544 * data segment meaning that
545 * 545 *
546 * - Access to allocated memory will never generate a trap 546 * - Access to allocated memory will never generate a trap
547 * - Allocated chunks are never reclaimed or freed 547 * - Allocated chunks are never reclaimed or freed
548 * - Allocation calls do not change PROM memlists 548 * - Allocation calls do not change PROM memlists
549 */ 549 */
550static struct mem_region kdata_mem_pool; 550static struct mem_region kdata_mem_pool;
551 551
552static void 552static void
553kdata_alloc_init(vaddr_t va_start, vaddr_t va_end) 553kdata_alloc_init(vaddr_t va_start, vaddr_t va_end)
554{ 554{
555 vsize_t va_size = va_end - va_start; 555 vsize_t va_size = va_end - va_start;
556 556
557 kdata_mem_pool.start = va_start; 557 kdata_mem_pool.start = va_start;
558 kdata_mem_pool.size = va_size; 558 kdata_mem_pool.size = va_size;
559 559
560 BDPRINTF(PDB_BOOT, ("kdata_alloc_init(): %d bytes @%p.\n", va_size, 560 BDPRINTF(PDB_BOOT, ("kdata_alloc_init(): %d bytes @%p.\n", va_size,
561 va_start)); 561 va_start));
562} 562}
563 563
564static vaddr_t 564static vaddr_t
565kdata_alloc(vsize_t size, vsize_t align) 565kdata_alloc(vsize_t size, vsize_t align)
566{ 566{
567 vaddr_t va; 567 vaddr_t va;
568 vsize_t asize; 568 vsize_t asize;
569 569
570 asize = roundup(kdata_mem_pool.start, align) - kdata_mem_pool.start; 570 asize = roundup(kdata_mem_pool.start, align) - kdata_mem_pool.start;
571 571
572 kdata_mem_pool.start += asize; 572 kdata_mem_pool.start += asize;
573 kdata_mem_pool.size -= asize; 573 kdata_mem_pool.size -= asize;
574 574
575 if (kdata_mem_pool.size < size) { 575 if (kdata_mem_pool.size < size) {
576 panic("kdata_alloc(): Data segment is too small.\n"); 576 panic("kdata_alloc(): Data segment is too small.\n");
577 } 577 }
578 578
579 va = kdata_mem_pool.start; 579 va = kdata_mem_pool.start;
580 kdata_mem_pool.start += size; 580 kdata_mem_pool.start += size;
581 kdata_mem_pool.size -= size; 581 kdata_mem_pool.size -= size;
582 582
583 BDPRINTF(PDB_BOOT, ("kdata_alloc(): Allocated %d@%p, %d free.\n", 583 BDPRINTF(PDB_BOOT, ("kdata_alloc(): Allocated %d@%p, %d free.\n",
584 size, (void*)va, kdata_mem_pool.size)); 584 size, (void*)va, kdata_mem_pool.size));
585 585
586 return (va); 586 return (va);
587} 587}
588 588
589/* 589/*
590 * Unified routine for reading PROM properties. 590 * Unified routine for reading PROM properties.
591 */ 591 */
592static void 592static void
593pmap_read_memlist(const char *device, const char *property, void **ml, 593pmap_read_memlist(const char *device, const char *property, void **ml,
594 int *ml_size, vaddr_t (* ml_alloc)(vsize_t, vsize_t)) 594 int *ml_size, vaddr_t (* ml_alloc)(vsize_t, vsize_t))
595{ 595{
596 void *va; 596 void *va;
597 int size, handle; 597 int size, handle;
598 598
599 if ( (handle = prom_finddevice(device)) == 0) { 599 if ( (handle = prom_finddevice(device)) == 0) {
600 prom_printf("pmap_read_memlist(): No %s device found.\n", 600 prom_printf("pmap_read_memlist(): No %s device found.\n",
601 device); 601 device);
602 prom_halt(); 602 prom_halt();
603 } 603 }
604 if ( (size = OF_getproplen(handle, property)) < 0) { 604 if ( (size = OF_getproplen(handle, property)) < 0) {
605 prom_printf("pmap_read_memlist(): %s/%s has no length.\n", 605 prom_printf("pmap_read_memlist(): %s/%s has no length.\n",
606 device, property); 606 device, property);
607 prom_halt(); 607 prom_halt();
608 } 608 }
609 if ( (va = (void*)(* ml_alloc)(size, sizeof(uint64_t))) == NULL) { 609 if ( (va = (void*)(* ml_alloc)(size, sizeof(uint64_t))) == NULL) {
610 prom_printf("pmap_read_memlist(): Cannot allocate memlist.\n"); 610 prom_printf("pmap_read_memlist(): Cannot allocate memlist.\n");
611 prom_halt(); 611 prom_halt();
612 } 612 }
613 if (OF_getprop(handle, property, va, size) <= 0) { 613 if (OF_getprop(handle, property, va, size) <= 0) {
614 prom_printf("pmap_read_memlist(): Cannot read %s/%s.\n", 614 prom_printf("pmap_read_memlist(): Cannot read %s/%s.\n",
615 device, property); 615 device, property);
616 prom_halt(); 616 prom_halt();
617 } 617 }
618 618
619 *ml = va; 619 *ml = va;
620 *ml_size = size; 620 *ml_size = size;
621} 621}
622 622
623/* 623/*
624 * This is called during bootstrap, before the system is really initialized. 624 * This is called during bootstrap, before the system is really initialized.
625 * 625 *
626 * It's called with the start and end virtual addresses of the kernel. We 626 * It's called with the start and end virtual addresses of the kernel. We
627 * bootstrap the pmap allocator now. We will allocate the basic structures we 627 * bootstrap the pmap allocator now. We will allocate the basic structures we
628 * need to bootstrap the VM system here: the page frame tables, the TSB, and 628 * need to bootstrap the VM system here: the page frame tables, the TSB, and
629 * the free memory lists. 629 * the free memory lists.
630 * 630 *
631 * Now all this is becoming a bit obsolete. maxctx is still important, but by 631 * Now all this is becoming a bit obsolete. maxctx is still important, but by
632 * separating the kernel text and data segments we really would need to 632 * separating the kernel text and data segments we really would need to
633 * provide the start and end of each segment. But we can't. The rodata 633 * provide the start and end of each segment. But we can't. The rodata
634 * segment is attached to the end of the kernel segment and has nothing to 634 * segment is attached to the end of the kernel segment and has nothing to
635 * delimit its end. We could still pass in the beginning of the kernel and 635 * delimit its end. We could still pass in the beginning of the kernel and
636 * the beginning and end of the data segment but we could also just as easily 636 * the beginning and end of the data segment but we could also just as easily
637 * calculate that all in here. 637 * calculate that all in here.
638 * 638 *
639 * To handle the kernel text, we need to do a reverse mapping of the start of 639 * To handle the kernel text, we need to do a reverse mapping of the start of
640 * the kernel, then traverse the free memory lists to find out how big it is. 640 * the kernel, then traverse the free memory lists to find out how big it is.
641 */ 641 */
642 642
643void 643void
644pmap_bootstrap(u_long kernelstart, u_long kernelend) 644pmap_bootstrap(u_long kernelstart, u_long kernelend)
645{ 645{
646 extern char etext[], data_start[]; /* start of data segment */ 646 extern char etext[], data_start[]; /* start of data segment */
647 extern int msgbufmapped; 647 extern int msgbufmapped;
648 struct mem_region *mp, *mp1, *avail, *orig; 648 struct mem_region *mp, *mp1, *avail, *orig;
649 int i, j, pcnt, msgbufsiz; 649 int i, j, pcnt, msgbufsiz;
650 size_t s, sz; 650 size_t s, sz;
651 int64_t data; 651 int64_t data;
652 vaddr_t va, intstk; 652 vaddr_t va, intstk;
653 uint64_t phys_msgbuf; 653 uint64_t phys_msgbuf;
654 paddr_t newp = 0; 654 paddr_t newp = 0;
655 655
656 void *prom_memlist; 656 void *prom_memlist;
657 int prom_memlist_size; 657 int prom_memlist_size;
658 658
659 BDPRINTF(PDB_BOOT, ("Entered pmap_bootstrap.\n")); 659 BDPRINTF(PDB_BOOT, ("Entered pmap_bootstrap.\n"));
660 660
661 /* 661 /*
662 * Calculate kernel size. 662 * Calculate kernel size.
663 */ 663 */
664 ktext = kernelstart; 664 ktext = kernelstart;
665 ktextp = pmap_kextract(ktext); 665 ktextp = pmap_kextract(ktext);
666 ektext = roundup((vaddr_t)etext, PAGE_SIZE_4M); 666 ektext = roundup((vaddr_t)etext, PAGE_SIZE_4M);
667 ektextp = roundup(pmap_kextract((vaddr_t)etext), PAGE_SIZE_4M); 667 ektextp = roundup(pmap_kextract((vaddr_t)etext), PAGE_SIZE_4M);
668 668
669 kdata = (vaddr_t)data_start; 669 kdata = (vaddr_t)data_start;
670 kdatap = pmap_kextract(kdata); 670 kdatap = pmap_kextract(kdata);
671 ekdata = roundup(kernelend, PAGE_SIZE_4M); 671 ekdata = roundup(kernelend, PAGE_SIZE_4M);
672 ekdatap = roundup(pmap_kextract(kernelend), PAGE_SIZE_4M); 672 ekdatap = roundup(pmap_kextract(kernelend), PAGE_SIZE_4M);
673 673
674 BDPRINTF(PDB_BOOT, ("Virtual layout: text %lx-%lx, data %lx-%lx.\n", 674 BDPRINTF(PDB_BOOT, ("Virtual layout: text %lx-%lx, data %lx-%lx.\n",
675 ktext, ektext, kdata, ekdata)); 675 ktext, ektext, kdata, ekdata));
676 BDPRINTF(PDB_BOOT, ("Physical layout: text %lx-%lx, data %lx-%lx.\n", 676 BDPRINTF(PDB_BOOT, ("Physical layout: text %lx-%lx, data %lx-%lx.\n",
677 ktextp, ektextp, kdatap, ekdatap)); 677 ktextp, ektextp, kdatap, ekdatap));
678 678
679 /* Initialize bootstrap allocator. */ 679 /* Initialize bootstrap allocator. */
680 kdata_alloc_init(kernelend + 1 * 1024 * 1024, ekdata); 680 kdata_alloc_init(kernelend + 1 * 1024 * 1024, ekdata);
681 681
682#ifdef DEBUG 682#ifdef DEBUG
683 pmap_bootdebug(); 683 pmap_bootdebug();
684#endif 684#endif
685 685
686 pmap_alloc_bootargs(); 686 pmap_alloc_bootargs();
687 pmap_mp_init(); 687 pmap_mp_init();
688 688
689 /* 689 /*
690 * set machine page size 690 * set machine page size
691 */ 691 */
692 uvmexp.pagesize = NBPG; 692 uvmexp.pagesize = NBPG;
693 uvmexp.ncolors = pmap_calculate_colors(); 693 uvmexp.ncolors = pmap_calculate_colors();
694 uvm_setpagesize(); 694 uvm_setpagesize();
695 695
696 /* 696 /*
 697 * Get hold of the message buffer. 697 * Get hold of the message buffer.
698 */ 698 */
699 msgbufp = (struct kern_msgbuf *)(vaddr_t)MSGBUF_VA; 699 msgbufp = (struct kern_msgbuf *)(vaddr_t)MSGBUF_VA;
700/* XXXXX -- increase msgbufsiz for uvmhist printing */ 700/* XXXXX -- increase msgbufsiz for uvmhist printing */
701 msgbufsiz = 4*PAGE_SIZE /* round_page(sizeof(struct msgbuf)) */; 701 msgbufsiz = 4*PAGE_SIZE /* round_page(sizeof(struct msgbuf)) */;
702 BDPRINTF(PDB_BOOT, ("Trying to allocate msgbuf at %lx, size %lx\n", 702 BDPRINTF(PDB_BOOT, ("Trying to allocate msgbuf at %lx, size %lx\n",
703 (long)msgbufp, (long)msgbufsiz)); 703 (long)msgbufp, (long)msgbufsiz));
704 if ((long)msgbufp != 704 if ((long)msgbufp !=
705 (long)(phys_msgbuf = prom_claim_virt((vaddr_t)msgbufp, msgbufsiz))) 705 (long)(phys_msgbuf = prom_claim_virt((vaddr_t)msgbufp, msgbufsiz)))
706 prom_printf( 706 prom_printf(
707 "cannot get msgbuf VA, msgbufp=%p, phys_msgbuf=%lx\n", 707 "cannot get msgbuf VA, msgbufp=%p, phys_msgbuf=%lx\n",
708 (void *)msgbufp, (long)phys_msgbuf); 708 (void *)msgbufp, (long)phys_msgbuf);
709 phys_msgbuf = prom_get_msgbuf(msgbufsiz, MMU_PAGE_ALIGN); 709 phys_msgbuf = prom_get_msgbuf(msgbufsiz, MMU_PAGE_ALIGN);
710 BDPRINTF(PDB_BOOT, 710 BDPRINTF(PDB_BOOT,
711 ("We should have the memory at %lx, let's map it in\n", 711 ("We should have the memory at %lx, let's map it in\n",
712 phys_msgbuf)); 712 phys_msgbuf));
713 if (prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp, 713 if (prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp,
714 -1/* sunos does this */) == -1) { 714 -1/* sunos does this */) == -1) {
715 prom_printf("Failed to map msgbuf\n"); 715 prom_printf("Failed to map msgbuf\n");
716 } else { 716 } else {
717 BDPRINTF(PDB_BOOT, ("msgbuf mapped at %p\n", 717 BDPRINTF(PDB_BOOT, ("msgbuf mapped at %p\n",
718 (void *)msgbufp)); 718 (void *)msgbufp));
719 } 719 }
720 msgbufmapped = 1; /* enable message buffer */ 720 msgbufmapped = 1; /* enable message buffer */
721 initmsgbuf((void *)msgbufp, msgbufsiz); 721 initmsgbuf((void *)msgbufp, msgbufsiz);
722 722
723 /* 723 /*
724 * Find out how much RAM we have installed. 724 * Find out how much RAM we have installed.
725 */ 725 */
726 BDPRINTF(PDB_BOOT, ("pmap_bootstrap: getting phys installed\n")); 726 BDPRINTF(PDB_BOOT, ("pmap_bootstrap: getting phys installed\n"));
727 pmap_read_memlist("/memory", "reg", &prom_memlist, &prom_memlist_size, 727 pmap_read_memlist("/memory", "reg", &prom_memlist, &prom_memlist_size,
728 kdata_alloc); 728 kdata_alloc);
729 phys_installed = prom_memlist; 729 phys_installed = prom_memlist;
730 phys_installed_size = prom_memlist_size / sizeof(*phys_installed); 730 phys_installed_size = prom_memlist_size / sizeof(*phys_installed);
731 731
732#ifdef DEBUG 732#ifdef DEBUG
733 if (pmapdebug & PDB_BOOT1) { 733 if (pmapdebug & PDB_BOOT1) {
734 /* print out mem list */ 734 /* print out mem list */
735 prom_printf("Installed physical memory:\n"); 735 prom_printf("Installed physical memory:\n");
736 for (i = 0; i < phys_installed_size; i++) { 736 for (i = 0; i < phys_installed_size; i++) {
737 prom_printf("memlist start %lx size %lx\n", 737 prom_printf("memlist start %lx size %lx\n",
738 (u_long)phys_installed[i].start, 738 (u_long)phys_installed[i].start,
739 (u_long)phys_installed[i].size); 739 (u_long)phys_installed[i].size);
740 } 740 }
741 } 741 }
742#endif 742#endif
743 743
744 BDPRINTF(PDB_BOOT1, ("Calculating physmem:")); 744 BDPRINTF(PDB_BOOT1, ("Calculating physmem:"));
745 for (i = 0; i < phys_installed_size; i++) 745 for (i = 0; i < phys_installed_size; i++)
746 physmem += btoc(phys_installed[i].size); 746 physmem += btoc(phys_installed[i].size);
747 BDPRINTF(PDB_BOOT1, (" result %x or %d pages\n", 747 BDPRINTF(PDB_BOOT1, (" result %x or %d pages\n",
748 (int)physmem, (int)physmem)); 748 (int)physmem, (int)physmem));
749 749
750 /* 750 /*
751 * Calculate approx TSB size. This probably needs tweaking. 751 * Calculate approx TSB size. This probably needs tweaking.
752 */ 752 */
753 if (physmem < btoc(64 * 1024 * 1024)) 753 if (physmem < btoc(64 * 1024 * 1024))
754 tsbsize = 0; 754 tsbsize = 0;
755 else if (physmem < btoc(512 * 1024 * 1024)) 755 else if (physmem < btoc(512 * 1024 * 1024))
756 tsbsize = 1; 756 tsbsize = 1;
757 else 757 else
758 tsbsize = 2; 758 tsbsize = 2;
759 759
760 /* 760 /*
761 * Save the prom translations 761 * Save the prom translations
762 */ 762 */
763 pmap_read_memlist("/virtual-memory", "translations", &prom_memlist, 763 pmap_read_memlist("/virtual-memory", "translations", &prom_memlist,
764 &prom_memlist_size, kdata_alloc); 764 &prom_memlist_size, kdata_alloc);
765 prom_map = prom_memlist; 765 prom_map = prom_memlist;
766 prom_map_size = prom_memlist_size / sizeof(struct prom_map); 766 prom_map_size = prom_memlist_size / sizeof(struct prom_map);
767 767
768#ifdef DEBUG 768#ifdef DEBUG
769 if (pmapdebug & PDB_BOOT) { 769 if (pmapdebug & PDB_BOOT) {
770 /* print out mem list */ 770 /* print out mem list */
771 prom_printf("Prom xlations:\n"); 771 prom_printf("Prom xlations:\n");
772 for (i = 0; i < prom_map_size; i++) { 772 for (i = 0; i < prom_map_size; i++) {
773 prom_printf("start %016lx size %016lx tte %016lx\n", 773 prom_printf("start %016lx size %016lx tte %016lx\n",
774 (u_long)prom_map[i].vstart, 774 (u_long)prom_map[i].vstart,
775 (u_long)prom_map[i].vsize, 775 (u_long)prom_map[i].vsize,
776 (u_long)prom_map[i].tte); 776 (u_long)prom_map[i].tte);
777 } 777 }
778 prom_printf("End of prom xlations\n"); 778 prom_printf("End of prom xlations\n");
779 } 779 }
780#endif 780#endif
781 781
782 /* 782 /*
783 * Here's a quick in-lined reverse bubble sort. It gets rid of 783 * Here's a quick in-lined reverse bubble sort. It gets rid of
784 * any translations inside the kernel data VA range. 784 * any translations inside the kernel data VA range.
785 */ 785 */
786 for (i = 0; i < prom_map_size; i++) { 786 for (i = 0; i < prom_map_size; i++) {
787 for (j = i; j < prom_map_size; j++) { 787 for (j = i; j < prom_map_size; j++) {
788 if (prom_map[j].vstart > prom_map[i].vstart) { 788 if (prom_map[j].vstart > prom_map[i].vstart) {
789 struct prom_map tmp; 789 struct prom_map tmp;
790 790
791 tmp = prom_map[i]; 791 tmp = prom_map[i];
792 prom_map[i] = prom_map[j]; 792 prom_map[i] = prom_map[j];
793 prom_map[j] = tmp; 793 prom_map[j] = tmp;
794 } 794 }
795 } 795 }
796 } 796 }
797#ifdef DEBUG 797#ifdef DEBUG
798 if (pmapdebug & PDB_BOOT) { 798 if (pmapdebug & PDB_BOOT) {
799 /* print out mem list */ 799 /* print out mem list */
800 prom_printf("Prom xlations:\n"); 800 prom_printf("Prom xlations:\n");
801 for (i = 0; i < prom_map_size; i++) { 801 for (i = 0; i < prom_map_size; i++) {
802 prom_printf("start %016lx size %016lx tte %016lx\n", 802 prom_printf("start %016lx size %016lx tte %016lx\n",
803 (u_long)prom_map[i].vstart, 803 (u_long)prom_map[i].vstart,
804 (u_long)prom_map[i].vsize, 804 (u_long)prom_map[i].vsize,
805 (u_long)prom_map[i].tte); 805 (u_long)prom_map[i].tte);
806 } 806 }
807 prom_printf("End of prom xlations\n"); 807 prom_printf("End of prom xlations\n");
808 } 808 }
809#endif 809#endif
810 810
811 /* 811 /*
812 * Allocate a ncpu*64KB page for the cpu_info & stack structure now. 812 * Allocate a ncpu*64KB page for the cpu_info & stack structure now.
813 */ 813 */
814 cpu0paddr = prom_alloc_phys(8 * PAGE_SIZE * sparc_ncpus, 8 * PAGE_SIZE); 814 cpu0paddr = prom_alloc_phys(8 * PAGE_SIZE * sparc_ncpus, 8 * PAGE_SIZE);
815 if (cpu0paddr == 0) { 815 if (cpu0paddr == 0) {
816 prom_printf("Cannot allocate cpu_infos\n"); 816 prom_printf("Cannot allocate cpu_infos\n");
817 prom_halt(); 817 prom_halt();
818 } 818 }
819 819
820 /* 820 /*
821 * Now the kernel text segment is in its final location we can try to 821 * Now the kernel text segment is in its final location we can try to
822 * find out how much memory really is free. 822 * find out how much memory really is free.
823 */ 823 */
824 pmap_read_memlist("/memory", "available", &prom_memlist, 824 pmap_read_memlist("/memory", "available", &prom_memlist,
825 &prom_memlist_size, kdata_alloc); 825 &prom_memlist_size, kdata_alloc);
826 orig = prom_memlist; 826 orig = prom_memlist;
827 sz = prom_memlist_size; 827 sz = prom_memlist_size;
828 pcnt = prom_memlist_size / sizeof(*orig); 828 pcnt = prom_memlist_size / sizeof(*orig);
829 829
830 BDPRINTF(PDB_BOOT1, ("Available physical memory:\n")); 830 BDPRINTF(PDB_BOOT1, ("Available physical memory:\n"));
831 avail = (struct mem_region*)kdata_alloc(sz, sizeof(uint64_t)); 831 avail = (struct mem_region*)kdata_alloc(sz, sizeof(uint64_t));
832 for (i = 0; i < pcnt; i++) { 832 for (i = 0; i < pcnt; i++) {
833 avail[i] = orig[i]; 833 avail[i] = orig[i];
834 BDPRINTF(PDB_BOOT1, ("memlist start %lx size %lx\n", 834 BDPRINTF(PDB_BOOT1, ("memlist start %lx size %lx\n",
835 (u_long)orig[i].start, 835 (u_long)orig[i].start,
836 (u_long)orig[i].size)); 836 (u_long)orig[i].size));
837 } 837 }
838 BDPRINTF(PDB_BOOT1, ("End of available physical memory\n")); 838 BDPRINTF(PDB_BOOT1, ("End of available physical memory\n"));
839 839
840 BDPRINTF(PDB_BOOT, ("ktext %08lx[%08lx] - %08lx[%08lx] : " 840 BDPRINTF(PDB_BOOT, ("ktext %08lx[%08lx] - %08lx[%08lx] : "
841 "kdata %08lx[%08lx] - %08lx[%08lx]\n", 841 "kdata %08lx[%08lx] - %08lx[%08lx]\n",
842 (u_long)ktext, (u_long)ktextp, 842 (u_long)ktext, (u_long)ktextp,
843 (u_long)ektext, (u_long)ektextp, 843 (u_long)ektext, (u_long)ektextp,
844 (u_long)kdata, (u_long)kdatap, 844 (u_long)kdata, (u_long)kdatap,
845 (u_long)ekdata, (u_long)ekdatap)); 845 (u_long)ekdata, (u_long)ekdatap));
846#ifdef DEBUG 846#ifdef DEBUG
847 if (pmapdebug & PDB_BOOT1) { 847 if (pmapdebug & PDB_BOOT1) {
848 /* print out mem list */ 848 /* print out mem list */
849 prom_printf("Available %lx physical memory before cleanup:\n", 849 prom_printf("Available %lx physical memory before cleanup:\n",
850 (u_long)avail); 850 (u_long)avail);
851 for (i = 0; i < pcnt; i++) { 851 for (i = 0; i < pcnt; i++) {
852 prom_printf("memlist start %lx size %lx\n", 852 prom_printf("memlist start %lx size %lx\n",
853 (u_long)avail[i].start, 853 (u_long)avail[i].start,
854 (u_long)avail[i].size); 854 (u_long)avail[i].size);
855 } 855 }
856 prom_printf("End of available physical memory before cleanup\n"); 856 prom_printf("End of available physical memory before cleanup\n");
857 prom_printf("kernel physical text size %08lx - %08lx\n", 857 prom_printf("kernel physical text size %08lx - %08lx\n",
858 (u_long)ktextp, (u_long)ektextp); 858 (u_long)ktextp, (u_long)ektextp);
859 prom_printf("kernel physical data size %08lx - %08lx\n", 859 prom_printf("kernel physical data size %08lx - %08lx\n",
860 (u_long)kdatap, (u_long)ekdatap); 860 (u_long)kdatap, (u_long)ekdatap);
861 } 861 }
862#endif 862#endif
863 /* 863 /*
864 * Here's a another quick in-lined bubble sort. 864 * Here's a another quick in-lined bubble sort.
865 */ 865 */
866 for (i = 0; i < pcnt; i++) { 866 for (i = 0; i < pcnt; i++) {
867 for (j = i; j < pcnt; j++) { 867 for (j = i; j < pcnt; j++) {
868 if (avail[j].start < avail[i].start) { 868 if (avail[j].start < avail[i].start) {
869 struct mem_region tmp; 869 struct mem_region tmp;
870 tmp = avail[i]; 870 tmp = avail[i];
871 avail[i] = avail[j]; 871 avail[i] = avail[j];
872 avail[j] = tmp; 872 avail[j] = tmp;
873 } 873 }
874 } 874 }
875 } 875 }
876 876
877 /* Throw away page zero if we have it. */ 877 /* Throw away page zero if we have it. */
878 if (avail->start == 0) { 878 if (avail->start == 0) {
879 avail->start += PAGE_SIZE; 879 avail->start += PAGE_SIZE;
880 avail->size -= PAGE_SIZE; 880 avail->size -= PAGE_SIZE;
881 } 881 }
882 882
883 /* 883 /*
884 * Now we need to remove the area we valloc'ed from the available 884 * Now we need to remove the area we valloc'ed from the available
885 * memory lists. (NB: we may have already alloc'ed the entire space). 885 * memory lists. (NB: we may have already alloc'ed the entire space).
886 */ 886 */
887 npgs = 0; 887 npgs = 0;
888 for (mp = avail, i = 0; i < pcnt; i++, mp = &avail[i]) { 888 for (mp = avail, i = 0; i < pcnt; i++, mp = &avail[i]) {
889 /* 889 /*
890 * Now page align the start of the region. 890 * Now page align the start of the region.
891 */ 891 */
892 s = mp->start % PAGE_SIZE; 892 s = mp->start % PAGE_SIZE;
893 if (mp->size >= s) { 893 if (mp->size >= s) {
894 mp->size -= s; 894 mp->size -= s;
895 mp->start += s; 895 mp->start += s;
896 } 896 }
897 /* 897 /*
898 * And now align the size of the region. 898 * And now align the size of the region.
899 */ 899 */
900 mp->size -= mp->size % PAGE_SIZE; 900 mp->size -= mp->size % PAGE_SIZE;
901 /* 901 /*
902 * Check whether some memory is left here. 902 * Check whether some memory is left here.
903 */ 903 */
904 if (mp->size == 0) { 904 if (mp->size == 0) {
905 memcpy(mp, mp + 1, 905 memcpy(mp, mp + 1,
906 (pcnt - (mp - avail)) * sizeof *mp); 906 (pcnt - (mp - avail)) * sizeof *mp);
907 pcnt--; 907 pcnt--;
908 mp--; 908 mp--;
909 continue; 909 continue;
910 } 910 }
911 s = mp->start; 911 s = mp->start;
912 sz = mp->size; 912 sz = mp->size;
913 npgs += btoc(sz); 913 npgs += btoc(sz);
914 for (mp1 = avail; mp1 < mp; mp1++) 914 for (mp1 = avail; mp1 < mp; mp1++)
915 if (s < mp1->start) 915 if (s < mp1->start)
916 break; 916 break;
917 if (mp1 < mp) { 917 if (mp1 < mp) {
918 memcpy(mp1 + 1, mp1, (char *)mp - (char *)mp1); 918 memcpy(mp1 + 1, mp1, (char *)mp - (char *)mp1);
919 mp1->start = s; 919 mp1->start = s;
920 mp1->size = sz; 920 mp1->size = sz;
921 } 921 }
922#ifdef DEBUG 922#ifdef DEBUG
923/* Clear all memory we give to the VM system. I want to make sure 923/* Clear all memory we give to the VM system. I want to make sure
924 * the PROM isn't using it for something, so this should break the PROM. 924 * the PROM isn't using it for something, so this should break the PROM.
925 */ 925 */
926 926
927/* Calling pmap_zero_page() at this point also hangs some machines 927/* Calling pmap_zero_page() at this point also hangs some machines
928 * so don't do it at all. -- pk 26/02/2002 928 * so don't do it at all. -- pk 26/02/2002
929 */ 929 */
930#if 0 930#if 0
931 { 931 {
932 paddr_t p; 932 paddr_t p;
933 for (p = mp->start; p < mp->start+mp->size; 933 for (p = mp->start; p < mp->start+mp->size;
934 p += PAGE_SIZE) 934 p += PAGE_SIZE)
935 pmap_zero_page(p); 935 pmap_zero_page(p);
936 } 936 }
937#endif 937#endif
938#endif /* DEBUG */ 938#endif /* DEBUG */
939 /* 939 /*
940 * In future we should be able to specify both allocated 940 * In future we should be able to specify both allocated
941 * and free. 941 * and free.
942 */ 942 */
943 BDPRINTF(PDB_BOOT1, ("uvm_page_physload(%lx, %lx)\n", 943 BDPRINTF(PDB_BOOT1, ("uvm_page_physload(%lx, %lx)\n",
944 (long)mp->start, 944 (long)mp->start,
945 (long)(mp->start + mp->size))); 945 (long)(mp->start + mp->size)));
946 uvm_page_physload( 946 uvm_page_physload(
947 atop(mp->start), 947 atop(mp->start),
948 atop(mp->start+mp->size), 948 atop(mp->start+mp->size),
949 atop(mp->start), 949 atop(mp->start),
950 atop(mp->start+mp->size), 950 atop(mp->start+mp->size),
951 VM_FREELIST_DEFAULT); 951 VM_FREELIST_DEFAULT);
952 } 952 }
953 953
954#ifdef DEBUG 954#ifdef DEBUG
955 if (pmapdebug & PDB_BOOT) { 955 if (pmapdebug & PDB_BOOT) {
956 /* print out mem list */ 956 /* print out mem list */
957 prom_printf("Available physical memory after cleanup:\n"); 957 prom_printf("Available physical memory after cleanup:\n");
958 for (i = 0; i < pcnt; i++) { 958 for (i = 0; i < pcnt; i++) {
959 prom_printf("avail start %lx size %lx\n", 959 prom_printf("avail start %lx size %lx\n",
960 (long)avail[i].start, (long)avail[i].size); 960 (long)avail[i].start, (long)avail[i].size);
961 } 961 }
962 prom_printf("End of available physical memory after cleanup\n"); 962 prom_printf("End of available physical memory after cleanup\n");
963 } 963 }
964#endif 964#endif
965 /* 965 /*
966 * Allocate and clear out pmap_kernel()->pm_segs[] 966 * Allocate and clear out pmap_kernel()->pm_segs[]
967 */ 967 */
968 pmap_kernel()->pm_refs = 1; 968 pmap_kernel()->pm_refs = 1;
969 memset(&pmap_kernel()->pm_ctx, 0, sizeof(pmap_kernel()->pm_ctx)); 969 memset(&pmap_kernel()->pm_ctx, 0, sizeof(pmap_kernel()->pm_ctx));
970 970
971 /* Throw away page zero */ 971 /* Throw away page zero */
972 do { 972 do {
973 pmap_get_page(&newp); 973 pmap_get_page(&newp);
974 } while (!newp); 974 } while (!newp);
975 pmap_kernel()->pm_segs=(paddr_t *)(u_long)newp; 975 pmap_kernel()->pm_segs=(paddr_t *)(u_long)newp;
976 pmap_kernel()->pm_physaddr = newp; 976 pmap_kernel()->pm_physaddr = newp;
977 977
978 /* 978 /*
979 * finish filling out kernel pmap. 979 * finish filling out kernel pmap.
980 */ 980 */
981 981
982 BDPRINTF(PDB_BOOT, ("pmap_kernel()->pm_physaddr = %lx\n", 982 BDPRINTF(PDB_BOOT, ("pmap_kernel()->pm_physaddr = %lx\n",
983 (long)pmap_kernel()->pm_physaddr)); 983 (long)pmap_kernel()->pm_physaddr));
984 /* 984 /*
985 * Tell pmap about our mesgbuf -- Hope this works already 985 * Tell pmap about our mesgbuf -- Hope this works already
986 */ 986 */
987#ifdef DEBUG 987#ifdef DEBUG
988 BDPRINTF(PDB_BOOT1, ("Calling consinit()\n")); 988 BDPRINTF(PDB_BOOT1, ("Calling consinit()\n"));
989 if (pmapdebug & PDB_BOOT1) 989 if (pmapdebug & PDB_BOOT1)
990 consinit(); 990 consinit();
991 BDPRINTF(PDB_BOOT1, ("Inserting mesgbuf into pmap_kernel()\n")); 991 BDPRINTF(PDB_BOOT1, ("Inserting mesgbuf into pmap_kernel()\n"));
992#endif 992#endif
993 /* it's not safe to call pmap_enter so we need to do this ourselves */ 993 /* it's not safe to call pmap_enter so we need to do this ourselves */
994 va = (vaddr_t)msgbufp; 994 va = (vaddr_t)msgbufp;
995 prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp, -1); 995 prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp, -1);
996 while (msgbufsiz) { 996 while (msgbufsiz) {
997 data = TSB_DATA(0 /* global */, 997 data = TSB_DATA(0 /* global */,
998 PGSZ_8K, 998 PGSZ_8K,
999 phys_msgbuf, 999 phys_msgbuf,
1000 1 /* priv */, 1000 1 /* priv */,
1001 1 /* Write */, 1001 1 /* Write */,
1002 1 /* Cacheable */, 1002 1 /* Cacheable */,
1003 FORCE_ALIAS /* ALIAS -- Disable D$ */, 1003 FORCE_ALIAS /* ALIAS -- Disable D$ */,
1004 1 /* valid */, 1004 1 /* valid */,
1005 0 /* IE */); 1005 0 /* IE */);
1006 pmap_enter_kpage(va, data); 1006 pmap_enter_kpage(va, data);
1007 va += PAGE_SIZE; 1007 va += PAGE_SIZE;
1008 msgbufsiz -= PAGE_SIZE; 1008 msgbufsiz -= PAGE_SIZE;
1009 phys_msgbuf += PAGE_SIZE; 1009 phys_msgbuf += PAGE_SIZE;
1010 } 1010 }
1011 BDPRINTF(PDB_BOOT1, ("Done inserting mesgbuf into pmap_kernel()\n")); 1011 BDPRINTF(PDB_BOOT1, ("Done inserting mesgbuf into pmap_kernel()\n"));
1012 1012
1013 BDPRINTF(PDB_BOOT1, ("Inserting PROM mappings into pmap_kernel()\n")); 1013 BDPRINTF(PDB_BOOT1, ("Inserting PROM mappings into pmap_kernel()\n"));
1014 for (i = 0; i < prom_map_size; i++) 1014 for (i = 0; i < prom_map_size; i++)
1015 if (prom_map[i].vstart && ((prom_map[i].vstart >> 32) == 0)) 1015 if (prom_map[i].vstart && ((prom_map[i].vstart >> 32) == 0))
1016 for (j = 0; j < prom_map[i].vsize; j += PAGE_SIZE) { 1016 for (j = 0; j < prom_map[i].vsize; j += PAGE_SIZE) {
1017 int k; 1017 int k;
1018 1018
1019 for (k = 0; page_size_map[k].mask; k++) { 1019 for (k = 0; page_size_map[k].mask; k++) {
1020 if (((prom_map[i].vstart | 1020 if (((prom_map[i].vstart |
1021 prom_map[i].tte) & 1021 prom_map[i].tte) &
1022 page_size_map[k].mask) == 0 && 1022 page_size_map[k].mask) == 0 &&
1023 page_size_map[k].mask < 1023 page_size_map[k].mask <
1024 prom_map[i].vsize) 1024 prom_map[i].vsize)
1025 break; 1025 break;
1026 } 1026 }
1027#ifdef DEBUG 1027#ifdef DEBUG
1028 page_size_map[k].use++; 1028 page_size_map[k].use++;
1029#endif 1029#endif
1030 /* Enter PROM map into pmap_kernel() */ 1030 /* Enter PROM map into pmap_kernel() */
1031 pmap_enter_kpage(prom_map[i].vstart + j, 1031 pmap_enter_kpage(prom_map[i].vstart + j,
1032 (prom_map[i].tte + j) | TLB_EXEC | 1032 (prom_map[i].tte + j) | TLB_EXEC |
1033 page_size_map[k].code); 1033 page_size_map[k].code);
1034 } 1034 }
1035 BDPRINTF(PDB_BOOT1, ("Done inserting PROM mappings into pmap_kernel()\n")); 1035 BDPRINTF(PDB_BOOT1, ("Done inserting PROM mappings into pmap_kernel()\n"));
1036 1036
1037 /* 1037 /*
1038 * Fix up start of kernel heap. 1038 * Fix up start of kernel heap.
1039 */ 1039 */
1040 vmmap = (vaddr_t)roundup(ekdata, 4*MEG); 1040 vmmap = (vaddr_t)roundup(ekdata, 4*MEG);
1041 /* Let's keep 1 page of redzone after the kernel */ 1041 /* Let's keep 1 page of redzone after the kernel */
1042 vmmap += PAGE_SIZE; 1042 vmmap += PAGE_SIZE;
1043 { 1043 {
1044 extern struct pcb *proc0paddr; 1044 extern struct pcb *proc0paddr;
1045 extern void main(void); 1045 extern void main(void);
1046 vaddr_t u0va; 1046 vaddr_t u0va;
1047 paddr_t pa; 1047 paddr_t pa;
1048 1048
1049 u0va = vmmap; 1049 u0va = vmmap;
1050 1050
1051 BDPRINTF(PDB_BOOT1, 1051 BDPRINTF(PDB_BOOT1,
1052 ("Inserting proc0 USPACE into pmap_kernel() at %p\n", 1052 ("Inserting proc0 USPACE into pmap_kernel() at %p\n",
1053 vmmap)); 1053 vmmap));
1054 1054
1055 while (vmmap < u0va + 2*USPACE) { 1055 while (vmmap < u0va + 2*USPACE) {
1056 int64_t data1; 1056 int64_t data1;
1057 1057
1058 if (!pmap_get_page(&pa)) 1058 if (!pmap_get_page(&pa))
1059 panic("pmap_bootstrap: no pages"); 1059 panic("pmap_bootstrap: no pages");
1060 prom_map_phys(pa, PAGE_SIZE, vmmap, -1); 1060 prom_map_phys(pa, PAGE_SIZE, vmmap, -1);
1061 data1 = TSB_DATA(0 /* global */, 1061 data1 = TSB_DATA(0 /* global */,
1062 PGSZ_8K, 1062 PGSZ_8K,
1063 pa, 1063 pa,
1064 1 /* priv */, 1064 1 /* priv */,
1065 1 /* Write */, 1065 1 /* Write */,
1066 1 /* Cacheable */, 1066 1 /* Cacheable */,
1067 FORCE_ALIAS /* ALIAS -- Disable D$ */, 1067 FORCE_ALIAS /* ALIAS -- Disable D$ */,
1068 1 /* valid */, 1068 1 /* valid */,
1069 0 /* IE */); 1069 0 /* IE */);
1070 pmap_enter_kpage(vmmap, data1); 1070 pmap_enter_kpage(vmmap, data1);
1071 vmmap += PAGE_SIZE; 1071 vmmap += PAGE_SIZE;
1072 } 1072 }
1073 BDPRINTF(PDB_BOOT1, 1073 BDPRINTF(PDB_BOOT1,
1074 ("Done inserting stack 0 into pmap_kernel()\n")); 1074 ("Done inserting stack 0 into pmap_kernel()\n"));
1075 1075
1076 /* Now map in and initialize our cpu_info structure */ 1076 /* Now map in and initialize our cpu_info structure */
1077#ifdef DIAGNOSTIC 1077#ifdef DIAGNOSTIC
1078 vmmap += PAGE_SIZE; /* redzone -- XXXX do we need one? */ 1078 vmmap += PAGE_SIZE; /* redzone -- XXXX do we need one? */
1079#endif 1079#endif
1080 if ((vmmap ^ INTSTACK) & VA_ALIAS_MASK) 1080 if ((vmmap ^ INTSTACK) & VA_ALIAS_MASK)
1081 vmmap += PAGE_SIZE; /* Match up virtual color for D$ */ 1081 vmmap += PAGE_SIZE; /* Match up virtual color for D$ */
1082 intstk = vmmap; 1082 intstk = vmmap;
1083 cpus = (struct cpu_info *)(intstk + CPUINFO_VA - INTSTACK); 1083 cpus = (struct cpu_info *)(intstk + CPUINFO_VA - INTSTACK);
1084 1084
1085 BDPRINTF(PDB_BOOT1, 1085 BDPRINTF(PDB_BOOT1,
1086 ("Inserting cpu_info into pmap_kernel() at %p\n", 1086 ("Inserting cpu_info into pmap_kernel() at %p\n",
1087 cpus)); 1087 cpus));
1088 /* Now map in all 8 pages of interrupt stack/cpu_info */ 1088 /* Now map in all 8 pages of interrupt stack/cpu_info */
1089 pa = cpu0paddr; 1089 pa = cpu0paddr;
1090 prom_map_phys(pa, 64*KB, vmmap, -1); 1090 prom_map_phys(pa, 64*KB, vmmap, -1);
1091 1091
1092 /* 1092 /*
1093 * Also map it in as the interrupt stack. 1093 * Also map it in as the interrupt stack.
1094 * This lets the PROM see this if needed. 1094 * This lets the PROM see this if needed.
1095 * 1095 *
1096 * XXXX locore.s does not flush these mappings 1096 * XXXX locore.s does not flush these mappings
1097 * before installing the locked TTE. 1097 * before installing the locked TTE.
1098 */ 1098 */
1099 prom_map_phys(pa, 64*KB, INTSTACK, -1); 1099 prom_map_phys(pa, 64*KB, INTSTACK, -1);
1100 for (i = 0; i < 8; i++) { 1100 for (i = 0; i < 8; i++) {
1101 int64_t data1; 1101 int64_t data1;
1102 1102
1103 data1 = TSB_DATA(0 /* global */, 1103 data1 = TSB_DATA(0 /* global */,
1104 PGSZ_8K, 1104 PGSZ_8K,
1105 pa, 1105 pa,
1106 1 /* priv */, 1106 1 /* priv */,
1107 1 /* Write */, 1107 1 /* Write */,
1108 1 /* Cacheable */, 1108 1 /* Cacheable */,
1109 FORCE_ALIAS /* ALIAS -- Disable D$ */, 1109 FORCE_ALIAS /* ALIAS -- Disable D$ */,
1110 1 /* valid */, 1110 1 /* valid */,
1111 0 /* IE */); 1111 0 /* IE */);
1112 pmap_enter_kpage(vmmap, data1); 1112 pmap_enter_kpage(vmmap, data1);
1113 vmmap += PAGE_SIZE; 1113 vmmap += PAGE_SIZE;
1114 pa += PAGE_SIZE; 1114 pa += PAGE_SIZE;
1115 } 1115 }
1116 BDPRINTF(PDB_BOOT1, ("Initializing cpu_info\n")); 1116 BDPRINTF(PDB_BOOT1, ("Initializing cpu_info\n"));
1117 1117
1118 /* Initialize our cpu_info structure */ 1118 /* Initialize our cpu_info structure */
1119 memset((void *)intstk, 0, 64 * KB); 1119 memset((void *)intstk, 0, 64 * KB);
1120 cpus->ci_self = cpus; 1120 cpus->ci_self = cpus;
1121 cpus->ci_next = NULL; 1121 cpus->ci_next = NULL;
1122 cpus->ci_curlwp = &lwp0; 1122 cpus->ci_curlwp = &lwp0;
1123 cpus->ci_flags = CPUF_PRIMARY; 1123 cpus->ci_flags = CPUF_PRIMARY;
1124 cpus->ci_cpuid = CPU_UPAID; 1124 cpus->ci_cpuid = CPU_UPAID;
1125 cpus->ci_fplwp = NULL; 1125 cpus->ci_fplwp = NULL;
1126 cpus->ci_spinup = main; /* Call main when we're running. */ 1126 cpus->ci_spinup = main; /* Call main when we're running. */
1127 cpus->ci_paddr = cpu0paddr; 1127 cpus->ci_paddr = cpu0paddr;
1128 cpus->ci_cpcb = (struct pcb *)u0va; 1128 cpus->ci_cpcb = (struct pcb *)u0va;
1129 proc0paddr = cpus->ci_cpcb; 1129 proc0paddr = cpus->ci_cpcb;
1130 cpus->ci_idepth = -1; 1130 cpus->ci_idepth = -1;
1131 memset(cpus->ci_intrpending, -1, sizeof(cpus->ci_intrpending)); 1131 memset(cpus->ci_intrpending, -1, sizeof(cpus->ci_intrpending));
1132 1132
1133 lwp0.l_addr = (struct user*)u0va; 1133 lwp0.l_addr = (struct user*)u0va;
1134 lwp0.l_md.md_tf = (struct trapframe64*)(u0va + USPACE 1134 lwp0.l_md.md_tf = (struct trapframe64*)(u0va + USPACE
1135 - sizeof(struct trapframe64)); 1135 - sizeof(struct trapframe64));
1136 1136
1137 cpu0paddr += 64 * KB; 1137 cpu0paddr += 64 * KB;
1138 1138
1139 CPUSET_CLEAR(cpus_active); 1139 CPUSET_CLEAR(cpus_active);
1140 CPUSET_ADD(cpus_active, 0); 1140 CPUSET_ADD(cpus_active, 0);
1141 1141
1142 cpu_pmap_prepare(cpus, true); 1142 cpu_pmap_prepare(cpus, true);
1143 cpu_pmap_init(cpus); 1143 cpu_pmap_init(cpus);
1144 1144
1145 /* The rest will be done at CPU attach time. */ 1145 /* The rest will be done at CPU attach time. */
1146 BDPRINTF(PDB_BOOT1, 1146 BDPRINTF(PDB_BOOT1,
1147 ("Done inserting cpu_info into pmap_kernel()\n")); 1147 ("Done inserting cpu_info into pmap_kernel()\n"));
1148 } 1148 }
1149 1149
1150 vmmap = (vaddr_t)reserve_dumppages((void *)(u_long)vmmap); 1150 vmmap = (vaddr_t)reserve_dumppages((void *)(u_long)vmmap);
1151 1151
1152 /* 1152 /*
1153 * Set up bounds of allocatable memory for vmstat et al. 1153 * Set up bounds of allocatable memory for vmstat et al.
1154 */ 1154 */
1155 nextavail = avail->start; 1155 nextavail = avail->start;
1156 avail_start = nextavail; 1156 avail_start = nextavail;
1157 for (mp = avail; mp->size; mp++) 1157 for (mp = avail; mp->size; mp++)
1158 avail_end = mp->start+mp->size; 1158 avail_end = mp->start+mp->size;
1159 1159
1160 BDPRINTF(PDB_BOOT1, ("Finished pmap_bootstrap()\n")); 1160 BDPRINTF(PDB_BOOT1, ("Finished pmap_bootstrap()\n"));
1161 1161
1162 BDPRINTF(PDB_BOOT, ("left kdata: %" PRId64 " @%" PRIx64 ".\n", 1162 BDPRINTF(PDB_BOOT, ("left kdata: %" PRId64 " @%" PRIx64 ".\n",
1163 kdata_mem_pool.size, kdata_mem_pool.start)); 1163 kdata_mem_pool.size, kdata_mem_pool.start));
1164} 1164}
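
The boot-time mappings built above (msgbuf, proc0 USPACE, and the interrupt-stack/cpu_info pages) all construct their TTEs with the same nine-argument TSB_DATA() pattern: global bit clear, 8K page, privileged, writable, cacheable, D$ alias forced off, valid, IE clear. A minimal wrapper for that common case is sketched below; the argument order is assumed from the inline comments on the existing calls, and boot_tsb_data() is an illustrative name, not part of the source.

/*
 * Illustrative helper only -- wraps the TSB_DATA() pattern repeated in
 * pmap_bootstrap() above.  Argument order is assumed from the annotated
 * calls in this file.
 */
static inline int64_t
boot_tsb_data(paddr_t pa)
{
	return TSB_DATA(0 /* global */, PGSZ_8K, pa,
	    1 /* priv */, 1 /* Write */, 1 /* Cacheable */,
	    FORCE_ALIAS /* ALIAS -- Disable D$ */,
	    1 /* valid */, 0 /* IE */);
}

Each mapping loop above would then reduce to pmap_enter_kpage(va, boot_tsb_data(pa)).
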
1165 1165
1166/* 1166/*
1167 * Allocate TSBs for both mmus from the locked kernel data segment page. 1167 * Allocate TSBs for both mmus from the locked kernel data segment page.
1168 * This is run before the cpu itself is activated (or by the first cpu 1168 * This is run before the cpu itself is activated (or by the first cpu
1169 * itself) 1169 * itself)
1170 */ 1170 */
1171void 1171void
1172cpu_pmap_prepare(struct cpu_info *ci, bool initial) 1172cpu_pmap_prepare(struct cpu_info *ci, bool initial)
1173{ 1173{
1174 /* allocate our TSBs */ 1174 /* allocate our TSBs */
1175 ci->ci_tsb_dmmu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE); 1175 ci->ci_tsb_dmmu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE);
1176 ci->ci_tsb_immu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE); 1176 ci->ci_tsb_immu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE);
1177 memset(ci->ci_tsb_dmmu, 0, TSBSIZE); 1177 memset(ci->ci_tsb_dmmu, 0, TSBSIZE);
1178 memset(ci->ci_tsb_immu, 0, TSBSIZE); 1178 memset(ci->ci_tsb_immu, 0, TSBSIZE);
1179 if (!initial) { 1179 if (!initial) {
1180 KASSERT(ci != curcpu()); 1180 KASSERT(ci != curcpu());
1181 /* 1181 /*
1182 * Initially share ctxbusy with the boot cpu, the 1182 * Initially share ctxbusy with the boot cpu, the
1183 * cpu will replace it as soon as it runs (and can 1183 * cpu will replace it as soon as it runs (and can
1184 * probe the number of available contexts itself). 1184 * probe the number of available contexts itself).
1185 * Until then only context 0 (aka kernel) will be 1185 * Until then only context 0 (aka kernel) will be
1186 * referenced anyway. 1186 * referenced anyway.
1187 */ 1187 */
1188 ci->ci_numctx = curcpu()->ci_numctx; 1188 ci->ci_numctx = curcpu()->ci_numctx;
1189 ci->ci_ctxbusy = curcpu()->ci_ctxbusy; 1189 ci->ci_ctxbusy = curcpu()->ci_ctxbusy;
1190 } 1190 }
1191 1191
1192 BDPRINTF(PDB_BOOT1, ("cpu %d: TSB allocated at %p/%p size %08x\n", 1192 BDPRINTF(PDB_BOOT1, ("cpu %d: TSB allocated at %p/%p size %08x\n",
1193 ci->ci_index, ci->ci_tsb_dmmu, ci->ci_tsb_immu, TSBSIZE)); 1193 ci->ci_index, ci->ci_tsb_dmmu, ci->ci_tsb_immu, TSBSIZE));
1194} 1194}
1195 1195
1196/* 1196/*
1197 * Initialize the per CPU parts for the cpu running this code (despite the 1197 * Initialize the per CPU parts for the cpu running this code (despite the
1198 * passed cpuinfo) - get_maxctx() only works on the local cpu. 1198 * passed cpuinfo) - get_maxctx() only works on the local cpu.
1199 */ 1199 */
1200void 1200void
1201cpu_pmap_init(struct cpu_info *ci) 1201cpu_pmap_init(struct cpu_info *ci)
1202{ 1202{
1203 extern int get_maxctx(void); 1203 extern int get_maxctx(void);
1204 size_t ctxsize; 1204 size_t ctxsize;
1205 1205
1206 ci->ci_pmap_next_ctx = 1; 1206 ci->ci_pmap_next_ctx = 1;
1207 ci->ci_numctx = get_maxctx(); 1207 ci->ci_numctx = get_maxctx();
1208 ctxsize = sizeof(paddr_t)*ci->ci_numctx; 1208 ctxsize = sizeof(paddr_t)*ci->ci_numctx;
1209 ci->ci_ctxbusy = (paddr_t *)kdata_alloc(ctxsize, sizeof(uint64_t)); 1209 ci->ci_ctxbusy = (paddr_t *)kdata_alloc(ctxsize, sizeof(uint64_t));
1210 memset(ci->ci_ctxbusy, 0, ctxsize); 1210 memset(ci->ci_ctxbusy, 0, ctxsize);
1211 LIST_INIT(&ci->ci_pmap_ctxlist); 1211 LIST_INIT(&ci->ci_pmap_ctxlist);
1212 1212
1213 /* mark kernel context as busy */ 1213 /* mark kernel context as busy */
1214 ci->ci_ctxbusy[0] = pmap_kernel()->pm_physaddr; 1214 ci->ci_ctxbusy[0] = pmap_kernel()->pm_physaddr;
1215} 1215}
1216 1216
1217/* 1217/*
1218 * Initialize anything else for pmap handling. 1218 * Initialize anything else for pmap handling.
1219 * Called during vm_init(). 1219 * Called during vm_init().
1220 */ 1220 */
1221void 1221void
1222pmap_init() 1222pmap_init()
1223{ 1223{
1224 struct vm_page *pg; 1224 struct vm_page *pg;
1225 struct pglist pglist; 1225 struct pglist pglist;
1226 uint64_t data; 1226 uint64_t data;
1227 paddr_t pa; 1227 paddr_t pa;
1228 psize_t size; 1228 psize_t size;
1229 vaddr_t va; 1229 vaddr_t va;
1230 1230
1231 BDPRINTF(PDB_BOOT1, ("pmap_init()\n")); 1231 BDPRINTF(PDB_BOOT1, ("pmap_init()\n"));
1232 1232
1233 size = sizeof(struct pv_entry) * physmem; 1233 size = sizeof(struct pv_entry) * physmem;
1234 if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1, 1234 if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
1235 (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0) 1235 (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
1236 panic("pmap_init: no memory"); 1236 panic("pmap_init: no memory");
1237 1237
1238 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY); 1238 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
1239 if (va == 0) 1239 if (va == 0)
1240 panic("pmap_init: no memory"); 1240 panic("pmap_init: no memory");
1241 1241
1242 /* Map the pages */ 1242 /* Map the pages */
1243 TAILQ_FOREACH(pg, &pglist, pageq.queue) { 1243 TAILQ_FOREACH(pg, &pglist, pageq.queue) {
1244 pa = VM_PAGE_TO_PHYS(pg); 1244 pa = VM_PAGE_TO_PHYS(pg);
1245 pmap_zero_page(pa); 1245 pmap_zero_page(pa);
1246 data = TSB_DATA(0 /* global */, 1246 data = TSB_DATA(0 /* global */,
1247 PGSZ_8K, 1247 PGSZ_8K,
1248 pa, 1248 pa,
1249 1 /* priv */, 1249 1 /* priv */,
1250 1 /* Write */, 1250 1 /* Write */,
1251 1 /* Cacheable */, 1251 1 /* Cacheable */,
1252 FORCE_ALIAS /* ALIAS -- Disable D$ */, 1252 FORCE_ALIAS /* ALIAS -- Disable D$ */,
1253 1 /* valid */, 1253 1 /* valid */,
1254 0 /* IE */); 1254 0 /* IE */);
1255 pmap_enter_kpage(va, data); 1255 pmap_enter_kpage(va, data);
1256 va += PAGE_SIZE; 1256 va += PAGE_SIZE;
1257 } 1257 }
1258 1258
1259 /* 1259 /*
1260 * initialize the pmap pools. 1260 * initialize the pmap pools.
1261 */ 1261 */
1262 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0, 1262 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), BLOCK_SIZE, 0,
1263 "pmappl", NULL, IPL_NONE, NULL, NULL, NULL); 1263 0, "pmappl", NULL, IPL_NONE, NULL, NULL, NULL);
1264 pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0, 0, 1264 pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0,
1265 "pv_entry", NULL, IPL_NONE, NULL, NULL, NULL); 1265 PR_LARGECACHE, "pv_entry", NULL, IPL_NONE, NULL, NULL, NULL);
1266 1266
1267 vm_first_phys = avail_start; 1267 vm_first_phys = avail_start;
1268 vm_num_phys = avail_end - avail_start; 1268 vm_num_phys = avail_end - avail_start;
1269 1269
1270 mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE); 1270 mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE);
1271} 1271}
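
The two caches bootstrapped above are consumed throughout the rest of this file: pmap_cache by pmap_create()/pmap_destroy(), and pmap_pv_cache by pmap_enter()/pmap_remove(). A minimal sketch of the pv-entry consumer side, mirroring those callers; pv_alloc_sketch() is an illustrative name only.

/*
 * Illustrative only -- mirrors how pmap_pv_cache is used later in this
 * file.  pmap_enter() allocates with PR_NOWAIT so a PMAP_CANFAIL caller
 * can back out with ENOMEM instead of sleeping; pmap_create() uses
 * PR_WAITOK on pmap_cache and simply waits.
 */
static pv_entry_t
pv_alloc_sketch(int flags)
{
	pv_entry_t pv;

	pv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
	if (pv == NULL && (flags & PMAP_CANFAIL) == 0)
		panic("pv_alloc_sketch: no pv entries available");
	return pv;	/* released with pool_cache_put() when done */
}
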
1272 1272
1273/* 1273/*
1274 * How much virtual space is available to the kernel? 1274 * How much virtual space is available to the kernel?
1275 */ 1275 */
1276static vaddr_t kbreak; /* End of kernel VA */ 1276static vaddr_t kbreak; /* End of kernel VA */
1277void 1277void
1278pmap_virtual_space(start, end) 1278pmap_virtual_space(start, end)
1279 vaddr_t *start, *end; 1279 vaddr_t *start, *end;
1280{ 1280{
1281 1281
1282 /* 1282 /*
1283 * Reserve one segment for kernel virtual memory 1283 * Reserve one segment for kernel virtual memory
1284 */ 1284 */
1285 /* Reserve two pages for pmap_copy_page && /dev/mem */ 1285 /* Reserve two pages for pmap_copy_page && /dev/mem */
1286 *start = kbreak = (vaddr_t)(vmmap + 2*PAGE_SIZE); 1286 *start = kbreak = (vaddr_t)(vmmap + 2*PAGE_SIZE);
1287 *end = VM_MAX_KERNEL_ADDRESS; 1287 *end = VM_MAX_KERNEL_ADDRESS;
1288 BDPRINTF(PDB_BOOT1, ("pmap_virtual_space: %x-%x\n", *start, *end)); 1288 BDPRINTF(PDB_BOOT1, ("pmap_virtual_space: %x-%x\n", *start, *end));
1289} 1289}
1290 1290
1291/* 1291/*
1292 * Preallocate kernel page tables to a specified VA. 1292 * Preallocate kernel page tables to a specified VA.
1293 * This simply loops through the first TTE for each 1293 * This simply loops through the first TTE for each
1294 * page table from the beginning of the kernel pmap, 1294 * page table from the beginning of the kernel pmap,
1295 * reads the entry, and if the result is 1295 * reads the entry, and if the result is
1296 * zero (either invalid entry or no page table) it stores 1296 * zero (either invalid entry or no page table) it stores
1297 * a zero there, populating page tables in the process. 1297 * a zero there, populating page tables in the process.
1298 * This is not the most efficient technique but I don't 1298 * This is not the most efficient technique but I don't
1299 * expect it to be called that often. 1299 * expect it to be called that often.
1300 */ 1300 */
1301vaddr_t 1301vaddr_t
1302pmap_growkernel(maxkvaddr) 1302pmap_growkernel(maxkvaddr)
1303 vaddr_t maxkvaddr; 1303 vaddr_t maxkvaddr;
1304{ 1304{
1305 struct pmap *pm = pmap_kernel(); 1305 struct pmap *pm = pmap_kernel();
1306 paddr_t pa; 1306 paddr_t pa;
1307 1307
1308 if (maxkvaddr >= KERNEND) { 1308 if (maxkvaddr >= KERNEND) {
1309 printf("WARNING: cannot extend kernel pmap beyond %p to %p\n", 1309 printf("WARNING: cannot extend kernel pmap beyond %p to %p\n",
1310 (void *)KERNEND, (void *)maxkvaddr); 1310 (void *)KERNEND, (void *)maxkvaddr);
1311 return (kbreak); 1311 return (kbreak);
1312 } 1312 }
1313 mutex_enter(&pmap_lock); 1313 mutex_enter(&pmap_lock);
1314 DPRINTF(PDB_GROW, ("pmap_growkernel(%lx...%lx)\n", kbreak, maxkvaddr)); 1314 DPRINTF(PDB_GROW, ("pmap_growkernel(%lx...%lx)\n", kbreak, maxkvaddr));
1315 /* Align with the start of a page table */ 1315 /* Align with the start of a page table */
1316 for (kbreak &= (-1 << PDSHIFT); kbreak < maxkvaddr; 1316 for (kbreak &= (-1 << PDSHIFT); kbreak < maxkvaddr;
1317 kbreak += (1 << PDSHIFT)) { 1317 kbreak += (1 << PDSHIFT)) {
1318 if (pseg_get(pm, kbreak)) 1318 if (pseg_get(pm, kbreak))
1319 continue; 1319 continue;
1320 1320
1321 pa = 0; 1321 pa = 0;
1322 while (pseg_set(pm, kbreak, 0, pa) & 1) { 1322 while (pseg_set(pm, kbreak, 0, pa) & 1) {
1323 DPRINTF(PDB_GROW, 1323 DPRINTF(PDB_GROW,
1324 ("pmap_growkernel: extending %lx\n", kbreak)); 1324 ("pmap_growkernel: extending %lx\n", kbreak));
1325 pa = 0; 1325 pa = 0;
1326 if (!pmap_get_page(&pa)) 1326 if (!pmap_get_page(&pa))
1327 panic("pmap_growkernel: no pages"); 1327 panic("pmap_growkernel: no pages");
1328 ENTER_STAT(ptpneeded); 1328 ENTER_STAT(ptpneeded);
1329 } 1329 }
1330 } 1330 }
1331 mutex_exit(&pmap_lock); 1331 mutex_exit(&pmap_lock);
1332 return (kbreak); 1332 return (kbreak);
1333} 1333}
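
The alignment step at the top of the loop above relies on -1 << PDSHIFT acting as a mask with the low PDSHIFT bits clear. A worked example with a purely hypothetical PDSHIFT value (the real constant is defined elsewhere in the port):

/*
 * Illustrative only: rounding kbreak down to the start of the address
 * range covered by one page-table page.  Assuming PDSHIFT == 23 just
 * for the example:
 *
 *	mask          = -1 << 23 = ...ff800000  (low 23 bits clear)
 *	kbreak        = 0x01234567
 *	kbreak & mask = 0x01000000
 *
 * The loop then advances kbreak by 1 << PDSHIFT per iteration, calling
 * pseg_set() to populate any missing page table along the way.
 */
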
1334 1334
1335/* 1335/*
1336 * Create and return a physical map. 1336 * Create and return a physical map.
1337 */ 1337 */
1338struct pmap * 1338struct pmap *
1339pmap_create() 1339pmap_create()
1340{ 1340{
1341 struct pmap *pm; 1341 struct pmap *pm;
1342 1342
1343 DPRINTF(PDB_CREATE, ("pmap_create()\n")); 1343 DPRINTF(PDB_CREATE, ("pmap_create()\n"));
1344 1344
1345 pm = pool_cache_get(&pmap_cache, PR_WAITOK); 1345 pm = pool_cache_get(&pmap_cache, PR_WAITOK);
1346 memset(pm, 0, sizeof *pm); 1346 memset(pm, 0, sizeof *pm);
1347 DPRINTF(PDB_CREATE, ("pmap_create(): created %p\n", pm)); 1347 DPRINTF(PDB_CREATE, ("pmap_create(): created %p\n", pm));
1348 1348
1349 UVM_OBJ_INIT(&pm->pm_obj, NULL, 1); 1349 UVM_OBJ_INIT(&pm->pm_obj, NULL, 1);
1350 if (pm != pmap_kernel()) { 1350 if (pm != pmap_kernel()) {
1351 while (!pmap_get_page(&pm->pm_physaddr)) { 1351 while (!pmap_get_page(&pm->pm_physaddr)) {
1352 uvm_wait("pmap_create"); 1352 uvm_wait("pmap_create");
1353 } 1353 }
1354 pm->pm_segs = (paddr_t *)(u_long)pm->pm_physaddr; 1354 pm->pm_segs = (paddr_t *)(u_long)pm->pm_physaddr;
1355 } 1355 }
1356 DPRINTF(PDB_CREATE, ("pmap_create(%p): ctx %d\n", pm, pmap_ctx(pm))); 1356 DPRINTF(PDB_CREATE, ("pmap_create(%p): ctx %d\n", pm, pmap_ctx(pm)));
1357 return pm; 1357 return pm;
1358} 1358}
1359 1359
1360/* 1360/*
1361 * Add a reference to the given pmap. 1361 * Add a reference to the given pmap.
1362 */ 1362 */
1363void 1363void
1364pmap_reference(pm) 1364pmap_reference(pm)
1365 struct pmap *pm; 1365 struct pmap *pm;
1366{ 1366{
1367 1367
1368 atomic_inc_uint(&pm->pm_refs); 1368 atomic_inc_uint(&pm->pm_refs);
1369} 1369}
1370 1370
1371/* 1371/*
1372 * Retire the given pmap from service. 1372 * Retire the given pmap from service.
1373 * Should only be called if the map contains no valid mappings. 1373 * Should only be called if the map contains no valid mappings.
1374 */ 1374 */
1375void 1375void
1376pmap_destroy(pm) 1376pmap_destroy(pm)
1377 struct pmap *pm; 1377 struct pmap *pm;
1378{ 1378{
1379#ifdef MULTIPROCESSOR 1379#ifdef MULTIPROCESSOR
1380 struct cpu_info *ci; 1380 struct cpu_info *ci;
1381#endif 1381#endif
1382 struct vm_page *pg, *nextpg; 1382 struct vm_page *pg, *nextpg;
1383 1383
1384 if ((int)atomic_dec_uint_nv(&pm->pm_refs) > 0) { 1384 if ((int)atomic_dec_uint_nv(&pm->pm_refs) > 0) {
1385 return; 1385 return;
1386 } 1386 }
1387 DPRINTF(PDB_DESTROY, ("pmap_destroy: freeing pmap %p\n", pm)); 1387 DPRINTF(PDB_DESTROY, ("pmap_destroy: freeing pmap %p\n", pm));
1388#ifdef MULTIPROCESSOR 1388#ifdef MULTIPROCESSOR
1389 mutex_enter(&pmap_lock); 1389 mutex_enter(&pmap_lock);
1390 for (ci = cpus; ci != NULL; ci = ci->ci_next) { 1390 for (ci = cpus; ci != NULL; ci = ci->ci_next) {
1391 if (CPUSET_HAS(cpus_active, ci->ci_index)) 1391 if (CPUSET_HAS(cpus_active, ci->ci_index))
1392 ctx_free(pm, ci); 1392 ctx_free(pm, ci);
1393 } 1393 }
1394 mutex_exit(&pmap_lock); 1394 mutex_exit(&pmap_lock);
1395#else 1395#else
1396 ctx_free(pm); 1396 ctx_free(pm);
1397#endif 1397#endif
1398 1398
1399 /* we could be a little smarter and leave pages zeroed */ 1399 /* we could be a little smarter and leave pages zeroed */
1400 for (pg = TAILQ_FIRST(&pm->pm_obj.memq); pg != NULL; pg = nextpg) { 1400 for (pg = TAILQ_FIRST(&pm->pm_obj.memq); pg != NULL; pg = nextpg) {
1401 nextpg = TAILQ_NEXT(pg, listq.queue); 1401 nextpg = TAILQ_NEXT(pg, listq.queue);
1402 TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue); 1402 TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue);
1403 KASSERT(pg->mdpage.mdpg_pvh.pv_pmap == NULL); 1403 KASSERT(pg->mdpage.mdpg_pvh.pv_pmap == NULL);
1404 uvm_pagefree(pg); 1404 uvm_pagefree(pg);
1405 } 1405 }
1406 pmap_free_page((paddr_t)(u_long)pm->pm_segs); 1406 pmap_free_page((paddr_t)(u_long)pm->pm_segs);
1407 UVM_OBJ_DESTROY(&pm->pm_obj); 1407 UVM_OBJ_DESTROY(&pm->pm_obj);
1408 pool_cache_put(&pmap_cache, pm); 1408 pool_cache_put(&pmap_cache, pm);
1409} 1409}
1410 1410
1411/* 1411/*
1412 * Copy the range specified by src_addr/len 1412 * Copy the range specified by src_addr/len
1413 * from the source map to the range dst_addr/len 1413 * from the source map to the range dst_addr/len
1414 * in the destination map. 1414 * in the destination map.
1415 * 1415 *
1416 * This routine is only advisory and need not do anything. 1416 * This routine is only advisory and need not do anything.
1417 */ 1417 */
1418void 1418void
1419pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) 1419pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1420 struct pmap *dst_pmap, *src_pmap; 1420 struct pmap *dst_pmap, *src_pmap;
1421 vaddr_t dst_addr, src_addr; 1421 vaddr_t dst_addr, src_addr;
1422 vsize_t len; 1422 vsize_t len;
1423{ 1423{
1424 1424
1425 DPRINTF(PDB_CREATE, ("pmap_copy(%p, %p, %p, %lx, %p)\n", 1425 DPRINTF(PDB_CREATE, ("pmap_copy(%p, %p, %p, %lx, %p)\n",
1426 dst_pmap, src_pmap, (void *)(u_long)dst_addr, 1426 dst_pmap, src_pmap, (void *)(u_long)dst_addr,
1427 (u_long)len, (void *)(u_long)src_addr)); 1427 (u_long)len, (void *)(u_long)src_addr));
1428} 1428}
1429 1429
1430/* 1430/*
1431 * Garbage collects the physical map system for 1431 * Garbage collects the physical map system for
1432 * pages which are no longer used. 1432 * pages which are no longer used.
1433 * Success need not be guaranteed -- that is, there 1433 * Success need not be guaranteed -- that is, there
1434 * may well be pages which are not referenced, but 1434 * may well be pages which are not referenced, but
1435 * others may be collected. 1435 * others may be collected.
1436 * Called by the pageout daemon when pages are scarce. 1436 * Called by the pageout daemon when pages are scarce.
1437 */ 1437 */
1438void 1438void
1439pmap_collect(pm) 1439pmap_collect(pm)
1440 struct pmap *pm; 1440 struct pmap *pm;
1441{ 1441{
1442 int64_t data; 1442 int64_t data;
1443 paddr_t pa, *pdir, *ptbl; 1443 paddr_t pa, *pdir, *ptbl;
1444 struct vm_page *pg; 1444 struct vm_page *pg;
1445 int i, j, k, n, m; 1445 int i, j, k, n, m;
1446 1446
1447 /* 1447 /*
1448 * This is a good place to scan the pmaps for page tables with 1448 * This is a good place to scan the pmaps for page tables with
1449 * no valid mappings in them and free them. 1449 * no valid mappings in them and free them.
1450 */ 1450 */
1451 1451
1452 /* NEVER GARBAGE COLLECT THE KERNEL PMAP */ 1452 /* NEVER GARBAGE COLLECT THE KERNEL PMAP */
1453 if (pm == pmap_kernel()) 1453 if (pm == pmap_kernel())
1454 return; 1454 return;
1455 1455
1456 mutex_enter(&pmap_lock); 1456 mutex_enter(&pmap_lock);
1457 for (i = 0; i < STSZ; i++) { 1457 for (i = 0; i < STSZ; i++) {
1458 pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i], 1458 pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i],
1459 ASI_PHYS_CACHED); 1459 ASI_PHYS_CACHED);
1460 if (pdir == NULL) { 1460 if (pdir == NULL) {
1461 continue; 1461 continue;
1462 } 1462 }
1463 m = 0; 1463 m = 0;
1464 for (k = 0; k < PDSZ; k++) { 1464 for (k = 0; k < PDSZ; k++) {
1465 ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k], 1465 ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k],
1466 ASI_PHYS_CACHED); 1466 ASI_PHYS_CACHED);
1467 if (ptbl == NULL) { 1467 if (ptbl == NULL) {
1468 continue; 1468 continue;
1469 } 1469 }
1470 m++; 1470 m++;
1471 n = 0; 1471 n = 0;
1472 for (j = 0; j < PTSZ; j++) { 1472 for (j = 0; j < PTSZ; j++) {
1473 data = ldxa((vaddr_t)&ptbl[j], ASI_PHYS_CACHED); 1473 data = ldxa((vaddr_t)&ptbl[j], ASI_PHYS_CACHED);
1474 if (data & TLB_V) 1474 if (data & TLB_V)
1475 n++; 1475 n++;
1476 } 1476 }
1477 if (!n) { 1477 if (!n) {
1478 stxa((paddr_t)(u_long)&pdir[k], 1478 stxa((paddr_t)(u_long)&pdir[k],
1479 ASI_PHYS_CACHED, 0); 1479 ASI_PHYS_CACHED, 0);
1480 pa = (paddr_t)(u_long)ptbl; 1480 pa = (paddr_t)(u_long)ptbl;
1481 pg = PHYS_TO_VM_PAGE(pa); 1481 pg = PHYS_TO_VM_PAGE(pa);
1482 TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue); 1482 TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue);
1483 pmap_free_page(pa); 1483 pmap_free_page(pa);
1484 } 1484 }
1485 } 1485 }
1486 if (!m) { 1486 if (!m) {
1487 stxa((paddr_t)(u_long)&pm->pm_segs[i], 1487 stxa((paddr_t)(u_long)&pm->pm_segs[i],
1488 ASI_PHYS_CACHED, 0); 1488 ASI_PHYS_CACHED, 0);
1489 pa = (paddr_t)(u_long)pdir; 1489 pa = (paddr_t)(u_long)pdir;
1490 pg = PHYS_TO_VM_PAGE(pa); 1490 pg = PHYS_TO_VM_PAGE(pa);
1491 TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue); 1491 TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue);
1492 pmap_free_page(pa); 1492 pmap_free_page(pa);
1493 } 1493 }
1494 } 1494 }
1495 mutex_exit(&pmap_lock); 1495 mutex_exit(&pmap_lock);
1496} 1496}
1497 1497
1498/* 1498/*
1499 * Activate the address space for the specified process. If the 1499 * Activate the address space for the specified process. If the
1500 * process is the current process, load the new MMU context. 1500 * process is the current process, load the new MMU context.
1501 */ 1501 */
1502void 1502void
1503pmap_activate(l) 1503pmap_activate(l)
1504 struct lwp *l; 1504 struct lwp *l;
1505{ 1505{
1506 struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap; 1506 struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
1507 1507
1508 if (pmap == pmap_kernel()) { 1508 if (pmap == pmap_kernel()) {
1509 return; 1509 return;
1510 } 1510 }
1511 1511
1512 /* 1512 /*
1513 * This is essentially the same thing that happens in cpu_switch() 1513 * This is essentially the same thing that happens in cpu_switch()
1514 * when the newly selected process is about to run, except that we 1514 * when the newly selected process is about to run, except that we
1515 * have to make sure to clean the register windows before we set 1515 * have to make sure to clean the register windows before we set
1516 * the new context. 1516 * the new context.
1517 */ 1517 */
1518 1518
1519 if (l != curlwp) { 1519 if (l != curlwp) {
1520 return; 1520 return;
1521 } 1521 }
1522 write_user_windows(); 1522 write_user_windows();
1523 pmap_activate_pmap(pmap); 1523 pmap_activate_pmap(pmap);
1524} 1524}
1525 1525
1526void 1526void
1527pmap_activate_pmap(struct pmap *pmap) 1527pmap_activate_pmap(struct pmap *pmap)
1528{ 1528{
1529 1529
1530 if (pmap_ctx(pmap) == 0) { 1530 if (pmap_ctx(pmap) == 0) {
1531 (void) ctx_alloc(pmap); 1531 (void) ctx_alloc(pmap);
1532 } 1532 }
1533 dmmu_set_secondary_context(pmap_ctx(pmap)); 1533 dmmu_set_secondary_context(pmap_ctx(pmap));
1534} 1534}
1535 1535
1536/* 1536/*
1537 * Deactivate the address space of the specified process. 1537 * Deactivate the address space of the specified process.
1538 */ 1538 */
1539void 1539void
1540pmap_deactivate(l) 1540pmap_deactivate(l)
1541 struct lwp *l; 1541 struct lwp *l;
1542{ 1542{
1543} 1543}
1544 1544
1545/* 1545/*
1546 * pmap_kenter_pa: [ INTERFACE ] 1546 * pmap_kenter_pa: [ INTERFACE ]
1547 * 1547 *
1548 * Enter a va -> pa mapping into the kernel pmap without any 1548 * Enter a va -> pa mapping into the kernel pmap without any
1549 * physical->virtual tracking. 1549 * physical->virtual tracking.
1550 * 1550 *
1551 * Note: no locking is necessary in this function. 1551 * Note: no locking is necessary in this function.
1552 */ 1552 */
1553void 1553void
1554pmap_kenter_pa(va, pa, prot) 1554pmap_kenter_pa(va, pa, prot)
1555 vaddr_t va; 1555 vaddr_t va;
1556 paddr_t pa; 1556 paddr_t pa;
1557 vm_prot_t prot; 1557 vm_prot_t prot;
1558{ 1558{
1559 pte_t tte; 1559 pte_t tte;
1560 paddr_t ptp; 1560 paddr_t ptp;
1561 struct pmap *pm = pmap_kernel(); 1561 struct pmap *pm = pmap_kernel();
1562 int i; 1562 int i;
1563 1563
1564 KASSERT(va < INTSTACK || va > EINTSTACK); 1564 KASSERT(va < INTSTACK || va > EINTSTACK);
1565 KASSERT(va < kdata || va > ekdata); 1565 KASSERT(va < kdata || va > ekdata);
1566 1566
1567 /* 1567 /*
1568 * Construct the TTE. 1568 * Construct the TTE.
1569 */ 1569 */
1570 1570
1571 ENTER_STAT(unmanaged); 1571 ENTER_STAT(unmanaged);
1572 if (pa & (PMAP_NVC|PMAP_NC)) { 1572 if (pa & (PMAP_NVC|PMAP_NC)) {
1573 ENTER_STAT(ci); 1573 ENTER_STAT(ci);
1574 } 1574 }
1575 1575
1576 tte.data = TSB_DATA(0, PGSZ_8K, pa, 1 /* Privileged */, 1576 tte.data = TSB_DATA(0, PGSZ_8K, pa, 1 /* Privileged */,
1577 (VM_PROT_WRITE & prot), 1577 (VM_PROT_WRITE & prot),
1578 !(pa & PMAP_NC), pa & (PMAP_NVC), 1, 0); 1578 !(pa & PMAP_NC), pa & (PMAP_NVC), 1, 0);
1579 /* We don't track mod/ref here. */ 1579 /* We don't track mod/ref here. */
1580 if (prot & VM_PROT_WRITE) 1580 if (prot & VM_PROT_WRITE)
1581 tte.data |= TLB_REAL_W|TLB_W; 1581 tte.data |= TLB_REAL_W|TLB_W;
1582 if (prot & VM_PROT_EXECUTE) 1582 if (prot & VM_PROT_EXECUTE)
1583 tte.data |= TLB_EXEC; 1583 tte.data |= TLB_EXEC;
1584 tte.data |= TLB_TSB_LOCK; /* wired */ 1584 tte.data |= TLB_TSB_LOCK; /* wired */
1585 ptp = 0; 1585 ptp = 0;
1586 1586
1587 retry: 1587 retry:
1588 i = pseg_set(pm, va, tte.data, ptp); 1588 i = pseg_set(pm, va, tte.data, ptp);
1589 if (i & 1) { 1589 if (i & 1) {
1590 KASSERT((i & 4) == 0); 1590 KASSERT((i & 4) == 0);
1591 ptp = 0; 1591 ptp = 0;
1592 if (!pmap_get_page(&ptp)) 1592 if (!pmap_get_page(&ptp))
1593 panic("pmap_kenter_pa: no pages"); 1593 panic("pmap_kenter_pa: no pages");
1594 ENTER_STAT(ptpneeded); 1594 ENTER_STAT(ptpneeded);
1595 goto retry; 1595 goto retry;
1596 } 1596 }
1597 if (ptp && i == 0) { 1597 if (ptp && i == 0) {
1598 /* We allocated a spare page but didn't use it. Free it. */ 1598 /* We allocated a spare page but didn't use it. Free it. */
1599 printf("pmap_kenter_pa: freeing unused page %llx\n", 1599 printf("pmap_kenter_pa: freeing unused page %llx\n",
1600 (long long)ptp); 1600 (long long)ptp);
1601 pmap_free_page(ptp); 1601 pmap_free_page(ptp);
1602 } 1602 }
1603#ifdef DEBUG 1603#ifdef DEBUG
1604 i = ptelookup_va(va); 1604 i = ptelookup_va(va);
1605 if (pmapdebug & PDB_ENTER) 1605 if (pmapdebug & PDB_ENTER)
1606 prom_printf("pmap_kenter_pa: va=%08x data=%08x:%08x " 1606 prom_printf("pmap_kenter_pa: va=%08x data=%08x:%08x "
1607 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32), 1607 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32),
1608 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]); 1608 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]);
1609 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) { 1609 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) {
1610 prom_printf("pmap_kenter_pa: evicting entry tag=%x:%08x " 1610 prom_printf("pmap_kenter_pa: evicting entry tag=%x:%08x "
1611 "data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1611 "data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1612 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag, 1612 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag,
1613 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, 1613 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data,
1614 i, &curcpu()->ci_tsb_dmmu[i]); 1614 i, &curcpu()->ci_tsb_dmmu[i]);
1615 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1615 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1616 va, (int)(tte.data>>32), (int)tte.data, i, 1616 va, (int)(tte.data>>32), (int)tte.data, i,
1617 &curcpu()->ci_tsb_dmmu[i]); 1617 &curcpu()->ci_tsb_dmmu[i]);
1618 } 1618 }
1619#endif 1619#endif
1620} 1620}
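
pmap_kenter_pa() above and pmap_enter() further down share a retry idiom around pseg_set(): the low bit of the return value asks the caller for a spare page-table page, and a spare that ends up unused is freed afterwards. A compact sketch of that idiom follows; the return-value convention is inferred from the callers in this file, and kenter_retry_sketch() is an illustrative name only.

/*
 * Illustrative only -- the pseg_set() retry idiom used by
 * pmap_kenter_pa() and pmap_enter().  Bit 0 of the return value means
 * "need a spare page"; a non-zero result with a spare supplied means
 * the spare was consumed as an intermediate table.
 */
static void
kenter_retry_sketch(struct pmap *pm, vaddr_t va, int64_t data)
{
	paddr_t ptp = 0;
	int i;

	for (;;) {
		i = pseg_set(pm, va, data, ptp);
		if ((i & 1) == 0)
			break;			/* entry stored */
		ptp = 0;
		if (!pmap_get_page(&ptp))	/* allocate the spare */
			panic("kenter_retry_sketch: no pages");
	}
	if (ptp && i == 0)
		pmap_free_page(ptp);		/* spare was not needed */
}
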
1621 1621
1622/* 1622/*
1623 * pmap_kremove: [ INTERFACE ] 1623 * pmap_kremove: [ INTERFACE ]
1624 * 1624 *
1625 * Remove a mapping entered with pmap_kenter_pa() starting at va, 1625 * Remove a mapping entered with pmap_kenter_pa() starting at va,
1626 * for size bytes (assumed to be page rounded). 1626 * for size bytes (assumed to be page rounded).
1627 */ 1627 */
1628void 1628void
1629pmap_kremove(va, size) 1629pmap_kremove(va, size)
1630 vaddr_t va; 1630 vaddr_t va;
1631 vsize_t size; 1631 vsize_t size;
1632{ 1632{
1633 struct pmap *pm = pmap_kernel(); 1633 struct pmap *pm = pmap_kernel();
1634 int64_t data; 1634 int64_t data;
1635 paddr_t pa; 1635 paddr_t pa;
1636 int rv; 1636 int rv;
1637 bool flush = FALSE; 1637 bool flush = FALSE;
1638 1638
1639 KASSERT(va < INTSTACK || va > EINTSTACK); 1639 KASSERT(va < INTSTACK || va > EINTSTACK);
1640 KASSERT(va < kdata || va > ekdata); 1640 KASSERT(va < kdata || va > ekdata);
1641 1641
1642 DPRINTF(PDB_DEMAP, ("pmap_kremove: start 0x%lx size %lx\n", va, size)); 1642 DPRINTF(PDB_DEMAP, ("pmap_kremove: start 0x%lx size %lx\n", va, size));
1643 for (; size >= PAGE_SIZE; va += PAGE_SIZE, size -= PAGE_SIZE) { 1643 for (; size >= PAGE_SIZE; va += PAGE_SIZE, size -= PAGE_SIZE) {
1644 1644
1645#ifdef DIAGNOSTIC 1645#ifdef DIAGNOSTIC
1646 /* 1646 /*
1647 * Is this part of the permanent 4MB mapping? 1647 * Is this part of the permanent 4MB mapping?
1648 */ 1648 */
1649 if (va >= ktext && va < roundup(ekdata, 4*MEG)) 1649 if (va >= ktext && va < roundup(ekdata, 4*MEG))
1650 panic("pmap_kremove: va=%08x in locked TLB", (u_int)va); 1650 panic("pmap_kremove: va=%08x in locked TLB", (u_int)va);
1651#endif 1651#endif
1652 1652
1653 data = pseg_get(pm, va); 1653 data = pseg_get(pm, va);
1654 if (data == 0) { 1654 if (data == 0) {
1655 continue; 1655 continue;
1656 } 1656 }
1657 1657
1658 flush = TRUE; 1658 flush = TRUE;
1659 pa = data & TLB_PA_MASK; 1659 pa = data & TLB_PA_MASK;
1660 1660
1661 /* 1661 /*
1662 * We need to flip the valid bit and 1662 * We need to flip the valid bit and
1663 * clear the access statistics. 1663 * clear the access statistics.
1664 */ 1664 */
1665 1665
1666 rv = pseg_set(pm, va, 0, 0); 1666 rv = pseg_set(pm, va, 0, 0);
1667 if (rv & 1) 1667 if (rv & 1)
1668 panic("pmap_kremove: pseg_set needs spare, rv=%d\n", 1668 panic("pmap_kremove: pseg_set needs spare, rv=%d\n",
1669 rv); 1669 rv);
1670 DPRINTF(PDB_DEMAP, ("pmap_kremove: seg %x pdir %x pte %x\n", 1670 DPRINTF(PDB_DEMAP, ("pmap_kremove: seg %x pdir %x pte %x\n",
1671 (int)va_to_seg(va), (int)va_to_dir(va), 1671 (int)va_to_seg(va), (int)va_to_dir(va),
1672 (int)va_to_pte(va))); 1672 (int)va_to_pte(va)));
1673 REMOVE_STAT(removes); 1673 REMOVE_STAT(removes);
1674 1674
1675 tsb_invalidate(va, pm); 1675 tsb_invalidate(va, pm);
1676 REMOVE_STAT(tflushes); 1676 REMOVE_STAT(tflushes);
1677 1677
1678 /* 1678 /*
1679 * Here we assume nothing can get into the TLB 1679 * Here we assume nothing can get into the TLB
1680 * unless it has a PTE. 1680 * unless it has a PTE.
1681 */ 1681 */
1682 1682
1683 tlb_flush_pte(va, pm); 1683 tlb_flush_pte(va, pm);
1684 } 1684 }
1685 if (flush) { 1685 if (flush) {
1686 REMOVE_STAT(flushes); 1686 REMOVE_STAT(flushes);
1687 blast_dcache(); 1687 blast_dcache();
1688 } 1688 }
1689} 1689}
1690 1690
1691/* 1691/*
1692 * Insert physical page at pa into the given pmap at virtual address va. 1692 * Insert physical page at pa into the given pmap at virtual address va.
1693 * Supports 64-bit pa so we can map I/O space. 1693 * Supports 64-bit pa so we can map I/O space.
1694 */ 1694 */
1695 1695
1696int 1696int
1697pmap_enter(pm, va, pa, prot, flags) 1697pmap_enter(pm, va, pa, prot, flags)
1698 struct pmap *pm; 1698 struct pmap *pm;
1699 vaddr_t va; 1699 vaddr_t va;
1700 paddr_t pa; 1700 paddr_t pa;
1701 vm_prot_t prot; 1701 vm_prot_t prot;
1702 int flags; 1702 int flags;
1703{ 1703{
1704 pte_t tte; 1704 pte_t tte;
1705 int64_t data; 1705 int64_t data;
1706 paddr_t opa = 0, ptp; /* XXX: gcc */ 1706 paddr_t opa = 0, ptp; /* XXX: gcc */
1707 pv_entry_t pvh, npv = NULL, freepv; 1707 pv_entry_t pvh, npv = NULL, freepv;
1708 struct vm_page *pg, *opg, *ptpg; 1708 struct vm_page *pg, *opg, *ptpg;
1709 int s, i, uncached = 0, error = 0; 1709 int s, i, uncached = 0, error = 0;
1710 int size = PGSZ_8K; /* PMAP_SZ_TO_TTE(pa); */ 1710 int size = PGSZ_8K; /* PMAP_SZ_TO_TTE(pa); */
1711 bool wired = (flags & PMAP_WIRED) != 0; 1711 bool wired = (flags & PMAP_WIRED) != 0;
1712 bool wasmapped = FALSE; 1712 bool wasmapped = FALSE;
1713 bool dopv = TRUE; 1713 bool dopv = TRUE;
1714 1714
1715 /* 1715 /*
1716 * Is this part of the permanent mappings? 1716 * Is this part of the permanent mappings?
1717 */ 1717 */
1718 KASSERT(pm != pmap_kernel() || va < INTSTACK || va > EINTSTACK); 1718 KASSERT(pm != pmap_kernel() || va < INTSTACK || va > EINTSTACK);
1719 KASSERT(pm != pmap_kernel() || va < kdata || va > ekdata); 1719 KASSERT(pm != pmap_kernel() || va < kdata || va > ekdata);
1720 1720
1721 /* Grab a spare PV. */ 1721 /* Grab a spare PV. */
1722 freepv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT); 1722 freepv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
1723 if (__predict_false(freepv == NULL)) { 1723 if (__predict_false(freepv == NULL)) {
1724 if (flags & PMAP_CANFAIL) 1724 if (flags & PMAP_CANFAIL)
1725 return (ENOMEM); 1725 return (ENOMEM);
1726 panic("pmap_enter: no pv entries available"); 1726 panic("pmap_enter: no pv entries available");
1727 } 1727 }
1728 freepv->pv_next = NULL; 1728 freepv->pv_next = NULL;
1729 1729
1730 /* 1730 /*
1731 * If a mapping at this address already exists, check if we're 1731 * If a mapping at this address already exists, check if we're
1732 * entering the same PA again. If it's different, remove it. 1732 * entering the same PA again. If it's different, remove it.
1733 */ 1733 */
1734 1734
1735 mutex_enter(&pmap_lock); 1735 mutex_enter(&pmap_lock);
1736 data = pseg_get(pm, va); 1736 data = pseg_get(pm, va);
1737 if (data & TLB_V) { 1737 if (data & TLB_V) {
1738 wasmapped = TRUE; 1738 wasmapped = TRUE;
1739 opa = data & TLB_PA_MASK; 1739 opa = data & TLB_PA_MASK;
1740 if (opa != pa) { 1740 if (opa != pa) {
1741 opg = PHYS_TO_VM_PAGE(opa); 1741 opg = PHYS_TO_VM_PAGE(opa);
1742 if (opg != NULL) { 1742 if (opg != NULL) {
1743 npv = pmap_remove_pv(pm, va, opg); 1743 npv = pmap_remove_pv(pm, va, opg);
1744 } 1744 }
1745 } 1745 }
1746 } 1746 }
1747 1747
1748 /* 1748 /*
1749 * Construct the TTE. 1749 * Construct the TTE.
1750 */ 1750 */
1751 pg = PHYS_TO_VM_PAGE(pa); 1751 pg = PHYS_TO_VM_PAGE(pa);
1752 if (pg) { 1752 if (pg) {
1753 pvh = &pg->mdpage.mdpg_pvh; 1753 pvh = &pg->mdpage.mdpg_pvh;
1754 uncached = (pvh->pv_va & (PV_ALIAS|PV_NVC)); 1754 uncached = (pvh->pv_va & (PV_ALIAS|PV_NVC));
1755#ifdef DIAGNOSTIC 1755#ifdef DIAGNOSTIC
1756 if ((flags & VM_PROT_ALL) & ~prot) 1756 if ((flags & VM_PROT_ALL) & ~prot)
1757 panic("pmap_enter: access_type exceeds prot"); 1757 panic("pmap_enter: access_type exceeds prot");
1758#endif 1758#endif
1759 /* 1759 /*
1760 * If we don't have the traphandler do it, 1760 * If we don't have the traphandler do it,
1761 * set the ref/mod bits now. 1761 * set the ref/mod bits now.
1762 */ 1762 */
1763 if (flags & VM_PROT_ALL) 1763 if (flags & VM_PROT_ALL)
1764 pvh->pv_va |= PV_REF; 1764 pvh->pv_va |= PV_REF;
1765 if (flags & VM_PROT_WRITE) 1765 if (flags & VM_PROT_WRITE)
1766 pvh->pv_va |= PV_MOD; 1766 pvh->pv_va |= PV_MOD;
1767 1767
1768 /* 1768 /*
1769 * make sure we have a pv entry ready if we need one. 1769 * make sure we have a pv entry ready if we need one.
1770 */ 1770 */
1771 if (pvh->pv_pmap == NULL || (wasmapped && opa == pa)) { 1771 if (pvh->pv_pmap == NULL || (wasmapped && opa == pa)) {
1772 if (npv != NULL) { 1772 if (npv != NULL) {
1773 /* free it */ 1773 /* free it */
1774 npv->pv_next = freepv; 1774 npv->pv_next = freepv;
1775 freepv = npv; 1775 freepv = npv;
1776 npv = NULL; 1776 npv = NULL;
1777 } 1777 }
1778 if (wasmapped && opa == pa) { 1778 if (wasmapped && opa == pa) {
1779 dopv = FALSE; 1779 dopv = FALSE;
1780 } 1780 }
1781 } else if (npv == NULL) { 1781 } else if (npv == NULL) {
1782 /* use the pre-allocated pv */ 1782 /* use the pre-allocated pv */
1783 npv = freepv; 1783 npv = freepv;
1784 freepv = freepv->pv_next; 1784 freepv = freepv->pv_next;
1785 } 1785 }
1786 ENTER_STAT(managed); 1786 ENTER_STAT(managed);
1787 } else { 1787 } else {
1788 ENTER_STAT(unmanaged); 1788 ENTER_STAT(unmanaged);
1789 dopv = FALSE; 1789 dopv = FALSE;
1790 if (npv != NULL) { 1790 if (npv != NULL) {
1791 /* free it */ 1791 /* free it */
1792 npv->pv_next = freepv; 1792 npv->pv_next = freepv;
1793 freepv = npv; 1793 freepv = npv;
1794 npv = NULL; 1794 npv = NULL;
1795 } 1795 }
1796 } 1796 }
1797 1797
1798#ifndef NO_VCACHE 1798#ifndef NO_VCACHE
1799 if (pa & PMAP_NVC) 1799 if (pa & PMAP_NVC)
1800#endif 1800#endif
1801 uncached = 1; 1801 uncached = 1;
1802 if (uncached) { 1802 if (uncached) {
1803 ENTER_STAT(ci); 1803 ENTER_STAT(ci);
1804 } 1804 }
1805 tte.data = TSB_DATA(0, size, pa, pm == pmap_kernel(), 1805 tte.data = TSB_DATA(0, size, pa, pm == pmap_kernel(),
1806 flags & VM_PROT_WRITE, !(pa & PMAP_NC), 1806 flags & VM_PROT_WRITE, !(pa & PMAP_NC),
1807 uncached, 1, pa & PMAP_LITTLE); 1807 uncached, 1, pa & PMAP_LITTLE);
1808#ifdef HWREF 1808#ifdef HWREF
1809 if (prot & VM_PROT_WRITE) 1809 if (prot & VM_PROT_WRITE)
1810 tte.data |= TLB_REAL_W; 1810 tte.data |= TLB_REAL_W;
1811 if (prot & VM_PROT_EXECUTE) 1811 if (prot & VM_PROT_EXECUTE)
1812 tte.data |= TLB_EXEC; 1812 tte.data |= TLB_EXEC;
1813#else 1813#else
1814 /* If it needs ref accounting do nothing. */ 1814 /* If it needs ref accounting do nothing. */
1815 if (!(flags & VM_PROT_READ)) { 1815 if (!(flags & VM_PROT_READ)) {
1816 mutex_exit(&pmap_lock); 1816 mutex_exit(&pmap_lock);
1817 goto out; 1817 goto out;
1818 } 1818 }
1819#endif 1819#endif
1820 if (flags & VM_PROT_EXECUTE) { 1820 if (flags & VM_PROT_EXECUTE) {
1821 if ((flags & (VM_PROT_READ|VM_PROT_WRITE)) == 0) 1821 if ((flags & (VM_PROT_READ|VM_PROT_WRITE)) == 0)
1822 tte.data |= TLB_EXEC_ONLY|TLB_EXEC; 1822 tte.data |= TLB_EXEC_ONLY|TLB_EXEC;
1823 else 1823 else
1824 tte.data |= TLB_EXEC; 1824 tte.data |= TLB_EXEC;
1825 } 1825 }
1826 if (wired) 1826 if (wired)
1827 tte.data |= TLB_TSB_LOCK; 1827 tte.data |= TLB_TSB_LOCK;
1828 ptp = 0; 1828 ptp = 0;
1829 1829
1830 retry: 1830 retry:
1831 i = pseg_set(pm, va, tte.data, ptp); 1831 i = pseg_set(pm, va, tte.data, ptp);
1832 if (i & 4) { 1832 if (i & 4) {
1833 /* ptp used as L3 */ 1833 /* ptp used as L3 */
1834 KASSERT(ptp != 0); 1834 KASSERT(ptp != 0);
1835 KASSERT((i & 3) == 0); 1835 KASSERT((i & 3) == 0);
1836 ptpg = PHYS_TO_VM_PAGE(ptp); 1836 ptpg = PHYS_TO_VM_PAGE(ptp);
1837 if (ptpg) { 1837 if (ptpg) {
1838 ptpg->offset = (uint64_t)va & (0xfffffLL << 23); 1838 ptpg->offset = (uint64_t)va & (0xfffffLL << 23);
1839 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue); 1839 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
1840 } else { 1840 } else {
1841 KASSERT(pm == pmap_kernel()); 1841 KASSERT(pm == pmap_kernel());
1842 } 1842 }
1843 } 1843 }
1844 if (i & 2) { 1844 if (i & 2) {
1845 /* ptp used as L2 */ 1845 /* ptp used as L2 */
1846 KASSERT(ptp != 0); 1846 KASSERT(ptp != 0);
1847 KASSERT((i & 4) == 0); 1847 KASSERT((i & 4) == 0);
1848 ptpg = PHYS_TO_VM_PAGE(ptp); 1848 ptpg = PHYS_TO_VM_PAGE(ptp);
1849 if (ptpg) { 1849 if (ptpg) {
1850 ptpg->offset = (((uint64_t)va >> 43) & 0x3ffLL) << 13; 1850 ptpg->offset = (((uint64_t)va >> 43) & 0x3ffLL) << 13;
1851 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue); 1851 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
1852 } else { 1852 } else {
1853 KASSERT(pm == pmap_kernel()); 1853 KASSERT(pm == pmap_kernel());
1854 } 1854 }
1855 } 1855 }
1856 if (i & 1) { 1856 if (i & 1) {
1857 KASSERT((i & 4) == 0); 1857 KASSERT((i & 4) == 0);
1858 ptp = 0; 1858 ptp = 0;
1859 if (!pmap_get_page(&ptp)) { 1859 if (!pmap_get_page(&ptp)) {
1860 mutex_exit(&pmap_lock); 1860 mutex_exit(&pmap_lock);
1861 if (flags & PMAP_CANFAIL) { 1861 if (flags & PMAP_CANFAIL) {
1862 if (npv != NULL) { 1862 if (npv != NULL) {
1863 /* free it */ 1863 /* free it */
1864 npv->pv_next = freepv; 1864 npv->pv_next = freepv;
1865 freepv = npv; 1865 freepv = npv;
1866 } 1866 }
1867 error = ENOMEM; 1867 error = ENOMEM;
1868 goto out; 1868 goto out;
1869 } else { 1869 } else {
1870 panic("pmap_enter: no pages"); 1870 panic("pmap_enter: no pages");
1871 } 1871 }
1872 } 1872 }
1873 ENTER_STAT(ptpneeded); 1873 ENTER_STAT(ptpneeded);
1874 goto retry; 1874 goto retry;
1875 } 1875 }
1876 if (ptp && i == 0) { 1876 if (ptp && i == 0) {
1877 /* We allocated a spare page but didn't use it. Free it. */ 1877 /* We allocated a spare page but didn't use it. Free it. */
1878 printf("pmap_enter: freeing unused page %llx\n", 1878 printf("pmap_enter: freeing unused page %llx\n",
1879 (long long)ptp); 1879 (long long)ptp);
1880 pmap_free_page(ptp); 1880 pmap_free_page(ptp);
1881 } 1881 }
1882 if (dopv) { 1882 if (dopv) {
1883 pmap_enter_pv(pm, va, pa, pg, npv); 1883 pmap_enter_pv(pm, va, pa, pg, npv);
1884 } 1884 }
1885 1885
1886 mutex_exit(&pmap_lock); 1886 mutex_exit(&pmap_lock);
1887#ifdef DEBUG 1887#ifdef DEBUG
1888 i = ptelookup_va(va); 1888 i = ptelookup_va(va);
1889 if (pmapdebug & PDB_ENTER) 1889 if (pmapdebug & PDB_ENTER)
1890 prom_printf("pmap_enter: va=%08x data=%08x:%08x " 1890 prom_printf("pmap_enter: va=%08x data=%08x:%08x "
1891 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32), 1891 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32),
1892 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]); 1892 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]);
1893 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) { 1893 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) {
1894 prom_printf("pmap_enter: evicting entry tag=%x:%08x " 1894 prom_printf("pmap_enter: evicting entry tag=%x:%08x "
1895 "data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1895 "data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1896 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag, 1896 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag,
1897 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, i, 1897 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, i,
1898 &curcpu()->ci_tsb_dmmu[i]); 1898 &curcpu()->ci_tsb_dmmu[i]);
1899 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1899 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1900 va, (int)(tte.data>>32), (int)tte.data, i, 1900 va, (int)(tte.data>>32), (int)tte.data, i,
1901 &curcpu()->ci_tsb_dmmu[i]); 1901 &curcpu()->ci_tsb_dmmu[i]);
1902 } 1902 }
1903#endif 1903#endif
1904 1904
1905 if (flags & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) { 1905 if (flags & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) {
1906 1906
1907 /* 1907 /*
1908 * preload the TSB with the new entry, 1908 * preload the TSB with the new entry,
1909 * since we're going to need it immediately anyway. 1909 * since we're going to need it immediately anyway.
1910 */ 1910 */
1911 1911
1912 KASSERT(pmap_ctx(pm)>=0); 1912 KASSERT(pmap_ctx(pm)>=0);
1913 i = ptelookup_va(va); 1913 i = ptelookup_va(va);
1914 tte.tag = TSB_TAG(0, pmap_ctx(pm), va); 1914 tte.tag = TSB_TAG(0, pmap_ctx(pm), va);
1915 s = splhigh(); 1915 s = splhigh();
1916 if (wasmapped && pmap_is_on_mmu(pm)) { 1916 if (wasmapped && pmap_is_on_mmu(pm)) {
1917 tsb_invalidate(va, pm); 1917 tsb_invalidate(va, pm);
1918 } 1918 }
1919 if (flags & (VM_PROT_READ | VM_PROT_WRITE)) { 1919 if (flags & (VM_PROT_READ | VM_PROT_WRITE)) {
1920 curcpu()->ci_tsb_dmmu[i].tag = tte.tag; 1920 curcpu()->ci_tsb_dmmu[i].tag = tte.tag;
1921 __asm volatile("" : : : "memory"); 1921 __asm volatile("" : : : "memory");
1922 curcpu()->ci_tsb_dmmu[i].data = tte.data; 1922 curcpu()->ci_tsb_dmmu[i].data = tte.data;
1923 } 1923 }
1924 if (flags & VM_PROT_EXECUTE) { 1924 if (flags & VM_PROT_EXECUTE) {
1925 curcpu()->ci_tsb_immu[i].tag = tte.tag; 1925 curcpu()->ci_tsb_immu[i].tag = tte.tag;
1926 __asm volatile("" : : : "memory"); 1926 __asm volatile("" : : : "memory");
1927 curcpu()->ci_tsb_immu[i].data = tte.data; 1927 curcpu()->ci_tsb_immu[i].data = tte.data;
1928 } 1928 }
1929 1929
1930 /* 1930 /*
1931 * it's only necessary to flush the TLB if this page was 1931 * it's only necessary to flush the TLB if this page was
1932 * previously mapped, but for some reason it's a lot faster 1932 * previously mapped, but for some reason it's a lot faster
1933 * for the fork+exit microbenchmark if we always do it. 1933 * for the fork+exit microbenchmark if we always do it.
1934 */ 1934 */
1935 1935
1936 KASSERT(pmap_ctx(pm)>=0); 1936 KASSERT(pmap_ctx(pm)>=0);
1937#ifdef MULTIPROCESSOR 1937#ifdef MULTIPROCESSOR
1938 if (wasmapped && pmap_is_on_mmu(pm)) 1938 if (wasmapped && pmap_is_on_mmu(pm))
1939 tlb_flush_pte(va, pm); 1939 tlb_flush_pte(va, pm);
1940 else 1940 else
1941 sp_tlb_flush_pte(va, pmap_ctx(pm)); 1941 sp_tlb_flush_pte(va, pmap_ctx(pm));
1942#else 1942#else
1943 tlb_flush_pte(va, pm); 1943 tlb_flush_pte(va, pm);
1944#endif 1944#endif
1945 splx(s); 1945 splx(s);
1946 } else if (wasmapped && pmap_is_on_mmu(pm)) { 1946 } else if (wasmapped && pmap_is_on_mmu(pm)) {
1947 /* Force reload -- protections may be changed */ 1947 /* Force reload -- protections may be changed */
1948 KASSERT(pmap_ctx(pm)>=0); 1948 KASSERT(pmap_ctx(pm)>=0);
1949 tsb_invalidate(va, pm); 1949 tsb_invalidate(va, pm);
1950 tlb_flush_pte(va, pm); 1950 tlb_flush_pte(va, pm);
1951 } 1951 }
1952 1952
1953 /* We will let the fast mmu miss interrupt load the new translation */ 1953 /* We will let the fast mmu miss interrupt load the new translation */
1954 pv_check(); 1954 pv_check();
1955 out: 1955 out:
1956 /* Catch up on deferred frees. */ 1956 /* Catch up on deferred frees. */
1957 for (; freepv != NULL; freepv = npv) { 1957 for (; freepv != NULL; freepv = npv) {
1958 npv = freepv->pv_next; 1958 npv = freepv->pv_next;
1959 pool_cache_put(&pmap_pv_cache, freepv); 1959 pool_cache_put(&pmap_pv_cache, freepv);
1960 } 1960 }
1961 return error; 1961 return error;
1962} 1962}
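
The TSB preload near the end of pmap_enter() above stores the tag before the data with a compiler barrier in between, so the two stores reach the software TSB in a defined order relative to the MMU miss handler. A minimal sketch of that store ordering; tsb_preload_sketch() is an illustrative name only.

/*
 * Illustrative only -- the TSB store ordering used in pmap_enter()
 * above.  The empty asm is a compiler barrier: it prevents the tag
 * store from being reordered after the data store.
 */
static void
tsb_preload_sketch(pte_t *tsb, int i, pte_t tte)
{
	tsb[i].tag = tte.tag;
	__asm volatile("" : : : "memory");	/* compiler barrier */
	tsb[i].data = tte.data;
}
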
1963 1963
1964void 1964void
1965pmap_remove_all(pm) 1965pmap_remove_all(pm)
1966 struct pmap *pm; 1966 struct pmap *pm;
1967{ 1967{
1968#ifdef MULTIPROCESSOR 1968#ifdef MULTIPROCESSOR
1969 struct cpu_info *ci; 1969 struct cpu_info *ci;
1970#endif 1970#endif
1971 1971
1972 if (pm == pmap_kernel()) { 1972 if (pm == pmap_kernel()) {
1973 return; 1973 return;
1974 } 1974 }
1975 write_user_windows(); 1975 write_user_windows();
1976 pm->pm_refs = 0; 1976 pm->pm_refs = 0;
1977#ifdef MULTIPROCESSOR 1977#ifdef MULTIPROCESSOR
1978 mutex_enter(&pmap_lock); 1978 mutex_enter(&pmap_lock);
1979 for (ci = cpus; ci != NULL; ci = ci->ci_next) { 1979 for (ci = cpus; ci != NULL; ci = ci->ci_next) {
1980 if (CPUSET_HAS(cpus_active, ci->ci_index)) 1980 if (CPUSET_HAS(cpus_active, ci->ci_index))
1981 ctx_free(pm, ci); 1981 ctx_free(pm, ci);
1982 } 1982 }
1983 mutex_exit(&pmap_lock); 1983 mutex_exit(&pmap_lock);
1984#else 1984#else
1985 ctx_free(pm); 1985 ctx_free(pm);
1986#endif 1986#endif
1987 REMOVE_STAT(flushes); 1987 REMOVE_STAT(flushes);
1988 blast_dcache(); 1988 blast_dcache();
1989} 1989}
1990 1990
1991/* 1991/*
1992 * Remove the given range of mapping entries. 1992 * Remove the given range of mapping entries.
1993 */ 1993 */
1994void 1994void
1995pmap_remove(pm, va, endva) 1995pmap_remove(pm, va, endva)
1996 struct pmap *pm; 1996 struct pmap *pm;
1997 vaddr_t va, endva; 1997 vaddr_t va, endva;
1998{ 1998{
1999 int64_t data; 1999 int64_t data;
2000 paddr_t pa; 2000 paddr_t pa;
2001 struct vm_page *pg; 2001 struct vm_page *pg;
2002 pv_entry_t pv, freepv = NULL; 2002 pv_entry_t pv, freepv = NULL;
2003 int rv; 2003 int rv;
2004 bool flush = FALSE; 2004 bool flush = FALSE;
2005 2005
2006 /* 2006 /*
2007 * In here we should check each pseg and if there are no more entries, 2007 * In here we should check each pseg and if there are no more entries,
2008 * free it. It's just that linear scans of 8K pages get expensive. 2008 * free it. It's just that linear scans of 8K pages get expensive.
2009 */ 2009 */
2010 2010
2011 KASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK); 2011 KASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK);
2012 KASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata); 2012 KASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata);
2013 2013
2014 mutex_enter(&pmap_lock); 2014 mutex_enter(&pmap_lock);
2015 DPRINTF(PDB_REMOVE, ("pmap_remove(pm=%p, va=%p, endva=%p):", pm, 2015 DPRINTF(PDB_REMOVE, ("pmap_remove(pm=%p, va=%p, endva=%p):", pm,
2016 (void *)(u_long)va, (void *)(u_long)endva)); 2016 (void *)(u_long)va, (void *)(u_long)endva));
2017 REMOVE_STAT(calls); 2017 REMOVE_STAT(calls);
2018 2018
2019 /* Now do the real work */ 2019 /* Now do the real work */
2020 for (; va < endva; va += PAGE_SIZE) { 2020 for (; va < endva; va += PAGE_SIZE) {
2021#ifdef DIAGNOSTIC 2021#ifdef DIAGNOSTIC
2022 /* 2022 /*
2023 * Is this part of the permanent 4MB mapping? 2023 * Is this part of the permanent 4MB mapping?
2024 */ 2024 */
2025 if (pm == pmap_kernel() && va >= ktext && 2025 if (pm == pmap_kernel() && va >= ktext &&
2026 va < roundup(ekdata, 4*MEG)) 2026 va < roundup(ekdata, 4*MEG))
2027 panic("pmap_remove: va=%08llx in locked TLB", 2027 panic("pmap_remove: va=%08llx in locked TLB",
2028 (long long)va); 2028 (long long)va);
2029#endif 2029#endif
2030 2030
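		/* Look up the mapping; skip this VA if there is nothing to remove. */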
		data = pseg_get(pm, va);
		if (data == 0) {
			continue;
		}

		flush = TRUE;
		/* First remove the pv entry, if there is one */
		pa = data & TLB_PA_MASK;
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg) {
			pv = pmap_remove_pv(pm, va, pg);
			if (pv != NULL) {
				/* free it */
				pv->pv_next = freepv;
				freepv = pv;
			}
		}

		/*
		 * We need to flip the valid bit and
		 * clear the access statistics.
		 */

		rv = pseg_set(pm, va, 0, 0);
		if (rv & 1)
			panic("pmap_remove: pseg_set needed spare, rv=%d!\n",
			    rv);

		DPRINTF(PDB_REMOVE, (" clearing seg %x pte %x\n",
		    (int)va_to_seg(va), (int)va_to_pte(va)));
		REMOVE_STAT(removes);

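		/*
		 * Skip the TSB/TLB flush for user pmaps that currently
		 * have no MMU context; there is nothing to invalidate.
		 */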
		if (pm != pmap_kernel() && !pmap_has_ctx(pm))
			continue;

		/*
		 * If the pmap is being torn down, don't bother flushing.
		 */

		if (!pm->pm_refs)
			continue;

		/*
		 * Here we assume nothing can get into the TLB
		 * unless it has a PTE.
		 */

		KASSERT(pmap_ctx(pm) >= 0);
		tsb_invalidate(va, pm);
		REMOVE_STAT(tflushes);
		tlb_flush_pte(va, pm);
	}
	if (flush && pm->pm_refs) {
		REMOVE_STAT(flushes);
		blast_dcache();
	}
	DPRINTF(PDB_REMOVE, ("\n"));
	pv_check();
	mutex_exit(&pmap_lock);

	/* Catch up on deferred frees. */
	for (; freepv != NULL; freepv = pv) {
		pv = freepv->pv_next;
		pool_cache_put(&pmap_pv_cache, freepv);
	}
}
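/*
 * Usage sketch (hypothetical MI caller, not from this file): ranges are
 * handled a page at a time, and a batch of removals is normally followed
 * by pmap_update(), e.g.
 *
 *	pmap_remove(vm_map_pmap(map), trunc_page(start), round_page(end));
 *	pmap_update(vm_map_pmap(map));
 */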

/*
 * Change the protection on the specified range of this pmap.
 */
void
pmap_protect(pm, sva, eva, prot)
	struct pmap *pm;
	vaddr_t sva, eva;
	vm_prot_t prot;
{
	paddr_t pa;
	int64_t data;
	struct vm_page *pg;
	pv_entry_t pv;
	int rv;

	KASSERT(pm != pmap_kernel() || eva < INTSTACK || sva > EINTSTACK);
	KASSERT(pm != pmap_kernel() || eva < kdata || sva > ekdata);

	if (prot == VM_PROT_NONE) {
		pmap_remove(pm, sva, eva);
		return;
	}

	mutex_enter(&pmap_lock);
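	/* Work a page at a time: round the start down to a page boundary. */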
	sva = sva & ~PGOFSET;
	for (; sva < eva; sva += PAGE_SIZE) {
#ifdef DEBUG
		/*
		 * Is this part of the permanent 4MB mapping?
		 */
		if (pm == pmap_kernel() && sva >= ktext &&
		    sva < roundup(ekdata, 4 * MEG)) {
			prom_printf("pmap_protect: va=%08x in locked TLB\n",
			    sva);
			prom_abort();
			return;
		}
#endif
		DPRINTF(PDB_CHANGEPROT, ("pmap_protect: va %p\n",
		    (void *)(u_long)sva));
		data = pseg_get(pm, sva);
		if ((data & TLB_V) == 0) {
			continue;
		}

		pa = data & TLB_PA_MASK;
		DPRINTF(PDB_CHANGEPROT|PDB_REF,
		    ("pmap_protect: va=%08x data=%08llx "
		     "seg=%08x pte=%08x\n",
		     (u_int)sva, (long long)pa, (int)va_to_seg(sva),
		     (int)va_to_pte(sva)));

		pg = PHYS_TO_VM_PAGE(pa);
		if (pg) {
			/* Save REF/MOD info */
			pv = &pg->mdpage.mdpg_pvh;
			if (data & TLB_ACCESS)
				pv->pv_va |= PV_REF;
			if (data & TLB_MODIFY)
				pv->pv_va |= PV_MOD;
		}

		/* Just do the pmap and TSB, not the pv_list */
		if ((prot & VM_PROT_WRITE) == 0)
			data &= ~(TLB_W|TLB_REAL_W);
		if ((prot & VM_PROT_EXECUTE) == 0)
			data &= ~(TLB_EXEC);

		rv = pseg_set(pm, sva, data, 0);
		if (rv & 1)
			panic("pmap_protect: pseg_set needs spare! rv=%d\n",
			    rv);

		if (pm != pmap_kernel() && !pmap_has_ctx(pm))
			continue;

		KASSERT(pmap_ctx(pm) >= 0);
		tsb_invalidate(sva, pm);
		tlb_flush_pte(sva, pm);
	}
	pv_check();
	mutex_exit(&pmap_lock);
}
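/*
 * Note: pmap_protect() only ever narrows access -- TLB_W/TLB_REAL_W and
 * TLB_EXEC are cleared when the new protection lacks write/execute, but
 * no permission bits are ever added back here.
 */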

/*
 * Extract the physical page address associated
 * with the given map/virtual_address pair.
 */
bool
pmap_extract(pm, va, pap)
	struct pmap *pm;
	vaddr_t va;
	paddr_t *pap;
{
	paddr_t pa;
	int64_t data = 0;

	if (pm == pmap_kernel() && va >= kdata && va < roundup(ekdata, 4*MEG)) {
		/* Need to deal w/locked TLB entry specially. */
		pa = pmap_kextract(va);
		DPRINTF(PDB_EXTRACT, ("pmap_extract: va=%lx pa=%llx\n",
		    (u_long)va, (unsigned long long)pa));
	} else if (pm == pmap_kernel() && va >= ktext && va < ektext) {
		/* Need to deal w/locked TLB entry specially. */
		pa = pmap_kextract(va);
		DPRINTF(PDB_EXTRACT, ("pmap_extract: va=%lx pa=%llx\n",
		    (u_long)va, (unsigned long long)pa));
	} else if (pm == pmap_kernel() && va >= INTSTACK && va < (INTSTACK + 64*KB)) {
		pa = (paddr_t)(curcpu()->ci_paddr - INTSTACK + va);
		DPRINTF(PDB_EXTRACT, ("pmap_extract (intstack): va=%lx pa=%llx\n",
		    (u_long)va, (unsigned long long)pa));
		if (pap != NULL)
			*pap = pa;
		return TRUE;
	} else {
		if (pm != pmap_kernel()) {
			mutex_enter(&pmap_lock);
		}
		data = pseg_get(pm, va);
		pa = data & TLB_PA_MASK;
#ifdef DEBUG
		if (pmapdebug & PDB_EXTRACT) {
			paddr_t npa = ldxa((vaddr_t)&pm->pm_segs[va_to_seg(va)],
			    ASI_PHYS_CACHED);
			printf("pmap_extract: va=%p segs[%ld]=%llx",
			    (void *)(u_long)va, (long)va_to_seg(va),
			    (unsigned long long)npa);
			if (npa) {
				npa = (paddr_t)
				    ldxa((vaddr_t)&((paddr_t *)(u_long)npa)
					[va_to_dir(va)],
					ASI_PHYS_CACHED);
				printf(" segs[%ld][%ld]=%lx",
				    (long)va_to_seg(va),
				    (long)va_to_dir(va), (long)npa);
			}
			if (npa) {
				npa = (paddr_t)
				    ldxa((vaddr_t)&((paddr_t *)(u_long)npa)
					[va_to_pte(va)],
					ASI_PHYS_CACHED);
				printf(" segs[%ld][%ld][%ld]=%lx",
				    (long)va_to_seg(va),
				    (long)va_to_dir(va),
				    (long)va_to_pte(va), (long)npa);
			}
			printf(" pseg_get: %lx\n", (long)pa);
		}
#endif
		if (pm != pmap_kernel()) {
			mutex_exit(&pmap_lock);
		}
	}
	if ((data & TLB_V) == 0)
		return (FALSE);
	if (pap != NULL)
		*pap = pa + (va & PGOFSET);
	return (TRUE);
}
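/*
 * Usage sketch (hypothetical caller, not from this file):
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		panic("va %lx not mapped", (u_long)va);
 *
 * A false return means no valid mapping exists; on success *pap receives
 * the physical address including the page offset of va.
 */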

/*
 * Change protection on a kernel address.
 * This should only be called from MD code.
 */
void
pmap_kprotect(va, prot)
	vaddr_t va;