Fri Jan 25 17:12:33 2013 UTC ()
Update the DEBUG section at the tail of pmap_clear_modify().

Nothing prevents page modification after the modify bit was
cleared but before the DEBUG section checks pmap_is_modified(),
so remove this check.

For the same reason "modified" and "changed" may differ, so only
check "modified" implies "changed" here.

Ok: Matthew Green <mrg@netbsd.org>


(hannken)
diff -r1.279 -r1.280 src/sys/arch/sparc64/sparc64/pmap.c

cvs diff -r1.279 -r1.280 src/sys/arch/sparc64/sparc64/pmap.c (switch to unified diff)

--- src/sys/arch/sparc64/sparc64/pmap.c 2013/01/03 09:40:55 1.279
+++ src/sys/arch/sparc64/sparc64/pmap.c 2013/01/25 17:12:33 1.280
@@ -1,1028 +1,1028 @@ @@ -1,1028 +1,1028 @@
1/* $NetBSD: pmap.c,v 1.279 2013/01/03 09:40:55 martin Exp $ */ 1/* $NetBSD: pmap.c,v 1.280 2013/01/25 17:12:33 hannken Exp $ */
2/* 2/*
3 * 3 *
4 * Copyright (C) 1996-1999 Eduardo Horvath. 4 * Copyright (C) 1996-1999 Eduardo Horvath.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE. 24 * SUCH DAMAGE.
25 * 25 *
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.279 2013/01/03 09:40:55 martin Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.280 2013/01/25 17:12:33 hannken Exp $");
30 30
31#undef NO_VCACHE /* Don't forget the locked TLB in dostart */ 31#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
32#define HWREF 32#define HWREF
33 33
34#include "opt_ddb.h" 34#include "opt_ddb.h"
35#include "opt_multiprocessor.h" 35#include "opt_multiprocessor.h"
36#include "opt_modular.h" 36#include "opt_modular.h"
37 37
38#include <sys/param.h> 38#include <sys/param.h>
39#include <sys/malloc.h> 39#include <sys/malloc.h>
40#include <sys/queue.h> 40#include <sys/queue.h>
41#include <sys/systm.h> 41#include <sys/systm.h>
42#include <sys/msgbuf.h> 42#include <sys/msgbuf.h>
43#include <sys/pool.h> 43#include <sys/pool.h>
44#include <sys/exec.h> 44#include <sys/exec.h>
45#include <sys/core.h> 45#include <sys/core.h>
46#include <sys/kcore.h> 46#include <sys/kcore.h>
47#include <sys/proc.h> 47#include <sys/proc.h>
48#include <sys/atomic.h> 48#include <sys/atomic.h>
49#include <sys/cpu.h> 49#include <sys/cpu.h>
50 50
51#include <sys/exec_aout.h> /* for MID_* */ 51#include <sys/exec_aout.h> /* for MID_* */
52 52
53#include <uvm/uvm.h> 53#include <uvm/uvm.h>
54 54
55#include <machine/pcb.h> 55#include <machine/pcb.h>
56#include <machine/sparc64.h> 56#include <machine/sparc64.h>
57#include <machine/ctlreg.h> 57#include <machine/ctlreg.h>
58#include <machine/promlib.h> 58#include <machine/promlib.h>
59#include <machine/kcore.h> 59#include <machine/kcore.h>
60#include <machine/bootinfo.h> 60#include <machine/bootinfo.h>
61 61
62#include <sparc64/sparc64/cache.h> 62#include <sparc64/sparc64/cache.h>
63 63
64#ifdef DDB 64#ifdef DDB
65#include <machine/db_machdep.h> 65#include <machine/db_machdep.h>
66#include <ddb/db_command.h> 66#include <ddb/db_command.h>
67#include <ddb/db_sym.h> 67#include <ddb/db_sym.h>
68#include <ddb/db_variables.h> 68#include <ddb/db_variables.h>
69#include <ddb/db_extern.h> 69#include <ddb/db_extern.h>
70#include <ddb/db_access.h> 70#include <ddb/db_access.h>
71#include <ddb/db_output.h> 71#include <ddb/db_output.h>
72#else 72#else
73#define Debugger() 73#define Debugger()
74#define db_printf printf 74#define db_printf printf
75#endif 75#endif
76 76
77#define MEG (1<<20) /* 1MB */ 77#define MEG (1<<20) /* 1MB */
78#define KB (1<<10) /* 1KB */ 78#define KB (1<<10) /* 1KB */
79 79
80paddr_t cpu0paddr; /* contigious phys memory preallocated for cpus */ 80paddr_t cpu0paddr; /* contigious phys memory preallocated for cpus */
81 81
82/* These routines are in assembly to allow access thru physical mappings */ 82/* These routines are in assembly to allow access thru physical mappings */
83extern int64_t pseg_get_real(struct pmap *, vaddr_t); 83extern int64_t pseg_get_real(struct pmap *, vaddr_t);
84extern int pseg_set_real(struct pmap *, vaddr_t, int64_t, paddr_t); 84extern int pseg_set_real(struct pmap *, vaddr_t, int64_t, paddr_t);
85 85
86/* 86/*
87 * Diatribe on ref/mod counting: 87 * Diatribe on ref/mod counting:
88 * 88 *
89 * First of all, ref/mod info must be non-volatile. Hence we need to keep it 89 * First of all, ref/mod info must be non-volatile. Hence we need to keep it
90 * in the pv_entry structure for each page. (We could bypass this for the 90 * in the pv_entry structure for each page. (We could bypass this for the
91 * vm_page, but that's a long story....) 91 * vm_page, but that's a long story....)
92 * 92 *
93 * This architecture has nice, fast traps with lots of space for software bits 93 * This architecture has nice, fast traps with lots of space for software bits
94 * in the TTE. To accelerate ref/mod counts we make use of these features. 94 * in the TTE. To accelerate ref/mod counts we make use of these features.
95 * 95 *
96 * When we map a page initially, we place a TTE in the page table. It's 96 * When we map a page initially, we place a TTE in the page table. It's
97 * inserted with the TLB_W and TLB_ACCESS bits cleared. If a page is really 97 * inserted with the TLB_W and TLB_ACCESS bits cleared. If a page is really
98 * writable we set the TLB_REAL_W bit for the trap handler. 98 * writable we set the TLB_REAL_W bit for the trap handler.
99 * 99 *
100 * Whenever we take a TLB miss trap, the trap handler will set the TLB_ACCESS 100 * Whenever we take a TLB miss trap, the trap handler will set the TLB_ACCESS
101 * bit in the approprate TTE in the page table. Whenever we take a protection 101 * bit in the approprate TTE in the page table. Whenever we take a protection
102 * fault, if the TLB_REAL_W bit is set then we flip both the TLB_W and TLB_MOD 102 * fault, if the TLB_REAL_W bit is set then we flip both the TLB_W and TLB_MOD
103 * bits to enable writing and mark the page as modified. 103 * bits to enable writing and mark the page as modified.
104 * 104 *
105 * This means that we may have ref/mod information all over the place. The 105 * This means that we may have ref/mod information all over the place. The
106 * pmap routines must traverse the page tables of all pmaps with a given page 106 * pmap routines must traverse the page tables of all pmaps with a given page
107 * and collect/clear all the ref/mod information and copy it into the pv_entry. 107 * and collect/clear all the ref/mod information and copy it into the pv_entry.
108 */ 108 */
109 109
110#ifdef NO_VCACHE 110#ifdef NO_VCACHE
111#define FORCE_ALIAS 1 111#define FORCE_ALIAS 1
112#else 112#else
113#define FORCE_ALIAS 0 113#define FORCE_ALIAS 0
114#endif 114#endif
115 115
116#define PV_ALIAS 0x1LL 116#define PV_ALIAS 0x1LL
117#define PV_REF 0x2LL 117#define PV_REF 0x2LL
118#define PV_MOD 0x4LL 118#define PV_MOD 0x4LL
119#define PV_NVC 0x8LL 119#define PV_NVC 0x8LL
120#define PV_NC 0x10LL 120#define PV_NC 0x10LL
121#define PV_WE 0x20LL /* Debug -- this page was writable somtime */ 121#define PV_WE 0x20LL /* Debug -- this page was writable somtime */
122#define PV_MASK (0x03fLL) 122#define PV_MASK (0x03fLL)
123#define PV_VAMASK (~(PAGE_SIZE - 1)) 123#define PV_VAMASK (~(PAGE_SIZE - 1))
124#define PV_MATCH(pv,va) (!(((pv)->pv_va ^ (va)) & PV_VAMASK)) 124#define PV_MATCH(pv,va) (!(((pv)->pv_va ^ (va)) & PV_VAMASK))
125#define PV_SETVA(pv,va) ((pv)->pv_va = (((va) & PV_VAMASK) | \ 125#define PV_SETVA(pv,va) ((pv)->pv_va = (((va) & PV_VAMASK) | \
126 (((pv)->pv_va) & PV_MASK))) 126 (((pv)->pv_va) & PV_MASK)))
127 127
128struct pool_cache pmap_cache; 128struct pool_cache pmap_cache;
129struct pool_cache pmap_pv_cache; 129struct pool_cache pmap_pv_cache;
130 130
131pv_entry_t pmap_remove_pv(struct pmap *, vaddr_t, struct vm_page *); 131pv_entry_t pmap_remove_pv(struct pmap *, vaddr_t, struct vm_page *);
132void pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, struct vm_page *, 132void pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, struct vm_page *,
133 pv_entry_t); 133 pv_entry_t);
134void pmap_page_cache(struct pmap *, paddr_t, int); 134void pmap_page_cache(struct pmap *, paddr_t, int);
135 135
136/* 136/*
137 * First and last managed physical addresses. 137 * First and last managed physical addresses.
138 * XXX only used for dumping the system. 138 * XXX only used for dumping the system.
139 */ 139 */
140paddr_t vm_first_phys, vm_num_phys; 140paddr_t vm_first_phys, vm_num_phys;
141 141
142/* 142/*
143 * Here's the CPU TSB stuff. It's allocated in pmap_bootstrap. 143 * Here's the CPU TSB stuff. It's allocated in pmap_bootstrap.
144 */ 144 */
145int tsbsize; /* tsbents = 512 * 2^^tsbsize */ 145int tsbsize; /* tsbents = 512 * 2^^tsbsize */
146#define TSBENTS (512<<tsbsize) 146#define TSBENTS (512<<tsbsize)
147#define TSBSIZE (TSBENTS * 16) 147#define TSBSIZE (TSBENTS * 16)
148 148
149static struct pmap kernel_pmap_; 149static struct pmap kernel_pmap_;
150struct pmap *const kernel_pmap_ptr = &kernel_pmap_; 150struct pmap *const kernel_pmap_ptr = &kernel_pmap_;
151 151
152static int ctx_alloc(struct pmap *); 152static int ctx_alloc(struct pmap *);
153static bool pmap_is_referenced_locked(struct vm_page *); 153static bool pmap_is_referenced_locked(struct vm_page *);
154 154
155static void ctx_free(struct pmap *, struct cpu_info *); 155static void ctx_free(struct pmap *, struct cpu_info *);
156 156
157/* 157/*
158 * Check if any MMU has a non-zero context 158 * Check if any MMU has a non-zero context
159 */ 159 */
160static inline bool 160static inline bool
161pmap_has_ctx(struct pmap *p) 161pmap_has_ctx(struct pmap *p)
162{ 162{
163 int i; 163 int i;
164 164
165 /* any context on any cpu? */ 165 /* any context on any cpu? */
166 for (i = 0; i < sparc_ncpus; i++) 166 for (i = 0; i < sparc_ncpus; i++)
167 if (p->pm_ctx[i] > 0) 167 if (p->pm_ctx[i] > 0)
168 return true; 168 return true;
169 169
170 return false;  170 return false;
171} 171}
172 172
173#ifdef MULTIPROCESSOR 173#ifdef MULTIPROCESSOR
174#define pmap_ctx(PM) ((PM)->pm_ctx[cpu_number()]) 174#define pmap_ctx(PM) ((PM)->pm_ctx[cpu_number()])
175#else 175#else
176#define pmap_ctx(PM) ((PM)->pm_ctx[0]) 176#define pmap_ctx(PM) ((PM)->pm_ctx[0])
177#endif 177#endif
178 178
179/* 179/*
180 * Check if this pmap has a live mapping on some MMU. 180 * Check if this pmap has a live mapping on some MMU.
181 */ 181 */
182static inline bool 182static inline bool
183pmap_is_on_mmu(struct pmap *p) 183pmap_is_on_mmu(struct pmap *p)
184{ 184{
185 /* The kernel pmap is always on all MMUs */ 185 /* The kernel pmap is always on all MMUs */
186 if (p == pmap_kernel()) 186 if (p == pmap_kernel())
187 return true; 187 return true;
188 188
189 return pmap_has_ctx(p); 189 return pmap_has_ctx(p);
190} 190}
191 191
192/* 192/*
193 * Virtual and physical addresses of the start and end of kernel text 193 * Virtual and physical addresses of the start and end of kernel text
194 * and data segments. 194 * and data segments.
195 */ 195 */
196vaddr_t ktext; 196vaddr_t ktext;
197paddr_t ktextp; 197paddr_t ktextp;
198vaddr_t ektext; 198vaddr_t ektext;
199paddr_t ektextp; 199paddr_t ektextp;
200vaddr_t kdata; 200vaddr_t kdata;
201paddr_t kdatap; 201paddr_t kdatap;
202vaddr_t ekdata; 202vaddr_t ekdata;
203paddr_t ekdatap; 203paddr_t ekdatap;
204 204
205/* 205/*
206 * Kernel 4MB pages. 206 * Kernel 4MB pages.
207 */ 207 */
208extern struct tlb_entry *kernel_tlbs; 208extern struct tlb_entry *kernel_tlbs;
209extern int kernel_tlb_slots; 209extern int kernel_tlb_slots;
210 210
211static int npgs; 211static int npgs;
212 212
213vaddr_t vmmap; /* one reserved MI vpage for /dev/mem */ 213vaddr_t vmmap; /* one reserved MI vpage for /dev/mem */
214 214
215int phys_installed_size; /* Installed physical memory */ 215int phys_installed_size; /* Installed physical memory */
216struct mem_region *phys_installed; 216struct mem_region *phys_installed;
217 217
218paddr_t avail_start, avail_end; /* These are used by ps & family */ 218paddr_t avail_start, avail_end; /* These are used by ps & family */
219 219
220static int ptelookup_va(vaddr_t va); 220static int ptelookup_va(vaddr_t va);
221 221
222static inline void 222static inline void
223clrx(void *addr) 223clrx(void *addr)
224{ 224{
225 __asm volatile("clrx [%0]" : : "r" (addr) : "memory"); 225 __asm volatile("clrx [%0]" : : "r" (addr) : "memory");
226} 226}
227 227
228static void 228static void
229tsb_invalidate(vaddr_t va, pmap_t pm) 229tsb_invalidate(vaddr_t va, pmap_t pm)
230{ 230{
231 struct cpu_info *ci; 231 struct cpu_info *ci;
232 int ctx; 232 int ctx;
233 bool kpm = (pm == pmap_kernel()); 233 bool kpm = (pm == pmap_kernel());
234 int i; 234 int i;
235 int64_t tag; 235 int64_t tag;
236 236
237 i = ptelookup_va(va); 237 i = ptelookup_va(va);
238#ifdef MULTIPROCESSOR 238#ifdef MULTIPROCESSOR
239 for (ci = cpus; ci != NULL; ci = ci->ci_next) { 239 for (ci = cpus; ci != NULL; ci = ci->ci_next) {
240 if (!CPUSET_HAS(cpus_active, ci->ci_index)) 240 if (!CPUSET_HAS(cpus_active, ci->ci_index))
241 continue; 241 continue;
242#else 242#else
243 ci = curcpu(); 243 ci = curcpu();
244#endif 244#endif
245 ctx = pm->pm_ctx[ci->ci_index]; 245 ctx = pm->pm_ctx[ci->ci_index];
246 if (kpm || ctx > 0) { 246 if (kpm || ctx > 0) {
247 tag = TSB_TAG(0, ctx, va); 247 tag = TSB_TAG(0, ctx, va);
248 if (ci->ci_tsb_dmmu[i].tag == tag) { 248 if (ci->ci_tsb_dmmu[i].tag == tag) {
249 clrx(&ci->ci_tsb_dmmu[i].data); 249 clrx(&ci->ci_tsb_dmmu[i].data);
250 } 250 }
251 if (ci->ci_tsb_immu[i].tag == tag) { 251 if (ci->ci_tsb_immu[i].tag == tag) {
252 clrx(&ci->ci_tsb_immu[i].data); 252 clrx(&ci->ci_tsb_immu[i].data);
253 } 253 }
254 } 254 }
255#ifdef MULTIPROCESSOR 255#ifdef MULTIPROCESSOR
256 } 256 }
257#endif 257#endif
258} 258}
259 259
260struct prom_map *prom_map; 260struct prom_map *prom_map;
261int prom_map_size; 261int prom_map_size;
262 262
263#define PDB_CREATE 0x000001 263#define PDB_CREATE 0x000001
264#define PDB_DESTROY 0x000002 264#define PDB_DESTROY 0x000002
265#define PDB_REMOVE 0x000004 265#define PDB_REMOVE 0x000004
266#define PDB_CHANGEPROT 0x000008 266#define PDB_CHANGEPROT 0x000008
267#define PDB_ENTER 0x000010 267#define PDB_ENTER 0x000010
268#define PDB_DEMAP 0x000020 /* used in locore */ 268#define PDB_DEMAP 0x000020 /* used in locore */
269#define PDB_REF 0x000040 269#define PDB_REF 0x000040
270#define PDB_COPY 0x000080 270#define PDB_COPY 0x000080
271#define PDB_MMU_ALLOC 0x000100 271#define PDB_MMU_ALLOC 0x000100
272#define PDB_MMU_STEAL 0x000200 272#define PDB_MMU_STEAL 0x000200
273#define PDB_CTX_ALLOC 0x000400 273#define PDB_CTX_ALLOC 0x000400
274#define PDB_CTX_STEAL 0x000800 274#define PDB_CTX_STEAL 0x000800
275#define PDB_MMUREG_ALLOC 0x001000 275#define PDB_MMUREG_ALLOC 0x001000
276#define PDB_MMUREG_STEAL 0x002000 276#define PDB_MMUREG_STEAL 0x002000
277#define PDB_CACHESTUFF 0x004000 277#define PDB_CACHESTUFF 0x004000
278#define PDB_ALIAS 0x008000 278#define PDB_ALIAS 0x008000
279#define PDB_EXTRACT 0x010000 279#define PDB_EXTRACT 0x010000
280#define PDB_BOOT 0x020000 280#define PDB_BOOT 0x020000
281#define PDB_BOOT1 0x040000 281#define PDB_BOOT1 0x040000
282#define PDB_GROW 0x080000 282#define PDB_GROW 0x080000
283#define PDB_CTX_FLUSHALL 0x100000 283#define PDB_CTX_FLUSHALL 0x100000
284#define PDB_ACTIVATE 0x200000 284#define PDB_ACTIVATE 0x200000
285 285
286#if defined(DEBUG) && !defined(PMAP_DEBUG) 286#if defined(DEBUG) && !defined(PMAP_DEBUG)
287#define PMAP_DEBUG 287#define PMAP_DEBUG
288#endif 288#endif
289 289
290#ifdef PMAP_DEBUG 290#ifdef PMAP_DEBUG
291struct { 291struct {
292 int kernel; /* entering kernel mapping */ 292 int kernel; /* entering kernel mapping */
293 int user; /* entering user mapping */ 293 int user; /* entering user mapping */
294 int ptpneeded; /* needed to allocate a PT page */ 294 int ptpneeded; /* needed to allocate a PT page */
295 int pwchange; /* no mapping change, just wiring or protection */ 295 int pwchange; /* no mapping change, just wiring or protection */
296 int wchange; /* no mapping change, just wiring */ 296 int wchange; /* no mapping change, just wiring */
297 int mchange; /* was mapped but mapping to different page */ 297 int mchange; /* was mapped but mapping to different page */
298 int managed; /* a managed page */ 298 int managed; /* a managed page */
299 int firstpv; /* first mapping for this PA */ 299 int firstpv; /* first mapping for this PA */
300 int secondpv; /* second mapping for this PA */ 300 int secondpv; /* second mapping for this PA */
301 int ci; /* cache inhibited */ 301 int ci; /* cache inhibited */
302 int unmanaged; /* not a managed page */ 302 int unmanaged; /* not a managed page */
303 int flushes; /* cache flushes */ 303 int flushes; /* cache flushes */
304 int cachehit; /* new entry forced valid entry out */ 304 int cachehit; /* new entry forced valid entry out */
305} enter_stats; 305} enter_stats;
306struct { 306struct {
307 int calls; 307 int calls;
308 int removes; 308 int removes;
309 int flushes; 309 int flushes;
310 int tflushes; /* TLB flushes */ 310 int tflushes; /* TLB flushes */
311 int pidflushes; /* HW pid stolen */ 311 int pidflushes; /* HW pid stolen */
312 int pvfirst; 312 int pvfirst;
313 int pvsearch; 313 int pvsearch;
314} remove_stats; 314} remove_stats;
315#define ENTER_STAT(x) do { enter_stats.x ++; } while (0) 315#define ENTER_STAT(x) do { enter_stats.x ++; } while (0)
316#define REMOVE_STAT(x) do { remove_stats.x ++; } while (0) 316#define REMOVE_STAT(x) do { remove_stats.x ++; } while (0)
317 317
318int pmapdebug = 0; 318int pmapdebug = 0;
319//int pmapdebug = 0 | PDB_CTX_ALLOC | PDB_ACTIVATE; 319//int pmapdebug = 0 | PDB_CTX_ALLOC | PDB_ACTIVATE;
320/* Number of H/W pages stolen for page tables */ 320/* Number of H/W pages stolen for page tables */
321int pmap_pages_stolen = 0; 321int pmap_pages_stolen = 0;
322 322
323#define BDPRINTF(n, f) if (pmapdebug & (n)) prom_printf f 323#define BDPRINTF(n, f) if (pmapdebug & (n)) prom_printf f
324#define DPRINTF(n, f) if (pmapdebug & (n)) printf f 324#define DPRINTF(n, f) if (pmapdebug & (n)) printf f
325#else 325#else
326#define ENTER_STAT(x) do { /* nothing */ } while (0) 326#define ENTER_STAT(x) do { /* nothing */ } while (0)
327#define REMOVE_STAT(x) do { /* nothing */ } while (0) 327#define REMOVE_STAT(x) do { /* nothing */ } while (0)
328#define BDPRINTF(n, f) 328#define BDPRINTF(n, f)
329#define DPRINTF(n, f) 329#define DPRINTF(n, f)
330#define pmapdebug 0 330#define pmapdebug 0
331#endif 331#endif
332 332
333#define pv_check() 333#define pv_check()
334 334
335static int pmap_get_page(paddr_t *); 335static int pmap_get_page(paddr_t *);
336static void pmap_free_page(paddr_t, sparc64_cpuset_t); 336static void pmap_free_page(paddr_t, sparc64_cpuset_t);
337static void pmap_free_page_noflush(paddr_t); 337static void pmap_free_page_noflush(paddr_t);
338 338
339/* 339/*
340 * Global pmap locks. 340 * Global pmap locks.
341 */ 341 */
342static kmutex_t pmap_lock; 342static kmutex_t pmap_lock;
343static bool lock_available = false; 343static bool lock_available = false;
344 344
345/* 345/*
346 * Support for big page sizes. This maps the page size to the 346 * Support for big page sizes. This maps the page size to the
347 * page bits. That is: these are the bits between 8K pages and 347 * page bits. That is: these are the bits between 8K pages and
348 * larger page sizes that cause aliasing. 348 * larger page sizes that cause aliasing.
349 */ 349 */
350#define PSMAP_ENTRY(MASK, CODE) { .mask = MASK, .code = CODE } 350#define PSMAP_ENTRY(MASK, CODE) { .mask = MASK, .code = CODE }
351struct page_size_map page_size_map[] = { 351struct page_size_map page_size_map[] = {
352#ifdef DEBUG 352#ifdef DEBUG
353 PSMAP_ENTRY(0, PGSZ_8K & 0), /* Disable large pages */ 353 PSMAP_ENTRY(0, PGSZ_8K & 0), /* Disable large pages */
354#endif 354#endif
355 PSMAP_ENTRY((4 * 1024 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_4M), 355 PSMAP_ENTRY((4 * 1024 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_4M),
356 PSMAP_ENTRY((512 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_512K), 356 PSMAP_ENTRY((512 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_512K),
357 PSMAP_ENTRY((64 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_64K), 357 PSMAP_ENTRY((64 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_64K),
358 PSMAP_ENTRY((8 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_8K), 358 PSMAP_ENTRY((8 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_8K),
359 PSMAP_ENTRY(0, 0), 359 PSMAP_ENTRY(0, 0),
360}; 360};
361 361
362/* 362/*
363 * This probably shouldn't be necessary, but it stops USIII machines from 363 * This probably shouldn't be necessary, but it stops USIII machines from
364 * breaking in general, and not just for MULTIPROCESSOR. 364 * breaking in general, and not just for MULTIPROCESSOR.
365 */ 365 */
366#define USE_LOCKSAFE_PSEG_GETSET 366#define USE_LOCKSAFE_PSEG_GETSET
367#if defined(USE_LOCKSAFE_PSEG_GETSET) 367#if defined(USE_LOCKSAFE_PSEG_GETSET)
368 368
369static kmutex_t pseg_lock; 369static kmutex_t pseg_lock;
370 370
371static __inline__ int64_t 371static __inline__ int64_t
372pseg_get_locksafe(struct pmap *pm, vaddr_t va) 372pseg_get_locksafe(struct pmap *pm, vaddr_t va)
373{ 373{
374 int64_t rv; 374 int64_t rv;
375 bool took_lock = lock_available /*&& pm == pmap_kernel()*/; 375 bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
376 376
377 if (__predict_true(took_lock)) 377 if (__predict_true(took_lock))
378 mutex_enter(&pseg_lock); 378 mutex_enter(&pseg_lock);
379 rv = pseg_get_real(pm, va); 379 rv = pseg_get_real(pm, va);
380 if (__predict_true(took_lock)) 380 if (__predict_true(took_lock))
381 mutex_exit(&pseg_lock); 381 mutex_exit(&pseg_lock);
382 return rv; 382 return rv;
383} 383}
384 384
385static __inline__ int 385static __inline__ int
386pseg_set_locksafe(struct pmap *pm, vaddr_t va, int64_t data, paddr_t ptp) 386pseg_set_locksafe(struct pmap *pm, vaddr_t va, int64_t data, paddr_t ptp)
387{ 387{
388 int rv; 388 int rv;
389 bool took_lock = lock_available /*&& pm == pmap_kernel()*/; 389 bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
390 390
391 if (__predict_true(took_lock)) 391 if (__predict_true(took_lock))
392 mutex_enter(&pseg_lock); 392 mutex_enter(&pseg_lock);
393 rv = pseg_set_real(pm, va, data, ptp); 393 rv = pseg_set_real(pm, va, data, ptp);
394 if (__predict_true(took_lock)) 394 if (__predict_true(took_lock))
395 mutex_exit(&pseg_lock); 395 mutex_exit(&pseg_lock);
396 return rv; 396 return rv;
397} 397}
398 398
399#define pseg_get(pm, va) pseg_get_locksafe(pm, va) 399#define pseg_get(pm, va) pseg_get_locksafe(pm, va)
400#define pseg_set(pm, va, data, ptp) pseg_set_locksafe(pm, va, data, ptp) 400#define pseg_set(pm, va, data, ptp) pseg_set_locksafe(pm, va, data, ptp)
401 401
402#else /* USE_LOCKSAFE_PSEG_GETSET */ 402#else /* USE_LOCKSAFE_PSEG_GETSET */
403 403
404#define pseg_get(pm, va) pseg_get_real(pm, va) 404#define pseg_get(pm, va) pseg_get_real(pm, va)
405#define pseg_set(pm, va, data, ptp) pseg_set_real(pm, va, data, ptp) 405#define pseg_set(pm, va, data, ptp) pseg_set_real(pm, va, data, ptp)
406 406
407#endif /* USE_LOCKSAFE_PSEG_GETSET */ 407#endif /* USE_LOCKSAFE_PSEG_GETSET */
408 408
409/* 409/*
410 * Enter a TTE into the kernel pmap only. Don't do anything else. 410 * Enter a TTE into the kernel pmap only. Don't do anything else.
411 * 411 *
412 * Use only during bootstrapping since it does no locking and 412 * Use only during bootstrapping since it does no locking and
413 * can lose ref/mod info!!!! 413 * can lose ref/mod info!!!!
414 * 414 *
415 */ 415 */
416static void pmap_enter_kpage(vaddr_t va, int64_t data) 416static void pmap_enter_kpage(vaddr_t va, int64_t data)
417{ 417{
418 paddr_t newp; 418 paddr_t newp;
419 419
420 newp = 0UL; 420 newp = 0UL;
421 while (pseg_set(pmap_kernel(), va, data, newp) & 1) { 421 while (pseg_set(pmap_kernel(), va, data, newp) & 1) {
422 if (!pmap_get_page(&newp)) { 422 if (!pmap_get_page(&newp)) {
423 prom_printf("pmap_enter_kpage: out of pages\n"); 423 prom_printf("pmap_enter_kpage: out of pages\n");
424 panic("pmap_enter_kpage"); 424 panic("pmap_enter_kpage");
425 } 425 }
426 426
427 ENTER_STAT(ptpneeded); 427 ENTER_STAT(ptpneeded);
428 BDPRINTF(PDB_BOOT1, 428 BDPRINTF(PDB_BOOT1,
429 ("pseg_set: pm=%p va=%p data=%lx newp %lx\n", 429 ("pseg_set: pm=%p va=%p data=%lx newp %lx\n",
430 pmap_kernel(), va, (long)data, (long)newp)); 430 pmap_kernel(), va, (long)data, (long)newp));
431 if (pmapdebug & PDB_BOOT1) 431 if (pmapdebug & PDB_BOOT1)
432 {int i; for (i=0; i<140000000; i++) ;} 432 {int i; for (i=0; i<140000000; i++) ;}
433 } 433 }
434} 434}
435 435
436/* 436/*
437 * Check the bootargs to see if we need to enable bootdebug. 437 * Check the bootargs to see if we need to enable bootdebug.
438 */ 438 */
439#ifdef DEBUG 439#ifdef DEBUG
440static void pmap_bootdebug(void) 440static void pmap_bootdebug(void)
441{ 441{
442 const char *cp = prom_getbootargs(); 442 const char *cp = prom_getbootargs();
443 443
444 for (;;) 444 for (;;)
445 switch (*++cp) { 445 switch (*++cp) {
446 case '\0': 446 case '\0':
447 return; 447 return;
448 case 'V': 448 case 'V':
449 pmapdebug |= PDB_BOOT|PDB_BOOT1; 449 pmapdebug |= PDB_BOOT|PDB_BOOT1;
450 break; 450 break;
451 case 'D': 451 case 'D':
452 pmapdebug |= PDB_BOOT1; 452 pmapdebug |= PDB_BOOT1;
453 break; 453 break;
454 } 454 }
455} 455}
456#else 456#else
457#define pmap_bootdebug() /* nothing */ 457#define pmap_bootdebug() /* nothing */
458#endif 458#endif
459 459
460 460
461/* 461/*
462 * Calculate the correct number of page colors to use. This should be the 462 * Calculate the correct number of page colors to use. This should be the
463 * size of the E$/PAGE_SIZE. However, different CPUs can have different sized 463 * size of the E$/PAGE_SIZE. However, different CPUs can have different sized
464 * E$, so we need to take the GCM of the E$ size. 464 * E$, so we need to take the GCM of the E$ size.
465 */ 465 */
466static int pmap_calculate_colors(void) 466static int pmap_calculate_colors(void)
467{ 467{
468 int node; 468 int node;
469 int size, assoc, color, maxcolor = 1; 469 int size, assoc, color, maxcolor = 1;
470 470
471 for (node = prom_firstchild(prom_findroot()); node != 0; 471 for (node = prom_firstchild(prom_findroot()); node != 0;
472 node = prom_nextsibling(node)) { 472 node = prom_nextsibling(node)) {
473 char *name = prom_getpropstring(node, "device_type"); 473 char *name = prom_getpropstring(node, "device_type");
474 if (strcmp("cpu", name) != 0) 474 if (strcmp("cpu", name) != 0)
475 continue; 475 continue;
476 476
477 /* Found a CPU, get the E$ info. */ 477 /* Found a CPU, get the E$ info. */
478 size = prom_getpropint(node, "ecache-size", -1); 478 size = prom_getpropint(node, "ecache-size", -1);
479 if (size == -1) { 479 if (size == -1) {
480 prom_printf("pmap_calculate_colors: node %x has " 480 prom_printf("pmap_calculate_colors: node %x has "
481 "no ecache-size\n", node); 481 "no ecache-size\n", node);
482 /* If we can't get the E$ size, skip the node */ 482 /* If we can't get the E$ size, skip the node */
483 continue; 483 continue;
484 } 484 }
485 485
486 assoc = prom_getpropint(node, "ecache-associativity", 1); 486 assoc = prom_getpropint(node, "ecache-associativity", 1);
487 color = size/assoc/PAGE_SIZE; 487 color = size/assoc/PAGE_SIZE;
488 if (color > maxcolor) 488 if (color > maxcolor)
489 maxcolor = color; 489 maxcolor = color;
490 } 490 }
491 return (maxcolor); 491 return (maxcolor);
492} 492}
493 493
494static void pmap_alloc_bootargs(void) 494static void pmap_alloc_bootargs(void)
495{ 495{
496 char *v; 496 char *v;
497 497
498 v = OF_claim(NULL, 2*PAGE_SIZE, PAGE_SIZE); 498 v = OF_claim(NULL, 2*PAGE_SIZE, PAGE_SIZE);
499 if ((v == NULL) || (v == (void*)-1)) 499 if ((v == NULL) || (v == (void*)-1))
500 panic("Can't claim two pages of memory."); 500 panic("Can't claim two pages of memory.");
501 501
502 memset(v, 0, 2*PAGE_SIZE); 502 memset(v, 0, 2*PAGE_SIZE);
503 503
504 cpu_args = (struct cpu_bootargs*)v; 504 cpu_args = (struct cpu_bootargs*)v;
505} 505}
506 506
507#if defined(MULTIPROCESSOR) 507#if defined(MULTIPROCESSOR)
508static void pmap_mp_init(void); 508static void pmap_mp_init(void);
509 509
510static void 510static void
511pmap_mp_init(void) 511pmap_mp_init(void)
512{ 512{
513 pte_t *tp; 513 pte_t *tp;
514 char *v; 514 char *v;
515 int i; 515 int i;
516 516
517 extern void cpu_mp_startup(void); 517 extern void cpu_mp_startup(void);
518 518
519 if ((v = OF_claim(NULL, PAGE_SIZE, PAGE_SIZE)) == NULL) { 519 if ((v = OF_claim(NULL, PAGE_SIZE, PAGE_SIZE)) == NULL) {
520 panic("pmap_mp_init: Cannot claim a page."); 520 panic("pmap_mp_init: Cannot claim a page.");
521 } 521 }
522 522
523 memcpy(v, mp_tramp_code, mp_tramp_code_len); 523 memcpy(v, mp_tramp_code, mp_tramp_code_len);
524 *(u_long *)(v + mp_tramp_tlb_slots) = kernel_tlb_slots; 524 *(u_long *)(v + mp_tramp_tlb_slots) = kernel_tlb_slots;
525 *(u_long *)(v + mp_tramp_func) = (u_long)cpu_mp_startup; 525 *(u_long *)(v + mp_tramp_func) = (u_long)cpu_mp_startup;
526 *(u_long *)(v + mp_tramp_ci) = (u_long)cpu_args; 526 *(u_long *)(v + mp_tramp_ci) = (u_long)cpu_args;
527 tp = (pte_t *)(v + mp_tramp_code_len); 527 tp = (pte_t *)(v + mp_tramp_code_len);
528 for (i = 0; i < kernel_tlb_slots; i++) { 528 for (i = 0; i < kernel_tlb_slots; i++) {
529 tp[i].tag = kernel_tlbs[i].te_va; 529 tp[i].tag = kernel_tlbs[i].te_va;
530 tp[i].data = TSB_DATA(0, /* g */ 530 tp[i].data = TSB_DATA(0, /* g */
531 PGSZ_4M, /* sz */ 531 PGSZ_4M, /* sz */
532 kernel_tlbs[i].te_pa, /* pa */ 532 kernel_tlbs[i].te_pa, /* pa */
533 1, /* priv */ 533 1, /* priv */
534 1, /* write */ 534 1, /* write */
535 1, /* cache */ 535 1, /* cache */
536 1, /* aliased */ 536 1, /* aliased */
537 1, /* valid */ 537 1, /* valid */
538 0 /* ie */); 538 0 /* ie */);
539 tp[i].data |= TLB_L | TLB_CV; 539 tp[i].data |= TLB_L | TLB_CV;
540 DPRINTF(PDB_BOOT1, ("xtlb[%d]: Tag: %" PRIx64 " Data: %" 540 DPRINTF(PDB_BOOT1, ("xtlb[%d]: Tag: %" PRIx64 " Data: %"
541 PRIx64 "\n", i, tp[i].tag, tp[i].data)); 541 PRIx64 "\n", i, tp[i].tag, tp[i].data));
542 } 542 }
543 543
544 for (i = 0; i < PAGE_SIZE; i += sizeof(long)) 544 for (i = 0; i < PAGE_SIZE; i += sizeof(long))
545 flush(v + i); 545 flush(v + i);
546 546
547 cpu_spinup_trampoline = (vaddr_t)v; 547 cpu_spinup_trampoline = (vaddr_t)v;
548} 548}
549#else 549#else
550#define pmap_mp_init() ((void)0) 550#define pmap_mp_init() ((void)0)
551#endif 551#endif
552 552
553paddr_t pmap_kextract(vaddr_t va); 553paddr_t pmap_kextract(vaddr_t va);
554 554
555paddr_t 555paddr_t
556pmap_kextract(vaddr_t va) 556pmap_kextract(vaddr_t va)
557{ 557{
558 int i; 558 int i;
559 paddr_t paddr = (paddr_t)-1; 559 paddr_t paddr = (paddr_t)-1;
560 560
561 for (i = 0; i < kernel_tlb_slots; i++) { 561 for (i = 0; i < kernel_tlb_slots; i++) {
562 if ((va & ~PAGE_MASK_4M) == kernel_tlbs[i].te_va) { 562 if ((va & ~PAGE_MASK_4M) == kernel_tlbs[i].te_va) {
563 paddr = kernel_tlbs[i].te_pa + 563 paddr = kernel_tlbs[i].te_pa +
564 (paddr_t)(va & PAGE_MASK_4M); 564 (paddr_t)(va & PAGE_MASK_4M);
565 break; 565 break;
566 } 566 }
567 } 567 }
568 568
569 if (i == kernel_tlb_slots) { 569 if (i == kernel_tlb_slots) {
570 panic("pmap_kextract: Address %p is not from kernel space.\n" 570 panic("pmap_kextract: Address %p is not from kernel space.\n"
571 "Data segment is too small?\n", (void*)va); 571 "Data segment is too small?\n", (void*)va);
572 } 572 }
573 573
574 return (paddr); 574 return (paddr);
575} 575}
576 576
577/* 577/*
578 * Bootstrap kernel allocator, allocates from unused space in 4MB kernel 578 * Bootstrap kernel allocator, allocates from unused space in 4MB kernel
579 * data segment meaning that 579 * data segment meaning that
580 * 580 *
581 * - Access to allocated memory will never generate a trap 581 * - Access to allocated memory will never generate a trap
582 * - Allocated chunks are never reclaimed or freed 582 * - Allocated chunks are never reclaimed or freed
583 * - Allocation calls do not change PROM memlists 583 * - Allocation calls do not change PROM memlists
584 */ 584 */
585static struct mem_region kdata_mem_pool; 585static struct mem_region kdata_mem_pool;
586 586
587static void 587static void
588kdata_alloc_init(vaddr_t va_start, vaddr_t va_end) 588kdata_alloc_init(vaddr_t va_start, vaddr_t va_end)
589{ 589{
590 vsize_t va_size = va_end - va_start; 590 vsize_t va_size = va_end - va_start;
591 591
592 kdata_mem_pool.start = va_start; 592 kdata_mem_pool.start = va_start;
593 kdata_mem_pool.size = va_size; 593 kdata_mem_pool.size = va_size;
594 594
595 BDPRINTF(PDB_BOOT, ("kdata_alloc_init(): %d bytes @%p.\n", va_size, 595 BDPRINTF(PDB_BOOT, ("kdata_alloc_init(): %d bytes @%p.\n", va_size,
596 va_start)); 596 va_start));
597} 597}
598 598
599static vaddr_t 599static vaddr_t
600kdata_alloc(vsize_t size, vsize_t align) 600kdata_alloc(vsize_t size, vsize_t align)
601{ 601{
602 vaddr_t va; 602 vaddr_t va;
603 vsize_t asize; 603 vsize_t asize;
604 604
605 asize = roundup(kdata_mem_pool.start, align) - kdata_mem_pool.start; 605 asize = roundup(kdata_mem_pool.start, align) - kdata_mem_pool.start;
606 606
607 kdata_mem_pool.start += asize; 607 kdata_mem_pool.start += asize;
608 kdata_mem_pool.size -= asize; 608 kdata_mem_pool.size -= asize;
609 609
610 if (kdata_mem_pool.size < size) { 610 if (kdata_mem_pool.size < size) {
611 panic("kdata_alloc(): Data segment is too small.\n"); 611 panic("kdata_alloc(): Data segment is too small.\n");
612 } 612 }
613 613
614 va = kdata_mem_pool.start; 614 va = kdata_mem_pool.start;
615 kdata_mem_pool.start += size; 615 kdata_mem_pool.start += size;
616 kdata_mem_pool.size -= size; 616 kdata_mem_pool.size -= size;
617 617
618 BDPRINTF(PDB_BOOT, ("kdata_alloc(): Allocated %d@%p, %d free.\n", 618 BDPRINTF(PDB_BOOT, ("kdata_alloc(): Allocated %d@%p, %d free.\n",
619 size, (void*)va, kdata_mem_pool.size)); 619 size, (void*)va, kdata_mem_pool.size));
620 620
621 return (va); 621 return (va);
622} 622}
623 623
624/* 624/*
625 * Unified routine for reading PROM properties. 625 * Unified routine for reading PROM properties.
626 */ 626 */
627static void 627static void
628pmap_read_memlist(const char *device, const char *property, void **ml, 628pmap_read_memlist(const char *device, const char *property, void **ml,
629 int *ml_size, vaddr_t (* ml_alloc)(vsize_t, vsize_t)) 629 int *ml_size, vaddr_t (* ml_alloc)(vsize_t, vsize_t))
630{ 630{
631 void *va; 631 void *va;
632 int size, handle; 632 int size, handle;
633 633
634 if ( (handle = prom_finddevice(device)) == 0) { 634 if ( (handle = prom_finddevice(device)) == 0) {
635 prom_printf("pmap_read_memlist(): No %s device found.\n", 635 prom_printf("pmap_read_memlist(): No %s device found.\n",
636 device); 636 device);
637 prom_halt(); 637 prom_halt();
638 } 638 }
639 if ( (size = OF_getproplen(handle, property)) < 0) { 639 if ( (size = OF_getproplen(handle, property)) < 0) {
640 prom_printf("pmap_read_memlist(): %s/%s has no length.\n", 640 prom_printf("pmap_read_memlist(): %s/%s has no length.\n",
641 device, property); 641 device, property);
642 prom_halt(); 642 prom_halt();
643 } 643 }
644 if ( (va = (void*)(* ml_alloc)(size, sizeof(uint64_t))) == NULL) { 644 if ( (va = (void*)(* ml_alloc)(size, sizeof(uint64_t))) == NULL) {
645 prom_printf("pmap_read_memlist(): Cannot allocate memlist.\n"); 645 prom_printf("pmap_read_memlist(): Cannot allocate memlist.\n");
646 prom_halt(); 646 prom_halt();
647 } 647 }
648 if (OF_getprop(handle, property, va, size) <= 0) { 648 if (OF_getprop(handle, property, va, size) <= 0) {
649 prom_printf("pmap_read_memlist(): Cannot read %s/%s.\n", 649 prom_printf("pmap_read_memlist(): Cannot read %s/%s.\n",
650 device, property); 650 device, property);
651 prom_halt(); 651 prom_halt();
652 } 652 }
653 653
654 *ml = va; 654 *ml = va;
655 *ml_size = size; 655 *ml_size = size;
656} 656}
657 657
658/* 658/*
659 * This is called during bootstrap, before the system is really initialized. 659 * This is called during bootstrap, before the system is really initialized.
660 * 660 *
661 * It's called with the start and end virtual addresses of the kernel. We 661 * It's called with the start and end virtual addresses of the kernel. We
662 * bootstrap the pmap allocator now. We will allocate the basic structures we 662 * bootstrap the pmap allocator now. We will allocate the basic structures we
663 * need to bootstrap the VM system here: the page frame tables, the TSB, and 663 * need to bootstrap the VM system here: the page frame tables, the TSB, and
664 * the free memory lists. 664 * the free memory lists.
665 * 665 *
666 * Now all this is becoming a bit obsolete. maxctx is still important, but by 666 * Now all this is becoming a bit obsolete. maxctx is still important, but by
667 * separating the kernel text and data segments we really would need to 667 * separating the kernel text and data segments we really would need to
668 * provide the start and end of each segment. But we can't. The rodata 668 * provide the start and end of each segment. But we can't. The rodata
669 * segment is attached to the end of the kernel segment and has nothing to 669 * segment is attached to the end of the kernel segment and has nothing to
670 * delimit its end. We could still pass in the beginning of the kernel and 670 * delimit its end. We could still pass in the beginning of the kernel and
671 * the beginning and end of the data segment but we could also just as easily 671 * the beginning and end of the data segment but we could also just as easily
672 * calculate that all in here. 672 * calculate that all in here.
673 * 673 *
674 * To handle the kernel text, we need to do a reverse mapping of the start of 674 * To handle the kernel text, we need to do a reverse mapping of the start of
675 * the kernel, then traverse the free memory lists to find out how big it is. 675 * the kernel, then traverse the free memory lists to find out how big it is.
676 */ 676 */
677 677
678void 678void
679pmap_bootstrap(u_long kernelstart, u_long kernelend) 679pmap_bootstrap(u_long kernelstart, u_long kernelend)
680{ 680{
681#ifdef MODULAR 681#ifdef MODULAR
682 extern vaddr_t module_start, module_end; 682 extern vaddr_t module_start, module_end;
683#endif 683#endif
684 extern char etext[], data_start[]; /* start of data segment */ 684 extern char etext[], data_start[]; /* start of data segment */
685 extern int msgbufmapped; 685 extern int msgbufmapped;
686 struct mem_region *mp, *mp1, *avail, *orig; 686 struct mem_region *mp, *mp1, *avail, *orig;
687 int i, j, pcnt, msgbufsiz; 687 int i, j, pcnt, msgbufsiz;
688 size_t s, sz; 688 size_t s, sz;
689 int64_t data; 689 int64_t data;
690 vaddr_t va, intstk; 690 vaddr_t va, intstk;
691 uint64_t phys_msgbuf; 691 uint64_t phys_msgbuf;
692 paddr_t newp = 0; 692 paddr_t newp = 0;
693 693
694 void *prom_memlist; 694 void *prom_memlist;
695 int prom_memlist_size; 695 int prom_memlist_size;
696 696
697 BDPRINTF(PDB_BOOT, ("Entered pmap_bootstrap.\n")); 697 BDPRINTF(PDB_BOOT, ("Entered pmap_bootstrap.\n"));
698 698
699 cache_setup_funcs(); 699 cache_setup_funcs();
700 700
701 /* 701 /*
702 * Calculate kernel size. 702 * Calculate kernel size.
703 */ 703 */
704 ktext = kernelstart; 704 ktext = kernelstart;
705 ktextp = pmap_kextract(ktext); 705 ktextp = pmap_kextract(ktext);
706 ektext = roundup((vaddr_t)etext, PAGE_SIZE_4M); 706 ektext = roundup((vaddr_t)etext, PAGE_SIZE_4M);
707 ektextp = roundup(pmap_kextract((vaddr_t)etext), PAGE_SIZE_4M); 707 ektextp = roundup(pmap_kextract((vaddr_t)etext), PAGE_SIZE_4M);
708 708
709 kdata = (vaddr_t)data_start; 709 kdata = (vaddr_t)data_start;
710 kdatap = pmap_kextract(kdata); 710 kdatap = pmap_kextract(kdata);
711 ekdata = roundup(kernelend, PAGE_SIZE_4M); 711 ekdata = roundup(kernelend, PAGE_SIZE_4M);
712 ekdatap = roundup(pmap_kextract(kernelend), PAGE_SIZE_4M); 712 ekdatap = roundup(pmap_kextract(kernelend), PAGE_SIZE_4M);
713 713
714 BDPRINTF(PDB_BOOT, ("Virtual layout: text %lx-%lx, data %lx-%lx.\n", 714 BDPRINTF(PDB_BOOT, ("Virtual layout: text %lx-%lx, data %lx-%lx.\n",
715 ktext, ektext, kdata, ekdata)); 715 ktext, ektext, kdata, ekdata));
716 BDPRINTF(PDB_BOOT, ("Physical layout: text %lx-%lx, data %lx-%lx.\n", 716 BDPRINTF(PDB_BOOT, ("Physical layout: text %lx-%lx, data %lx-%lx.\n",
717 ktextp, ektextp, kdatap, ekdatap)); 717 ktextp, ektextp, kdatap, ekdatap));
718 718
719 /* Initialize bootstrap allocator. */ 719 /* Initialize bootstrap allocator. */
720 kdata_alloc_init(kernelend + 1 * 1024 * 1024, ekdata); 720 kdata_alloc_init(kernelend + 1 * 1024 * 1024, ekdata);
721 721
722 pmap_bootdebug(); 722 pmap_bootdebug();
723 pmap_alloc_bootargs(); 723 pmap_alloc_bootargs();
724 pmap_mp_init(); 724 pmap_mp_init();
725 725
726 /* 726 /*
727 * set machine page size 727 * set machine page size
728 */ 728 */
729 uvmexp.pagesize = NBPG; 729 uvmexp.pagesize = NBPG;
730 uvmexp.ncolors = pmap_calculate_colors(); 730 uvmexp.ncolors = pmap_calculate_colors();
731 uvm_setpagesize(); 731 uvm_setpagesize();
732 732
733 /* 733 /*
734 * Get hold or the message buffer. 734 * Get hold or the message buffer.
735 */ 735 */
736 msgbufp = (struct kern_msgbuf *)(vaddr_t)MSGBUF_VA; 736 msgbufp = (struct kern_msgbuf *)(vaddr_t)MSGBUF_VA;
737/* XXXXX -- increase msgbufsiz for uvmhist printing */ 737/* XXXXX -- increase msgbufsiz for uvmhist printing */
738 msgbufsiz = 4*PAGE_SIZE /* round_page(sizeof(struct msgbuf)) */; 738 msgbufsiz = 4*PAGE_SIZE /* round_page(sizeof(struct msgbuf)) */;
739 BDPRINTF(PDB_BOOT, ("Trying to allocate msgbuf at %lx, size %lx\n", 739 BDPRINTF(PDB_BOOT, ("Trying to allocate msgbuf at %lx, size %lx\n",
740 (long)msgbufp, (long)msgbufsiz)); 740 (long)msgbufp, (long)msgbufsiz));
741 if ((long)msgbufp != 741 if ((long)msgbufp !=
742 (long)(phys_msgbuf = prom_claim_virt((vaddr_t)msgbufp, msgbufsiz))) 742 (long)(phys_msgbuf = prom_claim_virt((vaddr_t)msgbufp, msgbufsiz)))
743 prom_printf( 743 prom_printf(
744 "cannot get msgbuf VA, msgbufp=%p, phys_msgbuf=%lx\n", 744 "cannot get msgbuf VA, msgbufp=%p, phys_msgbuf=%lx\n",
745 (void *)msgbufp, (long)phys_msgbuf); 745 (void *)msgbufp, (long)phys_msgbuf);
746 phys_msgbuf = prom_get_msgbuf(msgbufsiz, MMU_PAGE_ALIGN); 746 phys_msgbuf = prom_get_msgbuf(msgbufsiz, MMU_PAGE_ALIGN);
747 BDPRINTF(PDB_BOOT, 747 BDPRINTF(PDB_BOOT,
748 ("We should have the memory at %lx, let's map it in\n", 748 ("We should have the memory at %lx, let's map it in\n",
749 phys_msgbuf)); 749 phys_msgbuf));
750 if (prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp, 750 if (prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp,
751 -1/* sunos does this */) == -1) { 751 -1/* sunos does this */) == -1) {
752 prom_printf("Failed to map msgbuf\n"); 752 prom_printf("Failed to map msgbuf\n");
753 } else { 753 } else {
754 BDPRINTF(PDB_BOOT, ("msgbuf mapped at %p\n", 754 BDPRINTF(PDB_BOOT, ("msgbuf mapped at %p\n",
755 (void *)msgbufp)); 755 (void *)msgbufp));
756 } 756 }
757 msgbufmapped = 1; /* enable message buffer */ 757 msgbufmapped = 1; /* enable message buffer */
758 initmsgbuf((void *)msgbufp, msgbufsiz); 758 initmsgbuf((void *)msgbufp, msgbufsiz);
759 759
760 /* 760 /*
761 * Find out how much RAM we have installed. 761 * Find out how much RAM we have installed.
762 */ 762 */
763 BDPRINTF(PDB_BOOT, ("pmap_bootstrap: getting phys installed\n")); 763 BDPRINTF(PDB_BOOT, ("pmap_bootstrap: getting phys installed\n"));
764 pmap_read_memlist("/memory", "reg", &prom_memlist, &prom_memlist_size, 764 pmap_read_memlist("/memory", "reg", &prom_memlist, &prom_memlist_size,
765 kdata_alloc); 765 kdata_alloc);
766 phys_installed = prom_memlist; 766 phys_installed = prom_memlist;
767 phys_installed_size = prom_memlist_size / sizeof(*phys_installed); 767 phys_installed_size = prom_memlist_size / sizeof(*phys_installed);
768 768
769 if (pmapdebug & PDB_BOOT1) { 769 if (pmapdebug & PDB_BOOT1) {
770 /* print out mem list */ 770 /* print out mem list */
771 prom_printf("Installed physical memory:\n"); 771 prom_printf("Installed physical memory:\n");
772 for (i = 0; i < phys_installed_size; i++) { 772 for (i = 0; i < phys_installed_size; i++) {
773 prom_printf("memlist start %lx size %lx\n", 773 prom_printf("memlist start %lx size %lx\n",
774 (u_long)phys_installed[i].start, 774 (u_long)phys_installed[i].start,
775 (u_long)phys_installed[i].size); 775 (u_long)phys_installed[i].size);
776 } 776 }
777 } 777 }
778 778
779 BDPRINTF(PDB_BOOT1, ("Calculating physmem:")); 779 BDPRINTF(PDB_BOOT1, ("Calculating physmem:"));
780 for (i = 0; i < phys_installed_size; i++) 780 for (i = 0; i < phys_installed_size; i++)
781 physmem += btoc(phys_installed[i].size); 781 physmem += btoc(phys_installed[i].size);
782 BDPRINTF(PDB_BOOT1, (" result %x or %d pages\n", 782 BDPRINTF(PDB_BOOT1, (" result %x or %d pages\n",
783 (int)physmem, (int)physmem)); 783 (int)physmem, (int)physmem));
784 784
785 /* 785 /*
786 * Calculate approx TSB size. This probably needs tweaking. 786 * Calculate approx TSB size. This probably needs tweaking.
787 */ 787 */
788 if (physmem < btoc(64 * 1024 * 1024)) 788 if (physmem < btoc(64 * 1024 * 1024))
789 tsbsize = 0; 789 tsbsize = 0;
790 else if (physmem < btoc(512 * 1024 * 1024)) 790 else if (physmem < btoc(512 * 1024 * 1024))
791 tsbsize = 1; 791 tsbsize = 1;
792 else 792 else
793 tsbsize = 2; 793 tsbsize = 2;
794 794
795 /* 795 /*
796 * Save the prom translations 796 * Save the prom translations
797 */ 797 */
798 pmap_read_memlist("/virtual-memory", "translations", &prom_memlist, 798 pmap_read_memlist("/virtual-memory", "translations", &prom_memlist,
799 &prom_memlist_size, kdata_alloc); 799 &prom_memlist_size, kdata_alloc);
800 prom_map = prom_memlist; 800 prom_map = prom_memlist;
801 prom_map_size = prom_memlist_size / sizeof(struct prom_map); 801 prom_map_size = prom_memlist_size / sizeof(struct prom_map);
802 802
803 if (pmapdebug & PDB_BOOT) { 803 if (pmapdebug & PDB_BOOT) {
804 /* print out mem list */ 804 /* print out mem list */
805 prom_printf("Prom xlations:\n"); 805 prom_printf("Prom xlations:\n");
806 for (i = 0; i < prom_map_size; i++) { 806 for (i = 0; i < prom_map_size; i++) {
807 prom_printf("start %016lx size %016lx tte %016lx\n", 807 prom_printf("start %016lx size %016lx tte %016lx\n",
808 (u_long)prom_map[i].vstart, 808 (u_long)prom_map[i].vstart,
809 (u_long)prom_map[i].vsize, 809 (u_long)prom_map[i].vsize,
810 (u_long)prom_map[i].tte); 810 (u_long)prom_map[i].tte);
811 } 811 }
812 prom_printf("End of prom xlations\n"); 812 prom_printf("End of prom xlations\n");
813 } 813 }
814 814
815 /* 815 /*
816 * Here's a quick in-lined reverse bubble sort. It gets rid of 816 * Here's a quick in-lined reverse bubble sort. It gets rid of
817 * any translations inside the kernel data VA range. 817 * any translations inside the kernel data VA range.
818 */ 818 */
819 for (i = 0; i < prom_map_size; i++) { 819 for (i = 0; i < prom_map_size; i++) {
820 for (j = i; j < prom_map_size; j++) { 820 for (j = i; j < prom_map_size; j++) {
821 if (prom_map[j].vstart > prom_map[i].vstart) { 821 if (prom_map[j].vstart > prom_map[i].vstart) {
822 struct prom_map tmp; 822 struct prom_map tmp;
823 823
824 tmp = prom_map[i]; 824 tmp = prom_map[i];
825 prom_map[i] = prom_map[j]; 825 prom_map[i] = prom_map[j];
826 prom_map[j] = tmp; 826 prom_map[j] = tmp;
827 } 827 }
828 } 828 }
829 } 829 }
830 if (pmapdebug & PDB_BOOT) { 830 if (pmapdebug & PDB_BOOT) {
831 /* print out mem list */ 831 /* print out mem list */
832 prom_printf("Prom xlations:\n"); 832 prom_printf("Prom xlations:\n");
833 for (i = 0; i < prom_map_size; i++) { 833 for (i = 0; i < prom_map_size; i++) {
834 prom_printf("start %016lx size %016lx tte %016lx\n", 834 prom_printf("start %016lx size %016lx tte %016lx\n",
835 (u_long)prom_map[i].vstart, 835 (u_long)prom_map[i].vstart,
836 (u_long)prom_map[i].vsize, 836 (u_long)prom_map[i].vsize,
837 (u_long)prom_map[i].tte); 837 (u_long)prom_map[i].tte);
838 } 838 }
839 prom_printf("End of prom xlations\n"); 839 prom_printf("End of prom xlations\n");
840 } 840 }
841 841
842 /* 842 /*
843 * Allocate a ncpu*64KB page for the cpu_info & stack structure now. 843 * Allocate a ncpu*64KB page for the cpu_info & stack structure now.
844 */ 844 */
845 cpu0paddr = prom_alloc_phys(8 * PAGE_SIZE * sparc_ncpus, 8 * PAGE_SIZE); 845 cpu0paddr = prom_alloc_phys(8 * PAGE_SIZE * sparc_ncpus, 8 * PAGE_SIZE);
846 if (cpu0paddr == 0) { 846 if (cpu0paddr == 0) {
847 prom_printf("Cannot allocate cpu_infos\n"); 847 prom_printf("Cannot allocate cpu_infos\n");
848 prom_halt(); 848 prom_halt();
849 } 849 }
850 850
851 /* 851 /*
852 * Now the kernel text segment is in its final location we can try to 852 * Now the kernel text segment is in its final location we can try to
853 * find out how much memory really is free. 853 * find out how much memory really is free.
854 */ 854 */
855 pmap_read_memlist("/memory", "available", &prom_memlist, 855 pmap_read_memlist("/memory", "available", &prom_memlist,
856 &prom_memlist_size, kdata_alloc); 856 &prom_memlist_size, kdata_alloc);
857 orig = prom_memlist; 857 orig = prom_memlist;
858 sz = prom_memlist_size; 858 sz = prom_memlist_size;
859 pcnt = prom_memlist_size / sizeof(*orig); 859 pcnt = prom_memlist_size / sizeof(*orig);
860 860
861 BDPRINTF(PDB_BOOT1, ("Available physical memory:\n")); 861 BDPRINTF(PDB_BOOT1, ("Available physical memory:\n"));
862 avail = (struct mem_region*)kdata_alloc(sz, sizeof(uint64_t)); 862 avail = (struct mem_region*)kdata_alloc(sz, sizeof(uint64_t));
863 for (i = 0; i < pcnt; i++) { 863 for (i = 0; i < pcnt; i++) {
864 avail[i] = orig[i]; 864 avail[i] = orig[i];
865 BDPRINTF(PDB_BOOT1, ("memlist start %lx size %lx\n", 865 BDPRINTF(PDB_BOOT1, ("memlist start %lx size %lx\n",
866 (u_long)orig[i].start, 866 (u_long)orig[i].start,
867 (u_long)orig[i].size)); 867 (u_long)orig[i].size));
868 } 868 }
869 BDPRINTF(PDB_BOOT1, ("End of available physical memory\n")); 869 BDPRINTF(PDB_BOOT1, ("End of available physical memory\n"));
870 870
871 BDPRINTF(PDB_BOOT, ("ktext %08lx[%08lx] - %08lx[%08lx] : " 871 BDPRINTF(PDB_BOOT, ("ktext %08lx[%08lx] - %08lx[%08lx] : "
872 "kdata %08lx[%08lx] - %08lx[%08lx]\n", 872 "kdata %08lx[%08lx] - %08lx[%08lx]\n",
873 (u_long)ktext, (u_long)ktextp, 873 (u_long)ktext, (u_long)ktextp,
874 (u_long)ektext, (u_long)ektextp, 874 (u_long)ektext, (u_long)ektextp,
875 (u_long)kdata, (u_long)kdatap, 875 (u_long)kdata, (u_long)kdatap,
876 (u_long)ekdata, (u_long)ekdatap)); 876 (u_long)ekdata, (u_long)ekdatap));
877 if (pmapdebug & PDB_BOOT1) { 877 if (pmapdebug & PDB_BOOT1) {
878 /* print out mem list */ 878 /* print out mem list */
879 prom_printf("Available %lx physical memory before cleanup:\n", 879 prom_printf("Available %lx physical memory before cleanup:\n",
880 (u_long)avail); 880 (u_long)avail);
881 for (i = 0; i < pcnt; i++) { 881 for (i = 0; i < pcnt; i++) {
882 prom_printf("memlist start %lx size %lx\n", 882 prom_printf("memlist start %lx size %lx\n",
883 (u_long)avail[i].start, 883 (u_long)avail[i].start,
884 (u_long)avail[i].size); 884 (u_long)avail[i].size);
885 } 885 }
886 prom_printf("End of available physical memory before cleanup\n"); 886 prom_printf("End of available physical memory before cleanup\n");
887 prom_printf("kernel physical text size %08lx - %08lx\n", 887 prom_printf("kernel physical text size %08lx - %08lx\n",
888 (u_long)ktextp, (u_long)ektextp); 888 (u_long)ktextp, (u_long)ektextp);
889 prom_printf("kernel physical data size %08lx - %08lx\n", 889 prom_printf("kernel physical data size %08lx - %08lx\n",
890 (u_long)kdatap, (u_long)ekdatap); 890 (u_long)kdatap, (u_long)ekdatap);
891 } 891 }
892 892
893 /* 893 /*
894 * Here's a another quick in-lined bubble sort. 894 * Here's a another quick in-lined bubble sort.
895 */ 895 */
896 for (i = 0; i < pcnt; i++) { 896 for (i = 0; i < pcnt; i++) {
897 for (j = i; j < pcnt; j++) { 897 for (j = i; j < pcnt; j++) {
898 if (avail[j].start < avail[i].start) { 898 if (avail[j].start < avail[i].start) {
899 struct mem_region tmp; 899 struct mem_region tmp;
900 tmp = avail[i]; 900 tmp = avail[i];
901 avail[i] = avail[j]; 901 avail[i] = avail[j];
902 avail[j] = tmp; 902 avail[j] = tmp;
903 } 903 }
904 } 904 }
905 } 905 }
906 906
907 /* Throw away page zero if we have it. */ 907 /* Throw away page zero if we have it. */
908 if (avail->start == 0) { 908 if (avail->start == 0) {
909 avail->start += PAGE_SIZE; 909 avail->start += PAGE_SIZE;
910 avail->size -= PAGE_SIZE; 910 avail->size -= PAGE_SIZE;
911 } 911 }
912 912
913 /* 913 /*
914 * Now we need to remove the area we valloc'ed from the available 914 * Now we need to remove the area we valloc'ed from the available
915 * memory lists. (NB: we may have already alloc'ed the entire space). 915 * memory lists. (NB: we may have already alloc'ed the entire space).
916 */ 916 */
917 npgs = 0; 917 npgs = 0;
918 for (mp = avail, i = 0; i < pcnt; i++, mp = &avail[i]) { 918 for (mp = avail, i = 0; i < pcnt; i++, mp = &avail[i]) {
919 /* 919 /*
920 * Now page align the start of the region. 920 * Now page align the start of the region.
921 */ 921 */
922 s = mp->start % PAGE_SIZE; 922 s = mp->start % PAGE_SIZE;
923 if (mp->size >= s) { 923 if (mp->size >= s) {
924 mp->size -= s; 924 mp->size -= s;
925 mp->start += s; 925 mp->start += s;
926 } 926 }
927 /* 927 /*
928 * And now align the size of the region. 928 * And now align the size of the region.
929 */ 929 */
930 mp->size -= mp->size % PAGE_SIZE; 930 mp->size -= mp->size % PAGE_SIZE;
931 /* 931 /*
932 * Check whether some memory is left here. 932 * Check whether some memory is left here.
933 */ 933 */
934 if (mp->size == 0) { 934 if (mp->size == 0) {
935 memcpy(mp, mp + 1, 935 memcpy(mp, mp + 1,
936 (pcnt - (mp - avail)) * sizeof *mp); 936 (pcnt - (mp - avail)) * sizeof *mp);
937 pcnt--; 937 pcnt--;
938 mp--; 938 mp--;
939 continue; 939 continue;
940 } 940 }
941 s = mp->start; 941 s = mp->start;
942 sz = mp->size; 942 sz = mp->size;
943 npgs += btoc(sz); 943 npgs += btoc(sz);
944 for (mp1 = avail; mp1 < mp; mp1++) 944 for (mp1 = avail; mp1 < mp; mp1++)
945 if (s < mp1->start) 945 if (s < mp1->start)
946 break; 946 break;
947 if (mp1 < mp) { 947 if (mp1 < mp) {
948 memcpy(mp1 + 1, mp1, (char *)mp - (char *)mp1); 948 memcpy(mp1 + 1, mp1, (char *)mp - (char *)mp1);
949 mp1->start = s; 949 mp1->start = s;
950 mp1->size = sz; 950 mp1->size = sz;
951 } 951 }
952#ifdef DEBUG 952#ifdef DEBUG
953/* Clear all memory we give to the VM system. I want to make sure 953/* Clear all memory we give to the VM system. I want to make sure
954 * the PROM isn't using it for something, so this should break the PROM. 954 * the PROM isn't using it for something, so this should break the PROM.
955 */ 955 */
956 956
957/* Calling pmap_zero_page() at this point also hangs some machines 957/* Calling pmap_zero_page() at this point also hangs some machines
958 * so don't do it at all. -- pk 26/02/2002 958 * so don't do it at all. -- pk 26/02/2002
959 */ 959 */
960#if 0 960#if 0
961 { 961 {
962 paddr_t p; 962 paddr_t p;
963 for (p = mp->start; p < mp->start+mp->size; 963 for (p = mp->start; p < mp->start+mp->size;
964 p += PAGE_SIZE) 964 p += PAGE_SIZE)
965 pmap_zero_page(p); 965 pmap_zero_page(p);
966 } 966 }
967#endif 967#endif
968#endif /* DEBUG */ 968#endif /* DEBUG */
969 /* 969 /*
970 * In future we should be able to specify both allocated 970 * In future we should be able to specify both allocated
971 * and free. 971 * and free.
972 */ 972 */
973 BDPRINTF(PDB_BOOT1, ("uvm_page_physload(%lx, %lx)\n", 973 BDPRINTF(PDB_BOOT1, ("uvm_page_physload(%lx, %lx)\n",
974 (long)mp->start, 974 (long)mp->start,
975 (long)(mp->start + mp->size))); 975 (long)(mp->start + mp->size)));
976 uvm_page_physload( 976 uvm_page_physload(
977 atop(mp->start), 977 atop(mp->start),
978 atop(mp->start+mp->size), 978 atop(mp->start+mp->size),
979 atop(mp->start), 979 atop(mp->start),
980 atop(mp->start+mp->size), 980 atop(mp->start+mp->size),
981 VM_FREELIST_DEFAULT); 981 VM_FREELIST_DEFAULT);
982 } 982 }
983 983
984 if (pmapdebug & PDB_BOOT) { 984 if (pmapdebug & PDB_BOOT) {
985 /* print out mem list */ 985 /* print out mem list */
986 prom_printf("Available physical memory after cleanup:\n"); 986 prom_printf("Available physical memory after cleanup:\n");
987 for (i = 0; i < pcnt; i++) { 987 for (i = 0; i < pcnt; i++) {
988 prom_printf("avail start %lx size %lx\n", 988 prom_printf("avail start %lx size %lx\n",
989 (long)avail[i].start, (long)avail[i].size); 989 (long)avail[i].start, (long)avail[i].size);
990 } 990 }
991 prom_printf("End of available physical memory after cleanup\n"); 991 prom_printf("End of available physical memory after cleanup\n");
992 } 992 }
993 993
994 /* 994 /*
995 * Allocate and clear out pmap_kernel()->pm_segs[] 995 * Allocate and clear out pmap_kernel()->pm_segs[]
996 */ 996 */
997 pmap_kernel()->pm_refs = 1; 997 pmap_kernel()->pm_refs = 1;
998 memset(&pmap_kernel()->pm_ctx, 0, sizeof(pmap_kernel()->pm_ctx)); 998 memset(&pmap_kernel()->pm_ctx, 0, sizeof(pmap_kernel()->pm_ctx));
999 999
1000 /* Throw away page zero */ 1000 /* Throw away page zero */
1001 do { 1001 do {
1002 pmap_get_page(&newp); 1002 pmap_get_page(&newp);
1003 } while (!newp); 1003 } while (!newp);
1004 pmap_kernel()->pm_segs=(paddr_t *)(u_long)newp; 1004 pmap_kernel()->pm_segs=(paddr_t *)(u_long)newp;
1005 pmap_kernel()->pm_physaddr = newp; 1005 pmap_kernel()->pm_physaddr = newp;
1006 1006
1007 /* 1007 /*
1008 * finish filling out kernel pmap. 1008 * finish filling out kernel pmap.
1009 */ 1009 */
1010 1010
1011 BDPRINTF(PDB_BOOT, ("pmap_kernel()->pm_physaddr = %lx\n", 1011 BDPRINTF(PDB_BOOT, ("pmap_kernel()->pm_physaddr = %lx\n",
1012 (long)pmap_kernel()->pm_physaddr)); 1012 (long)pmap_kernel()->pm_physaddr));
1013 /* 1013 /*
1014 * Tell pmap about our mesgbuf -- Hope this works already 1014 * Tell pmap about our mesgbuf -- Hope this works already
1015 */ 1015 */
1016 BDPRINTF(PDB_BOOT1, ("Calling consinit()\n")); 1016 BDPRINTF(PDB_BOOT1, ("Calling consinit()\n"));
1017 if (pmapdebug & PDB_BOOT1) 1017 if (pmapdebug & PDB_BOOT1)
1018 consinit(); 1018 consinit();
1019 BDPRINTF(PDB_BOOT1, ("Inserting mesgbuf into pmap_kernel()\n")); 1019 BDPRINTF(PDB_BOOT1, ("Inserting mesgbuf into pmap_kernel()\n"));
1020 /* it's not safe to call pmap_enter so we need to do this ourselves */ 1020 /* it's not safe to call pmap_enter so we need to do this ourselves */
1021 va = (vaddr_t)msgbufp; 1021 va = (vaddr_t)msgbufp;
1022 prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp, -1); 1022 prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp, -1);
1023 while (msgbufsiz) { 1023 while (msgbufsiz) {
1024 data = TSB_DATA(0 /* global */, 1024 data = TSB_DATA(0 /* global */,
1025 PGSZ_8K, 1025 PGSZ_8K,
1026 phys_msgbuf, 1026 phys_msgbuf,
1027 1 /* priv */, 1027 1 /* priv */,
1028 1 /* Write */, 1028 1 /* Write */,
@@ -1581,2009 +1581,2005 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v @@ -1581,2009 +1581,2005 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
1581 KASSERT(va < kdata || va > ekdata); 1581 KASSERT(va < kdata || va > ekdata);
1582 1582
1583 /* 1583 /*
1584 * Construct the TTE. 1584 * Construct the TTE.
1585 */ 1585 */
1586 1586
1587 ENTER_STAT(unmanaged); 1587 ENTER_STAT(unmanaged);
1588 if (pa & (PMAP_NVC|PMAP_NC)) { 1588 if (pa & (PMAP_NVC|PMAP_NC)) {
1589 ENTER_STAT(ci); 1589 ENTER_STAT(ci);
1590 } 1590 }
1591 1591
1592 tte.data = TSB_DATA(0, PGSZ_8K, pa, 1 /* Privileged */, 1592 tte.data = TSB_DATA(0, PGSZ_8K, pa, 1 /* Privileged */,
1593 (VM_PROT_WRITE & prot), 1593 (VM_PROT_WRITE & prot),
1594 !(pa & PMAP_NC), pa & (PMAP_NVC), 1, 0); 1594 !(pa & PMAP_NC), pa & (PMAP_NVC), 1, 0);
1595 /* We don't track mod/ref here. */ 1595 /* We don't track mod/ref here. */
1596 if (prot & VM_PROT_WRITE) 1596 if (prot & VM_PROT_WRITE)
1597 tte.data |= TLB_REAL_W|TLB_W; 1597 tte.data |= TLB_REAL_W|TLB_W;
1598 if (prot & VM_PROT_EXECUTE) 1598 if (prot & VM_PROT_EXECUTE)
1599 tte.data |= TLB_EXEC; 1599 tte.data |= TLB_EXEC;
1600 tte.data |= TLB_TSB_LOCK; /* wired */ 1600 tte.data |= TLB_TSB_LOCK; /* wired */
1601 ptp = 0; 1601 ptp = 0;
1602 1602
1603 retry: 1603 retry:
1604 i = pseg_set(pm, va, tte.data, ptp); 1604 i = pseg_set(pm, va, tte.data, ptp);
1605 if (i & 1) { 1605 if (i & 1) {
1606 KASSERT((i & 4) == 0); 1606 KASSERT((i & 4) == 0);
1607 ptp = 0; 1607 ptp = 0;
1608 if (!pmap_get_page(&ptp)) 1608 if (!pmap_get_page(&ptp))
1609 panic("pmap_kenter_pa: no pages"); 1609 panic("pmap_kenter_pa: no pages");
1610 ENTER_STAT(ptpneeded); 1610 ENTER_STAT(ptpneeded);
1611 goto retry; 1611 goto retry;
1612 } 1612 }
1613 if (ptp && i == 0) { 1613 if (ptp && i == 0) {
1614 /* We allocated a spare page but didn't use it. Free it. */ 1614 /* We allocated a spare page but didn't use it. Free it. */
1615 printf("pmap_kenter_pa: freeing unused page %llx\n", 1615 printf("pmap_kenter_pa: freeing unused page %llx\n",
1616 (long long)ptp); 1616 (long long)ptp);
1617 pmap_free_page_noflush(ptp); 1617 pmap_free_page_noflush(ptp);
1618 } 1618 }
1619#ifdef PMAP_DEBUG 1619#ifdef PMAP_DEBUG
1620 i = ptelookup_va(va); 1620 i = ptelookup_va(va);
1621 if (pmapdebug & PDB_ENTER) 1621 if (pmapdebug & PDB_ENTER)
1622 prom_printf("pmap_kenter_pa: va=%08x data=%08x:%08x " 1622 prom_printf("pmap_kenter_pa: va=%08x data=%08x:%08x "
1623 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32), 1623 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32),
1624 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]); 1624 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]);
1625 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) { 1625 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) {
1626 prom_printf("pmap_kenter_pa: evicting entry tag=%x:%08x " 1626 prom_printf("pmap_kenter_pa: evicting entry tag=%x:%08x "
1627 "data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1627 "data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1628 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag, 1628 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag,
1629 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, 1629 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data,
1630 i, &curcpu()->ci_tsb_dmmu[i]); 1630 i, &curcpu()->ci_tsb_dmmu[i]);
1631 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1631 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1632 va, (int)(tte.data>>32), (int)tte.data, i, 1632 va, (int)(tte.data>>32), (int)tte.data, i,
1633 &curcpu()->ci_tsb_dmmu[i]); 1633 &curcpu()->ci_tsb_dmmu[i]);
1634 } 1634 }
1635#endif 1635#endif
1636} 1636}
1637 1637
1638/* 1638/*
1639 * pmap_kremove: [ INTERFACE ] 1639 * pmap_kremove: [ INTERFACE ]
1640 * 1640 *
1641 * Remove a mapping entered with pmap_kenter_pa() starting at va, 1641 * Remove a mapping entered with pmap_kenter_pa() starting at va,
1642 * for size bytes (assumed to be page rounded). 1642 * for size bytes (assumed to be page rounded).
1643 */ 1643 */
1644void 1644void
1645pmap_kremove(vaddr_t va, vsize_t size) 1645pmap_kremove(vaddr_t va, vsize_t size)
1646{ 1646{
1647 struct pmap *pm = pmap_kernel(); 1647 struct pmap *pm = pmap_kernel();
1648 int64_t data; 1648 int64_t data;
1649 paddr_t pa; 1649 paddr_t pa;
1650 int rv; 1650 int rv;
1651 bool flush = FALSE; 1651 bool flush = FALSE;
1652 1652
1653 KASSERT(va < INTSTACK || va > EINTSTACK); 1653 KASSERT(va < INTSTACK || va > EINTSTACK);
1654 KASSERT(va < kdata || va > ekdata); 1654 KASSERT(va < kdata || va > ekdata);
1655 1655
1656 DPRINTF(PDB_DEMAP, ("pmap_kremove: start 0x%lx size %lx\n", va, size)); 1656 DPRINTF(PDB_DEMAP, ("pmap_kremove: start 0x%lx size %lx\n", va, size));
1657 for (; size >= PAGE_SIZE; va += PAGE_SIZE, size -= PAGE_SIZE) { 1657 for (; size >= PAGE_SIZE; va += PAGE_SIZE, size -= PAGE_SIZE) {
1658 1658
1659#ifdef DIAGNOSTIC 1659#ifdef DIAGNOSTIC
1660 /* 1660 /*
1661 * Is this part of the permanent 4MB mapping? 1661 * Is this part of the permanent 4MB mapping?
1662 */ 1662 */
1663 if (va >= ktext && va < roundup(ekdata, 4*MEG)) 1663 if (va >= ktext && va < roundup(ekdata, 4*MEG))
1664 panic("pmap_kremove: va=%08x in locked TLB", (u_int)va); 1664 panic("pmap_kremove: va=%08x in locked TLB", (u_int)va);
1665#endif 1665#endif
1666 1666
1667 data = pseg_get(pm, va); 1667 data = pseg_get(pm, va);
1668 if ((data & TLB_V) == 0) { 1668 if ((data & TLB_V) == 0) {
1669 continue; 1669 continue;
1670 } 1670 }
1671 1671
1672 flush = TRUE; 1672 flush = TRUE;
1673 pa = data & TLB_PA_MASK; 1673 pa = data & TLB_PA_MASK;
1674 1674
1675 /* 1675 /*
1676 * We need to flip the valid bit and 1676 * We need to flip the valid bit and
1677 * clear the access statistics. 1677 * clear the access statistics.
1678 */ 1678 */
1679 1679
1680 rv = pseg_set(pm, va, 0, 0); 1680 rv = pseg_set(pm, va, 0, 0);
1681 if (rv & 1) 1681 if (rv & 1)
1682 panic("pmap_kremove: pseg_set needs spare, rv=%d\n", 1682 panic("pmap_kremove: pseg_set needs spare, rv=%d\n",
1683 rv); 1683 rv);
1684 DPRINTF(PDB_DEMAP, ("pmap_kremove: seg %x pdir %x pte %x\n", 1684 DPRINTF(PDB_DEMAP, ("pmap_kremove: seg %x pdir %x pte %x\n",
1685 (int)va_to_seg(va), (int)va_to_dir(va), 1685 (int)va_to_seg(va), (int)va_to_dir(va),
1686 (int)va_to_pte(va))); 1686 (int)va_to_pte(va)));
1687 REMOVE_STAT(removes); 1687 REMOVE_STAT(removes);
1688 1688
1689 tsb_invalidate(va, pm); 1689 tsb_invalidate(va, pm);
1690 REMOVE_STAT(tflushes); 1690 REMOVE_STAT(tflushes);
1691 1691
1692 /* 1692 /*
1693 * Here we assume nothing can get into the TLB 1693 * Here we assume nothing can get into the TLB
1694 * unless it has a PTE. 1694 * unless it has a PTE.
1695 */ 1695 */
1696 1696
1697 tlb_flush_pte(va, pm); 1697 tlb_flush_pte(va, pm);
1698 dcache_flush_page_all(pa); 1698 dcache_flush_page_all(pa);
1699 } 1699 }
1700 if (flush) 1700 if (flush)
1701 REMOVE_STAT(flushes); 1701 REMOVE_STAT(flushes);
1702} 1702}
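/*
 * A minimal usage sketch of the pmap_kenter_pa()/pmap_kremove() pair above
 * (illustrative only, not part of pmap.c, and assuming the four-argument
 * pmap_kenter_pa(va, pa, prot, flags) signature): the caller supplies a
 * page-aligned kernel VA and a physical address, gets an unmanaged wired
 * mapping with no mod/ref tracking, and later tears it down with a
 * page-rounded size.
 */
#if 0	/* sketch, not compiled */
static void
example_kenter_kremove(vaddr_t kva, paddr_t pa)
{

	pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
	/* ... use the mapping at kva ... */
	pmap_kremove(kva, PAGE_SIZE);
}
#endif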
1703 1703
1704/* 1704/*
1705 * Insert physical page at pa into the given pmap at virtual address va. 1705 * Insert physical page at pa into the given pmap at virtual address va.
1706 * Supports 64-bit pa so we can map I/O space. 1706 * Supports 64-bit pa so we can map I/O space.
1707 */ 1707 */
1708 1708
1709int 1709int
1710pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1710pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1711{ 1711{
1712 pte_t tte; 1712 pte_t tte;
1713 int64_t data; 1713 int64_t data;
1714 paddr_t opa = 0, ptp; /* XXX: gcc */ 1714 paddr_t opa = 0, ptp; /* XXX: gcc */
1715 pv_entry_t pvh, npv = NULL, freepv; 1715 pv_entry_t pvh, npv = NULL, freepv;
1716 struct vm_page *pg, *opg, *ptpg; 1716 struct vm_page *pg, *opg, *ptpg;
1717 int s, i, uncached = 0, error = 0; 1717 int s, i, uncached = 0, error = 0;
1718 int size = PGSZ_8K; /* PMAP_SZ_TO_TTE(pa); */ 1718 int size = PGSZ_8K; /* PMAP_SZ_TO_TTE(pa); */
1719 bool wired = (flags & PMAP_WIRED) != 0; 1719 bool wired = (flags & PMAP_WIRED) != 0;
1720 bool wasmapped = FALSE; 1720 bool wasmapped = FALSE;
1721 bool dopv = TRUE; 1721 bool dopv = TRUE;
1722 1722
1723 /* 1723 /*
1724 * Is this part of the permanent mappings? 1724 * Is this part of the permanent mappings?
1725 */ 1725 */
1726 KASSERT(pm != pmap_kernel() || va < INTSTACK || va > EINTSTACK); 1726 KASSERT(pm != pmap_kernel() || va < INTSTACK || va > EINTSTACK);
1727 KASSERT(pm != pmap_kernel() || va < kdata || va > ekdata); 1727 KASSERT(pm != pmap_kernel() || va < kdata || va > ekdata);
1728 1728
1729 /* Grab a spare PV. */ 1729 /* Grab a spare PV. */
1730 freepv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT); 1730 freepv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
1731 if (__predict_false(freepv == NULL)) { 1731 if (__predict_false(freepv == NULL)) {
1732 if (flags & PMAP_CANFAIL) 1732 if (flags & PMAP_CANFAIL)
1733 return (ENOMEM); 1733 return (ENOMEM);
1734 panic("pmap_enter: no pv entries available"); 1734 panic("pmap_enter: no pv entries available");
1735 } 1735 }
1736 freepv->pv_next = NULL; 1736 freepv->pv_next = NULL;
1737 1737
1738 /* 1738 /*
1739 * If a mapping at this address already exists, check if we're 1739 * If a mapping at this address already exists, check if we're
1740 * entering the same PA again. If it's different, remove it. 1740 * entering the same PA again. If it's different, remove it.
1741 */ 1741 */
1742 1742
1743 mutex_enter(&pmap_lock); 1743 mutex_enter(&pmap_lock);
1744 data = pseg_get(pm, va); 1744 data = pseg_get(pm, va);
1745 if (data & TLB_V) { 1745 if (data & TLB_V) {
1746 wasmapped = TRUE; 1746 wasmapped = TRUE;
1747 opa = data & TLB_PA_MASK; 1747 opa = data & TLB_PA_MASK;
1748 if (opa != pa) { 1748 if (opa != pa) {
1749 opg = PHYS_TO_VM_PAGE(opa); 1749 opg = PHYS_TO_VM_PAGE(opa);
1750 if (opg != NULL) { 1750 if (opg != NULL) {
1751 npv = pmap_remove_pv(pm, va, opg); 1751 npv = pmap_remove_pv(pm, va, opg);
1752 } 1752 }
1753 } 1753 }
1754 } 1754 }
1755 1755
1756 /* 1756 /*
1757 * Construct the TTE. 1757 * Construct the TTE.
1758 */ 1758 */
1759 pg = PHYS_TO_VM_PAGE(pa); 1759 pg = PHYS_TO_VM_PAGE(pa);
1760 if (pg) { 1760 if (pg) {
1761 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1761 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1762 1762
1763 pvh = &md->mdpg_pvh; 1763 pvh = &md->mdpg_pvh;
1764 uncached = (pvh->pv_va & (PV_ALIAS|PV_NVC)); 1764 uncached = (pvh->pv_va & (PV_ALIAS|PV_NVC));
1765#ifdef DIAGNOSTIC 1765#ifdef DIAGNOSTIC
1766 if ((flags & VM_PROT_ALL) & ~prot) 1766 if ((flags & VM_PROT_ALL) & ~prot)
1767 panic("pmap_enter: access_type exceeds prot"); 1767 panic("pmap_enter: access_type exceeds prot");
1768#endif 1768#endif
1769 /* 1769 /*
1770 * If we don't have the traphandler do it, 1770 * If we don't have the traphandler do it,
1771 * set the ref/mod bits now. 1771 * set the ref/mod bits now.
1772 */ 1772 */
1773 if (flags & VM_PROT_ALL) 1773 if (flags & VM_PROT_ALL)
1774 pvh->pv_va |= PV_REF; 1774 pvh->pv_va |= PV_REF;
1775 if (flags & VM_PROT_WRITE) 1775 if (flags & VM_PROT_WRITE)
1776 pvh->pv_va |= PV_MOD; 1776 pvh->pv_va |= PV_MOD;
1777 1777
1778 /* 1778 /*
1779 * make sure we have a pv entry ready if we need one. 1779 * make sure we have a pv entry ready if we need one.
1780 */ 1780 */
1781 if (pvh->pv_pmap == NULL || (wasmapped && opa == pa)) { 1781 if (pvh->pv_pmap == NULL || (wasmapped && opa == pa)) {
1782 if (npv != NULL) { 1782 if (npv != NULL) {
1783 /* free it */ 1783 /* free it */
1784 npv->pv_next = freepv; 1784 npv->pv_next = freepv;
1785 freepv = npv; 1785 freepv = npv;
1786 npv = NULL; 1786 npv = NULL;
1787 } 1787 }
1788 if (wasmapped && opa == pa) { 1788 if (wasmapped && opa == pa) {
1789 dopv = FALSE; 1789 dopv = FALSE;
1790 } 1790 }
1791 } else if (npv == NULL) { 1791 } else if (npv == NULL) {
1792 /* use the pre-allocated pv */ 1792 /* use the pre-allocated pv */
1793 npv = freepv; 1793 npv = freepv;
1794 freepv = freepv->pv_next; 1794 freepv = freepv->pv_next;
1795 } 1795 }
1796 ENTER_STAT(managed); 1796 ENTER_STAT(managed);
1797 } else { 1797 } else {
1798 ENTER_STAT(unmanaged); 1798 ENTER_STAT(unmanaged);
1799 dopv = FALSE; 1799 dopv = FALSE;
1800 if (npv != NULL) { 1800 if (npv != NULL) {
1801 /* free it */ 1801 /* free it */
1802 npv->pv_next = freepv; 1802 npv->pv_next = freepv;
1803 freepv = npv; 1803 freepv = npv;
1804 npv = NULL; 1804 npv = NULL;
1805 } 1805 }
1806 } 1806 }
1807 1807
1808#ifndef NO_VCACHE 1808#ifndef NO_VCACHE
1809 if (pa & PMAP_NVC) 1809 if (pa & PMAP_NVC)
1810#endif 1810#endif
1811 uncached = 1; 1811 uncached = 1;
1812 if (uncached) { 1812 if (uncached) {
1813 ENTER_STAT(ci); 1813 ENTER_STAT(ci);
1814 } 1814 }
1815 tte.data = TSB_DATA(0, size, pa, pm == pmap_kernel(), 1815 tte.data = TSB_DATA(0, size, pa, pm == pmap_kernel(),
1816 flags & VM_PROT_WRITE, !(pa & PMAP_NC), 1816 flags & VM_PROT_WRITE, !(pa & PMAP_NC),
1817 uncached, 1, pa & PMAP_LITTLE); 1817 uncached, 1, pa & PMAP_LITTLE);
1818#ifdef HWREF 1818#ifdef HWREF
1819 if (prot & VM_PROT_WRITE) 1819 if (prot & VM_PROT_WRITE)
1820 tte.data |= TLB_REAL_W; 1820 tte.data |= TLB_REAL_W;
1821 if (prot & VM_PROT_EXECUTE) 1821 if (prot & VM_PROT_EXECUTE)
1822 tte.data |= TLB_EXEC; 1822 tte.data |= TLB_EXEC;
1823#else 1823#else
1824 /* If it needs ref accounting do nothing. */ 1824 /* If it needs ref accounting do nothing. */
1825 if (!(flags & VM_PROT_READ)) { 1825 if (!(flags & VM_PROT_READ)) {
1826 mutex_exit(&pmap_lock); 1826 mutex_exit(&pmap_lock);
1827 goto out; 1827 goto out;
1828 } 1828 }
1829#endif 1829#endif
1830 if (flags & VM_PROT_EXECUTE) { 1830 if (flags & VM_PROT_EXECUTE) {
1831 if ((flags & (VM_PROT_READ|VM_PROT_WRITE)) == 0) 1831 if ((flags & (VM_PROT_READ|VM_PROT_WRITE)) == 0)
1832 tte.data |= TLB_EXEC_ONLY|TLB_EXEC; 1832 tte.data |= TLB_EXEC_ONLY|TLB_EXEC;
1833 else 1833 else
1834 tte.data |= TLB_EXEC; 1834 tte.data |= TLB_EXEC;
1835 } 1835 }
1836 if (wired) 1836 if (wired)
1837 tte.data |= TLB_TSB_LOCK; 1837 tte.data |= TLB_TSB_LOCK;
1838 ptp = 0; 1838 ptp = 0;
1839 1839
1840 retry: 1840 retry:
1841 i = pseg_set(pm, va, tte.data, ptp); 1841 i = pseg_set(pm, va, tte.data, ptp);
1842 if (i & 4) { 1842 if (i & 4) {
1843 /* ptp used as L3 */ 1843 /* ptp used as L3 */
1844 KASSERT(ptp != 0); 1844 KASSERT(ptp != 0);
1845 KASSERT((i & 3) == 0); 1845 KASSERT((i & 3) == 0);
1846 ptpg = PHYS_TO_VM_PAGE(ptp); 1846 ptpg = PHYS_TO_VM_PAGE(ptp);
1847 if (ptpg) { 1847 if (ptpg) {
1848 ptpg->offset = (uint64_t)va & (0xfffffLL << 23); 1848 ptpg->offset = (uint64_t)va & (0xfffffLL << 23);
1849 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue); 1849 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
1850 } else { 1850 } else {
1851 KASSERT(pm == pmap_kernel()); 1851 KASSERT(pm == pmap_kernel());
1852 } 1852 }
1853 } 1853 }
1854 if (i & 2) { 1854 if (i & 2) {
1855 /* ptp used as L2 */ 1855 /* ptp used as L2 */
1856 KASSERT(ptp != 0); 1856 KASSERT(ptp != 0);
1857 KASSERT((i & 4) == 0); 1857 KASSERT((i & 4) == 0);
1858 ptpg = PHYS_TO_VM_PAGE(ptp); 1858 ptpg = PHYS_TO_VM_PAGE(ptp);
1859 if (ptpg) { 1859 if (ptpg) {
1860 ptpg->offset = (((uint64_t)va >> 43) & 0x3ffLL) << 13; 1860 ptpg->offset = (((uint64_t)va >> 43) & 0x3ffLL) << 13;
1861 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue); 1861 TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
1862 } else { 1862 } else {
1863 KASSERT(pm == pmap_kernel()); 1863 KASSERT(pm == pmap_kernel());
1864 } 1864 }
1865 } 1865 }
1866 if (i & 1) { 1866 if (i & 1) {
1867 KASSERT((i & 4) == 0); 1867 KASSERT((i & 4) == 0);
1868 ptp = 0; 1868 ptp = 0;
1869 if (!pmap_get_page(&ptp)) { 1869 if (!pmap_get_page(&ptp)) {
1870 mutex_exit(&pmap_lock); 1870 mutex_exit(&pmap_lock);
1871 if (flags & PMAP_CANFAIL) { 1871 if (flags & PMAP_CANFAIL) {
1872 if (npv != NULL) { 1872 if (npv != NULL) {
1873 /* free it */ 1873 /* free it */
1874 npv->pv_next = freepv; 1874 npv->pv_next = freepv;
1875 freepv = npv; 1875 freepv = npv;
1876 } 1876 }
1877 error = ENOMEM; 1877 error = ENOMEM;
1878 goto out; 1878 goto out;
1879 } else { 1879 } else {
1880 panic("pmap_enter: no pages"); 1880 panic("pmap_enter: no pages");
1881 } 1881 }
1882 } 1882 }
1883 ENTER_STAT(ptpneeded); 1883 ENTER_STAT(ptpneeded);
1884 goto retry; 1884 goto retry;
1885 } 1885 }
1886 if (ptp && i == 0) { 1886 if (ptp && i == 0) {
1887 /* We allocated a spare page but didn't use it. Free it. */ 1887 /* We allocated a spare page but didn't use it. Free it. */
1888 printf("pmap_enter: freeing unused page %llx\n", 1888 printf("pmap_enter: freeing unused page %llx\n",
1889 (long long)ptp); 1889 (long long)ptp);
1890 pmap_free_page_noflush(ptp); 1890 pmap_free_page_noflush(ptp);
1891 } 1891 }
1892 if (dopv) { 1892 if (dopv) {
1893 pmap_enter_pv(pm, va, pa, pg, npv); 1893 pmap_enter_pv(pm, va, pa, pg, npv);
1894 } 1894 }
1895 1895
1896 mutex_exit(&pmap_lock); 1896 mutex_exit(&pmap_lock);
1897#ifdef PMAP_DEBUG 1897#ifdef PMAP_DEBUG
1898 i = ptelookup_va(va); 1898 i = ptelookup_va(va);
1899 if (pmapdebug & PDB_ENTER) 1899 if (pmapdebug & PDB_ENTER)
1900 prom_printf("pmap_enter: va=%08x data=%08x:%08x " 1900 prom_printf("pmap_enter: va=%08x data=%08x:%08x "
1901 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32), 1901 "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32),
1902 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]); 1902 (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]);
1903 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) { 1903 if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) {
1904 prom_printf("pmap_enter: evicting entry tag=%x:%08x " 1904 prom_printf("pmap_enter: evicting entry tag=%x:%08x "
1905 "data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1905 "data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1906 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag, 1906 (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag,
1907 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, i, 1907 (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, i,
1908 &curcpu()->ci_tsb_dmmu[i]); 1908 &curcpu()->ci_tsb_dmmu[i]);
1909 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n", 1909 prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1910 va, (int)(tte.data>>32), (int)tte.data, i, 1910 va, (int)(tte.data>>32), (int)tte.data, i,
1911 &curcpu()->ci_tsb_dmmu[i]); 1911 &curcpu()->ci_tsb_dmmu[i]);
1912 } 1912 }
1913#endif 1913#endif
1914 1914
1915 if (flags & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) { 1915 if (flags & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) {
1916 1916
1917 /* 1917 /*
1918 * preload the TSB with the new entry, 1918 * preload the TSB with the new entry,
1919 * since we're going to need it immediately anyway. 1919 * since we're going to need it immediately anyway.
1920 */ 1920 */
1921 1921
1922 KASSERT(pmap_ctx(pm)>=0); 1922 KASSERT(pmap_ctx(pm)>=0);
1923 i = ptelookup_va(va); 1923 i = ptelookup_va(va);
1924 tte.tag = TSB_TAG(0, pmap_ctx(pm), va); 1924 tte.tag = TSB_TAG(0, pmap_ctx(pm), va);
1925 s = splhigh(); 1925 s = splhigh();
1926 if (wasmapped && pmap_is_on_mmu(pm)) { 1926 if (wasmapped && pmap_is_on_mmu(pm)) {
1927 tsb_invalidate(va, pm); 1927 tsb_invalidate(va, pm);
1928 } 1928 }
1929 if (flags & (VM_PROT_READ | VM_PROT_WRITE)) { 1929 if (flags & (VM_PROT_READ | VM_PROT_WRITE)) {
1930 curcpu()->ci_tsb_dmmu[i].tag = tte.tag; 1930 curcpu()->ci_tsb_dmmu[i].tag = tte.tag;
1931 __asm volatile("" : : : "memory"); 1931 __asm volatile("" : : : "memory");
1932 curcpu()->ci_tsb_dmmu[i].data = tte.data; 1932 curcpu()->ci_tsb_dmmu[i].data = tte.data;
1933 } 1933 }
1934 if (flags & VM_PROT_EXECUTE) { 1934 if (flags & VM_PROT_EXECUTE) {
1935 curcpu()->ci_tsb_immu[i].tag = tte.tag; 1935 curcpu()->ci_tsb_immu[i].tag = tte.tag;
1936 __asm volatile("" : : : "memory"); 1936 __asm volatile("" : : : "memory");
1937 curcpu()->ci_tsb_immu[i].data = tte.data; 1937 curcpu()->ci_tsb_immu[i].data = tte.data;
1938 } 1938 }
1939 1939
1940 /* 1940 /*
1941 * it's only necessary to flush the TLB if this page was 1941 * it's only necessary to flush the TLB if this page was
1942 * previously mapped, but for some reason it's a lot faster 1942 * previously mapped, but for some reason it's a lot faster
1943 * for the fork+exit microbenchmark if we always do it. 1943 * for the fork+exit microbenchmark if we always do it.
1944 */ 1944 */
1945 1945
1946 KASSERT(pmap_ctx(pm)>=0); 1946 KASSERT(pmap_ctx(pm)>=0);
1947#ifdef MULTIPROCESSOR 1947#ifdef MULTIPROCESSOR
1948 if (wasmapped && pmap_is_on_mmu(pm)) 1948 if (wasmapped && pmap_is_on_mmu(pm))
1949 tlb_flush_pte(va, pm); 1949 tlb_flush_pte(va, pm);
1950 else 1950 else
1951 sp_tlb_flush_pte(va, pmap_ctx(pm)); 1951 sp_tlb_flush_pte(va, pmap_ctx(pm));
1952#else 1952#else
1953 tlb_flush_pte(va, pm); 1953 tlb_flush_pte(va, pm);
1954#endif 1954#endif
1955 splx(s); 1955 splx(s);
1956 } else if (wasmapped && pmap_is_on_mmu(pm)) { 1956 } else if (wasmapped && pmap_is_on_mmu(pm)) {
1957 /* Force reload -- protections may be changed */ 1957 /* Force reload -- protections may be changed */
1958 KASSERT(pmap_ctx(pm)>=0); 1958 KASSERT(pmap_ctx(pm)>=0);
1959 tsb_invalidate(va, pm); 1959 tsb_invalidate(va, pm);
1960 tlb_flush_pte(va, pm); 1960 tlb_flush_pte(va, pm);
1961 } 1961 }
1962 1962
1963 /* We will let the fast mmu miss interrupt load the new translation */ 1963 /* We will let the fast mmu miss interrupt load the new translation */
1964 pv_check(); 1964 pv_check();
1965 out: 1965 out:
1966 /* Catch up on deferred frees. */ 1966 /* Catch up on deferred frees. */
1967 for (; freepv != NULL; freepv = npv) { 1967 for (; freepv != NULL; freepv = npv) {
1968 npv = freepv->pv_next; 1968 npv = freepv->pv_next;
1969 pool_cache_put(&pmap_pv_cache, freepv); 1969 pool_cache_put(&pmap_pv_cache, freepv);
1970 } 1970 }
1971 return error; 1971 return error;
1972} 1972}
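/*
 * A sketch of how a caller might drive the PMAP_CANFAIL path of pmap_enter()
 * above (illustrative only): with PMAP_CANFAIL set, running out of pv entries
 * or page-table pages returns ENOMEM instead of panicking, so the caller can
 * wait for free memory and retry.  The retry policy here is an assumption,
 * not something this file prescribes.
 */
#if 0	/* sketch, not compiled */
static int
example_enter_canfail(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	int error;

	for (;;) {
		error = pmap_enter(pm, va, pa, prot,
		    (prot & VM_PROT_ALL) | PMAP_CANFAIL);
		if (error != ENOMEM)
			return error;
		uvm_wait("pmenter");	/* let the pagedaemon make progress */
	}
}
#endif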
1973 1973
1974void 1974void
1975pmap_remove_all(struct pmap *pm) 1975pmap_remove_all(struct pmap *pm)
1976{ 1976{
1977#ifdef MULTIPROCESSOR 1977#ifdef MULTIPROCESSOR
1978 struct cpu_info *ci; 1978 struct cpu_info *ci;
1979 sparc64_cpuset_t pmap_cpus_active; 1979 sparc64_cpuset_t pmap_cpus_active;
1980#endif 1980#endif
1981 1981
1982 if (pm == pmap_kernel()) { 1982 if (pm == pmap_kernel()) {
1983 return; 1983 return;
1984 } 1984 }
1985 write_user_windows(); 1985 write_user_windows();
1986 pm->pm_refs = 0; 1986 pm->pm_refs = 0;
1987 1987
1988 /* 1988 /*
1989 * XXXMRG: pmap_destroy() does exactly the same dance here. 1989 * XXXMRG: pmap_destroy() does exactly the same dance here.
1990 * surely one of them isn't necessary? 1990 * surely one of them isn't necessary?
1991 */ 1991 */
1992#ifdef MULTIPROCESSOR 1992#ifdef MULTIPROCESSOR
1993 CPUSET_CLEAR(pmap_cpus_active); 1993 CPUSET_CLEAR(pmap_cpus_active);
1994 for (ci = cpus; ci != NULL; ci = ci->ci_next) { 1994 for (ci = cpus; ci != NULL; ci = ci->ci_next) {
1995 /* XXXMRG: Move the lock inside one or both tests? */ 1995 /* XXXMRG: Move the lock inside one or both tests? */
1996 mutex_enter(&ci->ci_ctx_lock); 1996 mutex_enter(&ci->ci_ctx_lock);
1997 if (CPUSET_HAS(cpus_active, ci->ci_index)) { 1997 if (CPUSET_HAS(cpus_active, ci->ci_index)) {
1998 if (pm->pm_ctx[ci->ci_index] > 0) { 1998 if (pm->pm_ctx[ci->ci_index] > 0) {
1999 CPUSET_ADD(pmap_cpus_active, ci->ci_index); 1999 CPUSET_ADD(pmap_cpus_active, ci->ci_index);
2000 ctx_free(pm, ci); 2000 ctx_free(pm, ci);
2001 } 2001 }
2002 } 2002 }
2003 mutex_exit(&ci->ci_ctx_lock); 2003 mutex_exit(&ci->ci_ctx_lock);
2004 } 2004 }
2005#else 2005#else
2006 if (pmap_ctx(pm)) { 2006 if (pmap_ctx(pm)) {
2007 mutex_enter(&curcpu()->ci_ctx_lock); 2007 mutex_enter(&curcpu()->ci_ctx_lock);
2008 ctx_free(pm, curcpu()); 2008 ctx_free(pm, curcpu());
2009 mutex_exit(&curcpu()->ci_ctx_lock); 2009 mutex_exit(&curcpu()->ci_ctx_lock);
2010 } 2010 }
2011#endif 2011#endif
2012 2012
2013 REMOVE_STAT(flushes); 2013 REMOVE_STAT(flushes);
2014 /* 2014 /*
2015 * XXXMRG: couldn't we do something less severe here, and 2015 * XXXMRG: couldn't we do something less severe here, and
2016 * only flush the right context on each CPU? 2016 * only flush the right context on each CPU?
2017 */ 2017 */
2018 blast_dcache(); 2018 blast_dcache();
2019} 2019}
2020 2020
2021/* 2021/*
2022 * Remove the given range of mapping entries. 2022 * Remove the given range of mapping entries.
2023 */ 2023 */
2024void 2024void
2025pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva) 2025pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
2026{ 2026{
2027 int64_t data; 2027 int64_t data;
2028 paddr_t pa; 2028 paddr_t pa;
2029 struct vm_page *pg; 2029 struct vm_page *pg;
2030 pv_entry_t pv, freepv = NULL; 2030 pv_entry_t pv, freepv = NULL;
2031 int rv; 2031 int rv;
2032 bool flush = FALSE; 2032 bool flush = FALSE;
2033 2033
2034 /* 2034 /*
2035 * In here we should check each pseg and if there are no more entries, 2035 * In here we should check each pseg and if there are no more entries,
2036 * free it. It's just that linear scans of 8K pages get expensive. 2036 * free it. It's just that linear scans of 8K pages get expensive.
2037 */ 2037 */
2038 2038
2039 KASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK); 2039 KASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK);
2040 KASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata); 2040 KASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata);
2041 2041
2042 mutex_enter(&pmap_lock); 2042 mutex_enter(&pmap_lock);
2043 DPRINTF(PDB_REMOVE, ("pmap_remove(pm=%p, va=%p, endva=%p):", pm, 2043 DPRINTF(PDB_REMOVE, ("pmap_remove(pm=%p, va=%p, endva=%p):", pm,
2044 (void *)(u_long)va, (void *)(u_long)endva)); 2044 (void *)(u_long)va, (void *)(u_long)endva));
2045 REMOVE_STAT(calls); 2045 REMOVE_STAT(calls);
2046 2046
2047 /* Now do the real work */ 2047 /* Now do the real work */
2048 for (; va < endva; va += PAGE_SIZE) { 2048 for (; va < endva; va += PAGE_SIZE) {
2049#ifdef DIAGNOSTIC 2049#ifdef DIAGNOSTIC
2050 /* 2050 /*
2051 * Is this part of the permanent 4MB mapping? 2051 * Is this part of the permanent 4MB mapping?
2052 */ 2052 */
2053 if (pm == pmap_kernel() && va >= ktext && 2053 if (pm == pmap_kernel() && va >= ktext &&
2054 va < roundup(ekdata, 4*MEG)) 2054 va < roundup(ekdata, 4*MEG))
2055 panic("pmap_remove: va=%08llx in locked TLB", 2055 panic("pmap_remove: va=%08llx in locked TLB",
2056 (long long)va); 2056 (long long)va);
2057#endif 2057#endif
2058 2058
2059 data = pseg_get(pm, va); 2059 data = pseg_get(pm, va);
2060 if ((data & TLB_V) == 0) { 2060 if ((data & TLB_V) == 0) {
2061 continue; 2061 continue;
2062 } 2062 }
2063 2063
2064 flush = TRUE; 2064 flush = TRUE;
2065 /* First remove the pv entry, if there is one */ 2065 /* First remove the pv entry, if there is one */
2066 pa = data & TLB_PA_MASK; 2066 pa = data & TLB_PA_MASK;
2067 pg = PHYS_TO_VM_PAGE(pa); 2067 pg = PHYS_TO_VM_PAGE(pa);
2068 if (pg) { 2068 if (pg) {
2069 pv = pmap_remove_pv(pm, va, pg); 2069 pv = pmap_remove_pv(pm, va, pg);
2070 if (pv != NULL) { 2070 if (pv != NULL) {
2071 /* free it */ 2071 /* free it */
2072 pv->pv_next = freepv; 2072 pv->pv_next = freepv;
2073 freepv = pv; 2073 freepv = pv;
2074 } 2074 }
2075 } 2075 }
2076 2076
2077 /* 2077 /*
2078 * We need to flip the valid bit and 2078 * We need to flip the valid bit and
2079 * clear the access statistics. 2079 * clear the access statistics.
2080 */ 2080 */
2081 2081
2082 rv = pseg_set(pm, va, 0, 0); 2082 rv = pseg_set(pm, va, 0, 0);
2083 if (rv & 1) 2083 if (rv & 1)
2084 panic("pmap_remove: pseg_set needed spare, rv=%d!\n", 2084 panic("pmap_remove: pseg_set needed spare, rv=%d!\n",
2085 rv); 2085 rv);
2086 2086
2087 DPRINTF(PDB_REMOVE, (" clearing seg %x pte %x\n", 2087 DPRINTF(PDB_REMOVE, (" clearing seg %x pte %x\n",
2088 (int)va_to_seg(va), (int)va_to_pte(va))); 2088 (int)va_to_seg(va), (int)va_to_pte(va)));
2089 REMOVE_STAT(removes); 2089 REMOVE_STAT(removes);
2090 2090
2091 if (pm != pmap_kernel() && !pmap_has_ctx(pm)) 2091 if (pm != pmap_kernel() && !pmap_has_ctx(pm))
2092 continue; 2092 continue;
2093 2093
2094 /* 2094 /*
2095 * if the pmap is being torn down, don't bother flushing, 2095 * if the pmap is being torn down, don't bother flushing,
2096 * we already have done so. 2096 * we already have done so.
2097 */ 2097 */
2098 2098
2099 if (!pm->pm_refs) 2099 if (!pm->pm_refs)
2100 continue; 2100 continue;
2101 2101
2102 /* 2102 /*
2103 * Here we assume nothing can get into the TLB 2103 * Here we assume nothing can get into the TLB
2104 * unless it has a PTE. 2104 * unless it has a PTE.
2105 */ 2105 */
2106 2106
2107 KASSERT(pmap_ctx(pm)>=0); 2107 KASSERT(pmap_ctx(pm)>=0);
2108 tsb_invalidate(va, pm); 2108 tsb_invalidate(va, pm);
2109 REMOVE_STAT(tflushes); 2109 REMOVE_STAT(tflushes);
2110 tlb_flush_pte(va, pm); 2110 tlb_flush_pte(va, pm);
2111 dcache_flush_page_all(pa); 2111 dcache_flush_page_all(pa);
2112 } 2112 }
2113 if (flush && pm->pm_refs) 2113 if (flush && pm->pm_refs)
2114 REMOVE_STAT(flushes); 2114 REMOVE_STAT(flushes);
2115 DPRINTF(PDB_REMOVE, ("\n")); 2115 DPRINTF(PDB_REMOVE, ("\n"));
2116 pv_check(); 2116 pv_check();
2117 mutex_exit(&pmap_lock); 2117 mutex_exit(&pmap_lock);
2118 2118
2119 /* Catch up on deferred frees. */ 2119 /* Catch up on deferred frees. */
2120 for (; freepv != NULL; freepv = pv) { 2120 for (; freepv != NULL; freepv = pv) {
2121 pv = freepv->pv_next; 2121 pv = freepv->pv_next;
2122 pool_cache_put(&pmap_pv_cache, freepv); 2122 pool_cache_put(&pmap_pv_cache, freepv);
2123 } 2123 }
2124} 2124}
2125 2125
2126/* 2126/*
2127 * Change the protection on the specified range of this pmap. 2127 * Change the protection on the specified range of this pmap.
2128 */ 2128 */
2129void 2129void
2130pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 2130pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
2131{ 2131{
2132 paddr_t pa; 2132 paddr_t pa;
2133 int64_t data; 2133 int64_t data;
2134 struct vm_page *pg; 2134 struct vm_page *pg;
2135 pv_entry_t pv; 2135 pv_entry_t pv;
2136 int rv; 2136 int rv;
2137 2137
2138 KASSERT(pm != pmap_kernel() || eva < INTSTACK || sva > EINTSTACK); 2138 KASSERT(pm != pmap_kernel() || eva < INTSTACK || sva > EINTSTACK);
2139 KASSERT(pm != pmap_kernel() || eva < kdata || sva > ekdata); 2139 KASSERT(pm != pmap_kernel() || eva < kdata || sva > ekdata);
2140 2140
2141 if (prot == VM_PROT_NONE) { 2141 if (prot == VM_PROT_NONE) {
2142 pmap_remove(pm, sva, eva); 2142 pmap_remove(pm, sva, eva);
2143 return; 2143 return;
2144 } 2144 }
2145 2145
2146 sva = trunc_page(sva); 2146 sva = trunc_page(sva);
2147 mutex_enter(&pmap_lock); 2147 mutex_enter(&pmap_lock);
2148 for (; sva < eva; sva += PAGE_SIZE) { 2148 for (; sva < eva; sva += PAGE_SIZE) {
2149#ifdef PMAP_DEBUG 2149#ifdef PMAP_DEBUG
2150 /* 2150 /*
2151 * Is this part of the permanent 4MB mapping? 2151 * Is this part of the permanent 4MB mapping?
2152 */ 2152 */
2153 if (pm == pmap_kernel() && sva >= ktext && 2153 if (pm == pmap_kernel() && sva >= ktext &&
2154 sva < roundup(ekdata, 4 * MEG)) { 2154 sva < roundup(ekdata, 4 * MEG)) {
2155 mutex_exit(&pmap_lock); 2155 mutex_exit(&pmap_lock);
2156 prom_printf("pmap_protect: va=%08x in locked TLB\n", 2156 prom_printf("pmap_protect: va=%08x in locked TLB\n",
2157 sva); 2157 sva);
2158 prom_abort(); 2158 prom_abort();
2159 return; 2159 return;
2160 } 2160 }
2161#endif 2161#endif
2162 DPRINTF(PDB_CHANGEPROT, ("pmap_protect: va %p\n", 2162 DPRINTF(PDB_CHANGEPROT, ("pmap_protect: va %p\n",
2163 (void *)(u_long)sva)); 2163 (void *)(u_long)sva));
2164 data = pseg_get(pm, sva); 2164 data = pseg_get(pm, sva);
2165 if ((data & TLB_V) == 0) { 2165 if ((data & TLB_V) == 0) {
2166 continue; 2166 continue;
2167 } 2167 }
2168 2168
2169 pa = data & TLB_PA_MASK; 2169 pa = data & TLB_PA_MASK;
2170 DPRINTF(PDB_CHANGEPROT|PDB_REF, 2170 DPRINTF(PDB_CHANGEPROT|PDB_REF,
2171 ("pmap_protect: va=%08x data=%08llx " 2171 ("pmap_protect: va=%08x data=%08llx "
2172 "seg=%08x pte=%08x\n", 2172 "seg=%08x pte=%08x\n",
2173 (u_int)sva, (long long)pa, (int)va_to_seg(sva), 2173 (u_int)sva, (long long)pa, (int)va_to_seg(sva),
2174 (int)va_to_pte(sva))); 2174 (int)va_to_pte(sva)));
2175 2175
2176 pg = PHYS_TO_VM_PAGE(pa); 2176 pg = PHYS_TO_VM_PAGE(pa);
2177 if (pg) { 2177 if (pg) {
2178 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 2178 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2179 2179
2180 /* Save REF/MOD info */ 2180 /* Save REF/MOD info */
2181 pv = &md->mdpg_pvh; 2181 pv = &md->mdpg_pvh;
2182 if (data & TLB_ACCESS) 2182 if (data & TLB_ACCESS)
2183 pv->pv_va |= PV_REF; 2183 pv->pv_va |= PV_REF;
2184 if (data & TLB_MODIFY) 2184 if (data & TLB_MODIFY)
2185 pv->pv_va |= PV_MOD; 2185 pv->pv_va |= PV_MOD;
2186 } 2186 }
2187 2187
2188 /* Just do the pmap and TSB, not the pv_list */ 2188 /* Just do the pmap and TSB, not the pv_list */
2189 if ((prot & VM_PROT_WRITE) == 0) 2189 if ((prot & VM_PROT_WRITE) == 0)
2190 data &= ~(TLB_W|TLB_REAL_W); 2190 data &= ~(TLB_W|TLB_REAL_W);
2191 if ((prot & VM_PROT_EXECUTE) == 0) 2191 if ((prot & VM_PROT_EXECUTE) == 0)
2192 data &= ~(TLB_EXEC); 2192 data &= ~(TLB_EXEC);
2193 2193
2194 rv = pseg_set(pm, sva, data, 0); 2194 rv = pseg_set(pm, sva, data, 0);
2195 if (rv & 1) 2195 if (rv & 1)
2196 panic("pmap_protect: pseg_set needs spare! rv=%d\n", 2196 panic("pmap_protect: pseg_set needs spare! rv=%d\n",
2197 rv); 2197 rv);
2198 2198
2199 if (pm != pmap_kernel() && !pmap_has_ctx(pm)) 2199 if (pm != pmap_kernel() && !pmap_has_ctx(pm))
2200 continue; 2200 continue;
2201 2201
2202 KASSERT(pmap_ctx(pm)>=0); 2202 KASSERT(pmap_ctx(pm)>=0);
2203 tsb_invalidate(sva, pm); 2203 tsb_invalidate(sva, pm);
2204 tlb_flush_pte(sva, pm); 2204 tlb_flush_pte(sva, pm);
2205 } 2205 }
2206 pv_check(); 2206 pv_check();
2207 mutex_exit(&pmap_lock); 2207 mutex_exit(&pmap_lock);
2208} 2208}
2209 2209
2210/* 2210/*
2211 * Extract the physical page address associated 2211 * Extract the physical page address associated
2212 * with the given map/virtual_address pair. 2212 * with the given map/virtual_address pair.
2213 */ 2213 */
2214bool 2214bool
2215pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap) 2215pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
2216{ 2216{
2217 paddr_t pa; 2217 paddr_t pa;
2218 int64_t data = 0; 2218 int64_t data = 0;
2219 2219
2220 if (pm == pmap_kernel() && va >= kdata && va < roundup(ekdata, 4*MEG)) { 2220 if (pm == pmap_kernel() && va >= kdata && va < roundup(ekdata, 4*MEG)) {
2221 /* Need to deal w/locked TLB entry specially. */ 2221 /* Need to deal w/locked TLB entry specially. */
2222 pa = pmap_kextract(va); 2222 pa = pmap_kextract(va);
2223 DPRINTF(PDB_EXTRACT, ("pmap_extract: va=%lx pa=%llx\n", 2223 DPRINTF(PDB_EXTRACT, ("pmap_extract: va=%lx pa=%llx\n",
2224 (u_long)va, (unsigned long long)pa)); 2224 (u_long)va, (unsigned long long)pa));
2225 if (pap != NULL) 2225 if (pap != NULL)
2226 *pap = pa; 2226 *pap = pa;
2227 return TRUE; 2227 return TRUE;
2228 } else if (pm == pmap_kernel() && va >= ktext && va < ektext) { 2228 } else if (pm == pmap_kernel() && va >= ktext && va < ektext) {
2229 /* Need to deal w/locked TLB entry specially. */ 2229 /* Need to deal w/locked TLB entry specially. */
2230 pa = pmap_kextract(va); 2230 pa = pmap_kextract(va);
2231 DPRINTF(PDB_EXTRACT, ("pmap_extract: va=%lx pa=%llx\n", 2231 DPRINTF(PDB_EXTRACT, ("pmap_extract: va=%lx pa=%llx\n",
2232 (u_long)va, (unsigned long long)pa)); 2232 (u_long)va, (unsigned long long)pa));
2233 if (pap != NULL) 2233 if (pap != NULL)
2234 *pap = pa; 2234 *pap = pa;
2235 return TRUE; 2235 return TRUE;
2236 } else if (pm == pmap_kernel() && va >= INTSTACK && va < (INTSTACK + 64*KB)) { 2236 } else if (pm == pmap_kernel() && va >= INTSTACK && va < (INTSTACK + 64*KB)) {
2237 pa = (paddr_t)(curcpu()->ci_paddr - INTSTACK + va); 2237 pa = (paddr_t)(curcpu()->ci_paddr - INTSTACK + va);
2238 DPRINTF(PDB_EXTRACT, ("pmap_extract (intstack): va=%lx pa=%llx\n", 2238 DPRINTF(PDB_EXTRACT, ("pmap_extract (intstack): va=%lx pa=%llx\n",
2239 (u_long)va, (unsigned long long)pa)); 2239 (u_long)va, (unsigned long long)pa));
2240 if (pap != NULL) 2240 if (pap != NULL)
2241 *pap = pa; 2241 *pap = pa;
2242 return TRUE; 2242 return TRUE;
2243 } else { 2243 } else {
2244 data = pseg_get(pm, va); 2244 data = pseg_get(pm, va);
2245 pa = data & TLB_PA_MASK; 2245 pa = data & TLB_PA_MASK;
2246 if (pmapdebug & PDB_EXTRACT) { 2246 if (pmapdebug & PDB_EXTRACT) {
2247 paddr_t npa = ldxa((vaddr_t)&pm->pm_segs[va_to_seg(va)], 2247 paddr_t npa = ldxa((vaddr_t)&pm->pm_segs[va_to_seg(va)],
2248 ASI_PHYS_CACHED); 2248 ASI_PHYS_CACHED);
2249 printf("pmap_extract: va=%p segs[%ld]=%llx", 2249 printf("pmap_extract: va=%p segs[%ld]=%llx",
2250 (void *)(u_long)va, (long)va_to_seg(va), 2250 (void *)(u_long)va, (long)va_to_seg(va),
2251 (unsigned long long)npa); 2251 (unsigned long long)npa);
2252 if (npa) { 2252 if (npa) {
2253 npa = (paddr_t) 2253 npa = (paddr_t)
2254 ldxa((vaddr_t)&((paddr_t *)(u_long)npa) 2254 ldxa((vaddr_t)&((paddr_t *)(u_long)npa)
2255 [va_to_dir(va)], 2255 [va_to_dir(va)],
2256 ASI_PHYS_CACHED); 2256 ASI_PHYS_CACHED);
2257 printf(" segs[%ld][%ld]=%lx", 2257 printf(" segs[%ld][%ld]=%lx",
2258 (long)va_to_seg(va), 2258 (long)va_to_seg(va),
2259 (long)va_to_dir(va), (long)npa); 2259 (long)va_to_dir(va), (long)npa);
2260 } 2260 }
2261 if (npa) { 2261 if (npa) {
2262 npa = (paddr_t) 2262 npa = (paddr_t)
2263 ldxa((vaddr_t)&((paddr_t *)(u_long)npa) 2263 ldxa((vaddr_t)&((paddr_t *)(u_long)npa)
2264 [va_to_pte(va)], 2264 [va_to_pte(va)],
2265 ASI_PHYS_CACHED); 2265 ASI_PHYS_CACHED);
2266 printf(" segs[%ld][%ld][%ld]=%lx", 2266 printf(" segs[%ld][%ld][%ld]=%lx",
2267 (long)va_to_seg(va), 2267 (long)va_to_seg(va),
2268 (long)va_to_dir(va), 2268 (long)va_to_dir(va),
2269 (long)va_to_pte(va), (long)npa); 2269 (long)va_to_pte(va), (long)npa);
2270 } 2270 }
2271 printf(" pseg_get: %lx\n", (long)pa); 2271 printf(" pseg_get: %lx\n", (long)pa);
2272 } 2272 }
2273 } 2273 }
2274 if ((data & TLB_V) == 0) 2274 if ((data & TLB_V) == 0)
2275 return (FALSE); 2275 return (FALSE);
2276 if (pap != NULL) 2276 if (pap != NULL)
2277 *pap = pa + (va & PGOFSET); 2277 *pap = pa + (va & PGOFSET);
2278 return (TRUE); 2278 return (TRUE);
2279} 2279}
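/*
 * A minimal sketch of a pmap_extract() caller (illustrative only): on success
 * it returns TRUE and stores the physical address, including the page offset,
 * through pap; for a missing mapping it returns FALSE.
 */
#if 0	/* sketch, not compiled */
static paddr_t
example_extract(struct pmap *pm, vaddr_t va)
{
	paddr_t pa;

	if (!pmap_extract(pm, va, &pa))
		panic("example_extract: va %lx has no mapping", (u_long)va);
	return pa;
}
#endif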
2280 2280
2281/* 2281/*
2282 * Change protection on a kernel address. 2282 * Change protection on a kernel address.
2283 * This should only be called from MD code. 2283 * This should only be called from MD code.
2284 */ 2284 */
2285void 2285void
2286pmap_kprotect(vaddr_t va, vm_prot_t prot) 2286pmap_kprotect(vaddr_t va, vm_prot_t prot)
2287{ 2287{
2288 struct pmap *pm = pmap_kernel(); 2288 struct pmap *pm = pmap_kernel();
2289 int64_t data; 2289 int64_t data;
2290 int rv; 2290 int rv;
2291 2291
2292 data = pseg_get(pm, va); 2292 data = pseg_get(pm, va);
2293 KASSERT(data & TLB_V); 2293 KASSERT(data & TLB_V);
2294 if (prot & VM_PROT_WRITE) { 2294 if (prot & VM_PROT_WRITE) {
2295 data |= (TLB_W|TLB_REAL_W); 2295 data |= (TLB_W|TLB_REAL_W);
2296 } else { 2296 } else {
2297 data &= ~(TLB_W|TLB_REAL_W); 2297 data &= ~(TLB_W|TLB_REAL_W);
2298 } 2298 }
2299 rv = pseg_set(pm, va, data, 0); 2299 rv = pseg_set(pm, va, data, 0);
2300 if (rv & 1) 2300 if (rv & 1)
2301 panic("pmap_kprotect: pseg_set needs spare! rv=%d", rv); 2301 panic("pmap_kprotect: pseg_set needs spare! rv=%d", rv);
2302 KASSERT(pmap_ctx(pm)>=0); 2302 KASSERT(pmap_ctx(pm)>=0);
2303 tsb_invalidate(va, pm); 2303 tsb_invalidate(va, pm);
2304 tlb_flush_pte(va, pm); 2304 tlb_flush_pte(va, pm);
2305} 2305}
2306 2306
2307/* 2307/*
2308 * Return the number of bytes that pmap_dumpmmu() will dump. 2308 * Return the number of bytes that pmap_dumpmmu() will dump.
2309 */ 2309 */
2310int 2310int
2311pmap_dumpsize(void) 2311pmap_dumpsize(void)
2312{ 2312{
2313 int sz; 2313 int sz;
2314 2314
2315 sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)); 2315 sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
2316 sz += kernel_tlb_slots * sizeof(struct cpu_kcore_4mbseg); 2316 sz += kernel_tlb_slots * sizeof(struct cpu_kcore_4mbseg);
2317 sz += phys_installed_size * sizeof(phys_ram_seg_t); 2317 sz += phys_installed_size * sizeof(phys_ram_seg_t);
2318 2318
2319 return btodb(sz + DEV_BSIZE - 1); 2319 return btodb(sz + DEV_BSIZE - 1);
2320} 2320}
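/*
 * Worked example for the rounding above (numbers are illustrative):
 * btodb(sz + DEV_BSIZE - 1) converts the byte count to whole DEV_BSIZE
 * (512-byte) disk blocks, rounding up, so sz = 1..512 needs one block,
 * sz = 513..1024 needs two, and so on.  pmap_dumpmmu() below records
 * dbtob() of this same count as the MI segment size.
 */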
2321 2321
2322/* 2322/*
2323 * Write the mmu contents to the dump device. 2323 * Write the mmu contents to the dump device.
2324 * This gets appended to the end of a crash dump since 2324 * This gets appended to the end of a crash dump since
2325 * there is no in-core copy of kernel memory mappings on a 4/4c machine. 2325 * there is no in-core copy of kernel memory mappings on a 4/4c machine.
2326 * 2326 *
2327 * Write the core dump headers and MD data to the dump device. 2327 * Write the core dump headers and MD data to the dump device.
2328 * We dump the following items: 2328 * We dump the following items:
2329 * 2329 *
2330 * kcore_seg_t MI header (defined in <sys/kcore.h>) 2330 * kcore_seg_t MI header (defined in <sys/kcore.h>)
2331 * cpu_kcore_hdr_t MD header (defined in <machine/kcore.h>) 2331 * cpu_kcore_hdr_t MD header (defined in <machine/kcore.h>)
2332 * phys_ram_seg_t[phys_installed_size] physical memory segments 2332 * phys_ram_seg_t[phys_installed_size] physical memory segments
2333 */ 2333 */
2334int 2334int
2335pmap_dumpmmu(int (*dump)(dev_t, daddr_t, void *, size_t), daddr_t blkno) 2335pmap_dumpmmu(int (*dump)(dev_t, daddr_t, void *, size_t), daddr_t blkno)
2336{ 2336{
2337 kcore_seg_t *kseg; 2337 kcore_seg_t *kseg;
2338 cpu_kcore_hdr_t *kcpu; 2338 cpu_kcore_hdr_t *kcpu;
2339 phys_ram_seg_t memseg; 2339 phys_ram_seg_t memseg;
2340 struct cpu_kcore_4mbseg ktlb; 2340 struct cpu_kcore_4mbseg ktlb;
2341 int error = 0; 2341 int error = 0;
2342 int i; 2342 int i;
2343 int buffer[dbtob(1) / sizeof(int)]; 2343 int buffer[dbtob(1) / sizeof(int)];
2344 int *bp, *ep; 2344 int *bp, *ep;
2345 2345
2346#define EXPEDITE(p,n) do { \ 2346#define EXPEDITE(p,n) do { \
2347 int *sp = (void *)(p); \ 2347 int *sp = (void *)(p); \
2348 int sz = (n); \ 2348 int sz = (n); \
2349 while (sz > 0) { \ 2349 while (sz > 0) { \
2350 *bp++ = *sp++; \ 2350 *bp++ = *sp++; \
2351 if (bp >= ep) { \ 2351 if (bp >= ep) { \
2352 error = (*dump)(dumpdev, blkno, \ 2352 error = (*dump)(dumpdev, blkno, \
2353 (void *)buffer, dbtob(1)); \ 2353 (void *)buffer, dbtob(1)); \
2354 if (error != 0) \ 2354 if (error != 0) \
2355 return (error); \ 2355 return (error); \
2356 ++blkno; \ 2356 ++blkno; \
2357 bp = buffer; \ 2357 bp = buffer; \
2358 } \ 2358 } \
2359 sz -= 4; \ 2359 sz -= 4; \
2360 } \ 2360 } \
2361} while (0) 2361} while (0)
2362 2362
2363 /* Setup bookkeeping pointers */ 2363 /* Setup bookkeeping pointers */
2364 bp = buffer; 2364 bp = buffer;
2365 ep = &buffer[sizeof(buffer) / sizeof(buffer[0])]; 2365 ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
2366 2366
2367 /* Fill in MI segment header */ 2367 /* Fill in MI segment header */
2368 kseg = (kcore_seg_t *)bp; 2368 kseg = (kcore_seg_t *)bp;
2369 CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 2369 CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
2370 kseg->c_size = dbtob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t)); 2370 kseg->c_size = dbtob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
2371 2371
2372 /* Fill in MD segment header (interpreted by MD part of libkvm) */ 2372 /* Fill in MD segment header (interpreted by MD part of libkvm) */
2373 kcpu = (cpu_kcore_hdr_t *)((long)bp + ALIGN(sizeof(kcore_seg_t))); 2373 kcpu = (cpu_kcore_hdr_t *)((long)bp + ALIGN(sizeof(kcore_seg_t)));
2374 kcpu->cputype = cputyp; 2374 kcpu->cputype = cputyp;
2375 kcpu->kernbase = (uint64_t)KERNBASE; 2375 kcpu->kernbase = (uint64_t)KERNBASE;
2376 kcpu->cpubase = (uint64_t)CPUINFO_VA; 2376 kcpu->cpubase = (uint64_t)CPUINFO_VA;
2377 2377
2378 /* Describe the locked text segment */ 2378 /* Describe the locked text segment */
2379 kcpu->ktextbase = (uint64_t)ktext; 2379 kcpu->ktextbase = (uint64_t)ktext;
2380 kcpu->ktextp = (uint64_t)ktextp; 2380 kcpu->ktextp = (uint64_t)ktextp;
2381 kcpu->ktextsz = (uint64_t)ektext - ktext; 2381 kcpu->ktextsz = (uint64_t)ektext - ktext;
2382 if (kcpu->ktextsz > 4*MEG) 2382 if (kcpu->ktextsz > 4*MEG)
2383 kcpu->ktextsz = 0; /* old version can not work */ 2383 kcpu->ktextsz = 0; /* old version can not work */
2384 2384
2385 /* Describe locked data segment */ 2385 /* Describe locked data segment */
2386 kcpu->kdatabase = (uint64_t)kdata; 2386 kcpu->kdatabase = (uint64_t)kdata;
2387 kcpu->kdatap = (uint64_t)kdatap; 2387 kcpu->kdatap = (uint64_t)kdatap;
2388 kcpu->kdatasz = (uint64_t)ekdatap - kdatap; 2388 kcpu->kdatasz = (uint64_t)ekdatap - kdatap;
2389 2389
2390 /* new version of locked segments description */ 2390 /* new version of locked segments description */
2391 kcpu->newmagic = SPARC64_KCORE_NEWMAGIC; 2391 kcpu->newmagic = SPARC64_KCORE_NEWMAGIC;
2392 kcpu->num4mbsegs = kernel_tlb_slots; 2392 kcpu->num4mbsegs = kernel_tlb_slots;
2393 kcpu->off4mbsegs = ALIGN(sizeof(cpu_kcore_hdr_t)); 2393 kcpu->off4mbsegs = ALIGN(sizeof(cpu_kcore_hdr_t));
2394 2394
2395 /* description of per-cpu mappings */ 2395 /* description of per-cpu mappings */
2396 kcpu->numcpuinfos = sparc_ncpus; 2396 kcpu->numcpuinfos = sparc_ncpus;
2397 kcpu->percpusz = 64 * 1024; /* used to be 128k for some time */ 2397 kcpu->percpusz = 64 * 1024; /* used to be 128k for some time */
2398 kcpu->thiscpu = cpu_number(); /* which cpu is doing this dump */ 2398 kcpu->thiscpu = cpu_number(); /* which cpu is doing this dump */
2399 kcpu->cpusp = cpu0paddr - 64 * 1024 * sparc_ncpus; 2399 kcpu->cpusp = cpu0paddr - 64 * 1024 * sparc_ncpus;
2400 2400
2401 /* Now the memsegs */ 2401 /* Now the memsegs */
2402 kcpu->nmemseg = phys_installed_size; 2402 kcpu->nmemseg = phys_installed_size;
2403 kcpu->memsegoffset = kcpu->off4mbsegs 2403 kcpu->memsegoffset = kcpu->off4mbsegs
2404 + kernel_tlb_slots * sizeof(struct cpu_kcore_4mbseg); 2404 + kernel_tlb_slots * sizeof(struct cpu_kcore_4mbseg);
2405 2405
2406 /* Now we need to point this at our kernel pmap. */ 2406 /* Now we need to point this at our kernel pmap. */
2407 kcpu->nsegmap = STSZ; 2407 kcpu->nsegmap = STSZ;
2408 kcpu->segmapoffset = (uint64_t)pmap_kernel()->pm_physaddr; 2408 kcpu->segmapoffset = (uint64_t)pmap_kernel()->pm_physaddr;
2409 2409
2410 /* Note: we have assumed everything fits in buffer[] so far... */ 2410 /* Note: we have assumed everything fits in buffer[] so far... */
2411 bp = (int *)((long)kcpu + ALIGN(sizeof(cpu_kcore_hdr_t))); 2411 bp = (int *)((long)kcpu + ALIGN(sizeof(cpu_kcore_hdr_t)));
2412 2412
2413 /* write locked kernel 4MB TLBs */ 2413 /* write locked kernel 4MB TLBs */
2414 for (i = 0; i < kernel_tlb_slots; i++) { 2414 for (i = 0; i < kernel_tlb_slots; i++) {
2415 ktlb.va = kernel_tlbs[i].te_va; 2415 ktlb.va = kernel_tlbs[i].te_va;
2416 ktlb.pa = kernel_tlbs[i].te_pa; 2416 ktlb.pa = kernel_tlbs[i].te_pa;
2417 EXPEDITE(&ktlb, sizeof(ktlb)); 2417 EXPEDITE(&ktlb, sizeof(ktlb));
2418 } 2418 }
2419 2419
2420 /* write memsegs */ 2420 /* write memsegs */
2421 for (i = 0; i < phys_installed_size; i++) { 2421 for (i = 0; i < phys_installed_size; i++) {
2422 memseg.start = phys_installed[i].start; 2422 memseg.start = phys_installed[i].start;
2423 memseg.size = phys_installed[i].size; 2423 memseg.size = phys_installed[i].size;
2424 EXPEDITE(&memseg, sizeof(phys_ram_seg_t)); 2424 EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
2425 } 2425 }
2426 2426
2427 if (bp != buffer) 2427 if (bp != buffer)
2428 error = (*dump)(dumpdev, blkno++, (void *)buffer, dbtob(1)); 2428 error = (*dump)(dumpdev, blkno++, (void *)buffer, dbtob(1));
2429 2429
2430 return (error); 2430 return (error);
2431} 2431}
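/*
 * For reference, the MD dump area assembled above is written as consecutive
 * DEV_BSIZE blocks starting at the caller's blkno, in this order:
 *
 *	kcore_seg_t				(ALIGN'ed)
 *	cpu_kcore_hdr_t				(ALIGN'ed)
 *	struct cpu_kcore_4mbseg[kernel_tlb_slots]
 *	phys_ram_seg_t[phys_installed_size]
 *
 * which is what pmap_dumpsize() accounts for, and which the MD part of
 * libkvm reaches through the off4mbsegs/memsegoffset fields recorded in
 * the header.
 */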
2432 2432
2433/* 2433/*
2434 * Determine (non)existence of physical page 2434 * Determine (non)existence of physical page
2435 */ 2435 */
2436int 2436int
2437pmap_pa_exists(paddr_t pa) 2437pmap_pa_exists(paddr_t pa)
2438{ 2438{
2439 int i; 2439 int i;
2440 2440
2441 /* Just go through physical memory list & see if we're there */ 2441 /* Just go through physical memory list & see if we're there */
2442 for (i = 0; i < phys_installed_size; i++) { 2442 for (i = 0; i < phys_installed_size; i++) {
2443 if ((phys_installed[i].start <= pa) && 2443 if ((phys_installed[i].start <= pa) &&
2444 (phys_installed[i].start + 2444 (phys_installed[i].start +
2445 phys_installed[i].size >= pa)) 2445 phys_installed[i].size >= pa))
2446 return 1; 2446 return 1;
2447 } 2447 }
2448 return 0; 2448 return 0;
2449} 2449}
2450 2450
2451/* 2451/*
2452 * Lookup the appropriate TSB entry. 2452 * Lookup the appropriate TSB entry.
2453 * 2453 *
2454 * Here is the full official pseudo code: 2454 * Here is the full official pseudo code:
2455 * 2455 *
2456 */ 2456 */
2457 2457
2458#ifdef NOTYET 2458#ifdef NOTYET
2459int64 GenerateTSBPointer( 2459int64 GenerateTSBPointer(
2460 int64 va, /* Missing VA */ 2460 int64 va, /* Missing VA */
2461 PointerType type, /* 8K_POINTER or 16K_POINTER */ 2461 PointerType type, /* 8K_POINTER or 16K_POINTER */
2462 int64 TSBBase, /* TSB Register[63:13] << 13 */ 2462 int64 TSBBase, /* TSB Register[63:13] << 13 */
2463 Boolean split, /* TSB Register[12] */ 2463 Boolean split, /* TSB Register[12] */
2464 int TSBSize) /* TSB Register[2:0] */ 2464 int TSBSize) /* TSB Register[2:0] */
2465{ 2465{
2466 int64 vaPortion; 2466 int64 vaPortion;
2467 int64 TSBBaseMask; 2467 int64 TSBBaseMask;
2468 int64 splitMask; 2468 int64 splitMask;
2469 2469
2470 /* TSBBaseMask marks the bits from TSB Base Reg */ 2470 /* TSBBaseMask marks the bits from TSB Base Reg */
2471 TSBBaseMask = 0xffffffffffffe000 << 2471 TSBBaseMask = 0xffffffffffffe000 <<
2472 (split? (TSBsize + 1) : TSBsize); 2472 (split? (TSBsize + 1) : TSBsize);
2473 2473
2474 /* Shift va towards lsb appropriately and */ 2474 /* Shift va towards lsb appropriately and */
2475 /* zero out the original va page offset */ 2475 /* zero out the original va page offset */
2476 vaPortion = (va >> ((type == 8K_POINTER)? 9: 12)) & 2476 vaPortion = (va >> ((type == 8K_POINTER)? 9: 12)) &
2477 0xfffffffffffffff0; 2477 0xfffffffffffffff0;
2478 2478
2479 if (split) { 2479 if (split) {
2480 /* There's only one bit in question for split */ 2480 /* There's only one bit in question for split */
2481 splitMask = 1 << (13 + TSBsize); 2481 splitMask = 1 << (13 + TSBsize);
2482 if (type == 8K_POINTER) 2482 if (type == 8K_POINTER)
2483 /* Make sure we're in the lower half */ 2483 /* Make sure we're in the lower half */
2484 vaPortion &= ~splitMask; 2484 vaPortion &= ~splitMask;
2485 else 2485 else
2486 /* Make sure we're in the upper half */ 2486 /* Make sure we're in the upper half */
2487 vaPortion |= splitMask; 2487 vaPortion |= splitMask;
2488 } 2488 }
2489 return (TSBBase & TSBBaseMask) | (vaPortion & ~TSBBaseMask); 2489 return (TSBBase & TSBBaseMask) | (vaPortion & ~TSBBaseMask);
2490} 2490}
2491#endif 2491#endif
2492/* 2492/*
2493 * Of course, since we are not using a split TSB or variable page sizes, 2493 * Of course, since we are not using a split TSB or variable page sizes,
2494 * we can optimize this a bit. 2494 * we can optimize this a bit.
2495 * 2495 *
2496 * The following only works for a unified 8K TSB. It will find the slot 2496 * The following only works for a unified 8K TSB. It will find the slot
2497 * for that particular va and return it. IT MAY BE FOR ANOTHER MAPPING! 2497 * for that particular va and return it. IT MAY BE FOR ANOTHER MAPPING!
2498 */ 2498 */
2499int 2499int
2500ptelookup_va(vaddr_t va) 2500ptelookup_va(vaddr_t va)
2501{ 2501{
2502 long tsbptr; 2502 long tsbptr;
2503#define TSBBASEMASK (0xffffffffffffe000LL << tsbsize) 2503#define TSBBASEMASK (0xffffffffffffe000LL << tsbsize)
2504 2504
2505 tsbptr = (((va >> 9) & 0xfffffffffffffff0LL) & ~TSBBASEMASK); 2505 tsbptr = (((va >> 9) & 0xfffffffffffffff0LL) & ~TSBBASEMASK);
2506 return (tsbptr / sizeof(pte_t)); 2506 return (tsbptr / sizeof(pte_t));
2507} 2507}
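/*
 * Worked example for the lookup above, assuming tsbsize == 0 (the smallest,
 * 512-entry unified TSB), so TSBBASEMASK is 0xffffffffffffe000 and
 * ~TSBBASEMASK is 0x1fff.  For va = 0x12346000:
 *
 *	(va >> 9) & 0xfffffffffffffff0	== 0x91a30
 *	0x91a30 & 0x1fff		== 0x1a30
 *	0x1a30 / sizeof(pte_t)		== 0x1a3	(slot 419)
 *
 * i.e. the slot is chosen by VA bits <21:13>, so any two virtual addresses
 * that agree in those bits compete for the same slot -- hence the warning
 * that the entry found there may belong to another mapping.
 */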
2508 2508
2509/* 2509/*
2510 * Do whatever is needed to sync the MOD/REF flags 2510 * Do whatever is needed to sync the MOD/REF flags
2511 */ 2511 */
2512 2512
2513bool 2513bool
2514pmap_clear_modify(struct vm_page *pg) 2514pmap_clear_modify(struct vm_page *pg)
2515{ 2515{
2516 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 2516 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2517 pv_entry_t pv; 2517 pv_entry_t pv;
2518 int rv; 2518 int rv;
2519 int changed = 0; 2519 int changed = 0;
2520#ifdef DEBUG 2520#ifdef DEBUG
2521 int modified = 0; 2521 int modified = 0;
2522 2522
2523 DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_modify(%p)\n", pg)); 2523 DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_modify(%p)\n", pg));
2524 2524
2525 modified = pmap_is_modified(pg); 2525 modified = pmap_is_modified(pg);
2526#endif 2526#endif
2527 mutex_enter(&pmap_lock); 2527 mutex_enter(&pmap_lock);
2528 /* Clear all mappings */ 2528 /* Clear all mappings */
2529 pv = &md->mdpg_pvh; 2529 pv = &md->mdpg_pvh;
2530#ifdef DEBUG 2530#ifdef DEBUG
2531 if (pv->pv_va & PV_MOD) 2531 if (pv->pv_va & PV_MOD)
2532 pv->pv_va |= PV_WE; /* Remember this was modified */ 2532 pv->pv_va |= PV_WE; /* Remember this was modified */
2533#endif 2533#endif
2534 if (pv->pv_va & PV_MOD) { 2534 if (pv->pv_va & PV_MOD) {
2535 changed |= 1; 2535 changed |= 1;
2536 pv->pv_va &= ~PV_MOD; 2536 pv->pv_va &= ~PV_MOD;
2537 } 2537 }
2538#ifdef DEBUG 2538#ifdef DEBUG
2539 if (pv->pv_next && !pv->pv_pmap) { 2539 if (pv->pv_next && !pv->pv_pmap) {
2540 printf("pmap_clear_modify: npv but no pmap for pv %p\n", pv); 2540 printf("pmap_clear_modify: npv but no pmap for pv %p\n", pv);
2541 Debugger(); 2541 Debugger();
2542 } 2542 }
2543#endif 2543#endif
2544 if (pv->pv_pmap != NULL) { 2544 if (pv->pv_pmap != NULL) {
2545 for (; pv; pv = pv->pv_next) { 2545 for (; pv; pv = pv->pv_next) {
2546 int64_t data; 2546 int64_t data;
2547 struct pmap *pmap = pv->pv_pmap; 2547 struct pmap *pmap = pv->pv_pmap;
2548 vaddr_t va = pv->pv_va & PV_VAMASK; 2548 vaddr_t va = pv->pv_va & PV_VAMASK;
2549 2549
2550 /* First clear the mod bit in the PTE and make it R/O */ 2550 /* First clear the mod bit in the PTE and make it R/O */
2551 data = pseg_get(pmap, va); 2551 data = pseg_get(pmap, va);
2552 KASSERT(data & TLB_V); 2552 KASSERT(data & TLB_V);
2553 /* Need to both clear the modify and write bits */ 2553 /* Need to both clear the modify and write bits */
2554 if (data & TLB_MODIFY) 2554 if (data & TLB_MODIFY)
2555 changed |= 1; 2555 changed |= 1;
2556#ifdef HWREF 2556#ifdef HWREF
2557 data &= ~(TLB_MODIFY|TLB_W); 2557 data &= ~(TLB_MODIFY|TLB_W);
2558#else 2558#else
2559 data &= ~(TLB_MODIFY|TLB_W|TLB_REAL_W); 2559 data &= ~(TLB_MODIFY|TLB_W|TLB_REAL_W);
2560#endif 2560#endif
2561 rv = pseg_set(pmap, va, data, 0); 2561 rv = pseg_set(pmap, va, data, 0);
2562 if (rv & 1) 2562 if (rv & 1)
2563 printf("pmap_clear_modify: pseg_set needs" 2563 printf("pmap_clear_modify: pseg_set needs"
2564 " spare! rv=%d\n", rv); 2564 " spare! rv=%d\n", rv);
2565 if (pmap_is_on_mmu(pmap)) { 2565 if (pmap_is_on_mmu(pmap)) {
2566 KASSERT(pmap_ctx(pmap)>=0); 2566 KASSERT(pmap_ctx(pmap)>=0);
2567 tsb_invalidate(va, pmap); 2567 tsb_invalidate(va, pmap);
2568 tlb_flush_pte(va, pmap); 2568 tlb_flush_pte(va, pmap);
2569 } 2569 }
2570 /* Then clear the mod bit in the pv */ 2570 /* Then clear the mod bit in the pv */
2571 if (pv->pv_va & PV_MOD) { 2571 if (pv->pv_va & PV_MOD) {
2572 changed |= 1; 2572 changed |= 1;
2573 pv->pv_va &= ~PV_MOD; 2573 pv->pv_va &= ~PV_MOD;
2574 } 2574 }
2575 } 2575 }
2576 } 2576 }
2577 pv_check(); 2577 pv_check();
2578 mutex_exit(&pmap_lock); 2578 mutex_exit(&pmap_lock);
2579#ifdef DEBUG 2579#ifdef DEBUG
2580 if (pmap_is_modified(pg)) { 
2581 printf("pmap_clear_modify(): %p still modified!\n", pg); 
2582 Debugger(); 
2583 } 
2584 DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_modify: pg %p %s\n", pg, 2580 DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_modify: pg %p %s\n", pg,
2585 (changed ? "was modified" : "was not modified"))); 2581 (changed ? "was modified" : "was not modified")));
2586 if (modified != changed) { 2582 if (modified && modified != changed) {
2587 printf("pmap_clear_modify: modified %d changed %d\n", 2583 printf("pmap_clear_modify: modified %d changed %d\n",
2588 modified, changed); 2584 modified, changed);
2589 Debugger(); 2585 Debugger();
2590 } else return (modified); 2586 }
2591#endif 2587#endif
2592 return (changed); 2588 return (changed);
2593} 2589}
2594 2590
2595bool 2591bool
2596pmap_clear_reference(struct vm_page *pg) 2592pmap_clear_reference(struct vm_page *pg)
2597{ 2593{
2598 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 2594 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2599 pv_entry_t pv; 2595 pv_entry_t pv;
2600 int rv; 2596 int rv;
2601 int changed = 0; 2597 int changed = 0;
2602#ifdef DEBUG 2598#ifdef DEBUG
2603 int referenced = 0; 2599 int referenced = 0;
2604#endif 2600#endif
2605 2601
2606 mutex_enter(&pmap_lock); 2602 mutex_enter(&pmap_lock);
2607#ifdef DEBUG 2603#ifdef DEBUG
2608 DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_reference(%p)\n", pg)); 2604 DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_reference(%p)\n", pg));
2609 referenced = pmap_is_referenced_locked(pg); 2605 referenced = pmap_is_referenced_locked(pg);
2610#endif 2606#endif
2611 /* Clear all references */ 2607 /* Clear all references */
2612 pv = &md->mdpg_pvh; 2608 pv = &md->mdpg_pvh;
2613 if (pv->pv_va & PV_REF) { 2609 if (pv->pv_va & PV_REF) {
2614 changed |= 1; 2610 changed |= 1;
2615 pv->pv_va &= ~PV_REF; 2611 pv->pv_va &= ~PV_REF;
2616 } 2612 }
2617#ifdef DEBUG 2613#ifdef DEBUG
2618 if (pv->pv_next && !pv->pv_pmap) { 2614 if (pv->pv_next && !pv->pv_pmap) {
2619 printf("pmap_clear_reference: npv but no pmap for pv %p\n", pv); 2615 printf("pmap_clear_reference: npv but no pmap for pv %p\n", pv);
2620 Debugger(); 2616 Debugger();
2621 } 2617 }
2622#endif 2618#endif
2623 if (pv->pv_pmap != NULL) { 2619 if (pv->pv_pmap != NULL) {
2624 for (; pv; pv = pv->pv_next) { 2620 for (; pv; pv = pv->pv_next) {
2625 int64_t data; 2621 int64_t data;
2626 struct pmap *pmap = pv->pv_pmap; 2622 struct pmap *pmap = pv->pv_pmap;
2627 vaddr_t va = pv->pv_va & PV_VAMASK; 2623 vaddr_t va = pv->pv_va & PV_VAMASK;
2628 2624
2629 data = pseg_get(pmap, va); 2625 data = pseg_get(pmap, va);
2630 KASSERT(data & TLB_V); 2626 KASSERT(data & TLB_V);
2631 DPRINTF(PDB_CHANGEPROT, 2627 DPRINTF(PDB_CHANGEPROT,
2632 ("clearing ref pm:%p va:%p ctx:%lx data:%llx\n", 2628 ("clearing ref pm:%p va:%p ctx:%lx data:%llx\n",
2633 pmap, (void *)(u_long)va, 2629 pmap, (void *)(u_long)va,
2634 (u_long)pmap_ctx(pmap), 2630 (u_long)pmap_ctx(pmap),
2635 (long long)data)); 2631 (long long)data));
2636#ifdef HWREF 2632#ifdef HWREF
2637 if (data & TLB_ACCESS) { 2633 if (data & TLB_ACCESS) {
2638 changed |= 1; 2634 changed |= 1;
2639 data &= ~TLB_ACCESS; 2635 data &= ~TLB_ACCESS;
2640 } 2636 }
2641#else 2637#else
2642 if (data < 0) 2638 if (data < 0)
2643 changed |= 1; 2639 changed |= 1;
2644 data = 0; 2640 data = 0;
2645#endif 2641#endif
2646 rv = pseg_set(pmap, va, data, 0); 2642 rv = pseg_set(pmap, va, data, 0);
2647 if (rv & 1) 2643 if (rv & 1)
2648 panic("pmap_clear_reference: pseg_set needs" 2644 panic("pmap_clear_reference: pseg_set needs"
2649 " spare! rv=%d\n", rv); 2645 " spare! rv=%d\n", rv);
2650 if (pmap_is_on_mmu(pmap)) { 2646 if (pmap_is_on_mmu(pmap)) {
2651 KASSERT(pmap_ctx(pmap)>=0); 2647 KASSERT(pmap_ctx(pmap)>=0);
2652 tsb_invalidate(va, pmap); 2648 tsb_invalidate(va, pmap);
2653 tlb_flush_pte(va, pmap); 2649 tlb_flush_pte(va, pmap);
2654 } 2650 }
2655 if (pv->pv_va & PV_REF) { 2651 if (pv->pv_va & PV_REF) {
2656 changed |= 1; 2652 changed |= 1;
2657 pv->pv_va &= ~PV_REF; 2653 pv->pv_va &= ~PV_REF;
2658 } 2654 }
2659 } 2655 }
2660 } 2656 }
2661 dcache_flush_page_all(VM_PAGE_TO_PHYS(pg)); 2657 dcache_flush_page_all(VM_PAGE_TO_PHYS(pg));
2662 pv_check(); 2658 pv_check();
2663#ifdef DEBUG 2659#ifdef DEBUG
2664 if (pmap_is_referenced_locked(pg)) { 2660 if (pmap_is_referenced_locked(pg)) {
2665 pv = &md->mdpg_pvh; 2661 pv = &md->mdpg_pvh;
2666 printf("pmap_clear_reference(): %p still referenced " 2662 printf("pmap_clear_reference(): %p still referenced "
2667 "(pmap = %p, ctx = %d)\n", pg, pv->pv_pmap, 2663 "(pmap = %p, ctx = %d)\n", pg, pv->pv_pmap,
2668 pv->pv_pmap ? pmap_ctx(pv->pv_pmap) : 0); 2664 pv->pv_pmap ? pmap_ctx(pv->pv_pmap) : 0);
2669 Debugger(); 2665 Debugger();
2670 } 2666 }
2671 DPRINTF(PDB_CHANGEPROT|PDB_REF, 2667 DPRINTF(PDB_CHANGEPROT|PDB_REF,
2672 ("pmap_clear_reference: pg %p %s\n", pg, 2668 ("pmap_clear_reference: pg %p %s\n", pg,
2673 (changed ? "was referenced" : "was not referenced"))); 2669 (changed ? "was referenced" : "was not referenced")));
2674 if (referenced != changed) { 2670 if (referenced != changed) {
2675 printf("pmap_clear_reference: referenced %d changed %d\n", 2671 printf("pmap_clear_reference: referenced %d changed %d\n",
2676 referenced, changed); 2672 referenced, changed);
2677 Debugger(); 2673 Debugger();
2678 } else { 2674 } else {
2679 mutex_exit(&pmap_lock); 2675 mutex_exit(&pmap_lock);
2680 return (referenced); 2676 return (referenced);
2681 } 2677 }
2682#endif 2678#endif
2683 mutex_exit(&pmap_lock); 2679 mutex_exit(&pmap_lock);
2684 return (changed); 2680 return (changed);
2685} 2681}
2686 2682
2687bool 2683bool
2688pmap_is_modified(struct vm_page *pg) 2684pmap_is_modified(struct vm_page *pg)
2689{ 2685{
2690 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 2686 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2691 pv_entry_t pv, npv; 2687 pv_entry_t pv, npv;
2692 bool res = false; 2688 bool res = false;
2693 2689
2694 /* Check if any mapping has been modified */ 2690 /* Check if any mapping has been modified */
2695 pv = &md->mdpg_pvh; 2691 pv = &md->mdpg_pvh;
2696 if (pv->pv_va & PV_MOD) 2692 if (pv->pv_va & PV_MOD)
2697 res = true; 2693 res = true;
2698#ifdef HWREF 2694#ifdef HWREF
2699#ifdef DEBUG 2695#ifdef DEBUG
2700 if (pv->pv_next && !pv->pv_pmap) { 2696 if (pv->pv_next && !pv->pv_pmap) {
2701 printf("pmap_is_modified: npv but no pmap for pv %p\n", pv); 2697 printf("pmap_is_modified: npv but no pmap for pv %p\n", pv);
2702 Debugger(); 2698 Debugger();
2703 } 2699 }
2704#endif 2700#endif
2705 if (!res && pv->pv_pmap != NULL) { 2701 if (!res && pv->pv_pmap != NULL) {
2706 mutex_enter(&pmap_lock); 2702 mutex_enter(&pmap_lock);
2707 for (npv = pv; !res && npv && npv->pv_pmap; 2703 for (npv = pv; !res && npv && npv->pv_pmap;
2708 npv = npv->pv_next) { 2704 npv = npv->pv_next) {
2709 int64_t data; 2705 int64_t data;
2710 2706
2711 data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK); 2707 data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
2712 KASSERT(data & TLB_V); 2708 KASSERT(data & TLB_V);
2713 if (data & TLB_MODIFY) 2709 if (data & TLB_MODIFY)
2714 res = true; 2710 res = true;
2715 2711
2716 /* Migrate modify info to head pv */ 2712 /* Migrate modify info to head pv */
2717 if (npv->pv_va & PV_MOD) { 2713 if (npv->pv_va & PV_MOD) {
2718 res = true; 2714 res = true;
2719 npv->pv_va &= ~PV_MOD; 2715 npv->pv_va &= ~PV_MOD;
2720 } 2716 }
2721 } 2717 }
2722 /* Save modify info */ 2718 /* Save modify info */
2723 if (res) 2719 if (res)
2724 pv->pv_va |= PV_MOD; 2720 pv->pv_va |= PV_MOD;
2725#ifdef DEBUG 2721#ifdef DEBUG
2726 if (res) 2722 if (res)
2727 pv->pv_va |= PV_WE; 2723 pv->pv_va |= PV_WE;
2728#endif 2724#endif
2729 mutex_exit(&pmap_lock); 2725 mutex_exit(&pmap_lock);
2730 } 2726 }
2731#endif 2727#endif
2732 2728
2733 DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_is_modified(%p) = %d\n", pg, 2729 DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_is_modified(%p) = %d\n", pg,
2734 res)); 2730 res));
2735 pv_check(); 2731 pv_check();
2736 return res; 2732 return res;
2737} 2733}
2738 2734
2739/* 2735/*
2740 * Variant of pmap_is_referenced() where caller already holds pmap_lock 2736 * Variant of pmap_is_referenced() where caller already holds pmap_lock
2741 */ 2737 */
2742static bool 2738static bool
2743pmap_is_referenced_locked(struct vm_page *pg) 2739pmap_is_referenced_locked(struct vm_page *pg)
2744{ 2740{
2745 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 2741 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2746 pv_entry_t pv, npv; 2742 pv_entry_t pv, npv;
2747 bool res = false; 2743 bool res = false;
2748 2744
2749 KASSERT(mutex_owned(&pmap_lock)); 2745 KASSERT(mutex_owned(&pmap_lock));
2750 2746
2751 /* Check if any mapping has been referenced */ 2747 /* Check if any mapping has been referenced */
2752 pv = &md->mdpg_pvh; 2748 pv = &md->mdpg_pvh;
2753 if (pv->pv_va & PV_REF) 2749 if (pv->pv_va & PV_REF)
2754 return true; 2750 return true;
2755 2751
2756#ifdef HWREF 2752#ifdef HWREF
2757#ifdef DEBUG 2753#ifdef DEBUG
2758 if (pv->pv_next && !pv->pv_pmap) { 2754 if (pv->pv_next && !pv->pv_pmap) {
2759 printf("pmap_is_referenced: npv but no pmap for pv %p\n", pv); 2755 printf("pmap_is_referenced: npv but no pmap for pv %p\n", pv);
2760 Debugger(); 2756 Debugger();
2761 } 2757 }
2762#endif 2758#endif
2763 if (pv->pv_pmap == NULL) 2759 if (pv->pv_pmap == NULL)
2764 return false; 2760 return false;
2765 2761
2766 for (npv = pv; npv; npv = npv->pv_next) { 2762 for (npv = pv; npv; npv = npv->pv_next) {
2767 int64_t data; 2763 int64_t data;
2768 2764
2769 data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK); 2765 data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
2770 KASSERT(data & TLB_V); 2766 KASSERT(data & TLB_V);
2771 if (data & TLB_ACCESS) 2767 if (data & TLB_ACCESS)
2772 res = true; 2768 res = true;
2773 2769
2774 /* Migrate ref info to head pv */ 2770 /* Migrate ref info to head pv */
2775 if (npv->pv_va & PV_REF) { 2771 if (npv->pv_va & PV_REF) {
2776 res = true; 2772 res = true;
2777 npv->pv_va &= ~PV_REF; 2773 npv->pv_va &= ~PV_REF;
2778 } 2774 }
2779 } 2775 }
2780 /* Save ref info */ 2776 /* Save ref info */
2781 if (res) 2777 if (res)
2782 pv->pv_va |= PV_REF; 2778 pv->pv_va |= PV_REF;
2783#endif 2779#endif
2784 2780
2785 DPRINTF(PDB_CHANGEPROT|PDB_REF, 2781 DPRINTF(PDB_CHANGEPROT|PDB_REF,
2786 ("pmap_is_referenced(%p) = %d\n", pg, res)); 2782 ("pmap_is_referenced(%p) = %d\n", pg, res));
2787 pv_check(); 2783 pv_check();
2788 return res; 2784 return res;
2789} 2785}
2790 2786
2791bool 2787bool
2792pmap_is_referenced(struct vm_page *pg) 2788pmap_is_referenced(struct vm_page *pg)
2793{ 2789{
2794 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 2790 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2795 pv_entry_t pv; 2791 pv_entry_t pv;
2796 bool res = false; 2792 bool res = false;
2797 2793
2798 /* Check if any mapping has been referenced */ 2794 /* Check if any mapping has been referenced */
2799 pv = &md->mdpg_pvh; 2795 pv = &md->mdpg_pvh;
2800 if (pv->pv_va & PV_REF) 2796 if (pv->pv_va & PV_REF)
2801 return true; 2797 return true;
2802 2798
2803#ifdef HWREF 2799#ifdef HWREF
2804#ifdef DEBUG 2800#ifdef DEBUG
2805 if (pv->pv_next && !pv->pv_pmap) { 2801 if (pv->pv_next && !pv->pv_pmap) {
2806 printf("pmap_is_referenced: npv but no pmap for pv %p\n", pv); 2802 printf("pmap_is_referenced: npv but no pmap for pv %p\n", pv);
2807 Debugger(); 2803 Debugger();
2808 } 2804 }
2809#endif 2805#endif
2810 if (pv->pv_pmap != NULL) { 2806 if (pv->pv_pmap != NULL) {
2811 mutex_enter(&pmap_lock); 2807 mutex_enter(&pmap_lock);
2812 res = pmap_is_referenced_locked(pg); 2808 res = pmap_is_referenced_locked(pg);
2813 mutex_exit(&pmap_lock); 2809 mutex_exit(&pmap_lock);
2814 } 2810 }
2815#endif 2811#endif
2816 2812
2817 DPRINTF(PDB_CHANGEPROT|PDB_REF, 2813 DPRINTF(PDB_CHANGEPROT|PDB_REF,
2818 ("pmap_is_referenced(%p) = %d\n", pg, res)); 2814 ("pmap_is_referenced(%p) = %d\n", pg, res));
2819 pv_check(); 2815 pv_check();
2820 return res; 2816 return res;
2821} 2817}
2822 2818
2823 2819
2824 2820
2825/* 2821/*
2826 * Routine: pmap_unwire 2822 * Routine: pmap_unwire
2827 * Function: Clear the wired attribute for a map/virtual-address 2823 * Function: Clear the wired attribute for a map/virtual-address
2828 * pair. 2824 * pair.
2829 * In/out conditions: 2825 * In/out conditions:
2830 * The mapping must already exist in the pmap. 2826 * The mapping must already exist in the pmap.
2831 */ 2827 */
2832void 2828void
2833pmap_unwire(pmap_t pmap, vaddr_t va) 2829pmap_unwire(pmap_t pmap, vaddr_t va)
2834{ 2830{
2835 int64_t data; 2831 int64_t data;
2836 int rv; 2832 int rv;
2837 2833
2838 DPRINTF(PDB_MMU_STEAL, ("pmap_unwire(%p, %lx)\n", pmap, va)); 2834 DPRINTF(PDB_MMU_STEAL, ("pmap_unwire(%p, %lx)\n", pmap, va));
2839 2835
2840#ifdef DEBUG 2836#ifdef DEBUG
2841 /* 2837 /*
2842 * Is this part of the permanent 4MB mapping? 2838 * Is this part of the permanent 4MB mapping?
2843 */ 2839 */
2844 if (pmap == pmap_kernel() && va >= ktext && 2840 if (pmap == pmap_kernel() && va >= ktext &&
2845 va < roundup(ekdata, 4*MEG)) { 2841 va < roundup(ekdata, 4*MEG)) {
2846 prom_printf("pmap_unwire: va=%08x in locked TLB\n", va); 2842 prom_printf("pmap_unwire: va=%08x in locked TLB\n", va);
2847 prom_abort(); 2843 prom_abort();
2848 return; 2844 return;
2849 } 2845 }
2850#endif 2846#endif
2851 data = pseg_get(pmap, va & PV_VAMASK); 2847 data = pseg_get(pmap, va & PV_VAMASK);
2852 KASSERT(data & TLB_V); 2848 KASSERT(data & TLB_V);
2853 data &= ~TLB_TSB_LOCK; 2849 data &= ~TLB_TSB_LOCK;
2854 rv = pseg_set(pmap, va & PV_VAMASK, data, 0); 2850 rv = pseg_set(pmap, va & PV_VAMASK, data, 0);
2855 if (rv & 1) 2851 if (rv & 1)
2856 panic("pmap_unwire: pseg_set needs spare! rv=%d\n", rv); 2852 panic("pmap_unwire: pseg_set needs spare! rv=%d\n", rv);
2857 pv_check(); 2853 pv_check();
2858} 2854}
2859 2855
2860/* 2856/*
2861 * Lower the protection on the specified physical page. 2857 * Lower the protection on the specified physical page.
2862 * 2858 *
2863 * Never enable writing as it will break COW 2859 * Never enable writing as it will break COW
2864 */ 2860 */
2865 2861
2866void 2862void
2867pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 2863pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2868{ 2864{
2869 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 2865 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2870 int64_t clear, set; 2866 int64_t clear, set;
2871 int64_t data = 0; 2867 int64_t data = 0;
2872 int rv; 2868 int rv;
2873 pv_entry_t pv, npv, freepv = NULL; 2869 pv_entry_t pv, npv, freepv = NULL;
2874 struct pmap *pmap; 2870 struct pmap *pmap;
2875 vaddr_t va; 2871 vaddr_t va;
2876 bool needflush = FALSE; 2872 bool needflush = FALSE;
2877 2873
2878 DPRINTF(PDB_CHANGEPROT, 2874 DPRINTF(PDB_CHANGEPROT,
2879 ("pmap_page_protect: pg %p prot %x\n", pg, prot)); 2875 ("pmap_page_protect: pg %p prot %x\n", pg, prot));
2880 2876
2881 mutex_enter(&pmap_lock); 2877 mutex_enter(&pmap_lock);
2882 pv = &md->mdpg_pvh; 2878 pv = &md->mdpg_pvh;
2883 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) { 2879 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
2884 /* copy_on_write */ 2880 /* copy_on_write */
2885 2881
2886 set = TLB_V; 2882 set = TLB_V;
2887 clear = TLB_REAL_W|TLB_W; 2883 clear = TLB_REAL_W|TLB_W;
2888 if (VM_PROT_EXECUTE & prot) 2884 if (VM_PROT_EXECUTE & prot)
2889 set |= TLB_EXEC; 2885 set |= TLB_EXEC;
2890 else 2886 else
2891 clear |= TLB_EXEC; 2887 clear |= TLB_EXEC;
2892 if (VM_PROT_EXECUTE == prot) 2888 if (VM_PROT_EXECUTE == prot)
2893 set |= TLB_EXEC_ONLY; 2889 set |= TLB_EXEC_ONLY;
2894 2890
2895#ifdef DEBUG 2891#ifdef DEBUG
2896 if (pv->pv_next && !pv->pv_pmap) { 2892 if (pv->pv_next && !pv->pv_pmap) {
2897 printf("pmap_page_protect: no pmap for pv %p\n", pv); 2893 printf("pmap_page_protect: no pmap for pv %p\n", pv);
2898 Debugger(); 2894 Debugger();
2899 } 2895 }
2900#endif 2896#endif
2901 if (pv->pv_pmap != NULL) { 2897 if (pv->pv_pmap != NULL) {
2902 for (; pv; pv = pv->pv_next) { 2898 for (; pv; pv = pv->pv_next) {
2903 pmap = pv->pv_pmap; 2899 pmap = pv->pv_pmap;
2904 va = pv->pv_va & PV_VAMASK; 2900 va = pv->pv_va & PV_VAMASK;
2905 2901
2906 DPRINTF(PDB_CHANGEPROT | PDB_REF, 2902 DPRINTF(PDB_CHANGEPROT | PDB_REF,
2907 ("pmap_page_protect: " 2903 ("pmap_page_protect: "
2908 "RO va %p of pg %p...\n", 2904 "RO va %p of pg %p...\n",
2909 (void *)(u_long)pv->pv_va, pg)); 2905 (void *)(u_long)pv->pv_va, pg));
2910 data = pseg_get(pmap, va); 2906 data = pseg_get(pmap, va);
2911 KASSERT(data & TLB_V); 2907 KASSERT(data & TLB_V);
2912 2908
2913 /* Save REF/MOD info */ 2909 /* Save REF/MOD info */
2914 if (data & TLB_ACCESS) 2910 if (data & TLB_ACCESS)
2915 pv->pv_va |= PV_REF; 2911 pv->pv_va |= PV_REF;
2916 if (data & TLB_MODIFY) 2912 if (data & TLB_MODIFY)
2917 pv->pv_va |= PV_MOD; 2913 pv->pv_va |= PV_MOD;
2918 2914
2919 data &= ~clear; 2915 data &= ~clear;
2920 data |= set; 2916 data |= set;
2921 rv = pseg_set(pmap, va, data, 0); 2917 rv = pseg_set(pmap, va, data, 0);
2922 if (rv & 1) 2918 if (rv & 1)
2923 panic("pmap_page_protect: " 2919 panic("pmap_page_protect: "
2924 "pseg_set needs spare! rv=%d\n", 2920 "pseg_set needs spare! rv=%d\n",
2925 rv); 2921 rv);
2926 if (pmap_is_on_mmu(pmap)) { 2922 if (pmap_is_on_mmu(pmap)) {
2927 KASSERT(pmap_ctx(pmap)>=0); 2923 KASSERT(pmap_ctx(pmap)>=0);
2928 tsb_invalidate(va, pmap); 2924 tsb_invalidate(va, pmap);
2929 tlb_flush_pte(va, pmap); 2925 tlb_flush_pte(va, pmap);
2930 } 2926 }
2931 } 2927 }
2932 } 2928 }
2933 } else { 2929 } else {
2934 /* remove mappings */ 2930 /* remove mappings */
2935 DPRINTF(PDB_REMOVE, 2931 DPRINTF(PDB_REMOVE,
2936 ("pmap_page_protect: demapping pg %p\n", pg)); 2932 ("pmap_page_protect: demapping pg %p\n", pg));
2937 2933
2938 /* First remove the entire list of continuation pv's */ 2934 /* First remove the entire list of continuation pv's */
2939 for (npv = pv->pv_next; npv; npv = pv->pv_next) { 2935 for (npv = pv->pv_next; npv; npv = pv->pv_next) {
2940 pmap = npv->pv_pmap; 2936 pmap = npv->pv_pmap;
2941 va = npv->pv_va & PV_VAMASK; 2937 va = npv->pv_va & PV_VAMASK;
2942 2938
2943 /* We're removing npv from pv->pv_next */ 2939 /* We're removing npv from pv->pv_next */
2944 DPRINTF(PDB_CHANGEPROT|PDB_REF|PDB_REMOVE, 2940 DPRINTF(PDB_CHANGEPROT|PDB_REF|PDB_REMOVE,
2945 ("pmap_page_protect: " 2941 ("pmap_page_protect: "
2946 "demap va %p of pg %p in pmap %p...\n", 2942 "demap va %p of pg %p in pmap %p...\n",
2947 (void *)(u_long)va, pg, pmap)); 2943 (void *)(u_long)va, pg, pmap));
2948 2944
2949 /* clear the entry in the page table */ 2945 /* clear the entry in the page table */
2950 data = pseg_get(pmap, va); 2946 data = pseg_get(pmap, va);
2951 KASSERT(data & TLB_V); 2947 KASSERT(data & TLB_V);
2952 2948
2953 /* Save ref/mod info */ 2949 /* Save ref/mod info */
2954 if (data & TLB_ACCESS) 2950 if (data & TLB_ACCESS)
2955 pv->pv_va |= PV_REF; 2951 pv->pv_va |= PV_REF;
2956 if (data & TLB_MODIFY) 2952 if (data & TLB_MODIFY)
2957 pv->pv_va |= PV_MOD; 2953 pv->pv_va |= PV_MOD;
2958 /* Clear mapping */ 2954 /* Clear mapping */
2959 rv = pseg_set(pmap, va, 0, 0); 2955 rv = pseg_set(pmap, va, 0, 0);
2960 if (rv & 1) 2956 if (rv & 1)
2961 panic("pmap_page_protect: pseg_set needs" 2957 panic("pmap_page_protect: pseg_set needs"
2962 " spare! rv=%d\n", rv); 2958 " spare! rv=%d\n", rv);
2963 if (pmap_is_on_mmu(pmap)) { 2959 if (pmap_is_on_mmu(pmap)) {
2964 KASSERT(pmap_ctx(pmap)>=0); 2960 KASSERT(pmap_ctx(pmap)>=0);
2965 tsb_invalidate(va, pmap); 2961 tsb_invalidate(va, pmap);
2966 tlb_flush_pte(va, pmap); 2962 tlb_flush_pte(va, pmap);
2967 } 2963 }
2968 if (pmap->pm_refs > 0) { 2964 if (pmap->pm_refs > 0) {
2969 needflush = TRUE; 2965 needflush = TRUE;
2970 } 2966 }
2971 2967
2972 /* free the pv */ 2968 /* free the pv */
2973 pv->pv_next = npv->pv_next; 2969 pv->pv_next = npv->pv_next;
2974 npv->pv_next = freepv; 2970 npv->pv_next = freepv;
2975 freepv = npv; 2971 freepv = npv;
2976 } 2972 }
2977 2973
2978 /* Then remove the primary pv */ 2974 /* Then remove the primary pv */
2979#ifdef DEBUG 2975#ifdef DEBUG
2980 if (pv->pv_next && !pv->pv_pmap) { 2976 if (pv->pv_next && !pv->pv_pmap) {
2981 printf("pmap_page_protect: no pmap for pv %p\n", pv); 2977 printf("pmap_page_protect: no pmap for pv %p\n", pv);
2982 Debugger(); 2978 Debugger();
2983 } 2979 }
2984#endif 2980#endif
2985 if (pv->pv_pmap != NULL) { 2981 if (pv->pv_pmap != NULL) {
2986 pmap = pv->pv_pmap; 2982 pmap = pv->pv_pmap;
2987 va = pv->pv_va & PV_VAMASK; 2983 va = pv->pv_va & PV_VAMASK;
2988 2984
2989 DPRINTF(PDB_CHANGEPROT|PDB_REF|PDB_REMOVE, 2985 DPRINTF(PDB_CHANGEPROT|PDB_REF|PDB_REMOVE,
2990 ("pmap_page_protect: " 2986 ("pmap_page_protect: "
2991 "demap va %p of pg %p from pm %p...\n", 2987 "demap va %p of pg %p from pm %p...\n",
2992 (void *)(u_long)va, pg, pmap)); 2988 (void *)(u_long)va, pg, pmap));
2993 2989
2994 data = pseg_get(pmap, va); 2990 data = pseg_get(pmap, va);
2995 KASSERT(data & TLB_V); 2991 KASSERT(data & TLB_V);
2996 /* Save ref/mod info */ 2992 /* Save ref/mod info */
2997 if (data & TLB_ACCESS) 2993 if (data & TLB_ACCESS)
2998 pv->pv_va |= PV_REF; 2994 pv->pv_va |= PV_REF;
2999 if (data & TLB_MODIFY) 2995 if (data & TLB_MODIFY)
3000 pv->pv_va |= PV_MOD; 2996 pv->pv_va |= PV_MOD;
3001 rv = pseg_set(pmap, va, 0, 0); 2997 rv = pseg_set(pmap, va, 0, 0);
3002 if (rv & 1) 2998 if (rv & 1)
3003 panic("pmap_page_protect: pseg_set needs" 2999 panic("pmap_page_protect: pseg_set needs"
3004 " spare! rv=%d\n", rv); 3000 " spare! rv=%d\n", rv);
3005 if (pmap_is_on_mmu(pmap)) { 3001 if (pmap_is_on_mmu(pmap)) {
3006 KASSERT(pmap_ctx(pmap)>=0); 3002 KASSERT(pmap_ctx(pmap)>=0);
3007 tsb_invalidate(va, pmap); 3003 tsb_invalidate(va, pmap);
3008 tlb_flush_pte(va, pmap); 3004 tlb_flush_pte(va, pmap);
3009 } 3005 }
3010 if (pmap->pm_refs > 0) { 3006 if (pmap->pm_refs > 0) {
3011 needflush = TRUE; 3007 needflush = TRUE;
3012 } 3008 }
3013 npv = pv->pv_next; 3009 npv = pv->pv_next;
3014 /* dump the first pv */ 3010 /* dump the first pv */
3015 if (npv) { 3011 if (npv) {
3016 /* First save mod/ref bits */ 3012 /* First save mod/ref bits */
3017 pv->pv_pmap = npv->pv_pmap; 3013 pv->pv_pmap = npv->pv_pmap;
3018 pv->pv_va = (pv->pv_va & PV_MASK) | npv->pv_va; 3014 pv->pv_va = (pv->pv_va & PV_MASK) | npv->pv_va;
3019 pv->pv_next = npv->pv_next; 3015 pv->pv_next = npv->pv_next;
3020 npv->pv_next = freepv; 3016 npv->pv_next = freepv;
3021 freepv = npv; 3017 freepv = npv;
3022 } else { 3018 } else {
3023 pv->pv_pmap = NULL; 3019 pv->pv_pmap = NULL;
3024 pv->pv_next = NULL; 3020 pv->pv_next = NULL;
3025 } 3021 }
3026 } 3022 }
3027 if (needflush) 3023 if (needflush)
3028 dcache_flush_page_all(VM_PAGE_TO_PHYS(pg)); 3024 dcache_flush_page_all(VM_PAGE_TO_PHYS(pg));
3029 } 3025 }
3030 /* We should really only flush the pages we demapped. */ 3026 /* We should really only flush the pages we demapped. */
3031 pv_check(); 3027 pv_check();
3032 mutex_exit(&pmap_lock); 3028 mutex_exit(&pmap_lock);
3033 3029
3034 /* Catch up on deferred frees. */ 3030 /* Catch up on deferred frees. */
3035 for (; freepv != NULL; freepv = npv) { 3031 for (; freepv != NULL; freepv = npv) {
3036 npv = freepv->pv_next; 3032 npv = freepv->pv_next;
3037 pool_cache_put(&pmap_pv_cache, freepv); 3033 pool_cache_put(&pmap_pv_cache, freepv);
3038 } 3034 }
3039} 3035}
3040 3036
3041#ifdef PMAP_COUNT_DEBUG 3037#ifdef PMAP_COUNT_DEBUG
3042/* 3038/*
3043 * count pages in pmap -- this can be slow. 3039 * count pages in pmap -- this can be slow.
3044 */ 3040 */
3045int 3041int
3046pmap_count_res(struct pmap *pm) 3042pmap_count_res(struct pmap *pm)
3047{ 3043{
3048 int64_t data; 3044 int64_t data;
3049 paddr_t *pdir, *ptbl; 3045 paddr_t *pdir, *ptbl;
3050 int i, j, k, n; 3046 int i, j, k, n;
3051 3047
3052 /* Don't want one of these pages reused while we're reading it. */ 3048 /* Don't want one of these pages reused while we're reading it. */
3053 mutex_enter(&pmap_lock); 3049 mutex_enter(&pmap_lock);
3054 n = 0; 3050 n = 0;
3055 for (i = 0; i < STSZ; i++) { 3051 for (i = 0; i < STSZ; i++) {
3056 pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i], 3052 pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i],
3057 ASI_PHYS_CACHED); 3053 ASI_PHYS_CACHED);
3058 if (pdir == NULL) { 3054 if (pdir == NULL) {
3059 continue; 3055 continue;
3060 } 3056 }
3061 for (k = 0; k < PDSZ; k++) { 3057 for (k = 0; k < PDSZ; k++) {
3062 ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k], 3058 ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k],
3063 ASI_PHYS_CACHED); 3059 ASI_PHYS_CACHED);
3064 if (ptbl == NULL) { 3060 if (ptbl == NULL) {
3065 continue; 3061 continue;
3066 } 3062 }
3067 for (j = 0; j < PTSZ; j++) { 3063 for (j = 0; j < PTSZ; j++) {
3068 data = (int64_t)ldxa((vaddr_t)&ptbl[j], 3064 data = (int64_t)ldxa((vaddr_t)&ptbl[j],
3069 ASI_PHYS_CACHED); 3065 ASI_PHYS_CACHED);
3070 if (data & TLB_V) 3066 if (data & TLB_V)
3071 n++; 3067 n++;
3072 } 3068 }
3073 } 3069 }
3074 } 3070 }
3075 mutex_exit(&pmap_lock); 3071 mutex_exit(&pmap_lock);
3076 3072
3077 if (pm->pm_stats.resident_count != n) 3073 if (pm->pm_stats.resident_count != n)
3078 printf("pmap_count_resident: pm_stats = %ld, counted: %d\n", 3074 printf("pmap_count_resident: pm_stats = %ld, counted: %d\n",
3079 pm->pm_stats.resident_count, n); 3075 pm->pm_stats.resident_count, n);
3080 3076
3081 return n; 3077 return n;
3082} 3078}
3083 3079
3084/* 3080/*
3085 * count wired pages in pmap -- this can be slow. 3081 * count wired pages in pmap -- this can be slow.
3086 */ 3082 */
3087int 3083int
3088pmap_count_wired(struct pmap *pm) 3084pmap_count_wired(struct pmap *pm)
3089{ 3085{
3090 int64_t data; 3086 int64_t data;
3091 paddr_t *pdir, *ptbl; 3087 paddr_t *pdir, *ptbl;
3092 int i, j, k, n; 3088 int i, j, k, n;
3093 3089
3094 /* Don't want one of these pages reused while we're reading it. */ 3090 /* Don't want one of these pages reused while we're reading it. */
3095 mutex_enter(&pmap_lock); /* XXX uvmplock */ 3091 mutex_enter(&pmap_lock); /* XXX uvmplock */
3096 n = 0; 3092 n = 0;
3097 for (i = 0; i < STSZ; i++) { 3093 for (i = 0; i < STSZ; i++) {
3098 pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i], 3094 pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i],
3099 ASI_PHYS_CACHED); 3095 ASI_PHYS_CACHED);
3100 if (pdir == NULL) { 3096 if (pdir == NULL) {
3101 continue; 3097 continue;
3102 } 3098 }
3103 for (k = 0; k < PDSZ; k++) { 3099 for (k = 0; k < PDSZ; k++) {
3104 ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k], 3100 ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k],
3105 ASI_PHYS_CACHED); 3101 ASI_PHYS_CACHED);
3106 if (ptbl == NULL) { 3102 if (ptbl == NULL) {
3107 continue; 3103 continue;
3108 } 3104 }
3109 for (j = 0; j < PTSZ; j++) { 3105 for (j = 0; j < PTSZ; j++) {
3110 data = (int64_t)ldxa((vaddr_t)&ptbl[j], 3106 data = (int64_t)ldxa((vaddr_t)&ptbl[j],
3111 ASI_PHYS_CACHED); 3107 ASI_PHYS_CACHED);
3112 if (data & TLB_TSB_LOCK) 3108 if (data & TLB_TSB_LOCK)
3113 n++; 3109 n++;
3114 } 3110 }
3115 } 3111 }
3116 } 3112 }
3117 mutex_exit(&pmap_lock); /* XXX uvmplock */ 3113 mutex_exit(&pmap_lock); /* XXX uvmplock */
3118 3114
3119 if (pm->pm_stats.wired_count != n) 3115 if (pm->pm_stats.wired_count != n)
3120 printf("pmap_count_wired: pm_stats = %ld, counted: %d\n", 3116 printf("pmap_count_wired: pm_stats = %ld, counted: %d\n",
3121 pm->pm_stats.wired_count, n); 3117 pm->pm_stats.wired_count, n);
3122 3118
3123 return n; 3119 return n;
3124} 3120}
3125#endif /* PMAP_COUNT_DEBUG */ 3121#endif /* PMAP_COUNT_DEBUG */
3126 3122
3127void 3123void
3128pmap_procwr(struct proc *p, vaddr_t va, size_t len) 3124pmap_procwr(struct proc *p, vaddr_t va, size_t len)
3129{ 3125{
3130 3126
3131 blast_icache(); 3127 blast_icache();
3132} 3128}
3133 3129
3134/* 3130/*
3135 * Allocate a hardware context to the given pmap. 3131 * Allocate a hardware context to the given pmap.
3136 */ 3132 */
3137static int 3133static int
3138ctx_alloc(struct pmap *pm) 3134ctx_alloc(struct pmap *pm)
3139{ 3135{
3140 int i, ctx; 3136 int i, ctx;
3141 3137
3142 KASSERT(pm != pmap_kernel()); 3138 KASSERT(pm != pmap_kernel());
3143 KASSERT(pm == curproc->p_vmspace->vm_map.pmap); 3139 KASSERT(pm == curproc->p_vmspace->vm_map.pmap);
3144 mutex_enter(&curcpu()->ci_ctx_lock); 3140 mutex_enter(&curcpu()->ci_ctx_lock);
3145 ctx = curcpu()->ci_pmap_next_ctx++; 3141 ctx = curcpu()->ci_pmap_next_ctx++;
3146 3142
3147 /* 3143 /*
3148 * if we have run out of contexts, remove all user entries from 3144 * if we have run out of contexts, remove all user entries from
3149 * the TSB, TLB and dcache and start over with context 1 again. 3145 * the TSB, TLB and dcache and start over with context 1 again.
3150 */ 3146 */
3151 3147
3152 if (ctx == curcpu()->ci_numctx) { 3148 if (ctx == curcpu()->ci_numctx) {
3153 DPRINTF(PDB_CTX_ALLOC|PDB_CTX_FLUSHALL, 3149 DPRINTF(PDB_CTX_ALLOC|PDB_CTX_FLUSHALL,
3154 ("ctx_alloc: cpu%d run out of contexts %d\n", 3150 ("ctx_alloc: cpu%d run out of contexts %d\n",
3155 cpu_number(), curcpu()->ci_numctx)); 3151 cpu_number(), curcpu()->ci_numctx));
3156 write_user_windows(); 3152 write_user_windows();
3157 while (!LIST_EMPTY(&curcpu()->ci_pmap_ctxlist)) { 3153 while (!LIST_EMPTY(&curcpu()->ci_pmap_ctxlist)) {
3158#ifdef MULTIPROCESSOR 3154#ifdef MULTIPROCESSOR
3159 KASSERT(pmap_ctx(LIST_FIRST(&curcpu()->ci_pmap_ctxlist)) != 0); 3155 KASSERT(pmap_ctx(LIST_FIRST(&curcpu()->ci_pmap_ctxlist)) != 0);
3160#endif 3156#endif
3161 ctx_free(LIST_FIRST(&curcpu()->ci_pmap_ctxlist), 3157 ctx_free(LIST_FIRST(&curcpu()->ci_pmap_ctxlist),
3162 curcpu()); 3158 curcpu());
3163 } 3159 }
3164 for (i = TSBENTS - 1; i >= 0; i--) { 3160 for (i = TSBENTS - 1; i >= 0; i--) {
3165 if (TSB_TAG_CTX(curcpu()->ci_tsb_dmmu[i].tag) != 0) { 3161 if (TSB_TAG_CTX(curcpu()->ci_tsb_dmmu[i].tag) != 0) {
3166 clrx(&curcpu()->ci_tsb_dmmu[i].data); 3162 clrx(&curcpu()->ci_tsb_dmmu[i].data);
3167 } 3163 }
3168 if (TSB_TAG_CTX(curcpu()->ci_tsb_immu[i].tag) != 0) { 3164 if (TSB_TAG_CTX(curcpu()->ci_tsb_immu[i].tag) != 0) {
3169 clrx(&curcpu()->ci_tsb_immu[i].data); 3165 clrx(&curcpu()->ci_tsb_immu[i].data);
3170 } 3166 }
3171 } 3167 }
3172 sp_tlb_flush_all(); 3168 sp_tlb_flush_all();
3173 ctx = 1; 3169 ctx = 1;
3174 curcpu()->ci_pmap_next_ctx = 2; 3170 curcpu()->ci_pmap_next_ctx = 2;
3175 } 3171 }
3176 curcpu()->ci_ctxbusy[ctx] = pm->pm_physaddr; 3172 curcpu()->ci_ctxbusy[ctx] = pm->pm_physaddr;
3177 LIST_INSERT_HEAD(&curcpu()->ci_pmap_ctxlist, pm, pm_list[cpu_number()]); 3173 LIST_INSERT_HEAD(&curcpu()->ci_pmap_ctxlist, pm, pm_list[cpu_number()]);
3178 pmap_ctx(pm) = ctx; 3174 pmap_ctx(pm) = ctx;
3179 mutex_exit(&curcpu()->ci_ctx_lock); 3175 mutex_exit(&curcpu()->ci_ctx_lock);
3180 DPRINTF(PDB_CTX_ALLOC, ("ctx_alloc: cpu%d allocated ctx %d\n", 3176 DPRINTF(PDB_CTX_ALLOC, ("ctx_alloc: cpu%d allocated ctx %d\n",
3181 cpu_number(), ctx)); 3177 cpu_number(), ctx));
3182 return ctx; 3178 return ctx;
3183} 3179}
3184 3180
3185/* 3181/*
3186 * Give away a context. 3182 * Give away a context.
3187 */ 3183 */
3188static void 3184static void
3189ctx_free(struct pmap *pm, struct cpu_info *ci) 3185ctx_free(struct pmap *pm, struct cpu_info *ci)
3190{ 3186{
3191 int oldctx; 3187 int oldctx;
3192 int cpunum; 3188 int cpunum;
3193 3189
3194 KASSERT(mutex_owned(&ci->ci_ctx_lock)); 3190 KASSERT(mutex_owned(&ci->ci_ctx_lock));
3195 3191
3196#ifdef MULTIPROCESSOR 3192#ifdef MULTIPROCESSOR
3197 cpunum = ci->ci_index; 3193 cpunum = ci->ci_index;
3198#else 3194#else
3199 /* Give the compiler a hint.. */ 3195 /* Give the compiler a hint.. */
3200 cpunum = 0; 3196 cpunum = 0;
3201#endif 3197#endif
3202 3198
3203 oldctx = pm->pm_ctx[cpunum]; 3199 oldctx = pm->pm_ctx[cpunum];
3204 if (oldctx == 0) 3200 if (oldctx == 0)
3205 return; 3201 return;
3206 3202
3207#ifdef DIAGNOSTIC 3203#ifdef DIAGNOSTIC
3208 if (pm == pmap_kernel()) 3204 if (pm == pmap_kernel())
3209 panic("ctx_free: freeing kernel context"); 3205 panic("ctx_free: freeing kernel context");
3210 if (ci->ci_ctxbusy[oldctx] == 0) 3206 if (ci->ci_ctxbusy[oldctx] == 0)
3211 printf("ctx_free: freeing free context %d\n", oldctx); 3207 printf("ctx_free: freeing free context %d\n", oldctx);
3212 if (ci->ci_ctxbusy[oldctx] != pm->pm_physaddr) { 3208 if (ci->ci_ctxbusy[oldctx] != pm->pm_physaddr) {
3213 printf("ctx_free: freeing someone else's context\n " 3209 printf("ctx_free: freeing someone else's context\n "
3214 "ctxbusy[%d] = %p, pm(%p)->pm_ctx = %p\n", 3210 "ctxbusy[%d] = %p, pm(%p)->pm_ctx = %p\n",
3215 oldctx, (void *)(u_long)ci->ci_ctxbusy[oldctx], pm, 3211 oldctx, (void *)(u_long)ci->ci_ctxbusy[oldctx], pm,
3216 (void *)(u_long)pm->pm_physaddr); 3212 (void *)(u_long)pm->pm_physaddr);
3217 Debugger(); 3213 Debugger();
3218 } 3214 }
3219#endif 3215#endif
3220 /* We should verify it has not been stolen and reallocated... */ 3216 /* We should verify it has not been stolen and reallocated... */
3221 DPRINTF(PDB_CTX_ALLOC, ("ctx_free: cpu%d freeing ctx %d\n", 3217 DPRINTF(PDB_CTX_ALLOC, ("ctx_free: cpu%d freeing ctx %d\n",
3222 cpu_number(), oldctx)); 3218 cpu_number(), oldctx));
3223 ci->ci_ctxbusy[oldctx] = 0UL; 3219 ci->ci_ctxbusy[oldctx] = 0UL;
3224 pm->pm_ctx[cpunum] = 0; 3220 pm->pm_ctx[cpunum] = 0;
3225 LIST_REMOVE(pm, pm_list[cpunum]); 3221 LIST_REMOVE(pm, pm_list[cpunum]);
3226} 3222}
3227 3223
3228/* 3224/*
3229 * Enter the pmap and virtual address into the 3225 * Enter the pmap and virtual address into the
3230 * physical to virtual map table. 3226 * physical to virtual map table.
3231 * 3227 *
3232 * We enter here with the pmap locked. 3228 * We enter here with the pmap locked.
3233 */ 3229 */
3234 3230
3235void 3231void
3236pmap_enter_pv(struct pmap *pmap, vaddr_t va, paddr_t pa, struct vm_page *pg, 3232pmap_enter_pv(struct pmap *pmap, vaddr_t va, paddr_t pa, struct vm_page *pg,
3237 pv_entry_t npv) 3233 pv_entry_t npv)
3238{ 3234{
3239 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 3235 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
3240 pv_entry_t pvh; 3236 pv_entry_t pvh;
3241 3237
3242 KASSERT(mutex_owned(&pmap_lock)); 3238 KASSERT(mutex_owned(&pmap_lock));
3243 3239
3244 pvh = &md->mdpg_pvh; 3240 pvh = &md->mdpg_pvh;
3245 DPRINTF(PDB_ENTER, ("pmap_enter: pvh %p: was %lx/%p/%p\n", 3241 DPRINTF(PDB_ENTER, ("pmap_enter: pvh %p: was %lx/%p/%p\n",
3246 pvh, pvh->pv_va, pvh->pv_pmap, pvh->pv_next)); 3242 pvh, pvh->pv_va, pvh->pv_pmap, pvh->pv_next));
3247 if (pvh->pv_pmap == NULL) { 3243 if (pvh->pv_pmap == NULL) {
3248 3244
3249 /* 3245 /*
3250 * No entries yet, use header as the first entry 3246 * No entries yet, use header as the first entry
3251 */ 3247 */
3252 DPRINTF(PDB_ENTER, ("pmap_enter: first pv: pmap %p va %lx\n", 3248 DPRINTF(PDB_ENTER, ("pmap_enter: first pv: pmap %p va %lx\n",
3253 pmap, va)); 3249 pmap, va));
3254 ENTER_STAT(firstpv); 3250 ENTER_STAT(firstpv);
3255 PV_SETVA(pvh, va); 3251 PV_SETVA(pvh, va);
3256 pvh->pv_pmap = pmap; 3252 pvh->pv_pmap = pmap;
3257 pvh->pv_next = NULL; 3253 pvh->pv_next = NULL;
3258 KASSERT(npv == NULL); 3254 KASSERT(npv == NULL);
3259 } else { 3255 } else {
3260 if (pg->loan_count == 0 && !(pvh->pv_va & PV_ALIAS)) { 3256 if (pg->loan_count == 0 && !(pvh->pv_va & PV_ALIAS)) {
3261 3257
3262 /* 3258 /*
3263 * There is at least one other VA mapping this page. 3259 * There is at least one other VA mapping this page.
3264 * Check if they are cache index compatible. If not 3260 * Check if they are cache index compatible. If not
3265 * remove all mappings, flush the cache and set page 3261 * remove all mappings, flush the cache and set page
3266 * to be mapped uncached. Caching will be restored 3262 * to be mapped uncached. Caching will be restored
3267 * when pages are mapped compatible again. 3263 * when pages are mapped compatible again.
3268 */ 3264 */
3269 if ((pvh->pv_va ^ va) & VA_ALIAS_MASK) { 3265 if ((pvh->pv_va ^ va) & VA_ALIAS_MASK) {
3270 pvh->pv_va |= PV_ALIAS; 3266 pvh->pv_va |= PV_ALIAS;
3271 pmap_page_cache(pmap, pa, 0); 3267 pmap_page_cache(pmap, pa, 0);
3272 ENTER_STAT(ci); 3268 ENTER_STAT(ci);
3273 } 3269 }
3274 } 3270 }
3275 3271
3276 /* 3272 /*
3277 * There is at least one other VA mapping this page. 3273 * There is at least one other VA mapping this page.
3278 * Place this entry after the header. 3274 * Place this entry after the header.
3279 */ 3275 */
3280 3276
3281 DPRINTF(PDB_ENTER, ("pmap_enter: new pv: pmap %p va %lx\n", 3277 DPRINTF(PDB_ENTER, ("pmap_enter: new pv: pmap %p va %lx\n",
3282 pmap, va)); 3278 pmap, va));
3283 npv->pv_pmap = pmap; 3279 npv->pv_pmap = pmap;
3284 npv->pv_va = va & PV_VAMASK; 3280 npv->pv_va = va & PV_VAMASK;
3285 npv->pv_next = pvh->pv_next; 3281 npv->pv_next = pvh->pv_next;
3286 pvh->pv_next = npv; 3282 pvh->pv_next = npv;
3287 3283
3288 if (!npv->pv_next) { 3284 if (!npv->pv_next) {
3289 ENTER_STAT(secondpv); 3285 ENTER_STAT(secondpv);
3290 } 3286 }
3291 } 3287 }
3292} 3288}
3293 3289
3294/* 3290/*
3295 * Remove a physical to virtual address translation. 3291 * Remove a physical to virtual address translation.
3296 */ 3292 */
3297 3293
3298pv_entry_t 3294pv_entry_t
3299pmap_remove_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg) 3295pmap_remove_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg)
3300{ 3296{
3301 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 3297 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
3302 pv_entry_t pvh, npv, pv; 3298 pv_entry_t pvh, npv, pv;
3303 int64_t data = 0; 3299 int64_t data = 0;
3304 3300
3305 KASSERT(mutex_owned(&pmap_lock)); 3301 KASSERT(mutex_owned(&pmap_lock));
3306 3302
3307 pvh = &md->mdpg_pvh; 3303 pvh = &md->mdpg_pvh;
3308 3304
3309 DPRINTF(PDB_REMOVE, ("pmap_remove_pv(pm=%p, va=%p, pg=%p)\n", pmap, 3305 DPRINTF(PDB_REMOVE, ("pmap_remove_pv(pm=%p, va=%p, pg=%p)\n", pmap,
3310 (void *)(u_long)va, pg)); 3306 (void *)(u_long)va, pg));
3311 pv_check(); 3307 pv_check();
3312 3308
3313 /* 3309 /*
3314 * Remove page from the PV table. 3310 * Remove page from the PV table.
3315 * If it is the first entry on the list, it is actually 3311 * If it is the first entry on the list, it is actually
3316 * in the header and we must copy the following entry up 3312 * in the header and we must copy the following entry up
3317 * to the header. Otherwise we must search the list for 3313 * to the header. Otherwise we must search the list for
3318 * the entry. In either case we free the now unused entry. 3314 * the entry. In either case we free the now unused entry.
3319 */ 3315 */
3320 if (pmap == pvh->pv_pmap && PV_MATCH(pvh, va)) { 3316 if (pmap == pvh->pv_pmap && PV_MATCH(pvh, va)) {
3321 data = pseg_get(pvh->pv_pmap, pvh->pv_va & PV_VAMASK); 3317 data = pseg_get(pvh->pv_pmap, pvh->pv_va & PV_VAMASK);
3322 KASSERT(data & TLB_V); 3318 KASSERT(data & TLB_V);
3323 npv = pvh->pv_next; 3319 npv = pvh->pv_next;
3324 if (npv) { 3320 if (npv) {
3325 /* First save mod/ref bits */ 3321 /* First save mod/ref bits */
3326 pvh->pv_va = (pvh->pv_va & PV_MASK) | npv->pv_va; 3322 pvh->pv_va = (pvh->pv_va & PV_MASK) | npv->pv_va;
3327 pvh->pv_next = npv->pv_next; 3323 pvh->pv_next = npv->pv_next;
3328 pvh->pv_pmap = npv->pv_pmap; 3324 pvh->pv_pmap = npv->pv_pmap;
3329 } else { 3325 } else {
3330 pvh->pv_pmap = NULL; 3326 pvh->pv_pmap = NULL;
3331 pvh->pv_next = NULL; 3327 pvh->pv_next = NULL;
3332 pvh->pv_va &= (PV_REF|PV_MOD); 3328 pvh->pv_va &= (PV_REF|PV_MOD);
3333 } 3329 }
3334 REMOVE_STAT(pvfirst); 3330 REMOVE_STAT(pvfirst);
3335 } else { 3331 } else {
3336 for (pv = pvh, npv = pvh->pv_next; npv; 3332 for (pv = pvh, npv = pvh->pv_next; npv;
3337 pv = npv, npv = npv->pv_next) { 3333 pv = npv, npv = npv->pv_next) {
3338 REMOVE_STAT(pvsearch); 3334 REMOVE_STAT(pvsearch);
3339 if (pmap == npv->pv_pmap && PV_MATCH(npv, va)) 3335 if (pmap == npv->pv_pmap && PV_MATCH(npv, va))
3340 break; 3336 break;
3341 } 3337 }
3342 pv->pv_next = npv->pv_next; 3338 pv->pv_next = npv->pv_next;
3343 data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK); 3339 data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
3344 KASSERT(data & TLB_V); 3340 KASSERT(data & TLB_V);
3345 } 3341 }
3346 3342
3347 /* Save ref/mod info */ 3343 /* Save ref/mod info */
3348 if (data & TLB_ACCESS) 3344 if (data & TLB_ACCESS)
3349 pvh->pv_va |= PV_REF; 3345 pvh->pv_va |= PV_REF;
3350 if (data & TLB_MODIFY) 3346 if (data & TLB_MODIFY)
3351 pvh->pv_va |= PV_MOD; 3347 pvh->pv_va |= PV_MOD;
3352 3348
3353 /* Check to see if the alias went away */ 3349 /* Check to see if the alias went away */
3354 if (pvh->pv_va & PV_ALIAS) { 3350 if (pvh->pv_va & PV_ALIAS) {
3355 pvh->pv_va &= ~PV_ALIAS; 3351 pvh->pv_va &= ~PV_ALIAS;
3356 for (pv = pvh; pv; pv = pv->pv_next) { 3352 for (pv = pvh; pv; pv = pv->pv_next) {
3357 if ((pv->pv_va ^ pvh->pv_va) & VA_ALIAS_MASK) { 3353 if ((pv->pv_va ^ pvh->pv_va) & VA_ALIAS_MASK) {
3358 pvh->pv_va |= PV_ALIAS; 3354 pvh->pv_va |= PV_ALIAS;
3359 break; 3355 break;
3360 } 3356 }
3361 } 3357 }
3362 if (!(pvh->pv_va & PV_ALIAS)) 3358 if (!(pvh->pv_va & PV_ALIAS))
3363 pmap_page_cache(pmap, VM_PAGE_TO_PHYS(pg), 1); 3359 pmap_page_cache(pmap, VM_PAGE_TO_PHYS(pg), 1);
3364 } 3360 }
3365 pv_check(); 3361 pv_check();
3366 return npv; 3362 return npv;
3367} 3363}
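
The head-copy trick described in the comment above (also used by pmap_page_protect()) is easier to see without the MMU bookkeeping. A minimal sketch, with an illustrative structure and field names rather than the kernel's own definitions:

/*
 * Sketch: the first entry lives inside the page header, so removing it
 * means copying the follower up into the header and freeing the follower.
 * The low bits of "va" stand in for the accumulated REF/MOD flag bits.
 */
struct pvent {
	struct pvent *next;
	unsigned long va;
};

static void
pv_remove_head_sketch(struct pvent *head)
{
	struct pvent *follow = head->next;

	if (follow != NULL) {
		/* Keep the header's flag bits, take the follower's va. */
		head->va = (head->va & 0x3UL) | follow->va;
		head->next = follow->next;
		/* follow can now be returned to the pv pool. */
	} else {
		/* No other mappings: keep only the flag bits. */
		head->va &= 0x3UL;
		head->next = NULL;
	}
}

Keeping the flags in the low bits of the va field is what lets the OR with the follower's va preserve the page's recorded REF/MOD state across the removal.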
3368 3364
3369/* 3365/*
3370 * pmap_page_cache: 3366 * pmap_page_cache:
3371 * 3367 *
3372 * Change all mappings of a page to cached/uncached. 3368 * Change all mappings of a page to cached/uncached.
3373 */ 3369 */
3374void 3370void
3375pmap_page_cache(struct pmap *pm, paddr_t pa, int mode) 3371pmap_page_cache(struct pmap *pm, paddr_t pa, int mode)
3376{ 3372{
3377 struct vm_page *pg; 3373 struct vm_page *pg;
3378 struct vm_page_md *md; 3374 struct vm_page_md *md;
3379 pv_entry_t pv; 3375 pv_entry_t pv;
3380 vaddr_t va; 3376 vaddr_t va;
3381 int rv; 3377 int rv;
3382 3378
3383#if 0 3379#if 0
3384 /* 3380 /*
3385 * Why is this? 3381 * Why is this?
3386 */ 3382 */
3387 if (CPU_ISSUN4US || CPU_ISSUN4V) 3383 if (CPU_ISSUN4US || CPU_ISSUN4V)
3388 return; 3384 return;
3389#endif 3385#endif
3390 3386
3391 KASSERT(mutex_owned(&pmap_lock)); 3387 KASSERT(mutex_owned(&pmap_lock));
3392 3388
3393 DPRINTF(PDB_ENTER, ("pmap_page_uncache(%llx)\n", 3389 DPRINTF(PDB_ENTER, ("pmap_page_uncache(%llx)\n",
3394 (unsigned long long)pa)); 3390 (unsigned long long)pa));
3395 pg = PHYS_TO_VM_PAGE(pa); 3391 pg = PHYS_TO_VM_PAGE(pa);
3396 md = VM_PAGE_TO_MD(pg); 3392 md = VM_PAGE_TO_MD(pg);
3397 pv = &md->mdpg_pvh; 3393 pv = &md->mdpg_pvh;
3398 while (pv) { 3394 while (pv) {
3399 va = pv->pv_va & PV_VAMASK; 3395 va = pv->pv_va & PV_VAMASK;
3400 if (pv->pv_va & PV_NC) { 3396 if (pv->pv_va & PV_NC) {
3401 int64_t data; 3397 int64_t data;
3402 3398
3403 /* Non-cached -- I/O mapping */ 3399 /* Non-cached -- I/O mapping */
3404 data = pseg_get(pv->pv_pmap, va); 3400 data = pseg_get(pv->pv_pmap, va);
3405 KASSERT(data & TLB_V); 3401 KASSERT(data & TLB_V);
3406 rv = pseg_set(pv->pv_pmap, va, 3402 rv = pseg_set(pv->pv_pmap, va,
3407 data & ~(TLB_CV|TLB_CP), 0); 3403 data & ~(TLB_CV|TLB_CP), 0);
3408 if (rv & 1) 3404 if (rv & 1)
3409 panic("pmap_page_cache: pseg_set needs" 3405 panic("pmap_page_cache: pseg_set needs"
3410 " spare! rv=%d\n", rv); 3406 " spare! rv=%d\n", rv);
3411 } else if (mode && (!(pv->pv_va & PV_NVC))) { 3407 } else if (mode && (!(pv->pv_va & PV_NVC))) {
3412 int64_t data; 3408 int64_t data;
3413 3409
3414 /* Enable caching */ 3410 /* Enable caching */
3415 data = pseg_get(pv->pv_pmap, va); 3411 data = pseg_get(pv->pv_pmap, va);
3416 KASSERT(data & TLB_V); 3412 KASSERT(data & TLB_V);
3417 rv = pseg_set(pv->pv_pmap, va, data | TLB_CV, 0); 3413 rv = pseg_set(pv->pv_pmap, va, data | TLB_CV, 0);
3418 if (rv & 1) 3414 if (rv & 1)
3419 panic("pmap_page_cache: pseg_set needs" 3415 panic("pmap_page_cache: pseg_set needs"
3420 " spare! rv=%d\n", rv); 3416 " spare! rv=%d\n", rv);
3421 } else { 3417 } else {
3422 int64_t data; 3418 int64_t data;
3423 3419
3424 /* Disable caching */ 3420 /* Disable caching */
3425 data = pseg_get(pv->pv_pmap, va); 3421 data = pseg_get(pv->pv_pmap, va);
3426 KASSERT(data & TLB_V); 3422 KASSERT(data & TLB_V);
3427 rv = pseg_set(pv->pv_pmap, va, data & ~TLB_CV, 0); 3423 rv = pseg_set(pv->pv_pmap, va, data & ~TLB_CV, 0);
3428 if (rv & 1) 3424 if (rv & 1)
3429 panic("pmap_page_cache: pseg_set needs" 3425 panic("pmap_page_cache: pseg_set needs"
3430 " spare! rv=%d\n", rv); 3426 " spare! rv=%d\n", rv);
3431 } 3427 }
3432 if (pmap_is_on_mmu(pv->pv_pmap)) { 3428 if (pmap_is_on_mmu(pv->pv_pmap)) {
3433 /* Force reload -- cache bits have changed */ 3429 /* Force reload -- cache bits have changed */
3434 KASSERT(pmap_ctx(pv->pv_pmap)>=0); 3430 KASSERT(pmap_ctx(pv->pv_pmap)>=0);
3435 tsb_invalidate(va, pv->pv_pmap); 3431 tsb_invalidate(va, pv->pv_pmap);
3436 tlb_flush_pte(va, pv->pv_pmap); 3432 tlb_flush_pte(va, pv->pv_pmap);
3437 } 3433 }
3438 pv = pv->pv_next; 3434 pv = pv->pv_next;
3439 } 3435 }
3440} 3436}
3441 3437
3442/* 3438/*
3443 * Some routines to allocate and free PTPs. 3439 * Some routines to allocate and free PTPs.
3444 */ 3440 */
3445static int 3441static int
3446pmap_get_page(paddr_t *p) 3442pmap_get_page(paddr_t *p)
3447{ 3443{
3448 struct vm_page *pg; 3444 struct vm_page *pg;
3449 paddr_t pa; 3445 paddr_t pa;
3450 3446
3451 if (uvm.page_init_done) { 3447 if (uvm.page_init_done) {
3452 pg = uvm_pagealloc(NULL, 0, NULL, 3448 pg = uvm_pagealloc(NULL, 0, NULL,
3453 UVM_PGA_ZERO | UVM_PGA_USERESERVE); 3449 UVM_PGA_ZERO | UVM_PGA_USERESERVE);
3454 if (pg == NULL) 3450 if (pg == NULL)
3455 return (0); 3451 return (0);
3456 pa = VM_PAGE_TO_PHYS(pg); 3452 pa = VM_PAGE_TO_PHYS(pg);
3457 } else { 3453 } else {
3458 if (!uvm_page_physget(&pa)) 3454 if (!uvm_page_physget(&pa))
3459 return (0); 3455 return (0);
3460 pmap_zero_page(pa); 3456 pmap_zero_page(pa);
3461 } 3457 }
3462 *p = pa; 3458 *p = pa;
3463 return (1); 3459 return (1);
3464} 3460}
3465 3461
3466static void 3462static void
3467pmap_free_page(paddr_t pa, sparc64_cpuset_t cs) 3463pmap_free_page(paddr_t pa, sparc64_cpuset_t cs)
3468{ 3464{
3469 struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 3465 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
3470 3466
3471 dcache_flush_page_cpuset(pa, cs); 3467 dcache_flush_page_cpuset(pa, cs);
3472 uvm_pagefree(pg); 3468 uvm_pagefree(pg);
3473} 3469}
3474 3470
3475static void 3471static void
3476pmap_free_page_noflush(paddr_t pa) 3472pmap_free_page_noflush(paddr_t pa)
3477{ 3473{
3478 struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 3474 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
3479 3475
3480 uvm_pagefree(pg); 3476 uvm_pagefree(pg);
3481} 3477}
3482 3478
3483#ifdef DDB 3479#ifdef DDB
3484 3480
3485void db_dump_pv(db_expr_t, int, db_expr_t, const char *); 3481void db_dump_pv(db_expr_t, int, db_expr_t, const char *);
3486void 3482void
3487db_dump_pv(db_expr_t addr, int have_addr, db_expr_t count, const char *modif) 3483db_dump_pv(db_expr_t addr, int have_addr, db_expr_t count, const char *modif)
3488{ 3484{
3489 struct vm_page *pg; 3485 struct vm_page *pg;
3490 struct vm_page_md *md; 3486 struct vm_page_md *md;
3491 struct pv_entry *pv; 3487 struct pv_entry *pv;
3492 3488
3493 if (!have_addr) { 3489 if (!have_addr) {
3494 db_printf("Need addr for pv\n"); 3490 db_printf("Need addr for pv\n");
3495 return; 3491 return;
3496 } 3492 }
3497 3493
3498 pg = PHYS_TO_VM_PAGE((paddr_t)addr); 3494 pg = PHYS_TO_VM_PAGE((paddr_t)addr);
3499 if (pg == NULL) { 3495 if (pg == NULL) {
3500 db_printf("page is not managed\n"); 3496 db_printf("page is not managed\n");
3501 return; 3497 return;
3502 } 3498 }
3503 md = VM_PAGE_TO_MD(pg); 3499 md = VM_PAGE_TO_MD(pg);
3504 for (pv = &md->mdpg_pvh; pv; pv = pv->pv_next) 3500 for (pv = &md->mdpg_pvh; pv; pv = pv->pv_next)
3505 db_printf("pv@%p: next=%p pmap=%p va=0x%llx\n", 3501 db_printf("pv@%p: next=%p pmap=%p va=0x%llx\n",
3506 pv, pv->pv_next, pv->pv_pmap, 3502 pv, pv->pv_next, pv->pv_pmap,
3507 (unsigned long long)pv->pv_va); 3503 (unsigned long long)pv->pv_va);
3508} 3504}
3509 3505
3510#endif 3506#endif
3511 3507
3512#ifdef DEBUG 3508#ifdef DEBUG
3513/* 3509/*
3514 * Test ref/modify handling. */ 3510 * Test ref/modify handling. */
3515void pmap_testout(void); 3511void pmap_testout(void);
3516void 3512void
3517pmap_testout(void) 3513pmap_testout(void)
3518{ 3514{
3519 vaddr_t va; 3515 vaddr_t va;
3520 volatile int *loc; 3516 volatile int *loc;
3521 int val = 0; 3517 int val = 0;
3522 paddr_t pa; 3518 paddr_t pa;
3523 struct vm_page *pg; 3519 struct vm_page *pg;
3524 int ref, mod; 3520 int ref, mod;
3525 3521
3526 /* Allocate a page */ 3522 /* Allocate a page */
3527 va = (vaddr_t)(vmmap - PAGE_SIZE); 3523 va = (vaddr_t)(vmmap - PAGE_SIZE);
3528 KASSERT(va != 0); 3524 KASSERT(va != 0);
3529 loc = (int*)va; 3525 loc = (int*)va;
3530 3526
3531 pmap_get_page(&pa); 3527 pmap_get_page(&pa);
3532 pg = PHYS_TO_VM_PAGE(pa); 3528 pg = PHYS_TO_VM_PAGE(pa);
3533 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, VM_PROT_ALL); 3529 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, VM_PROT_ALL);
3534 pmap_update(pmap_kernel()); 3530 pmap_update(pmap_kernel());
3535 3531
3536 /* Now clear reference and modify */ 3532 /* Now clear reference and modify */
3537 ref = pmap_clear_reference(pg); 3533 ref = pmap_clear_reference(pg);
3538 mod = pmap_clear_modify(pg); 3534 mod = pmap_clear_modify(pg);
3539 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 3535 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
3540 (void *)(u_long)va, (long)pa, 3536 (void *)(u_long)va, (long)pa,
3541 ref, mod); 3537 ref, mod);
3542 3538
3543 /* Check it's properly cleared */ 3539 /* Check it's properly cleared */
3544 ref = pmap_is_referenced(pg); 3540 ref = pmap_is_referenced(pg);
3545 mod = pmap_is_modified(pg); 3541 mod = pmap_is_modified(pg);
3546 printf("Checking cleared page: ref %d, mod %d\n", 3542 printf("Checking cleared page: ref %d, mod %d\n",
3547 ref, mod); 3543 ref, mod);
3548 3544
3549 /* Reference page */ 3545 /* Reference page */
3550 val = *loc; 3546 val = *loc;
3551 3547
3552 ref = pmap_is_referenced(pg); 3548 ref = pmap_is_referenced(pg);
3553 mod = pmap_is_modified(pg); 3549 mod = pmap_is_modified(pg);
3554 printf("Referenced page: ref %d, mod %d val %x\n", 3550 printf("Referenced page: ref %d, mod %d val %x\n",
3555 ref, mod, val); 3551 ref, mod, val);
3556 3552
3557 /* Now clear reference and modify */ 3553 /* Now clear reference and modify */
3558 ref = pmap_clear_reference(pg); 3554 ref = pmap_clear_reference(pg);
3559 mod = pmap_clear_modify(pg); 3555 mod = pmap_clear_modify(pg);
3560 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 3556 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
3561 (void *)(u_long)va, (long)pa, 3557 (void *)(u_long)va, (long)pa,
3562 ref, mod); 3558 ref, mod);
3563 3559
3564 /* Modify page */ 3560 /* Modify page */
3565 *loc = 1; 3561 *loc = 1;
3566 3562
3567 ref = pmap_is_referenced(pg); 3563 ref = pmap_is_referenced(pg);
3568 mod = pmap_is_modified(pg); 3564 mod = pmap_is_modified(pg);
3569 printf("Modified page: ref %d, mod %d\n", 3565 printf("Modified page: ref %d, mod %d\n",
3570 ref, mod); 3566 ref, mod);
3571 3567
3572 /* Now clear reference and modify */ 3568 /* Now clear reference and modify */
3573 ref = pmap_clear_reference(pg); 3569 ref = pmap_clear_reference(pg);
3574 mod = pmap_clear_modify(pg); 3570 mod = pmap_clear_modify(pg);
3575 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 3571 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
3576 (void *)(u_long)va, (long)pa, 3572 (void *)(u_long)va, (long)pa,
3577 ref, mod); 3573 ref, mod);
3578 3574
3579 /* Check it's properly cleared */ 3575 /* Check it's properly cleared */
3580 ref = pmap_is_referenced(pg); 3576 ref = pmap_is_referenced(pg);
3581 mod = pmap_is_modified(pg); 3577 mod = pmap_is_modified(pg);
3582 printf("Checking cleared page: ref %d, mod %d\n", 3578 printf("Checking cleared page: ref %d, mod %d\n",
3583 ref, mod); 3579 ref, mod);
3584 3580
3585 /* Modify page */ 3581 /* Modify page */
3586 *loc = 1; 3582 *loc = 1;
3587 3583
3588 ref = pmap_is_referenced(pg); 3584 ref = pmap_is_referenced(pg);
3589 mod = pmap_is_modified(pg); 3585 mod = pmap_is_modified(pg);