| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: pmap.c,v 1.179.16.12 2010/01/22 07:41:10 matt Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.179.16.13 2010/01/26 21:19:25 matt Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
9 | * NASA Ames Research Center and by Chris G. Demetriou. | | 9 | * NASA Ames Research Center and by Chris G. Demetriou. |
10 | * | | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | | 11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions | | 12 | * modification, are permitted provided that the following conditions |
13 | * are met: | | 13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright | | 14 | * 1. Redistributions of source code must retain the above copyright |
| @@ -57,27 +57,27 @@ | | | @@ -57,27 +57,27 @@ |
57 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 57 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
58 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 58 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
59 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 59 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
63 | * SUCH DAMAGE. | | 63 | * SUCH DAMAGE. |
64 | * | | 64 | * |
65 | * @(#)pmap.c 8.4 (Berkeley) 1/26/94 | | 65 | * @(#)pmap.c 8.4 (Berkeley) 1/26/94 |
66 | */ | | 66 | */ |
67 | | | 67 | |
68 | #include <sys/cdefs.h> | | 68 | #include <sys/cdefs.h> |
69 | | | 69 | |
70 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.179.16.12 2010/01/22 07:41:10 matt Exp $"); | | 70 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.179.16.13 2010/01/26 21:19:25 matt Exp $"); |
71 | | | 71 | |
72 | /* | | 72 | /* |
73 | * Manages physical address maps. | | 73 | * Manages physical address maps. |
74 | * | | 74 | * |
75 | * In addition to hardware address maps, this | | 75 | * In addition to hardware address maps, this |
76 | * module is called upon to provide software-use-only | | 76 | * module is called upon to provide software-use-only |
77 | * maps which may or may not be stored in the same | | 77 | * maps which may or may not be stored in the same |
78 | * form as hardware maps. These pseudo-maps are | | 78 | * form as hardware maps. These pseudo-maps are |
79 | * used to store intermediate results from copy | | 79 | * used to store intermediate results from copy |
80 | * operations to and from address spaces. | | 80 | * operations to and from address spaces. |
81 | * | | 81 | * |
82 | * Since the information managed by this module is | | 82 | * Since the information managed by this module is |
83 | * also stored by the logical address mapping module, | | 83 | * also stored by the logical address mapping module, |
| @@ -141,212 +141,303 @@ CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG0(0x | | | @@ -141,212 +141,303 @@ CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG0(0x |
141 | CTASSERT(MIPS_KSEG1_START < 0); | | 141 | CTASSERT(MIPS_KSEG1_START < 0); |
142 | CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG1(0x1000) < 0); | | 142 | CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG1(0x1000) < 0); |
143 | CTASSERT(MIPS_KSEG2_START < 0); | | 143 | CTASSERT(MIPS_KSEG2_START < 0); |
144 | CTASSERT(MIPS_MAX_MEM_ADDR < 0); | | 144 | CTASSERT(MIPS_MAX_MEM_ADDR < 0); |
145 | CTASSERT(MIPS_RESERVED_ADDR < 0); | | 145 | CTASSERT(MIPS_RESERVED_ADDR < 0); |
146 | CTASSERT((uint32_t)MIPS_KSEG0_START == 0x80000000); | | 146 | CTASSERT((uint32_t)MIPS_KSEG0_START == 0x80000000); |
147 | CTASSERT((uint32_t)MIPS_KSEG1_START == 0xa0000000); | | 147 | CTASSERT((uint32_t)MIPS_KSEG1_START == 0xa0000000); |
148 | CTASSERT((uint32_t)MIPS_KSEG2_START == 0xc0000000); | | 148 | CTASSERT((uint32_t)MIPS_KSEG2_START == 0xc0000000); |
149 | CTASSERT((uint32_t)MIPS_MAX_MEM_ADDR == 0xbe000000); | | 149 | CTASSERT((uint32_t)MIPS_MAX_MEM_ADDR == 0xbe000000); |
150 | CTASSERT((uint32_t)MIPS_RESERVED_ADDR == 0xbfc80000); | | 150 | CTASSERT((uint32_t)MIPS_RESERVED_ADDR == 0xbfc80000); |
151 | CTASSERT(MIPS_KSEG0_P(MIPS_PHYS_TO_KSEG0(0))); | | 151 | CTASSERT(MIPS_KSEG0_P(MIPS_PHYS_TO_KSEG0(0))); |
152 | CTASSERT(MIPS_KSEG1_P(MIPS_PHYS_TO_KSEG1(0))); | | 152 | CTASSERT(MIPS_KSEG1_P(MIPS_PHYS_TO_KSEG1(0))); |
153 | | | 153 | |
154 | #ifdef DEBUG | | 154 | #define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0) |
155 | struct { | | 155 | #define PMAP_COUNTER(name, desc) \ |
156 | int kernel; /* entering kernel mapping */ | | 156 | static struct evcnt pmap_evcnt_##name = \ |
157 | int user; /* entering user mapping */ | | 157 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \ |
158 | int ptpneeded; /* needed to allocate a PT page */ | | 158 | EVCNT_ATTACH_STATIC(pmap_evcnt_##name) |
159 | int pwchange; /* no mapping change, just wiring or protection */ | | 159 | |
160 | int wchange; /* no mapping change, just wiring */ | | 160 | PMAP_COUNTER(remove_kernel_calls, "remove kernel calls"); |
161 | int mchange; /* was mapped but mapping to different page */ | | 161 | PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped"); |
162 | int managed; /* a managed page */ | | 162 | PMAP_COUNTER(remove_user_calls, "remove user calls"); |
163 | int firstpv; /* first mapping for this PA */ | | 163 | PMAP_COUNTER(remove_user_pages, "user pages unmapped"); |
164 | int secondpv; /* second mapping for this PA */ | | 164 | PMAP_COUNTER(remove_flushes, "remove cache flushes"); |
165 | int ci; /* cache inhibited */ | | 165 | PMAP_COUNTER(remove_tlb_ops, "remove tlb ops"); |
166 | int unmanaged; /* not a managed page */ | | 166 | PMAP_COUNTER(remove_pvfirst, "remove pv first"); |
167 | int flushes; /* cache flushes */ | | 167 | PMAP_COUNTER(remove_pvsearch, "remove pv search"); |
168 | int cachehit; /* new entry forced valid entry out */ | | 168 | |
169 | } enter_stats; | | 169 | PMAP_COUNTER(prefer_requests, "prefer requests"); |
170 | struct { | | 170 | PMAP_COUNTER(prefer_adjustments, "prefer adjustments"); |
171 | int calls; | | 171 | |
172 | int removes; | | 172 | PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed"); |
173 | int flushes; | | 173 | PMAP_COUNTER(zeroed_pages, "pages zeroed"); |
174 | int pidflushes; /* HW pid stolen */ | | 174 | PMAP_COUNTER(copied_pages, "pages copied"); |
175 | int pvfirst; | | 175 | |
176 | int pvsearch; | | 176 | PMAP_COUNTER(kenter_pa, "kernel fast mapped pages"); |
177 | } remove_stats; | | 177 | PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)"); |
| | | 178 | PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages"); |
| | | 179 | PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages"); |
| | | 180 | |
| | | 181 | PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable"); |
| | | 182 | PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable"); |
| | | 183 | |
| | | 184 | PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)"); |
| | | 185 | PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)"); |
| | | 186 | PMAP_COUNTER(kernel_mappings, "kernel pages mapped"); |
| | | 187 | PMAP_COUNTER(user_mappings, "user pages mapped"); |
| | | 188 | PMAP_COUNTER(user_mappings_changed, "user mapping changed"); |
| | | 189 | PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed"); |
| | | 190 | PMAP_COUNTER(uncached_mappings, "uncached pages mapped"); |
| | | 191 | PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped"); |
| | | 192 | PMAP_COUNTER(managed_mappings, "managed pages mapped"); |
| | | 193 | PMAP_COUNTER(mappings, "pages mapped"); |
| | | 194 | PMAP_COUNTER(remappings, "pages remapped"); |
| | | 195 | PMAP_COUNTER(unmappings, "pages unmapped"); |
| | | 196 | PMAP_COUNTER(primary_mappings, "page initial mappings"); |
| | | 197 | PMAP_COUNTER(primary_unmappings, "page final unmappings"); |
| | | 198 | PMAP_COUNTER(tlb_hit, "page mapping"); |
| | | 199 | |
| | | 200 | PMAP_COUNTER(exec_mappings, "exec pages mapped"); |
| | | 201 | PMAP_COUNTER(exec_synced_mappings, "exec pages synced"); |
| | | 202 | PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)"); |
| | | 203 | PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)"); |
| | | 204 | PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)"); |
| | | 205 | PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)"); |
| | | 206 | PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)"); |
| | | 207 | PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)"); |
| | | 208 | PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)"); |
| | | 209 | PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)"); |
| | | 210 | PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)"); |
| | | 211 | |
| | | 212 | PMAP_COUNTER(create, "creates"); |
| | | 213 | PMAP_COUNTER(reference, "references"); |
| | | 214 | PMAP_COUNTER(dereference, "dereferences"); |
| | | 215 | PMAP_COUNTER(destroy, "destroyed"); |
| | | 216 | PMAP_COUNTER(activate, "activations"); |
| | | 217 | PMAP_COUNTER(deactivate, "deactivations"); |
| | | 218 | PMAP_COUNTER(update, "updates"); |
| | | 219 | PMAP_COUNTER(unwire, "unwires"); |
| | | 220 | PMAP_COUNTER(copy, "copies"); |
| | | 221 | PMAP_COUNTER(collect, "collects"); |
| | | 222 | PMAP_COUNTER(clear_modify, "clear_modifies"); |
| | | 223 | PMAP_COUNTER(protect, "protects"); |
| | | 224 | PMAP_COUNTER(page_protect, "page_protects"); |
178 | | | 225 | |
179 | #define PDB_FOLLOW 0x0001 | | 226 | #define PDB_FOLLOW 0x0001 |
180 | #define PDB_INIT 0x0002 | | 227 | #define PDB_INIT 0x0002 |
181 | #define PDB_ENTER 0x0004 | | 228 | #define PDB_ENTER 0x0004 |
182 | #define PDB_REMOVE 0x0008 | | 229 | #define PDB_REMOVE 0x0008 |
183 | #define PDB_CREATE 0x0010 | | 230 | #define PDB_CREATE 0x0010 |
184 | #define PDB_PTPAGE 0x0020 | | 231 | #define PDB_PTPAGE 0x0020 |
185 | #define PDB_PVENTRY 0x0040 | | 232 | #define PDB_PVENTRY 0x0040 |
186 | #define PDB_BITS 0x0080 | | 233 | #define PDB_BITS 0x0080 |
187 | #define PDB_COLLECT 0x0100 | | 234 | #define PDB_COLLECT 0x0100 |
188 | #define PDB_PROTECT 0x0200 | | 235 | #define PDB_PROTECT 0x0200 |
189 | #define PDB_TLBPID 0x0400 | | 236 | #define PDB_TLBPID 0x0400 |
190 | #define PDB_PARANOIA 0x2000 | | 237 | #define PDB_PARANOIA 0x2000 |
191 | #define PDB_WIRING 0x4000 | | 238 | #define PDB_WIRING 0x4000 |
192 | #define PDB_PVDUMP 0x8000 | | 239 | #define PDB_PVDUMP 0x8000 |
193 | int pmapdebug = 0; | | 240 | int pmapdebug = 0; |
194 | | | 241 | |
195 | #endif | | | |
196 | | | | |
197 | #define PMAP_ASID_RESERVED 0 | | 242 | #define PMAP_ASID_RESERVED 0 |
198 | | | 243 | |
199 | CTASSERT(PMAP_ASID_RESERVED == 0); | | 244 | CTASSERT(PMAP_ASID_RESERVED == 0); |
200 | /* | | 245 | /* |
201 | * Initialize the kernel pmap. | | 246 | * Initialize the kernel pmap. |
202 | */ | | 247 | */ |
203 | #ifdef MULTIPROCESSOR | | 248 | #ifdef MULTIPROCESSOR |
204 | #define PMAP_SIZE offsetof(struct pmap, pm_pai[MAXCPUS]) | | 249 | #define PMAP_SIZE offsetof(struct pmap, pm_pai[MAXCPUS]) |
205 | #else | | 250 | #else |
206 | #define PMAP_SIZE sizeof(struct pmap) | | 251 | #define PMAP_SIZE sizeof(struct pmap) |
207 | #endif | | 252 | #endif |
208 | struct pmap_kernel kernel_pmap_store = { | | 253 | struct pmap_kernel kernel_pmap_store = { |
209 | .kernel_pmap = { | | 254 | .kernel_pmap = { |
210 | .pm_count = 1, | | 255 | .pm_count = 1, |
211 | .pm_segtab = (void *)(MIPS_KSEG2_START + 0x1eadbeef), | | 256 | .pm_segtab = (void *)(MIPS_KSEG2_START + 0x1eadbeef), |
212 | }, | | 257 | }, |
213 | }; | | 258 | }; |
214 | | | 259 | |
215 | paddr_t mips_avail_start; /* PA of first available physical page */ | | 260 | paddr_t mips_avail_start; /* PA of first available physical page */ |
216 | paddr_t mips_avail_end; /* PA of last available physical page */ | | 261 | paddr_t mips_avail_end; /* PA of last available physical page */ |
217 | vaddr_t mips_virtual_end; /* VA of last avail page (end of kernel AS) */ | | 262 | vaddr_t mips_virtual_end; /* VA of last avail page (end of kernel AS) */ |
218 | | | 263 | |
219 | struct pv_entry *pv_table; | | | |
220 | int pv_table_npages; | | | |
221 | | | | |
222 | pt_entry_t *Sysmap; /* kernel pte table */ | | 264 | pt_entry_t *Sysmap; /* kernel pte table */ |
223 | unsigned int Sysmapsize; /* number of pte's in Sysmap */ | | 265 | unsigned int Sysmapsize; /* number of pte's in Sysmap */ |
224 | | | 266 | |
225 | /* | | 267 | /* |
226 | * The pools from which pmap structures and sub-structures are allocated. | | 268 | * The pools from which pmap structures and sub-structures are allocated. |
227 | */ | | 269 | */ |
228 | struct pool pmap_pmap_pool; | | 270 | struct pool pmap_pmap_pool; |
229 | struct pool pmap_pv_pool; | | 271 | struct pool pmap_pv_pool; |
230 | | | 272 | |
231 | #ifndef PMAP_PV_LOWAT | | 273 | #ifndef PMAP_PV_LOWAT |
232 | #define PMAP_PV_LOWAT 16 | | 274 | #define PMAP_PV_LOWAT 16 |
233 | #endif | | 275 | #endif |
234 | int pmap_pv_lowat = PMAP_PV_LOWAT; | | 276 | int pmap_pv_lowat = PMAP_PV_LOWAT; |
235 | | | 277 | |
236 | bool pmap_initialized = false; | | 278 | bool pmap_initialized = false; |
| | | 279 | #define PMAP_PAGE_COLOROK_P(a, b) \ |
| | | 280 | ((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0) |
| | | 281 | u_int pmap_page_colormask; |
237 | | | 282 | |
238 | #define PAGE_IS_MANAGED(pa) \ | | 283 | #define PAGE_IS_MANAGED(pa) \ |
239 | (pmap_initialized == true && vm_physseg_find(atop(pa), NULL) != -1) | | 284 | (pmap_initialized == true && vm_physseg_find(atop(pa), NULL) != -1) |
240 | | | 285 | |
241 | #define PMAP_IS_ACTIVE(pm) \ | | 286 | #define PMAP_IS_ACTIVE(pm) \ |
242 | ((pm) == pmap_kernel() || \ | | 287 | ((pm) == pmap_kernel() || \ |
243 | (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap) | | 288 | (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap) |
244 | | | 289 | |
245 | /* Forward function declarations */ | | 290 | /* Forward function declarations */ |
246 | void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *); | | 291 | void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool); |
247 | void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *); | | 292 | void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *); |
248 | pt_entry_t *pmap_pte(pmap_t, vaddr_t); | | 293 | pt_entry_t *pmap_pte(pmap_t, vaddr_t); |
249 | | | 294 | |
250 | /* | | 295 | /* |
251 | * PV table management functions. | | 296 | * PV table management functions. |
252 | */ | | 297 | */ |
253 | void *pmap_pv_page_alloc(struct pool *, int); | | 298 | void *pmap_pv_page_alloc(struct pool *, int); |
254 | void pmap_pv_page_free(struct pool *, void *); | | 299 | void pmap_pv_page_free(struct pool *, void *); |
255 | | | 300 | |
256 | struct pool_allocator pmap_pv_page_allocator = { | | 301 | struct pool_allocator pmap_pv_page_allocator = { |
257 | pmap_pv_page_alloc, pmap_pv_page_free, 0, | | 302 | pmap_pv_page_alloc, pmap_pv_page_free, 0, |
258 | }; | | 303 | }; |
259 | | | 304 | |
260 | #define pmap_pv_alloc() pool_get(&pmap_pv_pool, PR_NOWAIT) | | 305 | #define pmap_pv_alloc() pool_get(&pmap_pv_pool, PR_NOWAIT) |
261 | #define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv)) | | 306 | #define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv)) |
262 | | | 307 | |
263 | /* | | 308 | /* |
264 | * Misc. functions. | | 309 | * Misc. functions. |
265 | */ | | 310 | */ |
266 | | | 311 | |
267 | static inline bool | | 312 | static inline bool |
268 | pmap_clear_page_attributes(struct vm_page *pg, u_int attributes) | | 313 | pmap_clear_page_attributes(struct vm_page *pg, u_int clear_attributes) |
269 | { | | 314 | { |
270 | volatile u_int * const attrp = &pg->mdpage.pvh_attrs; | | 315 | volatile u_int * const attrp = &pg->mdpage.pvh_attrs; |
271 | #ifdef MULTIPROCESSOR | | 316 | #ifdef MULTIPROCESSOR |
272 | for (;;) { | | 317 | for (;;) { |
273 | u_int old_attr = *attrp; | | 318 | u_int old_attr = *attrp; |
274 | if ((old_attr & attributes) == 0) | | 319 | if ((old_attr & clear_attributes) == 0) |
275 | return false; | | 320 | return false; |
276 | u_int new_attr = old_attr & ~attributes; | | 321 | u_int new_attr = old_attr & ~clear_attributes; |
277 | if (old_attr == atomic_cas_uint(attrp, old_attr, new_attr)) | | 322 | if (old_attr == atomic_cas_uint(attrp, old_attr, new_attr)) |
278 | return true; | | 323 | return true; |
279 | } | | 324 | } |
280 | #else | | 325 | #else |
281 | u_int old_attr = *attrp; | | 326 | u_int old_attr = *attrp; |
282 | if ((old_attr & attributes) == 0) | | 327 | if ((old_attr & clear_attributes) == 0) |
283 | return false; | | 328 | return false; |
284 | *attrp &= ~attributes; | | 329 | *attrp &= ~clear_attributes; |
285 | return true; | | 330 | return true; |
286 | #endif | | 331 | #endif |
287 | } | | 332 | } |
288 | | | 333 | |
289 | static inline void | | 334 | static inline void |
290 | pmap_set_page_attributes(struct vm_page *pg, u_int attributes) | | 335 | pmap_set_page_attributes(struct vm_page *pg, u_int set_attributes) |
291 | { | | 336 | { |
292 | #ifdef MULTIPROCESSOR | | 337 | #ifdef MULTIPROCESSOR |
293 | atomic_or_uint(&pg->mdpage.pvh_attrs, attributes); | | 338 | atomic_or_uint(&pg->mdpage.pvh_attrs, set_attributes); |
294 | #else | | 339 | #else |
295 | pg->mdpage.pvh_attrs |= attributes; | | 340 | pg->mdpage.pvh_attrs |= set_attributes; |
296 | #endif | | 341 | #endif |
297 | } | | 342 | } |
298 | | | 343 | |
299 | #if defined(MIPS3_PLUS) /* XXX mmu XXX */ | | 344 | static inline void |
300 | void mips_dump_segtab(struct proc *); | | 345 | pmap_page_syncicache(struct vm_page *pg) |
301 | static void mips_flushcache_allpvh(paddr_t); | | 346 | { |
| | | 347 | if (MIPS_HAS_R4K_MMU) { |
| | | 348 | if (PG_MD_CACHED_P(pg)) { |
| | | 349 | mips_icache_sync_range_index( |
| | | 350 | pg->mdpage.pvh_first.pv_va, PAGE_SIZE); |
| | | 351 | } |
| | | 352 | } else { |
| | | 353 | mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)), |
| | | 354 | PAGE_SIZE); |
| | | 355 | } |
| | | 356 | } |
302 | | | 357 | |
303 | /* | | 358 | static vaddr_t |
304 | * Flush virtual addresses associated with a given physical address | | 359 | pmap_map_ephemeral_page(struct vm_page *pg, int prot, pt_entry_t *old_pt_entry_p) |
305 | */ | | | |
306 | static void | | | |
307 | mips_flushcache_allpvh(paddr_t pa) | | | |
308 | { | | 360 | { |
309 | struct vm_page *pg; | | 361 | const paddr_t pa = VM_PAGE_TO_PHYS(pg); |
310 | struct pv_entry *pv; | | 362 | pv_entry_t pv = &pg->mdpage.pvh_first; |
311 | | | 363 | |
312 | pg = PHYS_TO_VM_PAGE(pa); | | 364 | #ifdef _LP64 |
313 | if (pg == NULL) { | | 365 | vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa); |
314 | /* page is unmanaged */ | | 366 | #else |
315 | #ifdef DIAGNOSTIC | | 367 | vaddr_t va; |
316 | printf("mips_flushcache_allpvh(): unmanaged pa = %#"PRIxPADDR"\n", | | 368 | if (pa <= MIPS_PHYS_MASK) { |
317 | pa); | | 369 | va = MIPS_PHYS_TO_KSEG0(pa); |
318 | #endif | | 370 | } else { |
319 | return; | | 371 | KASSERT(pmap_initialized); |
320 | } | | 372 | /* |
| | | 373 | * Make sure to use a congruent mapping to the last mapped |
| | | 374 | * address so we don't have to worry about virtual aliases. |
| | | 375 | */ |
| | | 376 | kpreempt_disable(); |
| | | 377 | struct cpu_info * const ci = curcpu(); |
321 | | | 378 | |
322 | pv = pg->mdpage.pvh_list; | | 379 | va = (prot & VM_PROT_WRITE ? ci->ci_pmap_dstbase : ci->ci_pmap_srcbase) |
| | | 380 | + mips_cache_indexof(MIPS_CACHE_VIRTUAL_ALIAS ? pv->pv_va : pa); |
| | | 381 | *old_pt_entry_p = *kvtopte(va); |
| | | 382 | pmap_kenter_pa(va, pa, prot); |
| | | 383 | } |
| | | 384 | #endif /* _LP64 */ |
| | | 385 | if (MIPS_CACHE_VIRTUAL_ALIAS) { |
| | | 386 | /* |
| | | 387 | * If we are forced to use an incompatible alias, flush the |
| | | 388 | * page from the cache so we will copy the correct contents. |
| | | 389 | */ |
| | | 390 | if (PG_MD_CACHED_P(pg) |
| | | 391 | && mips_cache_badalias(pv->pv_va, va)) |
| | | 392 | mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); |
 | | | 393 | 		if (pv->pv_pmap == NULL) |
| | | 394 | pv->pv_va = va; |
| | | 395 | } |
323 | | | 396 | |
324 | #if defined(MIPS3_NO_PV_UNCACHED) | | 397 | return va; |
325 | /* No current mapping. Cache was flushed by pmap_remove_pv() */ | | 398 | } |
326 | if (pv->pv_pmap == NULL) | | | |
327 | return; | | | |
328 | | | 399 | |
329 | /* Only one index is allowed at a time */ | | 400 | static void |
330 | if (mips_cache_indexof(pa) != mips_cache_indexof(pv->pv_va)) | | 401 | pmap_unmap_ephemeral_page(struct vm_page *pg, vaddr_t va, |
331 | mips_dcache_wbinv_range_index(pv->pv_va, NBPG); | | 402 | pt_entry_t old_pt_entry) |
332 | #else | | 403 | { |
333 | while (pv) { | | 404 | pv_entry_t pv = &pg->mdpage.pvh_first; |
334 | mips_dcache_wbinv_range_index(pv->pv_va, NBPG); | | 405 | |
335 | pv = pv->pv_next; | | 406 | if (MIPS_CACHE_VIRTUAL_ALIAS |
| | | 407 | && (PG_MD_UNCACHED_P(pg) |
| | | 408 | || (pv->pv_pmap != NULL |
| | | 409 | && mips_cache_badalias(pv->pv_va, va)))) { |
| | | 410 | /* |
| | | 411 | * If this page was previously uncached or we had to use an |
| | | 412 | * incompatible alias and it has a valid mapping, flush it |
| | | 413 | * from the cache. |
| | | 414 | */ |
| | | 415 | mips_dcache_wbinv_range(va, PAGE_SIZE); |
| | | 416 | } |
| | | 417 | #ifndef _LP64 |
| | | 418 | /* |
| | | 419 | * If we had to map using a page table entry, unmap it now. |
| | | 420 | */ |
| | | 421 | if (va >= VM_MIN_KERNEL_ADDRESS) { |
| | | 422 | pmap_kremove(va, PAGE_SIZE); |
| | | 423 | if (mips_pg_v(old_pt_entry.pt_entry)) { |
| | | 424 | *kvtopte(va) = old_pt_entry; |
| | | 425 | pmap_tlb_update(pmap_kernel(), va, old_pt_entry.pt_entry); |
| | | 426 | } |
| | | 427 | kpreempt_enable(); |
336 | } | | 428 | } |
337 | #endif | | 429 | #endif |
338 | } | | 430 | } |
339 | #endif /* MIPS3_PLUS */ | | | |
340 | | | 431 | |
341 | /* | | 432 | /* |
342 | * Bootstrap the system enough to run with virtual memory. | | 433 | * Bootstrap the system enough to run with virtual memory. |
343 | * firstaddr is the first unused kseg0 address (not page aligned). | | 434 | * firstaddr is the first unused kseg0 address (not page aligned). |
344 | */ | | 435 | */ |
345 | void | | 436 | void |
346 | pmap_bootstrap(void) | | 437 | pmap_bootstrap(void) |
347 | { | | 438 | { |
348 | vsize_t bufsz; | | 439 | vsize_t bufsz; |
349 | | | 440 | |
350 | /* | | 441 | /* |
351 | * Compute the number of pages kmem_map will have. | | 442 | * Compute the number of pages kmem_map will have. |
352 | */ | | 443 | */ |
| @@ -390,39 +481,26 @@ pmap_bootstrap(void) | | | @@ -390,39 +481,26 @@ pmap_bootstrap(void) |
390 | Sysmapsize = | | 481 | Sysmapsize = |
391 | (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NBPG; | | 482 | (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NBPG; |
392 | } | | 483 | } |
393 | #endif | | 484 | #endif |
394 | | | 485 | |
395 | /* | | 486 | /* |
396 | * Now actually allocate the kernel PTE array (must be done | | 487 | * Now actually allocate the kernel PTE array (must be done |
397 | * after virtual_end is initialized). | | 488 | * after virtual_end is initialized). |
398 | */ | | 489 | */ |
399 | Sysmap = (pt_entry_t *) | | 490 | Sysmap = (pt_entry_t *) |
400 | uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize); | | 491 | uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize); |
401 | | | 492 | |
402 | /* | | 493 | /* |
403 | * Allocate memory for the pv_heads. (A few more of the latter | | | |
404 | * are allocated than are needed.) | | | |
405 | * | | | |
406 | * We could do this in pmap_init when we know the actual | | | |
407 | * managed page pool size, but its better to use kseg0 | | | |
408 | * addresses rather than kernel virtual addresses mapped | | | |
409 | * through the TLB. | | | |
410 | */ | | | |
411 | pv_table_npages = physmem; | | | |
412 | pv_table = (struct pv_entry *) | | | |
413 | uvm_pageboot_alloc(sizeof(struct pv_entry) * pv_table_npages); | | | |
414 | | | | |
415 | /* | | | |
416 | * Initialize the pools. | | 494 | * Initialize the pools. |
417 | */ | | 495 | */ |
418 | pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl", | | 496 | pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl", |
419 | &pool_allocator_nointr, IPL_NONE); | | 497 | &pool_allocator_nointr, IPL_NONE); |
420 | pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl", | | 498 | pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl", |
421 | &pmap_pv_page_allocator, IPL_NONE); | | 499 | &pmap_pv_page_allocator, IPL_NONE); |
422 | | | 500 | |
423 | tlb_set_asid(0); | | 501 | tlb_set_asid(0); |
424 | | | 502 | |
425 | #ifdef MIPS3_PLUS /* XXX mmu XXX */ | | 503 | #ifdef MIPS3_PLUS /* XXX mmu XXX */ |
426 | /* | | 504 | /* |
427 | * The R4?00 stores only one copy of the Global bit in the | | 505 | * The R4?00 stores only one copy of the Global bit in the |
428 | * translation lookaside buffer for each 2 page entry. | | 506 | * translation lookaside buffer for each 2 page entry. |
| @@ -540,59 +618,58 @@ pmap_steal_memory(vsize_t size, vaddr_t | | | @@ -540,59 +618,58 @@ pmap_steal_memory(vsize_t size, vaddr_t |
540 | * If we got here, there was no memory left. | | 618 | * If we got here, there was no memory left. |
541 | */ | | 619 | */ |
542 | panic("pmap_steal_memory: no memory to steal"); | | 620 | panic("pmap_steal_memory: no memory to steal"); |
543 | } | | 621 | } |
544 | | | 622 | |
545 | /* | | 623 | /* |
546 | * Initialize the pmap module. | | 624 | * Initialize the pmap module. |
547 | * Called by vm_init, to initialize any structures that the pmap | | 625 | * Called by vm_init, to initialize any structures that the pmap |
548 | * system needs to map virtual memory. | | 626 | * system needs to map virtual memory. |
549 | */ | | 627 | */ |
550 | void | | 628 | void |
551 | pmap_init(void) | | 629 | pmap_init(void) |
552 | { | | 630 | { |
553 | vsize_t s; | | | |
554 | int bank, i; | | | |
555 | pv_entry_t pv; | | | |
556 | | | | |
557 | #ifdef DEBUG | | 631 | #ifdef DEBUG |
558 | if (pmapdebug & (PDB_FOLLOW|PDB_INIT)) | | 632 | if (pmapdebug & (PDB_FOLLOW|PDB_INIT)) |
559 | printf("pmap_init()\n"); | | 633 | printf("pmap_init()\n"); |
560 | #endif | | 634 | #endif |
561 | | | 635 | |
562 | /* | | 636 | /* |
563 | * Memory for the pv entry heads has | | | |
564 | * already been allocated. Initialize the physical memory | | | |
565 | * segments. | | | |
566 | */ | | | |
567 | pv = pv_table; | | | |
568 | for (bank = 0; bank < vm_nphysseg; bank++) { | | | |
569 | s = vm_physmem[bank].end - vm_physmem[bank].start; | | | |
570 | for (i = 0; i < s; i++) | | | |
571 | vm_physmem[bank].pgs[i].mdpage.pvh_list = pv++; | | | |
572 | } | | | |
573 | | | | |
574 | /* | | | |
575 | * Set a low water mark on the pv_entry pool, so that we are | | 637 | * Set a low water mark on the pv_entry pool, so that we are |
576 | * more likely to have these around even in extreme memory | | 638 | * more likely to have these around even in extreme memory |
577 | * starvation. | | 639 | * starvation. |
578 | */ | | 640 | */ |
579 | pool_setlowat(&pmap_pv_pool, pmap_pv_lowat); | | 641 | pool_setlowat(&pmap_pv_pool, pmap_pv_lowat); |
580 | | | 642 | |
581 | /* | | 643 | /* |
582 | * Now it is safe to enable pv entry recording. | | 644 | * Now it is safe to enable pv entry recording. |
583 | */ | | 645 | */ |
584 | pmap_initialized = true; | | 646 | pmap_initialized = true; |
585 | | | 647 | |
| | | 648 | #ifndef _LP64 |
| | | 649 | /* |
 | | | 650 | 	 * If we have more memory than can be mapped by KSEG0, we need to allocate |
| | | 651 | * enough VA so we can map pages with the right color (to avoid cache |
| | | 652 | * alias problems). |
| | | 653 | */ |
| | | 654 | if (mips_avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START) { |
| | | 655 | curcpu()->ci_pmap_dstbase = uvm_km_alloc(kernel_map, |
| | | 656 | uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY); |
| | | 657 | curcpu()->ci_pmap_srcbase = uvm_km_alloc(kernel_map, |
| | | 658 | uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY); |
| | | 659 | } |
| | | 660 | #endif |
| | | 661 | |
| | | 662 | |
586 | #ifdef MIPS3 | | 663 | #ifdef MIPS3 |
587 | if (MIPS_HAS_R4K_MMU) { | | 664 | if (MIPS_HAS_R4K_MMU) { |
588 | /* | | 665 | /* |
589 | * XXX | | 666 | * XXX |
590 | * Disable sosend_loan() in src/sys/kern/uipc_socket.c | | 667 | * Disable sosend_loan() in src/sys/kern/uipc_socket.c |
591 | * on MIPS3 CPUs to avoid possible virtual cache aliases | | 668 | * on MIPS3 CPUs to avoid possible virtual cache aliases |
592 | * and uncached mappings in pmap_enter_pv(). | | 669 | * and uncached mappings in pmap_enter_pv(). |
593 | * | | 670 | * |
594 | * Ideally, read only shared mapping won't cause aliases | | 671 | * Ideally, read only shared mapping won't cause aliases |
595 | * so pmap_enter_pv() should handle any shared read only | | 672 | * so pmap_enter_pv() should handle any shared read only |
596 | * mappings without uncached ops like ARM pmap. | | 673 | * mappings without uncached ops like ARM pmap. |
597 | * | | 674 | * |
598 | * On the other hand, R4000 and R4400 have the virtual | | 675 | * On the other hand, R4000 and R4400 have the virtual |
| @@ -612,109 +689,115 @@ pmap_init(void) | | | @@ -612,109 +689,115 @@ pmap_init(void) |
612 | * is zero, the map is an actual physical | | 689 | * is zero, the map is an actual physical |
613 | * map, and may be referenced by the | | 690 | * map, and may be referenced by the |
614 | * hardware. | | 691 | * hardware. |
615 | * | | 692 | * |
616 | * If the size specified is non-zero, | | 693 | * If the size specified is non-zero, |
617 | * the map will be used in software only, and | | 694 | * the map will be used in software only, and |
618 | * is bounded by that size. | | 695 | * is bounded by that size. |
619 | */ | | 696 | */ |
620 | pmap_t | | 697 | pmap_t |
621 | pmap_create(void) | | 698 | pmap_create(void) |
622 | { | | 699 | { |
623 | pmap_t pmap; | | 700 | pmap_t pmap; |
624 | | | 701 | |
| | | 702 | PMAP_COUNT(create); |
625 | #ifdef DEBUG | | 703 | #ifdef DEBUG |
626 | if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) | | 704 | if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) |
627 | printf("pmap_create()\n"); | | 705 | printf("pmap_create()\n"); |
628 | #endif | | 706 | #endif |
629 | | | 707 | |
630 | pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); | | 708 | pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); |
631 | memset(pmap, 0, PMAP_SIZE); | | 709 | memset(pmap, 0, PMAP_SIZE); |
632 | | | 710 | |
633 | pmap->pm_count = 1; | | 711 | pmap->pm_count = 1; |
634 | | | 712 | |
635 | pmap_segtab_alloc(pmap); | | 713 | pmap_segtab_alloc(pmap); |
636 | | | 714 | |
637 | return pmap; | | 715 | return pmap; |
638 | } | | 716 | } |
639 | | | 717 | |
640 | /* | | 718 | /* |
641 | * Retire the given physical map from service. | | 719 | * Retire the given physical map from service. |
642 | * Should only be called if the map contains | | 720 | * Should only be called if the map contains |
643 | * no valid mappings. | | 721 | * no valid mappings. |
644 | */ | | 722 | */ |
645 | void | | 723 | void |
646 | pmap_destroy(pmap_t pmap) | | 724 | pmap_destroy(pmap_t pmap) |
647 | { | | 725 | { |
648 | int count; | | | |
649 | | | | |
650 | #ifdef DEBUG | | 726 | #ifdef DEBUG |
651 | if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) | | 727 | if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) |
652 | printf("pmap_destroy(%p)\n", pmap); | | 728 | printf("pmap_destroy(%p)\n", pmap); |
653 | #endif | | 729 | #endif |
654 | count = --pmap->pm_count; | | 730 | if (--pmap->pm_count) { |
655 | if (count > 0) | | 731 | PMAP_COUNT(dereference); |
656 | return; | | 732 | return; |
| | | 733 | } |
657 | | | 734 | |
| | | 735 | PMAP_COUNT(destroy); |
658 | pmap_segtab_free(pmap); | | 736 | pmap_segtab_free(pmap); |
659 | | | 737 | |
660 | pool_put(&pmap_pmap_pool, pmap); | | 738 | pool_put(&pmap_pmap_pool, pmap); |
661 | } | | 739 | } |
662 | | | 740 | |
663 | /* | | 741 | /* |
664 | * Add a reference to the specified pmap. | | 742 | * Add a reference to the specified pmap. |
665 | */ | | 743 | */ |
666 | void | | 744 | void |
667 | pmap_reference(pmap_t pmap) | | 745 | pmap_reference(pmap_t pmap) |
668 | { | | 746 | { |
669 | | | 747 | |
670 | #ifdef DEBUG | | 748 | #ifdef DEBUG |
671 | if (pmapdebug & PDB_FOLLOW) | | 749 | if (pmapdebug & PDB_FOLLOW) |
672 | printf("pmap_reference(%p)\n", pmap); | | 750 | printf("pmap_reference(%p)\n", pmap); |
673 | #endif | | 751 | #endif |
674 | if (pmap != NULL) { | | 752 | if (pmap != NULL) { |
675 | pmap->pm_count++; | | 753 | pmap->pm_count++; |
676 | } | | 754 | } |
| | | 755 | PMAP_COUNT(reference); |
677 | } | | 756 | } |
678 | | | 757 | |
679 | /* | | 758 | /* |
680 | * Make a new pmap (vmspace) active for the given process. | | 759 | * Make a new pmap (vmspace) active for the given process. |
681 | */ | | 760 | */ |
682 | void | | 761 | void |
683 | pmap_activate(struct lwp *l) | | 762 | pmap_activate(struct lwp *l) |
684 | { | | 763 | { |
685 | pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; | | 764 | pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; |
686 | unsigned int asid; | | 765 | uint32_t asid; |
| | | 766 | |
| | | 767 | PMAP_COUNT(activate); |
687 | | | 768 | |
688 | asid = pmap_tlb_asid_alloc(pmap, l->l_cpu); | | 769 | asid = pmap_tlb_asid_alloc(pmap, l->l_cpu); |
689 | if (l == curlwp) { | | 770 | if (l == curlwp) { |
690 | pmap_segtab_activate(l); | | 771 | pmap_segtab_activate(l); |
691 | tlb_set_asid(asid); | | 772 | tlb_set_asid(asid); |
692 | } | | 773 | } |
693 | } | | 774 | } |
694 | | | 775 | |
695 | /* | | 776 | /* |
696 | * Make a previously active pmap (vmspace) inactive. | | 777 | * Make a previously active pmap (vmspace) inactive. |
697 | */ | | 778 | */ |
698 | void | | 779 | void |
699 | pmap_deactivate(struct lwp *l) | | 780 | pmap_deactivate(struct lwp *l) |
700 | { | | 781 | { |
| | | 782 | PMAP_COUNT(deactivate); |
701 | | | 783 | |
702 | /* Nothing to do. */ | | 784 | /* Nothing to do. */ |
703 | } | | 785 | } |
704 | | | 786 | |
705 | void | | 787 | void |
706 | pmap_update(struct pmap *pmap) | | 788 | pmap_update(struct pmap *pmap) |
707 | { | | 789 | { |
| | | 790 | PMAP_COUNT(update); |
708 | #if 0 | | 791 | #if 0 |
709 | __asm __volatile( | | 792 | __asm __volatile( |
710 | "mtc0\t$ra,$%0; nop; eret" | | 793 | "mtc0\t$ra,$%0; nop; eret" |
711 | : | | 794 | : |
712 | : "n"(MIPS_COP_0_ERROR_PC)); | | 795 | : "n"(MIPS_COP_0_ERROR_PC)); |
713 | #endif | | 796 | #endif |
714 | } | | 797 | } |
715 | | | 798 | |
716 | /* | | 799 | /* |
717 | * Remove the given range of addresses from the specified map. | | 800 | * Remove the given range of addresses from the specified map. |
718 | * | | 801 | * |
719 | * It is assumed that the start and end are properly | | 802 | * It is assumed that the start and end are properly |
720 | * rounded to the page size. | | 803 | * rounded to the page size. |
| @@ -726,85 +809,88 @@ pmap_pte_remove(pmap_t pmap, vaddr_t sva | | | @@ -726,85 +809,88 @@ pmap_pte_remove(pmap_t pmap, vaddr_t sva |
726 | { | | 809 | { |
727 | #ifdef DEBUG | | 810 | #ifdef DEBUG |
728 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) { | | 811 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) { |
729 | printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n", | | 812 | printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n", |
730 | __func__, pmap, sva, eva, pte, flags); | | 813 | __func__, pmap, sva, eva, pte, flags); |
731 | } | | 814 | } |
732 | #endif | | 815 | #endif |
733 | | | 816 | |
734 | for (; sva < eva; sva += NBPG, pte++) { | | 817 | for (; sva < eva; sva += NBPG, pte++) { |
735 | struct vm_page *pg; | | 818 | struct vm_page *pg; |
736 | uint32_t pt_entry = pte->pt_entry; | | 819 | uint32_t pt_entry = pte->pt_entry; |
737 | if (!mips_pg_v(pt_entry)) | | 820 | if (!mips_pg_v(pt_entry)) |
738 | continue; | | 821 | continue; |
| | | 822 | PMAP_COUNT(remove_user_pages); |
739 | if (mips_pg_wired(pt_entry)) | | 823 | if (mips_pg_wired(pt_entry)) |
740 | pmap->pm_stats.wired_count--; | | 824 | pmap->pm_stats.wired_count--; |
741 | pmap->pm_stats.resident_count--; | | 825 | pmap->pm_stats.resident_count--; |
742 | pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry)); | | 826 | pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry)); |
743 | if (pg) | | 827 | if (pg) { |
744 | pmap_remove_pv(pmap, sva, pg); | | 828 | pmap_remove_pv(pmap, sva, pg, |
| | | 829 | pt_entry & mips_pg_m_bit()); |
| | | 830 | } |
745 | pte->pt_entry = mips_pg_nv_bit(); | | 831 | pte->pt_entry = mips_pg_nv_bit(); |
746 | /* | | 832 | /* |
747 | * Flush the TLB for the given address. | | 833 | * Flush the TLB for the given address. |
748 | */ | | 834 | */ |
749 | pmap_tlb_invalidate_addr(pmap, sva); | | 835 | pmap_tlb_invalidate_addr(pmap, sva); |
750 | #ifdef DEBUG | | | |
751 | remove_stats.flushes++; | | | |
752 | #endif | | | |
753 | } | | 836 | } |
754 | return false; | | 837 | return false; |
755 | } | | 838 | } |
756 | | | 839 | |
757 | void | | 840 | void |
758 | pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) | | 841 | pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) |
759 | { | | 842 | { |
760 | struct vm_page *pg; | | 843 | struct vm_page *pg; |
761 | | | 844 | |
762 | #ifdef DEBUG | | 845 | #ifdef DEBUG |
763 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) | | 846 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) |
764 | printf("pmap_remove(%p, %#"PRIxVADDR", %#"PRIxVADDR")\n", pmap, sva, eva); | | 847 | printf("pmap_remove(%p, %#"PRIxVADDR", %#"PRIxVADDR")\n", pmap, sva, eva); |
765 | remove_stats.calls++; | | | |
766 | #endif | | 848 | #endif |
| | | 849 | |
767 | if (pmap == pmap_kernel()) { | | 850 | if (pmap == pmap_kernel()) { |
768 | /* remove entries from kernel pmap */ | | 851 | /* remove entries from kernel pmap */ |
| | | 852 | PMAP_COUNT(remove_kernel_calls); |
769 | #ifdef PARANOIADIAG | | 853 | #ifdef PARANOIADIAG |
770 | if (sva < VM_MIN_KERNEL_ADDRESS || eva >= virtual_end) | | 854 | if (sva < VM_MIN_KERNEL_ADDRESS || eva >= mips_virtual_end) |
771 | panic("pmap_remove: kva not in range"); | | 855 | panic("pmap_remove: kva not in range"); |
772 | #endif | | 856 | #endif |
773 | pt_entry_t *pte = kvtopte(sva); | | 857 | pt_entry_t *pte = kvtopte(sva); |
774 | for (; sva < eva; sva += NBPG, pte++) { | | 858 | for (; sva < eva; sva += NBPG, pte++) { |
775 | uint32_t pt_entry = pte->pt_entry; | | 859 | uint32_t pt_entry = pte->pt_entry; |
776 | if (!mips_pg_v(pt_entry)) | | 860 | if (!mips_pg_v(pt_entry)) |
777 | continue; | | 861 | continue; |
| | | 862 | PMAP_COUNT(remove_kernel_pages); |
778 | if (mips_pg_wired(pt_entry)) | | 863 | if (mips_pg_wired(pt_entry)) |
779 | pmap->pm_stats.wired_count--; | | 864 | pmap->pm_stats.wired_count--; |
780 | pmap->pm_stats.resident_count--; | | 865 | pmap->pm_stats.resident_count--; |
781 | pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry)); | | 866 | pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry)); |
782 | if (pg) | | 867 | if (pg) |
783 | pmap_remove_pv(pmap, sva, pg); | | 868 | pmap_remove_pv(pmap, sva, pg, false); |
784 | if (MIPS_HAS_R4K_MMU) | | 869 | if (MIPS_HAS_R4K_MMU) |
785 | /* See above about G bit */ | | 870 | /* See above about G bit */ |
786 | pte->pt_entry = MIPS3_PG_NV | MIPS3_PG_G; | | 871 | pte->pt_entry = MIPS3_PG_NV | MIPS3_PG_G; |
787 | else | | 872 | else |
788 | pte->pt_entry = MIPS1_PG_NV; | | 873 | pte->pt_entry = MIPS1_PG_NV; |
789 | | | 874 | |
790 | /* | | 875 | /* |
791 | * Flush the TLB for the given address. | | 876 | * Flush the TLB for the given address. |
792 | */ | | 877 | */ |
793 | pmap_tlb_invalidate_addr(pmap, sva); | | 878 | pmap_tlb_invalidate_addr(pmap, sva); |
794 | } | | 879 | } |
795 | return; | | 880 | return; |
796 | } | | 881 | } |
797 | | | 882 | |
| | | 883 | PMAP_COUNT(remove_user_calls); |
798 | #ifdef PARANOIADIAG | | 884 | #ifdef PARANOIADIAG |
799 | if (eva > VM_MAXUSER_ADDRESS) | | 885 | if (eva > VM_MAXUSER_ADDRESS) |
800 | panic("pmap_remove: uva not in range"); | | 886 | panic("pmap_remove: uva not in range"); |
801 | if (PMAP_IS_ACTIVE(pmap)) { | | 887 | if (PMAP_IS_ACTIVE(pmap)) { |
802 | struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu()); | | 888 | struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu()); |
803 | uint32_t asid; | | 889 | uint32_t asid; |
804 | | | 890 | |
805 | __asm volatile("mfc0 %0,$10; nop" : "=r"(asid)); | | 891 | __asm volatile("mfc0 %0,$10; nop" : "=r"(asid)); |
806 | asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6; | | 892 | asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6; |
807 | if (asid != pai->pai_asid) { | | 893 | if (asid != pai->pai_asid) { |
808 | panic("inconsistency for active TLB flush: %d <-> %d", | | 894 | panic("inconsistency for active TLB flush: %d <-> %d", |
809 | asid, pai->pai_asid); | | 895 | asid, pai->pai_asid); |
810 | } | | 896 | } |
| @@ -814,125 +900,146 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va | | | @@ -814,125 +900,146 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va |
814 | } | | 900 | } |
815 | | | 901 | |
816 | /* | | 902 | /* |
817 | * pmap_page_protect: | | 903 | * pmap_page_protect: |
818 | * | | 904 | * |
819 | * Lower the permission for all mappings to a given page. | | 905 | * Lower the permission for all mappings to a given page. |
820 | */ | | 906 | */ |
821 | void | | 907 | void |
822 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) | | 908 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) |
823 | { | | 909 | { |
824 | pv_entry_t pv; | | 910 | pv_entry_t pv; |
825 | vaddr_t va; | | 911 | vaddr_t va; |
826 | | | 912 | |
| | | 913 | PMAP_COUNT(page_protect); |
827 | #ifdef DEBUG | | 914 | #ifdef DEBUG |
828 | if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || | | 915 | if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || |
829 | (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) | | 916 | (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) |
830 | printf("pmap_page_protect(%#"PRIxPADDR", %x)\n", | | 917 | printf("pmap_page_protect(%#"PRIxPADDR", %x)\n", |
831 | VM_PAGE_TO_PHYS(pg), prot); | | 918 | VM_PAGE_TO_PHYS(pg), prot); |
832 | #endif | | 919 | #endif |
833 | switch (prot) { | | 920 | switch (prot) { |
834 | case VM_PROT_READ|VM_PROT_WRITE: | | 921 | case VM_PROT_READ|VM_PROT_WRITE: |
835 | case VM_PROT_ALL: | | 922 | case VM_PROT_ALL: |
836 | break; | | 923 | break; |
837 | | | 924 | |
838 | /* copy_on_write */ | | 925 | /* copy_on_write */ |
839 | case VM_PROT_READ: | | 926 | case VM_PROT_READ: |
840 | case VM_PROT_READ|VM_PROT_EXECUTE: | | 927 | case VM_PROT_READ|VM_PROT_EXECUTE: |
841 | pv = pg->mdpage.pvh_list; | | 928 | pv = &pg->mdpage.pvh_first; |
842 | /* | | 929 | /* |
843 | * Loop over all current mappings setting/clearing as appropos. | | 930 | * Loop over all current mappings setting/clearing as appropos. |
844 | */ | | 931 | */ |
845 | if (pv->pv_pmap != NULL) { | | 932 | if (pv->pv_pmap != NULL) { |
846 | for (; pv; pv = pv->pv_next) { | | 933 | for (; pv; pv = pv->pv_next) { |
847 | va = pv->pv_va; | | 934 | va = pv->pv_va; |
848 | pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE, | | 935 | pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE, |
849 | prot); | | 936 | prot); |
850 | pmap_update(pv->pv_pmap); | | 937 | pmap_update(pv->pv_pmap); |
851 | } | | 938 | } |
852 | } | | 939 | } |
853 | break; | | 940 | break; |
854 | | | 941 | |
855 | /* remove_all */ | | 942 | /* remove_all */ |
856 | default: | | 943 | default: |
857 | pv = pg->mdpage.pvh_list; | | 944 | /* |
| | | 945 | * Do this first so that for each unmapping, pmap_remove_pv |
| | | 946 | * won't try to sync the icache. |
| | | 947 | */ |
| | | 948 | if (pmap_clear_page_attributes(pg, PG_MD_EXECPAGE)) { |
| | | 949 | PMAP_COUNT(exec_uncached_page_protect); |
| | | 950 | } |
| | | 951 | pv = &pg->mdpage.pvh_first; |
858 | while (pv->pv_pmap != NULL) { | | 952 | while (pv->pv_pmap != NULL) { |
859 | pmap_remove(pv->pv_pmap, pv->pv_va, | | 953 | va = pv->pv_va; |
860 | pv->pv_va + PAGE_SIZE); | | 954 | pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE); |
| | | 955 | pmap_update(pv->pv_pmap); |
861 | } | | 956 | } |
862 | pmap_update(pv->pv_pmap); | | | |
863 | } | | 957 | } |
864 | } | | 958 | } |
865 | | | 959 | |
866 | static bool | | 960 | static bool |
867 | pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *pte, | | 961 | pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *pte, |
868 | uintptr_t flags) | | 962 | uintptr_t flags) |
869 | { | | 963 | { |
870 | const uint32_t pg_mask = ~(mips_pg_m_bit() | mips_pg_ro_bit()); | | 964 | const uint32_t pg_mask = ~(mips_pg_m_bit() | mips_pg_ro_bit()); |
871 | const uint32_t p = flags; | | 965 | const uint32_t p = (flags & VM_PROT_WRITE) ? mips_pg_rw_bit() : mips_pg_ro_bit(); |
872 | | | | |
873 | /* | | 966 | /* |
874 | * Change protection on every valid mapping within this segment. | | 967 | * Change protection on every valid mapping within this segment. |
875 | */ | | 968 | */ |
876 | for (; sva < eva; sva += NBPG, pte++) { | | 969 | for (; sva < eva; sva += NBPG, pte++) { |
877 | uint32_t pt_entry = pte->pt_entry; | | 970 | uint32_t pt_entry = pte->pt_entry; |
878 | if (!mips_pg_v(pt_entry)) | | 971 | if (!mips_pg_v(pt_entry)) |
879 | continue; | | 972 | continue; |
880 | if (MIPS_HAS_R4K_MMU && (pt_entry & mips_pg_m_bit())) | | 973 | struct vm_page *pg; |
881 | mips_dcache_wbinv_range_index(sva, PAGE_SIZE); | | 974 | pg = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry)); |
| | | 975 | if (pg && (pt_entry & mips_pg_m_bit())) { |
| | | 976 | if (MIPS_HAS_R4K_MMU |
| | | 977 | && MIPS_CACHE_VIRTUAL_ALIAS |
| | | 978 | && PG_MD_CACHED_P(pg)) |
| | | 979 | mips_dcache_wbinv_range_index(sva, PAGE_SIZE); |
| | | 980 | if (PG_MD_EXECPAGE_P(pg)) { |
| | | 981 | KASSERT(pg->mdpage.pvh_first.pv_pmap != NULL); |
| | | 982 | if (PG_MD_CACHED_P(pg)) { |
| | | 983 | pmap_page_syncicache(pg); |
| | | 984 | PMAP_COUNT(exec_synced_protect); |
| | | 985 | } |
| | | 986 | } |
| | | 987 | } |
882 | pt_entry = (pt_entry & pg_mask) | p; | | 988 | pt_entry = (pt_entry & pg_mask) | p; |
883 | pte->pt_entry = pt_entry; | | 989 | pte->pt_entry = pt_entry; |
884 | /* | | 990 | /* |
885 | * Update the TLB if needed. | | 991 | * Update the TLB if needed. |
886 | */ | | 992 | */ |
887 | pmap_tlb_update(pmap, sva, pt_entry); | | 993 | pmap_tlb_update(pmap, sva, pt_entry); |
888 | } | | 994 | } |
889 | return false; | | 995 | return false; |
890 | } | | 996 | } |
891 | | | 997 | |
892 | /* | | 998 | /* |
893 | * Set the physical protection on the | | 999 | * Set the physical protection on the |
894 | * specified range of this map as requested. | | 1000 | * specified range of this map as requested. |
895 | */ | | 1001 | */ |
896 | void | | 1002 | void |
897 | pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) | | 1003 | pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) |
898 | { | | 1004 | { |
899 | const uint32_t pg_mask = ~(mips_pg_m_bit() | mips_pg_ro_bit()); | | 1005 | const uint32_t pg_mask = ~(mips_pg_m_bit() | mips_pg_ro_bit()); |
900 | pt_entry_t *pte; | | 1006 | pt_entry_t *pte; |
901 | u_int p; | | 1007 | u_int p; |
902 | | | 1008 | |
| | | 1009 | PMAP_COUNT(protect); |
903 | #ifdef DEBUG | | 1010 | #ifdef DEBUG |
904 | if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) | | 1011 | if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) |
905 | printf("pmap_protect(%p, %#"PRIxVADDR", %#"PRIxVADDR", %x)\n", | | 1012 | printf("pmap_protect(%p, %#"PRIxVADDR", %#"PRIxVADDR", %x)\n", |
906 | pmap, sva, eva, prot); | | 1013 | pmap, sva, eva, prot); |
907 | #endif | | 1014 | #endif |
908 | if ((prot & VM_PROT_READ) == VM_PROT_NONE) { | | 1015 | if ((prot & VM_PROT_READ) == VM_PROT_NONE) { |
909 | pmap_remove(pmap, sva, eva); | | 1016 | pmap_remove(pmap, sva, eva); |
910 | return; | | 1017 | return; |
911 | } | | 1018 | } |
912 | | | 1019 | |
913 | p = (prot & VM_PROT_WRITE) ? mips_pg_rw_bit() : mips_pg_ro_bit(); | | 1020 | p = (prot & VM_PROT_WRITE) ? mips_pg_rw_bit() : mips_pg_ro_bit(); |
914 | | | 1021 | |
915 | if (pmap == pmap_kernel()) { | | 1022 | if (pmap == pmap_kernel()) { |
916 | /* | | 1023 | /* |
917 | * Change entries in kernel pmap. | | 1024 | * Change entries in kernel pmap. |
918 | * This will trap if the page is writable (in order to set | | 1025 | * This will trap if the page is writable (in order to set |
919 | * the dirty bit) even if the dirty bit is already set. The | | 1026 | * the dirty bit) even if the dirty bit is already set. The |
920 | * optimization isn't worth the effort since this code isn't | | 1027 | * optimization isn't worth the effort since this code isn't |
921 | * executed much. The common case is to make a user page | | 1028 | * executed much. The common case is to make a user page |
922 | * read-only. | | 1029 | * read-only. |
923 | */ | | 1030 | */ |
924 | #ifdef PARANOIADIAG | | 1031 | #ifdef PARANOIADIAG |
925 | if (sva < VM_MIN_KERNEL_ADDRESS || eva >= virtual_end) | | 1032 | if (sva < VM_MIN_KERNEL_ADDRESS || eva >= mips_virtual_end) |
926 | panic("pmap_protect: kva not in range"); | | 1033 | panic("pmap_protect: kva not in range"); |
927 | #endif | | 1034 | #endif |
928 | pte = kvtopte(sva); | | 1035 | pte = kvtopte(sva); |
929 | for (; sva < eva; sva += NBPG, pte++) { | | 1036 | for (; sva < eva; sva += NBPG, pte++) { |
930 | uint32_t pt_entry = pte->pt_entry; | | 1037 | uint32_t pt_entry = pte->pt_entry; |
931 | if (!mips_pg_v(pt_entry)) | | 1038 | if (!mips_pg_v(pt_entry)) |
932 | continue; | | 1039 | continue; |
933 | if (MIPS_HAS_R4K_MMU && (pt_entry & mips_pg_m_bit())) | | 1040 | if (MIPS_HAS_R4K_MMU && (pt_entry & mips_pg_m_bit())) |
934 | mips_dcache_wb_range(sva, PAGE_SIZE); | | 1041 | mips_dcache_wb_range(sva, PAGE_SIZE); |
935 | pt_entry &= (pt_entry & pg_mask) | p; | | 1042 | pt_entry &= (pt_entry & pg_mask) | p; |
936 | pte->pt_entry = pt_entry; | | 1043 | pte->pt_entry = pt_entry; |
937 | pmap_tlb_update(pmap, sva, pt_entry); | | 1044 | pmap_tlb_update(pmap, sva, pt_entry); |
938 | } | | 1045 | } |
| @@ -1006,237 +1113,250 @@ pmap_procwr(struct proc *p, vaddr_t va, | | | @@ -1006,237 +1113,250 @@ pmap_procwr(struct proc *p, vaddr_t va, |
1006 | * XXXJRT need to loop. | | 1113 | * XXXJRT need to loop. |
1007 | */ | | 1114 | */ |
1008 | mips_icache_sync_range( | | 1115 | mips_icache_sync_range( |
1009 | MIPS_PHYS_TO_KSEG0(mips1_tlbpfn_to_paddr(entry) | | 1116 | MIPS_PHYS_TO_KSEG0(mips1_tlbpfn_to_paddr(entry) |
1010 | + (va & PGOFSET)), | | 1117 | + (va & PGOFSET)), |
1011 | len); | | 1118 | len); |
1012 | #endif /* MIPS1 */ | | 1119 | #endif /* MIPS1 */ |
1013 | } | | 1120 | } |
1014 | } | | 1121 | } |
1015 | | | 1122 | |
1016 | /* | | 1123 | /* |
1017 | * Return RO protection of page. | | 1124 | * Return RO protection of page. |
1018 | */ | | 1125 | */ |
1019 | int | | 1126 | bool |
1020 | pmap_is_page_ro(pmap_t pmap, vaddr_t va, int entry) | | 1127 | pmap_is_page_ro_p(pmap_t pmap, vaddr_t va, uint32_t entry) |
1021 | { | | 1128 | { |
1022 | | | 1129 | |
1023 | return entry & mips_pg_ro_bit(); | | 1130 | return (entry & mips_pg_ro_bit()) != 0; |
1024 | } | | 1131 | } |
1025 | | | 1132 | |
1026 | #if defined(MIPS3_PLUS) && !defined(MIPS3_NO_PV_UNCACHED) /* XXX mmu XXX */ | | 1133 | #if defined(MIPS3_PLUS) && !defined(MIPS3_NO_PV_UNCACHED) /* XXX mmu XXX */ |
1027 | /* | | 1134 | /* |
1028 | * pmap_page_cache: | | 1135 | * pmap_page_cache: |
1029 | * | | 1136 | * |
1030 | * Change all mappings of a managed page to cached/uncached. | | 1137 | * Change all mappings of a managed page to cached/uncached. |
1031 | */ | | 1138 | */ |
1032 | static void | | 1139 | static void |
1033 | pmap_page_cache(struct vm_page *pg, int mode) | | 1140 | pmap_page_cache(struct vm_page *pg, bool cached) |
1034 | { | | 1141 | { |
1035 | pv_entry_t pv; | | 1142 | const uint32_t newmode = cached ? MIPS3_PG_CACHED : MIPS3_PG_UNCACHED; |
1036 | unsigned newmode; | | | |
1037 | | | 1143 | |
1038 | #ifdef DEBUG | | 1144 | #ifdef DEBUG |
1039 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) | | 1145 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) |
1040 | printf("pmap_page_uncache(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg)); | | 1146 | printf("pmap_page_uncache(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg)); |
1041 | #endif | | 1147 | #endif |
1042 | newmode = mode & PV_UNCACHED ? MIPS3_PG_UNCACHED : MIPS3_PG_CACHED; | | 1148 | |
1043 | pv = pg->mdpage.pvh_list; | | 1149 | if (cached) { |
| | | 1150 | pmap_clear_page_attributes(pg, PG_MD_UNCACHED); |
| | | 1151 | PMAP_COUNT(page_cache_restorations); |
| | | 1152 | } else { |
| | | 1153 | pmap_set_page_attributes(pg, PG_MD_UNCACHED); |
| | | 1154 | PMAP_COUNT(page_cache_evictions); |
| | | 1155 | } |
1044 | | | 1156 | |
1045 | while (pv) { | | 1157 | for (pv_entry_t pv = &pg->mdpage.pvh_first; |
| | | 1158 | pv != NULL; |
| | | 1159 | pv = pv->pv_next) { |
1046 | pmap_t pmap = pv->pv_pmap; | | 1160 | pmap_t pmap = pv->pv_pmap; |
1047 | pt_entry_t *pte; | | 1161 | pt_entry_t *pte; |
1048 | uint32_t entry; | | 1162 | uint32_t pt_entry; |
1049 | | | 1163 | |
1050 | pv->pv_flags = (pv->pv_flags & ~PV_UNCACHED) | mode; | | 1164 | KASSERT(pv->pv_pmap != NULL); |
1051 | if (pmap == pmap_kernel()) { | | 1165 | if (pmap == pmap_kernel()) { |
1052 | /* | | 1166 | /* |
1053 | * Change entries in kernel pmap. | | 1167 | * Change entries in kernel pmap. |
1054 | */ | | 1168 | */ |
1055 | pte = kvtopte(pv->pv_va); | | 1169 | pte = kvtopte(pv->pv_va); |
1056 | } else { | | 1170 | } else { |
1057 | pte = pmap_pte_lookup(pmap, pv->pv_va); | | 1171 | pte = pmap_pte_lookup(pmap, pv->pv_va); |
1058 | if (pte == NULL) | | 1172 | if (pte == NULL) |
1059 | continue; | | 1173 | continue; |
1060 | } | | 1174 | } |
1061 | entry = pte->pt_entry; | | 1175 | pt_entry = pte->pt_entry; |
1062 | if (entry & MIPS3_PG_V) { | | 1176 | if (pt_entry & MIPS3_PG_V) { |
1063 | entry = (entry & ~MIPS3_PG_CACHEMODE) | newmode; | | 1177 | pt_entry = (pt_entry & ~MIPS3_PG_CACHEMODE) | newmode; |
1064 | pte->pt_entry = entry; | | 1178 | pte->pt_entry = pt_entry; |
1065 | pmap_tlb_update(pv->pv_pmap, pv->pv_va, entry); | | 1179 | pmap_tlb_update(pv->pv_pmap, pv->pv_va, pt_entry); |
1066 | } | | 1180 | } |
1067 | pv = pv->pv_next; | | | |
1068 | } | | 1181 | } |
1069 | } | | 1182 | } |
1070 | #endif /* MIPS3_PLUS && !MIPS3_NO_PV_UNCACHED */ | | 1183 | #endif /* MIPS3_PLUS && !MIPS3_NO_PV_UNCACHED */ |
1071 | | | 1184 | |
1072 | /* | | 1185 | /* |
1073 | * Insert the given physical page (p) at | | 1186 | * Insert the given physical page (p) at |
1074 | * the specified virtual address (v) in the | | 1187 | * the specified virtual address (v) in the |
1075 | * target physical map with the protection requested. | | 1188 | * target physical map with the protection requested. |
1076 | * | | 1189 | * |
1077 | * If specified, the page will be wired down, meaning | | 1190 | * If specified, the page will be wired down, meaning |
1078 | * that the related pte can not be reclaimed. | | 1191 | * that the related pte can not be reclaimed. |
1079 | * | | 1192 | * |
1080 | * NB: This is the only routine which MAY NOT lazy-evaluate | | 1193 | * NB: This is the only routine which MAY NOT lazy-evaluate |
1081 | * or lose information. That is, this routine must actually | | 1194 | * or lose information. That is, this routine must actually |
1082 | * insert this page into the given map NOW. | | 1195 | * insert this page into the given map NOW. |
1083 | */ | | 1196 | */ |
1084 | int | | 1197 | int |
1085 | pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) | | 1198 | pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) |
1086 | { | | 1199 | { |
1087 | pt_entry_t *pte; | | 1200 | pt_entry_t *pte; |
1088 | u_int npte; | | 1201 | u_int npte; |
1089 | struct vm_page *pg; | | 1202 | struct vm_page *pg; |
1090 | unsigned asid; | | | |
1091 | #if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) | | 1203 | #if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) |
1092 | int cached = 1; | | 1204 | bool cached = true; |
1093 | #endif | | 1205 | #endif |
1094 | bool wired = (flags & PMAP_WIRED) != 0; | | 1206 | bool wired = (flags & PMAP_WIRED) != 0; |
1095 | | | 1207 | |
1096 | #ifdef DEBUG | | 1208 | #ifdef DEBUG |
1097 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) | | 1209 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) |
1098 | printf("pmap_enter(%p, %#"PRIxVADDR", %#"PRIxPADDR", %x, %x)\n", | | 1210 | printf("pmap_enter(%p, %#"PRIxVADDR", %#"PRIxPADDR", %x, %x)\n", |
1099 | pmap, va, pa, prot, wired); | | 1211 | pmap, va, pa, prot, wired); |
1100 | #endif | | 1212 | #endif |
1101 | #if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG) | | 1213 | const bool good_color = PMAP_PAGE_COLOROK_P(pa, va); |
1102 | if (pmap == pmap_kernel()) { | | 1214 | if (pmap == pmap_kernel()) { |
1103 | #ifdef DEBUG | | 1215 | PMAP_COUNT(kernel_mappings); |
1104 | enter_stats.kernel++; | | 1216 | if (!good_color) |
1105 | #endif | | 1217 | PMAP_COUNT(kernel_mappings_bad); |
| | | 1218 | #if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG) |
1106 | if (va < VM_MIN_KERNEL_ADDRESS || va >= mips_virtual_end) | | 1219 | if (va < VM_MIN_KERNEL_ADDRESS || va >= mips_virtual_end) |
1107 | panic("pmap_enter: kva too big"); | | 1220 | panic("pmap_enter: kva %#"PRIxVADDR"too big", va); |
1108 | } else { | | | |
1109 | #ifdef DEBUG | | | |
1110 | enter_stats.user++; | | | |
1111 | #endif | | 1221 | #endif |
| | | 1222 | } else { |
| | | 1223 | PMAP_COUNT(user_mappings); |
| | | 1224 | if (!good_color) |
| | | 1225 | PMAP_COUNT(user_mappings_bad); |
| | | 1226 | #if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG) |
1112 | if (va >= VM_MAXUSER_ADDRESS) | | 1227 | if (va >= VM_MAXUSER_ADDRESS) |
1113 | panic("pmap_enter: uva too big"); | | 1228 | panic("pmap_enter: uva %#"PRIxVADDR" too big", va); |
1114 | } | | | |
1115 | #endif | | 1229 | #endif |
| | | 1230 | } |
1116 | #ifdef PARANOIADIAG | | 1231 | #ifdef PARANOIADIAG |
1117 | #if defined(cobalt) || defined(newsmips) || defined(pmax) /* otherwise ok */ | | 1232 | #if defined(cobalt) || defined(newsmips) || defined(pmax) /* otherwise ok */ |
1118 | if (pa & 0x80000000) /* this is not error in general. */ | | 1233 | if (pa & 0x80000000) /* this is not error in general. */ |
1119 | panic("pmap_enter: pa"); | | 1234 | panic("pmap_enter: pa"); |
1120 | #endif | | 1235 | #endif |
1121 | | | 1236 | |
1122 | #if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) | | 1237 | #if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) |
1123 | if (pa & PMAP_NOCACHE) { | | 1238 | if (pa & PMAP_NOCACHE) { |
1124 | cached = 0; | | 1239 | cached = false; |
1125 | pa &= ~PMAP_NOCACHE; | | 1240 | pa &= ~PMAP_NOCACHE; |
1126 | } | | 1241 | } |
1127 | #endif | | 1242 | #endif |
1128 | | | 1243 | |
1129 | if (!(prot & VM_PROT_READ)) | | 1244 | if (!(prot & VM_PROT_READ)) |
1130 | panic("pmap_enter: prot"); | | 1245 | panic("pmap_enter: prot"); |
1131 | #endif | | 1246 | #endif |
1132 | pg = PHYS_TO_VM_PAGE(pa); | | 1247 | pg = PHYS_TO_VM_PAGE(pa); |
1133 | | | 1248 | |
1134 | if (pg) { | | 1249 | if (pg) { |
1135 | /* Set page referenced/modified status based on flags */ | | 1250 | /* Set page referenced/modified status based on flags */ |
1136 | if (flags & VM_PROT_WRITE) | | 1251 | if (flags & VM_PROT_WRITE) |
1137 | pmap_set_page_attributes(pg, PV_MODIFIED|PV_REFERENCED); | | 1252 | pmap_set_page_attributes(pg, PG_MD_MODIFIED|PG_MD_REFERENCED); |
1138 | else if (flags & VM_PROT_ALL) | | 1253 | else if (flags & VM_PROT_ALL) |
1139 | pmap_set_page_attributes(pg, PV_REFERENCED); | | 1254 | pmap_set_page_attributes(pg, PG_MD_REFERENCED); |
1140 | if (!(prot & VM_PROT_WRITE)) | | 1255 | if (!(prot & VM_PROT_WRITE)) |
1141 | /* | | 1256 | /* |
1142 | * If page is not yet referenced, we could emulate this | | 1257 | * If page is not yet referenced, we could emulate this |
1143 | * by not setting the page valid, and setting the | | 1258 | * by not setting the page valid, and setting the |
1144 | * referenced status in the TLB fault handler, similar | | 1259 | * referenced status in the TLB fault handler, similar |
1145 | * to how page modified status is done for UTLBmod | | 1260 | * to how page modified status is done for UTLBmod |
1146 | * exceptions. | | 1261 | * exceptions. |
1147 | */ | | 1262 | */ |
1148 | npte = mips_pg_ropage_bit(); | | 1263 | npte = mips_pg_ropage_bit(); |
1149 | else { | | 1264 | else { |
1150 | #if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) | | 1265 | #if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) |
1151 | if (cached == 0) { | | 1266 | if (cached == false) { |
1152 | if (pg->mdpage.pvh_attrs & PV_MODIFIED) { | | 1267 | if (PG_MD_MODIFIED_P(pg)) { |
1153 | npte = mips_pg_rwncpage_bit(); | | 1268 | npte = mips_pg_rwncpage_bit(); |
1154 | } else { | | 1269 | } else { |
1155 | npte = mips_pg_cwncpage_bit(); | | 1270 | npte = mips_pg_cwncpage_bit(); |
1156 | } | | 1271 | } |
| | | 1272 | PMAP_COUNT(uncached_mappings); |
1157 | } else { | | 1273 | } else { |
1158 | #endif | | 1274 | #endif |
1159 | if (pg->mdpage.pvh_attrs & PV_MODIFIED) { | | 1275 | if (PG_MD_MODIFIED_P(pg)) { |
1160 | npte = mips_pg_rwpage_bit(); | | 1276 | npte = mips_pg_rwpage_bit(); |
1161 | } else { | | 1277 | } else { |
1162 | npte = mips_pg_cwpage_bit(); | | 1278 | npte = mips_pg_cwpage_bit(); |
1163 | } | | 1279 | } |
1164 | #if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) | | 1280 | #if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64) |
1165 | } | | 1281 | } |
1166 | #endif | | 1282 | #endif |
1167 | } | | 1283 | } |
1168 | #ifdef DEBUG | | 1284 | PMAP_COUNT(managed_mappings); |
1169 | enter_stats.managed++; | | | |
1170 | #endif | | | |
1171 | } else { | | 1285 | } else { |
1172 | /* | | 1286 | /* |
1173 | * Assumption: if it is not part of our managed memory | | 1287 | * Assumption: if it is not part of our managed memory |
1174 | * then it must be device memory which may be volatile. | | 1288 | * then it must be device memory which may be volatile. |
1175 | */ | | 1289 | */ |
1176 | #ifdef DEBUG | | | |
1177 | enter_stats.unmanaged++; | | | |
1178 | #endif | | | |
1179 | if (MIPS_HAS_R4K_MMU) { | | 1290 | if (MIPS_HAS_R4K_MMU) { |
1180 | npte = MIPS3_PG_IOPAGE(PMAP_CCA_FOR_PA(pa)) & | | 1291 | npte = MIPS3_PG_IOPAGE(PMAP_CCA_FOR_PA(pa)) & |
1181 | ~MIPS3_PG_G; | | 1292 | ~MIPS3_PG_G; |
1182 | if ((prot & VM_PROT_WRITE) == 0) { | | 1293 | if ((prot & VM_PROT_WRITE) == 0) { |
1183 | npte |= MIPS3_PG_RO; | | 1294 | npte |= MIPS3_PG_RO; |
1184 | npte &= ~MIPS3_PG_D; | | 1295 | npte &= ~MIPS3_PG_D; |
1185 | } | | 1296 | } |
1186 | } else { | | 1297 | } else { |
1187 | npte = (prot & VM_PROT_WRITE) ? | | 1298 | npte = (prot & VM_PROT_WRITE) ? |
1188 | (MIPS1_PG_D | MIPS1_PG_N) : | | 1299 | (MIPS1_PG_D | MIPS1_PG_N) : |
1189 | (MIPS1_PG_RO | MIPS1_PG_N); | | 1300 | (MIPS1_PG_RO | MIPS1_PG_N); |
1190 | } | | 1301 | } |
| | | 1302 | PMAP_COUNT(unmanaged_mappings); |
1191 | } | | 1303 | } |
1192 | | | 1304 | |
| | | 1305 | #if 0 |
1193 | /* | | 1306 | /* |
1194 | * The only time we need to flush the cache is if we | | 1307 | * The only time we need to flush the cache is if we |
1195 | * execute from a physical address and then change the data. | | 1308 | * execute from a physical address and then change the data. |
1196 | * This is the best place to do this. | | 1309 | * This is the best place to do this. |
1197 | * pmap_protect() and pmap_remove() are mostly used to switch | | 1310 | * pmap_protect() and pmap_remove() are mostly used to switch |
1198 | * between R/W and R/O pages. | | 1311 | * between R/W and R/O pages. |
1199 | * NOTE: we only support cache flush for read only text. | | 1312 | * NOTE: we only support cache flush for read only text. |
1200 | */ | | 1313 | */ |
1201 | #ifdef MIPS1 | | 1314 | #ifdef MIPS1 |
1202 | if ((!MIPS_HAS_R4K_MMU) && prot == (VM_PROT_READ | VM_PROT_EXECUTE)) { | | 1315 | if (!MIPS_HAS_R4K_MMU |
1203 | mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(pa), PAGE_SIZE); | | 1316 | && pg != NULL |
| | | 1317 | && prot == (VM_PROT_READ | VM_PROT_EXECUTE)) { |
| | | 1318 | PMAP_COUNT(enter_exec_mapping); |
| | | 1319 | if (!PG_MD_EXECPAGE_P(pg)) { |
| | | 1320 | mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(pa), |
| | | 1321 | PAGE_SIZE); |
| | | 1322 | pmap_set_page_attributes(pg, PG_MD_EXECPAGE); |
| | | 1323 | PMAP_COUNT(exec_syncicache_entry); |
| | | 1324 | } |
1204 | } | | 1325 | } |
1205 | #endif | | 1326 | #endif |
| | | 1327 | #endif |
1206 | | | 1328 | |
1207 | if (pmap == pmap_kernel()) { | | 1329 | if (pmap == pmap_kernel()) { |
1208 | if (pg) | | 1330 | if (pg) |
1209 | pmap_enter_pv(pmap, va, pg, &npte); | | 1331 | pmap_enter_pv(pmap, va, pg, &npte); |
1210 | | | 1332 | |
1211 | /* enter entries into kernel pmap */ | | 1333 | /* enter entries into kernel pmap */ |
1212 | pte = kvtopte(va); | | 1334 | pte = kvtopte(va); |
1213 | | | 1335 | |
1214 | if (MIPS_HAS_R4K_MMU) | | 1336 | if (MIPS_HAS_R4K_MMU) |
1215 | npte |= mips3_paddr_to_tlbpfn(pa) | MIPS3_PG_G; | | 1337 | npte |= mips3_paddr_to_tlbpfn(pa) | MIPS3_PG_G; |
1216 | else | | 1338 | else |
1217 | npte |= mips1_paddr_to_tlbpfn(pa) | | | 1339 | npte |= mips1_paddr_to_tlbpfn(pa) | |
1218 | MIPS1_PG_V | MIPS1_PG_G; | | 1340 | MIPS1_PG_V | MIPS1_PG_G; |
1219 | | | 1341 | |
1220 | if (wired) { | | 1342 | if (wired) { |
1221 | pmap->pm_stats.wired_count++; | | 1343 | pmap->pm_stats.wired_count++; |
1222 | npte |= mips_pg_wired_bit(); | | 1344 | npte |= mips_pg_wired_bit(); |
1223 | } | | 1345 | } |
1224 | if (mips_pg_v(pte->pt_entry) | | 1346 | if (mips_pg_v(pte->pt_entry) |
1225 | && mips_tlbpfn_to_paddr(pte->pt_entry) != pa) { | | 1347 | && mips_tlbpfn_to_paddr(pte->pt_entry) != pa) { |
1226 | pmap_remove(pmap, va, va + NBPG); | | 1348 | pmap_remove(pmap, va, va + NBPG); |
1227 | #ifdef DEBUG | | 1349 | PMAP_COUNT(kernel_mappings_changed); |
1228 | enter_stats.mchange++; | | | |
1229 | #endif | | | |
1230 | } | | 1350 | } |
1231 | if (!mips_pg_v(pte->pt_entry)) | | 1351 | if (!mips_pg_v(pte->pt_entry)) |
1232 | pmap->pm_stats.resident_count++; | | 1352 | pmap->pm_stats.resident_count++; |
1233 | pte->pt_entry = npte; | | 1353 | pte->pt_entry = npte; |
1234 | | | 1354 | |
1235 | /* | | 1355 | /* |
1236 | * Update the same virtual address entry. | | 1356 | * Update the same virtual address entry. |
1237 | */ | | 1357 | */ |
1238 | | | 1358 | |
1239 | pmap_tlb_update(pmap, va, npte); | | 1359 | pmap_tlb_update(pmap, va, npte); |
1240 | return 0; | | 1360 | return 0; |
1241 | } | | 1361 | } |
1242 | | | 1362 | |
| @@ -1254,169 +1374,161 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd | | | @@ -1254,169 +1374,161 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd |
1254 | * Assume uniform modified and referenced status for all | | 1374 | * Assume uniform modified and referenced status for all |
1255 | * MIPS pages in a MACH page. | | 1375 | * MIPS pages in a MACH page. |
1256 | */ | | 1376 | */ |
1257 | | | 1377 | |
1258 | if (MIPS_HAS_R4K_MMU) | | 1378 | if (MIPS_HAS_R4K_MMU) |
1259 | npte |= mips3_paddr_to_tlbpfn(pa); | | 1379 | npte |= mips3_paddr_to_tlbpfn(pa); |
1260 | else | | 1380 | else |
1261 | npte |= mips1_paddr_to_tlbpfn(pa) | MIPS1_PG_V; | | 1381 | npte |= mips1_paddr_to_tlbpfn(pa) | MIPS1_PG_V; |
1262 | | | 1382 | |
1263 | if (wired) { | | 1383 | if (wired) { |
1264 | pmap->pm_stats.wired_count++; | | 1384 | pmap->pm_stats.wired_count++; |
1265 | npte |= mips_pg_wired_bit(); | | 1385 | npte |= mips_pg_wired_bit(); |
1266 | } | | 1386 | } |
1267 | struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu()); | | | |
1268 | bool needsupdate = PMAP_PAI_ASIDVALID_P(pai, curcpu()); | | | |
1269 | #if defined(DEBUG) | | 1387 | #if defined(DEBUG) |
1270 | if (pmapdebug & PDB_ENTER) { | | 1388 | if (pmapdebug & PDB_ENTER) { |
1271 | printf("pmap_enter: %p: %#"PRIxVADDR": new pte %#x (pa %#"PRIxPADDR")", pmap, va, npte, pa); | | 1389 | printf("pmap_enter: %p: %#"PRIxVADDR": new pte %#x (pa %#"PRIxPADDR")", pmap, va, npte, pa); |
1272 | if (needsupdate) | | | |
1273 | printf(" asid %u (%#x)", pai->pai_asid, pai->pai_asid); | | | |
1274 | printf("\n"); | | 1390 | printf("\n"); |
1275 | } | | 1391 | } |
1276 | #endif | | 1392 | #endif |
1277 | | | 1393 | |
1278 | #ifdef PARANOIADIAG | | 1394 | #ifdef PARANOIADIAG |
1279 | if (PMAP_IS_ACTIVE(pmap)) { | | 1395 | if (PMAP_IS_ACTIVE(pmap)) { |
1280 | uint32_t asid; | | 1396 | uint32_t asid; |
1281 | | | 1397 | |
1282 | __asm volatile("mfc0 %0,$10; nop" : "=r"(asid)); | | 1398 | __asm volatile("mfc0 %0,$10; nop" : "=r"(asid)); |
1283 | asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6; | | 1399 | asid = (MIPS_HAS_R4K_MMU) ? (asid & 0xff) : (asid & 0xfc0) >> 6; |
1284 | if (asid != pai->pai_asid) { | | 1400 | if (asid != pai->pai_asid) { |
1285 | panic("inconsistency for active TLB update: %u <-> %u", | | 1401 | panic("inconsistency for active TLB update: %u <-> %u", |
1286 | asid, pai->pai_asid); | | 1402 | asid, pai->pai_asid); |
1287 | } | | 1403 | } |
1288 | } | | 1404 | } |
1289 | #endif | | 1405 | #endif |
1290 | | | 1406 | |
1291 | asid = pai->pai_asid << MIPS_TLB_PID_SHIFT; | | | |
1292 | if (mips_pg_v(pte->pt_entry) && | | 1407 | if (mips_pg_v(pte->pt_entry) && |
1293 | mips_tlbpfn_to_paddr(pte->pt_entry) != pa) { | | 1408 | mips_tlbpfn_to_paddr(pte->pt_entry) != pa) { |
1294 | pmap_remove(pmap, va, va + NBPG); | | 1409 | pmap_remove(pmap, va, va + NBPG); |
1295 | #ifdef DEBUG | | 1410 | PMAP_COUNT(user_mappings_changed); |
1296 | enter_stats.mchange++; | | | |
1297 | #endif | | | |
1298 | } | | 1411 | } |
1299 | | | 1412 | |
1300 | if (!mips_pg_v(pte->pt_entry)) | | 1413 | if (!mips_pg_v(pte->pt_entry)) |
1301 | pmap->pm_stats.resident_count++; | | 1414 | pmap->pm_stats.resident_count++; |
1302 | pte->pt_entry = npte; | | 1415 | pte->pt_entry = npte; |
1303 | | | 1416 | |
1304 | if (needsupdate) | | 1417 | pmap_tlb_update(pmap, va, npte); |
1305 | pmap_tlb_update(pmap, va, npte); | | | |
1306 | | | 1418 | |
1307 | #ifdef MIPS3_PLUS /* XXX mmu XXX */ | | 1419 | if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) { |
1308 | if (MIPS_HAS_R4K_MMU && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) { | | | |
1309 | #ifdef DEBUG | | 1420 | #ifdef DEBUG |
1310 | if (pmapdebug & PDB_ENTER) | | 1421 | if (pmapdebug & PDB_ENTER) |
1311 | printf("pmap_enter: flush I cache va %#"PRIxVADDR" (%#"PRIxPADDR")\n", | | 1422 | printf("pmap_enter: flush I cache va %#"PRIxVADDR" (%#"PRIxPADDR")\n", |
1312 | va - NBPG, pa); | | 1423 | va - NBPG, pa); |
1313 | #endif | | 1424 | #endif |
1314 | /* XXXJRT */ | | 1425 | PMAP_COUNT(exec_mappings); |
1315 | mips_icache_sync_range_index(va, PAGE_SIZE); | | 1426 | if (!PG_MD_EXECPAGE_P(pg) && PG_MD_CACHED_P(pg)) { |
| | | 1427 | pmap_page_syncicache(pg); |
| | | 1428 | pmap_set_page_attributes(pg, PG_MD_EXECPAGE); |
| | | 1429 | PMAP_COUNT(exec_synced_mappings); |
| | | 1430 | } |
1316 | } | | 1431 | } |
1317 | #endif | | | |
1318 | | | 1432 | |
1319 | return 0; | | 1433 | return 0; |
1320 | } | | 1434 | } |
1321 | | | 1435 | |
1322 | void | | 1436 | void |
1323 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) | | 1437 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) |
1324 | { | | 1438 | { |
| | | 1439 | const bool managed = PAGE_IS_MANAGED(pa); |
1325 | pt_entry_t *pte; | | 1440 | pt_entry_t *pte; |
1326 | u_int npte; | | 1441 | u_int npte; |
1327 | bool managed = PAGE_IS_MANAGED(pa); | | | |
1328 | | | 1442 | |
1329 | #ifdef DEBUG | | 1443 | #ifdef DEBUG |
1330 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) | | 1444 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) |
1331 | printf("pmap_kenter_pa(%#"PRIxVADDR", %#"PRIxPADDR", %x)\n", va, pa, prot); | | 1445 | printf("pmap_kenter_pa(%#"PRIxVADDR", %#"PRIxPADDR", %x)\n", va, pa, prot); |
1332 | #endif | | 1446 | #endif |
| | | 1447 | PMAP_COUNT(kenter_pa); |
| | | 1448 | if (!PMAP_PAGE_COLOROK_P(pa, va) && managed) |
| | | 1449 | PMAP_COUNT(kenter_pa_bad); |
| | | 1450 | |
| | | 1451 | if (!managed) |
| | | 1452 | PMAP_COUNT(kenter_pa_unmanaged); |
1333 | | | 1453 | |
1334 | if (MIPS_HAS_R4K_MMU) { | | 1454 | if (MIPS_HAS_R4K_MMU) { |
1335 | npte = mips3_paddr_to_tlbpfn(pa) | MIPS3_PG_WIRED; | | 1455 | npte = mips3_paddr_to_tlbpfn(pa) |
1336 | if (prot & VM_PROT_WRITE) { | | 1456 | | ((prot & VM_PROT_WRITE) ? MIPS3_PG_D : MIPS3_PG_RO) |
1337 | npte |= MIPS3_PG_D; | | 1457 | | (managed ? MIPS3_PG_CACHED : MIPS3_PG_UNCACHED) |
1338 | } else { | | 1458 | | MIPS3_PG_WIRED | MIPS3_PG_V | MIPS3_PG_G; |
1339 | npte |= MIPS3_PG_RO; | | | |
1340 | } | | | |
1341 | if (managed) { | | | |
1342 | npte |= MIPS3_PG_CACHED; | | | |
1343 | } else { | | | |
1344 | npte |= MIPS3_PG_UNCACHED; | | | |
1345 | } | | | |
1346 | npte |= MIPS3_PG_V | MIPS3_PG_G; | | | |
1347 | } else { | | 1459 | } else { |
1348 | npte = mips1_paddr_to_tlbpfn(pa) | MIPS1_PG_WIRED; | | 1460 | npte = mips1_paddr_to_tlbpfn(pa) |
1349 | if (prot & VM_PROT_WRITE) { | | 1461 | | ((prot & VM_PROT_WRITE) ? MIPS1_PG_D : MIPS1_PG_RO) |
1350 | npte |= MIPS1_PG_D; | | 1462 | | (managed ? 0 : MIPS1_PG_N) |
1351 | } else { | | 1463 | | MIPS1_PG_WIRED | MIPS1_PG_V | MIPS1_PG_G; |
1352 | npte |= MIPS1_PG_RO; | | | |
1353 | } | | | |
1354 | if (managed) { | | | |
1355 | npte |= 0; | | | |
1356 | } else { | | | |
1357 | npte |= MIPS1_PG_N; | | | |
1358 | } | | | |
1359 | npte |= MIPS1_PG_V | MIPS1_PG_G; | | | |
1360 | } | | 1464 | } |
1361 | pte = kvtopte(va); | | 1465 | pte = kvtopte(va); |
1362 | KASSERT(!mips_pg_v(pte->pt_entry)); | | 1466 | KASSERT(!mips_pg_v(pte->pt_entry)); |
1363 | pte->pt_entry = npte; | | 1467 | pte->pt_entry = npte; |
1364 | pmap_tlb_update(pmap_kernel(), va, npte); | | 1468 | pmap_tlb_update(pmap_kernel(), va, npte); |
1365 | } | | 1469 | } |
1366 | | | 1470 | |
1367 | void | | 1471 | void |
1368 | pmap_kremove(vaddr_t va, vsize_t len) | | 1472 | pmap_kremove(vaddr_t va, vsize_t len) |
1369 | { | | 1473 | { |
1370 | pt_entry_t *pte; | | | |
1371 | vaddr_t eva; | | | |
1372 | u_int entry; | | | |
1373 | | | | |
1374 | #ifdef DEBUG | | 1474 | #ifdef DEBUG |
1375 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE)) | | 1475 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE)) |
1376 | printf("pmap_kremove(%#"PRIxVADDR", %#"PRIxVSIZE")\n", va, len); | | 1476 | printf("pmap_kremove(%#"PRIxVADDR", %#"PRIxVSIZE")\n", va, len); |
1377 | #endif | | 1477 | #endif |
1378 | | | 1478 | |
1379 | pte = kvtopte(va); | | 1479 | const uint32_t new_pt_entry = |
1380 | eva = va + len; | | 1480 | (MIPS_HAS_R4K_MMU ? MIPS3_PG_NV | MIPS3_PG_G : MIPS1_PG_NV); |
1381 | for (; va < eva; va += PAGE_SIZE, pte++) { | | 1481 | |
1382 | entry = pte->pt_entry; | | 1482 | pt_entry_t *pte = kvtopte(va); |
1383 | if (!mips_pg_v(entry)) { | | 1483 | for (vaddr_t eva = va + len; va < eva; va += PAGE_SIZE, pte++) { |
| | | 1484 | uint32_t pt_entry = pte->pt_entry; |
| | | 1485 | if (!mips_pg_v(pt_entry)) { |
1384 | continue; | | 1486 | continue; |
1385 | } | | 1487 | } |
1386 | if (MIPS_HAS_R4K_MMU) { | | 1488 | |
1387 | #ifndef sbmips /* XXX XXX if (dcache_is_virtual) - should also check icache virtual && EXEC mapping */ | | 1489 | PMAP_COUNT(kremove_pages); |
1388 | mips_dcache_wbinv_range(va, PAGE_SIZE); | | 1490 | if (MIPS_HAS_R4K_MMU && MIPS_CACHE_VIRTUAL_ALIAS) { |
1389 | #endif | | 1491 | struct vm_page *pg = |
1390 | pte->pt_entry = MIPS3_PG_NV | MIPS3_PG_G; | | 1492 | PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry)); |
1391 | } else { | | 1493 | if (pg != NULL) { |
1392 | pte->pt_entry = MIPS1_PG_NV; | | 1494 | pv_entry_t pv = &pg->mdpage.pvh_first; |
| | | 1495 | if (pv->pv_pmap == NULL) { |
| | | 1496 | pv->pv_va = va; |
| | | 1497 | } else if (PG_MD_CACHED_P(pg) |
| | | 1498 | && mips_cache_badalias(pv->pv_va, va)) { |
| | | 1499 | mips_dcache_wbinv_range(va, PAGE_SIZE); |
| | | 1500 | } |
| | | 1501 | } |
1393 | } | | 1502 | } |
| | | 1503 | |
| | | 1504 | pte->pt_entry = new_pt_entry; |
1394 | pmap_tlb_invalidate_addr(pmap_kernel(), va); | | 1505 | pmap_tlb_invalidate_addr(pmap_kernel(), va); |
1395 | } | | 1506 | } |
1396 | } | | 1507 | } |
1397 | | | 1508 | |
1398 | /* | | 1509 | /* |
1399 | * Routine: pmap_unwire | | 1510 | * Routine: pmap_unwire |
1400 | * Function: Clear the wired attribute for a map/virtual-address | | 1511 | * Function: Clear the wired attribute for a map/virtual-address |
1401 | * pair. | | 1512 | * pair. |
1402 | * In/out conditions: | | 1513 | * In/out conditions: |
1403 | * The mapping must already exist in the pmap. | | 1514 | * The mapping must already exist in the pmap. |
1404 | */ | | 1515 | */ |
1405 | void | | 1516 | void |
1406 | pmap_unwire(pmap_t pmap, vaddr_t va) | | 1517 | pmap_unwire(pmap_t pmap, vaddr_t va) |
1407 | { | | 1518 | { |
1408 | pt_entry_t *pte; | | 1519 | pt_entry_t *pte; |
1409 | | | 1520 | |
| | | 1521 | PMAP_COUNT(unwire); |
1410 | #ifdef DEBUG | | 1522 | #ifdef DEBUG |
1411 | if (pmapdebug & (PDB_FOLLOW|PDB_WIRING)) | | 1523 | if (pmapdebug & (PDB_FOLLOW|PDB_WIRING)) |
1412 | printf("pmap_unwire(%p, %#"PRIxVADDR")\n", pmap, va); | | 1524 | printf("pmap_unwire(%p, %#"PRIxVADDR")\n", pmap, va); |
1413 | #endif | | 1525 | #endif |
1414 | /* | | 1526 | /* |
1415 | * Don't need to flush the TLB since PG_WIRED is only in software. | | 1527 | * Don't need to flush the TLB since PG_WIRED is only in software. |
1416 | */ | | 1528 | */ |
1417 | if (pmap == pmap_kernel()) { | | 1529 | if (pmap == pmap_kernel()) { |
1418 | /* change entries in kernel pmap */ | | 1530 | /* change entries in kernel pmap */ |
1419 | #ifdef PARANOIADIAG | | 1531 | #ifdef PARANOIADIAG |
1420 | if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end) | | 1532 | if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end) |
1421 | panic("pmap_unwire"); | | 1533 | panic("pmap_unwire"); |
1422 | #endif | | 1534 | #endif |
| @@ -1511,222 +1623,159 @@ done: | | | @@ -1511,222 +1623,159 @@ done: |
1511 | | | 1623 | |
1512 | /* | | 1624 | /* |
1513 | * Copy the range specified by src_addr/len | | 1625 | * Copy the range specified by src_addr/len |
1514 | * from the source map to the range dst_addr/len | | 1626 | * from the source map to the range dst_addr/len |
1515 | * in the destination map. | | 1627 | * in the destination map. |
1516 | * | | 1628 | * |
1517 | * This routine is only advisory and need not do anything. | | 1629 | * This routine is only advisory and need not do anything. |
1518 | */ | | 1630 | */ |
1519 | void | | 1631 | void |
1520 | pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len, | | 1632 | pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len, |
1521 | vaddr_t src_addr) | | 1633 | vaddr_t src_addr) |
1522 | { | | 1634 | { |
1523 | | | 1635 | |
| | | 1636 | PMAP_COUNT(copy); |
1524 | #ifdef DEBUG | | 1637 | #ifdef DEBUG |
1525 | if (pmapdebug & PDB_FOLLOW) | | 1638 | if (pmapdebug & PDB_FOLLOW) |
1526 | printf("pmap_copy(%p, %p, %#"PRIxVADDR", %#"PRIxVSIZE", %#"PRIxVADDR")\n", | | 1639 | printf("pmap_copy(%p, %p, %#"PRIxVADDR", %#"PRIxVSIZE", %#"PRIxVADDR")\n", |
1527 | dst_pmap, src_pmap, dst_addr, len, src_addr); | | 1640 | dst_pmap, src_pmap, dst_addr, len, src_addr); |
1528 | #endif | | 1641 | #endif |
1529 | } | | 1642 | } |
1530 | | | 1643 | |
1531 | /* | | 1644 | /* |
1532 | * Routine: pmap_collect | | 1645 | * Routine: pmap_collect |
1533 | * Function: | | 1646 | * Function: |
1534 | * Garbage collects the physical map system for | | 1647 | * Garbage collects the physical map system for |
1535 | * pages which are no longer used. | | 1648 | * pages which are no longer used. |
1536 | * Success need not be guaranteed -- that is, there | | 1649 | * Success need not be guaranteed -- that is, there |
1537 | * may well be pages which are not referenced, but | | 1650 | * may well be pages which are not referenced, but |
1538 | * others may be collected. | | 1651 | * others may be collected. |
1539 | * Usage: | | 1652 | * Usage: |
1540 | * Called by the pageout daemon when pages are scarce. | | 1653 | * Called by the pageout daemon when pages are scarce. |
1541 | */ | | 1654 | */ |
1542 | void | | 1655 | void |
1543 | pmap_collect(pmap_t pmap) | | 1656 | pmap_collect(pmap_t pmap) |
1544 | { | | 1657 | { |
1545 | | | 1658 | |
| | | 1659 | PMAP_COUNT(collect); |
1546 | #ifdef DEBUG | | 1660 | #ifdef DEBUG |
1547 | if (pmapdebug & PDB_FOLLOW) | | 1661 | if (pmapdebug & PDB_FOLLOW) |
1548 | printf("pmap_collect(%p)\n", pmap); | | 1662 | printf("pmap_collect(%p)\n", pmap); |
1549 | #endif | | 1663 | #endif |
1550 | } | | 1664 | } |
1551 | | | 1665 | |
1552 | /* | | 1666 | /* |
1553 | * pmap_zero_page zeros the specified page. | | 1667 | * pmap_zero_page zeros the specified page. |
1554 | */ | | 1668 | */ |
1555 | void | | 1669 | void |
1556 | pmap_zero_page(paddr_t phys) | | 1670 | pmap_zero_page(paddr_t dst_pa) |
1557 | { | | 1671 | { |
1558 | vaddr_t va; | | 1672 | vaddr_t dst_va; |
1559 | #if defined(MIPS3_PLUS) | | 1673 | pt_entry_t dst_tmp; |
1560 | struct vm_page *pg; | | | |
1561 | pv_entry_t pv; | | | |
1562 | #endif | | | |
1563 | | | 1674 | |
1564 | #ifdef DEBUG | | 1675 | #ifdef DEBUG |
1565 | if (pmapdebug & PDB_FOLLOW) | | 1676 | if (pmapdebug & PDB_FOLLOW) |
1566 | printf("pmap_zero_page(%#"PRIxPADDR")\n", phys); | | 1677 | printf("pmap_zero_page(%#"PRIxPADDR")\n", dst_pa); |
1567 | #endif | | | |
1568 | #ifdef PARANOIADIAG | | | |
1569 | if (!(phys < MIPS_MAX_MEM_ADDR)) | | | |
1570 | printf("pmap_zero_page(%#"PRIxPADDR") nonphys\n", phys); | | | |
1571 | #endif | | | |
1572 | #ifdef _LP64 | | | |
1573 | KASSERT(mips_options.mips3_xkphys_cached); | | | |
1574 | va = MIPS_PHYS_TO_XKPHYS_CACHED(phys); | | | |
1575 | #else | | | |
1576 | va = MIPS_PHYS_TO_KSEG0(phys); | | | |
1577 | #endif | | 1678 | #endif |
| | | 1679 | PMAP_COUNT(zeroed_pages); |
1578 | | | 1680 | |
1579 | #if defined(MIPS3_PLUS) /* XXX mmu XXX */ | | 1681 | struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst_pa); |
1580 | pg = PHYS_TO_VM_PAGE(phys); | | | |
1581 | if (mips_cache_info.mci_cache_virtual_alias) { | | | |
1582 | pv = pg->mdpage.pvh_list; | | | |
1583 | if ((pv->pv_flags & PV_UNCACHED) == 0 && | | | |
1584 | mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va)) | | | |
1585 | mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); | | | |
1586 | } | | | |
1587 | #endif | | | |
1588 | | | 1682 | |
1589 | mips_pagezero((void *)va); | | 1683 | dst_va = pmap_map_ephemeral_page(dst_pg, VM_PROT_READ|VM_PROT_WRITE, &dst_tmp); |
1590 | | | 1684 | |
1591 | #if defined(MIPS3_PLUS) /* XXX mmu XXX */ | | 1685 | mips_pagezero((void *)dst_va); |
1592 | /* | | 1686 | |
1593 | * If we have a virtually-indexed, physically-tagged WB cache, | | 1687 | pmap_unmap_ephemeral_page(dst_pg, dst_va, dst_tmp); |
1594 | * and no L2 cache to warn of aliased mappings, we must force a | | | |
1595 | * writeback of the destination out of the L1 cache. If we don't, | | | |
1596 | * later reads (from virtual addresses mapped to the destination PA) | | | |
1597 | * might read old stale DRAM footprint, not the just-written data. | | | |
1598 | * | | | |
1599 | * XXXJRT This is totally disgusting. | | | |
1600 | */ | | | |
1601 | if (MIPS_HAS_R4K_MMU) /* XXX VCED on kernel stack is not allowed */ | | | |
1602 | mips_dcache_wbinv_range(va, PAGE_SIZE); | | | |
1603 | #endif /* MIPS3_PLUS */ | | | |
1604 | } | | 1688 | } |
1605 | | | 1689 | |
1606 | /* | | 1690 | /* |
1607 | * pmap_copy_page copies the specified page. | | 1691 | * pmap_copy_page copies the specified page. |
1608 | */ | | 1692 | */ |
1609 | void | | 1693 | void |
1610 | pmap_copy_page(paddr_t src, paddr_t dst) | | 1694 | pmap_copy_page(paddr_t src_pa, paddr_t dst_pa) |
1611 | { | | 1695 | { |
1612 | vaddr_t src_va, dst_va; | | 1696 | vaddr_t src_va, dst_va; |
| | | 1697 | pt_entry_t src_tmp, dst_tmp; |
1613 | #ifdef DEBUG | | 1698 | #ifdef DEBUG |
1614 | if (pmapdebug & PDB_FOLLOW) | | 1699 | if (pmapdebug & PDB_FOLLOW) |
1615 | printf("pmap_copy_page(%#"PRIxPADDR", %#"PRIxPADDR")\n", src, dst); | | 1700 | printf("pmap_copy_page(%#"PRIxPADDR", %#"PRIxPADDR")\n", src_pa, dst_pa); |
1616 | #endif | | | |
1617 | #ifdef _LP64 | | | |
1618 | KASSERT(mips_options.mips3_xkphys_cached); | | | |
1619 | src_va = MIPS_PHYS_TO_XKPHYS_CACHED(src); | | | |
1620 | dst_va = MIPS_PHYS_TO_XKPHYS_CACHED(dst); | | | |
1621 | #else | | | |
1622 | src_va = MIPS_PHYS_TO_KSEG0(src); | | | |
1623 | dst_va = MIPS_PHYS_TO_KSEG0(dst); | | | |
1624 | #endif | | | |
1625 | #if !defined(_LP64) && defined(PARANOIADIAG) | | | |
1626 | if (!(src < MIPS_MAX_MEM_ADDR)) | | | |
1627 | printf("pmap_copy_page(%#"PRIxPADDR") src nonphys\n", src); | | | |
1628 | if (!(dst < MIPS_MAX_MEM_ADDR)) | | | |
1629 | printf("pmap_copy_page(%#"PRIxPADDR") dst nonphys\n", dst); | | | |
1630 | #endif | | 1701 | #endif |
| | | 1702 | struct vm_page *src_pg = PHYS_TO_VM_PAGE(src_pa); |
| | | 1703 | struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst_pa); |
1631 | | | 1704 | |
1632 | #if defined(MIPS3_PLUS) /* XXX mmu XXX */ | | 1705 | PMAP_COUNT(copied_pages); |
1633 | /* | | 1706 | |
1634 | * If we have a virtually-indexed, physically-tagged cache, | | 1707 | src_va = pmap_map_ephemeral_page(src_pg, VM_PROT_READ, &src_tmp); |
1635 | * and no L2 cache to warn of aliased mappings, we must force an | | 1708 | dst_va = pmap_map_ephemeral_page(dst_pg, VM_PROT_READ|VM_PROT_WRITE, &dst_tmp); |
1636 | * write-back of all L1 cache lines of the source physical address, | | | |
1637 | * irrespective of their virtual address (cache indexes). | | | |
1638 | * If we don't, our copy loop might read and copy stale DRAM | | | |
1639 | * footprint instead of the fresh (but dirty) data in a WB cache. | | | |
1640 | * XXX invalidate any cached lines of the destination PA | | | |
1641 | * here also? | | | |
1642 | * | | | |
1643 | * It would probably be better to map the destination as a | | | |
1644 | * write-through no allocate to reduce cache thrash. | | | |
1645 | */ | | | |
1646 | if (mips_cache_info.mci_cache_virtual_alias) { | | | |
1647 | /*XXX FIXME Not very sophisticated */ | | | |
1648 | mips_flushcache_allpvh(src); | | | |
1649 | #if 0 | | | |
1650 | mips_flushcache_allpvh(dst); | | | |
1651 | #endif | | | |
1652 | } | | | |
1653 | #endif /* MIPS3_PLUS */ | | | |
1654 | | | 1709 | |
1655 | mips_pagecopy((void *)dst_va, (void *)src_va); | | 1710 | mips_pagecopy((void *)dst_va, (void *)src_va); |
1656 | | | 1711 | |
1657 | #if defined(MIPS3_PLUS) /* XXX mmu XXX */ | | 1712 | pmap_unmap_ephemeral_page(dst_pg, dst_va, dst_tmp); |
1658 | /* | | 1713 | pmap_unmap_ephemeral_page(src_pg, src_va, src_tmp); |
1659 | * If we have a virtually-indexed, physically-tagged WB cache, | | | |
1660 | * and no L2 cache to warn of aliased mappings, we must force a | | | |
1661 | * writeback of the destination out of the L1 cache. If we don't, | | | |
1662 | * later reads (from virtual addresses mapped to the destination PA) | | | |
1663 | * might read old stale DRAM footprint, not the just-written data. | | | |
1664 | * XXX Do we need to also invalidate any cache lines matching | | | |
1665 | * the destination as well? | | | |
1666 | * | | | |
1667 | * XXXJRT -- This is totally disgusting. | | | |
1668 | */ | | | |
1669 | if (mips_cache_info.mci_cache_virtual_alias) { | | | |
1670 | mips_dcache_wbinv_range(src_va, PAGE_SIZE); | | | |
1671 | mips_dcache_wbinv_range(dst_va, PAGE_SIZE); | | | |
1672 | } | | | |
1673 | #endif /* MIPS3_PLUS */ | | | |
1674 | } | | 1714 | } |
1675 | | | 1715 | |
1676 | /* | | 1716 | /* |
1677 | * pmap_clear_reference: | | 1717 | * pmap_clear_reference: |
1678 | * | | 1718 | * |
1679 | * Clear the reference bit on the specified physical page. | | 1719 | * Clear the reference bit on the specified physical page. |
1680 | */ | | 1720 | */ |
1681 | bool | | 1721 | bool |
1682 | pmap_clear_reference(struct vm_page *pg) | | 1722 | pmap_clear_reference(struct vm_page *pg) |
1683 | { | | 1723 | { |
1684 | #ifdef DEBUG | | 1724 | #ifdef DEBUG |
1685 | if (pmapdebug & PDB_FOLLOW) | | 1725 | if (pmapdebug & PDB_FOLLOW) |
1686 | printf("pmap_clear_reference(%#"PRIxPADDR")\n", | | 1726 | printf("pmap_clear_reference(%#"PRIxPADDR")\n", |
1687 | VM_PAGE_TO_PHYS(pg)); | | 1727 | VM_PAGE_TO_PHYS(pg)); |
1688 | #endif | | 1728 | #endif |
1689 | return pmap_clear_page_attributes(pg, PV_REFERENCED); | | 1729 | return pmap_clear_page_attributes(pg, PG_MD_REFERENCED); |
1690 | } | | 1730 | } |
1691 | | | 1731 | |
1692 | /* | | 1732 | /* |
1693 | * pmap_is_referenced: | | 1733 | * pmap_is_referenced: |
1694 | * | | 1734 | * |
1695 | * Return whether or not the specified physical page is referenced | | 1735 | * Return whether or not the specified physical page is referenced |
1696 | * by any physical maps. | | 1736 | * by any physical maps. |
1697 | */ | | 1737 | */ |
1698 | bool | | 1738 | bool |
1699 | pmap_is_referenced(struct vm_page *pg) | | 1739 | pmap_is_referenced(struct vm_page *pg) |
1700 | { | | 1740 | { |
1701 | | | 1741 | |
1702 | return pg->mdpage.pvh_attrs & PV_REFERENCED; | | 1742 | return PG_MD_REFERENCED_P(pg); |
1703 | } | | 1743 | } |
1704 | | | 1744 | |
1705 | /* | | 1745 | /* |
1706 | * Clear the modify bits on the specified physical page. | | 1746 | * Clear the modify bits on the specified physical page. |
1707 | */ | | 1747 | */ |
1708 | bool | | 1748 | bool |
1709 | pmap_clear_modify(struct vm_page *pg) | | 1749 | pmap_clear_modify(struct vm_page *pg) |
1710 | { | | 1750 | { |
1711 | struct pv_entry *pv; | | 1751 | struct pv_entry *pv = &pg->mdpage.pvh_first; |
1712 | | | 1752 | |
| | | 1753 | PMAP_COUNT(clear_modify); |
1713 | #ifdef DEBUG | | 1754 | #ifdef DEBUG |
1714 | if (pmapdebug & PDB_FOLLOW) | | 1755 | if (pmapdebug & PDB_FOLLOW) |
1715 | printf("pmap_clear_modify(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg)); | | 1756 | printf("pmap_clear_modify(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg)); |
1716 | #endif | | 1757 | #endif |
1717 | if (!pmap_clear_page_attributes(pg, PV_MODIFIED)) | | 1758 | if (PG_MD_EXECPAGE_P(pg)) { |
| | | 1759 | if (pv->pv_pmap == NULL) { |
| | | 1760 | pmap_clear_page_attributes(pg, PG_MD_EXECPAGE); |
| | | 1761 | PMAP_COUNT(exec_uncached_clear_modify); |
| | | 1762 | } else { |
| | | 1763 | pmap_page_syncicache(pg); |
| | | 1764 | PMAP_COUNT(exec_synced_clear_modify); |
| | | 1765 | } |
| | | 1766 | } |
| | | 1767 | if (!pmap_clear_page_attributes(pg, PG_MD_MODIFIED)) |
1718 | return false; | | 1768 | return false; |
1719 | pv = pg->mdpage.pvh_list; | | | |
1720 | if (pv->pv_pmap == NULL) { | | 1769 | if (pv->pv_pmap == NULL) { |
1721 | return true; | | 1770 | return true; |
1722 | } | | 1771 | } |
1723 | | | 1772 | |
1724 | /* | | 1773 | /* |
1725 | * remove write access from any pages that are dirty | | 1774 | * remove write access from any pages that are dirty |
1726 | * so we can tell if they are written to again later. | | 1775 | * so we can tell if they are written to again later. |
1727 | * flush the VAC first if there is one. | | 1776 | * flush the VAC first if there is one. |
1728 | */ | | 1777 | */ |
1729 | for (; pv; pv = pv->pv_next) { | | 1778 | for (; pv; pv = pv->pv_next) { |
1730 | pmap_t pmap = pv->pv_pmap; | | 1779 | pmap_t pmap = pv->pv_pmap; |
1731 | vaddr_t va = pv->pv_va; | | 1780 | vaddr_t va = pv->pv_va; |
1732 | pt_entry_t *pte; | | 1781 | pt_entry_t *pte; |
| @@ -1736,442 +1785,450 @@ pmap_clear_modify(struct vm_page *pg) | | | @@ -1736,442 +1785,450 @@ pmap_clear_modify(struct vm_page *pg) |
1736 | } else { | | 1785 | } else { |
1737 | pte = pmap_pte_lookup(pmap, va); | | 1786 | pte = pmap_pte_lookup(pmap, va); |
1738 | KASSERT(pte); | | 1787 | KASSERT(pte); |
1739 | } | | 1788 | } |
1740 | pt_entry = pte->pt_entry & ~mips_pg_m_bit(); | | 1789 | pt_entry = pte->pt_entry & ~mips_pg_m_bit(); |
1741 | if (pte->pt_entry == pt_entry) { | | 1790 | if (pte->pt_entry == pt_entry) { |
1742 | continue; | | 1791 | continue; |
1743 | } | | 1792 | } |
1744 | KASSERT(pt_entry & MIPS3_PG_V); | | 1793 | KASSERT(pt_entry & MIPS3_PG_V); |
1745 | /* | | 1794 | /* |
1746 | * Why? Why? | | 1795 | * Why? Why? |
1747 | */ | | 1796 | */ |
1748 | if (MIPS_HAS_R4K_MMU | | 1797 | if (MIPS_HAS_R4K_MMU |
1749 | && mips_cache_info.mci_cache_virtual_alias) { | | 1798 | && MIPS_CACHE_VIRTUAL_ALIAS) { |
1750 | if (PMAP_IS_ACTIVE(pmap)) { | | 1799 | if (PMAP_IS_ACTIVE(pmap)) { |
1751 | mips_dcache_wbinv_range(va, PAGE_SIZE); | | 1800 | mips_dcache_wbinv_range(va, PAGE_SIZE); |
1752 | } else { | | 1801 | } else { |
1753 | mips_dcache_wbinv_range_index(va, PAGE_SIZE); | | 1802 | mips_dcache_wbinv_range_index(va, PAGE_SIZE); |
1754 | } | | 1803 | } |
1755 | } | | 1804 | } |
1756 | pte->pt_entry = pt_entry; | | 1805 | pte->pt_entry = pt_entry; |
1757 | pmap_tlb_invalidate_addr(pmap, va); | | 1806 | pmap_tlb_invalidate_addr(pmap, va); |
1758 | } | | 1807 | } |
1759 | return true; | | 1808 | return true; |
1760 | } | | 1809 | } |
1761 | | | 1810 | |
1762 | /* | | 1811 | /* |
1763 | * pmap_is_modified: | | 1812 | * pmap_is_modified: |
1764 | * | | 1813 | * |
1765 | * Return whether or not the specified physical page is modified | | 1814 | * Return whether or not the specified physical page is modified |
1766 | * by any physical maps. | | 1815 | * by any physical maps. |
1767 | */ | | 1816 | */ |
1768 | bool | | 1817 | bool |
1769 | pmap_is_modified(struct vm_page *pg) | | 1818 | pmap_is_modified(struct vm_page *pg) |
1770 | { | | 1819 | { |
1771 | | | 1820 | |
1772 | return pg->mdpage.pvh_attrs & PV_MODIFIED; | | 1821 | return PG_MD_MODIFIED_P(pg); |
1773 | } | | 1822 | } |
1774 | | | 1823 | |
1775 | /* | | 1824 | /* |
1776 | * pmap_set_modified: | | 1825 | * pmap_set_modified: |
1777 | * | | 1826 | * |
1778 | * Sets the page modified reference bit for the specified page. | | 1827 | * Sets the page modified reference bit for the specified page. |
1779 | */ | | 1828 | */ |
1780 | void | | 1829 | void |
1781 | pmap_set_modified(paddr_t pa) | | 1830 | pmap_set_modified(paddr_t pa) |
1782 | { | | 1831 | { |
1783 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); | | 1832 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); |
1784 | pmap_set_page_attributes(pg, PV_MODIFIED | PV_REFERENCED); | | 1833 | pmap_set_page_attributes(pg, PG_MD_MODIFIED | PG_MD_REFERENCED); |
1785 | } | | 1834 | } |
1786 | | | 1835 | |
1787 | /******************** pv_entry management ********************/ | | 1836 | /******************** pv_entry management ********************/ |
1788 | | | 1837 | |
1789 | /* | | 1838 | /* |
1790 | * Enter the pmap and virtual address into the | | 1839 | * Enter the pmap and virtual address into the |
1791 | * physical to virtual map table. | | 1840 | * physical to virtual map table. |
1792 | */ | | 1841 | */ |
1793 | void | | 1842 | void |
1794 | pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte) | | 1843 | pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte) |
1795 | { | | 1844 | { |
1796 | pv_entry_t pv, npv; | | 1845 | pv_entry_t pv, npv; |
1797 | | | 1846 | |
1798 | pv = pg->mdpage.pvh_list; | | 1847 | pv = &pg->mdpage.pvh_first; |
1799 | #ifdef DEBUG | | 1848 | #ifdef DEBUG |
1800 | if (pmapdebug & PDB_ENTER) | | 1849 | if (pmapdebug & PDB_ENTER) |
1801 | printf("pmap_enter: pv %p: was %#"PRIxVADDR"/%p/%p\n", | | 1850 | printf("pmap_enter: pv %p: was %#"PRIxVADDR"/%p/%p\n", |
1802 | pv, pv->pv_va, pv->pv_pmap, pv->pv_next); | | 1851 | pv, pv->pv_va, pv->pv_pmap, pv->pv_next); |
1803 | #endif | | 1852 | #endif |
1804 | #if defined(MIPS3_NO_PV_UNCACHED) | | 1853 | #if defined(MIPS3_NO_PV_UNCACHED) |
1805 | again: | | 1854 | again: |
1806 | #endif | | 1855 | #endif |
1807 | if (pv->pv_pmap == NULL) { | | 1856 | if (pv->pv_pmap == NULL) { |
1808 | | | 1857 | KASSERT(pv->pv_next == NULL); |
1809 | /* | | 1858 | /* |
1810 | * No entries yet, use header as the first entry | | 1859 | * No entries yet, use header as the first entry |
1811 | */ | | 1860 | */ |
1812 | | | | |
1813 | #ifdef DEBUG | | 1861 | #ifdef DEBUG |
1814 | if (pmapdebug & PDB_PVENTRY) | | 1862 | if (pmapdebug & PDB_PVENTRY) |
1815 | printf("pmap_enter: first pv: pmap %p va %#"PRIxVADDR"\n", | | 1863 | printf("pmap_enter: first pv: pmap %p va %#"PRIxVADDR"\n", |
1816 | pmap, va); | | 1864 | pmap, va); |
1817 | enter_stats.firstpv++; | | | |
1818 | #endif | | 1865 | #endif |
| | | 1866 | PMAP_COUNT(primary_mappings); |
| | | 1867 | PMAP_COUNT(mappings); |
| | | 1868 | pmap_clear_page_attributes(pg, PG_MD_UNCACHED); |
1819 | pv->pv_va = va; | | 1869 | pv->pv_va = va; |
1820 | pv->pv_flags &= ~PV_UNCACHED; | | | |
1821 | pv->pv_pmap = pmap; | | 1870 | pv->pv_pmap = pmap; |
1822 | pv->pv_next = NULL; | | 1871 | pv->pv_next = NULL; |
1823 | } else { | | 1872 | } else { |
1824 | #if defined(MIPS3_PLUS) /* XXX mmu XXX */ | | 1873 | #if defined(MIPS3_PLUS) /* XXX mmu XXX */ |
1825 | if (mips_cache_info.mci_cache_virtual_alias) { | | 1874 | if (MIPS_CACHE_VIRTUAL_ALIAS) { |
1826 | /* | | 1875 | /* |
1827 | * There is at least one other VA mapping this page. | | 1876 | * There is at least one other VA mapping this page. |
1828 | * Check if they are cache index compatible. | | 1877 | * Check if they are cache index compatible. |
1829 | */ | | 1878 | */ |
1830 | | | 1879 | |
1831 | #if defined(MIPS3_NO_PV_UNCACHED) | | 1880 | #if defined(MIPS3_NO_PV_UNCACHED) |
1832 | | | 1881 | |
1833 | /* | | 1882 | /* |
1834 | * Instead of mapping uncached, which some platforms | | 1883 | * Instead of mapping uncached, which some platforms |
1835 | * cannot support, remove the mapping from the pmap. | | 1884 | * cannot support, remove the mapping from the pmap. |
1836 | * When this address is touched again, the uvm will | | 1885 | * When this address is touched again, the uvm will |
1837 | * fault it in. Because of this, each page will only | | 1886 | * fault it in. Because of this, each page will only |
1838 | * be mapped with one index at any given time. | | 1887 | * be mapped with one index at any given time. |
1839 | */ | | 1888 | */ |
1840 | | | 1889 | |
1841 | for (npv = pv; npv; npv = npv->pv_next) { | | 1890 | if (mips_cache_badalias(pv->pv_va, va)) { |
1842 | if (mips_cache_indexof(npv->pv_va) != | | 1891 | for (npv = pv; npv; npv = npv->pv_next) { |
1843 | mips_cache_indexof(va)) { | | | |
1844 | pmap_remove(npv->pv_pmap, npv->pv_va, | | 1892 | pmap_remove(npv->pv_pmap, npv->pv_va, |
1845 | npv->pv_va + PAGE_SIZE); | | 1893 | npv->pv_va + PAGE_SIZE); |
1846 | pmap_update(npv->pv_pmap); | | 1894 | pmap_update(npv->pv_pmap); |
1847 | goto again; | | 1895 | goto again; |
1848 | } | | 1896 | } |
1849 | } | | 1897 | } |
1850 | #else /* !MIPS3_NO_PV_UNCACHED */ | | 1898 | #else /* !MIPS3_NO_PV_UNCACHED */ |
1851 | if (!(pv->pv_flags & PV_UNCACHED)) { | | 1899 | if (PG_MD_CACHED_P(pg)) { |
1852 | for (npv = pv; npv; npv = npv->pv_next) { | | 1900 | /* |
1853 | | | 1901 | * If this page is cached, then all mappings |
1854 | /* | | 1902 | * have the same cache alias so we only need |
1855 | * Check cache aliasing incompatibility. | | 1903 | * to check the first page to see if it's |
1856 | * If one exists, re-map this page | | 1904 | * incompatible with the new mapping. |
1857 | * uncached until all mappings have | | 1905 | * |
1858 | * the same index again. | | 1906 | * If the mappings are incompatible, map this |
1859 | */ | | 1907 | * page as uncached and re-map all the current |
1860 | if (mips_cache_indexof(npv->pv_va) != | | 1908 | * mapping as uncached until all pages can |
1861 | mips_cache_indexof(va)) { | | 1909 | * share the same cache index again. |
1862 | pmap_page_cache(pg,PV_UNCACHED); | | 1910 | */ |
1863 | mips_dcache_wbinv_range_index( | | 1911 | if (mips_cache_badalias(pv->pv_va, va)) { |
1864 | pv->pv_va, PAGE_SIZE); | | 1912 | pmap_page_cache(pg, false); |
1865 | *npte = (*npte & | | 1913 | mips_dcache_wbinv_range_index( |
1866 | ~MIPS3_PG_CACHEMODE) | | | 1914 | pv->pv_va, PAGE_SIZE); |
1867 | MIPS3_PG_UNCACHED; | | 1915 | *npte = (*npte & |
1868 | #ifdef DEBUG | | 1916 | ~MIPS3_PG_CACHEMODE) | |
1869 | enter_stats.ci++; | | 1917 | MIPS3_PG_UNCACHED; |
1870 | #endif | | 1918 | PMAP_COUNT(page_cache_evictions); |
1871 | break; | | | |
1872 | } | | | |
1873 | } | | 1919 | } |
1874 | } else { | | 1920 | } else { |
1875 | *npte = (*npte & ~MIPS3_PG_CACHEMODE) | | | 1921 | *npte = (*npte & ~MIPS3_PG_CACHEMODE) | |
1876 | MIPS3_PG_UNCACHED; | | 1922 | MIPS3_PG_UNCACHED; |
| | | 1923 | PMAP_COUNT(page_cache_evictions); |
1877 | } | | 1924 | } |
1878 | #endif /* !MIPS3_NO_PV_UNCACHED */ | | 1925 | #endif /* !MIPS3_NO_PV_UNCACHED */ |
1879 | } | | 1926 | } |
1880 | #endif /* MIPS3_PLUS */ | | 1927 | #endif /* MIPS3_PLUS */ |
1881 | | | 1928 | |
1882 | /* | | 1929 | /* |
1883 | * There is at least one other VA mapping this page. | | 1930 | * There is at least one other VA mapping this page. |
1884 | * Place this entry after the header. | | 1931 | * Place this entry after the header. |
1885 | * | | 1932 | * |
1886 | * Note: the entry may already be in the table if | | 1933 | * Note: the entry may already be in the table if |
1887 | * we are only changing the protection bits. | | 1934 | * we are only changing the protection bits. |
1888 | */ | | 1935 | */ |
1889 | | | 1936 | |
1890 | for (npv = pv; npv; npv = npv->pv_next) { | | 1937 | for (npv = pv; npv; npv = npv->pv_next) { |
1891 | if (pmap == npv->pv_pmap && va == npv->pv_va) { | | 1938 | if (pmap == npv->pv_pmap && va == npv->pv_va) { |
1892 | #ifdef PARANOIADIAG | | 1939 | #ifdef PARANOIADIAG |
1893 | pt_entry_t *pte; | | 1940 | pt_entry_t *pte; |
1894 | unsigned pt_entry; | | 1941 | uint32_t pt_entry; |
1895 | | | 1942 | |
1896 | if (pmap == pmap_kernel()) { | | 1943 | if (pmap == pmap_kernel()) { |
1897 | pt_entry = kvtopte(va)->pt_entry; | | 1944 | pt_entry = kvtopte(va)->pt_entry; |
1898 | } else { | | 1945 | } else { |
1899 | pte = pmap_pte_lookup(pmap, va); | | 1946 | pte = pmap_pte_lookup(pmap, va); |
1900 | if (pte) { | | 1947 | if (pte) { |
1901 | pt_entry = pte->pt_entry; | | 1948 | pt_entry = pte->pt_entry; |
1902 | } else | | 1949 | } else |
1903 | pt_entry = 0; | | 1950 | pt_entry = 0; |
1904 | } | | 1951 | } |
1905 | if (!mips_pg_v(pt_entry) || | | 1952 | if (!mips_pg_v(pt_entry) || |
1906 | mips_tlbpfn_to_paddr(pt_entry) != | | 1953 | mips_tlbpfn_to_paddr(pt_entry) != |
1907 | VM_PAGE_TO_PHYS(pg)) | | 1954 | VM_PAGE_TO_PHYS(pg)) |
1908 | printf( | | 1955 | printf( |
1909 | "pmap_enter: found va %#"PRIxVADDR" pa %#"PRIxPADDR" in pv_table but != %x\n", | | 1956 | "pmap_enter: found va %#"PRIxVADDR" pa %#"PRIxPADDR" in pv_table but != %x\n", |
1910 | va, VM_PAGE_TO_PHYS(pg), | | 1957 | va, VM_PAGE_TO_PHYS(pg), |
1911 | pt_entry); | | 1958 | pt_entry); |
1912 | #endif | | 1959 | #endif |
| | | 1960 | PMAP_COUNT(remappings); |
1913 | return; | | 1961 | return; |
1914 | } | | 1962 | } |
1915 | } | | 1963 | } |
1916 | #ifdef DEBUG | | 1964 | #ifdef DEBUG |
1917 | if (pmapdebug & PDB_PVENTRY) | | 1965 | if (pmapdebug & PDB_PVENTRY) |
1918 | printf("pmap_enter: new pv: pmap %p va %#"PRIxVADDR"\n", | | 1966 | printf("pmap_enter: new pv: pmap %p va %#"PRIxVADDR"\n", |
1919 | pmap, va); | | 1967 | pmap, va); |
1920 | #endif | | 1968 | #endif |
1921 | npv = (pv_entry_t)pmap_pv_alloc(); | | 1969 | npv = (pv_entry_t)pmap_pv_alloc(); |
1922 | if (npv == NULL) | | 1970 | if (npv == NULL) |
1923 | panic("pmap_enter_pv: pmap_pv_alloc() failed"); | | 1971 | panic("pmap_enter_pv: pmap_pv_alloc() failed"); |
1924 | npv->pv_va = va; | | 1972 | npv->pv_va = va; |
1925 | npv->pv_pmap = pmap; | | 1973 | npv->pv_pmap = pmap; |
1926 | npv->pv_flags = pv->pv_flags; | | | |
1927 | npv->pv_next = pv->pv_next; | | 1974 | npv->pv_next = pv->pv_next; |
1928 | pv->pv_next = npv; | | 1975 | pv->pv_next = npv; |
1929 | #ifdef DEBUG | | 1976 | PMAP_COUNT(mappings); |
1930 | if (!npv->pv_next) | | | |
1931 | enter_stats.secondpv++; | | | |
1932 | #endif | | | |
1933 | } | | 1977 | } |
1934 | } | | 1978 | } |
1935 | | | 1979 | |
1936 | /* | | 1980 | /* |
1937 | * Remove a physical to virtual address translation. | | 1981 | * Remove a physical to virtual address translation. |
1938 | * If cache was inhibited on this page, and there are no more cache | | 1982 | * If cache was inhibited on this page, and there are no more cache |
1939 | * conflicts, restore caching. | | 1983 | * conflicts, restore caching. |
1940 | * Flush the cache if the last page is removed (should always be cached | | 1984 | * Flush the cache if the last page is removed (should always be cached |
1941 | * at this point). | | 1985 | * at this point). |
1942 | */ | | 1986 | */ |
1943 | void | | 1987 | void |
1944 | pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg) | | 1988 | pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty) |
1945 | { | | 1989 | { |
1946 | pv_entry_t pv, npv; | | 1990 | pv_entry_t pv, npv; |
1947 | int last; | | 1991 | bool last; |
1948 | | | 1992 | |
1949 | #ifdef DEBUG | | 1993 | #ifdef DEBUG |
1950 | if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY)) | | 1994 | if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY)) |
1951 | printf("pmap_remove_pv(%p, %#"PRIxVADDR", %#"PRIxPADDR")\n", pmap, va, | | 1995 | printf("pmap_remove_pv(%p, %#"PRIxVADDR", %#"PRIxPADDR")\n", pmap, va, |
1952 | VM_PAGE_TO_PHYS(pg)); | | 1996 | VM_PAGE_TO_PHYS(pg)); |
1953 | #endif | | 1997 | #endif |
1954 | | | 1998 | |
1955 | pv = pg->mdpage.pvh_list; | | 1999 | pv = &pg->mdpage.pvh_first; |
1956 | | | 2000 | |
1957 | /* | | 2001 | /* |
1958 | * If it is the first entry on the list, it is actually | | 2002 | * If it is the first entry on the list, it is actually |
1959 | * in the header and we must copy the following entry up | | 2003 | * in the header and we must copy the following entry up |
1960 | * to the header. Otherwise we must search the list for | | 2004 | * to the header. Otherwise we must search the list for |
1961 | * the entry. In either case we free the now unused entry. | | 2005 | * the entry. In either case we free the now unused entry. |
1962 | */ | | 2006 | */ |
1963 | | | 2007 | |
1964 | last = 0; | | 2008 | last = false; |
1965 | if (pmap == pv->pv_pmap && va == pv->pv_va) { | | 2009 | if (pmap == pv->pv_pmap && va == pv->pv_va) { |
1966 | npv = pv->pv_next; | | 2010 | npv = pv->pv_next; |
1967 | if (npv) { | | 2011 | if (npv) { |
1968 | | | | |
1969 | /* | | | |
1970 | * Copy current modified and referenced status to | | | |
1971 | * the following entry before copying. | | | |
1972 | */ | | | |
1973 | npv->pv_flags |= | | | |
1974 | pv->pv_flags & (PV_MODIFIED | PV_REFERENCED); | | | |
1975 | *pv = *npv; | | 2012 | *pv = *npv; |
1976 | pmap_pv_free(npv); | | 2013 | pmap_pv_free(npv); |
1977 | } else { | | 2014 | } else { |
| | | 2015 | pmap_clear_page_attributes(pg, PG_MD_UNCACHED); |
1978 | pv->pv_pmap = NULL; | | 2016 | pv->pv_pmap = NULL; |
1979 | last = 1; /* Last mapping removed */ | | 2017 | last = true; /* Last mapping removed */ |
1980 | } | | 2018 | } |
1981 | #ifdef DEBUG | | 2019 | PMAP_COUNT(remove_pvfirst); |
1982 | remove_stats.pvfirst++; | | | |
1983 | #endif | | | |
1984 | } else { | | 2020 | } else { |
1985 | for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) { | | 2021 | for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) { |
1986 | #ifdef DEBUG | | 2022 | PMAP_COUNT(remove_pvsearch); |
1987 | remove_stats.pvsearch++; | | | |
1988 | #endif | | | |
1989 | if (pmap == npv->pv_pmap && va == npv->pv_va) | | 2023 | if (pmap == npv->pv_pmap && va == npv->pv_va) |
1990 | break; | | 2024 | break; |
1991 | } | | 2025 | } |
1992 | if (npv) { | | 2026 | if (npv) { |
1993 | pv->pv_next = npv->pv_next; | | 2027 | pv->pv_next = npv->pv_next; |
1994 | pmap_pv_free(npv); | | 2028 | pmap_pv_free(npv); |
1995 | } | | 2029 | } |
1996 | } | | 2030 | } |
| | | 2031 | if (PG_MD_EXECPAGE_P(pg) && dirty) { |
| | | 2032 | if (last) { |
| | | 2033 | /* |
| | | 2034 | * If this was the page's last mapping, we no longer |
| | | 2035 | * care about it execness. |
| | | 2036 | */ |
| | | 2037 | pmap_clear_page_attributes(pg, PG_MD_EXECPAGE); |
| | | 2038 | PMAP_COUNT(exec_uncached_remove); |
| | | 2039 | } else { |
| | | 2040 | /* |
| | | 2041 | * Someone still has it mapped as an executable page |
| | | 2042 | * so we must sync it. |
| | | 2043 | */ |
| | | 2044 | pmap_page_syncicache(pg); |
| | | 2045 | PMAP_COUNT(exec_synced_remove); |
| | | 2046 | } |
| | | 2047 | } |
1997 | #ifdef MIPS3_PLUS /* XXX mmu XXX */ | | 2048 | #ifdef MIPS3_PLUS /* XXX mmu XXX */ |
1998 | #if !defined(MIPS3_NO_PV_UNCACHED) | | 2049 | #ifndef MIPS3_NO_PV_UNCACHED |
1999 | if (MIPS_HAS_R4K_MMU && pv->pv_flags & PV_UNCACHED) { | | 2050 | if (MIPS_HAS_R4K_MMU && PG_MD_UNCACHED_P(pg)) { |
2000 | | | 2051 | |
2001 | /* | | 2052 | /* |
2002 | * Page is currently uncached, check if alias mapping has been | | 2053 | * Page is currently uncached, check if alias mapping has been |
2003 | * removed. If it was, then reenable caching. | | 2054 | * removed. If it was, then reenable caching. |
2004 | */ | | 2055 | */ |
2005 | | | 2056 | |
2006 | pv = pg->mdpage.pvh_list; | | 2057 | pv = &pg->mdpage.pvh_first; |
2007 | for (npv = pv->pv_next; npv; npv = npv->pv_next) { | | 2058 | for (npv = pv->pv_next; npv; npv = npv->pv_next) { |
2008 | if (mips_cache_indexof(pv->pv_va ^ npv->pv_va)) | | 2059 | if (mips_cache_badalias(pv->pv_va, npv->pv_va)) |
2009 | break; | | 2060 | break; |
2010 | } | | 2061 | } |
2011 | if (npv == NULL) | | 2062 | if (npv == NULL) |
2012 | pmap_page_cache(pg, 0); | | 2063 | pmap_page_cache(pg, true); |
2013 | } | | 2064 | } |
2014 | #endif | | 2065 | #endif |
2015 | if (MIPS_HAS_R4K_MMU && last != 0) | | 2066 | if (MIPS_HAS_R4K_MMU && last) /* XXX why */ |
2016 | mips_dcache_wbinv_range_index(va, PAGE_SIZE); | | 2067 | mips_dcache_wbinv_range_index(va, PAGE_SIZE); |
2017 | #endif /* MIPS3_PLUS */ | | 2068 | #endif /* MIPS3_PLUS */ |
2018 | } | | 2069 | } |
2019 | | | 2070 | |
2020 | /* | | 2071 | /* |
2021 | * pmap_pv_page_alloc: | | 2072 | * pmap_pv_page_alloc: |
2022 | * | | 2073 | * |
2023 | * Allocate a page for the pv_entry pool. | | 2074 | * Allocate a page for the pv_entry pool. |
2024 | */ | | 2075 | */ |
2025 | void * | | 2076 | void * |
2026 | pmap_pv_page_alloc(struct pool *pp, int flags) | | 2077 | pmap_pv_page_alloc(struct pool *pp, int flags) |
2027 | { | | 2078 | { |
2028 | const struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE); | | 2079 | struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE); |
2029 | if (pg == NULL) | | 2080 | if (pg == NULL) |
2030 | return NULL; | | 2081 | return NULL; |
2031 | | | 2082 | |
2032 | const paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 2083 | return (void *)mips_pmap_map_poolpage(VM_PAGE_TO_PHYS(pg)); |
2033 | #ifdef _LP64 | | | |
2034 | KASSERT(mips_options.mips3_xkphys_cached); | | | |
2035 | const vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa); | | | |
2036 | #else | | | |
2037 | const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa); | | | |
2038 | #endif | | | |
2039 | #if defined(MIPS3_PLUS) | | | |
2040 | if (mips_cache_info.mci_cache_virtual_alias) { | | | |
2041 | pv_entry_t pv = pg->mdpage.pvh_list; | | | |
2042 | if ((pv->pv_flags & PV_UNCACHED) == 0 && | | | |
2043 | mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va)) | | | |
2044 | mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); | | | |
2045 | } | | | |
2046 | #endif | | | |
2047 | return (void *)va; | | | |
2048 | } | | 2084 | } |
2049 | | | 2085 | |
2050 | /* | | 2086 | /* |
2051 | * pmap_pv_page_free: | | 2087 | * pmap_pv_page_free: |
2052 | * | | 2088 | * |
2053 | * Free a pv_entry pool page. | | 2089 | * Free a pv_entry pool page. |
2054 | */ | | 2090 | */ |
2055 | void | | 2091 | void |
2056 | pmap_pv_page_free(struct pool *pp, void *v) | | 2092 | pmap_pv_page_free(struct pool *pp, void *v) |
2057 | { | | 2093 | { |
2058 | paddr_t phys; | | 2094 | vaddr_t va = (vaddr_t)v; |
| | | 2095 | paddr_t pa; |
2059 | | | 2096 | |
2060 | #ifdef MIPS3_PLUS | | | |
2061 | if (mips_cache_info.mci_cache_virtual_alias) | | | |
2062 | mips_dcache_inv_range((vaddr_t)v, PAGE_SIZE); | | | |
2063 | #endif | | | |
2064 | #ifdef _LP64 | | 2097 | #ifdef _LP64 |
2065 | KASSERT(MIPS_XKPHYS_P(v)); | | 2098 | KASSERT(MIPS_XKPHYS_P(va)); |
2066 | phys = MIPS_XKPHYS_TO_PHYS((vaddr_t)v); | | 2099 | pa = MIPS_XKPHYS_TO_PHYS(va); |
2067 | #else | | 2100 | #else |
2068 | phys = MIPS_KSEG0_TO_PHYS((vaddr_t)v); | | 2101 | KASSERT(MIPS_KSEG0_P(va)); |
| | | 2102 | pa = MIPS_KSEG0_TO_PHYS(va); |
2069 | #endif | | 2103 | #endif |
2070 | uvm_pagefree(PHYS_TO_VM_PAGE(phys)); | | 2104 | #ifdef MIPS3_PLUS |
| | | 2105 | if (MIPS_CACHE_VIRTUAL_ALIAS) |
| | | 2106 | mips_dcache_inv_range(va, PAGE_SIZE); |
| | | 2107 | #endif |
| | | 2108 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); |
| | | 2109 | pmap_clear_page_attributes(pg, PG_MD_POOLPAGE); |
| | | 2110 | uvm_pagefree(pg); |
2071 | } | | 2111 | } |
2072 | | | 2112 | |
2073 | pt_entry_t * | | 2113 | pt_entry_t * |
2074 | pmap_pte(pmap_t pmap, vaddr_t va) | | 2114 | pmap_pte(pmap_t pmap, vaddr_t va) |
2075 | { | | 2115 | { |
2076 | pt_entry_t *pte; | | 2116 | pt_entry_t *pte; |
2077 | | | 2117 | |
2078 | if (pmap == pmap_kernel()) | | 2118 | if (pmap == pmap_kernel()) |
2079 | pte = kvtopte(va); | | 2119 | pte = kvtopte(va); |
2080 | else | | 2120 | else |
2081 | pte = pmap_pte_lookup(pmap, va); | | 2121 | pte = pmap_pte_lookup(pmap, va); |
2082 | return pte; | | 2122 | return pte; |
2083 | } | | 2123 | } |
2084 | | | 2124 | |
2085 | #ifdef MIPS3_PLUS /* XXX mmu XXX */ | | 2125 | #ifdef MIPS3_PLUS /* XXX mmu XXX */ |
2086 | /* | | 2126 | /* |
2087 | * Find first virtual address >= *vap that doesn't cause | | 2127 | * Find first virtual address >= *vap that doesn't cause |
2088 | * a cache alias conflict. | | 2128 | * a cache alias conflict. |
2089 | */ | | 2129 | */ |
2090 | void | | 2130 | void |
2091 | pmap_prefer(vaddr_t foff, vaddr_t *vap, int td) | | 2131 | pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td) |
2092 | { | | 2132 | { |
2093 | const struct mips_cache_info * const mci = &mips_cache_info; | | 2133 | const struct mips_cache_info * const mci = &mips_cache_info; |
2094 | vaddr_t va; | | 2134 | vaddr_t va; |
2095 | vsize_t d; | | 2135 | vsize_t d; |
| | | 2136 | vsize_t prefer_mask = ptoa(uvmexp.colormask); |
| | | 2137 | |
| | | 2138 | PMAP_COUNT(prefer_requests); |
2096 | | | 2139 | |
2097 | if (MIPS_HAS_R4K_MMU) { | | 2140 | if (MIPS_HAS_R4K_MMU) { |
| | | 2141 | prefer_mask |= mci->mci_cache_prefer_mask; |
| | | 2142 | } |
| | | 2143 | |
| | | 2144 | if (prefer_mask) { |
2098 | va = *vap; | | 2145 | va = *vap; |
2099 | | | 2146 | |
2100 | d = foff - va; | | 2147 | d = foff - va; |
2101 | d &= mci->mci_cache_prefer_mask; | | 2148 | d &= prefer_mask; |
2102 | if (td && d) | | 2149 | if (d) { |
2103 | d = -((-d) & mci->mci_cache_prefer_mask); | | 2150 | if (td) |
2104 | *vap = va + d; | | 2151 | *vap = trunc_page(va -((-d) & prefer_mask)); |
| | | 2152 | else |
| | | 2153 | *vap = round_page(va + d); |
| | | 2154 | PMAP_COUNT(prefer_adjustments); |
| | | 2155 | } |
2105 | } | | 2156 | } |
2106 | } | | 2157 | } |
2107 | #endif /* MIPS3_PLUS */ | | 2158 | #endif /* MIPS3_PLUS */ |
2108 | | | 2159 | |
2109 | struct vm_page * | | 2160 | struct vm_page * |
2110 | mips_pmap_alloc_poolpage(int flags) | | 2161 | mips_pmap_alloc_poolpage(int flags) |
2111 | { | | 2162 | { |
2112 | /* | | 2163 | /* |
2113 | * On 32bit kernels, we must make sure that we only allocate pages that | | 2164 | * On 32bit kernels, we must make sure that we only allocate pages that |
2114 | * can be mapped via KSEG0. On 64bit kernels, try to allocated from | | 2165 | * can be mapped via KSEG0. On 64bit kernels, try to allocated from |
2115 | * the first 4G. If all memory is in KSEG0/4G, then we can just | | 2166 | * the first 4G. If all memory is in KSEG0/4G, then we can just |
2116 | * use the default freelist otherwise we must use the pool page list. | | 2167 | * use the default freelist otherwise we must use the pool page list. |
2117 | */ | | 2168 | */ |
2118 | if (mips_poolpage_vmfreelist != VM_FREELIST_DEFAULT) | | 2169 | if (mips_poolpage_vmfreelist != VM_FREELIST_DEFAULT) |
2119 | return uvm_pagealloc_strat(NULL, 0, NULL, flags, | | 2170 | return uvm_pagealloc_strat(NULL, 0, NULL, flags, |
2120 | UVM_PGA_STRAT_ONLY, mips_poolpage_vmfreelist); | | 2171 | UVM_PGA_STRAT_ONLY, mips_poolpage_vmfreelist); |
2121 | | | 2172 | |
2122 | return uvm_pagealloc(NULL, 0, NULL, flags); | | 2173 | return uvm_pagealloc(NULL, 0, NULL, flags); |
2123 | } | | 2174 | } |
2124 | | | 2175 | |
2125 | vaddr_t | | 2176 | vaddr_t |
2126 | mips_pmap_map_poolpage(paddr_t pa) | | 2177 | mips_pmap_map_poolpage(paddr_t pa) |
2127 | { | | 2178 | { |
2128 | vaddr_t va; | | 2179 | vaddr_t va; |
2129 | #if defined(MIPS3_PLUS) | | | |
2130 | struct vm_page *pg; | | | |
2131 | pv_entry_t pv; | | | |
2132 | #endif | | | |
2133 | | | 2180 | |
2134 | #ifdef _LP64 | | 2181 | #ifdef _LP64 |
2135 | KASSERT(mips_options.mips3_xkphys_cached); | | 2182 | KASSERT(mips_options.mips3_xkphys_cached); |
2136 | va = MIPS_PHYS_TO_XKPHYS_CACHED(pa); | | 2183 | va = MIPS_PHYS_TO_XKPHYS_CACHED(pa); |
2137 | #else | | 2184 | #else |
2138 | if (pa > MIPS_PHYS_MASK) | | 2185 | if (pa > MIPS_PHYS_MASK) |
2139 | panic("mips_pmap_map_poolpage: " | | 2186 | panic("mips_pmap_map_poolpage: " |
2140 | "pa #%"PRIxPADDR" can not be mapped into KSEG0", pa); | | 2187 | "pa #%"PRIxPADDR" can not be mapped into KSEG0", pa); |
2141 | | | 2188 | |
2142 | va = MIPS_PHYS_TO_KSEG0(pa); | | 2189 | va = MIPS_PHYS_TO_KSEG0(pa); |
2143 | #endif | | 2190 | #endif |
| | | 2191 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); |
| | | 2192 | KASSERT(pg); |
| | | 2193 | pmap_set_page_attributes(pg, PG_MD_POOLPAGE); |
2144 | #if defined(MIPS3_PLUS) | | 2194 | #if defined(MIPS3_PLUS) |
2145 | if (mips_cache_info.mci_cache_virtual_alias) { | | 2195 | if (MIPS_CACHE_VIRTUAL_ALIAS) { |
2146 | pg = PHYS_TO_VM_PAGE(pa); | | 2196 | /* |
2147 | pv = pg->mdpage.pvh_list; | | 2197 | * If this page was last mapped with an address that might |
2148 | if ((pv->pv_flags & PV_UNCACHED) == 0 && | | 2198 | * cause aliases, flush the page from the cache. |
2149 | mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va)) | | 2199 | */ |
2150 | mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); | | 2200 | pv_entry_t pv = &pg->mdpage.pvh_first; |
| | | 2201 | KASSERT(pv->pv_pmap == NULL); |
| | | 2202 | if (PG_MD_CACHED_P(pg) && mips_cache_badalias(pv->pv_va, va)) |
| | | 2203 | mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); |
| | | 2204 | pv->pv_va = va; |
2151 | } | | 2205 | } |
2152 | #endif | | 2206 | #endif |
2153 | return va; | | 2207 | return va; |
2154 | } | | 2208 | } |
2155 | | | 2209 | |
2156 | paddr_t | | 2210 | paddr_t |
2157 | mips_pmap_unmap_poolpage(vaddr_t va) | | 2211 | mips_pmap_unmap_poolpage(vaddr_t va) |
2158 | { | | 2212 | { |
2159 | paddr_t pa; | | 2213 | paddr_t pa; |
2160 | | | 2214 | |
2161 | #ifdef _LP64 | | 2215 | #ifdef _LP64 |
2162 | KASSERT(MIPS_XKPHYS_P(va)); | | 2216 | KASSERT(MIPS_XKPHYS_P(va)); |
2163 | pa = MIPS_XKPHYS_TO_PHYS(va); | | 2217 | pa = MIPS_XKPHYS_TO_PHYS(va); |
2164 | #else | | 2218 | #else |
| | | 2219 | KASSERT(MIPS_KSEG0_P(va)); |
2165 | pa = MIPS_KSEG0_TO_PHYS(va); | | 2220 | pa = MIPS_KSEG0_TO_PHYS(va); |
2166 | #endif | | 2221 | #endif |
| | | 2222 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); |
| | | 2223 | pmap_clear_page_attributes(pg, PG_MD_POOLPAGE); |
2167 | #if defined(MIPS3_PLUS) | | 2224 | #if defined(MIPS3_PLUS) |
2168 | if (mips_cache_info.mci_cache_virtual_alias) { | | 2225 | if (MIPS_CACHE_VIRTUAL_ALIAS) { |
2169 | mips_dcache_inv_range(va, PAGE_SIZE); | | 2226 | mips_dcache_inv_range(va, PAGE_SIZE); |
2170 | } | | 2227 | } |
2171 | #endif | | 2228 | #endif |
2172 | return pa; | | 2229 | return pa; |
2173 | } | | 2230 | } |
2174 | | | 2231 | |
2175 | /******************** page table page management ********************/ | | 2232 | /******************** page table page management ********************/ |
2176 | | | 2233 | |
2177 | /* TO BE DONE */ | | 2234 | /* TO BE DONE */ |