Sat Oct 30 17:44:04 2010 UTC
Use VM_PAGE_TO_MD() to locate struct vm_page_md.  No functional
changes.


(uebayasi)
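
The change is mechanical: every direct pg->mdpage dereference in pmap.c is
routed through the new VM_PAGE_TO_MD() accessor, so the machine-dependent
per-page data (struct vm_page_md) can later be relocated without touching
the callers again.  As an illustrative sketch only (not part of the commit;
the surrounding declarations are abbreviated, but pvh_list is the real
member used throughout this file), the before/after pattern in C is:

	/* The accessor, exactly as introduced by this commit. */
	#define VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)

	/* Before: callers reached into struct vm_page directly. */
	pv = pg->mdpage.pvh_list;

	/* After: callers locate the machine-dependent data via the macro. */
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
	pv = md->pvh_list;
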
diff -r1.189 -r1.190 src/sys/arch/mips/mips/pmap.c

cvs diff -r1.189 -r1.190 src/sys/arch/mips/mips/Attic/pmap.c

--- src/sys/arch/mips/mips/Attic/pmap.c 2010/07/06 20:50:34 1.189
+++ src/sys/arch/mips/mips/Attic/pmap.c 2010/10/30 17:44:04 1.190
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.189 2010/07/06 20:50:34 cegger Exp $	*/
+/*	$NetBSD: pmap.c,v 1.190 2010/10/30 17:44:04 uebayasi Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center and by Chris G. Demetriou.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -57,27 +57,27 @@
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
  */
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.189 2010/07/06 20:50:34 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.190 2010/10/30 17:44:04 uebayasi Exp $");
 
 /*
  * Manages physical address maps.
  *
  * In addition to hardware address maps, this
  * module is called upon to provide software-use-only
  * maps which may or may not be stored in the same
  * form as hardware maps.  These pseudo-maps are
  * used to store intermediate results from copy
  * operations to and from address spaces.
  *
  * Since the information managed by this module is
  * also stored by the logical address mapping module,
@@ -124,26 +124,28 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.1
 #include <sys/mutex.h>
 #ifdef SYSVSHM
 #include <sys/shm.h>
 #endif
 #include <sys/socketvar.h>	/* XXX: for sock_loan_thresh */
 
 #include <uvm/uvm.h>
 
 #include <mips/cache.h>
 #include <mips/cpuregs.h>
 #include <mips/locore.h>
 #include <mips/pte.h>
 
+#define VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)
+
 CTASSERT(MIPS_KSEG0_START < 0);
 CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG0(0x1000) < 0);
 CTASSERT(MIPS_KSEG1_START < 0);
 CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG1(0x1000) < 0);
 CTASSERT(MIPS_KSEG2_START < 0);
 CTASSERT(MIPS_MAX_MEM_ADDR < 0);
 CTASSERT(MIPS_RESERVED_ADDR < 0);
 CTASSERT((uint32_t)MIPS_KSEG0_START == 0x80000000);
 CTASSERT((uint32_t)MIPS_KSEG1_START == 0xa0000000);
 CTASSERT((uint32_t)MIPS_KSEG2_START == 0xc0000000);
 CTASSERT((uint32_t)MIPS_MAX_MEM_ADDR == 0xbe000000);
 CTASSERT((uint32_t)MIPS_RESERVED_ADDR == 0xbfc80000);
 CTASSERT(MIPS_KSEG0_P(MIPS_PHYS_TO_KSEG0(0)));
@@ -255,39 +257,41 @@ struct pool_allocator pmap_pv_page_alloc
  */
 
 #if defined(MIPS3_PLUS)	/* XXX mmu XXX */
 void mips_dump_segtab(struct proc *);
 static void mips_flushcache_allpvh(paddr_t);
 
 /*
  * Flush virtual addresses associated with a given physical address
  */
 static void
 mips_flushcache_allpvh(paddr_t pa)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 	struct pv_entry *pv;
 
 	pg = PHYS_TO_VM_PAGE(pa);
 	if (pg == NULL) {
 		/* page is unmanaged */
 #ifdef DIAGNOSTIC
 		printf("mips_flushcache_allpvh(): unmanaged pa = %#"PRIxPADDR"\n",
 		    pa);
 #endif
 		return;
 	}
 
-	pv = pg->mdpage.pvh_list;
+	md = VM_PAGE_TO_MD(pg);
+	pv = md->pvh_list;
 
 #if defined(MIPS3_NO_PV_UNCACHED)
 	/* No current mapping.  Cache was flushed by pmap_remove_pv() */
 	if (pv->pv_pmap == NULL)
 		return;
 
 	/* Only one index is allowed at a time */
 	if (mips_cache_indexof(pa) != mips_cache_indexof(pv->pv_va))
 		mips_dcache_wbinv_range_index(pv->pv_va, NBPG);
 #else
 	while (pv) {
 		mips_dcache_wbinv_range_index(pv->pv_va, NBPG);
 		pv = pv->pv_next;
@@ -512,27 +516,27 @@ pmap_init(void)
 	if (pmapdebug & (PDB_FOLLOW|PDB_INIT))
 		printf("pmap_init()\n");
 #endif
 
 	/*
 	 * Memory for the pv entry heads has
 	 * already been allocated.  Initialize the physical memory
 	 * segments.
 	 */
 	pv = pv_table;
 	for (bank = 0; bank < vm_nphysseg; bank++) {
 		s = vm_physmem[bank].end - vm_physmem[bank].start;
 		for (i = 0; i < s; i++)
-			vm_physmem[bank].pgs[i].mdpage.pvh_list = pv++;
+			VM_PAGE_TO_MD(&vm_physmem[bank].pgs[i])->pvh_list = pv++;
 	}
 
 	/*
 	 * Set a low water mark on the pv_entry pool, so that we are
 	 * more likely to have these around even in extreme memory
 	 * starvation.
 	 */
 	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
 
 	/*
 	 * Now it is safe to enable pv entry recording.
 	 */
 	pmap_initialized = true;
@@ -851,60 +855,61 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 			}
 		}
 	}
 }
 
 /*
  * pmap_page_protect:
  *
  *	Lower the permission for all mappings to a given page.
  */
 void
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv;
 	vaddr_t va;
 
 #ifdef DEBUG
 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
 	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
 		printf("pmap_page_protect(%#"PRIxPADDR", %x)\n",
 		    VM_PAGE_TO_PHYS(pg), prot);
 #endif
 	switch (prot) {
 	case VM_PROT_READ|VM_PROT_WRITE:
 	case VM_PROT_ALL:
 		break;
 
 	/* copy_on_write */
 	case VM_PROT_READ:
 	case VM_PROT_READ|VM_PROT_EXECUTE:
-		pv = pg->mdpage.pvh_list;
+		pv = md->pvh_list;
 		/*
 		 * Loop over all current mappings setting/clearing as appropos.
 		 */
 		if (pv->pv_pmap != NULL) {
 			for (; pv; pv = pv->pv_next) {
 				va = pv->pv_va;
 				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
 				    prot);
 				pmap_update(pv->pv_pmap);
 			}
 		}
 		break;
 
 	/* remove_all */
 	default:
-		pv = pg->mdpage.pvh_list;
+		pv = md->pvh_list;
 		while (pv->pv_pmap != NULL) {
 			pmap_remove(pv->pv_pmap, pv->pv_va,
 			    pv->pv_va + PAGE_SIZE);
 		}
 		pmap_update(pv->pv_pmap);
 	}
 }
 
 /*
  * Set the physical protection on the
  * specified range of this map as requested.
  */
 void
@@ -1069,38 +1074,39 @@ pmap_is_page_ro(pmap_t pmap, vaddr_t va,
 
 	return entry & mips_pg_ro_bit();
 }
 
 #if defined(MIPS3_PLUS) && !defined(MIPS3_NO_PV_UNCACHED) /* XXX mmu XXX */
 /*
  * pmap_page_cache:
  *
  *	Change all mappings of a managed page to cached/uncached.
  */
 static void
 pmap_page_cache(struct vm_page *pg, int mode)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv;
 	pt_entry_t *pte;
 	unsigned entry;
 	unsigned newmode;
 	unsigned asid, needupdate;
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
 		printf("pmap_page_uncache(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg));
 #endif
 	newmode = mode & PV_UNCACHED ? MIPS3_PG_UNCACHED : MIPS3_PG_CACHED;
-	pv = pg->mdpage.pvh_list;
+	pv = md->pvh_list;
 	asid = pv->pv_pmap->pm_asid;
 	needupdate = (pv->pv_pmap->pm_asidgen == pmap_asid_generation);
 
 	while (pv) {
 		pv->pv_flags = (pv->pv_flags & ~PV_UNCACHED) | mode;
 		if (pv->pv_pmap == pmap_kernel()) {
 			/*
 			 * Change entries in kernel pmap.
 			 */
 			pte = kvtopte(pv->pv_va);
 			entry = pte->pt_entry;
 			if (entry & MIPS3_PG_V) {
 				entry = (entry & ~MIPS3_PG_CACHEMODE) | newmode;
@@ -1182,27 +1188,28 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 		pa &= ~PGC_NOCACHE;
 	} else {
 		cached = 1;
 		pa |= PGC_NOCACHE;
 	}
 #endif
 
 	if (!(prot & VM_PROT_READ))
 		panic("pmap_enter: prot");
 #endif
 	pg = PHYS_TO_VM_PAGE(pa);
 
 	if (pg) {
-		int *attrs = &pg->mdpage.pvh_attrs;
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+		int *attrs = &md->pvh_attrs;
 
 		/* Set page referenced/modified status based on flags */
 		if (flags & VM_PROT_WRITE)
 			*attrs |= PGA_MODIFIED | PGA_REFERENCED;
 		else if (flags & VM_PROT_ALL)
 			*attrs |= PGA_REFERENCED;
 		if (!(prot & VM_PROT_WRITE))
 			/*
 			 * If page is not yet referenced, we could emulate this
 			 * by not setting the page valid, and setting the
 			 * referenced status in the TLB fault handler, similar
 			 * to how page modified status is done for UTLBmod
 			 * exceptions.
@@ -1636,27 +1643,28 @@ pmap_zero_page(paddr_t phys)
 	if (!(phys < MIPS_MAX_MEM_ADDR))
 		printf("pmap_zero_page(%#"PRIxPADDR") nonphys\n", phys);
 #endif
 #ifdef _LP64
 	KASSERT(mips3_xkphys_cached);
 	va = MIPS_PHYS_TO_XKPHYS_CACHED(phys);
 #else
 	va = MIPS_PHYS_TO_KSEG0(phys);
 #endif
 
 #if defined(MIPS3_PLUS)	/* XXX mmu XXX */
 	pg = PHYS_TO_VM_PAGE(phys);
 	if (mips_cache_virtual_alias) {
-		pv = pg->mdpage.pvh_list;
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+		pv = md->pvh_list;
 		if ((pv->pv_flags & PV_UNCACHED) == 0 &&
 		    mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
 			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
 	}
 #endif
 
 	mips_pagezero((void *)va);
 
 #if defined(MIPS3_PLUS)	/* XXX mmu XXX */
 	/*
 	 * If we have a virtually-indexed, physically-tagged WB cache,
 	 * and no L2 cache to warn of aliased mappings, we must force a
 	 * writeback of the destination out of the L1 cache.  If we don't,
@@ -1738,78 +1746,81 @@ pmap_copy_page(paddr_t src, paddr_t dst)
 		mips_dcache_wbinv_range(dst_va, PAGE_SIZE);
 	}
 #endif /* MIPS3_PLUS */
 }
 
 /*
  * pmap_clear_reference:
  *
  *	Clear the reference bit on the specified physical page.
  */
 bool
 pmap_clear_reference(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	int *attrp;
 	bool rv;
 
 #ifdef DEBUG
 	if (pmapdebug & PDB_FOLLOW)
 		printf("pmap_clear_reference(%#"PRIxPADDR")\n",
 		    VM_PAGE_TO_PHYS(pg));
 #endif
-	attrp = &pg->mdpage.pvh_attrs;
+	attrp = &md->pvh_attrs;
 	rv = *attrp & PGA_REFERENCED;
 	*attrp &= ~PGA_REFERENCED;
 	return rv;
 }
 
 /*
  * pmap_is_referenced:
  *
  *	Return whether or not the specified physical page is referenced
  *	by any physical maps.
  */
 bool
 pmap_is_referenced(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
-	return pg->mdpage.pvh_attrs & PGA_REFERENCED;
+	return md->pvh_attrs & PGA_REFERENCED;
 }
 
 /*
  * Clear the modify bits on the specified physical page.
  */
 bool
 pmap_clear_modify(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	struct pmap *pmap;
 	struct pv_entry *pv;
 	pt_entry_t *pte;
 	int *attrp;
 	vaddr_t va;
 	unsigned asid;
 	bool rv;
 
 #ifdef DEBUG
 	if (pmapdebug & PDB_FOLLOW)
 		printf("pmap_clear_modify(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg));
 #endif
-	attrp = &pg->mdpage.pvh_attrs;
+	attrp = &md->pvh_attrs;
 	rv = *attrp & PGA_MODIFIED;
 	*attrp &= ~PGA_MODIFIED;
 	if (!rv) {
 		return rv;
 	}
-	pv = pg->mdpage.pvh_list;
+	pv = md->pvh_list;
 	if (pv->pv_pmap == NULL) {
 		return true;
 	}
 
 	/*
 	 * remove write access from any pages that are dirty
	 * so we can tell if they are written to again later.
 	 * flush the VAC first if there is one.
 	 */
 
 	for (; pv; pv = pv->pv_next) {
 		pmap = pv->pv_pmap;
 		va = pv->pv_va;
@@ -1839,42 +1850,45 @@ pmap_clear_modify(struct vm_page *pg)
 	}
 	return true;
 }
 
 /*
  * pmap_is_modified:
  *
  *	Return whether or not the specified physical page is modified
  *	by any physical maps.
  */
 bool
 pmap_is_modified(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
-	return pg->mdpage.pvh_attrs & PGA_MODIFIED;
+	return md->pvh_attrs & PGA_MODIFIED;
 }
 
 /*
  * pmap_set_modified:
  *
  *	Sets the page modified reference bit for the specified page.
  */
 void
 pmap_set_modified(paddr_t pa)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 
 	pg = PHYS_TO_VM_PAGE(pa);
-	pg->mdpage.pvh_attrs |= PGA_MODIFIED | PGA_REFERENCED;
+	md = VM_PAGE_TO_MD(pg);
+	md->pvh_attrs |= PGA_MODIFIED | PGA_REFERENCED;
 }
 
 /******************** misc. functions ********************/
 
 /*
  * Allocate TLB address space tag (called ASID or TLBPID) and return it.
  * It takes almost as much or more time to search the TLB for a
  * specific ASID and flush those entries as it does to flush the entire TLB.
  * Therefore, when we allocate a new ASID, we just take the next number. When
  * we run out of numbers, we flush the TLB, increment the generation count
  * and start over. ASID zero is reserved for kernel use.
  */
 void
@@ -1904,29 +1918,30 @@ pmap_asid_alloc(pmap_t pmap)
 	}
 #endif
 }
 
 /******************** pv_entry management ********************/
 
 /*
  * Enter the pmap and virtual address into the
  * physical to virtual map table.
  */
 void
 pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, npv;
 
-	pv = pg->mdpage.pvh_list;
+	pv = md->pvh_list;
 #ifdef DEBUG
 	if (pmapdebug & PDB_ENTER)
 		printf("pmap_enter: pv %p: was %#"PRIxVADDR"/%p/%p\n",
 		    pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
 #endif
 #if defined(MIPS3_NO_PV_UNCACHED)
 again:
 #endif
 	if (pv->pv_pmap == NULL) {
 
 		/*
 		 * No entries yet, use header as the first entry
 		 */
@@ -2056,36 +2071,37 @@ again:
 	}
 }
 
 /*
  * Remove a physical to virtual address translation.
  * If cache was inhibited on this page, and there are no more cache
  * conflicts, restore caching.
  * Flush the cache if the last page is removed (should always be cached
  * at this point).
  */
 void
 pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, npv;
 	int last;
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY))
 		printf("pmap_remove_pv(%p, %#"PRIxVADDR", %#"PRIxPADDR")\n", pmap, va,
 		    VM_PAGE_TO_PHYS(pg));
 #endif
 
-	pv = pg->mdpage.pvh_list;
+	pv = md->pvh_list;
 
 	/*
 	 * If it is the first entry on the list, it is actually
 	 * in the header and we must copy the following entry up
 	 * to the header.  Otherwise we must search the list for
 	 * the entry.  In either case we free the now unused entry.
 	 */
 
 	last = 0;
 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
 		npv = pv->pv_next;
 		if (npv) {
 			*pv = *npv;
@@ -2109,70 +2125,72 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, 
 			pv->pv_next = npv->pv_next;
 			pmap_pv_free(npv);
 		}
 	}
 #ifdef MIPS3_PLUS	/* XXX mmu XXX */
 #if !defined(MIPS3_NO_PV_UNCACHED)
 	if (MIPS_HAS_R4K_MMU && pv->pv_flags & PV_UNCACHED) {
 
 		/*
 		 * Page is currently uncached, check if alias mapping has been
 		 * removed.  If it was, then reenable caching.
 		 */
 
-		pv = pg->mdpage.pvh_list;
+		pv = md->pvh_list;
 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
 			if (mips_cache_indexof(pv->pv_va ^ npv->pv_va))
 				break;
 		}
 		if (npv == NULL)
 			pmap_page_cache(pg, 0);
 	}
 #endif
 	if (MIPS_HAS_R4K_MMU && last != 0)
 		mips_dcache_wbinv_range_index(va, PAGE_SIZE);
 #endif /* MIPS3_PLUS */
 }
 
 /*
  * pmap_pv_page_alloc:
  *
  *	Allocate a page for the pv_entry pool.
  */
 void *
 pmap_pv_page_alloc(struct pool *pp, int flags)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 	paddr_t phys;
 #if defined(MIPS3_PLUS)
 	pv_entry_t pv;
 #endif
 	vaddr_t va;
 
 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
 	if (pg == NULL)
 		return NULL;
 
+	md = VM_PAGE_TO_MD(pg);
 	phys = VM_PAGE_TO_PHYS(pg);
 #ifdef _LP64
 	KASSERT(mips3_xkphys_cached);
 	va = MIPS_PHYS_TO_XKPHYS_CACHED(phys);
 #else
 	va = MIPS_PHYS_TO_KSEG0(phys);
 #endif
 #if defined(MIPS3_PLUS)
 	if (mips_cache_virtual_alias) {
 		pg = PHYS_TO_VM_PAGE(phys);
-		pv = pg->mdpage.pvh_list;
+		pv = md->pvh_list;
 		if ((pv->pv_flags & PV_UNCACHED) == 0 &&
 		    mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
 			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
 	}
 #endif
 	return (void *)va;
 }
 
 /*
  * pmap_pv_page_free:
  *
  *	Free a pv_entry pool page.
  */
@@ -2241,27 +2259,28 @@ mips_pmap_map_poolpage(paddr_t pa)
 #ifdef _LP64
 	KASSERT(mips3_xkphys_cached);
 	va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
 #else
 	if (pa <= MIPS_PHYS_MASK)
 		va = MIPS_PHYS_TO_KSEG0(pa);
 	else
 		panic("mips_pmap_map_poolpage: "
 		    "pa #%"PRIxPADDR" can not be mapped into KSEG0", pa);
 #endif
 #if defined(MIPS3_PLUS)
 	if (mips_cache_virtual_alias) {
 		pg = PHYS_TO_VM_PAGE(pa);
-		pv = pg->mdpage.pvh_list;
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+		pv = md->pvh_list;
 		if ((pv->pv_flags & PV_UNCACHED) == 0 &&
 		    mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
 			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
 	}
 #endif
 	return va;
 }
 
 paddr_t
 mips_pmap_unmap_poolpage(vaddr_t va)
 {
 	paddr_t pa;
 