Sun Oct 31 03:43:03 2010 UTC
More struct vm_page * -> struct vm_page_md * adjustments.


(uebayasi)
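The pattern this revision applies, distilled from the hunks below (a
summary sketch for the reader, not part of the diff itself): the
page-oriented pmap internals stop taking a struct vm_page * and instead
receive the page's metadata and physical address as separate arguments,
with callers converting once at the boundary via VM_PAGE_TO_MD() and
VM_PAGE_TO_PHYS().

	/* Before (1.211.2.16): callees look up md/pa themselves. */
	static void pmap_clearbit(struct vm_page *, u_int);
	static void pmap_page_remove(struct vm_page *);

	/* After (1.211.2.17): md and pa are passed explicitly. */
	static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int);
	static void pmap_page_remove(struct vm_page_md *, paddr_t);

	/* Callers that still hold a struct vm_page * convert up front,
	 * as pmap_page_protect() now does: */
	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
	paddr_t pa = VM_PAGE_TO_PHYS(pg);

	pmap_clearbit(md, pa, PVF_WRITE);	/* e.g. revoke write access */

Once md and pa travel as independent parameters, these code paths no
longer depend on having a struct vm_page at hand.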
cvs diff -r1.211.2.16 -r1.211.2.17 src/sys/arch/arm/arm32/pmap.c

--- src/sys/arch/arm/arm32/pmap.c 2010/10/30 08:41:06 1.211.2.16
+++ src/sys/arch/arm/arm32/pmap.c 2010/10/31 03:43:02 1.211.2.17
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.211.2.16 2010/10/30 08:41:06 uebayasi Exp $	*/
+/*	$NetBSD: pmap.c,v 1.211.2.17 2010/10/31 03:43:02 uebayasi Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Steve C. Woodford for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -202,27 +202,27 @@
 #include <sys/pool.h>
 #include <sys/cdefs.h>
 #include <sys/cpu.h>
 #include <sys/sysctl.h>
 
 #include <uvm/uvm.h>
 
 #include <machine/bus.h>
 #include <machine/pmap.h>
 #include <machine/pcb.h>
 #include <machine/param.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.211.2.16 2010/10/30 08:41:06 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.211.2.17 2010/10/31 03:43:02 uebayasi Exp $");
 
 #ifdef PMAP_DEBUG
 
 /* XXX need to get rid of all refs to this */
 int pmap_debug_level = 0;
 
 /*
  * for switching to potentially finer grained debugging
  */
 #define PDB_FOLLOW	0x0001
 #define PDB_INIT	0x0002
 #define PDB_ENTER	0x0004
 #define PDB_REMOVE	0x0008
@@ -653,40 +653,40 @@ static void pmap_use_l1(pmap_t);
 
 static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
 static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
 static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
 static int pmap_l2ptp_ctor(void *, void *, int);
 static int pmap_l2dtable_ctor(void *, void *, int);
 
 static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
 #ifdef PMAP_CACHE_VIVT
 static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
 static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
 #endif
 
-static void pmap_clearbit(struct vm_page *, u_int);
+static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int);
 #ifdef PMAP_CACHE_VIVT
 static int pmap_clean_page(struct pv_entry *, bool);
 #endif
 #ifdef PMAP_CACHE_VIPT
 static void pmap_syncicache_page(struct vm_page_md *, paddr_t);
 enum pmap_flush_op {
 	PMAP_FLUSH_PRIMARY,
 	PMAP_FLUSH_SECONDARY,
 	PMAP_CLEAN_PRIMARY
 };
 static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op);
 #endif
-static void pmap_page_remove(struct vm_page *);
+static void pmap_page_remove(struct vm_page_md *, paddr_t);
 
 static void pmap_init_l1(struct l1_ttable *, pd_entry_t *);
 static vaddr_t kernel_pt_lookup(paddr_t);
 
 
 /*
  * External function prototypes
  */
 extern void bzero_page(vaddr_t);
 extern void bcopy_page(vaddr_t, vaddr_t);
 
 /*
  * Misc variables
@@ -964,27 +964,27 @@ static struct pv_entry *
 pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
 {
 	struct pv_entry *pv, **prevptr;
 
 	NPDEBUG(PDB_PVDUMP,
 	    printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va));
 
 	prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */
 	pv = *prevptr;
 
 	while (pv) {
 		if (pv->pv_pmap == pm && pv->pv_va == va) {	/* match? */
 			NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md "
-			    "%p\n", pm, md));
+			    "%p, flags 0x%x\n", pm, md, pv->pv_flags));
 			if (pv->pv_flags & PVF_WIRED) {
 				--pm->pm_stats.wired_count;
 			}
 			*prevptr = SLIST_NEXT(pv, pv_link);	/* remove it! */
 			if (pm == pmap_kernel()) {
 				PMAPCOUNT(kernel_unmappings);
 				if (pv->pv_flags & PVF_WRITE)
 					md->krw_mappings--;
 				else
 					md->kro_mappings--;
 			} else {
 				if (pv->pv_flags & PVF_WRITE)
 					md->urw_mappings--;
@@ -2111,45 +2111,44 @@ pmap_vac_me_harder(struct vm_page_md *md
 		*ptep = pte;
 		PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
 	}
 }
 #endif /* PMAP_CACHE_VIPT */
 
 
 /*
  * Modify pte bits for all ptes corresponding to the given physical address.
  * We use `maskbits' rather than `clearbits' because we're always passing
  * constants and the latter would require an extra inversion at run-time.
  */
 static void
-pmap_clearbit(struct vm_page *pg, u_int maskbits)
+pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits)
 {
-	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
 	struct l2_bucket *l2b;
 	struct pv_entry *pv;
 	pt_entry_t *ptep, npte, opte;
 	pmap_t pm;
 	vaddr_t va;
 	u_int oflags;
 #ifdef PMAP_CACHE_VIPT
 	const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs);
 	bool need_syncicache = false;
 	bool did_syncicache = false;
 	bool need_vac_me_harder = false;
 #endif
 
 	NPDEBUG(PDB_BITS,
-	    printf("pmap_clearbit: pg %p (0x%08lx) mask 0x%x\n",
-	    pg, VM_PAGE_TO_PHYS(pg), maskbits));
+	    printf("pmap_clearbit: md %p mask 0x%x\n",
+	    md, maskbits));
 
 	PMAP_HEAD_TO_MAP_LOCK();
 	simple_lock(&md->pvh_slock);
 
 #ifdef PMAP_CACHE_VIPT
 	/*
 	 * If we might want to sync the I-cache and we've modified it,
 	 * then we know we definitely need to sync or discard it.
 	 */
 	if (want_syncicache)
 		need_syncicache = md->pvh_attrs & PVF_MOD;
 #endif
 	/*
@@ -2307,37 +2306,37 @@ pmap_clearbit(struct vm_page *pg, u_int
 
 		pmap_release_pmap_lock(pm);
 
 		NPDEBUG(PDB_BITS,
 		    printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n",
 		    pm, va, opte, npte));
 	}
 
 #ifdef PMAP_CACHE_VIPT
 	/*
 	 * If we need to sync the I-cache and we haven't done it yet, do it.
 	 */
 	if (need_syncicache && !did_syncicache) {
-		pmap_syncicache_page(md, VM_PAGE_TO_PHYS(pg));
+		pmap_syncicache_page(md, pa);
 		PMAPCOUNT(exec_synced_clearbit);
 	}
 	/*
 	 * If we are changing this to read-only, we need to call vac_me_harder
 	 * so we can change all the read-only pages to cacheable. We pretend
 	 * this as a page deletion.
 	 */
 	if (need_vac_me_harder) {
 		if (md->pvh_attrs & PVF_NC)
-			pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), NULL, 0);
+			pmap_vac_me_harder(md, pa, NULL, 0);
 	}
 #endif
 
 	simple_unlock(&md->pvh_slock);
 	PMAP_HEAD_TO_MAP_UNLOCK();
 }
 
 /*
  * pmap_clean_page()
  *
  * This is a local function used to work out the best strategy to clean
  * a single page referenced by its entry in the PV table. It's used by
  * pmap_copy_page, pmap_zero page and maybe some others later on.
@@ -2557,39 +2556,37 @@ pmap_flush_page(struct vm_page_md *md, p
 		pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
 	}
 }
 #endif /* PMAP_CACHE_VIPT */
 
 /*
  * Routine:	pmap_page_remove
  * Function:
  *		Removes this physical page from
  *		all physical maps in which it resides.
  *		Reflects back modify bits to the pager.
  */
 static void
-pmap_page_remove(struct vm_page *pg)
+pmap_page_remove(struct vm_page_md *md, paddr_t pa)
 {
-	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
-	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	struct l2_bucket *l2b;
 	struct pv_entry *pv, *npv, **pvp;
 	pmap_t pm;
 	pt_entry_t *ptep;
 	bool flush;
 	u_int flags;
 
 	NPDEBUG(PDB_FOLLOW,
-	    printf("pmap_page_remove: pg %p (0x%08lx)\n", pg,
+	    printf("pmap_page_remove: md %p (0x%08lx)\n", md,
 	    pa));
 
 	PMAP_HEAD_TO_MAP_LOCK();
 	simple_lock(&md->pvh_slock);
 
 	pv = SLIST_FIRST(&md->pvh_list);
 	if (pv == NULL) {
 #ifdef PMAP_CACHE_VIPT
 		/*
 		 * We *know* the page contents are about to be replaced.
 		 * Discard the exec contents
 		 */
 		if (PV_IS_EXEC_P(md->pvh_attrs))
@@ -2886,29 +2883,27 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 			if (pm->pm_cstate.cs_cache_d &&
 			    (oflags & PVF_NC) == 0 &&
 			    l2pte_writable_p(opte) &&
 			    (prot & VM_PROT_WRITE) == 0)
 				cpu_dcache_wb_range(va, PAGE_SIZE);
 #endif
 		} else {
 			/*
 			 * New mapping, or changing the backing page
 			 * of an existing mapping.
 			 */
 			if (opg) {
 				struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
-				paddr_t opa;
-
-				opa = VM_PAGE_TO_PHYS(opg);
+				paddr_t opa = VM_PAGE_TO_PHYS(opg);
 
 				/*
 				 * Replacing an existing mapping with a new one.
 				 * It is part of our managed memory so we
 				 * must remove it from the PV list
 				 */
 				simple_lock(&omd->pvh_slock);
 				pv = pmap_remove_pv(omd, opa, pm, va);
 				pmap_vac_me_harder(omd, opa, pm, 0);
 				simple_unlock(&omd->pvh_slock);
 				oflags = pv->pv_flags;
 
 #ifdef PMAP_CACHE_VIVT
@@ -2935,53 +2930,55 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 			if ((pv = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){
 				if ((flags & PMAP_CANFAIL) == 0)
 					panic("pmap_enter: no pv entries");
 
 				if (pm != pmap_kernel())
 					pmap_free_l2_bucket(pm, l2b, 0);
 				pmap_release_pmap_lock(pm);
 				PMAP_MAP_TO_HEAD_UNLOCK();
 				NPDEBUG(PDB_ENTER,
 				    printf("pmap_enter: ENOMEM\n"));
 				return (ENOMEM);
 			}
 
-			pmap_enter_pv(md, VM_PAGE_TO_PHYS(pg), pv, pm, va, nflags);
+			pmap_enter_pv(md, pa, pv, pm, va, nflags);
 		}
 	} else {
 		/*
 		 * We're mapping an unmanaged page.
 		 * These are always readable, and possibly writable, from
 		 * the get go as we don't need to track ref/mod status.
 		 */
 		npte |= l2pte_set_readonly(L2_S_PROTO);
 		if (prot & VM_PROT_WRITE)
 			npte = l2pte_set_writable(npte);
 
 		/*
 		 * Make sure the vector table is mapped cacheable
 		 */
 		if (pm != pmap_kernel() && va == vector_page)
 			npte |= pte_l2_s_cache_mode;
 
 		if (opg) {
 			/*
 			 * Looks like there's an existing 'managed' mapping
 			 * at this address.
 			 */
 			struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
+			paddr_t opa = VM_PAGE_TO_PHYS(opg);
+
 			simple_lock(&omd->pvh_slock);
-			pv = pmap_remove_pv(omd, VM_PAGE_TO_PHYS(opg), pm, va);
-			pmap_vac_me_harder(omd, VM_PAGE_TO_PHYS(opg), pm, 0);
+			pv = pmap_remove_pv(omd, opa, pm, va);
+			pmap_vac_me_harder(omd, opa, pm, 0);
 			simple_unlock(&omd->pvh_slock);
 			oflags = pv->pv_flags;
 
 #ifdef PMAP_CACHE_VIVT
 			if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) {
 				if (PV_BEEN_EXECD(oflags))
 					pmap_idcache_wbinv_range(pm, va,
 					    PAGE_SIZE);
 				else
 				if (PV_BEEN_REFD(oflags))
 					pmap_dcache_wb_range(pm, va, PAGE_SIZE,
 					    true, (oflags & PVF_WRITE) == 0);
 			}
@@ -3042,34 +3039,36 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 
 		if (PV_BEEN_EXECD(oflags))
 			pmap_tlb_flushID_SE(pm, va);
 		else
 		if (PV_BEEN_REFD(oflags))
 			pmap_tlb_flushD_SE(pm, va);
 
 		NPDEBUG(PDB_ENTER,
 		    printf("pmap_enter: is_cached %d cs 0x%08x\n",
 		    is_cached, pm->pm_cstate.cs_all));
 
 		if (pg != NULL) {
 			struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+
 			simple_lock(&md->pvh_slock);
-			pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pm, va);
+			pmap_vac_me_harder(md, pa, pm, va);
 			simple_unlock(&md->pvh_slock);
 		}
 	}
 #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC)
 	if (pg) {
 		struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+
 		simple_lock(&md->pvh_slock);
 		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
 		KASSERT(((md->pvh_attrs & PVF_WRITE) == 0) == (md->urw_mappings + md->krw_mappings == 0));
 		simple_unlock(&md->pvh_slock);
 	}
 #endif
 
 	pmap_release_pmap_lock(pm);
 	PMAP_MAP_TO_HEAD_UNLOCK();
 
 	return (0);
 }
 
@@ -3156,29 +3155,30 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
 
 			pa = l2pte_pa(pte);
 			is_exec = 0;
 			is_refd = 1;
 
 			/*
 			 * Update flags. In a number of circumstances,
 			 * we could cluster a lot of these and do a
 			 * number of sequential pages in one go.
 			 */
 			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
 				struct vm_page_md *md = VM_PAGE_TO_MD(pg);
 				struct pv_entry *pv;
+
 				simple_lock(&md->pvh_slock);
-				pv = pmap_remove_pv(md, VM_PAGE_TO_PHYS(pg), pm, sva);
-				pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pm, 0);
+				pv = pmap_remove_pv(md, pa, pm, sva);
+				pmap_vac_me_harder(md, pa, pm, 0);
 				simple_unlock(&md->pvh_slock);
 				if (pv != NULL) {
 					if (pm->pm_remove_all == false) {
 						is_exec =
 						    PV_BEEN_EXECD(pv->pv_flags);
 						is_refd =
 						    PV_BEEN_REFD(pv->pv_flags);
 					}
 					pool_put(&pmap_pv_pool, pv);
 				}
 			}
 			mappings++;
 
@@ -3280,52 +3280,53 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
 		pmap_free_l2_bucket(pm, l2b, mappings);
 		pm->pm_stats.resident_count -= mappings;
 	}
 
 	pmap_release_pmap_lock(pm);
 	PMAP_MAP_TO_HEAD_UNLOCK();
 }
 
 #ifdef PMAP_CACHE_VIPT
 static struct pv_entry *
 pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
 {
 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	struct pv_entry *pv;
 
 	simple_lock(&md->pvh_slock);
 	KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC));
 	KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0);
 
-	pv = pmap_remove_pv(md, VM_PAGE_TO_PHYS(pg), pmap_kernel(), va);
+	pv = pmap_remove_pv(md, pa, pmap_kernel(), va);
 	KASSERT(pv);
 	KASSERT(pv->pv_flags & PVF_KENTRY);
 
 	/*
 	 * If we are removing a writeable mapping to a cached exec page,
 	 * if it's the last mapping then clear it execness other sync
 	 * the page to the icache.
 	 */
 	if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC
 	    && (pv->pv_flags & PVF_WRITE) != 0) {
 		if (SLIST_EMPTY(&md->pvh_list)) {
 			md->pvh_attrs &= ~PVF_EXEC;
 			PMAPCOUNT(exec_discarded_kremove);
 		} else {
-			pmap_syncicache_page(md, VM_PAGE_TO_PHYS(pg));
+			pmap_syncicache_page(md, pa);
 			PMAPCOUNT(exec_synced_kremove);
 		}
 	}
-	pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pmap_kernel(), 0);
+	pmap_vac_me_harder(md, pa, pmap_kernel(), 0);
 	simple_unlock(&md->pvh_slock);
 
 	return pv;
 }
 #endif /* PMAP_CACHE_VIPT */
 
 /*
  * pmap_kenter_pa: enter an unmanaged, wired kernel mapping
  *
  * We assume there is already sufficient KVM space available
  * to do this, as we can't allocate L2 descriptor tables/metadata
  * from here.
  */
@@ -3335,28 +3336,28 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	struct l2_bucket *l2b;
 	pt_entry_t *ptep, opte;
 #ifdef PMAP_CACHE_VIVT
 	struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL;
 #endif
 #ifdef PMAP_CACHE_VIPT
 	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 	struct vm_page *opg;
 	struct pv_entry *pv = NULL;
 #endif
 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
 
 	NPDEBUG(PDB_KENTER,
-	    printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x pg %p md %p\n",
-	    va, pa, prot, pg, md));
+	    printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n",
+	    va, pa, prot));
 
 	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 	KDASSERT(l2b != NULL);
 
 	ptep = &l2b->l2b_kva[l2pte_index(va)];
 	opte = *ptep;
 
 	if (opte == 0) {
 		PMAPCOUNT(kenter_mappings);
 		l2b->l2b_occupancy++;
 	} else {
 		PMAPCOUNT(kenter_remappings);
 #ifdef PMAP_CACHE_VIPT
@@ -3389,60 +3390,60 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 			simple_lock(&md->pvh_slock);
 			KASSERT(md->urw_mappings == 0);
 			KASSERT(md->uro_mappings == 0);
 			KASSERT(md->krw_mappings == 0);
 			KASSERT(md->kro_mappings == 0);
 #ifdef PMAP_CACHE_VIPT
 			KASSERT(pv == NULL);
 			KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0);
 			KASSERT((md->pvh_attrs & PVF_NC) == 0);
 			/* if there is a color conflict, evict from cache. */
 			if (pmap_is_page_colored_p(md)
 			    && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) {
 				PMAPCOUNT(vac_color_change);
-				pmap_flush_page(md, VM_PAGE_TO_PHYS(pg), PMAP_FLUSH_PRIMARY);
+				pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
 			} else if (md->pvh_attrs & PVF_MULTCLR) {
 				/*
 				 * If this page has multiple colors, expunge
 				 * them.
 				 */
 				PMAPCOUNT(vac_flush_lots2);
-				pmap_flush_page(md, VM_PAGE_TO_PHYS(pg), PMAP_FLUSH_SECONDARY);
+				pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY);
 			}
 			md->pvh_attrs &= PAGE_SIZE - 1;
 			md->pvh_attrs |= PVF_KMPAGE
 			    | PVF_COLORED | PVF_DIRTY
 			    | (va & arm_cache_prefer_mask);
 #endif
 #ifdef PMAP_CACHE_VIVT
 			md->pvh_attrs |= PVF_KMPAGE;
 #endif
 			pmap_kmpages++;
 			simple_unlock(&md->pvh_slock);
 #ifdef PMAP_CACHE_VIPT
 		} else {
 			if (pv == NULL) {
 				pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
 				KASSERT(pv != NULL);
 			}
-			pmap_enter_pv(md, VM_PAGE_TO_PHYS(pg), pv, pmap_kernel(), va,
+			pmap_enter_pv(md, pa, pv, pmap_kernel(), va,
 			    PVF_WIRED | PVF_KENTRY
 			    | (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
 			if ((prot & VM_PROT_WRITE)
 			    && !(md->pvh_attrs & PVF_NC))
 				md->pvh_attrs |= PVF_DIRTY;
 			KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
 			simple_lock(&md->pvh_slock);
-			pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pmap_kernel(), va);
+			pmap_vac_me_harder(md, pa, pmap_kernel(), va);
 			simple_unlock(&md->pvh_slock);
 #endif
 		}
 #ifdef PMAP_CACHE_VIPT
 	} else {
 		if (pv != NULL)
 			pool_put(&pmap_pv_pool, pv);
 #endif
 	}
 }
 
 void
 pmap_kremove(vaddr_t va, vsize_t len)
@@ -3466,26 +3467,27 @@ pmap_kremove(vaddr_t va, vsize_t len)
 			next_bucket = eva;
 
 		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 		KDASSERT(l2b != NULL);
 
 		sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
 		mappings = 0;
 
 		while (va < next_bucket) {
 			opte = *ptep;
 			opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
 			if (opg) {
 				struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
+
 				if (omd->pvh_attrs & PVF_KMPAGE) {
 					simple_lock(&omd->pvh_slock);
 					KASSERT(omd->urw_mappings == 0);
 					KASSERT(omd->uro_mappings == 0);
 					KASSERT(omd->krw_mappings == 0);
 					KASSERT(omd->kro_mappings == 0);
 					omd->pvh_attrs &= ~PVF_KMPAGE;
 #ifdef PMAP_CACHE_VIPT
 					omd->pvh_attrs &= ~PVF_WRITE;
 #endif
 					pmap_kmpages--;
 					simple_unlock(&omd->pvh_slock);
 #ifdef PMAP_CACHE_VIPT
@@ -3642,30 +3644,32 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
 				 * active, write-back the page.
 				 */
 				pmap_dcache_wb_range(pm, sva, PAGE_SIZE,
 				    false, false);
 #endif
 
 			pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
 			pte = l2pte_set_readonly(pte);
 			*ptep = pte;
 			PTE_SYNC(ptep);
 
 			if (pg != NULL) {
 				struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+				paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
 				simple_lock(&md->pvh_slock);
-				f = pmap_modify_pv(md, VM_PAGE_TO_PHYS(pg), pm, sva,
+				f = pmap_modify_pv(md, pa, pm, sva,
 				    clr_mask, 0);
-				pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pm, sva);
+				pmap_vac_me_harder(md, pa, pm, sva);
 				simple_unlock(&md->pvh_slock);
 			} else
 				f = PVF_REF | PVF_EXEC;
 
 			if (flush >= 0) {
 				flush++;
 				flags |= f;
 			} else
 			if (PV_BEEN_EXECD(f))
 				pmap_tlb_flushID_SE(pm, sva);
 			else
 			if (PV_BEEN_REFD(f))
 				pmap_tlb_flushD_SE(pm, sva);
@@ -3721,98 +3725,102 @@ pmap_icache_sync_range(pmap_t pm, vaddr_
 				cpu_icache_sync_range(sva,
 				    min(page_size, eva - sva));
 			}
 		}
 	}
 
 	pmap_release_pmap_lock(pm);
 	PMAP_MAP_TO_HEAD_UNLOCK();
 }
 
 void
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
+	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 
 	NPDEBUG(PDB_PROTECT,
-	    printf("pmap_page_protect: pg %p (0x%08lx), prot 0x%x\n",
-	    pg, VM_PAGE_TO_PHYS(pg), prot));
+	    printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n",
+	    md, pa, prot));
 
 	switch(prot) {
 	case VM_PROT_READ|VM_PROT_WRITE:
 #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
-		pmap_clearbit(pg, PVF_EXEC);
+		pmap_clearbit(md, pa, PVF_EXEC);
 		break;
 #endif
 	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
 		break;
 
 	case VM_PROT_READ:
 #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
-		pmap_clearbit(pg, PVF_WRITE|PVF_EXEC);
+		pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC);
 		break;
 #endif
 	case VM_PROT_READ|VM_PROT_EXECUTE:
-		pmap_clearbit(pg, PVF_WRITE);
+		pmap_clearbit(md, pa, PVF_WRITE);
 		break;
 
 	default:
-		pmap_page_remove(pg);
+		pmap_page_remove(md, pa);
 		break;
 	}
 }
 
 /*
  * pmap_clear_modify:
  *
  *	Clear the "modified" attribute for a page.
  */
 bool
 pmap_clear_modify(struct vm_page *pg)
 {
 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	bool rv;
 
 	if (md->pvh_attrs & PVF_MOD) {
 		rv = true;
 #ifdef PMAP_CACHE_VIPT
 		/*
 		 * If we are going to clear the modified bit and there are
 		 * no other modified bits set, flush the page to memory and
 		 * mark it clean.
 		 */
 		if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD)
-			pmap_flush_page(md, VM_PAGE_TO_PHYS(pg), PMAP_CLEAN_PRIMARY);
+			pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY);
 #endif
-		pmap_clearbit(pg, PVF_MOD);
+		pmap_clearbit(md, pa, PVF_MOD);
 	} else
 		rv = false;
 
 	return (rv);
 }
 
 /*
  * pmap_clear_reference:
  *
  *	Clear the "referenced" attribute for a page.
  */
 bool
 pmap_clear_reference(struct vm_page *pg)
 {
 	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	bool rv;
 
 	if (md->pvh_attrs & PVF_REF) {
 		rv = true;
-		pmap_clearbit(pg, PVF_REF);
+		pmap_clearbit(md, pa, PVF_REF);
 	} else
 		rv = false;
 
 	return (rv);
 }
 
 /*
  * pmap_is_modified:
  *
  *	Test if a page has the "modified" attribute.
  */
 /* See <arm/arm32/pmap.h> */
 
@@ -3900,27 +3908,27 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,
 		 * Do the flags say this page is writable? If not then it
 		 * is a genuine write fault. If yes then the write fault is
 		 * our fault as we did not reflect the write access in the
 		 * PTE. Now we know a write has occurred we can correct this
 		 * and also set the modified bit
 		 */
 		if ((pv->pv_flags & PVF_WRITE) == 0) {
 			simple_unlock(&md->pvh_slock);
 			goto out;
 		}
 
 		NPDEBUG(PDB_FOLLOW,
 		    printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
-		    pm, va, VM_PAGE_TO_PHYS(pg)));
+		    pm, va, pa));
 
 		md->pvh_attrs |= PVF_REF | PVF_MOD;
 		pv->pv_flags |= PVF_REF | PVF_MOD;
 #ifdef PMAP_CACHE_VIPT
 		/*
 		 * If there are cacheable mappings for this page, mark it dirty.
 		 */
 		if ((md->pvh_attrs & PVF_NC) == 0)
 			md->pvh_attrs |= PVF_DIRTY;
 #endif
 		simple_unlock(&md->pvh_slock);
 
 		/*
@@ -3953,27 +3961,27 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,
 
 		pv = pmap_find_pv(md, pm, va);
 		if (pv == NULL) {
 			simple_unlock(&md->pvh_slock);
 			goto out;
 		}
 
 		md->pvh_attrs |= PVF_REF;
 		pv->pv_flags |= PVF_REF;
 		simple_unlock(&md->pvh_slock);
 
 		NPDEBUG(PDB_FOLLOW,
 		    printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
-		    pm, va, VM_PAGE_TO_PHYS(pg)));
+		    pm, va, pa));
 
 		*ptep = l2pte_set_readonly((pte & ~L2_TYPE_MASK) | L2_S_PROTO);
 		PTE_SYNC(ptep);
 		rv = 1;
 	}
 
 	/*
 	 * We know there is a valid mapping here, so simply
 	 * fix up the L1 if necessary.
 	 */
 	pl1pd = &pm->pm_l1->l1_kva[l1idx];
 	l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
 	if (*pl1pd != l1pd) {
@@ -4101,28 +4109,29 @@ pmap_unwire(pmap_t pm, vaddr_t va)
 
 	l2b = pmap_get_l2_bucket(pm, va);
 	KDASSERT(l2b != NULL);
 
 	ptep = &l2b->l2b_kva[l2pte_index(va)];
 	pte = *ptep;
 
 	/* Extract the physical address of the page */
 	pa = l2pte_pa(pte);
 
 	if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
 		/* Update the wired bit in the pv entry for this page. */
 		struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+
 		simple_lock(&md->pvh_slock);
-		(void) pmap_modify_pv(md, VM_PAGE_TO_PHYS(pg), pm, va, PVF_WIRED, 0);
+		(void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0);
 		simple_unlock(&md->pvh_slock);
 	}
 
 	pmap_release_pmap_lock(pm);
 	PMAP_MAP_TO_HEAD_UNLOCK();
 }
 
 void
 pmap_activate(struct lwp *l)
 {
 	extern int block_userspace_access;
 	pmap_t opm, npm, rpm;
 	uint32_t odacr, ndacr;