Wed Jul 16 00:19:57 2008 UTC
Revamp bookkeeping for pages entered by pmap_kenter_pa.  Keep track of them
on pv lists so that their cacheability can be handled properly.


(matt)
diff -r1.181 -r1.182 src/sys/arch/arm/arm32/pmap.c
diff -r1.85 -r1.86 src/sys/arch/arm/include/arm32/pmap.h
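
Before the diffs themselves, here is a minimal standalone C sketch of the pv-list ordering that pmap_enter_pv() adopts in this revision: mappings created by pmap_kenter_pa() carry PVF_KENTRY and stay at the head of a page's pv list, ahead of managed mappings, so pmap_vac_me_harder() sees every mapping when deciding whether the page can stay cached. This is not the kernel code; the types are simplified, locking and the rest of the page attributes are omitted, and the helper names (enter_pv, page_md) are illustrative only.

#include <assert.h>
#include <stddef.h>

#define PVF_WRITE	0x08
#define PVF_KENTRY	0x0100		/* mapping entered via pmap_kenter_pa */

struct pv_entry {
	struct pv_entry	*pv_next;
	unsigned long	 pv_va;
	unsigned int	 pv_flags;
};

struct page_md {			/* stand-in for pg->mdpage */
	struct pv_entry	*pvh_list;
	unsigned int	 pvh_attrs;
};

/*
 * Link a pv entry onto the page's list.  Entries flagged PVF_KENTRY stay
 * at the head; managed mappings are inserted after the last KENTRY entry,
 * mirroring the pvp walk added to pmap_enter_pv() in this revision.
 */
static void
enter_pv(struct page_md *md, struct pv_entry *pve, unsigned long va,
    unsigned int flags)
{
	struct pv_entry **pvp = &md->pvh_list;

	pve->pv_va = va;
	pve->pv_flags = flags;
	if ((flags & PVF_KENTRY) == 0) {
		while (*pvp != NULL && ((*pvp)->pv_flags & PVF_KENTRY) != 0)
			pvp = &(*pvp)->pv_next;
	}
	pve->pv_next = *pvp;
	*pvp = pve;
	md->pvh_attrs |= flags & PVF_KENTRY;
}

int
main(void)
{
	struct page_md md = { NULL, 0 };
	struct pv_entry user, kenter;

	enter_pv(&md, &user, 0x2000, PVF_WRITE);	/* managed mapping */
	enter_pv(&md, &kenter, 0x1000, PVF_KENTRY);	/* pmap_kenter_pa mapping */

	/* The KENTRY entry ends up first, the managed mapping after it. */
	assert(md.pvh_list == &kenter && kenter.pv_next == &user);
	return 0;
}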

cvs diff -r1.181 -r1.182 src/sys/arch/arm/arm32/pmap.c

--- src/sys/arch/arm/arm32/pmap.c 2008/07/09 23:22:15 1.181
+++ src/sys/arch/arm/arm32/pmap.c 2008/07/16 00:19:57 1.182
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.181 2008/07/09 23:22:15 scw Exp $ */ 1/* $NetBSD: pmap.c,v 1.182 2008/07/16 00:19:57 matt Exp $ */
2 2
3/* 3/*
4 * Copyright 2003 Wasabi Systems, Inc. 4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -201,27 +201,27 @@ @@ -201,27 +201,27 @@
201#include <sys/user.h> 201#include <sys/user.h>
202#include <sys/pool.h> 202#include <sys/pool.h>
203#include <sys/cdefs.h> 203#include <sys/cdefs.h>
204#include <sys/cpu.h> 204#include <sys/cpu.h>
205  205
206#include <uvm/uvm.h> 206#include <uvm/uvm.h>
207 207
208#include <machine/bus.h> 208#include <machine/bus.h>
209#include <machine/pmap.h> 209#include <machine/pmap.h>
210#include <machine/pcb.h> 210#include <machine/pcb.h>
211#include <machine/param.h> 211#include <machine/param.h>
212#include <arm/arm32/katelib.h> 212#include <arm/arm32/katelib.h>
213 213
214__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.181 2008/07/09 23:22:15 scw Exp $"); 214__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.182 2008/07/16 00:19:57 matt Exp $");
215 215
216#ifdef PMAP_DEBUG 216#ifdef PMAP_DEBUG
217 217
218/* XXX need to get rid of all refs to this */ 218/* XXX need to get rid of all refs to this */
219int pmap_debug_level = 0; 219int pmap_debug_level = 0;
220 220
221/* 221/*
222 * for switching to potentially finer grained debugging 222 * for switching to potentially finer grained debugging
223 */ 223 */
224#define PDB_FOLLOW 0x0001 224#define PDB_FOLLOW 0x0001
225#define PDB_INIT 0x0002 225#define PDB_INIT 0x0002
226#define PDB_ENTER 0x0004 226#define PDB_ENTER 0x0004
227#define PDB_REMOVE 0x0008 227#define PDB_REMOVE 0x0008
@@ -304,38 +304,41 @@ static vaddr_t pmap_kernel_l2ptp_kva; @@ -304,38 +304,41 @@ static vaddr_t pmap_kernel_l2ptp_kva;
304static paddr_t pmap_kernel_l2ptp_phys; 304static paddr_t pmap_kernel_l2ptp_phys;
305 305
306#ifdef PMAPCOUNT 306#ifdef PMAPCOUNT
307#define PMAP_EVCNT_INITIALIZER(name) \ 307#define PMAP_EVCNT_INITIALIZER(name) \
308 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) 308 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)
309 309
310#ifdef PMAP_CACHE_VIPT 310#ifdef PMAP_CACHE_VIPT
311static struct evcnt pmap_ev_vac_color_new = 311static struct evcnt pmap_ev_vac_color_new =
312 PMAP_EVCNT_INITIALIZER("new page color"); 312 PMAP_EVCNT_INITIALIZER("new page color");
313static struct evcnt pmap_ev_vac_color_reuse = 313static struct evcnt pmap_ev_vac_color_reuse =
314 PMAP_EVCNT_INITIALIZER("ok first page color"); 314 PMAP_EVCNT_INITIALIZER("ok first page color");
315static struct evcnt pmap_ev_vac_color_ok = 315static struct evcnt pmap_ev_vac_color_ok =
316 PMAP_EVCNT_INITIALIZER("ok page color"); 316 PMAP_EVCNT_INITIALIZER("ok page color");
 317static struct evcnt pmap_ev_vac_color_blind =
 318 PMAP_EVCNT_INITIALIZER("blind page color");
317static struct evcnt pmap_ev_vac_color_change = 319static struct evcnt pmap_ev_vac_color_change =
318 PMAP_EVCNT_INITIALIZER("change page color"); 320 PMAP_EVCNT_INITIALIZER("change page color");
319static struct evcnt pmap_ev_vac_color_erase = 321static struct evcnt pmap_ev_vac_color_erase =
320 PMAP_EVCNT_INITIALIZER("erase page color"); 322 PMAP_EVCNT_INITIALIZER("erase page color");
321static struct evcnt pmap_ev_vac_color_none = 323static struct evcnt pmap_ev_vac_color_none =
322 PMAP_EVCNT_INITIALIZER("no page color"); 324 PMAP_EVCNT_INITIALIZER("no page color");
323static struct evcnt pmap_ev_vac_color_restore = 325static struct evcnt pmap_ev_vac_color_restore =
324 PMAP_EVCNT_INITIALIZER("restore page color"); 326 PMAP_EVCNT_INITIALIZER("restore page color");
325 327
326EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); 328EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new);
327EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); 329EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse);
328EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); 330EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok);
 331EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind);
329EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); 332EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change);
330EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); 333EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase);
331EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); 334EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none);
332EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); 335EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore);
333#endif 336#endif
334 337
335static struct evcnt pmap_ev_mappings = 338static struct evcnt pmap_ev_mappings =
336 PMAP_EVCNT_INITIALIZER("pages mapped"); 339 PMAP_EVCNT_INITIALIZER("pages mapped");
337static struct evcnt pmap_ev_unmappings = 340static struct evcnt pmap_ev_unmappings =
338 PMAP_EVCNT_INITIALIZER("pages unmapped"); 341 PMAP_EVCNT_INITIALIZER("pages unmapped");
339static struct evcnt pmap_ev_remappings = 342static struct evcnt pmap_ev_remappings =
340 PMAP_EVCNT_INITIALIZER("pages remapped"); 343 PMAP_EVCNT_INITIALIZER("pages remapped");
341 344
@@ -769,28 +772,27 @@ static inline void @@ -769,28 +772,27 @@ static inline void
769pmap_dcache_wbinv_all(pmap_t pm) 772pmap_dcache_wbinv_all(pmap_t pm)
770{ 773{
771 if (pm->pm_cstate.cs_cache_d) { 774 if (pm->pm_cstate.cs_cache_d) {
772 cpu_dcache_wbinv_all(); 775 cpu_dcache_wbinv_all();
773 pm->pm_cstate.cs_cache_d = 0; 776 pm->pm_cstate.cs_cache_d = 0;
774 } 777 }
775} 778}
776#endif /* PMAP_CACHE_VIVT */ 779#endif /* PMAP_CACHE_VIVT */
777 780
778static inline bool 781static inline bool
779pmap_is_current(pmap_t pm) 782pmap_is_current(pmap_t pm)
780{ 783{
781 784
782 if (pm == pmap_kernel() || 785 if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm)
783 (curproc && curproc->p_vmspace->vm_map.pmap == pm)) 
784 return true; 786 return true;
785 787
786 return false; 788 return false;
787} 789}
788 790
789static inline bool 791static inline bool
790pmap_is_cached(pmap_t pm) 792pmap_is_cached(pmap_t pm)
791{ 793{
792 794
793 if (pm == pmap_kernel() || pmap_recent_user == NULL || 795 if (pm == pmap_kernel() || pmap_recent_user == NULL ||
794 pmap_recent_user == pm) 796 pmap_recent_user == pm)
795 return (true); 797 return (true);
796 798
@@ -829,38 +831,49 @@ do { \ @@ -829,38 +831,49 @@ do { \
829/* 831/*
830 * pmap_enter_pv: enter a mapping onto a vm_page lst 832 * pmap_enter_pv: enter a mapping onto a vm_page lst
831 * 833 *
832 * => caller should hold the proper lock on pmap_main_lock 834 * => caller should hold the proper lock on pmap_main_lock
833 * => caller should have pmap locked 835 * => caller should have pmap locked
834 * => we will gain the lock on the vm_page and allocate the new pv_entry 836 * => we will gain the lock on the vm_page and allocate the new pv_entry
835 * => caller should adjust ptp's wire_count before calling 837 * => caller should adjust ptp's wire_count before calling
836 * => caller should not adjust pmap's wire_count 838 * => caller should not adjust pmap's wire_count
837 */ 839 */
838static void 840static void
839pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, 841pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
840 vaddr_t va, u_int flags) 842 vaddr_t va, u_int flags)
841{ 843{
 844 struct pv_entry **pvp;
842 845
843 NPDEBUG(PDB_PVDUMP, 846 NPDEBUG(PDB_PVDUMP,
844 printf("pmap_enter_pv: pm %p, pg %p, flags 0x%x\n", pm, pg, flags)); 847 printf("pmap_enter_pv: pm %p, pg %p, flags 0x%x\n", pm, pg, flags));
845 848
846 pve->pv_pmap = pm; 849 pve->pv_pmap = pm;
847 pve->pv_va = va; 850 pve->pv_va = va;
848 pve->pv_flags = flags; 851 pve->pv_flags = flags;
849 852
850 simple_lock(&pg->mdpage.pvh_slock); /* lock vm_page */ 853 simple_lock(&pg->mdpage.pvh_slock); /* lock vm_page */
851 pve->pv_next = pg->mdpage.pvh_list; /* add to ... */ 854 pvp = &pg->mdpage.pvh_list;
852 pg->mdpage.pvh_list = pve; /* ... locked list */ 855#ifdef PMAP_CACHE_VIPT
853 pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD); 856 /*
 857 * Insert unmanaged entries at the head of the pv list.
 858 */
 859 if (__predict_true((flags & PVF_KENTRY) == 0)) {
 860 while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY)
 861 pvp = &(*pvp)->pv_next;
 862 }
 863#endif
 864 pve->pv_next = *pvp; /* add to ... */
 865 *pvp = pve; /* ... locked list */
 866 pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD | PVF_KENTRY);
854 if (pm == pmap_kernel()) { 867 if (pm == pmap_kernel()) {
855 PMAPCOUNT(kernel_mappings); 868 PMAPCOUNT(kernel_mappings);
856 if (flags & PVF_WRITE) 869 if (flags & PVF_WRITE)
857 pg->mdpage.krw_mappings++; 870 pg->mdpage.krw_mappings++;
858 else 871 else
859 pg->mdpage.kro_mappings++; 872 pg->mdpage.kro_mappings++;
860 } else 873 } else
861 if (flags & PVF_WRITE) 874 if (flags & PVF_WRITE)
862 pg->mdpage.urw_mappings++; 875 pg->mdpage.urw_mappings++;
863 else 876 else
864 pg->mdpage.uro_mappings++; 877 pg->mdpage.uro_mappings++;
865 878
866#ifdef PMAP_CACHE_VIPT 879#ifdef PMAP_CACHE_VIPT
@@ -923,26 +936,38 @@ pmap_remove_pv(struct vm_page *pg, pmap_ @@ -923,26 +936,38 @@ pmap_remove_pv(struct vm_page *pg, pmap_
923 936
924 prevptr = &pg->mdpage.pvh_list; /* previous pv_entry pointer */ 937 prevptr = &pg->mdpage.pvh_list; /* previous pv_entry pointer */
925 pve = *prevptr; 938 pve = *prevptr;
926 939
927 while (pve) { 940 while (pve) {
928 if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? */ 941 if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? */
929 NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, pg " 942 NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, pg "
930 "%p, flags 0x%x\n", pm, pg, pve->pv_flags)); 943 "%p, flags 0x%x\n", pm, pg, pve->pv_flags));
931 if (pve->pv_flags & PVF_WIRED) { 944 if (pve->pv_flags & PVF_WIRED) {
932 if (skip_wired) 945 if (skip_wired)
933 return (NULL); 946 return (NULL);
934 --pm->pm_stats.wired_count; 947 --pm->pm_stats.wired_count;
935 } 948 }
 949#ifdef PMAP_CACHE_VIPT
 950 /*
 951 * If we are removing the first pv entry and it's
 952 * a KENTRY, and the next one isn't also a KENTRY,
 953 * clear KENTRY from the page attributes.
 954 */
 955 if (pg->mdpage.pvh_list == pve
 956 && (pve->pv_flags & PVF_KENTRY)
 957 && (pve->pv_next == NULL
 958 || (pve->pv_next->pv_flags & PVF_KENTRY) == 0))
 959 pg->mdpage.pvh_attrs &= ~PVF_KENTRY;
 960#endif
936 *prevptr = pve->pv_next; /* remove it! */ 961 *prevptr = pve->pv_next; /* remove it! */
937 if (pm == pmap_kernel()) { 962 if (pm == pmap_kernel()) {
938 PMAPCOUNT(kernel_unmappings); 963 PMAPCOUNT(kernel_unmappings);
939 if (pve->pv_flags & PVF_WRITE) 964 if (pve->pv_flags & PVF_WRITE)
940 pg->mdpage.krw_mappings--; 965 pg->mdpage.krw_mappings--;
941 else 966 else
942 pg->mdpage.kro_mappings--; 967 pg->mdpage.kro_mappings--;
943 } else 968 } else
944 if (pve->pv_flags & PVF_WRITE) 969 if (pve->pv_flags & PVF_WRITE)
945 pg->mdpage.urw_mappings--; 970 pg->mdpage.urw_mappings--;
946 else 971 else
947 pg->mdpage.uro_mappings--; 972 pg->mdpage.uro_mappings--;
948 973
@@ -961,26 +986,36 @@ pmap_remove_pv(struct vm_page *pg, pmap_ @@ -961,26 +986,36 @@ pmap_remove_pv(struct vm_page *pg, pmap_
961 PMAPCOUNT(exec_discarded_unmap); 986 PMAPCOUNT(exec_discarded_unmap);
962 } else { 987 } else {
963 pmap_syncicache_page(pg); 988 pmap_syncicache_page(pg);
964 PMAPCOUNT(exec_synced_unmap); 989 PMAPCOUNT(exec_synced_unmap);
965 } 990 }
966 } 991 }
967#endif /* PMAP_CACHE_VIPT */ 992#endif /* PMAP_CACHE_VIPT */
968 break; 993 break;
969 } 994 }
970 prevptr = &pve->pv_next; /* previous pointer */ 995 prevptr = &pve->pv_next; /* previous pointer */
971 pve = pve->pv_next; /* advance */ 996 pve = pve->pv_next; /* advance */
972 } 997 }
973 998
 999#ifdef PMAP_CACHE_VIPT
 1000 /*
 1001 * If this was a writeable page and there are no more writeable
 1002 * mappings (ignoring KMPAGE), clear the WRITE flag.
 1003 */
 1004 if ((pg->mdpage.pvh_attrs & PVF_WRITE)
 1005 && pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0)
 1006 pg->mdpage.pvh_attrs &= ~PVF_WRITE;
 1007#endif /* PMAP_CACHE_VIPT */
 1008
974 return(pve); /* return removed pve */ 1009 return(pve); /* return removed pve */
975} 1010}
976 1011
977/* 1012/*
978 * 1013 *
979 * pmap_modify_pv: Update pv flags 1014 * pmap_modify_pv: Update pv flags
980 * 1015 *
981 * => caller should hold lock on vm_page [so that attrs can be adjusted] 1016 * => caller should hold lock on vm_page [so that attrs can be adjusted]
982 * => caller should NOT adjust pmap's wire_count 1017 * => caller should NOT adjust pmap's wire_count
983 * => caller must call pmap_vac_me_harder() if writable status of a page 1018 * => caller must call pmap_vac_me_harder() if writable status of a page
984 * may have changed. 1019 * may have changed.
985 * => we return the old flags 1020 * => we return the old flags
986 *  1021 *
@@ -1025,26 +1060,28 @@ pmap_modify_pv(struct vm_page *pg, pmap_ @@ -1025,26 +1060,28 @@ pmap_modify_pv(struct vm_page *pg, pmap_
1025 pg->mdpage.kro_mappings++; 1060 pg->mdpage.kro_mappings++;
1026 pg->mdpage.krw_mappings--; 1061 pg->mdpage.krw_mappings--;
1027 } 1062 }
1028 } else 1063 } else
1029 if (flags & PVF_WRITE) { 1064 if (flags & PVF_WRITE) {
1030 pg->mdpage.urw_mappings++; 1065 pg->mdpage.urw_mappings++;
1031 pg->mdpage.uro_mappings--; 1066 pg->mdpage.uro_mappings--;
1032 } else { 1067 } else {
1033 pg->mdpage.uro_mappings++; 1068 pg->mdpage.uro_mappings++;
1034 pg->mdpage.urw_mappings--; 1069 pg->mdpage.urw_mappings--;
1035 } 1070 }
1036 } 1071 }
1037#ifdef PMAP_CACHE_VIPT 1072#ifdef PMAP_CACHE_VIPT
 1073 if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
 1074 pg->mdpage.pvh_attrs &= ~PVF_WRITE;
1038 /* 1075 /*
1039 * We have two cases here: the first is from enter_pv (new exec 1076 * We have two cases here: the first is from enter_pv (new exec
1040 * page), the second is a combined pmap_remove_pv/pmap_enter_pv. 1077 * page), the second is a combined pmap_remove_pv/pmap_enter_pv.
1041 * Since in latter, pmap_enter_pv won't do anything, we just have 1078 * Since in latter, pmap_enter_pv won't do anything, we just have
1042 * to do what pmap_remove_pv would do. 1079 * to do what pmap_remove_pv would do.
1043 */ 1080 */
1044 if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) 1081 if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(pg->mdpage.pvh_attrs))
1045 || (PV_IS_EXEC_P(pg->mdpage.pvh_attrs) 1082 || (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)
1046 || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) { 1083 || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) {
1047 pmap_syncicache_page(pg); 1084 pmap_syncicache_page(pg);
1048 PMAPCOUNT(exec_synced_remap); 1085 PMAPCOUNT(exec_synced_remap);
1049 } 1086 }
1050#endif 1087#endif
@@ -1761,188 +1798,189 @@ pmap_check_sets(paddr_t pa) @@ -1761,188 +1798,189 @@ pmap_check_sets(paddr_t pa)
1761 asm("mrc p15, 3, %0, c15, c0, 0" : "=r"(v)); 1798 asm("mrc p15, 3, %0, c15, c0, 0" : "=r"(v));
1762 1799
1763 if ((v & (1 | ~(PAGE_SIZE-1))) == pa) { 1800 if ((v & (1 | ~(PAGE_SIZE-1))) == pa) {
1764 mask |= 1 << (set >> 7); 1801 mask |= 1 << (set >> 7);
1765 } 1802 }
1766 } 1803 }
1767 } 1804 }
1768 return mask; 1805 return mask;
1769} 1806}
1770#endif 1807#endif
1771static void 1808static void
1772pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va) 1809pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
1773{ 1810{
1774 struct pv_entry *pv, pv0; 1811 struct pv_entry *pv;
1775 vaddr_t tst_mask; 1812 vaddr_t tst_mask;
1776 bool bad_alias; 1813 bool bad_alias;
1777 struct l2_bucket *l2b; 1814 struct l2_bucket *l2b;
1778 pt_entry_t *ptep, pte, opte; 1815 pt_entry_t *ptep, pte, opte;
1779 1816
1780 /* do we need to do anything? */ 1817 /* do we need to do anything? */
1781 if (arm_cache_prefer_mask == 0) 1818 if (arm_cache_prefer_mask == 0)
1782 return; 1819 return;
1783 1820
1784 NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: pg=%p, pmap=%p va=%08lx\n", 1821 NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: pg=%p, pmap=%p va=%08lx\n",
1785 pg, pm, va)); 1822 pg, pm, va));
1786 1823
1787#define popc4(x) \ 1824#define popc4(x) \
1788 (((0x94 >> ((x & 3) << 1)) & 3) + ((0x94 >> ((x & 12) >> 1)) & 3)) 1825 (((0x94 >> ((x & 3) << 1)) & 3) + ((0x94 >> ((x & 12) >> 1)) & 3))
1789#if 0 1826#if 0
1790 tst_mask = pmap_check_sets(pg->phys_addr); 1827 tst_mask = pmap_check_sets(pg->phys_addr);
1791 KASSERT(popc4(tst_mask) < 2); 1828 KASSERT(popc4(tst_mask) < 2);
1792#endif 1829#endif
1793 1830
1794 KASSERT(!va || pm || (pg->mdpage.pvh_attrs & PVF_KENTRY)); 1831 KASSERT(!va || pm);
1795 1832
1796 /* Already a conflict? */ 1833 /* Already a conflict? */
1797 if (__predict_false(pg->mdpage.pvh_attrs & PVF_NC)) { 1834 if (__predict_false(pg->mdpage.pvh_attrs & PVF_NC)) {
1798 /* just an add, things are already non-cached */ 1835 /* just an add, things are already non-cached */
1799 bad_alias = false; 1836 bad_alias = false;
1800 if (va) { 1837 if (va) {
1801 PMAPCOUNT(vac_color_none); 1838 PMAPCOUNT(vac_color_none);
1802 bad_alias = true; 1839 bad_alias = true;
 1840 KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
1803 goto fixup; 1841 goto fixup;
1804 } 1842 }
1805 pv = pg->mdpage.pvh_list; 1843 pv = pg->mdpage.pvh_list;
1806 /* the list can't be empty because it would be cachable */ 1844 /* the list can't be empty because it would be cachable */
1807 if (pg->mdpage.pvh_attrs & PVF_KENTRY) { 1845 if (pg->mdpage.pvh_attrs & PVF_KMPAGE) {
1808 tst_mask = pg->mdpage.pvh_attrs; 1846 tst_mask = pg->mdpage.pvh_attrs;
1809 } else { 1847 } else {
1810 KASSERT(pv); 1848 KASSERT(pv);
1811 tst_mask = pv->pv_va; 1849 tst_mask = pv->pv_va;
1812 pv = pv->pv_next; 1850 pv = pv->pv_next;
1813 } 1851 }
1814 /* 1852 /*
1815 * Only check for a bad alias if we have writable mappings. 1853 * Only check for a bad alias if we have writable mappings.
1816 */ 1854 */
1817 if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0 1855 if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0) {
1818 || (pg->mdpage.pvh_attrs & PVF_KENTRY)) { 
1819 tst_mask &= arm_cache_prefer_mask; 1856 tst_mask &= arm_cache_prefer_mask;
1820 for (; pv && !bad_alias; pv = pv->pv_next) { 1857 for (; pv && !bad_alias; pv = pv->pv_next) {
1821 /* if there's a bad alias, stop checking. */ 1858 /* if there's a bad alias, stop checking. */
1822 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) 1859 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
1823 bad_alias = true; 1860 bad_alias = true;
1824 } 1861 }
 1862 pg->mdpage.pvh_attrs |= PVF_WRITE;
1825 } 1863 }
1826 /* If no conflicting colors, set everything back to cached */ 1864 /* If no conflicting colors, set everything back to cached */
1827 if (!bad_alias) { 1865 if (!bad_alias) {
1828 PMAPCOUNT(vac_color_restore); 1866 PMAPCOUNT(vac_color_restore);
1829 pg->mdpage.pvh_attrs |= PVF_COLORED; 1867 pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
1830 if (!(pg->mdpage.pvh_attrs & PVF_KENTRY)) { 1868 pg->mdpage.pvh_attrs |= tst_mask | PVF_COLORED;
1831 pg->mdpage.pvh_attrs &= PAGE_SIZE - 1; 
1832 pg->mdpage.pvh_attrs |= tst_mask; 
1833 } 
1834 pg->mdpage.pvh_attrs &= ~PVF_NC; 
1835 } else { 1869 } else {
1836 KASSERT(pg->mdpage.pvh_list != NULL); 1870 KASSERT(pg->mdpage.pvh_list != NULL);
1837 KASSERT((pg->mdpage.pvh_attrs & PVF_KENTRY) 1871 KASSERT(pg->mdpage.pvh_list->pv_next != NULL);
1838 || pg->mdpage.pvh_list->pv_next != NULL); 
1839 } 1872 }
 1873 KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
1840 } else if (!va) { 1874 } else if (!va) {
1841 KASSERT(pmap_is_page_colored_p(pg)); 1875 KASSERT(pmap_is_page_colored_p(pg));
1842 if (pm == NULL) 1876 pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) | arm_cache_prefer_mask;
1843 pg->mdpage.pvh_attrs &= 1877 if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
1844 (PAGE_SIZE - 1) | arm_cache_prefer_mask; 1878 pg->mdpage.pvh_attrs &= ~PVF_WRITE;
 1879 KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
1845 return; 1880 return;
1846 } else if (!pmap_is_page_colored_p(pg)) { 1881 } else if (!pmap_is_page_colored_p(pg)) {
1847 /* not colored so we just use its color */ 1882 /* not colored so we just use its color */
1848 PMAPCOUNT(vac_color_new); 1883 PMAPCOUNT(vac_color_new);
1849 pg->mdpage.pvh_attrs &= PAGE_SIZE - 1; 1884 pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
1850 if (pm == NULL) 1885 pg->mdpage.pvh_attrs |= PVF_COLORED
1851 pg->mdpage.pvh_attrs |= PVF_COLORED | va; 1886 | (va & arm_cache_prefer_mask);
1852 else 1887 if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0)
1853 pg->mdpage.pvh_attrs |= PVF_COLORED 1888 pg->mdpage.pvh_attrs |= PVF_WRITE;
1854 | (va & arm_cache_prefer_mask); 1889 KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
1855 return; 1890 return;
1856 } else if (!((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) 1891 } else if (((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
 1892 bad_alias = false;
 1893 if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0) {
 1894 /*
 1895 * We now have writeable mappings and more than one
 1896 * readonly mapping; verify the colors don't clash
 1897 * and mark the page as writeable.
 1898 */
 1899 if (pg->mdpage.uro_mappings + pg->mdpage.kro_mappings > 1
 1900 && (pg->mdpage.pvh_attrs & PVF_WRITE) == 0) {
 1901 tst_mask = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
 1902 for (pv = pg->mdpage.pvh_list;
 1903 pv && !bad_alias;
 1904 pv = pv->pv_next) {
 1905 /* if there's a bad alias, stop checking. */
 1906 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
 1907 bad_alias = true;
 1908 }
 1909 }
 1910 pg->mdpage.pvh_attrs |= PVF_WRITE;
 1911 }
 1912 /* If no conflicting colors, set everything back to cached */
 1913 if (!bad_alias) {
 1914 if (pg->mdpage.pvh_list)
 1915 PMAPCOUNT(vac_color_reuse);
 1916 else
 1917 PMAPCOUNT(vac_color_ok);
 1918 /* matching color, just return */
 1919 KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
 1920 return;
 1921 }
 1922 KASSERT(pg->mdpage.pvh_list != NULL);
 1923 KASSERT(pg->mdpage.pvh_list->pv_next != NULL);
 1924
 1925 /* color conflict. evict from cache. */
 1926
 1927 pmap_flush_page(pg);
 1928 pg->mdpage.pvh_attrs &= ~PVF_COLORED;
 1929 pg->mdpage.pvh_attrs |= PVF_NC;
 1930 } else if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0
 1931 && (pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
 1932 KASSERT((pg->mdpage.pvh_attrs & PVF_WRITE) == 0);
1857 /* 1933 /*
1858 * If the VA matches the existing color or if all the mappings 1934 * If all the mappings are read-only, don't do anything.
1859 * are read-only, don't do anything. 
1860 */ 1935 */
1861 || (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0 1936 PMAPCOUNT(vac_color_blind);
1862 && (pg->mdpage.pvh_attrs & PVF_KENTRY) == 0)) { 1937 KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
1863 if (pm == NULL) { 
1864 pg->mdpage.pvh_attrs &= PAGE_SIZE - 1; 
1865 pg->mdpage.pvh_attrs |= va; 
1866 } 
1867 if (pg->mdpage.pvh_list) 
1868 PMAPCOUNT(vac_color_reuse); 
1869 else 
1870 PMAPCOUNT(vac_color_ok); 
1871 /* matching color, just return */ 
1872 return; 1938 return;
1873 } else { 1939 } else {
 1940 if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0)
 1941 pg->mdpage.pvh_attrs |= PVF_WRITE;
 1942
1874 /* color conflict. evict from cache. */ 1943 /* color conflict. evict from cache. */
1875 pmap_flush_page(pg); 1944 pmap_flush_page(pg);
1876 1945
1877 /* the list can't be empty because this was an enter/modify */ 1946 /* the list can't be empty because this was an enter/modify */
1878 pv = pg->mdpage.pvh_list; 1947 pv = pg->mdpage.pvh_list;
1879 KASSERT((pg->mdpage.pvh_attrs & PVF_KENTRY) || pv); 1948 KASSERT(pv);
1880 1949
1881 /* 1950 /*
1882 * If there's only one mapped page, change color to the 1951 * If there's only one mapped page, change color to the
1883 * page's new color and return. 1952 * page's new color and return.
1884 */ 1953 */
1885 if (((pg->mdpage.pvh_attrs & PVF_KENTRY) 1954 if (pv->pv_next == NULL) {
1886 ? pv : pv->pv_next) == NULL) { 
1887 PMAPCOUNT(vac_color_change); 1955 PMAPCOUNT(vac_color_change);
1888 pg->mdpage.pvh_attrs &= PAGE_SIZE - 1; 1956 pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
1889 if (pm == NULL) 1957 pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
1890 pg->mdpage.pvh_attrs |= va; 1958 KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
1891 else 
1892 pg->mdpage.pvh_attrs |= 
1893 (va & arm_cache_prefer_mask); 
1894 return; 1959 return;
1895 } 1960 }
1896 bad_alias = true; 1961 bad_alias = true;
1897 pg->mdpage.pvh_attrs &= ~PVF_COLORED; 1962 pg->mdpage.pvh_attrs &= ~PVF_COLORED;
1898 pg->mdpage.pvh_attrs |= PVF_NC; 1963 pg->mdpage.pvh_attrs |= PVF_NC;
1899 PMAPCOUNT(vac_color_erase); 1964 PMAPCOUNT(vac_color_erase);
1900 } 1965 }
1901 1966
1902 fixup: 1967 fixup:
1903 /* 1968 KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
1904 * If the pmap is NULL, then we got called from pmap_kenter_pa 
1905 * and we must save the kenter'ed va. And this changes the 
1906 * color to match the kenter'ed page. if this is a remove clear 
1907 * saved va bits which retaining the color bits. 
1908 */ 
1909 if (pm == NULL) { 
1910 if (va) { 
1911 pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1); 
1912 pg->mdpage.pvh_attrs |= va; 
1913 } else { 
1914 pg->mdpage.pvh_attrs &= 
1915 ((PAGE_SIZE - 1) | arm_cache_prefer_mask); 
1916 } 
1917 } 
1918 
1919 pv = pg->mdpage.pvh_list; 
1920 
1921 /* 
1922 * If this page has an kenter'ed mapping, fake up a pv entry. 
1923 */ 
1924 if (__predict_false(pg->mdpage.pvh_attrs & PVF_KENTRY)) { 
1925 pv0.pv_pmap = pmap_kernel(); 
1926 pv0.pv_va = pg->mdpage.pvh_attrs & ~(PAGE_SIZE - 1); 
1927 pv0.pv_next = pv; 
1928 pv0.pv_flags = PVF_REF; 
1929 pv = &pv0; 
1930 } 
1931 1969
1932 /* 1970 /*
1933 * Turn caching on/off for all pages. 1971 * Turn caching on/off for all pages.
1934 */ 1972 */
1935 for (; pv; pv = pv->pv_next) { 1973 for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
1936 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1974 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
1937 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1975 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
1938 opte = *ptep; 1976 opte = *ptep;
1939 pte = opte & ~L2_S_CACHE_MASK; 1977 pte = opte & ~L2_S_CACHE_MASK;
1940 if (bad_alias) { 1978 if (bad_alias) {
1941 pv->pv_flags |= PVF_NC; 1979 pv->pv_flags |= PVF_NC;
1942 } else { 1980 } else {
1943 pv->pv_flags &= ~PVF_NC; 1981 pv->pv_flags &= ~PVF_NC;
1944 pte |= pte_l2_s_cache_mode; 1982 pte |= pte_l2_s_cache_mode;
1945 } 1983 }
1946 if (opte == pte) /* only update if there's a change */ 1984 if (opte == pte) /* only update if there's a change */
1947 continue; 1985 continue;
1948 1986
@@ -2082,26 +2120,28 @@ pmap_clearbit(struct vm_page *pg, u_int  @@ -2082,26 +2120,28 @@ pmap_clearbit(struct vm_page *pg, u_int
2082 2120
2083 if (maskbits & oflags & PVF_WRITE) { 2121 if (maskbits & oflags & PVF_WRITE) {
2084 /* 2122 /*
2085 * Keep alias accounting up to date 2123 * Keep alias accounting up to date
2086 */ 2124 */
2087 if (pv->pv_pmap == pmap_kernel()) { 2125 if (pv->pv_pmap == pmap_kernel()) {
2088 pg->mdpage.krw_mappings--; 2126 pg->mdpage.krw_mappings--;
2089 pg->mdpage.kro_mappings++; 2127 pg->mdpage.kro_mappings++;
2090 } else { 2128 } else {
2091 pg->mdpage.urw_mappings--; 2129 pg->mdpage.urw_mappings--;
2092 pg->mdpage.uro_mappings++; 2130 pg->mdpage.uro_mappings++;
2093 } 2131 }
2094#ifdef PMAP_CACHE_VIPT 2132#ifdef PMAP_CACHE_VIPT
 2133 if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
 2134 pg->mdpage.pvh_attrs &= ~PVF_WRITE;
2095 if (want_syncicache) 2135 if (want_syncicache)
2096 need_syncicache = true; 2136 need_syncicache = true;
2097#endif 2137#endif
2098 } 2138 }
2099 } 2139 }
2100 2140
2101 if (maskbits & PVF_REF) { 2141 if (maskbits & PVF_REF) {
2102#ifdef PMAP_CACHE_VIVT 2142#ifdef PMAP_CACHE_VIVT
2103 if ((pv->pv_flags & PVF_NC) == 0 && 2143 if ((pv->pv_flags & PVF_NC) == 0 &&
2104 (maskbits & (PVF_WRITE|PVF_MOD)) == 0 && 2144 (maskbits & (PVF_WRITE|PVF_MOD)) == 0 &&
2105 l2pte_valid(npte)) { 2145 l2pte_valid(npte)) {
2106 /* 2146 /*
2107 * Check npte here; we may have already 2147 * Check npte here; we may have already
@@ -2192,30 +2232,27 @@ pmap_clean_page(struct pv_entry *pv, boo @@ -2192,30 +2232,27 @@ pmap_clean_page(struct pv_entry *pv, boo
2192 u_int flags = 0; 2232 u_int flags = 0;
2193 vaddr_t page_to_clean = 0; 2233 vaddr_t page_to_clean = 0;
2194 2234
2195 if (pv == NULL) { 2235 if (pv == NULL) {
2196 /* nothing mapped in so nothing to flush */ 2236 /* nothing mapped in so nothing to flush */
2197 return (0); 2237 return (0);
2198 } 2238 }
2199 2239
2200 /* 2240 /*
2201 * Since we flush the cache each time we change to a different 2241 * Since we flush the cache each time we change to a different
2202 * user vmspace, we only need to flush the page if it is in the 2242 * user vmspace, we only need to flush the page if it is in the
2203 * current pmap. 2243 * current pmap.
2204 */ 2244 */
2205 if (curproc) 2245 pm = curproc->p_vmspace->vm_map.pmap;
2206 pm = curproc->p_vmspace->vm_map.pmap; 
2207 else 
2208 pm = pmap_kernel(); 
2209 2246
2210 for (npv = pv; npv; npv = npv->pv_next) { 2247 for (npv = pv; npv; npv = npv->pv_next) {
2211 if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) { 2248 if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
2212 flags |= npv->pv_flags; 2249 flags |= npv->pv_flags;
2213 /* 2250 /*
2214 * The page is mapped non-cacheable in  2251 * The page is mapped non-cacheable in
2215 * this map. No need to flush the cache. 2252 * this map. No need to flush the cache.
2216 */ 2253 */
2217 if (npv->pv_flags & PVF_NC) { 2254 if (npv->pv_flags & PVF_NC) {
2218#ifdef DIAGNOSTIC 2255#ifdef DIAGNOSTIC
2219 if (cache_needs_cleaning) 2256 if (cache_needs_cleaning)
2220 panic("pmap_clean_page: " 2257 panic("pmap_clean_page: "
2221 "cache inconsistency"); 2258 "cache inconsistency");
@@ -2346,82 +2383,102 @@ pmap_flush_page(struct vm_page *pg) @@ -2346,82 +2383,102 @@ pmap_flush_page(struct vm_page *pg)
2346#endif /* PMAP_CACHE_VIPT */ 2383#endif /* PMAP_CACHE_VIPT */
2347 2384
2348/* 2385/*
2349 * Routine: pmap_page_remove 2386 * Routine: pmap_page_remove
2350 * Function: 2387 * Function:
2351 * Removes this physical page from 2388 * Removes this physical page from
2352 * all physical maps in which it resides. 2389 * all physical maps in which it resides.
2353 * Reflects back modify bits to the pager. 2390 * Reflects back modify bits to the pager.
2354 */ 2391 */
2355static void 2392static void
2356pmap_page_remove(struct vm_page *pg) 2393pmap_page_remove(struct vm_page *pg)
2357{ 2394{
2358 struct l2_bucket *l2b; 2395 struct l2_bucket *l2b;
2359 struct pv_entry *pv, *npv; 2396 struct pv_entry *pv, *npv, **pvp;
2360 pmap_t pm, curpm; 2397 pmap_t pm, curpm;
2361 pt_entry_t *ptep, pte; 2398 pt_entry_t *ptep, pte;
2362 bool flush; 2399 bool flush;
2363 u_int flags; 2400 u_int flags;
2364 2401
2365 NPDEBUG(PDB_FOLLOW, 2402 NPDEBUG(PDB_FOLLOW,
2366 printf("pmap_page_remove: pg %p (0x%08lx)\n", pg, 2403 printf("pmap_page_remove: pg %p (0x%08lx)\n", pg,
2367 VM_PAGE_TO_PHYS(pg))); 2404 VM_PAGE_TO_PHYS(pg)));
2368 2405
2369 PMAP_HEAD_TO_MAP_LOCK(); 2406 PMAP_HEAD_TO_MAP_LOCK();
2370 simple_lock(&pg->mdpage.pvh_slock); 2407 simple_lock(&pg->mdpage.pvh_slock);
2371 2408
2372 pv = pg->mdpage.pvh_list; 2409 pv = pg->mdpage.pvh_list;
2373 if (pv == NULL) { 2410 if (pv == NULL) {
2374#ifdef PMAP_CACHE_VIPT 2411#ifdef PMAP_CACHE_VIPT
2375 /* 2412 /*
2376 * We *know* the page contents are about to be replaced. 2413 * We *know* the page contents are about to be replaced.
2377 * Discard the exec contents 2414 * Discard the exec contents
2378 */ 2415 */
2379 if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) 2416 if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs))
2380 PMAPCOUNT(exec_discarded_page_protect); 2417 PMAPCOUNT(exec_discarded_page_protect);
2381 pg->mdpage.pvh_attrs &= ~PVF_EXEC; 2418 pg->mdpage.pvh_attrs &= ~PVF_EXEC;
 2419 KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
2382#endif 2420#endif
2383 simple_unlock(&pg->mdpage.pvh_slock); 2421 simple_unlock(&pg->mdpage.pvh_slock);
2384 PMAP_HEAD_TO_MAP_UNLOCK(); 2422 PMAP_HEAD_TO_MAP_UNLOCK();
2385 return; 2423 return;
2386 } 2424 }
2387#ifdef PMAP_CACHE_VIPT 2425#ifdef PMAP_CACHE_VIPT
2388 KASSERT(pmap_is_page_colored_p(pg)); 2426 KASSERT(pmap_is_page_colored_p(pg));
2389#endif 2427#endif
2390 2428
2391 /* 2429 /*
2392 * Clear alias counts 2430 * Clear alias counts
2393 */ 2431 */
 2432#ifdef PMAP_CACHE_VIVT
2394 pg->mdpage.k_mappings = 0; 2433 pg->mdpage.k_mappings = 0;
 2434#endif
2395 pg->mdpage.urw_mappings = pg->mdpage.uro_mappings = 0; 2435 pg->mdpage.urw_mappings = pg->mdpage.uro_mappings = 0;
2396 2436
2397 flush = false; 2437 flush = false;
2398 flags = 0; 2438 flags = 0;
2399 if (curproc) 2439 curpm = curproc->p_vmspace->vm_map.pmap;
2400 curpm = curproc->p_vmspace->vm_map.pmap; 
2401 else 
2402 curpm = pmap_kernel(); 
2403 2440
2404#ifdef PMAP_CACHE_VIVT 2441#ifdef PMAP_CACHE_VIVT
2405 pmap_clean_page(pv, false); 2442 pmap_clean_page(pv, false);
2406#endif 2443#endif
2407 2444
 2445 pvp = &pg->mdpage.pvh_list;
2408 while (pv) { 2446 while (pv) {
2409 pm = pv->pv_pmap; 2447 pm = pv->pv_pmap;
 2448 npv = pv->pv_next;
2410 if (flush == false && (pm == curpm || pm == pmap_kernel())) 2449 if (flush == false && (pm == curpm || pm == pmap_kernel()))
2411 flush = true; 2450 flush = true;
2412 2451
2413 if (pm == pmap_kernel()) 2452 if (pm == pmap_kernel()) {
 2453#ifdef PMAP_CACHE_VIPT
 2454 /*
 2455 * If this was an unmanaged mapping, it must be preserved.
 2456 * Move it back on the list and advance the end-of-list
 2457 * pointer.
 2458 */
 2459 if (pv->pv_flags & PVF_KENTRY) {
 2460 *pvp = pv;
 2461 pvp = &pv->pv_next;
 2462 pv = npv;
 2463 continue;
 2464 }
 2465 if (pv->pv_flags & PVF_WRITE)
 2466 pg->mdpage.krw_mappings--;
 2467 else
 2468 pg->mdpage.kro_mappings--;
 2469#endif
2414 PMAPCOUNT(kernel_unmappings); 2470 PMAPCOUNT(kernel_unmappings);
 2471 }
2415 PMAPCOUNT(unmappings); 2472 PMAPCOUNT(unmappings);
2416 2473
2417 pmap_acquire_pmap_lock(pm); 2474 pmap_acquire_pmap_lock(pm);
2418 2475
2419 l2b = pmap_get_l2_bucket(pm, pv->pv_va); 2476 l2b = pmap_get_l2_bucket(pm, pv->pv_va);
2420 KDASSERT(l2b != NULL); 2477 KDASSERT(l2b != NULL);
2421 2478
2422 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2479 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2423 pte = *ptep; 2480 pte = *ptep;
2424 2481
2425 /* 2482 /*
2426 * Update statistics 2483 * Update statistics
2427 */ 2484 */
@@ -2430,47 +2487,49 @@ pmap_page_remove(struct vm_page *pg) @@ -2430,47 +2487,49 @@ pmap_page_remove(struct vm_page *pg)
2430 /* Wired bit */ 2487 /* Wired bit */
2431 if (pv->pv_flags & PVF_WIRED) 2488 if (pv->pv_flags & PVF_WIRED)
2432 --pm->pm_stats.wired_count; 2489 --pm->pm_stats.wired_count;
2433 2490
2434 flags |= pv->pv_flags; 2491 flags |= pv->pv_flags;
2435 2492
2436 /* 2493 /*
2437 * Invalidate the PTEs. 2494 * Invalidate the PTEs.
2438 */ 2495 */
2439 *ptep = 0; 2496 *ptep = 0;
2440 PTE_SYNC_CURRENT(pm, ptep); 2497 PTE_SYNC_CURRENT(pm, ptep);
2441 pmap_free_l2_bucket(pm, l2b, 1); 2498 pmap_free_l2_bucket(pm, l2b, 1);
2442 2499
2443 npv = pv->pv_next; 
2444 pool_put(&pmap_pv_pool, pv); 2500 pool_put(&pmap_pv_pool, pv);
2445 pv = npv; 2501 pv = npv;
 2502 /*
 2503 * if we reach the end of the list and there are still
 2504 * mappings, they might be able to be cached now.
 2505 */
2446 if (pv == NULL) { 2506 if (pv == NULL) {
2447 pg->mdpage.pvh_list = NULL; 2507 *pvp = NULL;
2448 if (pg->mdpage.pvh_attrs & PVF_KENTRY) 2508 if (pg->mdpage.pvh_list != NULL)
2449 pmap_vac_me_harder(pg, pm, 0); 2509 pmap_vac_me_harder(pg, pm, 0);
2450 } 2510 }
2451 pmap_release_pmap_lock(pm); 2511 pmap_release_pmap_lock(pm);
2452 } 2512 }
2453#ifdef PMAP_CACHE_VIPT 2513#ifdef PMAP_CACHE_VIPT
2454 /* 2514 /*
2455 * Since there are now no mappings, there isn't reason to mark it 2515 * Its EXEC cache is now gone.
2456 * as uncached. Its EXEC cache is also gone. 
2457 */ 2516 */
2458 if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) 2517 if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs))
2459 PMAPCOUNT(exec_discarded_page_protect); 2518 PMAPCOUNT(exec_discarded_page_protect);
2460 pg->mdpage.pvh_attrs &= ~(PVF_NC|PVF_EXEC); 2519 pg->mdpage.pvh_attrs &= ~PVF_EXEC;
2461#endif 2520 if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
2462#ifdef PMAP_CACHE_VIVT 2521 pg->mdpage.pvh_attrs &= ~PVF_WRITE;
2463 pg->mdpage.pvh_list = NULL; 2522 KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
2464#endif 2523#endif
2465 simple_unlock(&pg->mdpage.pvh_slock); 2524 simple_unlock(&pg->mdpage.pvh_slock);
2466 PMAP_HEAD_TO_MAP_UNLOCK(); 2525 PMAP_HEAD_TO_MAP_UNLOCK();
2467 2526
2468 if (flush) { 2527 if (flush) {
2469 /* 2528 /*
2470 * Note: We can't use pmap_tlb_flush{I,}D() here since that 2529 * Note: We can't use pmap_tlb_flush{I,}D() here since that
2471 * would need a subsequent call to pmap_update() to ensure 2530 * would need a subsequent call to pmap_update() to ensure
2472 * curpm->pm_cstate.cs_all is reset. Our callers are not 2531 * curpm->pm_cstate.cs_all is reset. Our callers are not
2473 * required to do that (see pmap(9)), so we can't modify 2532 * required to do that (see pmap(9)), so we can't modify
2474 * the current pmap's state. 2533 * the current pmap's state.
2475 */ 2534 */
2476 if (PV_BEEN_EXECD(flags)) 2535 if (PV_BEEN_EXECD(flags))
@@ -3026,119 +3085,154 @@ pmap_do_remove(pmap_t pm, vaddr_t sva, v @@ -3026,119 +3085,154 @@ pmap_do_remove(pmap_t pm, vaddr_t sva, v
3026#endif 3085#endif
3027 pm->pm_remove_all = true; 3086 pm->pm_remove_all = true;
3028 } 3087 }
3029 } 3088 }
3030 3089
3031 pmap_free_l2_bucket(pm, l2b, mappings); 3090 pmap_free_l2_bucket(pm, l2b, mappings);
3032 pm->pm_stats.resident_count -= mappings; 3091 pm->pm_stats.resident_count -= mappings;
3033 } 3092 }
3034 3093
3035 pmap_release_pmap_lock(pm); 3094 pmap_release_pmap_lock(pm);
3036 PMAP_MAP_TO_HEAD_UNLOCK(); 3095 PMAP_MAP_TO_HEAD_UNLOCK();
3037} 3096}
3038 3097
 3098#ifdef PMAP_CACHE_VIPT
 3099static struct pv_entry *
 3100pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
 3101{
 3102 struct pv_entry *pv;
 3103
 3104 simple_lock(&pg->mdpage.pvh_slock);
 3105 KASSERT(pg->mdpage.pvh_attrs & (PVF_COLORED|PVF_NC));
 3106 KASSERT((pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0);
 3107
 3108 pv = pmap_remove_pv(pg, pmap_kernel(), va, false);
 3109 KASSERT(pv);
 3110 KASSERT(pv->pv_flags & PVF_KENTRY);
 3111
 3112 /*
 3113 * If we are removing a writeable mapping to a cached exec page
 3114 * and it's the last mapping, clear its execness; otherwise sync
 3115 * the page to the icache.
 3116 */
 3117 if ((pg->mdpage.pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC
 3118 && (pv->pv_flags & PVF_WRITE) != 0) {
 3119 if (pg->mdpage.pvh_list == NULL) {
 3120 pg->mdpage.pvh_attrs &= ~PVF_EXEC;
 3121 PMAPCOUNT(exec_discarded_kremove);
 3122 } else {
 3123 pmap_syncicache_page(pg);
 3124 PMAPCOUNT(exec_synced_kremove);
 3125 }
 3126 }
 3127 pmap_vac_me_harder(pg, pmap_kernel(), 0);
 3128 simple_unlock(&pg->mdpage.pvh_slock);
 3129
 3130 return pv;
 3131}
 3132#endif /* PMAP_CACHE_VIPT */
 3133
3039/* 3134/*
3040 * pmap_kenter_pa: enter an unmanaged, wired kernel mapping 3135 * pmap_kenter_pa: enter an unmanaged, wired kernel mapping
3041 * 3136 *
3042 * We assume there is already sufficient KVM space available 3137 * We assume there is already sufficient KVM space available
3043 * to do this, as we can't allocate L2 descriptor tables/metadata 3138 * to do this, as we can't allocate L2 descriptor tables/metadata
3044 * from here. 3139 * from here.
3045 */ 3140 */
3046void 3141void
3047pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) 3142pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
3048{ 3143{
3049 struct l2_bucket *l2b; 3144 struct l2_bucket *l2b;
3050 pt_entry_t *ptep, opte; 3145 pt_entry_t *ptep, opte;
3051#ifdef PMAP_CACHE_VIPT 3146#ifdef PMAP_CACHE_VIPT
3052 struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 3147 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
3053 struct vm_page *opg; 3148 struct vm_page *opg;
 3149 struct pv_entry *pv = NULL;
3054#endif 3150#endif
3055 3151
3056 
3057 NPDEBUG(PDB_KENTER, 3152 NPDEBUG(PDB_KENTER,
3058 printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n", 3153 printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n",
3059 va, pa, prot)); 3154 va, pa, prot));
3060 3155
3061 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 3156 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
3062 KDASSERT(l2b != NULL); 3157 KDASSERT(l2b != NULL);
3063 3158
3064 ptep = &l2b->l2b_kva[l2pte_index(va)]; 3159 ptep = &l2b->l2b_kva[l2pte_index(va)];
3065 opte = *ptep; 3160 opte = *ptep;
3066 3161
3067 if (opte == 0) { 3162 if (opte == 0) {
3068 PMAPCOUNT(kenter_mappings); 3163 PMAPCOUNT(kenter_mappings);
3069 l2b->l2b_occupancy++; 3164 l2b->l2b_occupancy++;
3070 } else { 3165 } else {
3071 PMAPCOUNT(kenter_remappings); 3166 PMAPCOUNT(kenter_remappings);
3072#ifdef PMAP_CACHE_VIPT 3167#ifdef PMAP_CACHE_VIPT
3073 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3168 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3074 if (opg) { 3169 if (opg) {
3075 KASSERT(opg != pg); 3170 KASSERT(opg != pg);
 3171 KASSERT((opg->mdpage.pvh_attrs & PVF_KMPAGE) == 0);
 3172 KASSERT((prot & PMAP_KMPAGE) == 0);
3076 simple_lock(&opg->mdpage.pvh_slock); 3173 simple_lock(&opg->mdpage.pvh_slock);
3077 KASSERT(opg->mdpage.pvh_attrs & PVF_KENTRY); 3174 pv = pmap_kremove_pg(opg, va);
3078 if (PV_IS_EXEC_P(opg->mdpage.pvh_attrs) 
3079 && !(opg->mdpage.pvh_attrs & PVF_NC)) { 
3080 if (opg->mdpage.pvh_list == NULL) { 
3081 opg->mdpage.pvh_attrs &= ~PVF_EXEC; 
3082 PMAPCOUNT(exec_discarded_kremove); 
3083 } else { 
3084 pmap_syncicache_page(opg); 
3085 PMAPCOUNT(exec_synced_kremove); 
3086 } 
3087 } 
3088 KASSERT(opg->mdpage.pvh_attrs & (PVF_COLORED|PVF_NC)); 
3089 if (L2_AP(AP_W) & opte) { 
3090 KASSERT(opg->mdpage.pvh_attrs & PVF_KENTRY); 
3091 opg->mdpage.pvh_attrs &= ~PVF_KENTRY; 
3092 pmap_vac_me_harder(opg, NULL, 0); 
3093 } else { 
3094 KASSERT(opg->mdpage.kro_mappings > 0); 
3095 opg->mdpage.kro_mappings--; 
3096 } 
3097 simple_unlock(&opg->mdpage.pvh_slock); 3175 simple_unlock(&opg->mdpage.pvh_slock);
3098 } 3176 }
3099#endif 3177#endif
3100 if (l2pte_valid(opte)) { 3178 if (l2pte_valid(opte)) {
3101#ifdef PMAP_CACHE_VIVT 3179#ifdef PMAP_CACHE_VIVT
3102 cpu_dcache_wbinv_range(va, PAGE_SIZE); 3180 cpu_dcache_wbinv_range(va, PAGE_SIZE);
3103#endif 3181#endif
3104 cpu_tlb_flushD_SE(va); 3182 cpu_tlb_flushD_SE(va);
3105 cpu_cpwait(); 3183 cpu_cpwait();
3106 } 3184 }
3107 } 3185 }
3108 3186
3109 *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | 3187 *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) |
3110 pte_l2_s_cache_mode; 3188 pte_l2_s_cache_mode;
3111 PTE_SYNC(ptep); 3189 PTE_SYNC(ptep);
3112 3190
3113#ifdef PMAP_CACHE_VIPT 3191#ifdef PMAP_CACHE_VIPT
3114 if (pg) { 3192 if (pg) {
3115 simple_lock(&pg->mdpage.pvh_slock); 3193 if (prot & PMAP_KMPAGE) {
3116 if (prot & VM_PROT_WRITE) { 3194 KASSERT(pv == NULL);
3117 /* 3195 KASSERT((va & PVF_COLORED) == 0);
3118 * If they want a writeable page, make sure it 3196 simple_lock(&pg->mdpage.pvh_slock);
3119 * isn't already mapped in the kernel. 
3120 */ 
3121 KASSERT((pg->mdpage.pvh_attrs & PVF_KENTRY) == 0); 
3122 KASSERT(pg->mdpage.kro_mappings == 0); 
3123 pg->mdpage.pvh_attrs |= PVF_KENTRY; 
3124 pmap_vac_me_harder(pg, NULL, va); 
3125 } else { 
3126 KASSERT(pg->mdpage.krw_mappings == 0); 
3127 KASSERT(pg->mdpage.urw_mappings == 0); 3197 KASSERT(pg->mdpage.urw_mappings == 0);
 3198 KASSERT(pg->mdpage.uro_mappings == 0);
 3199 KASSERT(pg->mdpage.krw_mappings == 0);
 3200 KASSERT(pg->mdpage.kro_mappings == 0);
3128 KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0); 3201 KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0);
3129 pg->mdpage.kro_mappings++; 3202 /* if there is a color conflict, evict from cache. */
 3203 if (pmap_is_page_colored_p(pg)
 3204 && ((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask))
 3205 pmap_flush_page(pg);
 3206 pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
 3207 pg->mdpage.pvh_attrs |= PVF_KMPAGE
 3208 | PVF_COLORED
 3209 | (va & arm_cache_prefer_mask);
 3210 simple_unlock(&pg->mdpage.pvh_slock);
 3211 } else {
 3212 if (pv == NULL) {
 3213 pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
 3214 KASSERT(pv != NULL);
 3215 }
 3216 pmap_enter_pv(pg, pv, pmap_kernel(), va,
 3217 PVF_WIRED | PVF_KENTRY
 3218 | (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
 3219 simple_lock(&pg->mdpage.pvh_slock);
 3220 pmap_vac_me_harder(pg, pmap_kernel(), va);
 3221 simple_unlock(&pg->mdpage.pvh_slock);
3130 } 3222 }
3131 simple_unlock(&pg->mdpage.pvh_slock); 3223 } else {
 3224 if (pv != NULL)
 3225 pool_put(&pmap_pv_pool, pv);
3132 } 3226 }
3133#endif 3227#endif
3134} 3228}
3135 3229
3136void 3230void
3137pmap_kremove(vaddr_t va, vsize_t len) 3231pmap_kremove(vaddr_t va, vsize_t len)
3138{ 3232{
3139 struct l2_bucket *l2b; 3233 struct l2_bucket *l2b;
3140 pt_entry_t *ptep, *sptep, opte; 3234 pt_entry_t *ptep, *sptep, opte;
3141 vaddr_t next_bucket, eva; 3235 vaddr_t next_bucket, eva;
3142 u_int mappings; 3236 u_int mappings;
3143#ifdef PMAP_CACHE_VIPT 3237#ifdef PMAP_CACHE_VIPT
3144 struct vm_page *opg; 3238 struct vm_page *opg;
@@ -3157,48 +3251,39 @@ pmap_kremove(vaddr_t va, vsize_t len) @@ -3157,48 +3251,39 @@ pmap_kremove(vaddr_t va, vsize_t len)
3157 next_bucket = eva; 3251 next_bucket = eva;
3158 3252
3159 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 3253 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
3160 KDASSERT(l2b != NULL); 3254 KDASSERT(l2b != NULL);
3161 3255
3162 sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; 3256 sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
3163 mappings = 0; 3257 mappings = 0;
3164 3258
3165 while (va < next_bucket) { 3259 while (va < next_bucket) {
3166 opte = *ptep; 3260 opte = *ptep;
3167#ifdef PMAP_CACHE_VIPT 3261#ifdef PMAP_CACHE_VIPT
3168 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3262 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3169 if (opg) { 3263 if (opg) {
3170 simple_lock(&opg->mdpage.pvh_slock); 3264 if (opg->mdpage.pvh_attrs & PVF_KMPAGE) {
3171 if (PV_IS_EXEC_P(opg->mdpage.pvh_attrs) 3265 simple_lock(&opg->mdpage.pvh_slock);
3172 && !(opg->mdpage.pvh_attrs & PVF_NC)) { 3266 KASSERT(opg->mdpage.urw_mappings == 0);
3173 if (opg->mdpage.pvh_list == NULL) { 3267 KASSERT(opg->mdpage.uro_mappings == 0);
3174 opg->mdpage.pvh_attrs &= 3268 KASSERT(opg->mdpage.krw_mappings == 0);
3175 ~PVF_EXEC; 3269 KASSERT(opg->mdpage.kro_mappings == 0);
3176 PMAPCOUNT(exec_discarded_kremove); 3270 opg->mdpage.pvh_attrs &=
3177 } else { 3271 ~(PVF_KMPAGE|PVF_WRITE);
3178 pmap_syncicache_page(opg); 3272 simple_unlock(&opg->mdpage.pvh_slock);
3179 PMAPCOUNT(exec_synced_kremove); 
3180 } 
3181 } 
3182 KASSERT(opg->mdpage.pvh_attrs & (PVF_COLORED|PVF_NC)); 
3183 if (L2_AP(AP_W) & opte) { 
3184 KASSERT(opg->mdpage.pvh_attrs & PVF_KENTRY); 
3185 opg->mdpage.pvh_attrs &= ~PVF_KENTRY; 
3186 pmap_vac_me_harder(opg, NULL, 0); 
3187 } else { 3273 } else {
3188 KASSERT(opg->mdpage.kro_mappings > 0); 3274 pool_put(&pmap_pv_pool,
3189 opg->mdpage.kro_mappings--; 3275 pmap_kremove_pg(opg, va));
3190 } 3276 }
3191 simple_unlock(&opg->mdpage.pvh_slock); 
3192 } 3277 }
3193#endif 3278#endif
3194 if (l2pte_valid(opte)) { 3279 if (l2pte_valid(opte)) {
3195#ifdef PMAP_CACHE_VIVT 3280#ifdef PMAP_CACHE_VIVT
3196 cpu_dcache_wbinv_range(va, PAGE_SIZE); 3281 cpu_dcache_wbinv_range(va, PAGE_SIZE);
3197#endif 3282#endif
3198 cpu_tlb_flushD_SE(va); 3283 cpu_tlb_flushD_SE(va);
3199 } 3284 }
3200 if (opte) { 3285 if (opte) {
3201 *ptep = 0; 3286 *ptep = 0;
3202 mappings++; 3287 mappings++;
3203 } 3288 }
3204 va += PAGE_SIZE; 3289 va += PAGE_SIZE;
@@ -4453,51 +4538,31 @@ pmap_grow_map(vaddr_t va, pt_entry_t cac @@ -4453,51 +4538,31 @@ pmap_grow_map(vaddr_t va, pt_entry_t cac
4453 pa = pv.pv_pa; 4538 pa = pv.pv_pa;
4454#else 4539#else
4455 if (uvm_page_physget(&pa) == false) 4540 if (uvm_page_physget(&pa) == false)
4456 return (1); 4541 return (1);
4457#endif /* PMAP_STEAL_MEMORY */ 4542#endif /* PMAP_STEAL_MEMORY */
4458 } else { 4543 } else {
4459 struct vm_page *pg; 4544 struct vm_page *pg;
4460 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); 4545 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
4461 if (pg == NULL) 4546 if (pg == NULL)
4462 return (1); 4547 return (1);
4463 pa = VM_PAGE_TO_PHYS(pg); 4548 pa = VM_PAGE_TO_PHYS(pg);
4464#ifdef PMAP_CACHE_VIPT 4549#ifdef PMAP_CACHE_VIPT
4465 /* 4550 /*
4466 * This new page must not have any mappings. However, it might 4551 * This new page must not have any mappings. Enter it via
4467 * have previously used and therefore present in the cache. If 4552 * pmap_kenter_pa and let that routine do the hard work.
4468 * it doesn't have the desired color, we have to flush it from 
4469 * the cache. And while we are at it, make sure to clear its 
4470 * EXEC status. 
4471 */ 4553 */
4472 KASSERT(!(pg->mdpage.pvh_attrs & PVF_KENTRY)); 
4473 KASSERT(pg->mdpage.pvh_list == NULL); 4554 KASSERT(pg->mdpage.pvh_list == NULL);
4474 if (pmap_is_page_colored_p(pg)) { 4555 pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE);
4475 if ((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask) { 
4476 pmap_flush_page(pg); 
4477 PMAPCOUNT(vac_color_change); 
4478 } else { 
4479 PMAPCOUNT(vac_color_reuse); 
4480 } 
4481 } else { 
4482 PMAPCOUNT(vac_color_new); 
4483 } 
4484 if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) 
4485 PMAPCOUNT(exec_discarded_kremove); 
4486 /* 
4487 * We'll pretend this page was entered by pmap_kenter_pa 
4488 */ 
4489 pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_EXEC; 
4490 pg->mdpage.pvh_attrs |= va | PVF_KENTRY | PVF_COLORED | PVF_REF | PVF_MOD; 
4491#endif 4556#endif
4492 } 4557 }
4493 4558
4494 if (pap) 4559 if (pap)
4495 *pap = pa; 4560 *pap = pa;
4496 4561
4497 PMAPCOUNT(pt_mappings); 4562 PMAPCOUNT(pt_mappings);
4498 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 4563 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
4499 KDASSERT(l2b != NULL); 4564 KDASSERT(l2b != NULL);
4500 4565
4501 ptep = &l2b->l2b_kva[l2pte_index(va)]; 4566 ptep = &l2b->l2b_kva[l2pte_index(va)];
4502 *ptep = L2_S_PROTO | pa | cache_mode | 4567 *ptep = L2_S_PROTO | pa | cache_mode |
4503 L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); 4568 L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
@@ -5160,27 +5225,28 @@ pmap_postinit(void) @@ -5160,27 +5225,28 @@ pmap_postinit(void)
5160 5225
5161 error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, 5226 error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start,
5162 physical_end, L1_TABLE_SIZE, 0, &plist, 1, M_WAITOK); 5227 physical_end, L1_TABLE_SIZE, 0, &plist, 1, M_WAITOK);
5163 if (error) 5228 if (error)
5164 panic("Cannot allocate L1 physical pages"); 5229 panic("Cannot allocate L1 physical pages");
5165 5230
5166 m = TAILQ_FIRST(&plist); 5231 m = TAILQ_FIRST(&plist);
5167 eva = va + L1_TABLE_SIZE; 5232 eva = va + L1_TABLE_SIZE;
5168 pl1pt = (pd_entry_t *)va; 5233 pl1pt = (pd_entry_t *)va;
5169 5234
5170 while (m && va < eva) { 5235 while (m && va < eva) {
5171 paddr_t pa = VM_PAGE_TO_PHYS(m); 5236 paddr_t pa = VM_PAGE_TO_PHYS(m);
5172 5237
5173 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE); 5238 pmap_kenter_pa(va, pa,
 5239 VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE);
5174 5240
5175 /* 5241 /*
5176 * Make sure the L1 descriptor table is mapped 5242 * Make sure the L1 descriptor table is mapped
5177 * with the cache-mode set to write-through. 5243 * with the cache-mode set to write-through.
5178 */ 5244 */
5179 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 5245 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
5180 ptep = &l2b->l2b_kva[l2pte_index(va)]; 5246 ptep = &l2b->l2b_kva[l2pte_index(va)];
5181 pte = *ptep; 5247 pte = *ptep;
5182 pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; 5248 pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
5183 *ptep = pte; 5249 *ptep = pte;
5184 PTE_SYNC(ptep); 5250 PTE_SYNC(ptep);
5185 cpu_tlb_flushD_SE(va); 5251 cpu_tlb_flushD_SE(va);
5186 5252
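
Before the pmap.h diff, a second hedged sketch of the split that pmap_kenter_pa() now makes on VIPT caches, as seen in the hunks above: pages entered with PMAP_KMPAGE are tracked only through the page attributes (PVF_KMPAGE plus a color), while every other kenter'ed page gets a real, wired pv entry flagged PVF_KENTRY. The types are simplified and standalone; the PMAP_KMPAGE value, prefer_mask, and the flush_page()/add_kentry_pv() stubs are placeholders for arm_cache_prefer_mask, pmap_flush_page() and the pmap_enter_pv() path, not the real definitions.

#include <stdio.h>

#define PVF_WIRED	0x04
#define PVF_WRITE	0x08
#define PVF_COLORED	0x80
#define PVF_KENTRY	0x0100
#define PVF_KMPAGE	0x0200
#define PMAP_KMPAGE	0x0400			/* placeholder; the real flag lives in pmap.h */

static const unsigned long prefer_mask = 0x3000;	/* example cache alias mask */

struct page_md {
	unsigned int pvh_attrs;		/* PVF_* flags in the low bits, color above */
};

static void flush_page(struct page_md *md)
{ (void)md; /* stands in for pmap_flush_page(): evict the stale alias */ }

static void add_kentry_pv(struct page_md *md, unsigned long va, unsigned int flags)
{ (void)md; (void)va; (void)flags; /* stands in for pmap_enter_pv() + pmap_vac_me_harder() */ }

/* How pmap_kenter_pa() now tracks a page on a VIPT cache (sketch). */
static void
kenter_track(struct page_md *md, unsigned long va, int writable, int kmpage)
{
	if (kmpage) {
		/* Kernel-memory page: no pv entry, only attributes + color. */
		if ((md->pvh_attrs & PVF_COLORED) != 0 &&
		    ((va ^ md->pvh_attrs) & prefer_mask) != 0)
			flush_page(md);		/* color conflict: evict from cache */
		md->pvh_attrs &= 0xfff;		/* PAGE_SIZE - 1 on 4 KiB pages: keep flags, drop old color */
		md->pvh_attrs |= PVF_KMPAGE | PVF_COLORED | (va & prefer_mask);
	} else {
		/* Ordinary kenter'ed page: a wired pv entry on the pv list. */
		add_kentry_pv(md, va,
		    PVF_WIRED | PVF_KENTRY | (writable ? PVF_WRITE : 0));
	}
}

int
main(void)
{
	struct page_md km = { 0 }, pg = { 0 };

	kenter_track(&km, 0x1000, 1, 1);	/* PMAP_KMPAGE style mapping */
	kenter_track(&pg, 0x2000, 1, 0);	/* normal pmap_kenter_pa mapping */
	printf("kmpage attrs 0x%x\n", km.pvh_attrs);
	return 0;
}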

cvs diff -r1.85 -r1.86 src/sys/arch/arm/include/arm32/pmap.h

--- src/sys/arch/arm/include/arm32/pmap.h 2008/04/27 18:58:44 1.85
+++ src/sys/arch/arm/include/arm32/pmap.h 2008/07/16 00:19:57 1.86
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.h,v 1.85 2008/04/27 18:58:44 matt Exp $ */ 1/* $NetBSD: pmap.h,v 1.86 2008/07/16 00:19:57 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2002, 2003 Wasabi Systems, Inc. 4 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -226,26 +226,27 @@ extern pv_addr_t kernel_l1pt; @@ -226,26 +226,27 @@ extern pv_addr_t kernel_l1pt;
226 * 226 *
227 * Note the "non-cacheable" flag generally means the page has 227 * Note the "non-cacheable" flag generally means the page has
228 * multiple mappings in a given address space. 228 * multiple mappings in a given address space.
229 */ 229 */
230#define PVF_MOD 0x01 /* page is modified */ 230#define PVF_MOD 0x01 /* page is modified */
231#define PVF_REF 0x02 /* page is referenced */ 231#define PVF_REF 0x02 /* page is referenced */
232#define PVF_WIRED 0x04 /* mapping is wired */ 232#define PVF_WIRED 0x04 /* mapping is wired */
233#define PVF_WRITE 0x08 /* mapping is writable */ 233#define PVF_WRITE 0x08 /* mapping is writable */
234#define PVF_EXEC 0x10 /* mapping is executable */ 234#define PVF_EXEC 0x10 /* mapping is executable */
235#define PVF_UNC 0x20 /* mapping is 'user' non-cacheable */ 235#define PVF_UNC 0x20 /* mapping is 'user' non-cacheable */
236#define PVF_KNC 0x40 /* mapping is 'kernel' non-cacheable */ 236#define PVF_KNC 0x40 /* mapping is 'kernel' non-cacheable */
237#define PVF_COLORED 0x80 /* page has or had a color */ 237#define PVF_COLORED 0x80 /* page has or had a color */
238#define PVF_KENTRY 0x0100 /* page entered via pmap_kenter_pa */ 238#define PVF_KENTRY 0x0100 /* page entered via pmap_kenter_pa */
 239#define PVF_KMPAGE 0x0200 /* page is used for kmem */
239#define PVF_NC (PVF_UNC|PVF_KNC) 240#define PVF_NC (PVF_UNC|PVF_KNC)
240 241
241/* 242/*
242 * Commonly referenced structures 243 * Commonly referenced structures
243 */ 244 */
244extern struct pmap kernel_pmap_store; 245extern struct pmap kernel_pmap_store;
245extern int pmap_debug_level; /* Only exists if PMAP_DEBUG */ 246extern int pmap_debug_level; /* Only exists if PMAP_DEBUG */
246 247
247/* 248/*
248 * Macros that we need to export 249 * Macros that we need to export
249 */ 250 */
250#define pmap_kernel() (&kernel_pmap_store) 251#define pmap_kernel() (&kernel_pmap_store)
251#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) 252#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)