Sat Apr 18 10:46:33 2020 UTC
Remove PMAP_DEBUG by converting to UVMHIST


(skrll)
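
The conversion follows one mechanical pattern across the whole file: the PMAP_DEBUG/NPDEBUG printf scaffolding is deleted and each surviving trace point is re-expressed with the kernhist macros from <sys/kernhist.h>, logging into UVM's maphist history. A minimal sketch of the before/after shape, taken from the pmap_enter_pv() hunk below (the explanatory comments are mine, not part of the commit):

    /* Before: compiled only when the kernel is built with options PMAP_DEBUG. */
    NPDEBUG(PDB_PVDUMP,
        printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags));

    /* After: kernhist logging.  The macros take a format string plus exactly
     * four integer arguments, so pointers are cast to uintptr_t and unused
     * slots are padded with 0. */
    UVMHIST_FUNC(__func__);
    UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx",
        (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va);
    UVMHIST_LOG(maphist, "...pv %#jx flags %#jx", (uintptr_t)pv, flags, 0, 0);

The histories are recorded only in kernels built with the UVMHIST option, so the run-time pmapdebug/pmap_debug_level switches that PMAP_DEBUG provided go away along with opt_pmap_debug.h.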
diff -r1.407 -r1.408 src/sys/arch/arm/arm32/pmap.c

cvs diff -r1.407 -r1.408 src/sys/arch/arm/arm32/pmap.c

--- src/sys/arch/arm/arm32/pmap.c 2020/04/17 11:21:06 1.407
+++ src/sys/arch/arm/arm32/pmap.c 2020/04/18 10:46:32 1.408
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.407 2020/04/17 11:21:06 skrll Exp $ */ 1/* $NetBSD: pmap.c,v 1.408 2020/04/18 10:46:32 skrll Exp $ */
2 2
3/* 3/*
4 * Copyright 2003 Wasabi Systems, Inc. 4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -169,111 +169,65 @@ @@ -169,111 +169,65 @@
169 * space for kernel use only. This would require re-linking all 169 * space for kernel use only. This would require re-linking all
170 * applications so that the text section starts above this 1MB 170 * applications so that the text section starts above this 1MB
171 * boundary. 171 * boundary.
172 * 172 *
173 * o Tracking which VM space is resident in the cache/tlb has not yet 173 * o Tracking which VM space is resident in the cache/tlb has not yet
174 * been implemented for MP systems. 174 * been implemented for MP systems.
175 * 175 *
176 * o Finally, there is a pathological condition where two cpus running 176 * o Finally, there is a pathological condition where two cpus running
177 * two separate processes (not lwps) which happen to share an L1 177 * two separate processes (not lwps) which happen to share an L1
178 * can get into a fight over one or more L1 entries. This will result 178 * can get into a fight over one or more L1 entries. This will result
179 * in a significant slow-down if both processes are in tight loops. 179 * in a significant slow-down if both processes are in tight loops.
180 */ 180 */
181 181
182/* 
183 * Special compilation symbols 
184 * PMAP_DEBUG - Build in pmap_debug_level code 
185 */ 
186 
187/* Include header files */ 182/* Include header files */
188 183
189#include "opt_arm_debug.h" 184#include "opt_arm_debug.h"
190#include "opt_cpuoptions.h" 185#include "opt_cpuoptions.h"
191#include "opt_pmap_debug.h" 
192#include "opt_ddb.h" 186#include "opt_ddb.h"
193#include "opt_lockdebug.h" 187#include "opt_lockdebug.h"
194#include "opt_multiprocessor.h" 188#include "opt_multiprocessor.h"
195 189
196#ifdef MULTIPROCESSOR 190#ifdef MULTIPROCESSOR
197#define _INTR_PRIVATE 191#define _INTR_PRIVATE
198#endif 192#endif
199 193
200#include <sys/cdefs.h> 194#include <sys/cdefs.h>
201__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.407 2020/04/17 11:21:06 skrll Exp $"); 195__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.408 2020/04/18 10:46:32 skrll Exp $");
202 196
203#include <sys/atomic.h> 197#include <sys/atomic.h>
204#include <sys/param.h> 198#include <sys/param.h>
205#include <sys/types.h> 199#include <sys/types.h>
206#include <sys/atomic.h> 200#include <sys/atomic.h>
207#include <sys/bus.h> 201#include <sys/bus.h>
208#include <sys/cpu.h> 202#include <sys/cpu.h>
209#include <sys/intr.h> 203#include <sys/intr.h>
210#include <sys/kernel.h> 204#include <sys/kernel.h>
211#include <sys/kernhist.h> 205#include <sys/kernhist.h>
212#include <sys/kmem.h> 206#include <sys/kmem.h>
213#include <sys/pool.h> 207#include <sys/pool.h>
214#include <sys/proc.h> 208#include <sys/proc.h>
215#include <sys/sysctl.h> 209#include <sys/sysctl.h>
216#include <sys/systm.h> 210#include <sys/systm.h>
217 211
218#include <uvm/uvm.h> 212#include <uvm/uvm.h>
219#include <uvm/pmap/pmap_pvt.h> 213#include <uvm/pmap/pmap_pvt.h>
220 214
221#include <arm/locore.h> 215#include <arm/locore.h>
222 216
223#ifdef DDB 217#ifdef DDB
224#include <arm/db_machdep.h> 218#include <arm/db_machdep.h>
225#endif 219#endif
226 220
227//#define PMAP_DEBUG 
228#ifdef PMAP_DEBUG 
229 
230/* XXX need to get rid of all refs to this */ 
231int pmap_debug_level = 0; 
232 
233/* 
234 * for switching to potentially finer grained debugging 
235 */ 
236#define PDB_FOLLOW 0x0001 
237#define PDB_INIT 0x0002 
238#define PDB_ENTER 0x0004 
239#define PDB_REMOVE 0x0008 
240#define PDB_CREATE 0x0010 
241#define PDB_PTPAGE 0x0020 
242#define PDB_GROWKERN 0x0040 
243#define PDB_BITS 0x0080 
244#define PDB_COLLECT 0x0100 
245#define PDB_PROTECT 0x0200 
246#define PDB_MAP_L1 0x0400 
247#define PDB_BOOTSTRAP 0x1000 
248#define PDB_PARANOIA 0x2000 
249#define PDB_WIRING 0x4000 
250#define PDB_PVDUMP 0x8000 
251#define PDB_VAC 0x10000 
252#define PDB_KENTER 0x20000 
253#define PDB_KREMOVE 0x40000 
254#define PDB_EXEC 0x80000 
255 
256int debugmap = 1; 
257int pmapdebug = 0; 
258#define NPDEBUG(_lev_,_stat_) \ 
259 if (pmapdebug & (_lev_)) \ 
260 ((_stat_)) 
261 
262#else /* PMAP_DEBUG */ 
263#define NPDEBUG(_lev_,_stat_) /* Nothing */ 
264#endif /* PMAP_DEBUG */ 
265 
266 
267#ifdef VERBOSE_INIT_ARM 221#ifdef VERBOSE_INIT_ARM
268#define VPRINTF(...) printf(__VA_ARGS__) 222#define VPRINTF(...) printf(__VA_ARGS__)
269#else 223#else
270#define VPRINTF(...) __nothing 224#define VPRINTF(...) __nothing
271#endif 225#endif
272 226
273/* 227/*
274 * pmap_kernel() points here 228 * pmap_kernel() points here
275 */ 229 */
276static struct pmap kernel_pmap_store = { 230static struct pmap kernel_pmap_store = {
277#ifndef ARM_MMU_EXTENDED 231#ifndef ARM_MMU_EXTENDED
278 .pm_activated = true, 232 .pm_activated = true,
279 .pm_domain = PMAP_DOMAIN_KERNEL, 233 .pm_domain = PMAP_DOMAIN_KERNEL,
@@ -798,37 +752,26 @@ static struct pool_allocator pmap_l1tt_a @@ -798,37 +752,26 @@ static struct pool_allocator pmap_l1tt_a
798 */ 752 */
799vaddr_t virtual_avail; 753vaddr_t virtual_avail;
800vaddr_t virtual_end; 754vaddr_t virtual_end;
801vaddr_t pmap_curmaxkvaddr; 755vaddr_t pmap_curmaxkvaddr;
802 756
803paddr_t avail_start; 757paddr_t avail_start;
804paddr_t avail_end; 758paddr_t avail_end;
805 759
806pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); 760pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq);
807pv_addr_t kernelpages; 761pv_addr_t kernelpages;
808pv_addr_t kernel_l1pt; 762pv_addr_t kernel_l1pt;
809pv_addr_t systempage; 763pv_addr_t systempage;
810 764
811/* Function to set the debug level of the pmap code */ 
812 
813#ifdef PMAP_DEBUG 
814void 
815pmap_debug(int level) 
816{ 
817 pmap_debug_level = level; 
818 printf("pmap_debug: level=%d\n", pmap_debug_level); 
819} 
820#endif /* PMAP_DEBUG */ 
821 
822#ifdef PMAP_CACHE_VIPT 765#ifdef PMAP_CACHE_VIPT
823#define PMAP_VALIDATE_MD_PAGE(md) \ 766#define PMAP_VALIDATE_MD_PAGE(md) \
824 KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \ 767 KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \
825 "(md) %p: attrs=%#x urw=%u krw=%u", (md), \ 768 "(md) %p: attrs=%#x urw=%u krw=%u", (md), \
826 (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings); 769 (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings);
827#endif /* PMAP_CACHE_VIPT */ 770#endif /* PMAP_CACHE_VIPT */
828/* 771/*
829 * A bunch of routines to conditionally flush the caches/TLB depending 772 * A bunch of routines to conditionally flush the caches/TLB depending
830 * on whether the specified pmap actually needs to be flushed at any 773 * on whether the specified pmap actually needs to be flushed at any
831 * given time. 774 * given time.
832 */ 775 */
833static inline void 776static inline void
834pmap_tlb_flush_SE(pmap_t pm, vaddr_t va, u_int flags) 777pmap_tlb_flush_SE(pmap_t pm, vaddr_t va, u_int flags)
@@ -1008,30 +951,33 @@ pmap_pte_sync_current(pmap_t pm, pt_entr @@ -1008,30 +951,33 @@ pmap_pte_sync_current(pmap_t pm, pt_entr
1008/* 951/*
1009 * pmap_enter_pv: enter a mapping onto a vm_page lst 952 * pmap_enter_pv: enter a mapping onto a vm_page lst
1010 * 953 *
1011 * => caller should hold the proper lock on pmap_main_lock 954 * => caller should hold the proper lock on pmap_main_lock
1012 * => caller should have pmap locked 955 * => caller should have pmap locked
1013 * => we will gain the lock on the vm_page and allocate the new pv_entry 956 * => we will gain the lock on the vm_page and allocate the new pv_entry
1014 * => caller should adjust ptp's wire_count before calling 957 * => caller should adjust ptp's wire_count before calling
1015 * => caller should not adjust pmap's wire_count 958 * => caller should not adjust pmap's wire_count
1016 */ 959 */
1017static void 960static void
1018pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm, 961pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm,
1019 vaddr_t va, u_int flags) 962 vaddr_t va, u_int flags)
1020{ 963{
1021 struct pv_entry **pvp; 964 UVMHIST_FUNC(__func__);
 965 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx",
 966 (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va);
 967 UVMHIST_LOG(maphist, "...pv %#jx flags %#jx",
 968 (uintptr_t)pv, flags, 0, 0);
1022 969
1023 NPDEBUG(PDB_PVDUMP, 970 struct pv_entry **pvp;
1024 printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags)); 
1025 971
1026 pv->pv_pmap = pm; 972 pv->pv_pmap = pm;
1027 pv->pv_va = va; 973 pv->pv_va = va;
1028 pv->pv_flags = flags; 974 pv->pv_flags = flags;
1029 975
1030 pvp = &SLIST_FIRST(&md->pvh_list); 976 pvp = &SLIST_FIRST(&md->pvh_list);
1031#ifdef PMAP_CACHE_VIPT 977#ifdef PMAP_CACHE_VIPT
1032 /* 978 /*
1033 * Insert unmanaged entries, writeable first, at the head of 979 * Insert unmanaged entries, writeable first, at the head of
1034 * the pv list. 980 * the pv list.
1035 */ 981 */
1036 if (__predict_true(!PV_IS_KENTRY_P(flags))) { 982 if (__predict_true(!PV_IS_KENTRY_P(flags))) {
1037 while (*pvp != NULL && PV_IS_KENTRY_P((*pvp)->pv_flags)) 983 while (*pvp != NULL && PV_IS_KENTRY_P((*pvp)->pv_flags))
@@ -1116,38 +1062,39 @@ pmap_find_pv(struct vm_page_md *md, pmap @@ -1116,38 +1062,39 @@ pmap_find_pv(struct vm_page_md *md, pmap
1116/* 1062/*
1117 * pmap_remove_pv: try to remove a mapping from a pv_list 1063 * pmap_remove_pv: try to remove a mapping from a pv_list
1118 * 1064 *
1119 * => caller should hold proper lock on pmap_main_lock 1065 * => caller should hold proper lock on pmap_main_lock
1120 * => pmap should be locked 1066 * => pmap should be locked
1121 * => caller should hold lock on vm_page [so that attrs can be adjusted] 1067 * => caller should hold lock on vm_page [so that attrs can be adjusted]
1122 * => caller should adjust ptp's wire_count and free PTP if needed 1068 * => caller should adjust ptp's wire_count and free PTP if needed
1123 * => caller should NOT adjust pmap's wire_count 1069 * => caller should NOT adjust pmap's wire_count
1124 * => we return the removed pv 1070 * => we return the removed pv
1125 */ 1071 */
1126static struct pv_entry * 1072static struct pv_entry *
1127pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1073pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1128{ 1074{
1129 struct pv_entry *pv, **prevptr; 1075 UVMHIST_FUNC(__func__);
 1076 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx",
 1077 (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va);
1130 1078
1131 NPDEBUG(PDB_PVDUMP, 1079 struct pv_entry *pv, **prevptr;
1132 printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va)); 
1133 1080
1134 prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */ 1081 prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */
1135 pv = *prevptr; 1082 pv = *prevptr;
1136 1083
1137 while (pv) { 1084 while (pv) {
1138 if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */ 1085 if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */
1139 NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md " 1086 UVMHIST_LOG(maphist, "pm %#jx md %#jx flags %#jx",
1140 "%p, flags 0x%x\n", pm, md, pv->pv_flags)); 1087 (uintptr_t)pm, (uintptr_t)md, pv->pv_flags, 0);
1141 if (pv->pv_flags & PVF_WIRED) { 1088 if (pv->pv_flags & PVF_WIRED) {
1142 --pm->pm_stats.wired_count; 1089 --pm->pm_stats.wired_count;
1143 } 1090 }
1144 *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */ 1091 *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */
1145 if (pm == pmap_kernel()) { 1092 if (pm == pmap_kernel()) {
1146 PMAPCOUNT(kernel_unmappings); 1093 PMAPCOUNT(kernel_unmappings);
1147 if (pv->pv_flags & PVF_WRITE) 1094 if (pv->pv_flags & PVF_WRITE)
1148 md->krw_mappings--; 1095 md->krw_mappings--;
1149 else 1096 else
1150 md->kro_mappings--; 1097 md->kro_mappings--;
1151 } else { 1098 } else {
1152 if (pv->pv_flags & PVF_WRITE) 1099 if (pv->pv_flags & PVF_WRITE)
1153 md->urw_mappings--; 1100 md->urw_mappings--;
@@ -1212,35 +1159,38 @@ pmap_remove_pv(struct vm_page_md *md, pa @@ -1212,35 +1159,38 @@ pmap_remove_pv(struct vm_page_md *md, pa
1212 * => caller should NOT adjust pmap's wire_count 1159 * => caller should NOT adjust pmap's wire_count
1213 * => caller must call pmap_vac_me_harder() if writable status of a page 1160 * => caller must call pmap_vac_me_harder() if writable status of a page
1214 * may have changed. 1161 * may have changed.
1215 * => we return the old flags 1162 * => we return the old flags
1216 * 1163 *
1217 * Modify a physical-virtual mapping in the pv table 1164 * Modify a physical-virtual mapping in the pv table
1218 */ 1165 */
1219static u_int 1166static u_int
1220pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va, 1167pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va,
1221 u_int clr_mask, u_int set_mask) 1168 u_int clr_mask, u_int set_mask)
1222{ 1169{
1223 struct pv_entry *npv; 1170 struct pv_entry *npv;
1224 u_int flags, oflags; 1171 u_int flags, oflags;
 1172 UVMHIST_FUNC(__func__);
 1173 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx",
 1174 (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va);
 1175 UVMHIST_LOG(maphist, "... clr %#jx set %#jx", clr_mask, set_mask, 0, 0);
1225 1176
1226 KASSERT(!PV_IS_KENTRY_P(clr_mask)); 1177 KASSERT(!PV_IS_KENTRY_P(clr_mask));
1227 KASSERT(!PV_IS_KENTRY_P(set_mask)); 1178 KASSERT(!PV_IS_KENTRY_P(set_mask));
1228 1179
1229 if ((npv = pmap_find_pv(md, pm, va)) == NULL) 1180 if ((npv = pmap_find_pv(md, pm, va)) == NULL) {
 1181 UVMHIST_LOG(maphist, "<--- done (not found)", 0, 0, 0, 0);
1230 return 0; 1182 return 0;
1231 1183 }
1232 NPDEBUG(PDB_PVDUMP, 
1233 printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags)); 
1234 1184
1235 /* 1185 /*
1236 * There is at least one VA mapping this page. 1186 * There is at least one VA mapping this page.
1237 */ 1187 */
1238 1188
1239 if (clr_mask & (PVF_REF | PVF_MOD)) { 1189 if (clr_mask & (PVF_REF | PVF_MOD)) {
1240 md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); 1190 md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
1241#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 1191#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
1242 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) 1192 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
1243 md->pvh_attrs |= PVF_DIRTY; 1193 md->pvh_attrs |= PVF_DIRTY;
1244 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1194 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1245#endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ 1195#endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */
1246 } 1196 }
@@ -1291,26 +1241,28 @@ pmap_modify_pv(struct vm_page_md *md, pa @@ -1291,26 +1241,28 @@ pmap_modify_pv(struct vm_page_md *md, pa
1291 if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs)) 1241 if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs))
1292 || (PV_IS_EXEC_P(md->pvh_attrs) 1242 || (PV_IS_EXEC_P(md->pvh_attrs)
1293 || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) { 1243 || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) {
1294 pmap_syncicache_page(md, pa); 1244 pmap_syncicache_page(md, pa);
1295 PMAPCOUNT(exec_synced_remap); 1245 PMAPCOUNT(exec_synced_remap);
1296 } 1246 }
1297#ifndef ARM_MMU_EXTENDED 1247#ifndef ARM_MMU_EXTENDED
1298 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1248 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1299#endif /* !ARM_MMU_EXTENDED */ 1249#endif /* !ARM_MMU_EXTENDED */
1300#endif /* PMAP_CACHE_VIPT */ 1250#endif /* PMAP_CACHE_VIPT */
1301 1251
1302 PMAPCOUNT(remappings); 1252 PMAPCOUNT(remappings);
1303 1253
 1254 UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0);
 1255
1304 return oflags; 1256 return oflags;
1305} 1257}
1306 1258
1307 1259
1308#if defined(ARM_MMU_EXTENDED) 1260#if defined(ARM_MMU_EXTENDED)
1309int 1261int
1310pmap_maxproc_set(int nmaxproc) 1262pmap_maxproc_set(int nmaxproc)
1311{ 1263{
1312 static const char pmap_l1ttpool_warnmsg[] = 1264 static const char pmap_l1ttpool_warnmsg[] =
1313 "WARNING: l1ttpool limit reached; increase kern.maxproc"; 1265 "WARNING: l1ttpool limit reached; increase kern.maxproc";
1314 1266
1315 pool_cache_prime(&pmap_l1tt_cache, nmaxproc); 1267 pool_cache_prime(&pmap_l1tt_cache, nmaxproc);
1316 1268
@@ -2045,40 +1997,42 @@ pmap_vac_me_user(struct vm_page_md *md,  @@ -2045,40 +1997,42 @@ pmap_vac_me_user(struct vm_page_md *md,
2045 } 1997 }
2046 1998
2047 l2pte_set(ptep, npte, opte); 1999 l2pte_set(ptep, npte, opte);
2048 PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 2000 PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
2049 } 2001 }
2050 } 2002 }
2051} 2003}
2052#endif 2004#endif
2053 2005
2054#ifdef PMAP_CACHE_VIPT 2006#ifdef PMAP_CACHE_VIPT
2055static void 2007static void
2056pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 2008pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
2057{ 2009{
 2010
2058#ifndef ARM_MMU_EXTENDED 2011#ifndef ARM_MMU_EXTENDED
2059 struct pv_entry *pv; 2012 struct pv_entry *pv;
2060 vaddr_t tst_mask; 2013 vaddr_t tst_mask;
2061 bool bad_alias; 2014 bool bad_alias;
2062 const u_int 2015 const u_int
2063 rw_mappings = md->urw_mappings + md->krw_mappings, 2016 rw_mappings = md->urw_mappings + md->krw_mappings,
2064 ro_mappings = md->uro_mappings + md->kro_mappings; 2017 ro_mappings = md->uro_mappings + md->kro_mappings;
2065 2018
2066 /* do we need to do anything? */ 2019 /* do we need to do anything? */
2067 if (arm_cache_prefer_mask == 0) 2020 if (arm_cache_prefer_mask == 0)
2068 return; 2021 return;
2069 2022
2070 NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: md=%p, pmap=%p va=%08lx\n", 2023 UVMHIST_FUNC(__func__);
2071 md, pm, va)); 2024 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx",
 2025 (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va);
2072 2026
2073 KASSERT(!va || pm); 2027 KASSERT(!va || pm);
2074 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2028 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2075 2029
2076 /* Already a conflict? */ 2030 /* Already a conflict? */
2077 if (__predict_false(md->pvh_attrs & PVF_NC)) { 2031 if (__predict_false(md->pvh_attrs & PVF_NC)) {
2078 /* just an add, things are already non-cached */ 2032 /* just an add, things are already non-cached */
2079 KASSERT(!(md->pvh_attrs & PVF_DIRTY)); 2033 KASSERT(!(md->pvh_attrs & PVF_DIRTY));
2080 KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 2034 KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
2081 bad_alias = false; 2035 bad_alias = false;
2082 if (va) { 2036 if (va) {
2083 PMAPCOUNT(vac_color_none); 2037 PMAPCOUNT(vac_color_none);
2084 bad_alias = true; 2038 bad_alias = true;
@@ -2356,29 +2310,29 @@ pmap_clearbit(struct vm_page_md *md, pad @@ -2356,29 +2310,29 @@ pmap_clearbit(struct vm_page_md *md, pad
2356#ifdef PMAP_CACHE_VIPT 2310#ifdef PMAP_CACHE_VIPT
2357 const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs); 2311 const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs);
2358 bool need_syncicache = false; 2312 bool need_syncicache = false;
2359#ifdef ARM_MMU_EXTENDED 2313#ifdef ARM_MMU_EXTENDED
2360 const u_int execbits = (maskbits & PVF_EXEC) ? L2_XS_XN : 0; 2314 const u_int execbits = (maskbits & PVF_EXEC) ? L2_XS_XN : 0;
2361#else 2315#else
2362 const u_int execbits = 0; 2316 const u_int execbits = 0;
2363 bool need_vac_me_harder = false; 2317 bool need_vac_me_harder = false;
2364#endif 2318#endif
2365#else 2319#else
2366 const u_int execbits = 0; 2320 const u_int execbits = 0;
2367#endif 2321#endif
2368 2322
2369 NPDEBUG(PDB_BITS, 2323 UVMHIST_FUNC(__func__);
2370 printf("pmap_clearbit: md %p mask 0x%x\n", 2324 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx maskbits %#jx",
2371 md, maskbits)); 2325 (uintptr_t)md, pa, maskbits, 0);
2372 2326
2373#ifdef PMAP_CACHE_VIPT 2327#ifdef PMAP_CACHE_VIPT
2374 /* 2328 /*
2375 * If we might want to sync the I-cache and we've modified it, 2329 * If we might want to sync the I-cache and we've modified it,
2376 * then we know we definitely need to sync or discard it. 2330 * then we know we definitely need to sync or discard it.
2377 */ 2331 */
2378 if (want_syncicache) { 2332 if (want_syncicache) {
2379 if (md->pvh_attrs & PVF_MOD) { 2333 if (md->pvh_attrs & PVF_MOD) {
2380 need_syncicache = true; 2334 need_syncicache = true;
2381 } 2335 }
2382 } 2336 }
2383#endif 2337#endif
2384 KASSERT(pmap_page_locked_p(md)); 2338 KASSERT(pmap_page_locked_p(md));
@@ -2451,29 +2405,28 @@ pmap_clearbit(struct vm_page_md *md, pad @@ -2451,29 +2405,28 @@ pmap_clearbit(struct vm_page_md *md, pad
2451 pv->pv_flags &= ~maskbits; 2405 pv->pv_flags &= ~maskbits;
2452 2406
2453 struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va); 2407 struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va);
2454 KASSERTMSG(l2b != NULL, "%#lx", va); 2408 KASSERTMSG(l2b != NULL, "%#lx", va);
2455 2409
2456 pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; 2410 pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
2457 const pt_entry_t opte = *ptep; 2411 const pt_entry_t opte = *ptep;
2458 pt_entry_t npte = opte | execbits; 2412 pt_entry_t npte = opte | execbits;
2459 2413
2460#ifdef ARM_MMU_EXTENDED 2414#ifdef ARM_MMU_EXTENDED
2461 KASSERT((opte & L2_XS_nG) == (pm == pmap_kernel() ? 0 : L2_XS_nG)); 2415 KASSERT((opte & L2_XS_nG) == (pm == pmap_kernel() ? 0 : L2_XS_nG));
2462#endif 2416#endif
2463 2417
2464 NPDEBUG(PDB_BITS, 2418 UVMHIST_LOG(maphist, "pv %#jx pm %#jx va %#jx flag %#jx",
2465 printf( "%s: pv %p, pm %p, va 0x%08lx, flag 0x%x\n", 2419 (uintptr_t)pv, (uintptr_t)pm, va, oflags);
2466 __func__, pv, pm, va, oflags)); 
2467 2420
2468 if (maskbits & (PVF_WRITE|PVF_MOD)) { 2421 if (maskbits & (PVF_WRITE|PVF_MOD)) {
2469#ifdef PMAP_CACHE_VIVT 2422#ifdef PMAP_CACHE_VIVT
2470 if ((oflags & PVF_NC)) { 2423 if ((oflags & PVF_NC)) {
2471 /* 2424 /*
2472 * Entry is not cacheable: 2425 * Entry is not cacheable:
2473 * 2426 *
2474 * Don't turn caching on again if this is a 2427 * Don't turn caching on again if this is a
2475 * modified emulation. This would be 2428 * modified emulation. This would be
 2476 * inconsistent with the settings created by 2429 * inconsistent with the settings created by
 2477 * pmap_vac_me_harder(). Otherwise, it's safe 2430 * pmap_vac_me_harder(). Otherwise, it's safe
 2478 * to re-enable caching. 2431 * to re-enable caching.
2479 * 2432 *
@@ -2558,29 +2511,28 @@ pmap_clearbit(struct vm_page_md *md, pad @@ -2558,29 +2511,28 @@ pmap_clearbit(struct vm_page_md *md, pad
2558 if (npte != opte) { 2511 if (npte != opte) {
2559 l2pte_reset(ptep); 2512 l2pte_reset(ptep);
2560 PTE_SYNC(ptep); 2513 PTE_SYNC(ptep);
2561 2514
2562 /* Flush the TLB entry if a current pmap. */ 2515 /* Flush the TLB entry if a current pmap. */
2563 pmap_tlb_flush_SE(pm, va, oflags); 2516 pmap_tlb_flush_SE(pm, va, oflags);
2564 2517
2565 l2pte_set(ptep, npte, 0); 2518 l2pte_set(ptep, npte, 0);
2566 PTE_SYNC(ptep); 2519 PTE_SYNC(ptep);
2567 } 2520 }
2568 2521
2569 pmap_release_pmap_lock(pm); 2522 pmap_release_pmap_lock(pm);
2570 2523
2571 NPDEBUG(PDB_BITS, 2524 UVMHIST_LOG(maphist, "pm %#jx va %#jx opte %#jx npte %#jx",
2572 printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n", 2525 (uintptr_t)pm, va, opte, npte);
2573 pm, va, opte, npte)); 
2574 2526
2575 /* Move to next entry. */ 2527 /* Move to next entry. */
2576 pv = SLIST_NEXT(pv, pv_link); 2528 pv = SLIST_NEXT(pv, pv_link);
2577 } 2529 }
2578 2530
2579#if defined(PMAP_CACHE_VIPT) 2531#if defined(PMAP_CACHE_VIPT)
2580 /* 2532 /*
2581 * If we need to sync the I-cache and we haven't done it yet, do it. 2533 * If we need to sync the I-cache and we haven't done it yet, do it.
2582 */ 2534 */
2583 if (need_syncicache) { 2535 if (need_syncicache) {
2584 pmap_syncicache_page(md, pa); 2536 pmap_syncicache_page(md, pa);
2585 PMAPCOUNT(exec_synced_clearbit); 2537 PMAPCOUNT(exec_synced_clearbit);
2586 } 2538 }
@@ -2675,28 +2627,30 @@ pmap_clean_page(struct vm_page_md *md, b @@ -2675,28 +2627,30 @@ pmap_clean_page(struct vm_page_md *md, b
2675#ifdef PMAP_CACHE_VIPT 2627#ifdef PMAP_CACHE_VIPT
2676/* 2628/*
2677 * Sync a page with the I-cache. Since this is a VIPT, we must pick the 2629 * Sync a page with the I-cache. Since this is a VIPT, we must pick the
2678 * right cache alias to make sure we flush the right stuff. 2630 * right cache alias to make sure we flush the right stuff.
2679 */ 2631 */
2680void 2632void
2681pmap_syncicache_page(struct vm_page_md *md, paddr_t pa) 2633pmap_syncicache_page(struct vm_page_md *md, paddr_t pa)
2682{ 2634{
2683 pmap_t kpm = pmap_kernel(); 2635 pmap_t kpm = pmap_kernel();
2684 const size_t way_size = arm_pcache.icache_type == CACHE_TYPE_PIPT 2636 const size_t way_size = arm_pcache.icache_type == CACHE_TYPE_PIPT
2685 ? PAGE_SIZE 2637 ? PAGE_SIZE
2686 : arm_pcache.icache_way_size; 2638 : arm_pcache.icache_way_size;
2687 2639
2688 NPDEBUG(PDB_EXEC, printf("pmap_syncicache_page: md=%p (attrs=%#x)\n", 2640 UVMHIST_FUNC(__func__);
2689 md, md->pvh_attrs)); 2641 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx (attrs=%#jx)",
 2642 (uintptr_t)md, pa, md->pvh_attrs, 0);
 2643
2690 /* 2644 /*
2691 * No need to clean the page if it's non-cached. 2645 * No need to clean the page if it's non-cached.
2692 */ 2646 */
2693#ifndef ARM_MMU_EXTENDED 2647#ifndef ARM_MMU_EXTENDED
2694 if (md->pvh_attrs & PVF_NC) 2648 if (md->pvh_attrs & PVF_NC)
2695 return; 2649 return;
2696 KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED); 2650 KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED);
2697#endif 2651#endif
2698 2652
2699 pt_entry_t * const ptep = cpu_cdst_pte(0); 2653 pt_entry_t * const ptep = cpu_cdst_pte(0);
2700 const vaddr_t dstp = cpu_cdstp(0); 2654 const vaddr_t dstp = cpu_cdstp(0);
2701#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS 2655#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
2702 if (way_size <= PAGE_SIZE) { 2656 if (way_size <= PAGE_SIZE) {
@@ -2750,26 +2704,30 @@ pmap_syncicache_page(struct vm_page_md * @@ -2750,26 +2704,30 @@ pmap_syncicache_page(struct vm_page_md *
2750 PMAPCOUNT(exec_synced); 2704 PMAPCOUNT(exec_synced);
2751} 2705}
2752 2706
2753#ifndef ARM_MMU_EXTENDED 2707#ifndef ARM_MMU_EXTENDED
2754void 2708void
2755pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) 2709pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush)
2756{ 2710{
2757 vsize_t va_offset, end_va; 2711 vsize_t va_offset, end_va;
2758 bool wbinv_p; 2712 bool wbinv_p;
2759 2713
2760 if (arm_cache_prefer_mask == 0) 2714 if (arm_cache_prefer_mask == 0)
2761 return; 2715 return;
2762 2716
 2717 UVMHIST_FUNC(__func__);
 2718 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx op %#jx",
 2719 (uintptr_t)md, pa, flush, 0);
 2720
2763 switch (flush) { 2721 switch (flush) {
2764 case PMAP_FLUSH_PRIMARY: 2722 case PMAP_FLUSH_PRIMARY:
2765 if (md->pvh_attrs & PVF_MULTCLR) { 2723 if (md->pvh_attrs & PVF_MULTCLR) {
2766 va_offset = 0; 2724 va_offset = 0;
2767 end_va = arm_cache_prefer_mask; 2725 end_va = arm_cache_prefer_mask;
2768 md->pvh_attrs &= ~PVF_MULTCLR; 2726 md->pvh_attrs &= ~PVF_MULTCLR;
2769 PMAPCOUNT(vac_flush_lots); 2727 PMAPCOUNT(vac_flush_lots);
2770 } else { 2728 } else {
2771 va_offset = md->pvh_attrs & arm_cache_prefer_mask; 2729 va_offset = md->pvh_attrs & arm_cache_prefer_mask;
2772 end_va = va_offset; 2730 end_va = va_offset;
2773 PMAPCOUNT(vac_flush_one); 2731 PMAPCOUNT(vac_flush_one);
2774 } 2732 }
2775 /* 2733 /*
@@ -2792,28 +2750,28 @@ pmap_flush_page(struct vm_page_md *md, p @@ -2792,28 +2750,28 @@ pmap_flush_page(struct vm_page_md *md, p
2792 /* 2750 /*
2793 * Mark that the page is no longer dirty. 2751 * Mark that the page is no longer dirty.
2794 */ 2752 */
2795 if ((md->pvh_attrs & PVF_DMOD) == 0) 2753 if ((md->pvh_attrs & PVF_DMOD) == 0)
2796 md->pvh_attrs &= ~PVF_DIRTY; 2754 md->pvh_attrs &= ~PVF_DIRTY;
2797 PMAPCOUNT(vac_clean_one); 2755 PMAPCOUNT(vac_clean_one);
2798 break; 2756 break;
2799 default: 2757 default:
2800 return; 2758 return;
2801 } 2759 }
2802 2760
2803 KASSERT(!(md->pvh_attrs & PVF_NC)); 2761 KASSERT(!(md->pvh_attrs & PVF_NC));
2804 2762
 2805 NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n", 2763 UVMHIST_LOG(maphist, "md %#jx (attrs=%#jx)", (uintptr_t)md,
 2806 md, md->pvh_attrs)); 2764 md->pvh_attrs, 0, 0);
2807 2765
2808 const size_t scache_line_size = arm_scache.dcache_line_size; 2766 const size_t scache_line_size = arm_scache.dcache_line_size;
2809 2767
2810 for (; va_offset <= end_va; va_offset += PAGE_SIZE) { 2768 for (; va_offset <= end_va; va_offset += PAGE_SIZE) {
2811 pt_entry_t * const ptep = cpu_cdst_pte(va_offset); 2769 pt_entry_t * const ptep = cpu_cdst_pte(va_offset);
2812 const vaddr_t dstp = cpu_cdstp(va_offset); 2770 const vaddr_t dstp = cpu_cdstp(va_offset);
2813 const pt_entry_t opte = *ptep; 2771 const pt_entry_t opte = *ptep;
2814 2772
2815 if (flush == PMAP_FLUSH_SECONDARY 2773 if (flush == PMAP_FLUSH_SECONDARY
2816 && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) 2774 && va_offset == (md->pvh_attrs & arm_cache_prefer_mask))
2817 continue; 2775 continue;
2818 2776
2819 pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); 2777 pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC);
@@ -2873,29 +2831,28 @@ pmap_flush_page(struct vm_page_md *md, p @@ -2873,29 +2831,28 @@ pmap_flush_page(struct vm_page_md *md, p
2873 * Reflects back modify bits to the pager. 2831 * Reflects back modify bits to the pager.
2874 */ 2832 */
2875static void 2833static void
2876pmap_page_remove(struct vm_page_md *md, paddr_t pa) 2834pmap_page_remove(struct vm_page_md *md, paddr_t pa)
2877{ 2835{
2878 struct l2_bucket *l2b; 2836 struct l2_bucket *l2b;
2879 struct pv_entry *pv; 2837 struct pv_entry *pv;
2880 pt_entry_t *ptep; 2838 pt_entry_t *ptep;
2881#ifndef ARM_MMU_EXTENDED 2839#ifndef ARM_MMU_EXTENDED
2882 bool flush = false; 2840 bool flush = false;
2883#endif 2841#endif
2884 u_int flags = 0; 2842 u_int flags = 0;
2885 2843
2886 NPDEBUG(PDB_FOLLOW, 2844 UVMHIST_FUNC(__func__);
2887 printf("pmap_page_remove: md %p (0x%08lx)\n", md, 2845 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx", (uintptr_t)md, pa, 0, 0);
2888 pa)); 
2889 2846
2890 struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list); 2847 struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list);
2891 pmap_acquire_page_lock(md); 2848 pmap_acquire_page_lock(md);
2892 if (*pvp == NULL) { 2849 if (*pvp == NULL) {
2893#ifdef PMAP_CACHE_VIPT 2850#ifdef PMAP_CACHE_VIPT
2894 /* 2851 /*
2895 * We *know* the page contents are about to be replaced. 2852 * We *know* the page contents are about to be replaced.
2896 * Discard the exec contents 2853 * Discard the exec contents
2897 */ 2854 */
2898 if (PV_IS_EXEC_P(md->pvh_attrs)) 2855 if (PV_IS_EXEC_P(md->pvh_attrs))
2899 PMAPCOUNT(exec_discarded_page_protect); 2856 PMAPCOUNT(exec_discarded_page_protect);
2900 md->pvh_attrs &= ~PVF_EXEC; 2857 md->pvh_attrs &= ~PVF_EXEC;
2901 PMAP_VALIDATE_MD_PAGE(md); 2858 PMAP_VALIDATE_MD_PAGE(md);
@@ -4083,29 +4040,29 @@ pmap_pv_protect(paddr_t pa, vm_prot_t pr @@ -4083,29 +4040,29 @@ pmap_pv_protect(paddr_t pa, vm_prot_t pr
4083{ 4040{
4084 4041
4085 /* the only case is remove at the moment */ 4042 /* the only case is remove at the moment */
4086 KASSERT(prot == VM_PROT_NONE); 4043 KASSERT(prot == VM_PROT_NONE);
4087 pmap_pv_remove(pa); 4044 pmap_pv_remove(pa);
4088} 4045}
4089 4046
4090void 4047void
4091pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 4048pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
4092{ 4049{
4093 struct l2_bucket *l2b; 4050 struct l2_bucket *l2b;
4094 vaddr_t next_bucket; 4051 vaddr_t next_bucket;
4095 4052
4096 NPDEBUG(PDB_PROTECT, 4053 UVMHIST_FUNC(__func__);
4097 printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n", 4054 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx...#%jx prot %#jx",
4098 pm, sva, eva, prot)); 4055 (uintptr_t)pm, sva, eva, prot);
4099 4056
4100 if ((prot & VM_PROT_READ) == 0) { 4057 if ((prot & VM_PROT_READ) == 0) {
4101 pmap_remove(pm, sva, eva); 4058 pmap_remove(pm, sva, eva);
4102 return; 4059 return;
4103 } 4060 }
4104 4061
4105 if (prot & VM_PROT_WRITE) { 4062 if (prot & VM_PROT_WRITE) {
4106 /* 4063 /*
4107 * If this is a read->write transition, just ignore it and let 4064 * If this is a read->write transition, just ignore it and let
4108 * uvm_fault() take care of it later. 4065 * uvm_fault() take care of it later.
4109 */ 4066 */
4110 return; 4067 return;
4111 } 4068 }
@@ -4200,29 +4157,29 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad @@ -4200,29 +4157,29 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
4200#endif 4157#endif
4201 4158
4202 pmap_release_pmap_lock(pm); 4159 pmap_release_pmap_lock(pm);
4203} 4160}
4204 4161
4205void 4162void
4206pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) 4163pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
4207{ 4164{
4208 struct l2_bucket *l2b; 4165 struct l2_bucket *l2b;
4209 pt_entry_t *ptep; 4166 pt_entry_t *ptep;
4210 vaddr_t next_bucket; 4167 vaddr_t next_bucket;
4211 vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; 4168 vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva;
4212 4169
4213 NPDEBUG(PDB_EXEC, 4170 UVMHIST_FUNC(__func__);
4214 printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n", 4171 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx...#%jx",
4215 pm, sva, eva)); 4172 (uintptr_t)pm, sva, eva, 0);
4216 4173
4217 pmap_acquire_pmap_lock(pm); 4174 pmap_acquire_pmap_lock(pm);
4218 4175
4219 while (sva < eva) { 4176 while (sva < eva) {
4220 next_bucket = L2_NEXT_BUCKET_VA(sva); 4177 next_bucket = L2_NEXT_BUCKET_VA(sva);
4221 if (next_bucket > eva) 4178 if (next_bucket > eva)
4222 next_bucket = eva; 4179 next_bucket = eva;
4223 4180
4224 l2b = pmap_get_l2_bucket(pm, sva); 4181 l2b = pmap_get_l2_bucket(pm, sva);
4225 if (l2b == NULL) { 4182 if (l2b == NULL) {
4226 sva = next_bucket; 4183 sva = next_bucket;
4227 continue; 4184 continue;
4228 } 4185 }
@@ -4238,29 +4195,29 @@ pmap_icache_sync_range(pmap_t pm, vaddr_ @@ -4238,29 +4195,29 @@ pmap_icache_sync_range(pmap_t pm, vaddr_
4238 } 4195 }
4239 } 4196 }
4240 } 4197 }
4241 4198
4242 pmap_release_pmap_lock(pm); 4199 pmap_release_pmap_lock(pm);
4243} 4200}
4244 4201
4245void 4202void
4246pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 4203pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
4247{ 4204{
4248 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4205 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4249 paddr_t pa = VM_PAGE_TO_PHYS(pg); 4206 paddr_t pa = VM_PAGE_TO_PHYS(pg);
4250 4207
4251 NPDEBUG(PDB_PROTECT, 4208 UVMHIST_FUNC(__func__);
4252 printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n", 4209 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx prot %#jx",
4253 md, pa, prot)); 4210 (uintptr_t)md, pa, prot, 0);
4254 4211
4255 switch(prot) { 4212 switch(prot) {
4256 case VM_PROT_READ|VM_PROT_WRITE: 4213 case VM_PROT_READ|VM_PROT_WRITE:
4257#if defined(ARM_MMU_EXTENDED) 4214#if defined(ARM_MMU_EXTENDED)
4258 pmap_acquire_page_lock(md); 4215 pmap_acquire_page_lock(md);
4259 pmap_clearbit(md, pa, PVF_EXEC); 4216 pmap_clearbit(md, pa, PVF_EXEC);
4260 pmap_release_page_lock(md); 4217 pmap_release_page_lock(md);
4261 break; 4218 break;
4262#endif 4219#endif
4263 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: 4220 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
4264 break; 4221 break;
4265 4222
4266 case VM_PROT_READ: 4223 case VM_PROT_READ:
@@ -4856,27 +4813,28 @@ pmap_procwr(struct proc *p, vaddr_t va,  @@ -4856,27 +4813,28 @@ pmap_procwr(struct proc *p, vaddr_t va,
4856 * Function: Clear the wired attribute for a map/virtual-address pair. 4813 * Function: Clear the wired attribute for a map/virtual-address pair.
4857 * 4814 *
4858 * In/out conditions: 4815 * In/out conditions:
4859 * The mapping must already exist in the pmap. 4816 * The mapping must already exist in the pmap.
4860 */ 4817 */
4861void 4818void
4862pmap_unwire(pmap_t pm, vaddr_t va) 4819pmap_unwire(pmap_t pm, vaddr_t va)
4863{ 4820{
4864 struct l2_bucket *l2b; 4821 struct l2_bucket *l2b;
4865 pt_entry_t *ptep, pte; 4822 pt_entry_t *ptep, pte;
4866 struct vm_page *pg; 4823 struct vm_page *pg;
4867 paddr_t pa; 4824 paddr_t pa;
4868 4825
4869 NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va)); 4826 UVMHIST_FUNC(__func__);
 4827 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx", (uintptr_t)pm, va, 0, 0);
4870 4828
4871 pmap_acquire_pmap_lock(pm); 4829 pmap_acquire_pmap_lock(pm);
4872 4830
4873 l2b = pmap_get_l2_bucket(pm, va); 4831 l2b = pmap_get_l2_bucket(pm, va);
4874 KDASSERT(l2b != NULL); 4832 KDASSERT(l2b != NULL);
4875 4833
4876 ptep = &l2b->l2b_kva[l2pte_index(va)]; 4834 ptep = &l2b->l2b_kva[l2pte_index(va)];
4877 pte = *ptep; 4835 pte = *ptep;
4878 4836
4879 /* Extract the physical address of the page */ 4837 /* Extract the physical address of the page */
4880 pa = l2pte_pa(pte); 4838 pa = l2pte_pa(pte);
4881 4839
4882 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 4840 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
@@ -4917,29 +4875,26 @@ pmap_md_pdetab_activate(pmap_t pm, struc @@ -4917,29 +4875,26 @@ pmap_md_pdetab_activate(pmap_t pm, struc
4917 4875
4918 cpu_setttb(pm->pm_l1_pa, pai->pai_asid); 4876 cpu_setttb(pm->pm_l1_pa, pai->pai_asid);
4919 /* 4877 /*
4920 * Now we can reenable tablewalks since the CONTEXTIDR and TTRB0 4878 * Now we can reenable tablewalks since the CONTEXTIDR and TTRB0
4921 * have been updated. 4879 * have been updated.
4922 */ 4880 */
4923 arm_isb(); 4881 arm_isb();
4924 4882
4925 if (pm != pmap_kernel()) { 4883 if (pm != pmap_kernel()) {
4926 armreg_ttbcr_write(old_ttbcr & ~TTBCR_S_PD0); 4884 armreg_ttbcr_write(old_ttbcr & ~TTBCR_S_PD0);
4927 } 4885 }
4928 cpu_cpwait(); 4886 cpu_cpwait();
4929 4887
4930 UVMHIST_LOG(maphist, " pm %#jx pm->pm_l1_pa %08jx asid %ju... done", 
4931 (uintptr_t)pm, pm->pm_l1_pa, pai->pai_asid, 0); 
4932 
4933 KASSERTMSG(ci->ci_pmap_asid_cur == pai->pai_asid, "%u vs %u", 4888 KASSERTMSG(ci->ci_pmap_asid_cur == pai->pai_asid, "%u vs %u",
4934 ci->ci_pmap_asid_cur, pai->pai_asid); 4889 ci->ci_pmap_asid_cur, pai->pai_asid);
4935 ci->ci_pmap_cur = pm; 4890 ci->ci_pmap_cur = pm;
4936} 4891}
4937 4892
4938void 4893void
4939pmap_md_pdetab_deactivate(pmap_t pm) 4894pmap_md_pdetab_deactivate(pmap_t pm)
4940{ 4895{
4941 4896
4942 UVMHIST_FUNC(__func__); 4897 UVMHIST_FUNC(__func__);
4943 UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0); 4898 UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0);
4944 4899
4945 kpreempt_disable(); 4900 kpreempt_disable();
@@ -5969,39 +5924,39 @@ pmap_grow_l2_bucket(pmap_t pm, vaddr_t v @@ -5969,39 +5924,39 @@ pmap_grow_l2_bucket(pmap_t pm, vaddr_t v
5969 l2b->l2b_l1slot = l1slot; 5924 l2b->l2b_l1slot = l1slot;
5970 l2b->l2b_pa = pmap_kernel_l2ptp_phys; 5925 l2b->l2b_pa = pmap_kernel_l2ptp_phys;
5971 5926
5972 pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; 5927 pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
5973 pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; 5928 pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
5974 } 5929 }
5975 5930
5976 return l2b; 5931 return l2b;
5977} 5932}
5978 5933
5979vaddr_t 5934vaddr_t
5980pmap_growkernel(vaddr_t maxkvaddr) 5935pmap_growkernel(vaddr_t maxkvaddr)
5981{ 5936{
 5937 UVMHIST_FUNC(__func__);
 5938 UVMHIST_CALLARGS(maphist, "growing kernel from %#jx to %#jx\n",
 5939 pmap_curmaxkvaddr, maxkvaddr, 0, 0);
 5940
5982 pmap_t kpm = pmap_kernel(); 5941 pmap_t kpm = pmap_kernel();
5983#ifndef ARM_MMU_EXTENDED 5942#ifndef ARM_MMU_EXTENDED
5984 struct l1_ttable *l1; 5943 struct l1_ttable *l1;
5985#endif 5944#endif
5986 int s; 5945 int s;
5987 5946
5988 if (maxkvaddr <= pmap_curmaxkvaddr) 5947 if (maxkvaddr <= pmap_curmaxkvaddr)
5989 goto out; /* we are OK */ 5948 goto out; /* we are OK */
5990 5949
5991 NPDEBUG(PDB_GROWKERN, 
5992 printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n", 
5993 pmap_curmaxkvaddr, maxkvaddr)); 
5994 
5995 KDASSERT(maxkvaddr <= virtual_end); 5950 KDASSERT(maxkvaddr <= virtual_end);
5996 5951
5997 /* 5952 /*
5998 * whoops! we need to add kernel PTPs 5953 * whoops! we need to add kernel PTPs
5999 */ 5954 */
6000 5955
6001 s = splvm(); /* to be safe */ 5956 s = splvm(); /* to be safe */
6002 mutex_enter(&kpm_lock); 5957 mutex_enter(&kpm_lock);
6003 5958
6004 /* Map 1MB at a time */ 5959 /* Map 1MB at a time */
6005 size_t l1slot = l1pte_index(pmap_curmaxkvaddr); 5960 size_t l1slot = l1pte_index(pmap_curmaxkvaddr);
6006#ifdef ARM_MMU_EXTENDED 5961#ifdef ARM_MMU_EXTENDED
6007 pd_entry_t * const spdep = &kpm->pm_l1[l1slot]; 5962 pd_entry_t * const spdep = &kpm->pm_l1[l1slot];