Sat Jul 4 16:58:11 2020 UTC
Use tlen for the temporary length variable instead of l, which is usually
used for struct lwp *.

No binary changes.


(rin)
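For quick reference, the affected loop in pmap_procwr() after this change reads as below. This is taken from the r1.83 side of the hunk near the end of the diff; the enclosing function and kernel headers are omitted here, and the comments are editorial annotations rather than part of the committed source.

	struct pmap *pm = p->p_vmspace->vm_map.pmap;
	paddr_t pa;
	vaddr_t va, eva;
	int tlen;	/* was "int l", which reads like a struct lwp * */

	for (va = sva; len > 0; va = eva, len -= tlen) {
		eva = uimin(va + len, trunc_page(va + PAGE_SIZE));
		tlen = eva - va;	/* length covered in this iteration */
		if (!pmap_extract(pm, va, &pa))
			continue;
		va = AARCH64_PA_TO_KVA(pa);
		cpu_icache_sync_range(va, tlen);
	}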
diff -r1.82 -r1.83 src/sys/arch/aarch64/aarch64/pmap.c

cvs diff -r1.82 -r1.83 src/sys/arch/aarch64/aarch64/pmap.c

--- src/sys/arch/aarch64/aarch64/pmap.c 2020/07/02 13:01:11 1.82
+++ src/sys/arch/aarch64/aarch64/pmap.c 2020/07/04 16:58:11 1.83
@@ -1,1961 +1,1961 @@
1/* $NetBSD: pmap.c,v 1.82 2020/07/02 13:01:11 rin Exp $ */ 1/* $NetBSD: pmap.c,v 1.83 2020/07/04 16:58:11 rin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> 4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.82 2020/07/02 13:01:11 rin Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.83 2020/07/04 16:58:11 rin Exp $");
31 31
32#include "opt_arm_debug.h" 32#include "opt_arm_debug.h"
33#include "opt_ddb.h" 33#include "opt_ddb.h"
34#include "opt_multiprocessor.h" 34#include "opt_multiprocessor.h"
35#include "opt_pmap.h" 35#include "opt_pmap.h"
36#include "opt_uvmhist.h" 36#include "opt_uvmhist.h"
37 37
38#include <sys/param.h> 38#include <sys/param.h>
39#include <sys/types.h> 39#include <sys/types.h>
40#include <sys/kmem.h> 40#include <sys/kmem.h>
41#include <sys/vmem.h> 41#include <sys/vmem.h>
42#include <sys/atomic.h> 42#include <sys/atomic.h>
43#include <sys/asan.h> 43#include <sys/asan.h>
44 44
45#include <uvm/uvm.h> 45#include <uvm/uvm.h>
46#include <uvm/pmap/pmap_pvt.h> 46#include <uvm/pmap/pmap_pvt.h>
47 47
48#include <aarch64/pmap.h> 48#include <aarch64/pmap.h>
49#include <aarch64/pte.h> 49#include <aarch64/pte.h>
50#include <aarch64/armreg.h> 50#include <aarch64/armreg.h>
51#include <aarch64/cpufunc.h> 51#include <aarch64/cpufunc.h>
52#include <aarch64/locore.h> 52#include <aarch64/locore.h>
53#include <aarch64/machdep.h> 53#include <aarch64/machdep.h>
54#ifdef DDB 54#ifdef DDB
55#include <aarch64/db_machdep.h> 55#include <aarch64/db_machdep.h>
56#include <ddb/db_access.h> 56#include <ddb/db_access.h>
57#endif 57#endif
58 58
59//#define PMAP_PV_DEBUG 59//#define PMAP_PV_DEBUG
60 60
61#ifdef VERBOSE_INIT_ARM 61#ifdef VERBOSE_INIT_ARM
62#define VPRINTF(...) printf(__VA_ARGS__) 62#define VPRINTF(...) printf(__VA_ARGS__)
63#else 63#else
64#define VPRINTF(...) __nothing 64#define VPRINTF(...) __nothing
65#endif 65#endif
66 66
67UVMHIST_DEFINE(pmaphist); 67UVMHIST_DEFINE(pmaphist);
68#ifdef UVMHIST 68#ifdef UVMHIST
69 69
70#ifndef UVMHIST_PMAPHIST_SIZE 70#ifndef UVMHIST_PMAPHIST_SIZE
71#define UVMHIST_PMAPHIST_SIZE (1024 * 4) 71#define UVMHIST_PMAPHIST_SIZE (1024 * 4)
72#endif 72#endif
73 73
74struct kern_history_ent pmaphistbuf[UVMHIST_PMAPHIST_SIZE]; 74struct kern_history_ent pmaphistbuf[UVMHIST_PMAPHIST_SIZE];
75 75
76static void 76static void
77pmap_hist_init(void) 77pmap_hist_init(void)
78{ 78{
79 static bool inited = false; 79 static bool inited = false;
80 if (inited == false) { 80 if (inited == false) {
81 UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf); 81 UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
82 inited = true; 82 inited = true;
83 } 83 }
84} 84}
85#define PMAP_HIST_INIT() pmap_hist_init() 85#define PMAP_HIST_INIT() pmap_hist_init()
86 86
87#else /* UVMHIST */ 87#else /* UVMHIST */
88 88
89#define PMAP_HIST_INIT() ((void)0) 89#define PMAP_HIST_INIT() ((void)0)
90 90
91#endif /* UVMHIST */ 91#endif /* UVMHIST */
92 92
93 93
94#ifdef PMAPCOUNTERS 94#ifdef PMAPCOUNTERS
95#define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0) 95#define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0)
96#define PMAP_COUNTER(name, desc) \ 96#define PMAP_COUNTER(name, desc) \
97 struct evcnt pmap_evcnt_##name = \ 97 struct evcnt pmap_evcnt_##name = \
98 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \ 98 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
99 EVCNT_ATTACH_STATIC(pmap_evcnt_##name) 99 EVCNT_ATTACH_STATIC(pmap_evcnt_##name)
100 100
101PMAP_COUNTER(pdp_alloc_boot, "page table page allocate (uvm_pageboot_alloc)"); 101PMAP_COUNTER(pdp_alloc_boot, "page table page allocate (uvm_pageboot_alloc)");
102PMAP_COUNTER(pdp_alloc, "page table page allocate (uvm_pagealloc)"); 102PMAP_COUNTER(pdp_alloc, "page table page allocate (uvm_pagealloc)");
103PMAP_COUNTER(pdp_free, "page table page free (uvm_pagefree)"); 103PMAP_COUNTER(pdp_free, "page table page free (uvm_pagefree)");
104 104
105PMAP_COUNTER(pv_enter, "pv_entry fill"); 105PMAP_COUNTER(pv_enter, "pv_entry fill");
106PMAP_COUNTER(pv_remove_dyn, "pv_entry free and unlink dynamic"); 106PMAP_COUNTER(pv_remove_dyn, "pv_entry free and unlink dynamic");
107PMAP_COUNTER(pv_remove_emb, "pv_entry clear embedded"); 107PMAP_COUNTER(pv_remove_emb, "pv_entry clear embedded");
108PMAP_COUNTER(pv_remove_nopv, "no pv_entry found when removing pv"); 108PMAP_COUNTER(pv_remove_nopv, "no pv_entry found when removing pv");
109 109
110PMAP_COUNTER(activate, "pmap_activate call"); 110PMAP_COUNTER(activate, "pmap_activate call");
111PMAP_COUNTER(deactivate, "pmap_deactivate call"); 111PMAP_COUNTER(deactivate, "pmap_deactivate call");
112PMAP_COUNTER(create, "pmap_create call"); 112PMAP_COUNTER(create, "pmap_create call");
113PMAP_COUNTER(destroy, "pmap_destroy call"); 113PMAP_COUNTER(destroy, "pmap_destroy call");
114 114
115PMAP_COUNTER(page_protect, "pmap_page_protect call"); 115PMAP_COUNTER(page_protect, "pmap_page_protect call");
116PMAP_COUNTER(protect, "pmap_protect call"); 116PMAP_COUNTER(protect, "pmap_protect call");
117PMAP_COUNTER(protect_remove_fallback, "pmap_protect with no-read"); 117PMAP_COUNTER(protect_remove_fallback, "pmap_protect with no-read");
118PMAP_COUNTER(protect_none, "pmap_protect non-exists pages"); 118PMAP_COUNTER(protect_none, "pmap_protect non-exists pages");
119PMAP_COUNTER(protect_managed, "pmap_protect managed pages"); 119PMAP_COUNTER(protect_managed, "pmap_protect managed pages");
120PMAP_COUNTER(protect_unmanaged, "pmap_protect unmanaged pages"); 120PMAP_COUNTER(protect_unmanaged, "pmap_protect unmanaged pages");
121PMAP_COUNTER(protect_pvmanaged, "pmap_protect pv-tracked unmanaged pages"); 121PMAP_COUNTER(protect_pvmanaged, "pmap_protect pv-tracked unmanaged pages");
122 122
123PMAP_COUNTER(clear_modify, "pmap_clear_modify call"); 123PMAP_COUNTER(clear_modify, "pmap_clear_modify call");
124PMAP_COUNTER(clear_modify_pages, "pmap_clear_modify pages"); 124PMAP_COUNTER(clear_modify_pages, "pmap_clear_modify pages");
125PMAP_COUNTER(clear_reference, "pmap_clear_reference call"); 125PMAP_COUNTER(clear_reference, "pmap_clear_reference call");
126PMAP_COUNTER(clear_reference_pages, "pmap_clear_reference pages"); 126PMAP_COUNTER(clear_reference_pages, "pmap_clear_reference pages");
127 127
128PMAP_COUNTER(fixup_referenced, "page reference emulations"); 128PMAP_COUNTER(fixup_referenced, "page reference emulations");
129PMAP_COUNTER(fixup_modified, "page modification emulations"); 129PMAP_COUNTER(fixup_modified, "page modification emulations");
130 130
131PMAP_COUNTER(kern_mappings_bad, "kernel pages mapped (bad color)"); 131PMAP_COUNTER(kern_mappings_bad, "kernel pages mapped (bad color)");
132PMAP_COUNTER(kern_mappings_bad_wired, "kernel pages mapped (wired bad color)"); 132PMAP_COUNTER(kern_mappings_bad_wired, "kernel pages mapped (wired bad color)");
133PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color, not wired)"); 133PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color, not wired)");
134PMAP_COUNTER(user_mappings_bad_wired, "user pages mapped (bad color, wired)"); 134PMAP_COUNTER(user_mappings_bad_wired, "user pages mapped (bad color, wired)");
135PMAP_COUNTER(kern_mappings, "kernel pages mapped"); 135PMAP_COUNTER(kern_mappings, "kernel pages mapped");
136PMAP_COUNTER(user_mappings, "user pages mapped"); 136PMAP_COUNTER(user_mappings, "user pages mapped");
137PMAP_COUNTER(user_mappings_changed, "user mapping changed"); 137PMAP_COUNTER(user_mappings_changed, "user mapping changed");
138PMAP_COUNTER(kern_mappings_changed, "kernel mapping changed"); 138PMAP_COUNTER(kern_mappings_changed, "kernel mapping changed");
139PMAP_COUNTER(uncached_mappings, "uncached pages mapped"); 139PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
140PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped"); 140PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
141PMAP_COUNTER(pvmanaged_mappings, "pv-tracked unmanaged pages mapped"); 141PMAP_COUNTER(pvmanaged_mappings, "pv-tracked unmanaged pages mapped");
142PMAP_COUNTER(managed_mappings, "managed pages mapped"); 142PMAP_COUNTER(managed_mappings, "managed pages mapped");
143PMAP_COUNTER(mappings, "pages mapped (including remapped)"); 143PMAP_COUNTER(mappings, "pages mapped (including remapped)");
144PMAP_COUNTER(remappings, "pages remapped"); 144PMAP_COUNTER(remappings, "pages remapped");
145 145
146PMAP_COUNTER(pv_entry_cannotalloc, "pv_entry allocation failure"); 146PMAP_COUNTER(pv_entry_cannotalloc, "pv_entry allocation failure");
147 147
148PMAP_COUNTER(unwire, "pmap_unwire call"); 148PMAP_COUNTER(unwire, "pmap_unwire call");
149PMAP_COUNTER(unwire_failure, "pmap_unwire failure"); 149PMAP_COUNTER(unwire_failure, "pmap_unwire failure");
150 150
151#else /* PMAPCOUNTERS */ 151#else /* PMAPCOUNTERS */
152#define PMAP_COUNT(name) __nothing 152#define PMAP_COUNT(name) __nothing
153#endif /* PMAPCOUNTERS */ 153#endif /* PMAPCOUNTERS */
154 154
155/* 155/*
156 * invalidate TLB entry for ASID and VA. 156 * invalidate TLB entry for ASID and VA.
157 * `ll' invalidates only the Last Level (usually L3) of TLB entry 157 * `ll' invalidates only the Last Level (usually L3) of TLB entry
158 */ 158 */
159#define AARCH64_TLBI_BY_ASID_VA(asid, va, ll) \ 159#define AARCH64_TLBI_BY_ASID_VA(asid, va, ll) \
160 do { \ 160 do { \
161 if ((ll)) { \ 161 if ((ll)) { \
162 if ((asid) == 0) \ 162 if ((asid) == 0) \
163 aarch64_tlbi_by_va_ll((va)); \ 163 aarch64_tlbi_by_va_ll((va)); \
164 else \ 164 else \
165 aarch64_tlbi_by_asid_va_ll((asid), (va)); \ 165 aarch64_tlbi_by_asid_va_ll((asid), (va)); \
166 } else { \ 166 } else { \
167 if ((asid) == 0) \ 167 if ((asid) == 0) \
168 aarch64_tlbi_by_va((va)); \ 168 aarch64_tlbi_by_va((va)); \
169 else \ 169 else \
170 aarch64_tlbi_by_asid_va((asid), (va)); \ 170 aarch64_tlbi_by_asid_va((asid), (va)); \
171 } \ 171 } \
172 } while (0/*CONSTCOND*/) 172 } while (0/*CONSTCOND*/)
173 173
174/* 174/*
175 * require access permission in pte to invalidate instruction cache. 175 * require access permission in pte to invalidate instruction cache.
176 * change the pte to accessible temporarly before cpu_icache_sync_range(). 176 * change the pte to accessible temporarly before cpu_icache_sync_range().
177 * this macro modifies PTE (*ptep). need to update PTE after this. 177 * this macro modifies PTE (*ptep). need to update PTE after this.
178 */ 178 */
179#define PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, ll) \ 179#define PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, ll) \
180 do { \ 180 do { \
181 atomic_swap_64((ptep), (pte) | LX_BLKPAG_AF); \ 181 atomic_swap_64((ptep), (pte) | LX_BLKPAG_AF); \
182 AARCH64_TLBI_BY_ASID_VA((pm)->pm_asid, (va), (ll)); \ 182 AARCH64_TLBI_BY_ASID_VA((pm)->pm_asid, (va), (ll)); \
183 cpu_icache_sync_range((va), PAGE_SIZE); \ 183 cpu_icache_sync_range((va), PAGE_SIZE); \
184 } while (0/*CONSTCOND*/) 184 } while (0/*CONSTCOND*/)
185 185
186#define VM_PAGE_TO_PP(pg) (&(pg)->mdpage.mdpg_pp) 186#define VM_PAGE_TO_PP(pg) (&(pg)->mdpage.mdpg_pp)
187 187
188#define L3INDEXMASK (L3_SIZE * Ln_ENTRIES - 1) 188#define L3INDEXMASK (L3_SIZE * Ln_ENTRIES - 1)
189#define PDPSWEEP_TRIGGER 512 189#define PDPSWEEP_TRIGGER 512
190 190
191static pt_entry_t *_pmap_pte_lookup_l3(struct pmap *, vaddr_t); 191static pt_entry_t *_pmap_pte_lookup_l3(struct pmap *, vaddr_t);
192static pt_entry_t *_pmap_pte_lookup_bs(struct pmap *, vaddr_t, vsize_t *); 192static pt_entry_t *_pmap_pte_lookup_bs(struct pmap *, vaddr_t, vsize_t *);
193static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool); 193static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool);
194static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int); 194static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int);
195static void _pmap_remove(struct pmap *, vaddr_t, vaddr_t, bool, 195static void _pmap_remove(struct pmap *, vaddr_t, vaddr_t, bool,
196 struct pv_entry **); 196 struct pv_entry **);
197static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool); 197static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool);
198 198
199static struct pmap kernel_pmap __cacheline_aligned; 199static struct pmap kernel_pmap __cacheline_aligned;
200 200
201struct pmap * const kernel_pmap_ptr = &kernel_pmap; 201struct pmap * const kernel_pmap_ptr = &kernel_pmap;
202static vaddr_t pmap_maxkvaddr; 202static vaddr_t pmap_maxkvaddr;
203 203
204vaddr_t virtual_avail, virtual_end; 204vaddr_t virtual_avail, virtual_end;
205vaddr_t virtual_devmap_addr; 205vaddr_t virtual_devmap_addr;
206bool pmap_devmap_bootstrap_done = false; 206bool pmap_devmap_bootstrap_done = false;
207 207
208static struct pool_cache _pmap_cache; 208static struct pool_cache _pmap_cache;
209static struct pool_cache _pmap_pv_pool; 209static struct pool_cache _pmap_pv_pool;
210 210
211/* Set to LX_BLKPAG_GP if supported. */ 211/* Set to LX_BLKPAG_GP if supported. */
212uint64_t pmap_attr_gp = 0; 212uint64_t pmap_attr_gp = 0;
213 213
214static inline void 214static inline void
215pmap_pv_lock(struct pmap_page *pp) 215pmap_pv_lock(struct pmap_page *pp)
216{ 216{
217 217
218 mutex_spin_enter(&pp->pp_pvlock); 218 mutex_spin_enter(&pp->pp_pvlock);
219} 219}
220 220
221static inline void 221static inline void
222pmap_pv_unlock(struct pmap_page *pp) 222pmap_pv_unlock(struct pmap_page *pp)
223{ 223{
224 224
225 mutex_spin_exit(&pp->pp_pvlock); 225 mutex_spin_exit(&pp->pp_pvlock);
226} 226}
227 227
228 228
229static inline void 229static inline void
230pm_lock(struct pmap *pm) 230pm_lock(struct pmap *pm)
231{ 231{
232 mutex_spin_enter(&pm->pm_lock); 232 mutex_spin_enter(&pm->pm_lock);
233} 233}
234 234
235static inline void 235static inline void
236pm_unlock(struct pmap *pm) 236pm_unlock(struct pmap *pm)
237{ 237{
238 mutex_spin_exit(&pm->pm_lock); 238 mutex_spin_exit(&pm->pm_lock);
239} 239}
240 240
241static bool 241static bool
242pm_reverse_lock(struct pmap *pm, struct pmap_page *pp) 242pm_reverse_lock(struct pmap *pm, struct pmap_page *pp)
243{ 243{
244 244
245 KASSERT(mutex_owned(&pp->pp_pvlock)); 245 KASSERT(mutex_owned(&pp->pp_pvlock));
246 246
247 if (__predict_true(mutex_tryenter(&pm->pm_lock))) 247 if (__predict_true(mutex_tryenter(&pm->pm_lock)))
248 return true; 248 return true;
249 249
250 if (pm != pmap_kernel()) 250 if (pm != pmap_kernel())
251 pmap_reference(pm); 251 pmap_reference(pm);
252 mutex_spin_exit(&pp->pp_pvlock); 252 mutex_spin_exit(&pp->pp_pvlock);
253 mutex_spin_enter(&pm->pm_lock); 253 mutex_spin_enter(&pm->pm_lock);
254 /* nothing, just wait for lock */ 254 /* nothing, just wait for lock */
255 mutex_spin_exit(&pm->pm_lock); 255 mutex_spin_exit(&pm->pm_lock);
256 if (pm != pmap_kernel()) 256 if (pm != pmap_kernel())
257 pmap_destroy(pm); 257 pmap_destroy(pm);
258 mutex_spin_enter(&pp->pp_pvlock); 258 mutex_spin_enter(&pp->pp_pvlock);
259 return false; 259 return false;
260} 260}
261 261
262static inline struct pmap_page * 262static inline struct pmap_page *
263phys_to_pp(paddr_t pa) 263phys_to_pp(paddr_t pa)
264{ 264{
265 struct vm_page *pg; 265 struct vm_page *pg;
266 266
267 pg = PHYS_TO_VM_PAGE(pa); 267 pg = PHYS_TO_VM_PAGE(pa);
268 if (pg != NULL) 268 if (pg != NULL)
269 return VM_PAGE_TO_PP(pg); 269 return VM_PAGE_TO_PP(pg);
270 270
271#ifdef __HAVE_PMAP_PV_TRACK 271#ifdef __HAVE_PMAP_PV_TRACK
272 return pmap_pv_tracked(pa); 272 return pmap_pv_tracked(pa);
273#else 273#else
274 return NULL; 274 return NULL;
275#endif /* __HAVE_PMAP_PV_TRACK */ 275#endif /* __HAVE_PMAP_PV_TRACK */
276} 276}
277 277
278#define IN_RANGE(va,sta,end) (((sta) <= (va)) && ((va) < (end))) 278#define IN_RANGE(va,sta,end) (((sta) <= (va)) && ((va) < (end)))
279 279
280#define IN_KSEG_ADDR(va) \ 280#define IN_KSEG_ADDR(va) \
281 IN_RANGE((va), AARCH64_KSEG_START, AARCH64_KSEG_END) 281 IN_RANGE((va), AARCH64_KSEG_START, AARCH64_KSEG_END)
282 282
283#ifdef DIAGNOSTIC 283#ifdef DIAGNOSTIC
284#define KASSERT_PM_ADDR(pm,va) \ 284#define KASSERT_PM_ADDR(pm,va) \
285 do { \ 285 do { \
286 int space = aarch64_addressspace(va); \ 286 int space = aarch64_addressspace(va); \
287 if ((pm) == pmap_kernel()) { \ 287 if ((pm) == pmap_kernel()) { \
288 KASSERTMSG(space == AARCH64_ADDRSPACE_UPPER, \ 288 KASSERTMSG(space == AARCH64_ADDRSPACE_UPPER, \
289 "%s: kernel pm %p: va=%016lx" \ 289 "%s: kernel pm %p: va=%016lx" \
290 " is out of upper address space\n", \ 290 " is out of upper address space\n", \
291 __func__, (pm), (va)); \ 291 __func__, (pm), (va)); \
292 KASSERTMSG(IN_RANGE((va), VM_MIN_KERNEL_ADDRESS, \ 292 KASSERTMSG(IN_RANGE((va), VM_MIN_KERNEL_ADDRESS, \
293 VM_MAX_KERNEL_ADDRESS), \ 293 VM_MAX_KERNEL_ADDRESS), \
294 "%s: kernel pm %p: va=%016lx" \ 294 "%s: kernel pm %p: va=%016lx" \
295 " is not kernel address\n", \ 295 " is not kernel address\n", \
296 __func__, (pm), (va)); \ 296 __func__, (pm), (va)); \
297 } else { \ 297 } else { \
298 KASSERTMSG(space == AARCH64_ADDRSPACE_LOWER, \ 298 KASSERTMSG(space == AARCH64_ADDRSPACE_LOWER, \
299 "%s: user pm %p: va=%016lx" \ 299 "%s: user pm %p: va=%016lx" \
300 " is out of lower address space\n", \ 300 " is out of lower address space\n", \
301 __func__, (pm), (va)); \ 301 __func__, (pm), (va)); \
302 KASSERTMSG(IN_RANGE((va), \ 302 KASSERTMSG(IN_RANGE((va), \
303 VM_MIN_ADDRESS, VM_MAX_ADDRESS), \ 303 VM_MIN_ADDRESS, VM_MAX_ADDRESS), \
304 "%s: user pm %p: va=%016lx" \ 304 "%s: user pm %p: va=%016lx" \
305 " is not user address\n", \ 305 " is not user address\n", \
306 __func__, (pm), (va)); \ 306 __func__, (pm), (va)); \
307 } \ 307 } \
308 } while (0 /* CONSTCOND */) 308 } while (0 /* CONSTCOND */)
309#else /* DIAGNOSTIC */ 309#else /* DIAGNOSTIC */
310#define KASSERT_PM_ADDR(pm,va) 310#define KASSERT_PM_ADDR(pm,va)
311#endif /* DIAGNOSTIC */ 311#endif /* DIAGNOSTIC */
312 312
313 313
314static const struct pmap_devmap *pmap_devmap_table; 314static const struct pmap_devmap *pmap_devmap_table;
315 315
316static vsize_t 316static vsize_t
317pmap_map_chunk(vaddr_t va, paddr_t pa, vsize_t size, 317pmap_map_chunk(vaddr_t va, paddr_t pa, vsize_t size,
318 vm_prot_t prot, u_int flags) 318 vm_prot_t prot, u_int flags)
319{ 319{
320 pt_entry_t attr; 320 pt_entry_t attr;
321 vsize_t resid = round_page(size); 321 vsize_t resid = round_page(size);
322 322
323 attr = _pmap_pte_adjust_prot(0, prot, VM_PROT_ALL, false); 323 attr = _pmap_pte_adjust_prot(0, prot, VM_PROT_ALL, false);
324 attr = _pmap_pte_adjust_cacheflags(attr, flags); 324 attr = _pmap_pte_adjust_cacheflags(attr, flags);
325 pmapboot_enter_range(va, pa, resid, attr, 325 pmapboot_enter_range(va, pa, resid, attr,
326 PMAPBOOT_ENTER_NOOVERWRITE, bootpage_alloc, printf); 326 PMAPBOOT_ENTER_NOOVERWRITE, bootpage_alloc, printf);
327 aarch64_tlbi_all(); 327 aarch64_tlbi_all();
328 328
329 return resid; 329 return resid;
330} 330}
331 331
332void 332void
333pmap_devmap_register(const struct pmap_devmap *table) 333pmap_devmap_register(const struct pmap_devmap *table)
334{ 334{
335 pmap_devmap_table = table; 335 pmap_devmap_table = table;
336} 336}
337 337
338void 338void
339pmap_devmap_bootstrap(vaddr_t l0pt, const struct pmap_devmap *table) 339pmap_devmap_bootstrap(vaddr_t l0pt, const struct pmap_devmap *table)
340{ 340{
341 vaddr_t va; 341 vaddr_t va;
342 int i; 342 int i;
343 343
344 pmap_devmap_register(table); 344 pmap_devmap_register(table);
345 345
346 VPRINTF("%s:\n", __func__); 346 VPRINTF("%s:\n", __func__);
347 for (i = 0; table[i].pd_size != 0; i++) { 347 for (i = 0; table[i].pd_size != 0; i++) {
348 VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n", 348 VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n",
349 table[i].pd_pa, 349 table[i].pd_pa,
350 table[i].pd_pa + table[i].pd_size - 1, 350 table[i].pd_pa + table[i].pd_size - 1,
351 table[i].pd_va); 351 table[i].pd_va);
352 va = table[i].pd_va; 352 va = table[i].pd_va;
353 353
354 KASSERT((VM_KERNEL_IO_ADDRESS <= va) && 354 KASSERT((VM_KERNEL_IO_ADDRESS <= va) &&
355 (va < (VM_KERNEL_IO_ADDRESS + VM_KERNEL_IO_SIZE))); 355 (va < (VM_KERNEL_IO_ADDRESS + VM_KERNEL_IO_SIZE)));
356 356
357 /* update and check virtual_devmap_addr */ 357 /* update and check virtual_devmap_addr */
358 if (virtual_devmap_addr == 0 || virtual_devmap_addr > va) { 358 if (virtual_devmap_addr == 0 || virtual_devmap_addr > va) {
359 virtual_devmap_addr = va; 359 virtual_devmap_addr = va;
360 } 360 }
361 361
362 pmap_map_chunk( 362 pmap_map_chunk(
363 table[i].pd_va, 363 table[i].pd_va,
364 table[i].pd_pa, 364 table[i].pd_pa,
365 table[i].pd_size, 365 table[i].pd_size,
366 table[i].pd_prot, 366 table[i].pd_prot,
367 table[i].pd_flags); 367 table[i].pd_flags);
368 } 368 }
369 369
370 pmap_devmap_bootstrap_done = true; 370 pmap_devmap_bootstrap_done = true;
371} 371}
372 372
373const struct pmap_devmap * 373const struct pmap_devmap *
374pmap_devmap_find_va(vaddr_t va, vsize_t size) 374pmap_devmap_find_va(vaddr_t va, vsize_t size)
375{ 375{
376 paddr_t endva; 376 paddr_t endva;
377 int i; 377 int i;
378 378
379 if (pmap_devmap_table == NULL) 379 if (pmap_devmap_table == NULL)
380 return NULL; 380 return NULL;
381 381
382 endva = va + size; 382 endva = va + size;
383 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 383 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
384 if ((va >= pmap_devmap_table[i].pd_va) && 384 if ((va >= pmap_devmap_table[i].pd_va) &&
385 (endva <= pmap_devmap_table[i].pd_va + 385 (endva <= pmap_devmap_table[i].pd_va +
386 pmap_devmap_table[i].pd_size)) 386 pmap_devmap_table[i].pd_size))
387 return &pmap_devmap_table[i]; 387 return &pmap_devmap_table[i];
388 } 388 }
389 return NULL; 389 return NULL;
390} 390}
391 391
392const struct pmap_devmap * 392const struct pmap_devmap *
393pmap_devmap_find_pa(paddr_t pa, psize_t size) 393pmap_devmap_find_pa(paddr_t pa, psize_t size)
394{ 394{
395 paddr_t endpa; 395 paddr_t endpa;
396 int i; 396 int i;
397 397
398 if (pmap_devmap_table == NULL) 398 if (pmap_devmap_table == NULL)
399 return NULL; 399 return NULL;
400 400
401 endpa = pa + size; 401 endpa = pa + size;
402 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 402 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
403 if (pa >= pmap_devmap_table[i].pd_pa && 403 if (pa >= pmap_devmap_table[i].pd_pa &&
404 (endpa <= pmap_devmap_table[i].pd_pa + 404 (endpa <= pmap_devmap_table[i].pd_pa +
405 pmap_devmap_table[i].pd_size)) 405 pmap_devmap_table[i].pd_size))
406 return (&pmap_devmap_table[i]); 406 return (&pmap_devmap_table[i]);
407 } 407 }
408 return NULL; 408 return NULL;
409} 409}
410 410
411vaddr_t 411vaddr_t
412pmap_devmap_phystov(paddr_t pa) 412pmap_devmap_phystov(paddr_t pa)
413{ 413{
414 const struct pmap_devmap *table; 414 const struct pmap_devmap *table;
415 paddr_t offset; 415 paddr_t offset;
416 416
417 table = pmap_devmap_find_pa(pa, 0); 417 table = pmap_devmap_find_pa(pa, 0);
418 if (table == NULL) 418 if (table == NULL)
419 return 0; 419 return 0;
420 420
421 offset = pa - table->pd_pa; 421 offset = pa - table->pd_pa;
422 return table->pd_va + offset; 422 return table->pd_va + offset;
423} 423}
424 424
425vaddr_t 425vaddr_t
426pmap_devmap_vtophys(paddr_t va) 426pmap_devmap_vtophys(paddr_t va)
427{ 427{
428 const struct pmap_devmap *table; 428 const struct pmap_devmap *table;
429 vaddr_t offset; 429 vaddr_t offset;
430 430
431 table = pmap_devmap_find_va(va, 0); 431 table = pmap_devmap_find_va(va, 0);
432 if (table == NULL) 432 if (table == NULL)
433 return 0; 433 return 0;
434 434
435 offset = va - table->pd_va; 435 offset = va - table->pd_va;
436 return table->pd_pa + offset; 436 return table->pd_pa + offset;
437} 437}
438 438
439void 439void
440pmap_bootstrap(vaddr_t vstart, vaddr_t vend) 440pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
441{ 441{
442 struct pmap *kpm; 442 struct pmap *kpm;
443 pd_entry_t *l0; 443 pd_entry_t *l0;
444 paddr_t l0pa; 444 paddr_t l0pa;
445 445
446 PMAP_HIST_INIT(); /* init once */ 446 PMAP_HIST_INIT(); /* init once */
447 447
448 UVMHIST_FUNC(__func__); 448 UVMHIST_FUNC(__func__);
449 UVMHIST_CALLED(pmaphist); 449 UVMHIST_CALLED(pmaphist);
450 450
451 uvmexp.ncolors = aarch64_cache_vindexsize / PAGE_SIZE; 451 uvmexp.ncolors = aarch64_cache_vindexsize / PAGE_SIZE;
452 452
453 /* devmap already uses last of va? */ 453 /* devmap already uses last of va? */
454 if (virtual_devmap_addr != 0 && virtual_devmap_addr < vend) 454 if (virtual_devmap_addr != 0 && virtual_devmap_addr < vend)
455 vend = virtual_devmap_addr; 455 vend = virtual_devmap_addr;
456 456
457 virtual_avail = vstart; 457 virtual_avail = vstart;
458 virtual_end = vend; 458 virtual_end = vend;
459 pmap_maxkvaddr = vstart; 459 pmap_maxkvaddr = vstart;
460 460
461 aarch64_tlbi_all(); 461 aarch64_tlbi_all();
462 462
463 l0pa = reg_ttbr1_el1_read(); 463 l0pa = reg_ttbr1_el1_read();
464 l0 = (void *)AARCH64_PA_TO_KVA(l0pa); 464 l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
465 465
466 memset(&kernel_pmap, 0, sizeof(kernel_pmap)); 466 memset(&kernel_pmap, 0, sizeof(kernel_pmap));
467 kpm = pmap_kernel(); 467 kpm = pmap_kernel();
468 kpm->pm_asid = 0; 468 kpm->pm_asid = 0;
469 kpm->pm_refcnt = 1; 469 kpm->pm_refcnt = 1;
470 kpm->pm_idlepdp = 0; 470 kpm->pm_idlepdp = 0;
471 kpm->pm_l0table = l0; 471 kpm->pm_l0table = l0;
472 kpm->pm_l0table_pa = l0pa; 472 kpm->pm_l0table_pa = l0pa;
473 kpm->pm_activated = true; 473 kpm->pm_activated = true;
474 LIST_INIT(&kpm->pm_vmlist); 474 LIST_INIT(&kpm->pm_vmlist);
475 mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_VM); 475 mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_VM);
476 476
477 CTASSERT(sizeof(kpm->pm_stats.wired_count) == sizeof(long)); 477 CTASSERT(sizeof(kpm->pm_stats.wired_count) == sizeof(long));
478 CTASSERT(sizeof(kpm->pm_stats.resident_count) == sizeof(long)); 478 CTASSERT(sizeof(kpm->pm_stats.resident_count) == sizeof(long));
479#define PMSTAT_INC_WIRED_COUNT(pm) do { \ 479#define PMSTAT_INC_WIRED_COUNT(pm) do { \
480 KASSERT(mutex_owned(&(pm)->pm_lock)); \ 480 KASSERT(mutex_owned(&(pm)->pm_lock)); \
481 (pm)->pm_stats.wired_count++; \ 481 (pm)->pm_stats.wired_count++; \
482} while (/* CONSTCOND */ 0); 482} while (/* CONSTCOND */ 0);
483#define PMSTAT_DEC_WIRED_COUNT(pm) do{ \ 483#define PMSTAT_DEC_WIRED_COUNT(pm) do{ \
484 KASSERT(mutex_owned(&(pm)->pm_lock)); \ 484 KASSERT(mutex_owned(&(pm)->pm_lock)); \
485 (pm)->pm_stats.wired_count--; \ 485 (pm)->pm_stats.wired_count--; \
486} while (/* CONSTCOND */ 0); 486} while (/* CONSTCOND */ 0);
487#define PMSTAT_INC_RESIDENT_COUNT(pm) do { \ 487#define PMSTAT_INC_RESIDENT_COUNT(pm) do { \
488 KASSERT(mutex_owned(&(pm)->pm_lock)); \ 488 KASSERT(mutex_owned(&(pm)->pm_lock)); \
489 (pm)->pm_stats.resident_count++; \ 489 (pm)->pm_stats.resident_count++; \
490} while (/* CONSTCOND */ 0); 490} while (/* CONSTCOND */ 0);
491#define PMSTAT_DEC_RESIDENT_COUNT(pm) do { \ 491#define PMSTAT_DEC_RESIDENT_COUNT(pm) do { \
492 KASSERT(mutex_owned(&(pm)->pm_lock)); \ 492 KASSERT(mutex_owned(&(pm)->pm_lock)); \
493 (pm)->pm_stats.resident_count--; \ 493 (pm)->pm_stats.resident_count--; \
494} while (/* CONSTCOND */ 0); 494} while (/* CONSTCOND */ 0);
495} 495}
496 496
497inline static int 497inline static int
498_pmap_color(vaddr_t addr) /* or paddr_t */ 498_pmap_color(vaddr_t addr) /* or paddr_t */
499{ 499{
500 return (addr >> PGSHIFT) & (uvmexp.ncolors - 1); 500 return (addr >> PGSHIFT) & (uvmexp.ncolors - 1);
501} 501}
502 502
503static int 503static int
504_pmap_pmap_ctor(void *arg, void *v, int flags) 504_pmap_pmap_ctor(void *arg, void *v, int flags)
505{ 505{
506 memset(v, 0, sizeof(struct pmap)); 506 memset(v, 0, sizeof(struct pmap));
507 return 0; 507 return 0;
508} 508}
509 509
510static int 510static int
511_pmap_pv_ctor(void *arg, void *v, int flags) 511_pmap_pv_ctor(void *arg, void *v, int flags)
512{ 512{
513 memset(v, 0, sizeof(struct pv_entry)); 513 memset(v, 0, sizeof(struct pv_entry));
514 return 0; 514 return 0;
515} 515}
516 516
517void 517void
518pmap_init(void) 518pmap_init(void)
519{ 519{
520 520
521 pool_cache_bootstrap(&_pmap_cache, sizeof(struct pmap), 521 pool_cache_bootstrap(&_pmap_cache, sizeof(struct pmap),
522 coherency_unit, 0, 0, "pmappl", NULL, IPL_NONE, _pmap_pmap_ctor, 522 coherency_unit, 0, 0, "pmappl", NULL, IPL_NONE, _pmap_pmap_ctor,
523 NULL, NULL); 523 NULL, NULL);
524 524
525 pool_cache_bootstrap(&_pmap_pv_pool, sizeof(struct pv_entry), 525 pool_cache_bootstrap(&_pmap_pv_pool, sizeof(struct pv_entry),
526 32, 0, PR_LARGECACHE, "pvpl", NULL, IPL_NONE, _pmap_pv_ctor, 526 32, 0, PR_LARGECACHE, "pvpl", NULL, IPL_NONE, _pmap_pv_ctor,
527 NULL, NULL); 527 NULL, NULL);
528} 528}
529 529
530void 530void
531pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) 531pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
532{ 532{
533 *vstartp = virtual_avail; 533 *vstartp = virtual_avail;
534 *vendp = virtual_end; 534 *vendp = virtual_end;
535} 535}
536 536
537vaddr_t 537vaddr_t
538pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) 538pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
539{ 539{
540 int npage; 540 int npage;
541 paddr_t pa; 541 paddr_t pa;
542 vaddr_t va; 542 vaddr_t va;
543 psize_t bank_npage; 543 psize_t bank_npage;
544 uvm_physseg_t bank; 544 uvm_physseg_t bank;
545 545
546 UVMHIST_FUNC(__func__); 546 UVMHIST_FUNC(__func__);
547 UVMHIST_CALLED(pmaphist); 547 UVMHIST_CALLED(pmaphist);
548 548
549 UVMHIST_LOG(pmaphist, "size=%llu, *vstartp=%llx, *vendp=%llx", 549 UVMHIST_LOG(pmaphist, "size=%llu, *vstartp=%llx, *vendp=%llx",
550 size, *vstartp, *vendp, 0); 550 size, *vstartp, *vendp, 0);
551 551
552 size = round_page(size); 552 size = round_page(size);
553 npage = atop(size); 553 npage = atop(size);
554 554
555 for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank); 555 for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank);
556 bank = uvm_physseg_get_next(bank)) { 556 bank = uvm_physseg_get_next(bank)) {
557 557
558 bank_npage = uvm_physseg_get_avail_end(bank) - 558 bank_npage = uvm_physseg_get_avail_end(bank) -
559 uvm_physseg_get_avail_start(bank); 559 uvm_physseg_get_avail_start(bank);
560 if (npage <= bank_npage) 560 if (npage <= bank_npage)
561 break; 561 break;
562 } 562 }
563 563
564 if (!uvm_physseg_valid_p(bank)) { 564 if (!uvm_physseg_valid_p(bank)) {
565 panic("%s: no memory", __func__); 565 panic("%s: no memory", __func__);
566 } 566 }
567 567
568 /* Steal pages */ 568 /* Steal pages */
569 pa = ptoa(uvm_physseg_get_avail_start(bank)); 569 pa = ptoa(uvm_physseg_get_avail_start(bank));
570 va = AARCH64_PA_TO_KVA(pa); 570 va = AARCH64_PA_TO_KVA(pa);
571 uvm_physseg_unplug(atop(pa), npage); 571 uvm_physseg_unplug(atop(pa), npage);
572 572
573 for (; npage > 0; npage--, pa += PAGE_SIZE) 573 for (; npage > 0; npage--, pa += PAGE_SIZE)
574 pmap_zero_page(pa); 574 pmap_zero_page(pa);
575 575
576 return va; 576 return va;
577} 577}
578 578
579void 579void
580pmap_reference(struct pmap *pm) 580pmap_reference(struct pmap *pm)
581{ 581{
582 atomic_inc_uint(&pm->pm_refcnt); 582 atomic_inc_uint(&pm->pm_refcnt);
583} 583}
584 584
585paddr_t 585paddr_t
586pmap_alloc_pdp(struct pmap *pm, struct vm_page **pgp, int flags, bool waitok) 586pmap_alloc_pdp(struct pmap *pm, struct vm_page **pgp, int flags, bool waitok)
587{ 587{
588 paddr_t pa; 588 paddr_t pa;
589 struct vm_page *pg; 589 struct vm_page *pg;
590 590
591 UVMHIST_FUNC(__func__); 591 UVMHIST_FUNC(__func__);
592 UVMHIST_CALLED(pmaphist); 592 UVMHIST_CALLED(pmaphist);
593 593
594 if (uvm.page_init_done) { 594 if (uvm.page_init_done) {
595 int aflags = ((flags & PMAP_CANFAIL) ? 0 : UVM_PGA_USERESERVE) | 595 int aflags = ((flags & PMAP_CANFAIL) ? 0 : UVM_PGA_USERESERVE) |
596 UVM_PGA_ZERO; 596 UVM_PGA_ZERO;
597 retry: 597 retry:
598 pg = uvm_pagealloc(NULL, 0, NULL, aflags); 598 pg = uvm_pagealloc(NULL, 0, NULL, aflags);
599 if (pg == NULL) { 599 if (pg == NULL) {
600 if (waitok) { 600 if (waitok) {
601 uvm_wait("pmap_alloc_pdp"); 601 uvm_wait("pmap_alloc_pdp");
602 goto retry; 602 goto retry;
603 } 603 }
604 return POOL_PADDR_INVALID; 604 return POOL_PADDR_INVALID;
605 } 605 }
606 606
607 LIST_INSERT_HEAD(&pm->pm_vmlist, pg, pageq.list); 607 LIST_INSERT_HEAD(&pm->pm_vmlist, pg, pageq.list);
608 pg->flags &= ~PG_BUSY; /* never busy */ 608 pg->flags &= ~PG_BUSY; /* never busy */
609 pg->wire_count = 1; /* max = 1 + Ln_ENTRIES = 513 */ 609 pg->wire_count = 1; /* max = 1 + Ln_ENTRIES = 513 */
610 pa = VM_PAGE_TO_PHYS(pg); 610 pa = VM_PAGE_TO_PHYS(pg);
611 PMAP_COUNT(pdp_alloc); 611 PMAP_COUNT(pdp_alloc);
612 PMAP_PAGE_INIT(VM_PAGE_TO_PP(pg)); 612 PMAP_PAGE_INIT(VM_PAGE_TO_PP(pg));
613 } else { 613 } else {
614 /* uvm_pageboot_alloc() returns AARCH64 KSEG address */ 614 /* uvm_pageboot_alloc() returns AARCH64 KSEG address */
615 pg = NULL; 615 pg = NULL;
616 pa = AARCH64_KVA_TO_PA( 616 pa = AARCH64_KVA_TO_PA(
617 uvm_pageboot_alloc(Ln_TABLE_SIZE)); 617 uvm_pageboot_alloc(Ln_TABLE_SIZE));
618 PMAP_COUNT(pdp_alloc_boot); 618 PMAP_COUNT(pdp_alloc_boot);
619 } 619 }
620 if (pgp != NULL) 620 if (pgp != NULL)
621 *pgp = pg; 621 *pgp = pg;
622 622
623 UVMHIST_LOG(pmaphist, "pa=%llx, pg=%llx", 623 UVMHIST_LOG(pmaphist, "pa=%llx, pg=%llx",
624 pa, pg, 0, 0); 624 pa, pg, 0, 0);
625 625
626 return pa; 626 return pa;
627} 627}
628 628
629static void 629static void
630pmap_free_pdp(struct pmap *pm, struct vm_page *pg) 630pmap_free_pdp(struct pmap *pm, struct vm_page *pg)
631{ 631{
632 632
633 KASSERT(pm != pmap_kernel()); 633 KASSERT(pm != pmap_kernel());
634 KASSERT(VM_PAGE_TO_PP(pg)->pp_pv.pv_pmap == NULL); 634 KASSERT(VM_PAGE_TO_PP(pg)->pp_pv.pv_pmap == NULL);
635 KASSERT(VM_PAGE_TO_PP(pg)->pp_pv.pv_next == NULL); 635 KASSERT(VM_PAGE_TO_PP(pg)->pp_pv.pv_next == NULL);
636 636
637 LIST_REMOVE(pg, pageq.list); 637 LIST_REMOVE(pg, pageq.list);
638 pg->wire_count = 0; 638 pg->wire_count = 0;
639 uvm_pagefree(pg); 639 uvm_pagefree(pg);
640 PMAP_COUNT(pdp_free); 640 PMAP_COUNT(pdp_free);
641} 641}
642 642
643/* free empty page table pages */ 643/* free empty page table pages */
644static int 644static int
645_pmap_sweep_pdp(struct pmap *pm) 645_pmap_sweep_pdp(struct pmap *pm)
646{ 646{
647 struct vm_page *pg, *tmp; 647 struct vm_page *pg, *tmp;
648 pd_entry_t *ptep_in_parent, opte __diagused; 648 pd_entry_t *ptep_in_parent, opte __diagused;
649 paddr_t pa, pdppa; 649 paddr_t pa, pdppa;
650 int nsweep; 650 int nsweep;
651 uint16_t wirecount __diagused; 651 uint16_t wirecount __diagused;
652 652
653 KASSERT(mutex_owned(&pm->pm_lock) || pm->pm_refcnt == 0); 653 KASSERT(mutex_owned(&pm->pm_lock) || pm->pm_refcnt == 0);
654 654
655 nsweep = 0; 655 nsweep = 0;
656 LIST_FOREACH_SAFE(pg, &pm->pm_vmlist, pageq.list, tmp) { 656 LIST_FOREACH_SAFE(pg, &pm->pm_vmlist, pageq.list, tmp) {
657 if (pg->wire_count != 1) 657 if (pg->wire_count != 1)
658 continue; 658 continue;
659 659
660 pa = VM_PAGE_TO_PHYS(pg); 660 pa = VM_PAGE_TO_PHYS(pg);
661 if (pa == pm->pm_l0table_pa) 661 if (pa == pm->pm_l0table_pa)
662 continue; 662 continue;
663 663
664 ptep_in_parent = VM_PAGE_TO_MD(pg)->mdpg_ptep_parent; 664 ptep_in_parent = VM_PAGE_TO_MD(pg)->mdpg_ptep_parent;
665 if (ptep_in_parent == NULL) { 665 if (ptep_in_parent == NULL) {
666 /* no parent */ 666 /* no parent */
667 pmap_free_pdp(pm, pg); 667 pmap_free_pdp(pm, pg);
668 nsweep++; 668 nsweep++;
669 continue; 669 continue;
670 } 670 }
671 671
672 /* unlink from parent */ 672 /* unlink from parent */
673 opte = atomic_swap_64(ptep_in_parent, 0); 673 opte = atomic_swap_64(ptep_in_parent, 0);
674 KASSERT(lxpde_valid(opte)); 674 KASSERT(lxpde_valid(opte));
675 wirecount = --pg->wire_count; /* 1 -> 0 */ 675 wirecount = --pg->wire_count; /* 1 -> 0 */
676 KASSERT(wirecount == 0); 676 KASSERT(wirecount == 0);
677 pmap_free_pdp(pm, pg); 677 pmap_free_pdp(pm, pg);
678 nsweep++; 678 nsweep++;
679 679
680 /* L3->L2->L1. no need for L0 */ 680 /* L3->L2->L1. no need for L0 */
681 pdppa = AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep_in_parent)); 681 pdppa = AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep_in_parent));
682 if (pdppa == pm->pm_l0table_pa) 682 if (pdppa == pm->pm_l0table_pa)
683 continue; 683 continue;
684 684
685 pg = PHYS_TO_VM_PAGE(pdppa); 685 pg = PHYS_TO_VM_PAGE(pdppa);
686 KASSERT(pg != NULL); 686 KASSERT(pg != NULL);
687 KASSERTMSG(pg->wire_count >= 1, 687 KASSERTMSG(pg->wire_count >= 1,
688 "wire_count=%d", pg->wire_count); 688 "wire_count=%d", pg->wire_count);
689 /* decrement wire_count of parent */ 689 /* decrement wire_count of parent */
690 wirecount = --pg->wire_count; 690 wirecount = --pg->wire_count;
691 KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1), 691 KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
692 "pm=%p[%d], pg=%p, wire_count=%d", 692 "pm=%p[%d], pg=%p, wire_count=%d",
693 pm, pm->pm_asid, pg, pg->wire_count); 693 pm, pm->pm_asid, pg, pg->wire_count);
694 } 694 }
695 pm->pm_idlepdp = 0; 695 pm->pm_idlepdp = 0;
696 696
697 return nsweep; 697 return nsweep;
698} 698}
699 699
700static void 700static void
701_pmap_free_pdp_all(struct pmap *pm) 701_pmap_free_pdp_all(struct pmap *pm)
702{ 702{
703 struct vm_page *pg; 703 struct vm_page *pg;
704 704
705 while ((pg = LIST_FIRST(&pm->pm_vmlist)) != NULL) { 705 while ((pg = LIST_FIRST(&pm->pm_vmlist)) != NULL) {
706 pmap_free_pdp(pm, pg); 706 pmap_free_pdp(pm, pg);
707 } 707 }
708} 708}
709 709
710vaddr_t 710vaddr_t
711pmap_growkernel(vaddr_t maxkvaddr) 711pmap_growkernel(vaddr_t maxkvaddr)
712{ 712{
713 UVMHIST_FUNC(__func__); 713 UVMHIST_FUNC(__func__);
714 UVMHIST_CALLED(pmaphist); 714 UVMHIST_CALLED(pmaphist);
715 715
716 UVMHIST_LOG(pmaphist, "maxkvaddr=%llx, pmap_maxkvaddr=%llx", 716 UVMHIST_LOG(pmaphist, "maxkvaddr=%llx, pmap_maxkvaddr=%llx",
717 maxkvaddr, pmap_maxkvaddr, 0, 0); 717 maxkvaddr, pmap_maxkvaddr, 0, 0);
718 718
719 kasan_shadow_map((void *)pmap_maxkvaddr, 719 kasan_shadow_map((void *)pmap_maxkvaddr,
720 (size_t)(maxkvaddr - pmap_maxkvaddr)); 720 (size_t)(maxkvaddr - pmap_maxkvaddr));
721 721
722 pmap_maxkvaddr = maxkvaddr; 722 pmap_maxkvaddr = maxkvaddr;
723 723
724 return maxkvaddr; 724 return maxkvaddr;
725} 725}
726 726
727bool 727bool
728pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap) 728pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
729{ 729{
730 730
731 return pmap_extract_coherency(pm, va, pap, NULL); 731 return pmap_extract_coherency(pm, va, pap, NULL);
732} 732}
733 733
734bool 734bool
735pmap_extract_coherency(struct pmap *pm, vaddr_t va, paddr_t *pap, 735pmap_extract_coherency(struct pmap *pm, vaddr_t va, paddr_t *pap,
736 bool *coherencyp) 736 bool *coherencyp)
737{ 737{
738 pt_entry_t *ptep, pte; 738 pt_entry_t *ptep, pte;
739 paddr_t pa; 739 paddr_t pa;
740 vsize_t blocksize = 0; 740 vsize_t blocksize = 0;
741 int space; 741 int space;
742 bool coherency; 742 bool coherency;
743 extern char __kernel_text[]; 743 extern char __kernel_text[];
744 extern char _end[]; 744 extern char _end[];
745 745
746 coherency = false; 746 coherency = false;
747 747
748 space = aarch64_addressspace(va); 748 space = aarch64_addressspace(va);
749 if (pm == pmap_kernel()) { 749 if (pm == pmap_kernel()) {
750 if (space != AARCH64_ADDRSPACE_UPPER) 750 if (space != AARCH64_ADDRSPACE_UPPER)
751 return false; 751 return false;
752 752
753 if (IN_RANGE(va, (vaddr_t)__kernel_text, (vaddr_t)_end)) { 753 if (IN_RANGE(va, (vaddr_t)__kernel_text, (vaddr_t)_end)) {
754 /* kernel text/data/bss are definitely linear mapped */ 754 /* kernel text/data/bss are definitely linear mapped */
755 pa = KERN_VTOPHYS(va); 755 pa = KERN_VTOPHYS(va);
756 goto mapped; 756 goto mapped;
757 } else if (IN_KSEG_ADDR(va)) { 757 } else if (IN_KSEG_ADDR(va)) {
758 /* 758 /*
759 * also KSEG is linear mapped, but areas that have no 759 * also KSEG is linear mapped, but areas that have no
760 * physical memory haven't been mapped. 760 * physical memory haven't been mapped.
761 * fast lookup by using the S1E1R/PAR_EL1 registers. 761 * fast lookup by using the S1E1R/PAR_EL1 registers.
762 */ 762 */
763 register_t s = daif_disable(DAIF_I|DAIF_F); 763 register_t s = daif_disable(DAIF_I|DAIF_F);
764 reg_s1e1r_write(va); 764 reg_s1e1r_write(va);
765 __asm __volatile ("isb"); 765 __asm __volatile ("isb");
766 uint64_t par = reg_par_el1_read(); 766 uint64_t par = reg_par_el1_read();
767 reg_daif_write(s); 767 reg_daif_write(s);
768 768
769 if (par & PAR_F) 769 if (par & PAR_F)
770 return false; 770 return false;
771 pa = (__SHIFTOUT(par, PAR_PA) << PAR_PA_SHIFT) + 771 pa = (__SHIFTOUT(par, PAR_PA) << PAR_PA_SHIFT) +
772 (va & __BITS(PAR_PA_SHIFT - 1, 0)); 772 (va & __BITS(PAR_PA_SHIFT - 1, 0));
773 goto mapped; 773 goto mapped;
774 } 774 }
775 } else { 775 } else {
776 if (space != AARCH64_ADDRSPACE_LOWER) 776 if (space != AARCH64_ADDRSPACE_LOWER)
777 return false; 777 return false;
778 } 778 }
779 779
780 /* 780 /*
781 * other areas, it isn't able to examined using the PAR_EL1 register, 781 * other areas, it isn't able to examined using the PAR_EL1 register,
782 * because the page may be in an access fault state due to 782 * because the page may be in an access fault state due to
783 * reference bit emulation. 783 * reference bit emulation.
784 */ 784 */
785 ptep = _pmap_pte_lookup_bs(pm, va, &blocksize); 785 ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
786 if (ptep == NULL) 786 if (ptep == NULL)
787 return false; 787 return false;
788 pte = *ptep; 788 pte = *ptep;
789 if (!lxpde_valid(pte)) 789 if (!lxpde_valid(pte))
790 return false; 790 return false;
791 pa = lxpde_pa(pte) + (va & (blocksize - 1)); 791 pa = lxpde_pa(pte) + (va & (blocksize - 1));
792 792
793 switch (pte & LX_BLKPAG_ATTR_MASK) { 793 switch (pte & LX_BLKPAG_ATTR_MASK) {
794 case LX_BLKPAG_ATTR_NORMAL_NC: 794 case LX_BLKPAG_ATTR_NORMAL_NC:
795 case LX_BLKPAG_ATTR_DEVICE_MEM: 795 case LX_BLKPAG_ATTR_DEVICE_MEM:
796 case LX_BLKPAG_ATTR_DEVICE_MEM_SO: 796 case LX_BLKPAG_ATTR_DEVICE_MEM_SO:
797 coherency = true; 797 coherency = true;
798 break; 798 break;
799 } 799 }
800 800
801 mapped: 801 mapped:
802 if (pap != NULL) 802 if (pap != NULL)
803 *pap = pa; 803 *pap = pa;
804 if (coherencyp != NULL) 804 if (coherencyp != NULL)
805 *coherencyp = coherency; 805 *coherencyp = coherency;
806 return true; 806 return true;
807} 807}
808 808
809paddr_t 809paddr_t
810vtophys(vaddr_t va) 810vtophys(vaddr_t va)
811{ 811{
812 struct pmap *pm; 812 struct pmap *pm;
813 paddr_t pa; 813 paddr_t pa;
814 814
815 /* even if TBI is disabled, AARCH64_ADDRTOP_TAG means KVA */ 815 /* even if TBI is disabled, AARCH64_ADDRTOP_TAG means KVA */
816 if ((uint64_t)va & AARCH64_ADDRTOP_TAG) 816 if ((uint64_t)va & AARCH64_ADDRTOP_TAG)
817 pm = pmap_kernel(); 817 pm = pmap_kernel();
818 else 818 else
819 pm = curlwp->l_proc->p_vmspace->vm_map.pmap; 819 pm = curlwp->l_proc->p_vmspace->vm_map.pmap;
820 820
821 if (pmap_extract(pm, va, &pa) == false) 821 if (pmap_extract(pm, va, &pa) == false)
822 return VTOPHYS_FAILED; 822 return VTOPHYS_FAILED;
823 return pa; 823 return pa;
824} 824}
825 825
826/* 826/*
827 * return pointer of the pte. regardess of whether the entry is valid or not. 827 * return pointer of the pte. regardess of whether the entry is valid or not.
828 */ 828 */
829static pt_entry_t * 829static pt_entry_t *
830_pmap_pte_lookup_bs(struct pmap *pm, vaddr_t va, vsize_t *bs) 830_pmap_pte_lookup_bs(struct pmap *pm, vaddr_t va, vsize_t *bs)
831{ 831{
832 pt_entry_t *ptep; 832 pt_entry_t *ptep;
833 pd_entry_t *l0, *l1, *l2, *l3; 833 pd_entry_t *l0, *l1, *l2, *l3;
834 pd_entry_t pde; 834 pd_entry_t pde;
835 vsize_t blocksize; 835 vsize_t blocksize;
836 unsigned int idx; 836 unsigned int idx;
837 837
838 /* 838 /*
839 * traverse L0 -> L1 -> L2 -> L3 839 * traverse L0 -> L1 -> L2 -> L3
840 */ 840 */
841 blocksize = L0_SIZE; 841 blocksize = L0_SIZE;
842 l0 = pm->pm_l0table; 842 l0 = pm->pm_l0table;
843 idx = l0pde_index(va); 843 idx = l0pde_index(va);
844 ptep = &l0[idx]; 844 ptep = &l0[idx];
845 pde = *ptep; 845 pde = *ptep;
846 if (!l0pde_valid(pde)) 846 if (!l0pde_valid(pde))
847 goto done; 847 goto done;
848 848
849 blocksize = L1_SIZE; 849 blocksize = L1_SIZE;
850 l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde)); 850 l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
851 idx = l1pde_index(va); 851 idx = l1pde_index(va);
852 ptep = &l1[idx]; 852 ptep = &l1[idx];
853 pde = *ptep; 853 pde = *ptep;
854 if (!l1pde_valid(pde) || l1pde_is_block(pde)) 854 if (!l1pde_valid(pde) || l1pde_is_block(pde))
855 goto done; 855 goto done;
856 856
857 blocksize = L2_SIZE; 857 blocksize = L2_SIZE;
858 l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde)); 858 l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
859 idx = l2pde_index(va); 859 idx = l2pde_index(va);
860 ptep = &l2[idx]; 860 ptep = &l2[idx];
861 pde = *ptep; 861 pde = *ptep;
862 if (!l2pde_valid(pde) || l2pde_is_block(pde)) 862 if (!l2pde_valid(pde) || l2pde_is_block(pde))
863 goto done; 863 goto done;
864 864
865 blocksize = L3_SIZE; 865 blocksize = L3_SIZE;
866 l3 = (pd_entry_t *)AARCH64_PA_TO_KVA(l2pde_pa(pde)); 866 l3 = (pd_entry_t *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
867 idx = l3pte_index(va); 867 idx = l3pte_index(va);
868 ptep = &l3[idx]; 868 ptep = &l3[idx];
869 869
870 done: 870 done:
871 if (bs != NULL) 871 if (bs != NULL)
872 *bs = blocksize; 872 *bs = blocksize;
873 return ptep; 873 return ptep;
874} 874}
875 875
876static pt_entry_t * 876static pt_entry_t *
877_pmap_pte_lookup_l3(struct pmap *pm, vaddr_t va) 877_pmap_pte_lookup_l3(struct pmap *pm, vaddr_t va)
878{ 878{
879 pt_entry_t *ptep; 879 pt_entry_t *ptep;
880 vsize_t blocksize = 0; 880 vsize_t blocksize = 0;
881 881
882 ptep = _pmap_pte_lookup_bs(pm, va, &blocksize); 882 ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
883 if ((ptep != NULL) && (blocksize == L3_SIZE)) 883 if ((ptep != NULL) && (blocksize == L3_SIZE))
884 return ptep; 884 return ptep;
885 885
886 return NULL; 886 return NULL;
887} 887}
888 888
889void 889void
890pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) 890pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
891{ 891{
892 pt_entry_t *ptep = NULL, pte; 892 pt_entry_t *ptep = NULL, pte;
893 vaddr_t va; 893 vaddr_t va;
894 vsize_t blocksize = 0; 894 vsize_t blocksize = 0;
895 895
896 KASSERT_PM_ADDR(pm, sva); 896 KASSERT_PM_ADDR(pm, sva);
897 897
898 pm_lock(pm); 898 pm_lock(pm);
899 899
900 for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) { 900 for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) {
901 /* va is belong to the same L3 table as before? */ 901 /* va is belong to the same L3 table as before? */
902 if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0)) { 902 if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0)) {
903 ptep++; 903 ptep++;
904 } else { 904 } else {
905 ptep = _pmap_pte_lookup_bs(pm, va, &blocksize); 905 ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
906 if (ptep == NULL) 906 if (ptep == NULL)
907 break; 907 break;
908 } 908 }
909 909
910 pte = *ptep; 910 pte = *ptep;
911 if (lxpde_valid(pte)) { 911 if (lxpde_valid(pte)) {
912 vaddr_t eob = (va + blocksize) & ~(blocksize - 1); 912 vaddr_t eob = (va + blocksize) & ~(blocksize - 1);
913 vsize_t len = ulmin(eva, eob) - va; 913 vsize_t len = ulmin(eva, eob) - va;
914 914
915 if (l3pte_writable(pte)) { 915 if (l3pte_writable(pte)) {
916 cpu_icache_sync_range(va, len); 916 cpu_icache_sync_range(va, len);
917 } else { 917 } else {
918 /* 918 /*
919 * change to writable temporally 919 * change to writable temporally
920 * to do cpu_icache_sync_range() 920 * to do cpu_icache_sync_range()
921 */ 921 */
922 pt_entry_t opte = pte; 922 pt_entry_t opte = pte;
923 pte = pte & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); 923 pte = pte & ~(LX_BLKPAG_AF|LX_BLKPAG_AP);
924 pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); 924 pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW);
925 atomic_swap_64(ptep, pte); 925 atomic_swap_64(ptep, pte);
926 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 926 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
927 cpu_icache_sync_range(va, len); 927 cpu_icache_sync_range(va, len);
928 atomic_swap_64(ptep, opte); 928 atomic_swap_64(ptep, opte);
929 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 929 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
930 } 930 }
931 } 931 }
932 } 932 }
933 933
934 pm_unlock(pm); 934 pm_unlock(pm);
935} 935}
936 936
937/* 937/*
938 * Routine: pmap_procwr 938 * Routine: pmap_procwr
939 * 939 *
940 * Function: 940 * Function:
941 * Synchronize caches corresponding to [addr, addr+len) in p. 941 * Synchronize caches corresponding to [addr, addr+len) in p.
942 * 942 *
943 */ 943 */
944void 944void
945pmap_procwr(struct proc *p, vaddr_t sva, int len) 945pmap_procwr(struct proc *p, vaddr_t sva, int len)
946{ 946{
947 947
948 if (__predict_true(p == curproc)) 948 if (__predict_true(p == curproc))
949 cpu_icache_sync_range(sva, len); 949 cpu_icache_sync_range(sva, len);
950 else { 950 else {
951 struct pmap *pm = p->p_vmspace->vm_map.pmap; 951 struct pmap *pm = p->p_vmspace->vm_map.pmap;
952 paddr_t pa; 952 paddr_t pa;
953 vaddr_t va, eva; 953 vaddr_t va, eva;
954 int l; 954 int tlen;
955 955
956 for (va = sva; len > 0; va = eva, len -= l) { 956 for (va = sva; len > 0; va = eva, len -= tlen) {
957 eva = uimin(va + len, trunc_page(va + PAGE_SIZE)); 957 eva = uimin(va + len, trunc_page(va + PAGE_SIZE));
958 l = eva - va; 958 tlen = eva - va;
959 if (!pmap_extract(pm, va, &pa)) 959 if (!pmap_extract(pm, va, &pa))
960 continue; 960 continue;
961 va = AARCH64_PA_TO_KVA(pa); 961 va = AARCH64_PA_TO_KVA(pa);
962 cpu_icache_sync_range(va, l); 962 cpu_icache_sync_range(va, tlen);
963 } 963 }
964 } 964 }
965} 965}
966 966
967static pt_entry_t 967static pt_entry_t
968_pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t protmask, 968_pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t protmask,
969 bool user) 969 bool user)
970{ 970{
971 vm_prot_t masked; 971 vm_prot_t masked;
972 pt_entry_t xn; 972 pt_entry_t xn;
973 973
974 masked = prot & protmask; 974 masked = prot & protmask;
975 pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_AP); 975 pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_AP);
976 976
977 /* keep prot for ref/mod emulation */ 977 /* keep prot for ref/mod emulation */
978 switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) { 978 switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) {
979 case 0: 979 case 0:
980 default: 980 default:
981 break; 981 break;
982 case VM_PROT_READ: 982 case VM_PROT_READ:
983 pte |= LX_BLKPAG_OS_READ; 983 pte |= LX_BLKPAG_OS_READ;
984 break; 984 break;
985 case VM_PROT_WRITE: 985 case VM_PROT_WRITE:
986 case VM_PROT_READ|VM_PROT_WRITE: 986 case VM_PROT_READ|VM_PROT_WRITE:
987 pte |= (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE); 987 pte |= (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE);
988 break; 988 break;
989 } 989 }
990 990
991 switch (masked & (VM_PROT_READ|VM_PROT_WRITE)) { 991 switch (masked & (VM_PROT_READ|VM_PROT_WRITE)) {
992 case 0: 992 case 0:
993 default: 993 default:
994 /* cannot access due to No LX_BLKPAG_AF */ 994 /* cannot access due to No LX_BLKPAG_AF */
995 pte |= LX_BLKPAG_AP_RO; 995 pte |= LX_BLKPAG_AP_RO;
996 break; 996 break;
997 case VM_PROT_READ: 997 case VM_PROT_READ:
998 /* actual permission of pte */ 998 /* actual permission of pte */
999 pte |= LX_BLKPAG_AF; 999 pte |= LX_BLKPAG_AF;
1000 pte |= LX_BLKPAG_AP_RO; 1000 pte |= LX_BLKPAG_AP_RO;
1001 break; 1001 break;
1002 case VM_PROT_WRITE: 1002 case VM_PROT_WRITE:
1003 case VM_PROT_READ|VM_PROT_WRITE: 1003 case VM_PROT_READ|VM_PROT_WRITE:
1004 /* actual permission of pte */ 1004 /* actual permission of pte */
1005 pte |= LX_BLKPAG_AF; 1005 pte |= LX_BLKPAG_AF;
1006 pte |= LX_BLKPAG_AP_RW; 1006 pte |= LX_BLKPAG_AP_RW;
1007 break; 1007 break;
1008 } 1008 }
1009 1009
1010 /* executable for kernel or user? first set never exec both */ 1010 /* executable for kernel or user? first set never exec both */
1011 pte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); 1011 pte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN);
1012 /* and either to executable */ 1012 /* and either to executable */
1013 xn = user ? LX_BLKPAG_UXN : LX_BLKPAG_PXN; 1013 xn = user ? LX_BLKPAG_UXN : LX_BLKPAG_PXN;
1014 if (prot & VM_PROT_EXECUTE) 1014 if (prot & VM_PROT_EXECUTE)
1015 pte &= ~xn; 1015 pte &= ~xn;
1016 1016
1017 return pte; 1017 return pte;
1018} 1018}
1019 1019
1020static pt_entry_t 1020static pt_entry_t
1021_pmap_pte_adjust_cacheflags(pt_entry_t pte, u_int flags) 1021_pmap_pte_adjust_cacheflags(pt_entry_t pte, u_int flags)
1022{ 1022{
1023 1023
1024 pte &= ~LX_BLKPAG_ATTR_MASK; 1024 pte &= ~LX_BLKPAG_ATTR_MASK;
1025 1025
1026 switch (flags & (PMAP_CACHE_MASK|PMAP_DEV_MASK)) { 1026 switch (flags & (PMAP_CACHE_MASK|PMAP_DEV_MASK)) {
1027 case PMAP_DEV_SO ... PMAP_DEV_SO | PMAP_CACHE_MASK: 1027 case PMAP_DEV_SO ... PMAP_DEV_SO | PMAP_CACHE_MASK:
1028 pte |= LX_BLKPAG_ATTR_DEVICE_MEM_SO; /* Device-nGnRnE */ 1028 pte |= LX_BLKPAG_ATTR_DEVICE_MEM_SO; /* Device-nGnRnE */
1029 break; 1029 break;
1030 case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK: 1030 case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:
1031 pte |= LX_BLKPAG_ATTR_DEVICE_MEM; /* Device-nGnRE */ 1031 pte |= LX_BLKPAG_ATTR_DEVICE_MEM; /* Device-nGnRE */
1032 break; 1032 break;
1033 case PMAP_NOCACHE: 1033 case PMAP_NOCACHE:
1034 case PMAP_NOCACHE_OVR: 1034 case PMAP_NOCACHE_OVR:
1035 case PMAP_WRITE_COMBINE: 1035 case PMAP_WRITE_COMBINE:
1036 pte |= LX_BLKPAG_ATTR_NORMAL_NC; /* only no-cache */ 1036 pte |= LX_BLKPAG_ATTR_NORMAL_NC; /* only no-cache */
1037 break; 1037 break;
1038 case PMAP_WRITE_BACK: 1038 case PMAP_WRITE_BACK:
1039 case 0: 1039 case 0:
1040 default: 1040 default:
1041 pte |= LX_BLKPAG_ATTR_NORMAL_WB; 1041 pte |= LX_BLKPAG_ATTR_NORMAL_WB;
1042 break; 1042 break;
1043 } 1043 }
1044 1044
1045 return pte; 1045 return pte;
1046} 1046}
1047 1047
1048static struct pv_entry * 1048static struct pv_entry *
1049_pmap_remove_pv(struct pmap_page *pp, struct pmap *pm, vaddr_t va, 1049_pmap_remove_pv(struct pmap_page *pp, struct pmap *pm, vaddr_t va,
1050 pt_entry_t pte) 1050 pt_entry_t pte)
1051{ 1051{
1052 struct pv_entry *pv, *ppv; 1052 struct pv_entry *pv, *ppv;
1053 1053
1054 UVMHIST_FUNC(__func__); 1054 UVMHIST_FUNC(__func__);
1055 UVMHIST_CALLED(pmaphist); 1055 UVMHIST_CALLED(pmaphist);
1056 1056
1057 UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pte=%llx", 1057 UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pte=%llx",
1058 pp, pm, va, pte); 1058 pp, pm, va, pte);
1059 1059
1060 KASSERT(mutex_owned(&pp->pp_pvlock)); 1060 KASSERT(mutex_owned(&pp->pp_pvlock));
1061 1061
1062 for (ppv = NULL, pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) { 1062 for (ppv = NULL, pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) {
1063 if (pv->pv_pmap == pm && trunc_page(pv->pv_va) == va) { 1063 if (pv->pv_pmap == pm && trunc_page(pv->pv_va) == va) {
1064 break; 1064 break;
1065 } 1065 }
1066 ppv = pv; 1066 ppv = pv;
1067 } 1067 }
1068 if (ppv == NULL) { 1068 if (ppv == NULL) {
1069 /* embedded in pmap_page */ 1069 /* embedded in pmap_page */
1070 pv->pv_pmap = NULL; 1070 pv->pv_pmap = NULL;
1071 pv = NULL; 1071 pv = NULL;
1072 PMAP_COUNT(pv_remove_emb); 1072 PMAP_COUNT(pv_remove_emb);
1073 } else if (pv != NULL) { 1073 } else if (pv != NULL) {
1074 /* dynamically allocated */ 1074 /* dynamically allocated */
1075 ppv->pv_next = pv->pv_next; 1075 ppv->pv_next = pv->pv_next;
1076 PMAP_COUNT(pv_remove_dyn); 1076 PMAP_COUNT(pv_remove_dyn);
1077 } else { 1077 } else {
1078 PMAP_COUNT(pv_remove_nopv); 1078 PMAP_COUNT(pv_remove_nopv);
1079 } 1079 }
1080 1080
1081 return pv; 1081 return pv;
1082} 1082}
1083 1083
1084#if defined(PMAP_PV_DEBUG) || defined(DDB) 1084#if defined(PMAP_PV_DEBUG) || defined(DDB)
1085 1085
1086static char * 1086static char *
1087str_vmflags(uint32_t flags) 1087str_vmflags(uint32_t flags)
1088{ 1088{
1089 static int idx = 0; 1089 static int idx = 0;
1090 static char buf[4][32]; /* XXX */ 1090 static char buf[4][32]; /* XXX */
1091 char *p; 1091 char *p;
1092 1092
1093 p = buf[idx]; 1093 p = buf[idx];
1094 idx = (idx + 1) & 3; 1094 idx = (idx + 1) & 3;
1095 1095
1096 p[0] = (flags & VM_PROT_READ) ? 'R' : '-'; 1096 p[0] = (flags & VM_PROT_READ) ? 'R' : '-';
1097 p[1] = (flags & VM_PROT_WRITE) ? 'W' : '-'; 1097 p[1] = (flags & VM_PROT_WRITE) ? 'W' : '-';
1098 p[2] = (flags & VM_PROT_EXECUTE) ? 'X' : '-'; 1098 p[2] = (flags & VM_PROT_EXECUTE) ? 'X' : '-';
1099 if (flags & PMAP_WIRED) 1099 if (flags & PMAP_WIRED)
1100 memcpy(&p[3], ",WIRED\0", 7); 1100 memcpy(&p[3], ",WIRED\0", 7);
1101 else 1101 else
1102 p[3] = '\0'; 1102 p[3] = '\0';
1103 1103
1104 return p; 1104 return p;
1105} 1105}
1106 1106
1107static void 1107static void
1108pg_dump(struct vm_page *pg, void (*pr)(const char *, ...) __printflike(1, 2)) 1108pg_dump(struct vm_page *pg, void (*pr)(const char *, ...) __printflike(1, 2))
1109{ 1109{
1110 pr("pg=%p\n", pg); 1110 pr("pg=%p\n", pg);
1111 pr(" pg->uanon = %p\n", pg->uanon); 1111 pr(" pg->uanon = %p\n", pg->uanon);
1112 pr(" pg->uobject = %p\n", pg->uobject); 1112 pr(" pg->uobject = %p\n", pg->uobject);
1113 pr(" pg->offset = %zu\n", pg->offset); 1113 pr(" pg->offset = %zu\n", pg->offset);
1114 pr(" pg->flags = %u\n", pg->flags); 1114 pr(" pg->flags = %u\n", pg->flags);
1115 pr(" pg->loan_count = %u\n", pg->loan_count); 1115 pr(" pg->loan_count = %u\n", pg->loan_count);
1116 pr(" pg->wire_count = %u\n", pg->wire_count); 1116 pr(" pg->wire_count = %u\n", pg->wire_count);
1117 pr(" pg->pqflags = %u\n", pg->pqflags); 1117 pr(" pg->pqflags = %u\n", pg->pqflags);
1118 pr(" pg->phys_addr = %016lx\n", VM_PAGE_TO_PHYS(pg)); 1118 pr(" pg->phys_addr = %016lx\n", VM_PAGE_TO_PHYS(pg));
1119} 1119}
1120 1120
1121static void 1121static void
1122pv_dump(struct pmap_page *pp, void (*pr)(const char *, ...) __printflike(1, 2)) 1122pv_dump(struct pmap_page *pp, void (*pr)(const char *, ...) __printflike(1, 2))
1123{ 1123{
1124 struct pv_entry *pv; 1124 struct pv_entry *pv;
1125 int i, flags; 1125 int i, flags;
1126 1126
1127 i = 0; 1127 i = 0;
1128 flags = pp->pp_pv.pv_va & (PAGE_SIZE - 1); 1128 flags = pp->pp_pv.pv_va & (PAGE_SIZE - 1);
1129 1129
1130 pr("pp=%p\n", pp); 1130 pr("pp=%p\n", pp);
1131 pr(" pp flags=%08x %s\n", flags, str_vmflags(flags)); 1131 pr(" pp flags=%08x %s\n", flags, str_vmflags(flags));
1132 1132
1133 for (pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) { 1133 for (pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) {
1134 if (pv->pv_pmap == NULL) { 1134 if (pv->pv_pmap == NULL) {
1135 KASSERT(pv == &pp->pp_pv); 1135 KASSERT(pv == &pp->pp_pv);
1136 continue; 1136 continue;
1137 } 1137 }
1138 pr(" pv[%d] pv=%p\n", 1138 pr(" pv[%d] pv=%p\n",
1139 i, pv); 1139 i, pv);
1140 pr(" pv[%d].pv_pmap = %p (asid=%d)\n", 1140 pr(" pv[%d].pv_pmap = %p (asid=%d)\n",
1141 i, pv->pv_pmap, pv->pv_pmap->pm_asid); 1141 i, pv->pv_pmap, pv->pv_pmap->pm_asid);
1142 pr(" pv[%d].pv_va = %016lx (color=%d)\n", 1142 pr(" pv[%d].pv_va = %016lx (color=%d)\n",
1143 i, trunc_page(pv->pv_va), _pmap_color(pv->pv_va)); 1143 i, trunc_page(pv->pv_va), _pmap_color(pv->pv_va));
1144 pr(" pv[%d].pv_ptep = %p\n", 1144 pr(" pv[%d].pv_ptep = %p\n",
1145 i, pv->pv_ptep); 1145 i, pv->pv_ptep);
1146 i++; 1146 i++;
1147 } 1147 }
1148} 1148}
1149#endif /* PMAP_PV_DEBUG & DDB */ 1149#endif /* PMAP_PV_DEBUG & DDB */
1150 1150
1151static int 1151static int
1152_pmap_enter_pv(struct pmap_page *pp, struct pmap *pm, struct pv_entry **pvp, 1152_pmap_enter_pv(struct pmap_page *pp, struct pmap *pm, struct pv_entry **pvp,
1153 vaddr_t va, pt_entry_t *ptep, paddr_t pa, u_int flags) 1153 vaddr_t va, pt_entry_t *ptep, paddr_t pa, u_int flags)
1154{ 1154{
1155 struct pv_entry *pv; 1155 struct pv_entry *pv;
1156 1156
1157 UVMHIST_FUNC(__func__); 1157 UVMHIST_FUNC(__func__);
1158 UVMHIST_CALLED(pmaphist); 1158 UVMHIST_CALLED(pmaphist);
1159 1159
1160 UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pa=%llx", pp, pm, va, pa); 1160 UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pa=%llx", pp, pm, va, pa);
1161 UVMHIST_LOG(pmaphist, "ptep=%p, flags=%08x", ptep, flags, 0, 0); 1161 UVMHIST_LOG(pmaphist, "ptep=%p, flags=%08x", ptep, flags, 0, 0);
1162 1162
1163 KASSERT(mutex_owned(&pp->pp_pvlock)); 1163 KASSERT(mutex_owned(&pp->pp_pvlock));
1164 KASSERT(trunc_page(va) == va); 1164 KASSERT(trunc_page(va) == va);
1165 1165
1166 /* 1166 /*
1167 * a mapping must not already be registered at this VA. 1167 * a mapping must not already be registered at this VA.
1168 */ 1168 */
1169 if (pp->pp_pv.pv_pmap == NULL) { 1169 if (pp->pp_pv.pv_pmap == NULL) {
1170 /* 1170 /*
1171 * claim pv_entry embedded in pmap_page. 1171 * claim pv_entry embedded in pmap_page.
1172 * take care not to wipe out acc/mod flags. 1172 * take care not to wipe out acc/mod flags.
1173 */ 1173 */
1174 pv = &pp->pp_pv; 1174 pv = &pp->pp_pv;
1175 pv->pv_va = (pv->pv_va & (PAGE_SIZE - 1)) | va; 1175 pv->pv_va = (pv->pv_va & (PAGE_SIZE - 1)) | va;
1176 } else { 1176 } else {
1177 /* 1177 /*
1178 * create and link new pv. 1178 * create and link new pv.
1179 * pv is already allocated at beginning of _pmap_enter(). 1179 * pv is already allocated at beginning of _pmap_enter().
1180 */ 1180 */
1181 pv = *pvp; 1181 pv = *pvp;
1182 if (pv == NULL) 1182 if (pv == NULL)
1183 return ENOMEM; 1183 return ENOMEM;
1184 *pvp = NULL; 1184 *pvp = NULL;
1185 pv->pv_next = pp->pp_pv.pv_next; 1185 pv->pv_next = pp->pp_pv.pv_next;
1186 pp->pp_pv.pv_next = pv; 1186 pp->pp_pv.pv_next = pv;
1187 pv->pv_va = va; 1187 pv->pv_va = va;
1188 } 1188 }
1189 pv->pv_pmap = pm; 1189 pv->pv_pmap = pm;
1190 pv->pv_ptep = ptep; 1190 pv->pv_ptep = ptep;
1191 PMAP_COUNT(pv_enter); 1191 PMAP_COUNT(pv_enter);
1192 1192
1193#ifdef PMAP_PV_DEBUG 1193#ifdef PMAP_PV_DEBUG
1194 printf("pv %p alias added va=%016lx -> pa=%016lx\n", pv, va, pa); 1194 printf("pv %p alias added va=%016lx -> pa=%016lx\n", pv, va, pa);
1195 pv_dump(pp, printf); 1195 pv_dump(pp, printf);
1196#endif 1196#endif
1197 1197
1198 return 0; 1198 return 0;
1199} 1199}
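The pv_va field handled above does double duty: the low, sub-page bits hold the referenced/modified attributes while the upper bits hold the page-aligned VA, which is why the code masks with (PAGE_SIZE - 1) before installing a new address. A minimal sketch of that packing, with illustrative values only:

/*
 * Sketch of the pv_va packing used above.  EX_PAGE_SIZE and the flag
 * values are illustrative stand-ins for this example only.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE	4096u
#define EX_REF		0x1u	/* "referenced", kept in the low bits */
#define EX_MOD		0x2u	/* "modified",   kept in the low bits */

int
main(void)
{
	uint64_t pv_va = EX_REF;		/* page already marked referenced */
	uint64_t va = 0x7f0000200000ULL;	/* new page-aligned mapping address */

	pv_va = (pv_va & (EX_PAGE_SIZE - 1)) | va;	/* keep flags, install VA */
	printf("va    = %#llx\n",
	    (unsigned long long)(pv_va & ~(uint64_t)(EX_PAGE_SIZE - 1)));
	printf("flags = %#llx\n",
	    (unsigned long long)(pv_va & (EX_PAGE_SIZE - 1)));
	return 0;
}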
1200 1200
1201void 1201void
1202pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1202pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1203{ 1203{
1204 1204
1205 _pmap_enter(pmap_kernel(), va, pa, prot, flags | PMAP_WIRED, true); 1205 _pmap_enter(pmap_kernel(), va, pa, prot, flags | PMAP_WIRED, true);
1206} 1206}
1207 1207
1208void 1208void
1209pmap_kremove(vaddr_t va, vsize_t size) 1209pmap_kremove(vaddr_t va, vsize_t size)
1210{ 1210{
1211 struct pmap *kpm = pmap_kernel(); 1211 struct pmap *kpm = pmap_kernel();
1212 1212
1213 UVMHIST_FUNC(__func__); 1213 UVMHIST_FUNC(__func__);
1214 UVMHIST_CALLED(pmaphist); 1214 UVMHIST_CALLED(pmaphist);
1215 1215
1216 UVMHIST_LOG(pmaphist, "va=%llx, size=%llx", va, size, 0, 0); 1216 UVMHIST_LOG(pmaphist, "va=%llx, size=%llx", va, size, 0, 0);
1217 1217
1218 KDASSERT((va & PGOFSET) == 0); 1218 KDASSERT((va & PGOFSET) == 0);
1219 KDASSERT((size & PGOFSET) == 0); 1219 KDASSERT((size & PGOFSET) == 0);
1220 1220
1221 KDASSERT(!IN_KSEG_ADDR(va)); 1221 KDASSERT(!IN_KSEG_ADDR(va));
1222 KDASSERT(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS)); 1222 KDASSERT(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS));
1223 1223
1224 pm_lock(kpm); 1224 pm_lock(kpm);
1225 _pmap_remove(kpm, va, va + size, true, NULL); 1225 _pmap_remove(kpm, va, va + size, true, NULL);
1226 pm_unlock(kpm); 1226 pm_unlock(kpm);
1227} 1227}
1228 1228
1229static void 1229static void
1230_pmap_protect_pv(struct pmap_page *pp, struct pv_entry *pv, vm_prot_t prot) 1230_pmap_protect_pv(struct pmap_page *pp, struct pv_entry *pv, vm_prot_t prot)
1231{ 1231{
1232 pt_entry_t *ptep, pte; 1232 pt_entry_t *ptep, pte;
1233 vm_prot_t pteprot; 1233 vm_prot_t pteprot;
1234 uint32_t mdattr; 1234 uint32_t mdattr;
1235 const bool user = (pv->pv_pmap != pmap_kernel()); 1235 const bool user = (pv->pv_pmap != pmap_kernel());
1236 1236
1237 UVMHIST_FUNC(__func__); 1237 UVMHIST_FUNC(__func__);
1238 UVMHIST_CALLED(pmaphist); 1238 UVMHIST_CALLED(pmaphist);
1239 1239
1240 UVMHIST_LOG(pmaphist, "pp=%p, pv=%p, prot=%08x", pp, pv, prot, 0); 1240 UVMHIST_LOG(pmaphist, "pp=%p, pv=%p, prot=%08x", pp, pv, prot, 0);
1241 KASSERT(mutex_owned(&pv->pv_pmap->pm_lock)); 1241 KASSERT(mutex_owned(&pv->pv_pmap->pm_lock));
1242 1242
1243 /* get prot mask from referenced/modified */ 1243 /* get prot mask from referenced/modified */
1244 mdattr = pp->pp_pv.pv_va & (VM_PROT_READ | VM_PROT_WRITE); 1244 mdattr = pp->pp_pv.pv_va & (VM_PROT_READ | VM_PROT_WRITE);
1245 ptep = pv->pv_ptep; 1245 ptep = pv->pv_ptep;
1246 pte = *ptep; 1246 pte = *ptep;
1247 1247
1248 /* get prot mask from pte */ 1248 /* get prot mask from pte */
1249 pteprot = 0; 1249 pteprot = 0;
1250 if (pte & LX_BLKPAG_AF) 1250 if (pte & LX_BLKPAG_AF)
1251 pteprot |= VM_PROT_READ; 1251 pteprot |= VM_PROT_READ;
1252 if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW) 1252 if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW)
1253 pteprot |= VM_PROT_WRITE; 1253 pteprot |= VM_PROT_WRITE;
1254 if (l3pte_executable(pte, user)) 1254 if (l3pte_executable(pte, user))
1255 pteprot |= VM_PROT_EXECUTE; 1255 pteprot |= VM_PROT_EXECUTE;
1256 1256
1257 /* new prot = prot & pteprot & mdattr */ 1257 /* new prot = prot & pteprot & mdattr */
1258 pte = _pmap_pte_adjust_prot(pte, prot & pteprot, mdattr, user); 1258 pte = _pmap_pte_adjust_prot(pte, prot & pteprot, mdattr, user);
1259 atomic_swap_64(ptep, pte); 1259 atomic_swap_64(ptep, pte);
1260 AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, trunc_page(pv->pv_va), 1260 AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, trunc_page(pv->pv_va),
1261 true); 1261 true);
1262} 1262}
1263 1263
1264void 1264void
1265pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 1265pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1266{ 1266{
1267 pt_entry_t *ptep = NULL, pte; 1267 pt_entry_t *ptep = NULL, pte;
1268 vaddr_t va; 1268 vaddr_t va;
1269 vsize_t blocksize = 0; 1269 vsize_t blocksize = 0;
1270 const bool user = (pm != pmap_kernel()); 1270 const bool user = (pm != pmap_kernel());
1271 1271
1272 KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE)); 1272 KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));
1273 1273
1274 UVMHIST_FUNC(__func__); 1274 UVMHIST_FUNC(__func__);
1275 UVMHIST_CALLED(pmaphist); 1275 UVMHIST_CALLED(pmaphist);
1276 1276
1277 UVMHIST_LOG(pmaphist, "pm=%p, sva=%016lx, eva=%016lx, prot=%08x", 1277 UVMHIST_LOG(pmaphist, "pm=%p, sva=%016lx, eva=%016lx, prot=%08x",
1278 pm, sva, eva, prot); 1278 pm, sva, eva, prot);
1279 1279
1280 KASSERT_PM_ADDR(pm, sva); 1280 KASSERT_PM_ADDR(pm, sva);
1281 KASSERT(!IN_KSEG_ADDR(sva)); 1281 KASSERT(!IN_KSEG_ADDR(sva));
1282 1282
1283 /* PROT_EXEC requires implicit PROT_READ */ 1283 /* PROT_EXEC requires implicit PROT_READ */
1284 if (prot & VM_PROT_EXECUTE) 1284 if (prot & VM_PROT_EXECUTE)
1285 prot |= VM_PROT_READ; 1285 prot |= VM_PROT_READ;
1286 1286
1287 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1287 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1288 PMAP_COUNT(protect_remove_fallback); 1288 PMAP_COUNT(protect_remove_fallback);
1289 pmap_remove(pm, sva, eva); 1289 pmap_remove(pm, sva, eva);
1290 return; 1290 return;
1291 } 1291 }
1292 PMAP_COUNT(protect); 1292 PMAP_COUNT(protect);
1293 1293
1294 KDASSERT((sva & PAGE_MASK) == 0); 1294 KDASSERT((sva & PAGE_MASK) == 0);
1295 KDASSERT((eva & PAGE_MASK) == 0); 1295 KDASSERT((eva & PAGE_MASK) == 0);
1296 1296
1297 pm_lock(pm); 1297 pm_lock(pm);
1298 1298
1299 for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) { 1299 for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) {
1300#ifdef UVMHIST 1300#ifdef UVMHIST
1301 pt_entry_t opte; 1301 pt_entry_t opte;
1302#endif 1302#endif
1303 struct vm_page *pg; 1303 struct vm_page *pg;
1304 struct pmap_page *pp; 1304 struct pmap_page *pp;
1305 paddr_t pa; 1305 paddr_t pa;
1306 uint32_t mdattr; 1306 uint32_t mdattr;
1307 bool executable; 1307 bool executable;
1308 1308
1309 /* does va belong to the same L3 table as before? */ 1309 /* does va belong to the same L3 table as before? */
1310 if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0)) 1310 if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0))
1311 ptep++; 1311 ptep++;
1312 else 1312 else
1313 ptep = _pmap_pte_lookup_bs(pm, va, &blocksize); 1313 ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
1314 1314
1315 pte = *ptep; 1315 pte = *ptep;
1316 if (!lxpde_valid(pte)) { 1316 if (!lxpde_valid(pte)) {
1317 PMAP_COUNT(protect_none); 1317 PMAP_COUNT(protect_none);
1318 continue; 1318 continue;
1319 } 1319 }
1320 1320
1321 pa = lxpde_pa(pte); 1321 pa = lxpde_pa(pte);
1322 pg = PHYS_TO_VM_PAGE(pa); 1322 pg = PHYS_TO_VM_PAGE(pa);
1323 if (pg != NULL) { 1323 if (pg != NULL) {
1324 pp = VM_PAGE_TO_PP(pg); 1324 pp = VM_PAGE_TO_PP(pg);
1325 PMAP_COUNT(protect_managed); 1325 PMAP_COUNT(protect_managed);
1326 } else { 1326 } else {
1327#ifdef __HAVE_PMAP_PV_TRACK 1327#ifdef __HAVE_PMAP_PV_TRACK
1328 pp = pmap_pv_tracked(pa); 1328 pp = pmap_pv_tracked(pa);
1329#ifdef PMAPCOUNTERS 1329#ifdef PMAPCOUNTERS
1330 if (pp != NULL) 1330 if (pp != NULL)
1331 PMAP_COUNT(protect_pvmanaged); 1331 PMAP_COUNT(protect_pvmanaged);
1332 else 1332 else
1333 PMAP_COUNT(protect_unmanaged); 1333 PMAP_COUNT(protect_unmanaged);
1334#endif 1334#endif
1335#else 1335#else
1336 pp = NULL; 1336 pp = NULL;
1337 PMAP_COUNT(protect_unmanaged); 1337 PMAP_COUNT(protect_unmanaged);
1338#endif /* __HAVE_PMAP_PV_TRACK */ 1338#endif /* __HAVE_PMAP_PV_TRACK */
1339 } 1339 }
1340 1340
1341 if (pp != NULL) { 1341 if (pp != NULL) {
1342 /* get prot mask from referenced/modified */ 1342 /* get prot mask from referenced/modified */
1343 mdattr = pp->pp_pv.pv_va & 1343 mdattr = pp->pp_pv.pv_va &
1344 (VM_PROT_READ | VM_PROT_WRITE); 1344 (VM_PROT_READ | VM_PROT_WRITE);
1345 } else { 1345 } else {
1346 /* unmanaged page */ 1346 /* unmanaged page */
1347 mdattr = VM_PROT_ALL; 1347 mdattr = VM_PROT_ALL;
1348 } 1348 }
1349 1349
1350#ifdef UVMHIST 1350#ifdef UVMHIST
1351 opte = pte; 1351 opte = pte;
1352#endif 1352#endif
1353 executable = l3pte_executable(pte, user); 1353 executable = l3pte_executable(pte, user);
1354 pte = _pmap_pte_adjust_prot(pte, prot, mdattr, user); 1354 pte = _pmap_pte_adjust_prot(pte, prot, mdattr, user);
1355 1355
1356 if (!executable && (prot & VM_PROT_EXECUTE)) { 1356 if (!executable && (prot & VM_PROT_EXECUTE)) {
1357 /* non-exec -> exec */ 1357 /* non-exec -> exec */
1358 UVMHIST_LOG(pmaphist, "icache_sync: " 1358 UVMHIST_LOG(pmaphist, "icache_sync: "
1359 "pm=%p, va=%016lx, pte: %016lx -> %016lx", 1359 "pm=%p, va=%016lx, pte: %016lx -> %016lx",
1360 pm, va, opte, pte); 1360 pm, va, opte, pte);
1361 if (!l3pte_readable(pte)) { 1361 if (!l3pte_readable(pte)) {
1362 PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, true); 1362 PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, true);
1363 atomic_swap_64(ptep, pte); 1363 atomic_swap_64(ptep, pte);
1364 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 1364 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1365 } else { 1365 } else {
1366 atomic_swap_64(ptep, pte); 1366 atomic_swap_64(ptep, pte);
1367 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 1367 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1368 cpu_icache_sync_range(va, PAGE_SIZE); 1368 cpu_icache_sync_range(va, PAGE_SIZE);
1369 } 1369 }
1370 } else { 1370 } else {
1371 atomic_swap_64(ptep, pte); 1371 atomic_swap_64(ptep, pte);
1372 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 1372 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1373 } 1373 }
1374 } 1374 }
1375 1375
1376 pm_unlock(pm); 1376 pm_unlock(pm);
1377} 1377}
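The loop in pmap_protect() above advances with (va + blocksize) & ~(blocksize - 1): it steps past the current block or page and rounds down to the next boundary, so a range that starts unaligned inside a large block is not overshot. A small arithmetic sketch, using the usual 4KB-granule sizes purely for illustration:

/*
 * Arithmetic sketch of the va-advance used in the pmap_protect() loop:
 * step by the current block size, then round down to a block boundary.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	const uint64_t l3_size = 4096;			/* L3 page */
	const uint64_t l2_size = 2ULL * 1024 * 1024;	/* L2 block */
	uint64_t va = 0x1ffff0;				/* unaligned, inside an L2 block */

	printf("%#llx\n",
	    (unsigned long long)((va + l2_size) & ~(l2_size - 1)));	/* -> 0x200000 */
	va = 0x200000;
	printf("%#llx\n",
	    (unsigned long long)((va + l3_size) & ~(l3_size - 1)));	/* -> 0x201000 */
	return 0;
}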
1378 1378
1379void 1379void
1380pmap_activate(struct lwp *l) 1380pmap_activate(struct lwp *l)
1381{ 1381{
1382 struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap; 1382 struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
1383 uint64_t ttbr0, tcr; 1383 uint64_t ttbr0, tcr;
1384 1384
1385 UVMHIST_FUNC(__func__); 1385 UVMHIST_FUNC(__func__);
1386 UVMHIST_CALLED(pmaphist); 1386 UVMHIST_CALLED(pmaphist);
1387 1387
1388 if (pm == pmap_kernel()) 1388 if (pm == pmap_kernel())
1389 return; 1389 return;
1390 if (l != curlwp) 1390 if (l != curlwp)
1391 return; 1391 return;
1392 1392
1393 KASSERT(pm->pm_l0table != NULL); 1393 KASSERT(pm->pm_l0table != NULL);
1394 1394
1395 UVMHIST_LOG(pmaphist, "lwp=%p (pid=%d)", l, l->l_proc->p_pid, 0, 0); 1395 UVMHIST_LOG(pmaphist, "lwp=%p (pid=%d)", l, l->l_proc->p_pid, 0, 0);
1396 1396
1397 /* Disable translation table walks using TTBR0 */ 1397 /* Disable translation table walks using TTBR0 */
1398 tcr = reg_tcr_el1_read(); 1398 tcr = reg_tcr_el1_read();
1399 reg_tcr_el1_write(tcr | TCR_EPD0); 1399 reg_tcr_el1_write(tcr | TCR_EPD0);
1400 __asm __volatile("isb" ::: "memory"); 1400 __asm __volatile("isb" ::: "memory");
1401 1401
1402 /* XXX */ 1402 /* XXX */
1403 CTASSERT(PID_MAX <= 65535); /* 16bit ASID */ 1403 CTASSERT(PID_MAX <= 65535); /* 16bit ASID */
1404 if (pm->pm_asid == -1) 1404 if (pm->pm_asid == -1)
1405 pm->pm_asid = l->l_proc->p_pid; 1405 pm->pm_asid = l->l_proc->p_pid;
1406 1406
1407 ttbr0 = ((uint64_t)pm->pm_asid << 48) | pm->pm_l0table_pa; 1407 ttbr0 = ((uint64_t)pm->pm_asid << 48) | pm->pm_l0table_pa;
1408 cpu_set_ttbr0(ttbr0); 1408 cpu_set_ttbr0(ttbr0);
1409 1409
1410 /* Re-enable translation table walks using TTBR0 */ 1410 /* Re-enable translation table walks using TTBR0 */
1411 tcr = reg_tcr_el1_read(); 1411 tcr = reg_tcr_el1_read();
1412 reg_tcr_el1_write(tcr & ~TCR_EPD0); 1412 reg_tcr_el1_write(tcr & ~TCR_EPD0);
1413 __asm __volatile("isb" ::: "memory"); 1413 __asm __volatile("isb" ::: "memory");
1414 1414
1415 pm->pm_activated = true; 1415 pm->pm_activated = true;
1416 1416
1417 PMAP_COUNT(activate); 1417 PMAP_COUNT(activate);
1418} 1418}
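pmap_activate() above disables TTBR0 walks, installs the new translation table base, then re-enables them; the TTBR0 value itself packs the 16-bit ASID into bits 63:48 above the L0 table's physical address, as the shift in the code shows. A hypothetical example of that packing:

/*
 * Minimal sketch of the TTBR0 value composed above: 16-bit ASID in
 * bits 63:48, L0 table physical address below.  The ASID and the
 * physical address here are hypothetical.
 */
#include <stdio.h>
#include <inttypes.h>
#include <stdint.h>

int
main(void)
{
	uint64_t asid = 1234;			/* hypothetical ASID (fits in 16 bits) */
	uint64_t l0table_pa = 0x40123000;	/* hypothetical page-aligned L0 table PA */
	uint64_t ttbr0 = (asid << 48) | l0table_pa;

	printf("ttbr0 = %#018" PRIx64 "\n", ttbr0);
	printf("asid  = %" PRIu64 "\n", ttbr0 >> 48);
	printf("pa    = %#" PRIx64 "\n", ttbr0 & ((UINT64_C(1) << 48) - 1));
	return 0;
}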
1419 1419
1420void 1420void
1421pmap_deactivate(struct lwp *l) 1421pmap_deactivate(struct lwp *l)
1422{ 1422{
1423 struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap; 1423 struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
1424 uint64_t tcr; 1424 uint64_t tcr;
1425 1425
1426 UVMHIST_FUNC(__func__); 1426 UVMHIST_FUNC(__func__);
1427 UVMHIST_CALLED(pmaphist); 1427 UVMHIST_CALLED(pmaphist);
1428 1428
1429 if (pm == pmap_kernel()) 1429 if (pm == pmap_kernel())
1430 return; 1430 return;
1431 1431
1432 UVMHIST_LOG(pmaphist, "lwp=%p, asid=%d", l, pm->pm_asid, 0, 0); 1432 UVMHIST_LOG(pmaphist, "lwp=%p, asid=%d", l, pm->pm_asid, 0, 0);
1433 1433
1434 /* Disable translation table walks using TTBR0 */ 1434 /* Disable translation table walks using TTBR0 */
1435 tcr = reg_tcr_el1_read(); 1435 tcr = reg_tcr_el1_read();
1436 reg_tcr_el1_write(tcr | TCR_EPD0); 1436 reg_tcr_el1_write(tcr | TCR_EPD0);
1437 __asm __volatile("isb" ::: "memory"); 1437 __asm __volatile("isb" ::: "memory");
1438 1438
1439 /* XXX */ 1439 /* XXX */
1440 pm->pm_activated = false; 1440 pm->pm_activated = false;
1441 1441
1442 PMAP_COUNT(deactivate); 1442 PMAP_COUNT(deactivate);
1443} 1443}
1444 1444
1445struct pmap * 1445struct pmap *
1446pmap_create(void) 1446pmap_create(void)
1447{ 1447{
1448 struct pmap *pm; 1448 struct pmap *pm;
1449 1449
1450 UVMHIST_FUNC(__func__); 1450 UVMHIST_FUNC(__func__);
1451 UVMHIST_CALLED(pmaphist); 1451 UVMHIST_CALLED(pmaphist);
1452 1452
1453 pm = pool_cache_get(&_pmap_cache, PR_WAITOK); 1453 pm = pool_cache_get(&_pmap_cache, PR_WAITOK);
1454 memset(pm, 0, sizeof(*pm)); 1454 memset(pm, 0, sizeof(*pm));
1455 pm->pm_refcnt = 1; 1455 pm->pm_refcnt = 1;
1456 pm->pm_idlepdp = 0; 1456 pm->pm_idlepdp = 0;
1457 pm->pm_asid = -1; 1457 pm->pm_asid = -1;
1458 LIST_INIT(&pm->pm_vmlist); 1458 LIST_INIT(&pm->pm_vmlist);
1459 mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_VM); 1459 mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_VM);
1460 1460
1461 pm->pm_l0table_pa = pmap_alloc_pdp(pm, NULL, 0, true); 1461 pm->pm_l0table_pa = pmap_alloc_pdp(pm, NULL, 0, true);
1462 KASSERT(pm->pm_l0table_pa != POOL_PADDR_INVALID); 1462 KASSERT(pm->pm_l0table_pa != POOL_PADDR_INVALID);
1463 pm->pm_l0table = (pd_entry_t *)AARCH64_PA_TO_KVA(pm->pm_l0table_pa); 1463 pm->pm_l0table = (pd_entry_t *)AARCH64_PA_TO_KVA(pm->pm_l0table_pa);
1464 KASSERT(((vaddr_t)pm->pm_l0table & (PAGE_SIZE - 1)) == 0); 1464 KASSERT(((vaddr_t)pm->pm_l0table & (PAGE_SIZE - 1)) == 0);
1465 1465
1466 UVMHIST_LOG(pmaphist, "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx", 1466 UVMHIST_LOG(pmaphist, "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx",
1467 pm, pm->pm_l0table, pm->pm_l0table_pa, 0); 1467 pm, pm->pm_l0table, pm->pm_l0table_pa, 0);
1468 1468
1469 PMAP_COUNT(create); 1469 PMAP_COUNT(create);
1470 return pm; 1470 return pm;
1471} 1471}
1472 1472
1473void 1473void
1474pmap_destroy(struct pmap *pm) 1474pmap_destroy(struct pmap *pm)
1475{ 1475{
1476 unsigned int refcnt; 1476 unsigned int refcnt;
1477 1477
1478 UVMHIST_FUNC(__func__); 1478 UVMHIST_FUNC(__func__);
1479 UVMHIST_CALLED(pmaphist); 1479 UVMHIST_CALLED(pmaphist);
1480 1480
1481 UVMHIST_LOG(pmaphist, 1481 UVMHIST_LOG(pmaphist,
1482 "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx, refcnt=%d", 1482 "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx, refcnt=%d",
1483 pm, pm->pm_l0table, pm->pm_l0table_pa, pm->pm_refcnt); 1483 pm, pm->pm_l0table, pm->pm_l0table_pa, pm->pm_refcnt);
1484 1484
1485 if (pm == NULL) 1485 if (pm == NULL)
1486 return; 1486 return;
1487 1487
1488 if (pm == pmap_kernel()) 1488 if (pm == pmap_kernel())
1489 panic("cannot destroy kernel pmap"); 1489 panic("cannot destroy kernel pmap");
1490 1490
1491 refcnt = atomic_dec_uint_nv(&pm->pm_refcnt); 1491 refcnt = atomic_dec_uint_nv(&pm->pm_refcnt);
1492 if (refcnt > 0) 1492 if (refcnt > 0)
1493 return; 1493 return;
1494 1494
1495 aarch64_tlbi_by_asid(pm->pm_asid); 1495 aarch64_tlbi_by_asid(pm->pm_asid);
1496 1496
1497 _pmap_free_pdp_all(pm); 1497 _pmap_free_pdp_all(pm);
1498 mutex_destroy(&pm->pm_lock); 1498 mutex_destroy(&pm->pm_lock);
1499 1499
1500 pool_cache_put(&_pmap_cache, pm); 1500 pool_cache_put(&_pmap_cache, pm);
1501 1501
1502 PMAP_COUNT(destroy); 1502 PMAP_COUNT(destroy);
1503} 1503}
1504 1504
1505static inline void 1505static inline void
1506_pmap_pdp_setparent(struct pmap *pm, struct vm_page *pg, pt_entry_t *ptep) 1506_pmap_pdp_setparent(struct pmap *pm, struct vm_page *pg, pt_entry_t *ptep)
1507{ 1507{
1508 1508
1509 if ((pm != pmap_kernel()) && (pg != NULL)) { 1509 if ((pm != pmap_kernel()) && (pg != NULL)) {
1510 KASSERT(mutex_owned(&pm->pm_lock)); 1510 KASSERT(mutex_owned(&pm->pm_lock));
1511 VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = ptep; 1511 VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = ptep;
1512 } 1512 }
1513} 1513}
1514 1514
1515/* 1515/*
1516 * increment the reference counter of the page descriptor page. 1516 * increment the reference counter of the page descriptor page.
1517 * the reference counter should be equal to 1517 * the reference counter should be equal to
1518 * 1 + the number of valid entries the page has. 1518 * 1 + the number of valid entries the page has.
1519 */ 1519 */
1520static inline void 1520static inline void
1521_pmap_pdp_addref(struct pmap *pm, paddr_t pdppa, struct vm_page *pdppg_hint) 1521_pmap_pdp_addref(struct pmap *pm, paddr_t pdppa, struct vm_page *pdppg_hint)
1522{ 1522{
1523 struct vm_page *pg; 1523 struct vm_page *pg;
1524 1524
1525 /* kernel L0-L3 pages are never freed */ 1525 /* kernel L0-L3 pages are never freed */
1526 if (pm == pmap_kernel()) 1526 if (pm == pmap_kernel())
1527 return; 1527 return;
1528 1528
1529 KASSERT(mutex_owned(&pm->pm_lock)); 1529 KASSERT(mutex_owned(&pm->pm_lock));
1530 1530
1531 /* no need for L0 page */ 1531 /* no need for L0 page */
1532 if (pm->pm_l0table_pa == pdppa) 1532 if (pm->pm_l0table_pa == pdppa)
1533 return; 1533 return;
1534 1534
1535 pg = pdppg_hint; 1535 pg = pdppg_hint;
1536 if (pg == NULL) 1536 if (pg == NULL)
1537 pg = PHYS_TO_VM_PAGE(pdppa); 1537 pg = PHYS_TO_VM_PAGE(pdppa);
1538 KASSERT(pg != NULL); 1538 KASSERT(pg != NULL);
1539 1539
1540 pg->wire_count++; 1540 pg->wire_count++;
1541 1541
1542 KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1), 1542 KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
1543 "pg=%p, wire_count=%d", pg, pg->wire_count); 1543 "pg=%p, wire_count=%d", pg, pg->wire_count);
1544} 1544}
1545 1545
1546/* 1546/*
1547 * decrement the reference counter of the page descriptor page. 1547 * decrement the reference counter of the page descriptor page.
1548 * if the reference counter is 1 (i.e., empty), the page is freed and true is returned. 1548 * if the reference counter is 1 (i.e., empty), the page is freed and true is returned.
1549 * otherwise false is returned. 1549 * otherwise false is returned.
1550 * kernel pages and the L0 page descriptor page are never freed. 1550 * kernel pages and the L0 page descriptor page are never freed.
1551 */ 1551 */
1552static bool 1552static bool
1553_pmap_pdp_delref(struct pmap *pm, paddr_t pdppa, bool do_free_pdp) 1553_pmap_pdp_delref(struct pmap *pm, paddr_t pdppa, bool do_free_pdp)
1554{ 1554{
1555 struct vm_page *pg; 1555 struct vm_page *pg;
1556 bool removed; 1556 bool removed;
1557 uint16_t wirecount; 1557 uint16_t wirecount;
1558 1558
1559 /* kernel L0-L3 pages are never freed */ 1559 /* kernel L0-L3 pages are never freed */
1560 if (pm == pmap_kernel()) 1560 if (pm == pmap_kernel())
1561 return false; 1561 return false;
1562 1562
1563 KASSERT(mutex_owned(&pm->pm_lock)); 1563 KASSERT(mutex_owned(&pm->pm_lock));
1564 1564
1565 /* no need for L0 page */ 1565 /* no need for L0 page */
1566 if (pm->pm_l0table_pa == pdppa) 1566 if (pm->pm_l0table_pa == pdppa)
1567 return false; 1567 return false;
1568 1568
1569 pg = PHYS_TO_VM_PAGE(pdppa); 1569 pg = PHYS_TO_VM_PAGE(pdppa);
1570 KASSERT(pg != NULL); 1570 KASSERT(pg != NULL);
1571 1571
1572 wirecount = --pg->wire_count; 1572 wirecount = --pg->wire_count;
1573 1573
1574 if (!do_free_pdp) { 1574 if (!do_free_pdp) {
1575 /* 1575 /*
1576 * pm_idlepdp is only updated by pmap_page_protect() with 1576 * pm_idlepdp is only updated by pmap_page_protect() with
1577 * VM_PROT_NONE. it is not exact because pmap_enter() is not 1577 * VM_PROT_NONE. it is not exact because pmap_enter() is not
1578 * taken into account, but it is a useful hint for sweeping. 1578 * taken into account, but it is a useful hint for sweeping.
1579 */ 1579 */
1580 if (wirecount == 1) 1580 if (wirecount == 1)
1581 pm->pm_idlepdp++; 1581 pm->pm_idlepdp++;
1582 return false; 1582 return false;
1583 } 1583 }
1584 1584
1585 /* if no reference, free pdp */ 1585 /* if no reference, free pdp */
1586 removed = false; 1586 removed = false;
1587 while (wirecount == 1) { 1587 while (wirecount == 1) {
1588 pd_entry_t *ptep_in_parent, opte __diagused; 1588 pd_entry_t *ptep_in_parent, opte __diagused;
1589 ptep_in_parent = VM_PAGE_TO_MD(pg)->mdpg_ptep_parent; 1589 ptep_in_parent = VM_PAGE_TO_MD(pg)->mdpg_ptep_parent;
1590 if (ptep_in_parent == NULL) { 1590 if (ptep_in_parent == NULL) {
1591 /* no parent */ 1591 /* no parent */
1592 pmap_free_pdp(pm, pg); 1592 pmap_free_pdp(pm, pg);
1593 removed = true; 1593 removed = true;
1594 break; 1594 break;
1595 } 1595 }
1596 1596
1597 /* unlink from parent */ 1597 /* unlink from parent */
1598 opte = atomic_swap_64(ptep_in_parent, 0); 1598 opte = atomic_swap_64(ptep_in_parent, 0);
1599 KASSERT(lxpde_valid(opte)); 1599 KASSERT(lxpde_valid(opte));
1600 wirecount = atomic_add_32_nv(&pg->wire_count, -1); /* 1 -> 0 */ 1600 wirecount = atomic_add_32_nv(&pg->wire_count, -1); /* 1 -> 0 */
1601 KASSERT(wirecount == 0); 1601 KASSERT(wirecount == 0);
1602 pmap_free_pdp(pm, pg); 1602 pmap_free_pdp(pm, pg);
1603 removed = true; 1603 removed = true;
1604 1604
1605 /* L3->L2->L1. no need for L0 */ 1605 /* L3->L2->L1. no need for L0 */
1606 pdppa = AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep_in_parent)); 1606 pdppa = AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep_in_parent));
1607 if (pdppa == pm->pm_l0table_pa) 1607 if (pdppa == pm->pm_l0table_pa)
1608 break; 1608 break;
1609 1609
1610 pg = PHYS_TO_VM_PAGE(pdppa); 1610 pg = PHYS_TO_VM_PAGE(pdppa);
1611 KASSERT(pg != NULL); 1611 KASSERT(pg != NULL);
1612 KASSERTMSG(pg->wire_count >= 1, 1612 KASSERTMSG(pg->wire_count >= 1,
1613 "wire_count=%d", pg->wire_count); 1613 "wire_count=%d", pg->wire_count);
1614 /* decrement wire_count of parent */ 1614 /* decrement wire_count of parent */
1615 wirecount = atomic_add_32_nv(&pg->wire_count, -1); 1615 wirecount = atomic_add_32_nv(&pg->wire_count, -1);
1616 KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1), 1616 KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
1617 "pm=%p[%d], pg=%p, wire_count=%d", 1617 "pm=%p[%d], pg=%p, wire_count=%d",
1618 pm, pm->pm_asid, pg, pg->wire_count); 1618 pm, pm->pm_asid, pg, pg->wire_count);
1619 } 1619 }
1620 1620
1621 return removed; 1621 return removed;
1622} 1622}
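The two helpers above maintain the invariant stated earlier: a page descriptor page's wire_count equals 1 plus the number of valid entries it holds, so a count of 1 means the page is empty and _pmap_pdp_delref() may unlink it from its parent and free it, walking up L3 to L2 to L1. A toy model of that counting, not the kernel data structures:

/*
 * Toy model of the wire_count invariant used by the helpers above:
 * wire_count == 1 + number of valid entries, so a count of 1 means
 * the page descriptor page is empty and may be freed.
 */
#include <stdio.h>

int
main(void)
{
	unsigned wire_count = 1;	/* freshly allocated page descriptor page */

	wire_count++;			/* first entry installed  -> 2 */
	wire_count++;			/* second entry installed -> 3 */
	wire_count--;			/* one entry removed      -> 2 */
	wire_count--;			/* last entry removed     -> 1 */

	printf("%s\n", wire_count == 1 ? "empty: free the page" : "still in use");
	return 0;
}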
1623 1623
1624static int 1624static int
1625_pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, 1625_pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
1626 u_int flags, bool kenter) 1626 u_int flags, bool kenter)
1627{ 1627{
1628 struct vm_page *pdppg, *pdppg0; 1628 struct vm_page *pdppg, *pdppg0;
1629 struct pmap_page *pp, *opp, *pps[2]; 1629 struct pmap_page *pp, *opp, *pps[2];
1630 struct pv_entry *spv, *opv = NULL; 1630 struct pv_entry *spv, *opv = NULL;
1631 pd_entry_t pde; 1631 pd_entry_t pde;
1632 pt_entry_t attr, pte, opte, *ptep; 1632 pt_entry_t attr, pte, opte, *ptep;
1633 pd_entry_t *l0, *l1, *l2, *l3; 1633 pd_entry_t *l0, *l1, *l2, *l3;
1634 paddr_t pdppa, pdppa0; 1634 paddr_t pdppa, pdppa0;
1635 uint32_t mdattr; 1635 uint32_t mdattr;
1636 unsigned int idx; 1636 unsigned int idx;
1637 int error = 0; 1637 int error = 0;
1638 const bool user = (pm != pmap_kernel()); 1638 const bool user = (pm != pmap_kernel());
1639 bool need_sync_icache, need_enter_pv; 1639 bool need_sync_icache, need_enter_pv;
1640 bool l3only = true; 1640 bool l3only = true;
1641 1641
1642 UVMHIST_FUNC(__func__); 1642 UVMHIST_FUNC(__func__);
1643 UVMHIST_CALLED(pmaphist); 1643 UVMHIST_CALLED(pmaphist);
1644 1644
1645 UVMHIST_LOG(pmaphist, "pm=%p, kentermode=%d", pm, kenter, 0, 0); 1645 UVMHIST_LOG(pmaphist, "pm=%p, kentermode=%d", pm, kenter, 0, 0);
1646 UVMHIST_LOG(pmaphist, "va=%016lx, pa=%016lx, prot=%08x, flags=%08x", 1646 UVMHIST_LOG(pmaphist, "va=%016lx, pa=%016lx, prot=%08x, flags=%08x",
1647 va, pa, prot, flags); 1647 va, pa, prot, flags);
1648 1648
1649 KASSERT_PM_ADDR(pm, va); 1649 KASSERT_PM_ADDR(pm, va);
1650 KASSERT(!IN_KSEG_ADDR(va)); 1650 KASSERT(!IN_KSEG_ADDR(va));
1651 1651
1652#ifdef PMAPCOUNTERS 1652#ifdef PMAPCOUNTERS
1653 PMAP_COUNT(mappings); 1653 PMAP_COUNT(mappings);
1654 if (_pmap_color(va) == _pmap_color(pa)) { 1654 if (_pmap_color(va) == _pmap_color(pa)) {
1655 if (user) { 1655 if (user) {
1656 PMAP_COUNT(user_mappings); 1656 PMAP_COUNT(user_mappings);
1657 } else { 1657 } else {
1658 PMAP_COUNT(kern_mappings); 1658 PMAP_COUNT(kern_mappings);
1659 } 1659 }
1660 } else if (flags & PMAP_WIRED) { 1660 } else if (flags & PMAP_WIRED) {
1661 if (user) { 1661 if (user) {
1662 PMAP_COUNT(user_mappings_bad_wired); 1662 PMAP_COUNT(user_mappings_bad_wired);
1663 } else { 1663 } else {
1664 PMAP_COUNT(kern_mappings_bad_wired); 1664 PMAP_COUNT(kern_mappings_bad_wired);
1665 } 1665 }
1666 } else { 1666 } else {
1667 if (user) { 1667 if (user) {
1668 PMAP_COUNT(user_mappings_bad); 1668 PMAP_COUNT(user_mappings_bad);
1669 } else { 1669 } else {
1670 PMAP_COUNT(kern_mappings_bad); 1670 PMAP_COUNT(kern_mappings_bad);
1671 } 1671 }
1672 } 1672 }
1673#endif 1673#endif
1674 1674
1675 if (kenter) { 1675 if (kenter) {
1676 pp = NULL; 1676 pp = NULL;
1677 } else { 1677 } else {
1678 struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 1678 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
1679 if (pg != NULL) { 1679 if (pg != NULL) {
1680 pp = VM_PAGE_TO_PP(pg); 1680 pp = VM_PAGE_TO_PP(pg);
1681 PMAP_COUNT(managed_mappings); 1681 PMAP_COUNT(managed_mappings);
1682 } else { 1682 } else {
1683#ifdef __HAVE_PMAP_PV_TRACK 1683#ifdef __HAVE_PMAP_PV_TRACK
1684 pp = pmap_pv_tracked(pa); 1684 pp = pmap_pv_tracked(pa);
1685#ifdef PMAPCOUNTERS 1685#ifdef PMAPCOUNTERS
1686 if (pp != NULL) 1686 if (pp != NULL)
1687 PMAP_COUNT(pvmanaged_mappings); 1687 PMAP_COUNT(pvmanaged_mappings);
1688 else 1688 else
1689 PMAP_COUNT(unmanaged_mappings); 1689 PMAP_COUNT(unmanaged_mappings);
1690#endif 1690#endif
1691#else 1691#else
1692 pp = NULL; 1692 pp = NULL;
1693 PMAP_COUNT(unmanaged_mappings); 1693 PMAP_COUNT(unmanaged_mappings);
1694#endif /* __HAVE_PMAP_PV_TRACK */ 1694#endif /* __HAVE_PMAP_PV_TRACK */
1695 } 1695 }
1696 } 1696 }
1697 1697
1698 if (pp != NULL) { 1698 if (pp != NULL) {
1699 /* 1699 /*
1700 * allocate pv in advance of pm_lock() to avoid locking against ourselves. 1700 * allocate pv in advance of pm_lock() to avoid locking against ourselves.
1701 * pool_cache_get() may call pmap_kenter() internally. 1701 * pool_cache_get() may call pmap_kenter() internally.
1702 */ 1702 */
1703 spv = pool_cache_get(&_pmap_pv_pool, PR_NOWAIT); 1703 spv = pool_cache_get(&_pmap_pv_pool, PR_NOWAIT);
1704 need_enter_pv = true; 1704 need_enter_pv = true;
1705 } else { 1705 } else {
1706 spv = NULL; 1706 spv = NULL;
1707 need_enter_pv = false; 1707 need_enter_pv = false;
1708 } 1708 }
1709 1709
1710 pm_lock(pm); 1710 pm_lock(pm);
1711 1711
1712 if (pm->pm_idlepdp >= PDPSWEEP_TRIGGER) { 1712 if (pm->pm_idlepdp >= PDPSWEEP_TRIGGER) {
1713 if (_pmap_sweep_pdp(pm) != 0) { 1713 if (_pmap_sweep_pdp(pm) != 0) {
1714 /* several L1-L3 page table pages have been freed */ 1714 /* several L1-L3 page table pages have been freed */
1715 aarch64_tlbi_by_asid(pm->pm_asid); 1715 aarch64_tlbi_by_asid(pm->pm_asid);
1716 } 1716 }
1717 } 1717 }
1718 1718
1719 /* 1719 /*
1720 * traverse L0 -> L1 -> L2 -> L3 table with growing pdp if needed. 1720 * traverse L0 -> L1 -> L2 -> L3 table with growing pdp if needed.
1721 */ 1721 */
1722 l0 = pm->pm_l0table; 1722 l0 = pm->pm_l0table;
1723 1723
1724 idx = l0pde_index(va); 1724 idx = l0pde_index(va);
1725 pde = l0[idx]; 1725 pde = l0[idx];
1726 if (!l0pde_valid(pde)) { 1726 if (!l0pde_valid(pde)) {
1727 /* no need to increment L0 occupancy. L0 page never freed */ 1727 /* no need to increment L0 occupancy. L0 page never freed */
1728 pdppa = pmap_alloc_pdp(pm, &pdppg, flags, false); /* L1 pdp */ 1728 pdppa = pmap_alloc_pdp(pm, &pdppg, flags, false); /* L1 pdp */
1729 if (pdppa == POOL_PADDR_INVALID) { 1729 if (pdppa == POOL_PADDR_INVALID) {
1730 if (flags & PMAP_CANFAIL) { 1730 if (flags & PMAP_CANFAIL) {
1731 error = ENOMEM; 1731 error = ENOMEM;
1732 goto fail0; 1732 goto fail0;
1733 } 1733 }
1734 pm_unlock(pm); 1734 pm_unlock(pm);
1735 panic("%s: cannot allocate L1 table", __func__); 1735 panic("%s: cannot allocate L1 table", __func__);
1736 } 1736 }
1737 atomic_swap_64(&l0[idx], pdppa | L0_TABLE); 1737 atomic_swap_64(&l0[idx], pdppa | L0_TABLE);
1738 _pmap_pdp_setparent(pm, pdppg, &l0[idx]); 1738 _pmap_pdp_setparent(pm, pdppg, &l0[idx]);
1739 l3only = false; 1739 l3only = false;
1740 } else { 1740 } else {
1741 pdppa = l0pde_pa(pde); 1741 pdppa = l0pde_pa(pde);
1742 pdppg = NULL; 1742 pdppg = NULL;
1743 } 1743 }
1744 l1 = (void *)AARCH64_PA_TO_KVA(pdppa); 1744 l1 = (void *)AARCH64_PA_TO_KVA(pdppa);
1745 1745
1746 idx = l1pde_index(va); 1746 idx = l1pde_index(va);
1747 pde = l1[idx]; 1747 pde = l1[idx];
1748 if (!l1pde_valid(pde)) { 1748 if (!l1pde_valid(pde)) {
1749 pdppa0 = pdppa; 1749 pdppa0 = pdppa;
1750 pdppg0 = pdppg; 1750 pdppg0 = pdppg;
1751 pdppa = pmap_alloc_pdp(pm, &pdppg, flags, false); /* L2 pdp */ 1751 pdppa = pmap_alloc_pdp(pm, &pdppg, flags, false); /* L2 pdp */
1752 if (pdppa == POOL_PADDR_INVALID) { 1752 if (pdppa == POOL_PADDR_INVALID) {
1753 if (flags & PMAP_CANFAIL) { 1753 if (flags & PMAP_CANFAIL) {
1754 error = ENOMEM; 1754 error = ENOMEM;
1755 goto fail0; 1755 goto fail0;
1756 } 1756 }
1757 pm_unlock(pm); 1757 pm_unlock(pm);
1758 panic("%s: cannot allocate L2 table", __func__); 1758 panic("%s: cannot allocate L2 table", __func__);
1759 } 1759 }
1760 atomic_swap_64(&l1[idx], pdppa | L1_TABLE); 1760 atomic_swap_64(&l1[idx], pdppa | L1_TABLE);
1761 _pmap_pdp_addref(pm, pdppa0, pdppg0); /* L1 occupancy++ */ 1761 _pmap_pdp_addref(pm, pdppa0, pdppg0); /* L1 occupancy++ */
1762 _pmap_pdp_setparent(pm, pdppg, &l1[idx]); 1762 _pmap_pdp_setparent(pm, pdppg, &l1[idx]);
1763 l3only = false; 1763 l3only = false;
1764 } else { 1764 } else {
1765 pdppa = l1pde_pa(pde); 1765 pdppa = l1pde_pa(pde);
1766 pdppg = NULL; 1766 pdppg = NULL;
1767 } 1767 }
1768 l2 = (void *)AARCH64_PA_TO_KVA(pdppa); 1768 l2 = (void *)AARCH64_PA_TO_KVA(pdppa);
1769 1769
1770 idx = l2pde_index(va); 1770 idx = l2pde_index(va);
1771 pde = l2[idx]; 1771 pde = l2[idx];
1772 if (!l2pde_valid(pde)) { 1772 if (!l2pde_valid(pde)) {
1773 pdppa0 = pdppa; 1773 pdppa0 = pdppa;
1774 pdppg0 = pdppg; 1774 pdppg0 = pdppg;
1775 pdppa = pmap_alloc_pdp(pm, &pdppg, flags, false); /* L3 pdp */ 1775 pdppa = pmap_alloc_pdp(pm, &pdppg, flags, false); /* L3 pdp */
1776 if (pdppa == POOL_PADDR_INVALID) { 1776 if (pdppa == POOL_PADDR_INVALID) {
1777 if (flags & PMAP_CANFAIL) { 1777 if (flags & PMAP_CANFAIL) {
1778 error = ENOMEM; 1778 error = ENOMEM;
1779 goto fail0; 1779 goto fail0;
1780 } 1780 }
1781 pm_unlock(pm); 1781 pm_unlock(pm);
1782 panic("%s: cannot allocate L3 table", __func__); 1782 panic("%s: cannot allocate L3 table", __func__);
1783 } 1783 }
1784 atomic_swap_64(&l2[idx], pdppa | L2_TABLE); 1784 atomic_swap_64(&l2[idx], pdppa | L2_TABLE);
1785 _pmap_pdp_addref(pm, pdppa0, pdppg0); /* L2 occupancy++ */ 1785 _pmap_pdp_addref(pm, pdppa0, pdppg0); /* L2 occupancy++ */
1786 _pmap_pdp_setparent(pm, pdppg, &l2[idx]); 1786 _pmap_pdp_setparent(pm, pdppg, &l2[idx]);
1787 l3only = false; 1787 l3only = false;
1788 } else { 1788 } else {
1789 pdppa = l2pde_pa(pde); 1789 pdppa = l2pde_pa(pde);
1790 pdppg = NULL; 1790 pdppg = NULL;
1791 } 1791 }
1792 l3 = (void *)AARCH64_PA_TO_KVA(pdppa); 1792 l3 = (void *)AARCH64_PA_TO_KVA(pdppa);
1793 1793
1794 idx = l3pte_index(va); 1794 idx = l3pte_index(va);
1795 ptep = &l3[idx]; /* as PTE */ 1795 ptep = &l3[idx]; /* as PTE */
1796 1796
1797 opte = atomic_swap_64(ptep, 0); 1797 opte = atomic_swap_64(ptep, 0);
1798 need_sync_icache = (prot & VM_PROT_EXECUTE); 1798 need_sync_icache = (prot & VM_PROT_EXECUTE);
1799 1799
1800 /* for lock ordering for old page and new page */ 1800 /* for lock ordering for old page and new page */
1801 pps[0] = pp; 1801 pps[0] = pp;
1802 pps[1] = NULL; 1802 pps[1] = NULL;
1803 1803
1804 /* remap? */ 1804 /* remap? */
1805 if (l3pte_valid(opte)) { 1805 if (l3pte_valid(opte)) {
1806 bool need_remove_pv; 1806 bool need_remove_pv;
1807 1807
1808 KASSERT(!kenter); /* pmap_kenter_pa() cannot override */ 1808 KASSERT(!kenter); /* pmap_kenter_pa() cannot override */
1809 if (opte & LX_BLKPAG_OS_WIRED) { 1809 if (opte & LX_BLKPAG_OS_WIRED) {
1810 PMSTAT_DEC_WIRED_COUNT(pm); 1810 PMSTAT_DEC_WIRED_COUNT(pm);
1811 } 1811 }
1812 PMSTAT_DEC_RESIDENT_COUNT(pm); 1812 PMSTAT_DEC_RESIDENT_COUNT(pm);
1813#ifdef PMAPCOUNTERS 1813#ifdef PMAPCOUNTERS
1814 PMAP_COUNT(remappings); 1814 PMAP_COUNT(remappings);
1815 if (user) { 1815 if (user) {
1816 PMAP_COUNT(user_mappings_changed); 1816 PMAP_COUNT(user_mappings_changed);
1817 } else { 1817 } else {
1818 PMAP_COUNT(kern_mappings_changed); 1818 PMAP_COUNT(kern_mappings_changed);
1819 } 1819 }
1820#endif 1820#endif
1821 UVMHIST_LOG(pmaphist, 1821 UVMHIST_LOG(pmaphist,
1822 "va=%016lx has already mapped." 1822 "va=%016lx has already mapped."
1823 " old-pa=%016lx new-pa=%016lx, old-pte=%016llx\n", 1823 " old-pa=%016lx new-pa=%016lx, old-pte=%016llx\n",
1824 va, l3pte_pa(opte), pa, opte); 1824 va, l3pte_pa(opte), pa, opte);
1825 1825
1826 if (pa == l3pte_pa(opte)) { 1826 if (pa == l3pte_pa(opte)) {
1827 /* old and new pte have same pa, no need to update pv */ 1827 /* old and new pte have same pa, no need to update pv */
1828 need_remove_pv = (pp == NULL); 1828 need_remove_pv = (pp == NULL);
1829 need_enter_pv = false; 1829 need_enter_pv = false;
1830 if (need_sync_icache && l3pte_executable(opte, user)) 1830 if (need_sync_icache && l3pte_executable(opte, user))
1831 need_sync_icache = false; 1831 need_sync_icache = false;
1832 } else { 1832 } else {
1833 need_remove_pv = true; 1833 need_remove_pv = true;
1834 } 1834 }
1835 1835
1836 if (need_remove_pv && 1836 if (need_remove_pv &&
1837 ((opp = phys_to_pp(l3pte_pa(opte))) != NULL)) { 1837 ((opp = phys_to_pp(l3pte_pa(opte))) != NULL)) {
1838 /* 1838 /*
1839 * need to lock both pp and opp (the old pp) in address order 1839 * need to lock both pp and opp (the old pp) in address order
1840 * to avoid deadlock; note that 'pp' may be NULL. 1840 * to avoid deadlock; note that 'pp' may be NULL.
1841 */ 1841 */
1842 if (pp < opp) { 1842 if (pp < opp) {
1843 pps[0] = pp; 1843 pps[0] = pp;
1844 pps[1] = opp; 1844 pps[1] = opp;
1845 } else { 1845 } else {
1846 pps[0] = opp; 1846 pps[0] = opp;
1847 pps[1] = pp; 1847 pps[1] = pp;
1848 } 1848 }
1849 if (pps[0] != NULL) 1849 if (pps[0] != NULL)
1850 pmap_pv_lock(pps[0]); 1850 pmap_pv_lock(pps[0]);
1851 if (pps[1] != NULL) 1851 if (pps[1] != NULL)
1852 pmap_pv_lock(pps[1]); 1852 pmap_pv_lock(pps[1]);
1853 opv = _pmap_remove_pv(opp, pm, va, opte); 1853 opv = _pmap_remove_pv(opp, pm, va, opte);
1854 } else { 1854 } else {
1855 if (pp != NULL) 1855 if (pp != NULL)
1856 pmap_pv_lock(pp); 1856 pmap_pv_lock(pp);
1857 } 1857 }
1858 } else { 1858 } else {
1859 if (pp != NULL) 1859 if (pp != NULL)
1860 pmap_pv_lock(pp); 1860 pmap_pv_lock(pp);
1861 } 1861 }
1862 1862
1863 if (!l3pte_valid(opte)) 1863 if (!l3pte_valid(opte))
1864 _pmap_pdp_addref(pm, pdppa, pdppg); /* L3 occupancy++ */ 1864 _pmap_pdp_addref(pm, pdppa, pdppg); /* L3 occupancy++ */
1865 1865
1866 /* 1866 /*
1867 * read permission is treated as an access permission internally, 1867 * read permission is treated as an access permission internally,
1868 * so PROT_READ must be added even if only PROT_WRITE or PROT_EXEC is requested. 1868 * so PROT_READ must be added even if only PROT_WRITE or PROT_EXEC is requested.
1869 */ 1869 */
1870 if (prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) 1870 if (prot & (VM_PROT_WRITE|VM_PROT_EXECUTE))
1871 prot |= VM_PROT_READ; 1871 prot |= VM_PROT_READ;
1872 if (flags & (VM_PROT_WRITE|VM_PROT_EXECUTE)) 1872 if (flags & (VM_PROT_WRITE|VM_PROT_EXECUTE))
1873 flags |= VM_PROT_READ; 1873 flags |= VM_PROT_READ;
1874 1874
1875 mdattr = VM_PROT_READ | VM_PROT_WRITE; 1875 mdattr = VM_PROT_READ | VM_PROT_WRITE;
1876 if (need_enter_pv) { 1876 if (need_enter_pv) {
1877 error = _pmap_enter_pv(pp, pm, &spv, va, ptep, pa, flags); 1877 error = _pmap_enter_pv(pp, pm, &spv, va, ptep, pa, flags);
1878 if (error != 0) { 1878 if (error != 0) {
1879 /* 1879 /*
1880 * If pmap_enter() fails, 1880 * If pmap_enter() fails,
1881 * it must not leave behind an existing pmap entry. 1881 * it must not leave behind an existing pmap entry.
1882 */ 1882 */
1883 if (lxpde_valid(opte)) { 1883 if (lxpde_valid(opte)) {
1884 bool pdpremoved = _pmap_pdp_delref(pm, 1884 bool pdpremoved = _pmap_pdp_delref(pm,
1885 AARCH64_KVA_TO_PA(trunc_page( 1885 AARCH64_KVA_TO_PA(trunc_page(
1886 (vaddr_t)ptep)), true); 1886 (vaddr_t)ptep)), true);
1887 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, 1887 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid,
1888 va, !pdpremoved); 1888 va, !pdpremoved);
1889 } 1889 }
1890 PMAP_COUNT(pv_entry_cannotalloc); 1890 PMAP_COUNT(pv_entry_cannotalloc);
1891 if (flags & PMAP_CANFAIL) 1891 if (flags & PMAP_CANFAIL)
1892 goto fail1; 1892 goto fail1;
1893 panic("pmap_enter: failed to allocate pv_entry"); 1893 panic("pmap_enter: failed to allocate pv_entry");
1894 } 1894 }
1895 } 1895 }
1896 1896
1897 if (pp != NULL) { 1897 if (pp != NULL) {
1898 /* update referenced/modified flags */ 1898 /* update referenced/modified flags */
1899 pp->pp_pv.pv_va |= (flags & (VM_PROT_READ | VM_PROT_WRITE)); 1899 pp->pp_pv.pv_va |= (flags & (VM_PROT_READ | VM_PROT_WRITE));
1900 mdattr &= (uint32_t)pp->pp_pv.pv_va; 1900 mdattr &= (uint32_t)pp->pp_pv.pv_va;
1901 } 1901 }
1902 1902
1903#ifdef PMAPCOUNTERS 1903#ifdef PMAPCOUNTERS
1904 switch (flags & PMAP_CACHE_MASK) { 1904 switch (flags & PMAP_CACHE_MASK) {
1905 case PMAP_NOCACHE: 1905 case PMAP_NOCACHE:
1906 case PMAP_NOCACHE_OVR: 1906 case PMAP_NOCACHE_OVR:
1907 PMAP_COUNT(uncached_mappings); 1907 PMAP_COUNT(uncached_mappings);
1908 break; 1908 break;
1909 } 1909 }
1910#endif 1910#endif
1911 1911
1912 attr = _pmap_pte_adjust_prot(L3_PAGE, prot, mdattr, user); 1912 attr = _pmap_pte_adjust_prot(L3_PAGE, prot, mdattr, user);
1913 attr = _pmap_pte_adjust_cacheflags(attr, flags); 1913 attr = _pmap_pte_adjust_cacheflags(attr, flags);
1914 if (VM_MAXUSER_ADDRESS > va) 1914 if (VM_MAXUSER_ADDRESS > va)
1915 attr |= LX_BLKPAG_APUSER; 1915 attr |= LX_BLKPAG_APUSER;
1916 if (flags & PMAP_WIRED) 1916 if (flags & PMAP_WIRED)
1917 attr |= LX_BLKPAG_OS_WIRED; 1917 attr |= LX_BLKPAG_OS_WIRED;
1918#ifdef MULTIPROCESSOR 1918#ifdef MULTIPROCESSOR
1919 attr |= LX_BLKPAG_SH_IS; 1919 attr |= LX_BLKPAG_SH_IS;
1920#endif 1920#endif
1921 1921
1922 pte = pa | attr; 1922 pte = pa | attr;
1923 1923
1924 if (need_sync_icache) { 1924 if (need_sync_icache) {
1925 /* non-exec -> exec */ 1925 /* non-exec -> exec */
1926 UVMHIST_LOG(pmaphist, 1926 UVMHIST_LOG(pmaphist,
1927 "icache_sync: pm=%p, va=%016lx, pte: %016lx -> %016lx", 1927 "icache_sync: pm=%p, va=%016lx, pte: %016lx -> %016lx",
1928 pm, va, opte, pte); 1928 pm, va, opte, pte);
1929 if (!l3pte_readable(pte)) { 1929 if (!l3pte_readable(pte)) {
1930 PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, l3only); 1930 PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, l3only);
1931 atomic_swap_64(ptep, pte); 1931 atomic_swap_64(ptep, pte);
1932 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 1932 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1933 } else { 1933 } else {
1934 atomic_swap_64(ptep, pte); 1934 atomic_swap_64(ptep, pte);
1935 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only); 1935 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
1936 cpu_icache_sync_range(va, PAGE_SIZE); 1936 cpu_icache_sync_range(va, PAGE_SIZE);
1937 } 1937 }
1938 } else { 1938 } else {
1939 atomic_swap_64(ptep, pte); 1939 atomic_swap_64(ptep, pte);
1940 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only); 1940 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
1941 } 1941 }
1942 1942
1943 if (pte & LX_BLKPAG_OS_WIRED) { 1943 if (pte & LX_BLKPAG_OS_WIRED) {
1944 PMSTAT_INC_WIRED_COUNT(pm); 1944 PMSTAT_INC_WIRED_COUNT(pm);
1945 } 1945 }
1946 PMSTAT_INC_RESIDENT_COUNT(pm); 1946 PMSTAT_INC_RESIDENT_COUNT(pm);
1947 1947
1948 fail1: 1948 fail1:
1949 if (pps[1] != NULL) 1949 if (pps[1] != NULL)
1950 pmap_pv_unlock(pps[1]); 1950 pmap_pv_unlock(pps[1]);
1951 if (pps[0] != NULL) 1951 if (pps[0] != NULL)
1952 pmap_pv_unlock(pps[0]); 1952 pmap_pv_unlock(pps[0]);
1953 fail0: 1953 fail0:
1954 pm_unlock(pm); 1954 pm_unlock(pm);
1955 1955
1956 /* spare pv was not used. discard */ 1956 /* spare pv was not used. discard */
1957 if (spv != NULL) 1957 if (spv != NULL)
1958 pool_cache_put(&_pmap_pv_pool, spv); 1958 pool_cache_put(&_pmap_pv_pool, spv);
1959 1959
1960 if (opv != NULL) 1960 if (opv != NULL)
1961 pool_cache_put(&_pmap_pv_pool, opv); 1961 pool_cache_put(&_pmap_pv_pool, opv);