Mon Sep 17 00:15:55 2018 UTC
delete debug printf and KASSERT.


(ryo)
diff -r1.23 -r1.24 src/sys/arch/aarch64/aarch64/pmap.c

cvs diff -r1.23 -r1.24 src/sys/arch/aarch64/aarch64/pmap.c

--- src/sys/arch/aarch64/aarch64/pmap.c 2018/09/10 16:43:24 1.23
+++ src/sys/arch/aarch64/aarch64/pmap.c 2018/09/17 00:15:55 1.24
@@ -1,2273 +1,2259 @@
1/* $NetBSD: pmap.c,v 1.23 2018/09/10 16:43:24 maxv Exp $ */ 1/* $NetBSD: pmap.c,v 1.24 2018/09/17 00:15:55 ryo Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> 4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.23 2018/09/10 16:43:24 maxv Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.24 2018/09/17 00:15:55 ryo Exp $");
31 31
32#include "opt_arm_debug.h" 32#include "opt_arm_debug.h"
33#include "opt_ddb.h" 33#include "opt_ddb.h"
34#include "opt_uvmhist.h" 34#include "opt_uvmhist.h"
35#include "opt_pmap.h" 35#include "opt_pmap.h"
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/types.h> 38#include <sys/types.h>
39#include <sys/kmem.h> 39#include <sys/kmem.h>
40#include <sys/vmem.h> 40#include <sys/vmem.h>
41#include <sys/atomic.h> 41#include <sys/atomic.h>
42 42
43#include <uvm/uvm.h> 43#include <uvm/uvm.h>
44 44
45#include <aarch64/pmap.h> 45#include <aarch64/pmap.h>
46#include <aarch64/pte.h> 46#include <aarch64/pte.h>
47#include <aarch64/armreg.h> 47#include <aarch64/armreg.h>
48#include <aarch64/cpufunc.h> 48#include <aarch64/cpufunc.h>
49#include <aarch64/machdep.h> 49#include <aarch64/machdep.h>
50 50
51//#define PMAP_DEBUG 51//#define PMAP_DEBUG
52//#define PMAP_PV_DEBUG 52//#define PMAP_PV_DEBUG
53 53
54#ifdef VERBOSE_INIT_ARM 54#ifdef VERBOSE_INIT_ARM
55#define VPRINTF(...) printf(__VA_ARGS__) 55#define VPRINTF(...) printf(__VA_ARGS__)
56#else 56#else
57#define VPRINTF(...) do { } while (/* CONSTCOND */ 0) 57#define VPRINTF(...) do { } while (/* CONSTCOND */ 0)
58#endif 58#endif
59 59
60UVMHIST_DEFINE(pmaphist); 60UVMHIST_DEFINE(pmaphist);
61#ifdef UVMHIST 61#ifdef UVMHIST
62 62
63#ifndef UVMHIST_PMAPHIST_SIZE 63#ifndef UVMHIST_PMAPHIST_SIZE
64#define UVMHIST_PMAPHIST_SIZE (1024 * 4) 64#define UVMHIST_PMAPHIST_SIZE (1024 * 4)
65#endif 65#endif
66 66
67struct kern_history_ent pmaphistbuf[UVMHIST_PMAPHIST_SIZE]; 67struct kern_history_ent pmaphistbuf[UVMHIST_PMAPHIST_SIZE];
68 68
69static void 69static void
70pmap_hist_init(void) 70pmap_hist_init(void)
71{ 71{
72 static bool inited = false; 72 static bool inited = false;
73 if (inited == false) { 73 if (inited == false) {
74 UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf); 74 UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
75 inited = true; 75 inited = true;
76 } 76 }
77} 77}
78#define PMAP_HIST_INIT() pmap_hist_init() 78#define PMAP_HIST_INIT() pmap_hist_init()
79 79
80#else /* UVMHIST */ 80#else /* UVMHIST */
81 81
82#define PMAP_HIST_INIT() ((void)0) 82#define PMAP_HIST_INIT() ((void)0)
83 83
84#endif /* UVMHIST */ 84#endif /* UVMHIST */
85 85
86 86
87#ifdef PMAPCOUNTERS 87#ifdef PMAPCOUNTERS
88#define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0) 88#define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0)
89#define PMAP_COUNTER(name, desc) \ 89#define PMAP_COUNTER(name, desc) \
90 struct evcnt pmap_evcnt_##name = \ 90 struct evcnt pmap_evcnt_##name = \
91 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \ 91 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
92 EVCNT_ATTACH_STATIC(pmap_evcnt_##name) 92 EVCNT_ATTACH_STATIC(pmap_evcnt_##name)
93 93
94PMAP_COUNTER(pdp_alloc_boot, "page table page allocate (uvm_pageboot_alloc)"); 94PMAP_COUNTER(pdp_alloc_boot, "page table page allocate (uvm_pageboot_alloc)");
95PMAP_COUNTER(pdp_alloc, "page table page allocate (uvm_pagealloc)"); 95PMAP_COUNTER(pdp_alloc, "page table page allocate (uvm_pagealloc)");
96PMAP_COUNTER(pdp_free, "page table page free (uvm_pagefree)"); 96PMAP_COUNTER(pdp_free, "page table page free (uvm_pagefree)");
97 97
98PMAP_COUNTER(pv_enter, "pv_entry allocate and link"); 98PMAP_COUNTER(pv_enter, "pv_entry allocate and link");
99PMAP_COUNTER(pv_remove, "pv_entry free and unlink"); 99PMAP_COUNTER(pv_remove, "pv_entry free and unlink");
100PMAP_COUNTER(pv_remove_nopv, "no pv_entry found when removing pv"); 100PMAP_COUNTER(pv_remove_nopv, "no pv_entry found when removing pv");
101 101
102PMAP_COUNTER(activate, "pmap_activate call"); 102PMAP_COUNTER(activate, "pmap_activate call");
103PMAP_COUNTER(deactivate, "pmap_deactivate call"); 103PMAP_COUNTER(deactivate, "pmap_deactivate call");
104PMAP_COUNTER(create, "pmap_create call"); 104PMAP_COUNTER(create, "pmap_create call");
105PMAP_COUNTER(destroy, "pmap_destroy call"); 105PMAP_COUNTER(destroy, "pmap_destroy call");
106 106
107PMAP_COUNTER(page_protect, "pmap_page_protect call"); 107PMAP_COUNTER(page_protect, "pmap_page_protect call");
108PMAP_COUNTER(protect, "pmap_protect call"); 108PMAP_COUNTER(protect, "pmap_protect call");
109PMAP_COUNTER(protect_remove_fallback, "pmap_protect with no-read"); 109PMAP_COUNTER(protect_remove_fallback, "pmap_protect with no-read");
110PMAP_COUNTER(protect_none, "pmap_protect non-exists pages"); 110PMAP_COUNTER(protect_none, "pmap_protect non-exists pages");
111PMAP_COUNTER(protect_managed, "pmap_protect managed pages"); 111PMAP_COUNTER(protect_managed, "pmap_protect managed pages");
112PMAP_COUNTER(protect_unmanaged, "pmap_protect unmanaged pages"); 112PMAP_COUNTER(protect_unmanaged, "pmap_protect unmanaged pages");
113 113
114PMAP_COUNTER(clear_modify, "pmap_clear_modify call"); 114PMAP_COUNTER(clear_modify, "pmap_clear_modify call");
115PMAP_COUNTER(clear_modify_pages, "pmap_clear_modify pages"); 115PMAP_COUNTER(clear_modify_pages, "pmap_clear_modify pages");
116PMAP_COUNTER(clear_reference, "pmap_clear_reference call"); 116PMAP_COUNTER(clear_reference, "pmap_clear_reference call");
117PMAP_COUNTER(clear_reference_pages, "pmap_clear_reference pages"); 117PMAP_COUNTER(clear_reference_pages, "pmap_clear_reference pages");
118 118
119PMAP_COUNTER(fixup_referenced, "page reference emulations"); 119PMAP_COUNTER(fixup_referenced, "page reference emulations");
120PMAP_COUNTER(fixup_modified, "page modification emulations"); 120PMAP_COUNTER(fixup_modified, "page modification emulations");
121 121
122PMAP_COUNTER(kern_mappings_bad, "kernel pages mapped (bad color)"); 122PMAP_COUNTER(kern_mappings_bad, "kernel pages mapped (bad color)");
123PMAP_COUNTER(kern_mappings_bad_wired, "kernel pages mapped (wired bad color)"); 123PMAP_COUNTER(kern_mappings_bad_wired, "kernel pages mapped (wired bad color)");
124PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color, not wired)"); 124PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color, not wired)");
125PMAP_COUNTER(user_mappings_bad_wired, "user pages mapped (bad colo, wiredr)"); 125PMAP_COUNTER(user_mappings_bad_wired, "user pages mapped (bad colo, wiredr)");
126PMAP_COUNTER(kern_mappings, "kernel pages mapped"); 126PMAP_COUNTER(kern_mappings, "kernel pages mapped");
127PMAP_COUNTER(user_mappings, "user pages mapped"); 127PMAP_COUNTER(user_mappings, "user pages mapped");
128PMAP_COUNTER(user_mappings_changed, "user mapping changed"); 128PMAP_COUNTER(user_mappings_changed, "user mapping changed");
129PMAP_COUNTER(kern_mappings_changed, "kernel mapping changed"); 129PMAP_COUNTER(kern_mappings_changed, "kernel mapping changed");
130PMAP_COUNTER(uncached_mappings, "uncached pages mapped"); 130PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
131PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped"); 131PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
132PMAP_COUNTER(managed_mappings, "managed pages mapped"); 132PMAP_COUNTER(managed_mappings, "managed pages mapped");
133PMAP_COUNTER(mappings, "pages mapped (including remapped)"); 133PMAP_COUNTER(mappings, "pages mapped (including remapped)");
134PMAP_COUNTER(remappings, "pages remapped"); 134PMAP_COUNTER(remappings, "pages remapped");
135 135
136PMAP_COUNTER(pv_entry_cannotalloc, "pv_entry allocation failure"); 136PMAP_COUNTER(pv_entry_cannotalloc, "pv_entry allocation failure");
137 137
138PMAP_COUNTER(unwire, "pmap_unwire call"); 138PMAP_COUNTER(unwire, "pmap_unwire call");
139PMAP_COUNTER(unwire_failure, "pmap_unwire failure"); 139PMAP_COUNTER(unwire_failure, "pmap_unwire failure");
140 140
141#else /* PMAPCOUNTERS */ 141#else /* PMAPCOUNTERS */
142#define PMAP_COUNT(name) __nothing 142#define PMAP_COUNT(name) __nothing
143#endif /* PMAPCOUNTERS */ 143#endif /* PMAPCOUNTERS */
144 144
145/* saved permission bit for referenced/modified emulation */ 145/* saved permission bit for referenced/modified emulation */
146#define LX_BLKPAG_OS_READ LX_BLKPAG_OS_0 146#define LX_BLKPAG_OS_READ LX_BLKPAG_OS_0
147#define LX_BLKPAG_OS_WRITE LX_BLKPAG_OS_1 147#define LX_BLKPAG_OS_WRITE LX_BLKPAG_OS_1
148#define LX_BLKPAG_OS_WIRED LX_BLKPAG_OS_2 148#define LX_BLKPAG_OS_WIRED LX_BLKPAG_OS_2
149#define LX_BLKPAG_OS_RWMASK (LX_BLKPAG_OS_WRITE|LX_BLKPAG_OS_READ) 149#define LX_BLKPAG_OS_RWMASK (LX_BLKPAG_OS_WRITE|LX_BLKPAG_OS_READ)
150 150
151/* memory attributes are configured MAIR_EL1 in locore */ 151/* memory attributes are configured MAIR_EL1 in locore */
152#define LX_BLKPAG_ATTR_NORMAL_WB __SHIFTIN(0, LX_BLKPAG_ATTR_INDX) 152#define LX_BLKPAG_ATTR_NORMAL_WB __SHIFTIN(0, LX_BLKPAG_ATTR_INDX)
153#define LX_BLKPAG_ATTR_NORMAL_NC __SHIFTIN(1, LX_BLKPAG_ATTR_INDX) 153#define LX_BLKPAG_ATTR_NORMAL_NC __SHIFTIN(1, LX_BLKPAG_ATTR_INDX)
154#define LX_BLKPAG_ATTR_NORMAL_WT __SHIFTIN(2, LX_BLKPAG_ATTR_INDX) 154#define LX_BLKPAG_ATTR_NORMAL_WT __SHIFTIN(2, LX_BLKPAG_ATTR_INDX)
155#define LX_BLKPAG_ATTR_DEVICE_MEM __SHIFTIN(3, LX_BLKPAG_ATTR_INDX) 155#define LX_BLKPAG_ATTR_DEVICE_MEM __SHIFTIN(3, LX_BLKPAG_ATTR_INDX)
156#define LX_BLKPAG_ATTR_MASK LX_BLKPAG_ATTR_INDX 156#define LX_BLKPAG_ATTR_MASK LX_BLKPAG_ATTR_INDX
157 157
158/* 158/*
159 * invalidate TLB entry for ASID and VA. 159 * invalidate TLB entry for ASID and VA.
160 * `ll' invalidates only the Last Level (usually L3) of TLB entry 160 * `ll' invalidates only the Last Level (usually L3) of TLB entry
161 */ 161 */
162#define AARCH64_TLBI_BY_ASID_VA(asid, va, ll) \ 162#define AARCH64_TLBI_BY_ASID_VA(asid, va, ll) \
163 do { \ 163 do { \
164 if ((ll)) { \ 164 if ((ll)) { \
165 if ((asid) == 0) \ 165 if ((asid) == 0) \
166 aarch64_tlbi_by_va_ll((va)); \ 166 aarch64_tlbi_by_va_ll((va)); \
167 else \ 167 else \
168 aarch64_tlbi_by_asid_va_ll((asid), (va)); \ 168 aarch64_tlbi_by_asid_va_ll((asid), (va)); \
169 } else { \ 169 } else { \
170 if ((asid) == 0) \ 170 if ((asid) == 0) \
171 aarch64_tlbi_by_va((va)); \ 171 aarch64_tlbi_by_va((va)); \
172 else \ 172 else \
173 aarch64_tlbi_by_asid_va((asid), (va)); \ 173 aarch64_tlbi_by_asid_va((asid), (va)); \
174 } \ 174 } \
175 } while (0/*CONSTCOND*/) 175 } while (0/*CONSTCOND*/)
176 176
177/* 177/*
178 * aarch64 require write permission in pte to invalidate instruction cache. 178 * aarch64 require write permission in pte to invalidate instruction cache.
179 * changing pte to writable temporarly before cpu_icache_sync_range(). 179 * changing pte to writable temporarly before cpu_icache_sync_range().
180 * this macro modifies PTE (*ptep). need to update PTE after this. 180 * this macro modifies PTE (*ptep). need to update PTE after this.
181 */ 181 */
182#define PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, ll) \ 182#define PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, ll) \
183 do { \ 183 do { \
184 pt_entry_t tpte; \ 184 pt_entry_t tpte; \
185 tpte = (pte) & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); \ 185 tpte = (pte) & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); \
186 tpte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); \ 186 tpte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); \
187 tpte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); \ 187 tpte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); \
188 atomic_swap_64((ptep), tpte); \ 188 atomic_swap_64((ptep), tpte); \
189 AARCH64_TLBI_BY_ASID_VA((pm)->pm_asid, (va), (ll)); \ 189 AARCH64_TLBI_BY_ASID_VA((pm)->pm_asid, (va), (ll)); \
190 cpu_icache_sync_range((va), PAGE_SIZE); \ 190 cpu_icache_sync_range((va), PAGE_SIZE); \
191 } while (0/*CONSTCOND*/) 191 } while (0/*CONSTCOND*/)
192 192
193struct pv_entry { 193struct pv_entry {
194 TAILQ_ENTRY(pv_entry) pv_link; 194 TAILQ_ENTRY(pv_entry) pv_link;
195 struct pmap *pv_pmap; 195 struct pmap *pv_pmap;
196 vaddr_t pv_va; 196 vaddr_t pv_va;
197 paddr_t pv_pa; /* debug */ 197 paddr_t pv_pa; /* debug */
198 pt_entry_t *pv_ptep; /* for fast pte lookup */ 198 pt_entry_t *pv_ptep; /* for fast pte lookup */
199}; 199};
200 200
201static pt_entry_t *_pmap_pte_lookup(struct pmap *, vaddr_t); 201static pt_entry_t *_pmap_pte_lookup(struct pmap *, vaddr_t);
202static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool); 202static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool);
203static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int); 203static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int);
204static void _pmap_remove(struct pmap *, vaddr_t, bool); 204static void _pmap_remove(struct pmap *, vaddr_t, bool);
205static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool); 205static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool);
206 206
207static struct pmap kernel_pmap; 207static struct pmap kernel_pmap;
208 208
209struct pmap * const kernel_pmap_ptr = &kernel_pmap; 209struct pmap * const kernel_pmap_ptr = &kernel_pmap;
210static vaddr_t pmap_maxkvaddr; 210static vaddr_t pmap_maxkvaddr;
211 211
212vaddr_t virtual_avail, virtual_end; 212vaddr_t virtual_avail, virtual_end;
213vaddr_t virtual_devmap_addr; 213vaddr_t virtual_devmap_addr;
214 214
215static struct pool_cache _pmap_cache; 215static struct pool_cache _pmap_cache;
216static struct pool_cache _pmap_pv_pool; 216static struct pool_cache _pmap_pv_pool;
217 217
218 218
219static inline void 219static inline void
220pmap_pv_lock(struct vm_page_md *md) 220pmap_pv_lock(struct vm_page_md *md)
221{ 221{
222 222
223 mutex_enter(&md->mdpg_pvlock); 223 mutex_enter(&md->mdpg_pvlock);
224} 224}
225 225
226static inline void 226static inline void
227pmap_pv_unlock(struct vm_page_md *md) 227pmap_pv_unlock(struct vm_page_md *md)
228{ 228{
229 229
230 mutex_exit(&md->mdpg_pvlock); 230 mutex_exit(&md->mdpg_pvlock);
231} 231}
232 232
233 233
234static inline void 234static inline void
235pm_lock(struct pmap *pm) 235pm_lock(struct pmap *pm)
236{ 236{
237 mutex_enter(&pm->pm_lock); 237 mutex_enter(&pm->pm_lock);
238} 238}
239 239
240static inline void 240static inline void
241pm_unlock(struct pmap *pm) 241pm_unlock(struct pmap *pm)
242{ 242{
243 mutex_exit(&pm->pm_lock); 243 mutex_exit(&pm->pm_lock);
244} 244}
245 245
246static void __unused 246static void __unused
247pm_addr_check(struct pmap *pm, vaddr_t va, const char *prefix) 247pm_addr_check(struct pmap *pm, vaddr_t va, const char *prefix)
248{ 248{
249 if (pm == pmap_kernel()) { 249 if (pm == pmap_kernel()) {
250 if (VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS) { 250 if (VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS) {
251 // ok 251 // ok
252 } else { 252 } else {
253 printf("%s: kernel pm %p:" 253 printf("%s: kernel pm %p:"
254 " va=%016lx is not kernel address\n", 254 " va=%016lx is not kernel address\n",
255 prefix, pm, va); 255 prefix, pm, va);
256 panic("pm_addr_check"); 256 panic("pm_addr_check");
257 } 257 }
258 } else { 258 } else {
259 if (VM_MIN_ADDRESS <= va && va <= VM_MAX_ADDRESS) { 259 if (VM_MIN_ADDRESS <= va && va <= VM_MAX_ADDRESS) {
260 // ok 260 // ok
261 } else { 261 } else {
262 printf( 262 printf(
263 "%s: user pm %p: va=%016lx is not kernel address\n", 263 "%s: user pm %p: va=%016lx is not kernel address\n",
264 prefix, pm, va); 264 prefix, pm, va);
265 panic("pm_addr_check"); 265 panic("pm_addr_check");
266 } 266 }
267 } 267 }
268} 268}
269#define PM_ADDR_CHECK(pm, va) pm_addr_check(pm, va, __func__) 269#define PM_ADDR_CHECK(pm, va) pm_addr_check(pm, va, __func__)
270#define IN_KSEG_ADDR(va) \ 270#define IN_KSEG_ADDR(va) \
271 ((AARCH64_KSEG_START <= (va)) && ((va) < AARCH64_KSEG_END)) 271 ((AARCH64_KSEG_START <= (va)) && ((va) < AARCH64_KSEG_END))
272 272
273 273
274static const struct pmap_devmap *pmap_devmap_table; 274static const struct pmap_devmap *pmap_devmap_table;
275 275
276/* XXX: for now, only support for devmap */ 276/* XXX: for now, only support for devmap */
277static vsize_t 277static vsize_t
278_pmap_map_chunk(pd_entry_t *l2, vaddr_t va, paddr_t pa, vsize_t size, 278_pmap_map_chunk(pd_entry_t *l2, vaddr_t va, paddr_t pa, vsize_t size,
279 vm_prot_t prot, u_int flags) 279 vm_prot_t prot, u_int flags)
280{ 280{
281 pd_entry_t oldpte __debugused; 281 pd_entry_t oldpte __debugused;
282 pt_entry_t attr; 282 pt_entry_t attr;
283 vsize_t resid; 283 vsize_t resid;
284 284
285 oldpte = l2[l2pde_index(va)]; 285 oldpte = l2[l2pde_index(va)];
286 KDASSERT(!l2pde_valid(oldpte)); 286 KDASSERT(!l2pde_valid(oldpte));
287 287
288 attr = _pmap_pte_adjust_prot(L2_BLOCK, prot, VM_PROT_ALL, false); 288 attr = _pmap_pte_adjust_prot(L2_BLOCK, prot, VM_PROT_ALL, false);
289 attr = _pmap_pte_adjust_cacheflags(attr, flags | PMAP_DEV); 289 attr = _pmap_pte_adjust_cacheflags(attr, flags | PMAP_DEV);
290#ifdef MULTIPROCESSOR 290#ifdef MULTIPROCESSOR
291 attr |= LX_BLKPAG_SH_IS; 291 attr |= LX_BLKPAG_SH_IS;
292#endif 292#endif
293 /* user cannot execute, and kernel follows the prot */ 293 /* user cannot execute, and kernel follows the prot */
294 attr |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); 294 attr |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN);
295 if (prot & VM_PROT_EXECUTE) 295 if (prot & VM_PROT_EXECUTE)
296 attr &= ~LX_BLKPAG_PXN; 296 attr &= ~LX_BLKPAG_PXN;
297 297
298 resid = (size + (L2_SIZE - 1)) & ~(L2_SIZE - 1); 298 resid = (size + (L2_SIZE - 1)) & ~(L2_SIZE - 1);
299 size = resid; 299 size = resid;
300 300
301 while (resid > 0) { 301 while (resid > 0) {
302 pt_entry_t pte; 302 pt_entry_t pte;
303 303
304 pte = pa | attr; 304 pte = pa | attr;
305 305
306 if (prot & VM_PROT_EXECUTE) { 306 if (prot & VM_PROT_EXECUTE) {
307 pt_entry_t tpte; 307 pt_entry_t tpte;
308 /* need write permission to invalidate icache */ 308 /* need write permission to invalidate icache */
309 tpte = pte & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); 309 tpte = pte & ~(LX_BLKPAG_AF|LX_BLKPAG_AP);
310 tpte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); 310 tpte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW);
311 tpte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); 311 tpte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN);
312 atomic_swap_64(&l2[l2pde_index(va)], tpte); 312 atomic_swap_64(&l2[l2pde_index(va)], tpte);
313 aarch64_tlbi_by_va(va); 313 aarch64_tlbi_by_va(va);
314 cpu_icache_sync_range(va, L2_SIZE); 314 cpu_icache_sync_range(va, L2_SIZE);
315 } 315 }
316 atomic_swap_64(&l2[l2pde_index(va)], pte); 316 atomic_swap_64(&l2[l2pde_index(va)], pte);
317 aarch64_tlbi_by_va(va); 317 aarch64_tlbi_by_va(va);
318 318
319 va += L2_SIZE; 319 va += L2_SIZE;
320 pa += L2_SIZE; 320 pa += L2_SIZE;
321 resid -= L2_SIZE; 321 resid -= L2_SIZE;
322 } 322 }
323 323
324 return size; 324 return size;
325} 325}
326 326
327void 327void
328pmap_devmap_register(const struct pmap_devmap *table) 328pmap_devmap_register(const struct pmap_devmap *table)
329{ 329{
330 pmap_devmap_table = table; 330 pmap_devmap_table = table;
331} 331}
332 332
333void 333void
334pmap_devmap_bootstrap(const struct pmap_devmap *table) 334pmap_devmap_bootstrap(const struct pmap_devmap *table)
335{ 335{
336 pd_entry_t *l0, *l1, *l2; 336 pd_entry_t *l0, *l1, *l2;
337 vaddr_t va; 337 vaddr_t va;
338 int i; 338 int i;
339 339
340 pmap_devmap_register(table); 340 pmap_devmap_register(table);
341 341
342 l0 = (void *)AARCH64_PA_TO_KVA(reg_ttbr1_el1_read()); 342 l0 = (void *)AARCH64_PA_TO_KVA(reg_ttbr1_el1_read());
343 343
344 VPRINTF("%s:\n", __func__); 344 VPRINTF("%s:\n", __func__);
345 for (i = 0; table[i].pd_size != 0; i++) { 345 for (i = 0; table[i].pd_size != 0; i++) {
346 VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n", 346 VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n",
347 table[i].pd_pa, 347 table[i].pd_pa,
348 table[i].pd_pa + table[i].pd_size - 1, 348 table[i].pd_pa + table[i].pd_size - 1,
349 table[i].pd_va); 349 table[i].pd_va);
350 va = table[i].pd_va; 350 va = table[i].pd_va;
351 351
352 /* update and check virtual_devmap_addr */ 352 /* update and check virtual_devmap_addr */
353 if ((virtual_devmap_addr == 0) || 353 if ((virtual_devmap_addr == 0) ||
354 (virtual_devmap_addr > va)) { 354 (virtual_devmap_addr > va)) {
355 virtual_devmap_addr = va; 355 virtual_devmap_addr = va;
356 356
357 /* XXX: only one L2 table is allocated for devmap */ 357 /* XXX: only one L2 table is allocated for devmap */
358 if ((VM_MAX_KERNEL_ADDRESS - virtual_devmap_addr) > 358 if ((VM_MAX_KERNEL_ADDRESS - virtual_devmap_addr) >
359 (L2_SIZE * Ln_ENTRIES)) { 359 (L2_SIZE * Ln_ENTRIES)) {
360 panic("devmap va:%016lx out of range." 360 panic("devmap va:%016lx out of range."
361 " available devmap range is %016lx-%016lx", 361 " available devmap range is %016lx-%016lx",
362 va, 362 va,
363 VM_MAX_KERNEL_ADDRESS - 363 VM_MAX_KERNEL_ADDRESS -
364 (L2_SIZE * Ln_ENTRIES), 364 (L2_SIZE * Ln_ENTRIES),
365 VM_MAX_KERNEL_ADDRESS); 365 VM_MAX_KERNEL_ADDRESS);
366 } 366 }
367 } 367 }
368 368
369 l1 = (void *)l0pde_pa(l0[l0pde_index(va)]); 369 l1 = (void *)l0pde_pa(l0[l0pde_index(va)]);
370 KASSERT(l1 != NULL); 370 KASSERT(l1 != NULL);
371 l1 = (void *)AARCH64_PA_TO_KVA((paddr_t)l1); 371 l1 = (void *)AARCH64_PA_TO_KVA((paddr_t)l1);
372 372
373 l2 = (void *)l1pde_pa(l1[l1pde_index(va)]); 373 l2 = (void *)l1pde_pa(l1[l1pde_index(va)]);
374 if (l2 == NULL) 374 if (l2 == NULL)
375 panic("L2 table for devmap is not allocated"); 375 panic("L2 table for devmap is not allocated");
376 376
377 l2 = (void *)AARCH64_PA_TO_KVA((paddr_t)l2); 377 l2 = (void *)AARCH64_PA_TO_KVA((paddr_t)l2);
378 378
379 _pmap_map_chunk(l2, 379 _pmap_map_chunk(l2,
380 table[i].pd_va, 380 table[i].pd_va,
381 table[i].pd_pa, 381 table[i].pd_pa,
382 table[i].pd_size, 382 table[i].pd_size,
383 table[i].pd_prot, 383 table[i].pd_prot,
384 table[i].pd_flags); 384 table[i].pd_flags);
385 } 385 }
386} 386}
387 387
388const struct pmap_devmap * 388const struct pmap_devmap *
389pmap_devmap_find_va(vaddr_t va, vsize_t size) 389pmap_devmap_find_va(vaddr_t va, vsize_t size)
390{ 390{
391 paddr_t endva; 391 paddr_t endva;
392 int i; 392 int i;
393 393
394 if (pmap_devmap_table == NULL) 394 if (pmap_devmap_table == NULL)
395 return NULL; 395 return NULL;
396 396
397 endva = va + size; 397 endva = va + size;
398 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 398 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
399 if ((va >= pmap_devmap_table[i].pd_va) && 399 if ((va >= pmap_devmap_table[i].pd_va) &&
400 (endva <= pmap_devmap_table[i].pd_va + 400 (endva <= pmap_devmap_table[i].pd_va +
401 pmap_devmap_table[i].pd_size)) 401 pmap_devmap_table[i].pd_size))
402 return &pmap_devmap_table[i]; 402 return &pmap_devmap_table[i];
403 } 403 }
404 return NULL; 404 return NULL;
405} 405}
406 406
407const struct pmap_devmap * 407const struct pmap_devmap *
408pmap_devmap_find_pa(paddr_t pa, psize_t size) 408pmap_devmap_find_pa(paddr_t pa, psize_t size)
409{ 409{
410 paddr_t endpa; 410 paddr_t endpa;
411 int i; 411 int i;
412 412
413 if (pmap_devmap_table == NULL) 413 if (pmap_devmap_table == NULL)
414 return NULL; 414 return NULL;
415 415
416 endpa = pa + size; 416 endpa = pa + size;
417 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 417 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
418 if (pa >= pmap_devmap_table[i].pd_pa && 418 if (pa >= pmap_devmap_table[i].pd_pa &&
419 (endpa <= pmap_devmap_table[i].pd_pa + 419 (endpa <= pmap_devmap_table[i].pd_pa +
420 pmap_devmap_table[i].pd_size)) 420 pmap_devmap_table[i].pd_size))
421 return (&pmap_devmap_table[i]); 421 return (&pmap_devmap_table[i]);
422 } 422 }
423 return NULL; 423 return NULL;
424} 424}
425 425
426vaddr_t 426vaddr_t
427pmap_devmap_phystov(paddr_t pa) 427pmap_devmap_phystov(paddr_t pa)
428{ 428{
429 const struct pmap_devmap *table; 429 const struct pmap_devmap *table;
430 paddr_t offset; 430 paddr_t offset;
431 431
432 table = pmap_devmap_find_pa(pa, 0); 432 table = pmap_devmap_find_pa(pa, 0);
433 if (table == NULL) 433 if (table == NULL)
434 return 0; 434 return 0;
435 435
436 offset = pa - table->pd_pa; 436 offset = pa - table->pd_pa;
437 return table->pd_va + offset; 437 return table->pd_va + offset;
438} 438}
439 439
440vaddr_t 440vaddr_t
441pmap_devmap_vtophys(paddr_t va) 441pmap_devmap_vtophys(paddr_t va)
442{ 442{
443 const struct pmap_devmap *table; 443 const struct pmap_devmap *table;
444 vaddr_t offset; 444 vaddr_t offset;
445 445
446 table = pmap_devmap_find_va(va, 0); 446 table = pmap_devmap_find_va(va, 0);
447 if (table == NULL) 447 if (table == NULL)
448 return 0; 448 return 0;
449 449
450 offset = va - table->pd_va; 450 offset = va - table->pd_va;
451 return table->pd_pa + offset; 451 return table->pd_pa + offset;
452} 452}
453 453
454void 454void
455pmap_bootstrap(vaddr_t vstart, vaddr_t vend) 455pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
456{ 456{
457 struct pmap *kpm; 457 struct pmap *kpm;
458 pd_entry_t *l0; 458 pd_entry_t *l0;
459 paddr_t l0pa; 459 paddr_t l0pa;
460 460
461 PMAP_HIST_INIT(); /* init once */ 461 PMAP_HIST_INIT(); /* init once */
462 462
463 UVMHIST_FUNC(__func__); 463 UVMHIST_FUNC(__func__);
464 UVMHIST_CALLED(pmaphist); 464 UVMHIST_CALLED(pmaphist);
465 465
466#if 0 466#if 0
467 /* uvmexp.ncolors = icachesize / icacheways / PAGE_SIZE; */ 467 /* uvmexp.ncolors = icachesize / icacheways / PAGE_SIZE; */
468 uvmexp.ncolors = aarch64_cache_vindexsize / PAGE_SIZE; 468 uvmexp.ncolors = aarch64_cache_vindexsize / PAGE_SIZE;
469#endif 469#endif
470 470
471 /* devmap already uses last of va? */ 471 /* devmap already uses last of va? */
472 if ((virtual_devmap_addr != 0) && (virtual_devmap_addr < vend)) 472 if ((virtual_devmap_addr != 0) && (virtual_devmap_addr < vend))
473 vend = virtual_devmap_addr; 473 vend = virtual_devmap_addr;
474 474
475 virtual_avail = vstart; 475 virtual_avail = vstart;
476 virtual_end = vend; 476 virtual_end = vend;
477 pmap_maxkvaddr = vstart; 477 pmap_maxkvaddr = vstart;
478 478
479 aarch64_tlbi_all(); 479 aarch64_tlbi_all();
480 480
481 l0pa = reg_ttbr1_el1_read(); 481 l0pa = reg_ttbr1_el1_read();
482 l0 = (void *)AARCH64_PA_TO_KVA(l0pa); 482 l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
483 483
484 memset(&kernel_pmap, 0, sizeof(kernel_pmap)); 484 memset(&kernel_pmap, 0, sizeof(kernel_pmap));
485 kpm = pmap_kernel(); 485 kpm = pmap_kernel();
486 kpm->pm_asid = 0; 486 kpm->pm_asid = 0;
487 kpm->pm_refcnt = 1; 487 kpm->pm_refcnt = 1;
488 kpm->pm_l0table = l0; 488 kpm->pm_l0table = l0;
489 kpm->pm_l0table_pa = l0pa; 489 kpm->pm_l0table_pa = l0pa;
490 kpm->pm_activated = true; 490 kpm->pm_activated = true;
491 SLIST_INIT(&kpm->pm_vmlist); 491 SLIST_INIT(&kpm->pm_vmlist);
492 mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_VM); 492 mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_VM);
493} 493}
494 494
495inline static int 495inline static int
496_pmap_color(vaddr_t addr) /* or paddr_t */ 496_pmap_color(vaddr_t addr) /* or paddr_t */
497{ 497{
498 return (addr >> PGSHIFT) & (uvmexp.ncolors - 1); 498 return (addr >> PGSHIFT) & (uvmexp.ncolors - 1);
499} 499}
500 500
501static int 501static int
502_pmap_pmap_ctor(void *arg, void *v, int flags) 502_pmap_pmap_ctor(void *arg, void *v, int flags)
503{ 503{
504 memset(v, 0, sizeof(struct pmap)); 504 memset(v, 0, sizeof(struct pmap));
505 return 0; 505 return 0;
506} 506}
507 507
508static int 508static int
509_pmap_pv_ctor(void *arg, void *v, int flags) 509_pmap_pv_ctor(void *arg, void *v, int flags)
510{ 510{
511 memset(v, 0, sizeof(struct pv_entry)); 511 memset(v, 0, sizeof(struct pv_entry));
512 return 0; 512 return 0;
513} 513}
514 514
515void 515void
516pmap_init(void) 516pmap_init(void)
517{ 517{
518 struct vm_page *pg; 518 struct vm_page *pg;
519 struct vm_page_md *md; 519 struct vm_page_md *md;
520 uvm_physseg_t i; 520 uvm_physseg_t i;
521 paddr_t pfn; 521 paddr_t pfn;
522 522
523 pool_cache_bootstrap(&_pmap_cache, sizeof(struct pmap), 523 pool_cache_bootstrap(&_pmap_cache, sizeof(struct pmap),
524 0, 0, 0, "pmappl", NULL, IPL_NONE, _pmap_pmap_ctor, NULL, NULL); 524 0, 0, 0, "pmappl", NULL, IPL_NONE, _pmap_pmap_ctor, NULL, NULL);
525 pool_cache_bootstrap(&_pmap_pv_pool, sizeof(struct pv_entry), 525 pool_cache_bootstrap(&_pmap_pv_pool, sizeof(struct pv_entry),
526 0, 0, 0, "pvpl", NULL, IPL_VM, _pmap_pv_ctor, NULL, NULL); 526 0, 0, 0, "pvpl", NULL, IPL_VM, _pmap_pv_ctor, NULL, NULL);
527 527
528 /* 528 /*
529 * initialize vm_page_md:mdpg_pvlock at this time. 529 * initialize vm_page_md:mdpg_pvlock at this time.
530 * When LOCKDEBUG, mutex_init() calls km_alloc, 530 * When LOCKDEBUG, mutex_init() calls km_alloc,
531 * but VM_MDPAGE_INIT() is called before initialized kmem_vm_arena. 531 * but VM_MDPAGE_INIT() is called before initialized kmem_vm_arena.
532 */ 532 */
533 for (i = uvm_physseg_get_first(); 533 for (i = uvm_physseg_get_first();
534 uvm_physseg_valid_p(i); 534 uvm_physseg_valid_p(i);
535 i = uvm_physseg_get_next(i)) { 535 i = uvm_physseg_get_next(i)) {
536 for (pfn = uvm_physseg_get_start(i); 536 for (pfn = uvm_physseg_get_start(i);
537 pfn < uvm_physseg_get_end(i); 537 pfn < uvm_physseg_get_end(i);
538 pfn++) { 538 pfn++) {
539 pg = PHYS_TO_VM_PAGE(ptoa(pfn)); 539 pg = PHYS_TO_VM_PAGE(ptoa(pfn));
540 md = VM_PAGE_TO_MD(pg); 540 md = VM_PAGE_TO_MD(pg);
541 mutex_init(&md->mdpg_pvlock, MUTEX_SPIN, IPL_VM); 541 mutex_init(&md->mdpg_pvlock, MUTEX_SPIN, IPL_VM);
542 } 542 }
543 } 543 }
544} 544}
545 545
546void 546void
547pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) 547pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
548{ 548{
549 *vstartp = virtual_avail; 549 *vstartp = virtual_avail;
550 *vendp = virtual_end; 550 *vendp = virtual_end;
551} 551}
552 552
553vaddr_t 553vaddr_t
554pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) 554pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
555{ 555{
556 int npage; 556 int npage;
557 paddr_t pa; 557 paddr_t pa;
558 vaddr_t va; 558 vaddr_t va;
559 psize_t bank_npage; 559 psize_t bank_npage;
560 uvm_physseg_t bank; 560 uvm_physseg_t bank;
561 561
562 UVMHIST_FUNC(__func__); 562 UVMHIST_FUNC(__func__);
563 UVMHIST_CALLED(pmaphist); 563 UVMHIST_CALLED(pmaphist);
564 564
565 UVMHIST_LOG(pmaphist, "size=%llu, *vstartp=%llx, *vendp=%llx", 565 UVMHIST_LOG(pmaphist, "size=%llu, *vstartp=%llx, *vendp=%llx",
566 size, *vstartp, *vendp, 0); 566 size, *vstartp, *vendp, 0);
567 567
568 size = round_page(size); 568 size = round_page(size);
569 npage = atop(size); 569 npage = atop(size);
570 570
571 for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank); 571 for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank);
572 bank = uvm_physseg_get_next(bank)) { 572 bank = uvm_physseg_get_next(bank)) {
573 573
574 bank_npage = uvm_physseg_get_avail_end(bank) - 574 bank_npage = uvm_physseg_get_avail_end(bank) -
575 uvm_physseg_get_avail_start(bank); 575 uvm_physseg_get_avail_start(bank);
576 if (npage <= bank_npage) 576 if (npage <= bank_npage)
577 break; 577 break;
578 } 578 }
579 579
580 if (!uvm_physseg_valid_p(bank)) { 580 if (!uvm_physseg_valid_p(bank)) {
581 panic("%s: no memory", __func__); 581 panic("%s: no memory", __func__);
582 } 582 }
583 583
584 /* Steal pages */ 584 /* Steal pages */
585 pa = ptoa(uvm_physseg_get_avail_start(bank)); 585 pa = ptoa(uvm_physseg_get_avail_start(bank));
586 va = AARCH64_PA_TO_KVA(pa); 586 va = AARCH64_PA_TO_KVA(pa);
587 uvm_physseg_unplug(atop(pa), npage); 587 uvm_physseg_unplug(atop(pa), npage);
588 588
589 for (; npage > 0; npage--, pa += PAGE_SIZE) 589 for (; npage > 0; npage--, pa += PAGE_SIZE)
590 pmap_zero_page(pa); 590 pmap_zero_page(pa);
591 591
592 return va; 592 return va;
593} 593}
594 594
595void 595void
596pmap_reference(struct pmap *pm) 596pmap_reference(struct pmap *pm)
597{ 597{
598 atomic_inc_uint(&pm->pm_refcnt); 598 atomic_inc_uint(&pm->pm_refcnt);
599} 599}
600 600
601pd_entry_t * 601pd_entry_t *
602pmap_alloc_pdp(struct pmap *pm, paddr_t *pap) 602pmap_alloc_pdp(struct pmap *pm, paddr_t *pap)
603{ 603{
604 paddr_t pa; 604 paddr_t pa;
605 605
606 UVMHIST_FUNC(__func__); 606 UVMHIST_FUNC(__func__);
607 UVMHIST_CALLED(pmaphist); 607 UVMHIST_CALLED(pmaphist);
608 608
609 if (uvm.page_init_done) { 609 if (uvm.page_init_done) {
610 struct vm_page *pg; 610 struct vm_page *pg;
611 611
612 pg = uvm_pagealloc(NULL, 0, NULL, 612 pg = uvm_pagealloc(NULL, 0, NULL,
613 UVM_PGA_USERESERVE | UVM_PGA_ZERO); 613 UVM_PGA_USERESERVE | UVM_PGA_ZERO);
614 if (pg == NULL) 614 if (pg == NULL)
615 panic("%s: cannot allocate L3 table", __func__); 615 panic("%s: cannot allocate L3 table", __func__);
616 pa = VM_PAGE_TO_PHYS(pg); 616 pa = VM_PAGE_TO_PHYS(pg);
617 617
618 SLIST_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist); 618 SLIST_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist);
619 PMAP_COUNT(pdp_alloc); 619 PMAP_COUNT(pdp_alloc);
620 620
621 } else { 621 } else {
622 /* uvm_pageboot_alloc() returns AARCH64 KSEG address */ 622 /* uvm_pageboot_alloc() returns AARCH64 KSEG address */
623 pa = AARCH64_KVA_TO_PA( 623 pa = AARCH64_KVA_TO_PA(
624 uvm_pageboot_alloc(Ln_TABLE_SIZE)); 624 uvm_pageboot_alloc(Ln_TABLE_SIZE));
625 PMAP_COUNT(pdp_alloc_boot); 625 PMAP_COUNT(pdp_alloc_boot);
626 } 626 }
627 if (pap != NULL) 627 if (pap != NULL)
628 *pap = pa; 628 *pap = pa;
629 629
630 UVMHIST_LOG(pmaphist, "pa=%llx, va=%llx", 630 UVMHIST_LOG(pmaphist, "pa=%llx, va=%llx",
631 pa, AARCH64_PA_TO_KVA(pa), 0, 0); 631 pa, AARCH64_PA_TO_KVA(pa), 0, 0);
632 632
633 return (void *)AARCH64_PA_TO_KVA(pa); 633 return (void *)AARCH64_PA_TO_KVA(pa);
634} 634}
635 635
636static void 636static void
637_pmap_free_pdp_all(struct pmap *pm) 637_pmap_free_pdp_all(struct pmap *pm)
638{ 638{
639 struct vm_page *pg, *tmp; 639 struct vm_page *pg, *tmp;
640 640
641 SLIST_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) { 641 SLIST_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) {
642 uvm_pagefree(pg); 642 uvm_pagefree(pg);
643 PMAP_COUNT(pdp_free); 643 PMAP_COUNT(pdp_free);
644 } 644 }
645} 645}
646 646
647vaddr_t 647vaddr_t
648pmap_growkernel(vaddr_t maxkvaddr) 648pmap_growkernel(vaddr_t maxkvaddr)
649{ 649{
650 UVMHIST_FUNC(__func__); 650 UVMHIST_FUNC(__func__);
651 UVMHIST_CALLED(pmaphist); 651 UVMHIST_CALLED(pmaphist);
652 652
653 UVMHIST_LOG(pmaphist, "maxkvaddr=%llx, pmap_maxkvaddr=%llx", 653 UVMHIST_LOG(pmaphist, "maxkvaddr=%llx, pmap_maxkvaddr=%llx",
654 maxkvaddr, pmap_maxkvaddr, 0, 0); 654 maxkvaddr, pmap_maxkvaddr, 0, 0);
655 655
656 pmap_maxkvaddr = maxkvaddr; 656 pmap_maxkvaddr = maxkvaddr;
657 657
658 return maxkvaddr; 658 return maxkvaddr;
659} 659}
660 660
661bool 661bool
662pmap_extract_coherency(struct pmap *pm, vaddr_t va, paddr_t *pap, 662pmap_extract_coherency(struct pmap *pm, vaddr_t va, paddr_t *pap,
663 bool *coherencyp) 663 bool *coherencyp)
664{ 664{
665 if (coherencyp) 665 if (coherencyp)
666 *coherencyp = false; 666 *coherencyp = false;
667 667
668 return pmap_extract(pm, va, pap); 668 return pmap_extract(pm, va, pap);
669} 669}
670 670
671bool 671bool
672pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap) 672pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
673{ 673{
674 static pt_entry_t *ptep; 674 static pt_entry_t *ptep;
675 paddr_t pa; 675 paddr_t pa;
676 bool found; 676 bool found;
677 677
678#if 0 678#if 0
679 PM_ADDR_CHECK(pm, va); 679 PM_ADDR_CHECK(pm, va);
680#else 680#else
681 if (((pm == pmap_kernel()) && 681 if (((pm == pmap_kernel()) &&
682 !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS) && 682 !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS) &&
683 !(AARCH64_KSEG_START <= va && va < AARCH64_KSEG_END)) || 683 !(AARCH64_KSEG_START <= va && va < AARCH64_KSEG_END)) ||
684 ((pm != pmap_kernel()) && 684 ((pm != pmap_kernel()) &&
685 !(VM_MIN_ADDRESS <= va && va <= VM_MAX_ADDRESS))) 685 !(VM_MIN_ADDRESS <= va && va <= VM_MAX_ADDRESS)))
686 return false; 686 return false;
687#endif 687#endif
688 688
689 extern char __kernel_text[]; 689 extern char __kernel_text[];
690 extern char _end[]; 690 extern char _end[];
691 if ((vaddr_t)__kernel_text <= va && va < (vaddr_t)_end) { 691 if ((vaddr_t)__kernel_text <= va && va < (vaddr_t)_end) {
692 pa = KERN_VTOPHYS(va); 692 pa = KERN_VTOPHYS(va);
693 found = true; 693 found = true;
694 } else if (AARCH64_KSEG_START <= va && va < AARCH64_KSEG_END) { 694 } else if (AARCH64_KSEG_START <= va && va < AARCH64_KSEG_END) {
695 pa = AARCH64_KVA_TO_PA(va); 695 pa = AARCH64_KVA_TO_PA(va);
696 found = true; 696 found = true;
697 } else { 697 } else {
698 pt_entry_t pte; 698 pt_entry_t pte;
699 699
700 ptep = _pmap_pte_lookup(pm, va); 700 ptep = _pmap_pte_lookup(pm, va);
701 if (ptep == NULL) { 701 if (ptep == NULL) {
702 pd_entry_t pde, *l1, *l2, *l3; 702 pd_entry_t pde, *l1, *l2, *l3;
703 703
704 /* 704 /*
705 * traverse L0 -> L1 -> L2 -> L3 table 705 * traverse L0 -> L1 -> L2 -> L3 table
706 * with considering block 706 * with considering block
707 */ 707 */
708 pde = pm->pm_l0table[l0pde_index(va)]; 708 pde = pm->pm_l0table[l0pde_index(va)];
709 if (!l0pde_valid(pde)) { 709 if (!l0pde_valid(pde)) {
710 found = false; 710 found = false;
711 goto done; 711 goto done;
712 } 712 }
713 713
714 l1 = (void *)AARCH64_PA_TO_KVA(l0pde_pa(pde)); 714 l1 = (void *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
715 pde = l1[l1pde_index(va)]; 715 pde = l1[l1pde_index(va)];
716 if (!l1pde_valid(pde)) { 716 if (!l1pde_valid(pde)) {
717 found = false; 717 found = false;
718 goto done; 718 goto done;
719 } 719 }
720 if (l1pde_is_block(pde)) { 720 if (l1pde_is_block(pde)) {
721 pa = l1pde_pa(pde) + (va & L1_OFFSET); 721 pa = l1pde_pa(pde) + (va & L1_OFFSET);
722 found = true; 722 found = true;
723 goto done; 723 goto done;
724 } 724 }
725 725
726 KASSERT(l1pde_is_table(pde)); 726 KASSERT(l1pde_is_table(pde));
727 727
728 l2 = (void *)AARCH64_PA_TO_KVA(l1pde_pa(pde)); 728 l2 = (void *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
729 pde = l2[l2pde_index(va)]; 729 pde = l2[l2pde_index(va)];
730 if (!l2pde_valid(pde)) { 730 if (!l2pde_valid(pde)) {
731 found = false; 731 found = false;
732 goto done; 732 goto done;
733 } 733 }
734 if (l2pde_is_block(pde)) { 734 if (l2pde_is_block(pde)) {
735 pa = l2pde_pa(pde) + (va & L2_OFFSET); 735 pa = l2pde_pa(pde) + (va & L2_OFFSET);
736 found = true; 736 found = true;
737 goto done; 737 goto done;
738 } 738 }
739 739
740 KASSERT(l2pde_is_table(pde)); 740 KASSERT(l2pde_is_table(pde));
741 741
742 l3 = (void *)AARCH64_PA_TO_KVA(l2pde_pa(pde)); 742 l3 = (void *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
743 pte = l3[l3pte_index(va)]; 743 pte = l3[l3pte_index(va)];
744 744
745 } else { 745 } else {
746 pte = *ptep; 746 pte = *ptep;
747 } 747 }
748 if (!l3pte_valid(pte) || !l3pte_is_page(pte)) { 748 if (!l3pte_valid(pte) || !l3pte_is_page(pte)) {
749 found = false; 749 found = false;
750 goto done; 750 goto done;
751 } 751 }
752 752
753 KASSERT(l3pte_is_page(pte)); 753 KASSERT(l3pte_is_page(pte));
754 pa = l3pte_pa(pte) + (va & L3_OFFSET); 754 pa = l3pte_pa(pte) + (va & L3_OFFSET);
755 found = true; 755 found = true;
756 } 756 }
757 done: 757 done:
758 if (found && (pap != NULL)) 758 if (found && (pap != NULL))
759 *pap = pa; 759 *pap = pa;
760 return found; 760 return found;
761} 761}
762 762
763paddr_t 763paddr_t
764vtophys(vaddr_t va) 764vtophys(vaddr_t va)
765{ 765{
766 struct pmap *pm; 766 struct pmap *pm;
767 paddr_t pa; 767 paddr_t pa;
768 768
769 if (VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS) { 769 if (VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS) {
770 if (pmap_extract(pmap_kernel(), va, &pa) == false) { 770 if (pmap_extract(pmap_kernel(), va, &pa) == false) {
771 return VTOPHYS_FAILED; 771 return VTOPHYS_FAILED;
772 } 772 }
773 } else if (IN_KSEG_ADDR(va)) { 773 } else if (IN_KSEG_ADDR(va)) {
774 pa = AARCH64_KVA_TO_PA(va); 774 pa = AARCH64_KVA_TO_PA(va);
775 } else if (VM_MIN_ADDRESS <= va && va <= VM_MAX_ADDRESS) { 775 } else if (VM_MIN_ADDRESS <= va && va <= VM_MAX_ADDRESS) {
776 if (curlwp->l_proc == NULL) 776 if (curlwp->l_proc == NULL)
777 return VTOPHYS_FAILED; 777 return VTOPHYS_FAILED;
778 pm = curlwp->l_proc->p_vmspace->vm_map.pmap; 778 pm = curlwp->l_proc->p_vmspace->vm_map.pmap;
779 if (pmap_extract(pm, va, &pa) == false) { 779 if (pmap_extract(pm, va, &pa) == false) {
780 return VTOPHYS_FAILED; 780 return VTOPHYS_FAILED;
781 } 781 }
782 } else { 782 } else {
783 return VTOPHYS_FAILED; 783 return VTOPHYS_FAILED;
784 } 784 }
785 return pa; 785 return pa;
786} 786}
787 787
788static pt_entry_t * 788static pt_entry_t *
789_pmap_pte_lookup(struct pmap *pm, vaddr_t va) 789_pmap_pte_lookup(struct pmap *pm, vaddr_t va)
790{ 790{
791 if (AARCH64_KSEG_START <= va && va < AARCH64_KSEG_END) { 791 if (AARCH64_KSEG_START <= va && va < AARCH64_KSEG_END) {
792 panic("page entry is mapped in KSEG"); 792 panic("page entry is mapped in KSEG");
793 } else { 793 } else {
794 pd_entry_t *l0, *l1, *l2, *l3; 794 pd_entry_t *l0, *l1, *l2, *l3;
795 pd_entry_t pde; 795 pd_entry_t pde;
796 pt_entry_t *ptep; 796 pt_entry_t *ptep;
797 unsigned int idx; 797 unsigned int idx;
798 798
799 /* 799 /*
800 * traverse L0 -> L1 -> L2 -> L3 table 800 * traverse L0 -> L1 -> L2 -> L3 table
801 */ 801 */
802 l0 = pm->pm_l0table; 802 l0 = pm->pm_l0table;
803 803
804 idx = l0pde_index(va); 804 idx = l0pde_index(va);
805 pde = l0[idx]; 805 pde = l0[idx];
806 if (!l0pde_valid(pde)) 806 if (!l0pde_valid(pde))
807 return NULL; 807 return NULL;
808 808
809 l1 = (void *)AARCH64_PA_TO_KVA(l0pde_pa(pde)); 809 l1 = (void *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
810 idx = l1pde_index(va); 810 idx = l1pde_index(va);
811 pde = l1[idx]; 811 pde = l1[idx];
812 if (!l1pde_valid(pde)) 812 if (!l1pde_valid(pde))
813 return NULL; 813 return NULL;
814 814
815 if (l1pde_is_block(pde)) 815 if (l1pde_is_block(pde))
816 return NULL; 816 return NULL;
817 817
818 l2 = (void *)AARCH64_PA_TO_KVA(l1pde_pa(pde)); 818 l2 = (void *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
819 idx = l2pde_index(va); 819 idx = l2pde_index(va);
820 pde = l2[idx]; 820 pde = l2[idx];
821 if (!l2pde_valid(pde)) 821 if (!l2pde_valid(pde))
822 return NULL; 822 return NULL;
823 if (l2pde_is_block(pde)) 823 if (l2pde_is_block(pde))
824 return NULL; 824 return NULL;
825 825
826 l3 = (void *)AARCH64_PA_TO_KVA(l2pde_pa(pde)); 826 l3 = (void *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
827 idx = l3pte_index(va); 827 idx = l3pte_index(va);
828 ptep = &l3[idx]; /* as PTE */ 828 ptep = &l3[idx]; /* as PTE */
829 829
830 return ptep; 830 return ptep;
831 } 831 }
832 832
833 return NULL; 833 return NULL;
834} 834}
835 835
836static pt_entry_t 836static pt_entry_t
837_pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t protmask, 837_pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t protmask,
838 bool user) 838 bool user)
839{ 839{
840 vm_prot_t masked; 840 vm_prot_t masked;
841 pt_entry_t xn; 841 pt_entry_t xn;
842 842
843 masked = prot & protmask; 843 masked = prot & protmask;
844 pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_AP); 844 pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_AP);
845 845
846 /* keep prot for ref/mod emulation */ 846 /* keep prot for ref/mod emulation */
847 switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) { 847 switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) {
848 case 0: 848 case 0:
849 default: 849 default:
850 break; 850 break;
851 case VM_PROT_READ: 851 case VM_PROT_READ:
852 pte |= LX_BLKPAG_OS_READ; 852 pte |= LX_BLKPAG_OS_READ;
853 break; 853 break;
854 case VM_PROT_WRITE: 854 case VM_PROT_WRITE:
855 case VM_PROT_READ|VM_PROT_WRITE: 855 case VM_PROT_READ|VM_PROT_WRITE:
856 pte |= (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE); 856 pte |= (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE);
857 break; 857 break;
858 } 858 }
859 859
860 switch (masked & (VM_PROT_READ|VM_PROT_WRITE)) { 860 switch (masked & (VM_PROT_READ|VM_PROT_WRITE)) {
861 case 0: 861 case 0:
862 default: 862 default:
863 /* cannot access due to No LX_BLKPAG_AF */ 863 /* cannot access due to No LX_BLKPAG_AF */
864 pte |= LX_BLKPAG_AP_RO; 864 pte |= LX_BLKPAG_AP_RO;
865 break; 865 break;
866 case VM_PROT_READ: 866 case VM_PROT_READ:
867 /* actual permission of pte */ 867 /* actual permission of pte */
868 pte |= LX_BLKPAG_AF; 868 pte |= LX_BLKPAG_AF;
869 pte |= LX_BLKPAG_AP_RO; 869 pte |= LX_BLKPAG_AP_RO;
870 break; 870 break;
871 case VM_PROT_WRITE: 871 case VM_PROT_WRITE:
872 case VM_PROT_READ|VM_PROT_WRITE: 872 case VM_PROT_READ|VM_PROT_WRITE:
873 /* actual permission of pte */ 873 /* actual permission of pte */
874 pte |= LX_BLKPAG_AF; 874 pte |= LX_BLKPAG_AF;
875 pte |= LX_BLKPAG_AP_RW; 875 pte |= LX_BLKPAG_AP_RW;
876 break; 876 break;
877 } 877 }
878 878
879 /* executable for kernel or user? first set never exec both */ 879 /* executable for kernel or user? first set never exec both */
880 pte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); 880 pte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN);
881 /* and either to executable */ 881 /* and either to executable */
882 xn = user ? LX_BLKPAG_UXN : LX_BLKPAG_PXN; 882 xn = user ? LX_BLKPAG_UXN : LX_BLKPAG_PXN;
883 if (prot & VM_PROT_EXECUTE) 883 if (prot & VM_PROT_EXECUTE)
884 pte &= ~xn;  884 pte &= ~xn;
885 885
886 return pte; 886 return pte;
887} 887}
888 888
889static pt_entry_t 889static pt_entry_t
890_pmap_pte_adjust_cacheflags(pt_entry_t pte, u_int flags) 890_pmap_pte_adjust_cacheflags(pt_entry_t pte, u_int flags)
891{ 891{
892 892
893 pte &= ~LX_BLKPAG_ATTR_MASK; 893 pte &= ~LX_BLKPAG_ATTR_MASK;
894 894
895 switch (flags & (PMAP_CACHE_MASK|PMAP_DEV)) { 895 switch (flags & (PMAP_CACHE_MASK|PMAP_DEV)) {
896 case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK: 896 case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:
897 pte |= LX_BLKPAG_ATTR_DEVICE_MEM; /* nGnRnE */ 897 pte |= LX_BLKPAG_ATTR_DEVICE_MEM; /* nGnRnE */
898 break; 898 break;
899 case PMAP_NOCACHE: 899 case PMAP_NOCACHE:
900 case PMAP_NOCACHE_OVR: 900 case PMAP_NOCACHE_OVR:
901 case PMAP_WRITE_COMBINE: 901 case PMAP_WRITE_COMBINE:
902 pte |= LX_BLKPAG_ATTR_NORMAL_NC; /* only no-cache */ 902 pte |= LX_BLKPAG_ATTR_NORMAL_NC; /* only no-cache */
903 break; 903 break;
904 case PMAP_WRITE_BACK: 904 case PMAP_WRITE_BACK:
905 case 0: 905 case 0:
906 default: 906 default:
907 pte |= LX_BLKPAG_ATTR_NORMAL_WB; 907 pte |= LX_BLKPAG_ATTR_NORMAL_WB;
908 break; 908 break;
909 } 909 }
910 910
911 return pte; 911 return pte;
912} 912}
913 913
914static struct pv_entry * 914static struct pv_entry *
915_pmap_remove_pv(struct vm_page *pg, struct pmap *pm, vaddr_t va, pt_entry_t pte) 915_pmap_remove_pv(struct vm_page *pg, struct pmap *pm, vaddr_t va, pt_entry_t pte)
916{ 916{
917 struct vm_page_md *md; 917 struct vm_page_md *md;
918 struct pv_entry *pv; 918 struct pv_entry *pv;
919 919
920 UVMHIST_FUNC(__func__); 920 UVMHIST_FUNC(__func__);
921 UVMHIST_CALLED(pmaphist); 921 UVMHIST_CALLED(pmaphist);
922 922
923 UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pte=%llx", 923 UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pte=%llx",
924 pg, pm, va, pte); 924 pg, pm, va, pte);
925 925
926 md = VM_PAGE_TO_MD(pg); 926 md = VM_PAGE_TO_MD(pg);
927 927
928 pmap_pv_lock(md); 928 pmap_pv_lock(md);
929 929
930 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) { 930 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
931 if ((pm == pv->pv_pmap) && (va == pv->pv_va)) { 931 if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
932 TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link); 932 TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link);
933 PMAP_COUNT(pv_remove); 933 PMAP_COUNT(pv_remove);
934 break; 934 break;
935 } 935 }
936 } 936 }
937#ifdef PMAPCOUNTERS 937#ifdef PMAPCOUNTERS
938 if (pv == NULL) { 938 if (pv == NULL) {
939 PMAP_COUNT(pv_remove_nopv); 939 PMAP_COUNT(pv_remove_nopv);
940 } 940 }
941#endif 941#endif
942 942
943 pmap_pv_unlock(md); 943 pmap_pv_unlock(md);
944 944
945 return pv; 945 return pv;
946} 946}
947 947
948#if defined(PMAP_PV_DEBUG) || defined(DDB) 948#if defined(PMAP_PV_DEBUG) || defined(DDB)
949 949
950static char * 950static char *
951str_vmflags(uint32_t flags) 951str_vmflags(uint32_t flags)
952{ 952{
953 static int idx = 0; 953 static int idx = 0;
954 static char buf[4][32]; /* XXX */ 954 static char buf[4][32]; /* XXX */
955 char *p; 955 char *p;
956 956
957 p = buf[idx]; 957 p = buf[idx];
958 idx = (idx + 1) & 3; 958 idx = (idx + 1) & 3;
959 959
960 p[0] = (flags & VM_PROT_READ) ? 'R' : '-'; 960 p[0] = (flags & VM_PROT_READ) ? 'R' : '-';
961 p[1] = (flags & VM_PROT_WRITE) ? 'W' : '-'; 961 p[1] = (flags & VM_PROT_WRITE) ? 'W' : '-';
962 p[2] = (flags & VM_PROT_EXECUTE) ? 'X' : '-'; 962 p[2] = (flags & VM_PROT_EXECUTE) ? 'X' : '-';
963 if (flags & PMAP_WIRED) 963 if (flags & PMAP_WIRED)
964 memcpy(&p[3], ",WIRED\0", 7); 964 memcpy(&p[3], ",WIRED\0", 7);
965 else 965 else
966 p[3] = '\0'; 966 p[3] = '\0';
967 967
968 return p; 968 return p;
969} 969}
970 970
971static void 971static void
972pg_dump(struct vm_page *pg, void (*pr)(const char *, ...)) 972pg_dump(struct vm_page *pg, void (*pr)(const char *, ...))
973{ 973{
974 pr("pg=%p\n", pg); 974 pr("pg=%p\n", pg);
975 pr(" pg->uanon = %p\n", pg->uanon); 975 pr(" pg->uanon = %p\n", pg->uanon);
976 pr(" pg->uobject = %p\n", pg->uobject); 976 pr(" pg->uobject = %p\n", pg->uobject);
977 pr(" pg->offset = %zu\n", pg->offset); 977 pr(" pg->offset = %zu\n", pg->offset);
978 pr(" pg->flags = %u\n", pg->flags); 978 pr(" pg->flags = %u\n", pg->flags);
979 pr(" pg->loan_count = %u\n", pg->loan_count); 979 pr(" pg->loan_count = %u\n", pg->loan_count);
980 pr(" pg->wire_count = %u\n", pg->wire_count); 980 pr(" pg->wire_count = %u\n", pg->wire_count);
981 pr(" pg->pqflags = %u\n", pg->pqflags); 981 pr(" pg->pqflags = %u\n", pg->pqflags);
982 pr(" pg->phys_addr = %016lx\n", pg->phys_addr); 982 pr(" pg->phys_addr = %016lx\n", pg->phys_addr);
983} 983}
984 984
985static void 985static void
986pv_dump(struct vm_page_md *md, void (*pr)(const char *, ...)) 986pv_dump(struct vm_page_md *md, void (*pr)(const char *, ...))
987{ 987{
988 struct pv_entry *pv; 988 struct pv_entry *pv;
989 int i; 989 int i;
990 990
991 i = 0; 991 i = 0;
992 992
993 pr("md=%p\n", md); 993 pr("md=%p\n", md);
994 pr(" md->mdpg_flags=%08x %s\n", md->mdpg_flags, 994 pr(" md->mdpg_flags=%08x %s\n", md->mdpg_flags,
995 str_vmflags(md->mdpg_flags)); 995 str_vmflags(md->mdpg_flags));
996 996
997 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) { 997 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
998 pr(" pv[%d] pv=%p\n", 998 pr(" pv[%d] pv=%p\n",
999 i, pv); 999 i, pv);
1000 pr(" pv[%d].pv_pmap =%p (asid=%d)\n", 1000 pr(" pv[%d].pv_pmap =%p (asid=%d)\n",
1001 i, pv->pv_pmap, pv->pv_pmap->pm_asid); 1001 i, pv->pv_pmap, pv->pv_pmap->pm_asid);
1002 pr(" pv[%d].pv_va =%016lx (color=%d)\n", 1002 pr(" pv[%d].pv_va =%016lx (color=%d)\n",
1003 i, pv->pv_va, _pmap_color(pv->pv_va)); 1003 i, pv->pv_va, _pmap_color(pv->pv_va));
1004 pr(" pv[%d].pv_pa =%016lx (color=%d)\n", 1004 pr(" pv[%d].pv_pa =%016lx (color=%d)\n",
1005 i, pv->pv_pa, _pmap_color(pv->pv_pa)); 1005 i, pv->pv_pa, _pmap_color(pv->pv_pa));
1006 i++; 1006 i++;
1007 } 1007 }
1008} 1008}
1009#endif /* PMAP_PV_DEBUG & DDB */ 1009#endif /* PMAP_PV_DEBUG & DDB */
1010 1010
1011static int 1011static int
1012_pmap_enter_pv(struct vm_page *pg, struct pmap *pm, struct pv_entry **pvp, vaddr_t va, 1012_pmap_enter_pv(struct vm_page *pg, struct pmap *pm, struct pv_entry **pvp, vaddr_t va,
1013 pt_entry_t *ptep, paddr_t pa, u_int flags) 1013 pt_entry_t *ptep, paddr_t pa, u_int flags)
1014{ 1014{
1015 struct vm_page_md *md; 1015 struct vm_page_md *md;
1016 struct pv_entry *pv; 1016 struct pv_entry *pv;
1017 1017
1018 UVMHIST_FUNC(__func__); 1018 UVMHIST_FUNC(__func__);
1019 UVMHIST_CALLED(pmaphist); 1019 UVMHIST_CALLED(pmaphist);
1020 1020
1021 UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pa=%llx", pg, pm, va, pa); 1021 UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pa=%llx", pg, pm, va, pa);
1022 UVMHIST_LOG(pmaphist, "ptep=%p, flags=%08x", ptep, flags, 0, 0); 1022 UVMHIST_LOG(pmaphist, "ptep=%p, flags=%08x", ptep, flags, 0, 0);
1023 1023
1024 md = VM_PAGE_TO_MD(pg); 1024 md = VM_PAGE_TO_MD(pg);
1025 1025
1026 pmap_pv_lock(md); 1026 pmap_pv_lock(md);
1027 1027
1028 /* pv is already registered? */ 1028 /* pv is already registered? */
1029 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) { 1029 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
1030 if ((pm == pv->pv_pmap) && (va == pv->pv_va)) { 1030 if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
1031 break; 1031 break;
1032 } 1032 }
1033 } 1033 }
1034 1034
1035 if (pv == NULL) { 1035 if (pv == NULL) {
1036 pmap_pv_unlock(md); 1036 pmap_pv_unlock(md);
1037 1037
1038 /* 1038 /*
1039 * create and link new pv. 1039 * create and link new pv.
1040 * pv is already allocated at beginning of _pmap_enter(). 1040 * pv is already allocated at beginning of _pmap_enter().
1041 */ 1041 */
1042 pv = *pvp; 1042 pv = *pvp;
1043 if (pv == NULL) 1043 if (pv == NULL)
1044 return ENOMEM; 1044 return ENOMEM;
1045 *pvp = NULL; 1045 *pvp = NULL;
1046 1046
1047 pv->pv_pmap = pm; 1047 pv->pv_pmap = pm;
1048 pv->pv_va = va; 1048 pv->pv_va = va;
1049 pv->pv_pa = pa; 1049 pv->pv_pa = pa;
1050 pv->pv_ptep = ptep; 1050 pv->pv_ptep = ptep;
1051 1051
1052 pmap_pv_lock(md); 1052 pmap_pv_lock(md);
1053 TAILQ_INSERT_HEAD(&md->mdpg_pvhead, pv, pv_link); 1053 TAILQ_INSERT_HEAD(&md->mdpg_pvhead, pv, pv_link);
1054 PMAP_COUNT(pv_enter); 1054 PMAP_COUNT(pv_enter);
1055 1055
1056#ifdef PMAP_PV_DEBUG 1056#ifdef PMAP_PV_DEBUG
1057 if (!TAILQ_EMPTY(&md->mdpg_pvhead)){ 1057 if (!TAILQ_EMPTY(&md->mdpg_pvhead)){
1058 printf("pv %p alias added va=%016lx -> pa=%016lx\n", 1058 printf("pv %p alias added va=%016lx -> pa=%016lx\n",
1059 pv, va, pa); 1059 pv, va, pa);
1060 pv_dump(md, printf); 1060 pv_dump(md, printf);
1061 } 1061 }
1062#endif 1062#endif
1063 } 1063 }
1064 pmap_pv_unlock(md); 1064 pmap_pv_unlock(md);
1065 return 0; 1065 return 0;
1066} 1066}
1067 1067
1068void 1068void
1069pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1069pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1070{ 1070{
1071 int s; 1071 int s;
1072 1072
1073 s = splvm(); 1073 s = splvm();
1074 _pmap_enter(pmap_kernel(), va, pa, prot, flags | PMAP_WIRED, true); 1074 _pmap_enter(pmap_kernel(), va, pa, prot, flags | PMAP_WIRED, true);
1075 splx(s); 1075 splx(s);
1076} 1076}
1077 1077
1078void 1078void
1079pmap_kremove(vaddr_t va, vsize_t size) 1079pmap_kremove(vaddr_t va, vsize_t size)
1080{ 1080{
1081 struct pmap *kpm = pmap_kernel(); 1081 struct pmap *kpm = pmap_kernel();
1082 vaddr_t eva; 1082 vaddr_t eva;
1083 int s; 1083 int s;
1084 1084
1085 UVMHIST_FUNC(__func__); 1085 UVMHIST_FUNC(__func__);
1086 UVMHIST_CALLED(pmaphist); 1086 UVMHIST_CALLED(pmaphist);
1087 1087
1088 UVMHIST_LOG(pmaphist, "va=%llx, size=%llx", va, size, 0, 0); 1088 UVMHIST_LOG(pmaphist, "va=%llx, size=%llx", va, size, 0, 0);
1089 1089
1090 KDASSERT((va & PGOFSET) == 0); 1090 KDASSERT((va & PGOFSET) == 0);
1091 KDASSERT((size & PGOFSET) == 0); 1091 KDASSERT((size & PGOFSET) == 0);
1092 1092
1093 KASSERT(!IN_KSEG_ADDR(va)); 1093 KASSERT(!IN_KSEG_ADDR(va));
1094 1094
1095 eva = va + size; 1095 eva = va + size;
1096 KDASSERT(VM_MIN_KERNEL_ADDRESS <= va && eva < VM_MAX_KERNEL_ADDRESS); 1096 KDASSERT(VM_MIN_KERNEL_ADDRESS <= va && eva < VM_MAX_KERNEL_ADDRESS);
1097 1097
1098 s = splvm(); 1098 s = splvm();
1099 for (; va < eva; va += PAGE_SIZE) { 1099 for (; va < eva; va += PAGE_SIZE) {
1100 _pmap_remove(kpm, va, true); 1100 _pmap_remove(kpm, va, true);
1101 } 1101 }
1102 splx(s); 1102 splx(s);
1103} 1103}
1104 1104
1105static void 1105static void
1106_pmap_protect_pv(struct vm_page *pg, struct pv_entry *pv, vm_prot_t prot) 1106_pmap_protect_pv(struct vm_page *pg, struct pv_entry *pv, vm_prot_t prot)
1107{ 1107{
1108 pt_entry_t *ptep, pte; 1108 pt_entry_t *ptep, pte;
1109 vm_prot_t pteprot; 1109 vm_prot_t pteprot;
1110 uint32_t mdattr; 1110 uint32_t mdattr;
1111 const bool user = (pv->pv_pmap != pmap_kernel()); 1111 const bool user = (pv->pv_pmap != pmap_kernel());
1112 1112
1113 UVMHIST_FUNC(__func__); 1113 UVMHIST_FUNC(__func__);
1114 UVMHIST_CALLED(pmaphist); 1114 UVMHIST_CALLED(pmaphist);
1115 1115
1116 UVMHIST_LOG(pmaphist, "pg=%p, pv=%p, prot=%08x", pg, pv, prot, 0); 1116 UVMHIST_LOG(pmaphist, "pg=%p, pv=%p, prot=%08x", pg, pv, prot, 0);
1117 1117
1118 /* get prot mask from referenced/modified */ 1118 /* get prot mask from referenced/modified */
1119 mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags & 1119 mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags &
1120 (VM_PROT_READ | VM_PROT_WRITE); 1120 (VM_PROT_READ | VM_PROT_WRITE);
1121 1121
1122 pm_lock(pv->pv_pmap); 1122 pm_lock(pv->pv_pmap);
1123 1123
1124 ptep = pv->pv_ptep; 1124 ptep = pv->pv_ptep;
1125 pte = *ptep; 1125 pte = *ptep;
1126 1126
1127 /* get prot mask from pte */ 1127 /* get prot mask from pte */
1128 pteprot = 0; 1128 pteprot = 0;
1129 if (pte & LX_BLKPAG_AF) 1129 if (pte & LX_BLKPAG_AF)
1130 pteprot |= VM_PROT_READ; 1130 pteprot |= VM_PROT_READ;
1131 if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW) 1131 if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW)
1132 pteprot |= VM_PROT_WRITE; 1132 pteprot |= VM_PROT_WRITE;
1133 if (l3pte_executable(pte, user)) 1133 if (l3pte_executable(pte, user))
1134 pteprot |= VM_PROT_EXECUTE; 1134 pteprot |= VM_PROT_EXECUTE;
1135 1135
1136 /* new prot = prot & pteprot & mdattr */ 1136 /* new prot = prot & pteprot & mdattr */
1137 pte = _pmap_pte_adjust_prot(pte, prot & pteprot, mdattr, user); 1137 pte = _pmap_pte_adjust_prot(pte, prot & pteprot, mdattr, user);
1138 atomic_swap_64(ptep, pte); 1138 atomic_swap_64(ptep, pte);
1139 AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, pv->pv_va, true); 1139 AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, pv->pv_va, true);
1140 1140
1141 pm_unlock(pv->pv_pmap); 1141 pm_unlock(pv->pv_pmap);
1142} 1142}
1143 1143
1144void 1144void
1145pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 1145pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1146{ 1146{
1147 vaddr_t va; 1147 vaddr_t va;
1148 const bool user = (pm != pmap_kernel()); 1148 const bool user = (pm != pmap_kernel());
1149 1149
1150 KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE)); 1150 KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));
1151 1151
1152 UVMHIST_FUNC(__func__); 1152 UVMHIST_FUNC(__func__);
1153 UVMHIST_CALLED(pmaphist); 1153 UVMHIST_CALLED(pmaphist);
1154 1154
1155 UVMHIST_LOG(pmaphist, "pm=%p, sva=%016lx, eva=%016lx, prot=%08x", 1155 UVMHIST_LOG(pmaphist, "pm=%p, sva=%016lx, eva=%016lx, prot=%08x",
1156 pm, sva, eva, prot); 1156 pm, sva, eva, prot);
1157 1157
1158 PM_ADDR_CHECK(pm, sva); 1158 PM_ADDR_CHECK(pm, sva);
1159 1159
1160 KASSERT(!IN_KSEG_ADDR(sva)); 1160 KASSERT(!IN_KSEG_ADDR(sva));
1161 1161
1162 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1162 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1163 PMAP_COUNT(protect_remove_fallback); 1163 PMAP_COUNT(protect_remove_fallback);
1164 pmap_remove(pm, sva, eva); 1164 pmap_remove(pm, sva, eva);
1165 return; 1165 return;
1166 } 1166 }
1167 PMAP_COUNT(protect); 1167 PMAP_COUNT(protect);
1168 1168
1169 KDASSERT((sva & PAGE_MASK) == 0); 1169 KDASSERT((sva & PAGE_MASK) == 0);
1170 KDASSERT((eva & PAGE_MASK) == 0); 1170 KDASSERT((eva & PAGE_MASK) == 0);
1171 1171
1172 pm_lock(pm); 1172 pm_lock(pm);
1173 1173
1174 for (va = sva; va < eva; va += PAGE_SIZE) { 1174 for (va = sva; va < eva; va += PAGE_SIZE) {
1175 pt_entry_t *ptep, pte; 1175 pt_entry_t *ptep, pte;
1176#ifdef UVMHIST 1176#ifdef UVMHIST
1177 pt_entry_t opte; 1177 pt_entry_t opte;
1178#endif 1178#endif
1179 struct vm_page *pg; 1179 struct vm_page *pg;
1180 paddr_t pa; 1180 paddr_t pa;
1181 uint32_t mdattr; 1181 uint32_t mdattr;
1182 bool executable; 1182 bool executable;
1183 1183
1184 ptep = _pmap_pte_lookup(pm, va); 1184 ptep = _pmap_pte_lookup(pm, va);
1185 if (ptep == NULL) { 1185 if (ptep == NULL) {
1186 PMAP_COUNT(protect_none); 1186 PMAP_COUNT(protect_none);
1187 continue; 1187 continue;
1188 } 1188 }
1189 1189
1190 pte = *ptep; 1190 pte = *ptep;
1191 1191
1192 if (!l3pte_valid(pte)) { 1192 if (!l3pte_valid(pte)) {
1193 PMAP_COUNT(protect_none); 1193 PMAP_COUNT(protect_none);
1194 continue; 1194 continue;
1195 } 1195 }
1196 1196
1197 pa = l3pte_pa(pte); 1197 pa = l3pte_pa(pte);
1198 pg = PHYS_TO_VM_PAGE(pa); 1198 pg = PHYS_TO_VM_PAGE(pa);
1199 1199
1200 if (pg != NULL) { 1200 if (pg != NULL) {
1201 /* get prot mask from referenced/modified */ 1201 /* get prot mask from referenced/modified */
1202 mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags & 1202 mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags &
1203 (VM_PROT_READ | VM_PROT_WRITE); 1203 (VM_PROT_READ | VM_PROT_WRITE);
1204 PMAP_COUNT(protect_managed); 1204 PMAP_COUNT(protect_managed);
1205 } else { 1205 } else {
1206 /* unmanaged page */ 1206 /* unmanaged page */
1207 mdattr = VM_PROT_ALL; 1207 mdattr = VM_PROT_ALL;
1208 PMAP_COUNT(protect_unmanaged); 1208 PMAP_COUNT(protect_unmanaged);
1209 } 1209 }
1210 1210
1211 pte = *ptep; 1211 pte = *ptep;
1212#ifdef UVMHIST 1212#ifdef UVMHIST
1213 opte = pte; 1213 opte = pte;
1214#endif 1214#endif
1215 executable = l3pte_executable(pte, user); 1215 executable = l3pte_executable(pte, user);
1216 pte = _pmap_pte_adjust_prot(pte, prot, mdattr, user); 1216 pte = _pmap_pte_adjust_prot(pte, prot, mdattr, user);
1217 1217
1218 if (!executable && (prot & VM_PROT_EXECUTE)) { 1218 if (!executable && (prot & VM_PROT_EXECUTE)) {
1219 /* non-exec -> exec */ 1219 /* non-exec -> exec */
1220 UVMHIST_LOG(pmaphist, "icache_sync: " 1220 UVMHIST_LOG(pmaphist, "icache_sync: "
1221 "pm=%p, va=%016lx, pte: %016lx -> %016lx", 1221 "pm=%p, va=%016lx, pte: %016lx -> %016lx",
1222 pm, va, opte, pte); 1222 pm, va, opte, pte);
1223 if (!l3pte_writable(pte)) { 1223 if (!l3pte_writable(pte)) {
1224 PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, true); 1224 PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, true);
1225 atomic_swap_64(ptep, pte); 1225 atomic_swap_64(ptep, pte);
1226 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 1226 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1227 } else { 1227 } else {
1228 atomic_swap_64(ptep, pte); 1228 atomic_swap_64(ptep, pte);
1229 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 1229 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1230 cpu_icache_sync_range(va, PAGE_SIZE); 1230 cpu_icache_sync_range(va, PAGE_SIZE);
1231 } 1231 }
1232 } else { 1232 } else {
1233 atomic_swap_64(ptep, pte); 1233 atomic_swap_64(ptep, pte);
1234 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 1234 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1235 } 1235 }
1236 } 1236 }
1237 1237
1238 pm_unlock(pm); 1238 pm_unlock(pm);
1239} 1239}
1240 1240
1241void 1241void
1242pmap_activate(struct lwp *l) 1242pmap_activate(struct lwp *l)
1243{ 1243{
1244 struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap; 1244 struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
1245 uint64_t ttbr0; 1245 uint64_t ttbr0;
1246 1246
1247 UVMHIST_FUNC(__func__); 1247 UVMHIST_FUNC(__func__);
1248 UVMHIST_CALLED(pmaphist); 1248 UVMHIST_CALLED(pmaphist);
1249 1249
1250 if (pm == pmap_kernel()) 1250 if (pm == pmap_kernel())
1251 return; 1251 return;
1252 if (l != curlwp) 1252 if (l != curlwp)
1253 return; 1253 return;
1254 1254
1255 KASSERT(pm->pm_l0table != NULL); 1255 KASSERT(pm->pm_l0table != NULL);
1256 1256
1257 UVMHIST_LOG(pmaphist, "lwp=%p (pid=%d)", l, l->l_proc->p_pid, 0, 0); 1257 UVMHIST_LOG(pmaphist, "lwp=%p (pid=%d)", l, l->l_proc->p_pid, 0, 0);
1258 1258
1259 /* XXX */ 1259 /* XXX */
1260 CTASSERT(PID_MAX <= 65535); /* 16bit ASID */ 1260 CTASSERT(PID_MAX <= 65535); /* 16bit ASID */
1261 if (pm->pm_asid == -1) 1261 if (pm->pm_asid == -1)
1262 pm->pm_asid = l->l_proc->p_pid; 1262 pm->pm_asid = l->l_proc->p_pid;
1263 1263
1264 ttbr0 = ((uint64_t)pm->pm_asid << 48) | pm->pm_l0table_pa; 1264 ttbr0 = ((uint64_t)pm->pm_asid << 48) | pm->pm_l0table_pa;
1265 aarch64_set_ttbr0(ttbr0); 1265 aarch64_set_ttbr0(ttbr0);
1266 1266
1267 pm->pm_activated = true; 1267 pm->pm_activated = true;
1268 1268
1269 PMAP_COUNT(activate); 1269 PMAP_COUNT(activate);
1270} 1270}
1271 1271
1272void 1272void
1273pmap_deactivate(struct lwp *l) 1273pmap_deactivate(struct lwp *l)
1274{ 1274{
1275 struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap; 1275 struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
1276 1276
1277 UVMHIST_FUNC(__func__); 1277 UVMHIST_FUNC(__func__);
1278 UVMHIST_CALLED(pmaphist); 1278 UVMHIST_CALLED(pmaphist);
1279 1279
1280 if (pm == pmap_kernel()) 1280 if (pm == pmap_kernel())
1281 return; 1281 return;
1282 1282
1283 UVMHIST_LOG(pmaphist, "lwp=%p, asid=%d", l, pm->pm_asid, 0, 0); 1283 UVMHIST_LOG(pmaphist, "lwp=%p, asid=%d", l, pm->pm_asid, 0, 0);
1284 1284
1285 /* XXX */ 1285 /* XXX */
1286 pm->pm_activated = false; 1286 pm->pm_activated = false;
1287 1287
1288 PMAP_COUNT(deactivate); 1288 PMAP_COUNT(deactivate);
1289} 1289}
1290 1290
1291struct pmap * 1291struct pmap *
1292pmap_create(void) 1292pmap_create(void)
1293{ 1293{
1294 struct pmap *pm; 1294 struct pmap *pm;
1295 1295
1296 UVMHIST_FUNC(__func__); 1296 UVMHIST_FUNC(__func__);
1297 UVMHIST_CALLED(pmaphist); 1297 UVMHIST_CALLED(pmaphist);
1298 1298
1299 pm = pool_cache_get(&_pmap_cache, PR_WAITOK); 1299 pm = pool_cache_get(&_pmap_cache, PR_WAITOK);
1300 memset(pm, 0, sizeof(*pm)); 1300 memset(pm, 0, sizeof(*pm));
1301 pm->pm_refcnt = 1; 1301 pm->pm_refcnt = 1;
1302 pm->pm_asid = -1; 1302 pm->pm_asid = -1;
1303 SLIST_INIT(&pm->pm_vmlist); 1303 SLIST_INIT(&pm->pm_vmlist);
1304 mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_VM); 1304 mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_VM);
1305 pm->pm_l0table = pmap_alloc_pdp(pm, &pm->pm_l0table_pa); 1305 pm->pm_l0table = pmap_alloc_pdp(pm, &pm->pm_l0table_pa);
1306 KASSERT(((vaddr_t)pm->pm_l0table & (PAGE_SIZE - 1)) == 0); 1306 KASSERT(((vaddr_t)pm->pm_l0table & (PAGE_SIZE - 1)) == 0);
1307 1307
1308 UVMHIST_LOG(pmaphist, "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx", 1308 UVMHIST_LOG(pmaphist, "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx",
1309 pm, pm->pm_l0table, pm->pm_l0table_pa, 0); 1309 pm, pm->pm_l0table, pm->pm_l0table_pa, 0);
1310 1310
1311 PMAP_COUNT(create); 1311 PMAP_COUNT(create);
1312 return pm; 1312 return pm;
1313} 1313}
1314 1314
1315void 1315void
1316pmap_destroy(struct pmap *pm) 1316pmap_destroy(struct pmap *pm)
1317{ 1317{
1318 unsigned int refcnt; 1318 unsigned int refcnt;
1319 1319
1320 UVMHIST_FUNC(__func__); 1320 UVMHIST_FUNC(__func__);
1321 UVMHIST_CALLED(pmaphist); 1321 UVMHIST_CALLED(pmaphist);
1322 1322
1323 UVMHIST_LOG(pmaphist, 1323 UVMHIST_LOG(pmaphist,
1324 "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx, refcnt=%d", 1324 "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx, refcnt=%d",
1325 pm, pm->pm_l0table, pm->pm_l0table_pa, pm->pm_refcnt); 1325 pm, pm->pm_l0table, pm->pm_l0table_pa, pm->pm_refcnt);
1326 1326
1327 if (pm == NULL) 1327 if (pm == NULL)
1328 return; 1328 return;
1329 1329
1330 if (pm == pmap_kernel()) 1330 if (pm == pmap_kernel())
1331 panic("cannot destroy kernel pmap"); 1331 panic("cannot destroy kernel pmap");
1332 1332
1333 refcnt = atomic_dec_uint_nv(&pm->pm_refcnt); 1333 refcnt = atomic_dec_uint_nv(&pm->pm_refcnt);
1334 if (refcnt > 0) 1334 if (refcnt > 0)
1335 return; 1335 return;
1336 1336
1337 aarch64_tlbi_by_asid(pm->pm_asid); 1337 aarch64_tlbi_by_asid(pm->pm_asid);
1338 1338
1339 _pmap_free_pdp_all(pm); 1339 _pmap_free_pdp_all(pm);
1340 mutex_destroy(&pm->pm_lock); 1340 mutex_destroy(&pm->pm_lock);
1341 pool_cache_put(&_pmap_cache, pm); 1341 pool_cache_put(&_pmap_cache, pm);
1342 1342
1343 PMAP_COUNT(destroy); 1343 PMAP_COUNT(destroy);
1344} 1344}
1345 1345
1346static int 1346static int
1347_pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, 1347_pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
1348 u_int flags, bool kenter) 1348 u_int flags, bool kenter)
1349{ 1349{
1350 struct vm_page *pg; 1350 struct vm_page *pg;
1351 struct pv_entry *spv, *opv = NULL; 1351 struct pv_entry *spv, *opv = NULL;
1352 pd_entry_t pde; 1352 pd_entry_t pde;
1353 pt_entry_t attr, pte, *ptep; 1353 pt_entry_t attr, pte, *ptep;
1354#ifdef UVMHIST 1354#ifdef UVMHIST
1355 pt_entry_t opte; 1355 pt_entry_t opte;
1356#endif 1356#endif
1357 pd_entry_t *l0, *l1, *l2, *l3; 1357 pd_entry_t *l0, *l1, *l2, *l3;
1358 paddr_t pdppa; 1358 paddr_t pdppa;
1359 uint32_t mdattr; 1359 uint32_t mdattr;
1360 unsigned int idx; 1360 unsigned int idx;
1361 int error = 0; 1361 int error = 0;
1362 const bool user = (pm != pmap_kernel()); 1362 const bool user = (pm != pmap_kernel());
1363 bool executable; 1363 bool executable;
1364 bool l3only = true; 1364 bool l3only = true;
1365 1365
1366 UVMHIST_FUNC(__func__); 1366 UVMHIST_FUNC(__func__);
1367 UVMHIST_CALLED(pmaphist); 1367 UVMHIST_CALLED(pmaphist);
1368 1368
1369 UVMHIST_LOG(pmaphist, "pm=%p, kentermode=%d", pm, kenter, 0, 0); 1369 UVMHIST_LOG(pmaphist, "pm=%p, kentermode=%d", pm, kenter, 0, 0);
1370 UVMHIST_LOG(pmaphist, "va=%016lx, pa=%016lx, prot=%08x, flags=%08x", 1370 UVMHIST_LOG(pmaphist, "va=%016lx, pa=%016lx, prot=%08x, flags=%08x",
1371 va, pa, prot, flags); 1371 va, pa, prot, flags);
1372 1372
1373 PM_ADDR_CHECK(pm, va); 1373 PM_ADDR_CHECK(pm, va);
1374 1374
1375#ifdef PMAPCOUNTERS 1375#ifdef PMAPCOUNTERS
1376 PMAP_COUNT(mappings); 1376 PMAP_COUNT(mappings);
1377 if (_pmap_color(va) == _pmap_color(pa)) { 1377 if (_pmap_color(va) == _pmap_color(pa)) {
1378 if (user) { 1378 if (user) {
1379 PMAP_COUNT(user_mappings); 1379 PMAP_COUNT(user_mappings);
1380 } else { 1380 } else {
1381 PMAP_COUNT(kern_mappings); 1381 PMAP_COUNT(kern_mappings);
1382 } 1382 }
1383 } else if (flags & PMAP_WIRED) { 1383 } else if (flags & PMAP_WIRED) {
1384 if (user) { 1384 if (user) {
1385 PMAP_COUNT(user_mappings_bad_wired); 1385 PMAP_COUNT(user_mappings_bad_wired);
1386 } else { 1386 } else {
1387 PMAP_COUNT(kern_mappings_bad_wired); 1387 PMAP_COUNT(kern_mappings_bad_wired);
1388 } 1388 }
1389 } else { 1389 } else {
1390 if (user) { 1390 if (user) {
1391 PMAP_COUNT(user_mappings_bad); 1391 PMAP_COUNT(user_mappings_bad);
1392 } else { 1392 } else {
1393 PMAP_COUNT(kern_mappings_bad); 1393 PMAP_COUNT(kern_mappings_bad);
1394 } 1394 }
1395 } 1395 }
1396#endif 1396#endif
1397 1397
1398 if (kenter) 1398 if (kenter)
1399 pg = NULL; 1399 pg = NULL;
1400 else 1400 else
1401 pg = PHYS_TO_VM_PAGE(pa); 1401 pg = PHYS_TO_VM_PAGE(pa);
1402 1402
1403 if (pg != NULL) { 1403 if (pg != NULL) {
1404 PMAP_COUNT(managed_mappings); 1404 PMAP_COUNT(managed_mappings);
1405 /* 1405 /*
1406 * allocate pv in advance of pm_lock() to avoid locking against ourselves. 1406 * allocate pv in advance of pm_lock() to avoid locking against ourselves.
1407 * pool_cache_get() may call pmap_kenter() internally. 1407 * pool_cache_get() may call pmap_kenter() internally.
1408 */ 1408 */
1409 spv = pool_cache_get(&_pmap_pv_pool, PR_NOWAIT); 1409 spv = pool_cache_get(&_pmap_pv_pool, PR_NOWAIT);
1410 } else { 1410 } else {
1411 PMAP_COUNT(unmanaged_mappings); 1411 PMAP_COUNT(unmanaged_mappings);
1412 spv = NULL; 1412 spv = NULL;
1413 } 1413 }
1414 1414
1415 pm_lock(pm); 1415 pm_lock(pm);
1416 1416
1417 /* 1417 /*
1418 * traverse L0 -> L1 -> L2 -> L3 table with growing pdp if needed. 1418 * traverse L0 -> L1 -> L2 -> L3 table with growing pdp if needed.
1419 */ 1419 */
1420 l0 = pm->pm_l0table; 1420 l0 = pm->pm_l0table;
1421 1421
1422 idx = l0pde_index(va); 1422 idx = l0pde_index(va);
1423 pde = l0[idx]; 1423 pde = l0[idx];
1424 if (!l0pde_valid(pde)) { 1424 if (!l0pde_valid(pde)) {
1425 pmap_alloc_pdp(pm, &pdppa); 1425 pmap_alloc_pdp(pm, &pdppa);
1426 KASSERT(pdppa != POOL_PADDR_INVALID); 1426 KASSERT(pdppa != POOL_PADDR_INVALID);
1427 atomic_swap_64(&l0[idx], pdppa | L0_TABLE); 1427 atomic_swap_64(&l0[idx], pdppa | L0_TABLE);
1428 l3only = false; 1428 l3only = false;
1429 } else { 1429 } else {
1430 pdppa = l0pde_pa(pde); 1430 pdppa = l0pde_pa(pde);
1431 } 1431 }
1432 l1 = (void *)AARCH64_PA_TO_KVA(pdppa); 1432 l1 = (void *)AARCH64_PA_TO_KVA(pdppa);
1433 1433
1434 idx = l1pde_index(va); 1434 idx = l1pde_index(va);
1435 pde = l1[idx]; 1435 pde = l1[idx];
1436 if (!l1pde_valid(pde)) { 1436 if (!l1pde_valid(pde)) {
1437 pmap_alloc_pdp(pm, &pdppa); 1437 pmap_alloc_pdp(pm, &pdppa);
1438 KASSERT(pdppa != POOL_PADDR_INVALID); 1438 KASSERT(pdppa != POOL_PADDR_INVALID);
1439 atomic_swap_64(&l1[idx], pdppa | L1_TABLE); 1439 atomic_swap_64(&l1[idx], pdppa | L1_TABLE);
1440 l3only = false; 1440 l3only = false;
1441 } else { 1441 } else {
1442 pdppa = l1pde_pa(pde); 1442 pdppa = l1pde_pa(pde);
1443 } 1443 }
1444 l2 = (void *)AARCH64_PA_TO_KVA(pdppa); 1444 l2 = (void *)AARCH64_PA_TO_KVA(pdppa);
1445 1445
1446 idx = l2pde_index(va); 1446 idx = l2pde_index(va);
1447 pde = l2[idx]; 1447 pde = l2[idx];
1448 if (!l2pde_valid(pde)) { 1448 if (!l2pde_valid(pde)) {
1449 pmap_alloc_pdp(pm, &pdppa); 1449 pmap_alloc_pdp(pm, &pdppa);
1450 KASSERT(pdppa != POOL_PADDR_INVALID); 1450 KASSERT(pdppa != POOL_PADDR_INVALID);
1451 atomic_swap_64(&l2[idx], pdppa | L2_TABLE); 1451 atomic_swap_64(&l2[idx], pdppa | L2_TABLE);
1452 l3only = false; 1452 l3only = false;
1453 } else { 1453 } else {
1454 pdppa = l2pde_pa(pde); 1454 pdppa = l2pde_pa(pde);
1455 } 1455 }
1456 l3 = (void *)AARCH64_PA_TO_KVA(pdppa); 1456 l3 = (void *)AARCH64_PA_TO_KVA(pdppa);
1457 1457
1458 idx = l3pte_index(va); 1458 idx = l3pte_index(va);
1459 ptep = &l3[idx]; /* as PTE */ 1459 ptep = &l3[idx]; /* as PTE */
1460 1460
1461 pte = *ptep; 1461 pte = *ptep;
1462#ifdef UVMHIST 1462#ifdef UVMHIST
1463 opte = pte; 1463 opte = pte;
1464#endif 1464#endif
1465 executable = l3pte_executable(pte, user); 1465 executable = l3pte_executable(pte, user);
1466 1466
1467 if (l3pte_valid(pte)) { 1467 if (l3pte_valid(pte)) {
1468 KASSERT(!kenter); /* pmap_kenter_pa() cannot override */ 1468 KASSERT(!kenter); /* pmap_kenter_pa() cannot override */
1469 1469
1470 PMAP_COUNT(remappings); 1470 PMAP_COUNT(remappings);
1471 1471
1472 /* pte is already mapped */ 1472 /* pte is already mapped */
1473 if (l3pte_pa(pte) != pa) { 1473 if (l3pte_pa(pte) != pa) {
1474 struct vm_page *opg; 1474 struct vm_page *opg;
1475 1475
1476#ifdef PMAPCOUNTERS 1476#ifdef PMAPCOUNTERS
1477 if (user) { 1477 if (user) {
1478 PMAP_COUNT(user_mappings_changed); 1478 PMAP_COUNT(user_mappings_changed);
1479 } else { 1479 } else {
1480 PMAP_COUNT(kern_mappings_changed); 1480 PMAP_COUNT(kern_mappings_changed);
1481 } 1481 }
1482#endif 1482#endif
1483 1483
1484 UVMHIST_LOG(pmaphist, 1484 UVMHIST_LOG(pmaphist,
1485 "va=%016lx has already mapped." 1485 "va=%016lx has already mapped."
1486 " old-pa=%016lx new-pa=%016lx, pte=%016llx\n", 1486 " old-pa=%016lx new-pa=%016lx, pte=%016llx\n",
1487 va, l3pte_pa(pte), pa, pte); 1487 va, l3pte_pa(pte), pa, pte);
1488 1488
1489 opg = PHYS_TO_VM_PAGE(l3pte_pa(pte)); 1489 opg = PHYS_TO_VM_PAGE(l3pte_pa(pte));
1490 if (opg != NULL) 1490 if (opg != NULL)
1491 opv = _pmap_remove_pv(opg, pm, va, pte); 1491 opv = _pmap_remove_pv(opg, pm, va, pte);
1492 } 1492 }
1493 1493
1494 if (pte & LX_BLKPAG_OS_WIRED) 1494 if (pte & LX_BLKPAG_OS_WIRED)
1495 pm->pm_stats.wired_count--; 1495 pm->pm_stats.wired_count--;
1496 pm->pm_stats.resident_count--; 1496 pm->pm_stats.resident_count--;
1497 } 1497 }
1498 1498
1499 /* 1499 /*
1500 * read permission is treated as an access permission internally. 1500 * read permission is treated as an access permission internally.
1501 * we need to add PROT_READ even if only PROT_WRITE or PROT_EXEC 1501 * we need to add PROT_READ even if only PROT_WRITE or PROT_EXEC
1502 * is requested for a wired mapping. 1502 * is requested for a wired mapping.
1503 */ 1503 */
1504 if ((flags & PMAP_WIRED) && (prot & (VM_PROT_WRITE|VM_PROT_EXECUTE))) 1504 if ((flags & PMAP_WIRED) && (prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)))
1505 prot |= VM_PROT_READ; 1505 prot |= VM_PROT_READ;
1506 1506
1507 mdattr = VM_PROT_READ | VM_PROT_WRITE; 1507 mdattr = VM_PROT_READ | VM_PROT_WRITE;
1508 if (pg != NULL) { 1508 if (pg != NULL) {
1509 error = _pmap_enter_pv(pg, pm, &spv, va, ptep, pa, flags); 1509 error = _pmap_enter_pv(pg, pm, &spv, va, ptep, pa, flags);
1510 1510
1511 if (error != 0) { 1511 if (error != 0) {
1512 /* 1512 /*
1513 * If pmap_enter() fails, 1513 * If pmap_enter() fails,
1514 * it must not leave behind an existing pmap entry. 1514 * it must not leave behind an existing pmap entry.
1515 */ 1515 */
1516 if (!kenter && ((pte & LX_BLKPAG_OS_WIRED) == 0)) 1516 if (!kenter && ((pte & LX_BLKPAG_OS_WIRED) == 0))
1517 atomic_swap_64(ptep, 0); 1517 atomic_swap_64(ptep, 0);
1518 1518
1519 PMAP_COUNT(pv_entry_cannotalloc); 1519 PMAP_COUNT(pv_entry_cannotalloc);
1520 if (flags & PMAP_CANFAIL) 1520 if (flags & PMAP_CANFAIL)
1521 goto done; 1521 goto done;
1522 panic("pmap_enter: failed to allocate pv_entry"); 1522 panic("pmap_enter: failed to allocate pv_entry");
1523 } 1523 }
1524 1524
1525 /* update referenced/modified flags */ 1525 /* update referenced/modified flags */
1526 VM_PAGE_TO_MD(pg)->mdpg_flags |= 1526 VM_PAGE_TO_MD(pg)->mdpg_flags |=
1527 (flags & (VM_PROT_READ | VM_PROT_WRITE)); 1527 (flags & (VM_PROT_READ | VM_PROT_WRITE));
1528 mdattr &= VM_PAGE_TO_MD(pg)->mdpg_flags; 1528 mdattr &= VM_PAGE_TO_MD(pg)->mdpg_flags;
1529 } 1529 }
1530 1530
1531#ifdef PMAPCOUNTERS 1531#ifdef PMAPCOUNTERS
1532 switch (flags & PMAP_CACHE_MASK) { 1532 switch (flags & PMAP_CACHE_MASK) {
1533 case PMAP_NOCACHE: 1533 case PMAP_NOCACHE:
1534 case PMAP_NOCACHE_OVR: 1534 case PMAP_NOCACHE_OVR:
1535 PMAP_COUNT(uncached_mappings); 1535 PMAP_COUNT(uncached_mappings);
1536 break; 1536 break;
1537 } 1537 }
1538#endif 1538#endif
1539 1539
1540 attr = _pmap_pte_adjust_prot(L3_PAGE, prot, mdattr, user); 1540 attr = _pmap_pte_adjust_prot(L3_PAGE, prot, mdattr, user);
1541 attr = _pmap_pte_adjust_cacheflags(attr, flags); 1541 attr = _pmap_pte_adjust_cacheflags(attr, flags);
1542 if (VM_MAXUSER_ADDRESS > va) 1542 if (VM_MAXUSER_ADDRESS > va)
1543 attr |= LX_BLKPAG_APUSER; 1543 attr |= LX_BLKPAG_APUSER;
1544 if (flags & PMAP_WIRED) 1544 if (flags & PMAP_WIRED)
1545 attr |= LX_BLKPAG_OS_WIRED; 1545 attr |= LX_BLKPAG_OS_WIRED;
1546#ifdef MULTIPROCESSOR 1546#ifdef MULTIPROCESSOR
1547 attr |= LX_BLKPAG_SH_IS; 1547 attr |= LX_BLKPAG_SH_IS;
1548#endif 1548#endif
1549 1549
1550 pte = pa | attr; 1550 pte = pa | attr;
1551 1551
1552 if (!executable && (prot & VM_PROT_EXECUTE)) { 1552 if (!executable && (prot & VM_PROT_EXECUTE)) {
1553 /* non-exec -> exec */ 1553 /* non-exec -> exec */
1554 UVMHIST_LOG(pmaphist, 1554 UVMHIST_LOG(pmaphist,
1555 "icache_sync: pm=%p, va=%016lx, pte: %016lx -> %016lx", 1555 "icache_sync: pm=%p, va=%016lx, pte: %016lx -> %016lx",
1556 pm, va, opte, pte); 1556 pm, va, opte, pte);
1557 if (!l3pte_writable(pte)) { 1557 if (!l3pte_writable(pte)) {
1558 PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, l3only); 1558 PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, l3only);
1559 atomic_swap_64(ptep, pte); 1559 atomic_swap_64(ptep, pte);
1560 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va ,true); 1560 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va ,true);
1561 } else { 1561 } else {
1562 atomic_swap_64(ptep, pte); 1562 atomic_swap_64(ptep, pte);
1563 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only); 1563 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
1564 cpu_icache_sync_range(va, PAGE_SIZE); 1564 cpu_icache_sync_range(va, PAGE_SIZE);
1565 } 1565 }
1566 } else { 1566 } else {
1567 atomic_swap_64(ptep, pte); 1567 atomic_swap_64(ptep, pte);
1568 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only); 1568 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
1569 } 1569 }
1570 1570
1571 if (pte & LX_BLKPAG_OS_WIRED) 1571 if (pte & LX_BLKPAG_OS_WIRED)
1572 pm->pm_stats.wired_count++; 1572 pm->pm_stats.wired_count++;
1573 pm->pm_stats.resident_count++; 1573 pm->pm_stats.resident_count++;
1574 1574
1575 done: 1575 done:
1576 pm_unlock(pm); 1576 pm_unlock(pm);
1577 1577
1578 /* spare pv was not used. discard */ 1578 /* spare pv was not used. discard */
1579 if (spv != NULL) 1579 if (spv != NULL)
1580 pool_cache_put(&_pmap_pv_pool, spv); 1580 pool_cache_put(&_pmap_pv_pool, spv);
1581 1581
1582 if (opv != NULL) 1582 if (opv != NULL)
1583 pool_cache_put(&_pmap_pv_pool, opv); 1583 pool_cache_put(&_pmap_pv_pool, opv);
1584 1584
1585 return error; 1585 return error;
1586} 1586}
1587 1587
1588int 1588int
1589pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1589pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1590{ 1590{
1591 KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE)); 1591 KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));
1592 1592
1593 return _pmap_enter(pm, va, pa, prot, flags, false); 1593 return _pmap_enter(pm, va, pa, prot, flags, false);
1594} 1594}
1595 1595
1596void 1596void
1597pmap_remove_all(struct pmap *pm) 1597pmap_remove_all(struct pmap *pm)
1598{ 1598{
1599 /* nothing to do */ 1599 /* nothing to do */
1600} 1600}
1601 1601
1602static void 1602static void
1603_pmap_remove(struct pmap *pm, vaddr_t va, bool kremove) 1603_pmap_remove(struct pmap *pm, vaddr_t va, bool kremove)
1604{ 1604{
1605 pt_entry_t pte, *ptep; 1605 pt_entry_t pte, *ptep;
1606 struct vm_page *pg; 1606 struct vm_page *pg;
1607 struct pv_entry *opv = NULL; 1607 struct pv_entry *opv = NULL;
1608 paddr_t pa; 1608 paddr_t pa;
1609 1609
1610 1610
1611 UVMHIST_FUNC(__func__); 1611 UVMHIST_FUNC(__func__);
1612 UVMHIST_CALLED(pmaphist); 1612 UVMHIST_CALLED(pmaphist);
1613 1613
1614 UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx, kremovemode=%d", 1614 UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx, kremovemode=%d",
1615 pm, va, kremove, 0); 1615 pm, va, kremove, 0);
1616 1616
1617 pm_lock(pm); 1617 pm_lock(pm);
1618 1618
1619 ptep = _pmap_pte_lookup(pm, va); 1619 ptep = _pmap_pte_lookup(pm, va);
1620 if (ptep != NULL) { 1620 if (ptep != NULL) {
1621 pte = *ptep; 1621 pte = *ptep;
1622 if (!l3pte_valid(pte)) 1622 if (!l3pte_valid(pte))
1623 goto done; 1623 goto done;
1624 1624
1625 pa = l3pte_pa(pte); 1625 pa = l3pte_pa(pte);
1626 1626
1627 if (kremove) 1627 if (kremove)
1628 pg = NULL; 1628 pg = NULL;
1629 else 1629 else
1630 pg = PHYS_TO_VM_PAGE(pa); 1630 pg = PHYS_TO_VM_PAGE(pa);
1631 1631
1632 if (pg != NULL) 1632 if (pg != NULL)
1633 opv = _pmap_remove_pv(pg, pm, va, pte); 1633 opv = _pmap_remove_pv(pg, pm, va, pte);
1634 1634
1635 atomic_swap_64(ptep, 0); 1635 atomic_swap_64(ptep, 0);
1636 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 1636 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1637 1637
1638 if ((pte & LX_BLKPAG_OS_WIRED) != 0) 1638 if ((pte & LX_BLKPAG_OS_WIRED) != 0)
1639 pm->pm_stats.wired_count--; 1639 pm->pm_stats.wired_count--;
1640 pm->pm_stats.resident_count--; 1640 pm->pm_stats.resident_count--;
1641 } 1641 }
1642 done: 1642 done:
1643 pm_unlock(pm); 1643 pm_unlock(pm);
1644 1644
1645 if (opv != NULL) 1645 if (opv != NULL)
1646 pool_cache_put(&_pmap_pv_pool, opv); 1646 pool_cache_put(&_pmap_pv_pool, opv);
1647} 1647}
1648 1648
1649void 1649void
1650pmap_remove(struct pmap *pm, vaddr_t sva, vaddr_t eva) 1650pmap_remove(struct pmap *pm, vaddr_t sva, vaddr_t eva)
1651{ 1651{
1652 vaddr_t va; 1652 vaddr_t va;
1653 1653
1654 PM_ADDR_CHECK(pm, sva); 1654 PM_ADDR_CHECK(pm, sva);
1655 1655
1656 KASSERT(!IN_KSEG_ADDR(sva)); 1656 KASSERT(!IN_KSEG_ADDR(sva));
1657 1657
1658 for (va = sva; va < eva; va += PAGE_SIZE) 1658 for (va = sva; va < eva; va += PAGE_SIZE)
1659 _pmap_remove(pm, va, false); 1659 _pmap_remove(pm, va, false);
1660} 1660}
1661 1661
1662void 1662void
1663pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 1663pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1664{ 1664{
1665 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 1665 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
1666 struct pv_entry *pv, *pvtmp; 1666 struct pv_entry *pv, *pvtmp;
1667 pt_entry_t opte; 1667 pt_entry_t opte;
1668 1668
1669 KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE)); 1669 KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));
1670 1670
1671 UVMHIST_FUNC(__func__); 1671 UVMHIST_FUNC(__func__);
1672 UVMHIST_CALLED(pmaphist); 1672 UVMHIST_CALLED(pmaphist);
1673 1673
1674 UVMHIST_LOG(pmaphist, "pg=%p, md=%p, pa=%016lx, prot=%08x", 1674 UVMHIST_LOG(pmaphist, "pg=%p, md=%p, pa=%016lx, prot=%08x",
1675 pg, md, VM_PAGE_TO_PHYS(pg), prot); 1675 pg, md, VM_PAGE_TO_PHYS(pg), prot);
1676 1676
1677 1677
1678 if ((prot & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) == 1678 if ((prot & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
1679 VM_PROT_NONE) { 1679 VM_PROT_NONE) {
1680 1680
1681 /* remove all pages reference to this physical page */ 1681 /* remove all pages reference to this physical page */
1682 pmap_pv_lock(md); 1682 pmap_pv_lock(md);
1683 TAILQ_FOREACH_SAFE(pv, &md->mdpg_pvhead, pv_link, pvtmp) { 1683 TAILQ_FOREACH_SAFE(pv, &md->mdpg_pvhead, pv_link, pvtmp) {
1684 1684
1685 opte = atomic_swap_64(pv->pv_ptep, 0); 1685 opte = atomic_swap_64(pv->pv_ptep, 0);
1686 AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, 1686 AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid,
1687 pv->pv_va, true); 1687 pv->pv_va, true);
1688 1688
1689 if ((opte & LX_BLKPAG_OS_WIRED) != 0) 1689 if ((opte & LX_BLKPAG_OS_WIRED) != 0)
1690 pv->pv_pmap->pm_stats.wired_count--; 1690 pv->pv_pmap->pm_stats.wired_count--;
1691 pv->pv_pmap->pm_stats.resident_count--; 1691 pv->pv_pmap->pm_stats.resident_count--;
1692 1692
1693 TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link); 1693 TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link);
1694 PMAP_COUNT(pv_remove); 1694 PMAP_COUNT(pv_remove);
1695 pool_cache_put(&_pmap_pv_pool, pv); 1695 pool_cache_put(&_pmap_pv_pool, pv);
1696 } 1696 }
1697 pmap_pv_unlock(md); 1697 pmap_pv_unlock(md);
1698 1698
1699 } else { 1699 } else {
1700 pmap_pv_lock(md); 1700 pmap_pv_lock(md);
1701 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) { 1701 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
1702 _pmap_protect_pv(pg, pv, prot); 1702 _pmap_protect_pv(pg, pv, prot);
1703 } 1703 }
1704 pmap_pv_unlock(md); 1704 pmap_pv_unlock(md);
1705 } 1705 }
1706} 1706}
1707 1707
1708void 1708void
1709pmap_unwire(struct pmap *pm, vaddr_t va) 1709pmap_unwire(struct pmap *pm, vaddr_t va)
1710{ 1710{
1711 pt_entry_t pte, *ptep; 1711 pt_entry_t pte, *ptep;
1712 1712
1713 UVMHIST_FUNC(__func__); 1713 UVMHIST_FUNC(__func__);
1714 UVMHIST_CALLED(pmaphist); 1714 UVMHIST_CALLED(pmaphist);
1715 1715
1716 UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx", pm, va, 0, 0); 1716 UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx", pm, va, 0, 0);
1717 1717
1718 PMAP_COUNT(unwire); 1718 PMAP_COUNT(unwire);
1719 1719
1720 PM_ADDR_CHECK(pm, va); 1720 PM_ADDR_CHECK(pm, va);
1721 1721
1722 pm_lock(pm); 1722 pm_lock(pm);
1723 ptep = _pmap_pte_lookup(pm, va); 1723 ptep = _pmap_pte_lookup(pm, va);
1724 if (ptep != NULL) { 1724 if (ptep != NULL) {
1725 pte = *ptep; 1725 pte = *ptep;
1726 if (!l3pte_valid(pte) || 1726 if (!l3pte_valid(pte) ||
1727 ((pte & LX_BLKPAG_OS_WIRED) == 0)) { 1727 ((pte & LX_BLKPAG_OS_WIRED) == 0)) {
1728 /* invalid pte, or pte is not wired */ 1728 /* invalid pte, or pte is not wired */
1729 PMAP_COUNT(unwire_failure); 1729 PMAP_COUNT(unwire_failure);
1730 pm_unlock(pm); 1730 pm_unlock(pm);
1731 return; 1731 return;
1732 } 1732 }
1733 1733
1734 pte &= ~LX_BLKPAG_OS_WIRED; 1734 pte &= ~LX_BLKPAG_OS_WIRED;
1735 atomic_swap_64(ptep, pte); 1735 atomic_swap_64(ptep, pte);
1736 1736
1737 pm->pm_stats.wired_count--; 1737 pm->pm_stats.wired_count--;
1738 } 1738 }
1739 pm_unlock(pm); 1739 pm_unlock(pm);
1740} 1740}
1741 1741
1742bool 1742bool
1743pmap_fault_fixup(struct pmap *pm, vaddr_t va, vm_prot_t accessprot, bool user) 1743pmap_fault_fixup(struct pmap *pm, vaddr_t va, vm_prot_t accessprot, bool user)
1744{ 1744{
1745 struct vm_page *pg; 1745 struct vm_page *pg;
1746 struct vm_page_md *md; 1746 struct vm_page_md *md;
1747 pt_entry_t *ptep, pte; 1747 pt_entry_t *ptep, pte;
1748 vm_prot_t pmap_prot; 1748 vm_prot_t pmap_prot;
1749 paddr_t pa; 1749 paddr_t pa;
1750 bool fixed = false; 1750 bool fixed = false;
1751 1751
1752 UVMHIST_FUNC(__func__); 1752 UVMHIST_FUNC(__func__);
1753 UVMHIST_CALLED(pmaphist); 1753 UVMHIST_CALLED(pmaphist);
1754 1754
1755 UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx, accessprot=%08x", 1755 UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx, accessprot=%08x",
1756 pm, va, accessprot, 0); 1756 pm, va, accessprot, 0);
1757 1757
1758 1758
1759#if 0 1759#if 0
1760 PM_ADDR_CHECK(pm, va); 1760 PM_ADDR_CHECK(pm, va);
1761#else 1761#else
1762 if (((pm == pmap_kernel()) && 1762 if (((pm == pmap_kernel()) &&
1763 !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS)) || 1763 !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS)) ||
1764 ((pm != pmap_kernel()) && 1764 ((pm != pmap_kernel()) &&
1765 !(VM_MIN_ADDRESS <= va && va <= VM_MAX_ADDRESS))) { 1765 !(VM_MIN_ADDRESS <= va && va <= VM_MAX_ADDRESS))) {
1766 1766
1767 UVMHIST_LOG(pmaphist, 1767 UVMHIST_LOG(pmaphist,
1768 "pmap space and va mismatch: pm=%s, va=%016lx", 1768 "pmap space and va mismatch: pm=%s, va=%016lx",
1769 (pm == pmap_kernel()) ? "kernel" : "user", va, 0, 0); 1769 (pm == pmap_kernel()) ? "kernel" : "user", va, 0, 0);
1770 return false; 1770 return false;
1771 } 1771 }
1772#endif 1772#endif
1773 1773
1774 pm_lock(pm); 1774 pm_lock(pm);
1775 1775
1776 ptep = _pmap_pte_lookup(pm, va); 1776 ptep = _pmap_pte_lookup(pm, va);
1777 if (ptep == NULL) { 1777 if (ptep == NULL) {
1778 UVMHIST_LOG(pmaphist, "pte_lookup failure: va=%016lx", 1778 UVMHIST_LOG(pmaphist, "pte_lookup failure: va=%016lx",
1779 va, 0, 0, 0); 1779 va, 0, 0, 0);
1780 goto done; 1780 goto done;
1781 } 1781 }
1782 1782
1783 pte = *ptep; 1783 pte = *ptep;
1784 if (!l3pte_valid(pte)) { 1784 if (!l3pte_valid(pte)) {
1785 UVMHIST_LOG(pmaphist, "invalid pte: %016llx: va=%016lx", 1785 UVMHIST_LOG(pmaphist, "invalid pte: %016llx: va=%016lx",
1786 pte, va, 0, 0); 1786 pte, va, 0, 0);
1787 goto done; 1787 goto done;
1788 } 1788 }
1789 1789
1790 pa = l3pte_pa(*ptep); 1790 pa = l3pte_pa(*ptep);
1791 pg = PHYS_TO_VM_PAGE(pa); 1791 pg = PHYS_TO_VM_PAGE(pa);
1792 if (pg == NULL) { 1792 if (pg == NULL) {
1793 UVMHIST_LOG(pmaphist, "pg not found: va=%016lx", va, 0, 0, 0); 1793 UVMHIST_LOG(pmaphist, "pg not found: va=%016lx", va, 0, 0, 0);
1794 goto done; 1794 goto done;
1795 } 1795 }
1796 md = VM_PAGE_TO_MD(pg); 1796 md = VM_PAGE_TO_MD(pg);
1797 1797
1798 /* get prot by pmap_enter() (stored in software use bit in pte) */ 1798 /* get prot by pmap_enter() (stored in software use bit in pte) */
1799 switch (pte & (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE)) { 1799 switch (pte & (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE)) {
1800 case 0: 1800 case 0:
1801 default: 1801 default:
1802 pmap_prot = 0; 1802 pmap_prot = 0;
1803 break; 1803 break;
1804 case LX_BLKPAG_OS_READ: 1804 case LX_BLKPAG_OS_READ:
1805 pmap_prot = VM_PROT_READ; 1805 pmap_prot = VM_PROT_READ;
1806 break; 1806 break;
1807 case LX_BLKPAG_OS_WRITE: 1807 case LX_BLKPAG_OS_WRITE:
1808 case LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE: 1808 case LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE:
1809 pmap_prot = (VM_PROT_READ|VM_PROT_WRITE); 1809 pmap_prot = (VM_PROT_READ|VM_PROT_WRITE);
1810 break; 1810 break;
1811 } 1811 }
1812 if (l3pte_executable(pte, pm != pmap_kernel())) 1812 if (l3pte_executable(pte, pm != pmap_kernel()))
1813 pmap_prot |= VM_PROT_EXECUTE; 1813 pmap_prot |= VM_PROT_EXECUTE;
1814 1814
1815 UVMHIST_LOG(pmaphist, "va=%016lx, pmapprot=%08x, accessprot=%08x", 1815 UVMHIST_LOG(pmaphist, "va=%016lx, pmapprot=%08x, accessprot=%08x",
1816 va, pmap_prot, accessprot, 0); 1816 va, pmap_prot, accessprot, 0);
1817 1817
1818 /* ignore everything except read/write/execute */ 1818 /* ignore everything except read/write/execute */
1819 accessprot &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 1819 accessprot &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
1820 1820
1821 /* no permission to read/write/execute for this page */ 1821 /* no permission to read/write/execute for this page */
1822 if ((pmap_prot & accessprot) != accessprot) { 1822 if ((pmap_prot & accessprot) != accessprot) {
1823 UVMHIST_LOG(pmaphist, "no permission to access", 0, 0, 0, 0); 1823 UVMHIST_LOG(pmaphist, "no permission to access", 0, 0, 0, 0);
1824 goto done; 1824 goto done;
1825 } 1825 }
1826 1826
1827 if ((pte & LX_BLKPAG_AF) && ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW)) { 1827 /* pte is readable and writable, but occured fault? probably copy(9) */
1828#if 1 /* XXX: DEBUG */ 1828 if ((pte & LX_BLKPAG_AF) && ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW))
1829 if (!user) { 
1830 /* 
1831 * pte is readable and writable, but occured fault? 
1832 * unprivileged load/store, or else ? 
1833 */ 
1834 printf("%s: fault: va=%016lx pte=%08" PRIx64 
1835 ": pte is rw." 
1836 " unprivileged load/store ? (onfault=%p)\n", 
1837 __func__, va, pte, curlwp->l_md.md_onfault); 
1838 } 
1839#endif 
1840 goto done; 1829 goto done;
1841 } 
1842 KASSERT(((pte & LX_BLKPAG_AF) == 0) || 
1843 ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RO)); 
1844 1830
1845 pmap_pv_lock(md); 1831 pmap_pv_lock(md);
1846 if ((pte & LX_BLKPAG_AF) == 0) { 1832 if ((pte & LX_BLKPAG_AF) == 0) {
1847 /* pte has no AF bit, set referenced and AF bit */ 1833 /* pte has no AF bit, set referenced and AF bit */
1848 UVMHIST_LOG(pmaphist, 1834 UVMHIST_LOG(pmaphist,
1849 "REFERENCED:" 1835 "REFERENCED:"
1850 " va=%016lx, pa=%016lx, pte_prot=%08x, accessprot=%08x", 1836 " va=%016lx, pa=%016lx, pte_prot=%08x, accessprot=%08x",
1851 va, pa, pmap_prot, accessprot); 1837 va, pa, pmap_prot, accessprot);
1852 md->mdpg_flags |= VM_PROT_READ; /* set referenced */ 1838 md->mdpg_flags |= VM_PROT_READ; /* set referenced */
1853 pte |= LX_BLKPAG_AF; 1839 pte |= LX_BLKPAG_AF;
1854 1840
1855 PMAP_COUNT(fixup_referenced); 1841 PMAP_COUNT(fixup_referenced);
1856 } 1842 }
1857 if ((accessprot & VM_PROT_WRITE) && 1843 if ((accessprot & VM_PROT_WRITE) &&
1858 ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RO)) { 1844 ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RO)) {
1859 /* pte is not RW. set modified and RW */ 1845 /* pte is not RW. set modified and RW */
1860 1846
1861 UVMHIST_LOG(pmaphist, "MODIFIED:" 1847 UVMHIST_LOG(pmaphist, "MODIFIED:"
1862 " va=%016lx, pa=%016lx, pte_prot=%08x, accessprot=%08x", 1848 " va=%016lx, pa=%016lx, pte_prot=%08x, accessprot=%08x",
1863 va, pa, pmap_prot, accessprot); 1849 va, pa, pmap_prot, accessprot);
1864 md->mdpg_flags |= VM_PROT_WRITE; /* set modified */ 1850 md->mdpg_flags |= VM_PROT_WRITE; /* set modified */
1865 pte &= ~LX_BLKPAG_AP; 1851 pte &= ~LX_BLKPAG_AP;
1866 pte |= LX_BLKPAG_AP_RW; 1852 pte |= LX_BLKPAG_AP_RW;
1867 1853
1868 PMAP_COUNT(fixup_modified); 1854 PMAP_COUNT(fixup_modified);
1869 } 1855 }
1870 pmap_pv_unlock(md); 1856 pmap_pv_unlock(md);
1871 1857
1872 atomic_swap_64(ptep, pte); 1858 atomic_swap_64(ptep, pte);
1873 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); 1859 AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1874 1860
1875 fixed = true; 1861 fixed = true;
1876 1862
1877 done: 1863 done:
1878 pm_unlock(pm); 1864 pm_unlock(pm);
1879 return fixed; 1865 return fixed;
1880} 1866}
1881 1867
1882bool 1868bool
1883pmap_clear_modify(struct vm_page *pg) 1869pmap_clear_modify(struct vm_page *pg)
1884{ 1870{
1885 struct pv_entry *pv; 1871 struct pv_entry *pv;
1886 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1872 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1887 pt_entry_t *ptep, pte, opte; 1873 pt_entry_t *ptep, pte, opte;
1888 vaddr_t va; 1874 vaddr_t va;
1889 1875
1890 UVMHIST_FUNC(__func__); 1876 UVMHIST_FUNC(__func__);
1891 UVMHIST_CALLED(pmaphist); 1877 UVMHIST_CALLED(pmaphist);
1892 1878
1893 UVMHIST_LOG(pmaphist, "pg=%p, mdpg_flags=%08x", 1879 UVMHIST_LOG(pmaphist, "pg=%p, mdpg_flags=%08x",
1894 pg, md->mdpg_flags, 0, 0); 1880 pg, md->mdpg_flags, 0, 0);
1895 1881
1896 pmap_pv_lock(md); 1882 pmap_pv_lock(md);
1897 1883
1898 if ((md->mdpg_flags & VM_PROT_WRITE) == 0) { 1884 if ((md->mdpg_flags & VM_PROT_WRITE) == 0) {
1899 pmap_pv_unlock(md); 1885 pmap_pv_unlock(md);
1900 return false; 1886 return false;
1901 } 1887 }
1902 1888
1903 md->mdpg_flags &= ~VM_PROT_WRITE; 1889 md->mdpg_flags &= ~VM_PROT_WRITE;
1904 1890
1905 PMAP_COUNT(clear_modify); 1891 PMAP_COUNT(clear_modify);
1906 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) { 1892 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
1907 PMAP_COUNT(clear_modify_pages); 1893 PMAP_COUNT(clear_modify_pages);
1908 1894
1909 va = pv->pv_va; 1895 va = pv->pv_va;
1910 1896
1911 ptep = pv->pv_ptep; 1897 ptep = pv->pv_ptep;
1912 opte = pte = *ptep; 1898 opte = pte = *ptep;
1913 tryagain: 1899 tryagain:
1914 if (!l3pte_valid(pte)) 1900 if (!l3pte_valid(pte))
1915 continue; 1901 continue;
1916 1902
1917 /* clear write permission */ 1903 /* clear write permission */
1918 pte &= ~LX_BLKPAG_AP; 1904 pte &= ~LX_BLKPAG_AP;
1919 pte |= LX_BLKPAG_AP_RO; 1905 pte |= LX_BLKPAG_AP_RO;
1920 1906
1921 /* XXX: possible deadlock if using PM_LOCK(). this is racy */ 1907 /* XXX: possible deadlock if using PM_LOCK(). this is racy */
1922 if ((pte = atomic_cas_64(ptep, opte, pte)) != opte) { 1908 if ((pte = atomic_cas_64(ptep, opte, pte)) != opte) {
1923 opte = pte; 1909 opte = pte;
1924 goto tryagain; 1910 goto tryagain;
1925 } 1911 }
1926 1912
1927 AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, va, true); 1913 AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, va, true);
1928 1914
1929 UVMHIST_LOG(pmaphist, 1915 UVMHIST_LOG(pmaphist,
1930 "va=%016llx, ptep=%p, pa=%016lx, RW -> RO", 1916 "va=%016llx, ptep=%p, pa=%016lx, RW -> RO",
1931 va, ptep, l3pte_pa(pte), 0); 1917 va, ptep, l3pte_pa(pte), 0);
1932 } 1918 }
1933 1919
1934 pmap_pv_unlock(md); 1920 pmap_pv_unlock(md);
1935 1921
1936 return true; 1922 return true;
1937} 1923}
1938 1924
1939bool 1925bool
1940pmap_clear_reference(struct vm_page *pg) 1926pmap_clear_reference(struct vm_page *pg)
1941{ 1927{
1942 struct pv_entry *pv; 1928 struct pv_entry *pv;
1943 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1929 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1944 pt_entry_t *ptep, pte, opte; 1930 pt_entry_t *ptep, pte, opte;
1945 vaddr_t va; 1931 vaddr_t va;
1946 1932
1947 UVMHIST_FUNC(__func__); 1933 UVMHIST_FUNC(__func__);
1948 UVMHIST_CALLED(pmaphist); 1934 UVMHIST_CALLED(pmaphist);
1949 1935
1950 UVMHIST_LOG(pmaphist, "pg=%p, mdpg_flags=%08x", 1936 UVMHIST_LOG(pmaphist, "pg=%p, mdpg_flags=%08x",
1951 pg, md->mdpg_flags, 0, 0); 1937 pg, md->mdpg_flags, 0, 0);
1952 1938
1953 pmap_pv_lock(md); 1939 pmap_pv_lock(md);
1954 1940
1955 if ((md->mdpg_flags & VM_PROT_READ) == 0) { 1941 if ((md->mdpg_flags & VM_PROT_READ) == 0) {
1956 pmap_pv_unlock(md); 1942 pmap_pv_unlock(md);
1957 return false; 1943 return false;
1958 } 1944 }
1959 md->mdpg_flags &= ~VM_PROT_READ; 1945 md->mdpg_flags &= ~VM_PROT_READ;
1960 1946
1961 PMAP_COUNT(clear_reference); 1947 PMAP_COUNT(clear_reference);
1962 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) { 1948 TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
1963 PMAP_COUNT(clear_reference_pages); 1949 PMAP_COUNT(clear_reference_pages);
1964 1950
1965 va = pv->pv_va; 1951 va = pv->pv_va;
1966 1952
1967 ptep = pv->pv_ptep; 1953 ptep = pv->pv_ptep;
1968 opte = pte = *ptep; 1954 opte = pte = *ptep;
1969 tryagain: 1955 tryagain:
1970 if (!l3pte_valid(pte)) 1956 if (!l3pte_valid(pte))
1971 continue; 1957 continue;
1972 1958
1973 /* clear access permission */ 1959 /* clear access permission */
1974 pte &= ~LX_BLKPAG_AF; 1960 pte &= ~LX_BLKPAG_AF;
1975 1961
1976 /* XXX: possible deadlock if using PM_LOCK(). this is racy */ 1962 /* XXX: possible deadlock if using PM_LOCK(). this is racy */
1977 if ((pte = atomic_cas_64(ptep, opte, pte)) != opte) { 1963 if ((pte = atomic_cas_64(ptep, opte, pte)) != opte) {
1978 opte = pte; 1964 opte = pte;
1979 goto tryagain; 1965 goto tryagain;
1980 } 1966 }
1981 1967
1982 AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, va, true); 1968 AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, va, true);
1983 1969
1984 UVMHIST_LOG(pmaphist, "va=%016llx, ptep=%p, pa=%016lx, unset AF", 1970 UVMHIST_LOG(pmaphist, "va=%016llx, ptep=%p, pa=%016lx, unset AF",
1985 va, ptep, l3pte_pa(pte), 0); 1971 va, ptep, l3pte_pa(pte), 0);
1986 } 1972 }
1987 1973
1988 pmap_pv_unlock(md); 1974 pmap_pv_unlock(md);
1989 1975
1990 return true; 1976 return true;
1991} 1977}
1992 1978
1993bool 1979bool
1994pmap_is_modified(struct vm_page *pg) 1980pmap_is_modified(struct vm_page *pg)
1995{ 1981{
1996 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1982 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1997 1983
1998 return (md->mdpg_flags & VM_PROT_WRITE); 1984 return (md->mdpg_flags & VM_PROT_WRITE);
1999} 1985}
2000 1986
2001bool 1987bool
2002pmap_is_referenced(struct vm_page *pg) 1988pmap_is_referenced(struct vm_page *pg)
2003{ 1989{
2004 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 1990 struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2005 1991
2006 return (md->mdpg_flags & VM_PROT_READ); 1992 return (md->mdpg_flags & VM_PROT_READ);
2007} 1993}
2008 1994
2009#ifdef DDB 1995#ifdef DDB
2010/* get pointer to kernel segment L2 or L3 table entry */ 1996/* get pointer to kernel segment L2 or L3 table entry */
2011pt_entry_t * 1997pt_entry_t *
2012kvtopte(vaddr_t va) 1998kvtopte(vaddr_t va)
2013{ 1999{
2014 pd_entry_t *l0, *l1, *l2, *l3; 2000 pd_entry_t *l0, *l1, *l2, *l3;
2015 pd_entry_t pde; 2001 pd_entry_t pde;
2016 pt_entry_t *ptep; 2002 pt_entry_t *ptep;
2017 unsigned int idx; 2003 unsigned int idx;
2018 2004
2019 KASSERT(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS); 2005 KASSERT(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
2020 2006
2021 /* 2007 /*
2022 * traverse L0 -> L1 -> L2 block (or -> L3 table) 2008 * traverse L0 -> L1 -> L2 block (or -> L3 table)
2023 */ 2009 */
2024 l0 = pmap_kernel()->pm_l0table; 2010 l0 = pmap_kernel()->pm_l0table;
2025 2011
2026 idx = l0pde_index(va); 2012 idx = l0pde_index(va);
2027 pde = l0[idx]; 2013 pde = l0[idx];
2028 if (!l0pde_valid(pde)) 2014 if (!l0pde_valid(pde))
2029 return NULL; 2015 return NULL;
2030 2016
2031 l1 = (void *)AARCH64_PA_TO_KVA(l0pde_pa(pde)); 2017 l1 = (void *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
2032 idx = l1pde_index(va); 2018 idx = l1pde_index(va);
2033 pde = l1[idx]; 2019 pde = l1[idx];
2034 if (!l1pde_valid(pde)) 2020 if (!l1pde_valid(pde))
2035 return NULL; 2021 return NULL;
2036 2022
2037 if (l1pde_is_block(pde)) 2023 if (l1pde_is_block(pde))
2038 return NULL; 2024 return NULL;
2039 2025
2040 l2 = (void *)AARCH64_PA_TO_KVA(l1pde_pa(pde)); 2026 l2 = (void *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
2041 idx = l2pde_index(va); 2027 idx = l2pde_index(va);
2042 pde = l2[idx]; 2028 pde = l2[idx];
2043 if (!l2pde_valid(pde)) 2029 if (!l2pde_valid(pde))
2044 return NULL; 2030 return NULL;
2045 if (l2pde_is_block(pde)) 2031 if (l2pde_is_block(pde))
2046 return &l2[idx]; /* kernel text/data use L2 blocks */ 2032 return &l2[idx]; /* kernel text/data use L2 blocks */
2047 2033
2048 l3 = (void *)AARCH64_PA_TO_KVA(l2pde_pa(pde)); 2034 l3 = (void *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
2049 idx = l3pte_index(va); 2035 idx = l3pte_index(va);
2050 ptep = &l3[idx]; /* or may use L3 page? */ 2036 ptep = &l3[idx]; /* or may use L3 page? */
2051 2037
2052 return ptep; 2038 return ptep;
2053} 2039}
2054 2040
2055/* change attribute of kernel segment */ 2041/* change attribute of kernel segment */
2056pt_entry_t 2042pt_entry_t
2057pmap_kvattr(vaddr_t va, vm_prot_t prot) 2043pmap_kvattr(vaddr_t va, vm_prot_t prot)
2058{ 2044{
2059 pt_entry_t *ptep, pte, opte; 2045 pt_entry_t *ptep, pte, opte;
2060 2046
2061 KASSERT(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS); 2047 KASSERT(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
2062 2048
2063 ptep = kvtopte(va); 2049 ptep = kvtopte(va);
2064 if (ptep == NULL) 2050 if (ptep == NULL)
2065 panic("%s: %016lx is not mapped\n", __func__, va); 2051 panic("%s: %016lx is not mapped\n", __func__, va);
2066 2052
2067 opte = pte = *ptep; 2053 opte = pte = *ptep;
2068 2054
2069 pte &= ~(LX_BLKPAG_AF|LX_BLKPAG_AP); 2055 pte &= ~(LX_BLKPAG_AF|LX_BLKPAG_AP);
2070 switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) { 2056 switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) {
2071 case 0: 2057 case 0:
2072 break; 2058 break;
2073 case VM_PROT_READ: 2059 case VM_PROT_READ:
2074 pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RO); 2060 pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RO);
2075 break; 2061 break;
2076 case VM_PROT_WRITE: 2062 case VM_PROT_WRITE:
2077 case VM_PROT_READ|VM_PROT_WRITE: 2063 case VM_PROT_READ|VM_PROT_WRITE:
2078 pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); 2064 pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW);
2079 break; 2065 break;
2080 } 2066 }
2081 2067
2082 if ((prot & VM_PROT_EXECUTE) == 0) { 2068 if ((prot & VM_PROT_EXECUTE) == 0) {
2083 pte |= LX_BLKPAG_PXN; 2069 pte |= LX_BLKPAG_PXN;
2084 } else { 2070 } else {
2085 pte |= LX_BLKPAG_AF; 2071 pte |= LX_BLKPAG_AF;
2086 pte &= ~LX_BLKPAG_PXN; 2072 pte &= ~LX_BLKPAG_PXN;
2087 } 2073 }
2088 2074
2089 *ptep = pte; 2075 *ptep = pte;
2090 2076
2091 return opte; 2077 return opte;
2092} 2078}
2093 2079
2094static void 2080static void
2095pmap_db_pte_print(pt_entry_t pte, int level, void (*pr)(const char *, ...)) 2081pmap_db_pte_print(pt_entry_t pte, int level, void (*pr)(const char *, ...))
2096{ 2082{
2097 if (pte == 0) { 2083 if (pte == 0) {
2098 pr(" UNUSED\n"); 2084 pr(" UNUSED\n");
2099 2085
2100 } else if (level == 0) { 2086 } else if (level == 0) {
2101 /* L0 pde */ 2087 /* L0 pde */
2102 pr(", %s", 2088 pr(", %s",
2103 l1pde_is_table(pte) ? "TABLE" : "***ILLEGAL TYPE***"); 2089 l1pde_is_table(pte) ? "TABLE" : "***ILLEGAL TYPE***");
2104 pr(", %s", l0pde_valid(pte) ? "VALID" : "***INVALID***"); 2090 pr(", %s", l0pde_valid(pte) ? "VALID" : "***INVALID***");
2105 2091
2106 pr(", PA=%016lx", l0pde_pa(pte)); 2092 pr(", PA=%016lx", l0pde_pa(pte));
2107 2093
2108 } else if (((level == 1) && l1pde_is_block(pte)) || 2094 } else if (((level == 1) && l1pde_is_block(pte)) ||
2109 ((level == 2) && l2pde_is_block(pte)) || 2095 ((level == 2) && l2pde_is_block(pte)) ||
2110 (level == 3)) { 2096 (level == 3)) {
2111 2097
2112 if (level == 3) { 2098 if (level == 3) {
2113 pr(", %s", 2099 pr(", %s",
2114 l3pte_is_page(pte) ? " PAGE" : "**ILLEGAL TYPE**"); 2100 l3pte_is_page(pte) ? " PAGE" : "**ILLEGAL TYPE**");
2115 pr(", %s", 2101 pr(", %s",
2116 l3pte_valid(pte) ? "VALID" : "**INVALID**"); 2102 l3pte_valid(pte) ? "VALID" : "**INVALID**");
2117 } else { 2103 } else {
2118 pr(", %s", l1pde_is_table(pte) ? "TABLE" : "BLOCK"); 2104 pr(", %s", l1pde_is_table(pte) ? "TABLE" : "BLOCK");
2119 pr(", %s", 2105 pr(", %s",
2120 l1pde_valid(pte) ? "VALID" : "**INVALID**"); 2106 l1pde_valid(pte) ? "VALID" : "**INVALID**");
2121 } 2107 }
2122 2108
2123 pr(", PA=%016lx", l3pte_pa(pte)); 2109 pr(", PA=%016lx", l3pte_pa(pte));
2124 2110
2125 /* L[12] block, or L3 pte */ 2111 /* L[12] block, or L3 pte */
2126 pr(", %s", (pte & LX_BLKPAG_UXN) ? "UXN" : "---"); 2112 pr(", %s", (pte & LX_BLKPAG_UXN) ? "UXN" : "---");
2127 pr(", %s", (pte & LX_BLKPAG_PXN) ? "PXN" : "---"); 2113 pr(", %s", (pte & LX_BLKPAG_PXN) ? "PXN" : "---");
2128 2114
2129 if (pte & LX_BLKPAG_CONTIG) 2115 if (pte & LX_BLKPAG_CONTIG)
2130 pr(",CONTIG"); 2116 pr(",CONTIG");
2131 2117
2132 pr(", %s", (pte & LX_BLKPAG_NG) ? "NG" : "--"); 2118 pr(", %s", (pte & LX_BLKPAG_NG) ? "NG" : "--");
2133 pr(", %s", (pte & LX_BLKPAG_AF) ? "AF" : "--"); 2119 pr(", %s", (pte & LX_BLKPAG_AF) ? "AF" : "--");
2134 2120
2135 switch (pte & LX_BLKPAG_SH) { 2121 switch (pte & LX_BLKPAG_SH) {
2136 case LX_BLKPAG_SH_NS: 2122 case LX_BLKPAG_SH_NS:
2137 pr(", SH_NS"); 2123 pr(", SH_NS");
2138 break; 2124 break;
2139 case LX_BLKPAG_SH_OS: 2125 case LX_BLKPAG_SH_OS:
2140 pr(", SH_OS"); 2126 pr(", SH_OS");
2141 break; 2127 break;
2142 case LX_BLKPAG_SH_IS: 2128 case LX_BLKPAG_SH_IS:
2143 pr(", SH_IS"); 2129 pr(", SH_IS");
2144 break; 2130 break;
2145 default: 2131 default:
2146 pr(", SH_??"); 2132 pr(", SH_??");
2147 break; 2133 break;
2148 } 2134 }
2149 2135
2150 pr(", %s", (pte & LX_BLKPAG_AP_RO) ? "RO" : "RW"); 2136 pr(", %s", (pte & LX_BLKPAG_AP_RO) ? "RO" : "RW");
2151 pr(", %s", (pte & LX_BLKPAG_APUSER) ? "EL0" : "EL1"); 2137 pr(", %s", (pte & LX_BLKPAG_APUSER) ? "EL0" : "EL1");
2152 2138
2153 switch (pte & LX_BLKPAG_ATTR_MASK) { 2139 switch (pte & LX_BLKPAG_ATTR_MASK) {
2154 case LX_BLKPAG_ATTR_NORMAL_WB: 2140 case LX_BLKPAG_ATTR_NORMAL_WB:
2155 pr(", WRITEBACK"); 2141 pr(", WRITEBACK");
2156 break; 2142 break;
2157 case LX_BLKPAG_ATTR_NORMAL_NC: 2143 case LX_BLKPAG_ATTR_NORMAL_NC:
2158 pr(", NOCACHE"); 2144 pr(", NOCACHE");
2159 break; 2145 break;
2160 case LX_BLKPAG_ATTR_NORMAL_WT: 2146 case LX_BLKPAG_ATTR_NORMAL_WT:
2161 pr(", WHITETHRU"); 2147 pr(", WHITETHRU");
2162 break; 2148 break;
2163 case LX_BLKPAG_ATTR_DEVICE_MEM: 2149 case LX_BLKPAG_ATTR_DEVICE_MEM:
2164 pr(", DEVICE"); 2150 pr(", DEVICE");
2165 break; 2151 break;
2166 } 2152 }
2167 2153
2168 if (pte & LX_BLKPAG_OS_READ) 2154 if (pte & LX_BLKPAG_OS_READ)
2169 pr(", pmap_read"); 2155 pr(", pmap_read");
2170 if (pte & LX_BLKPAG_OS_WRITE) 2156 if (pte & LX_BLKPAG_OS_WRITE)
2171 pr(", pmap_write"); 2157 pr(", pmap_write");
2172 if ((pte & LX_BLKPAG_UXN) == 0) 2158 if ((pte & LX_BLKPAG_UXN) == 0)
2173 pr(", user-executable"); 2159 pr(", user-executable");
2174 if ((pte & LX_BLKPAG_PXN) == 0) 2160 if ((pte & LX_BLKPAG_PXN) == 0)
2175 pr(", kernel-executable"); 2161 pr(", kernel-executable");
2176 2162
2177 } else { 2163 } else {
2178 /* L1 and L2 pde */ 2164 /* L1 and L2 pde */
2179 pr(", %s", l1pde_is_table(pte) ? "TABLE" : "BLOCK"); 2165 pr(", %s", l1pde_is_table(pte) ? "TABLE" : "BLOCK");
2180 pr(", %s", l1pde_valid(pte) ? "VALID" : "**INVALID**"); 2166 pr(", %s", l1pde_valid(pte) ? "VALID" : "**INVALID**");
2181 pr(", PA=%016lx", l1pde_pa(pte)); 2167 pr(", PA=%016lx", l1pde_pa(pte));
2182 } 2168 }
2183 pr("\n"); 2169 pr("\n");
2184} 2170}
2185 2171
2186 2172
2187void 2173void
2188pmap_db_pteinfo(vaddr_t va, void (*pr)(const char *, ...)) 2174pmap_db_pteinfo(vaddr_t va, void (*pr)(const char *, ...))
2189{ 2175{
2190 struct pmap *pm; 2176 struct pmap *pm;
2191 struct vm_page *pg; 2177 struct vm_page *pg;
2192 bool user; 2178 bool user;
2193 2179
2194 2180
2195 if (VM_MAXUSER_ADDRESS > va) { 2181 if (VM_MAXUSER_ADDRESS > va) {
2196 pm = curlwp->l_proc->p_vmspace->vm_map.pmap; 2182 pm = curlwp->l_proc->p_vmspace->vm_map.pmap;
2197 user = true; 2183 user = true;
2198 } else { 2184 } else {
2199 pm = pmap_kernel(); 2185 pm = pmap_kernel();
2200 user = false; 2186 user = false;
2201 } 2187 }
2202 2188
2203 2189
2204 pd_entry_t *l0, *l1, *l2, *l3; 2190 pd_entry_t *l0, *l1, *l2, *l3;
2205 pd_entry_t pde; 2191 pd_entry_t pde;
2206 pt_entry_t pte; 2192 pt_entry_t pte;
2207 struct vm_page_md *md; 2193 struct vm_page_md *md;
2208 paddr_t pa; 2194 paddr_t pa;
2209 unsigned int idx; 2195 unsigned int idx;
2210 2196
2211 /* 2197 /*
2212 * traverse L0 -> L1 -> L2 -> L3 table 2198 * traverse L0 -> L1 -> L2 -> L3 table
2213 */ 2199 */
2214 2200
2215 l0 = pm->pm_l0table; 2201 l0 = pm->pm_l0table;
2216 2202
2217 pr("TTBR%d=%016llx (%016llx)", user ? 0 : 1, 2203 pr("TTBR%d=%016llx (%016llx)", user ? 0 : 1,
2218 pm->pm_l0table_pa, l0); 2204 pm->pm_l0table_pa, l0);
2219 pr(", input-va=%016llx," 2205 pr(", input-va=%016llx,"
2220 " L0-index=%d, L1-index=%d, L2-index=%d, L3-index=%d\n", 2206 " L0-index=%d, L1-index=%d, L2-index=%d, L3-index=%d\n",
2221 va, 2207 va,
2222 (va & L0_ADDR_BITS) >> L0_SHIFT, 2208 (va & L0_ADDR_BITS) >> L0_SHIFT,
2223 (va & L1_ADDR_BITS) >> L1_SHIFT, 2209 (va & L1_ADDR_BITS) >> L1_SHIFT,
2224 (va & L2_ADDR_BITS) >> L2_SHIFT, 2210 (va & L2_ADDR_BITS) >> L2_SHIFT,
2225 (va & L3_ADDR_BITS) >> L3_SHIFT); 2211 (va & L3_ADDR_BITS) >> L3_SHIFT);
2226 2212
2227 idx = l0pde_index(va); 2213 idx = l0pde_index(va);
2228 pde = l0[idx]; 2214 pde = l0[idx];
2229 2215
2230 pr("L0[%3d]=%016llx", idx, pde); 2216 pr("L0[%3d]=%016llx", idx, pde);
2231 pmap_db_pte_print(pde, 0, pr); 2217 pmap_db_pte_print(pde, 0, pr);
2232 2218
2233 if (!l0pde_valid(pde)) 2219 if (!l0pde_valid(pde))
2234 return; 2220 return;
2235 2221
2236 l1 = (void *)AARCH64_PA_TO_KVA(l0pde_pa(pde)); 2222 l1 = (void *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
2237 idx = l1pde_index(va); 2223 idx = l1pde_index(va);
2238 pde = l1[idx]; 2224 pde = l1[idx];
2239 2225
2240 pr(" L1[%3d]=%016llx", idx, pde); 2226 pr(" L1[%3d]=%016llx", idx, pde);
2241 pmap_db_pte_print(pde, 1, pr); 2227 pmap_db_pte_print(pde, 1, pr);
2242 2228
2243 if (!l1pde_valid(pde) || l1pde_is_block(pde)) 2229 if (!l1pde_valid(pde) || l1pde_is_block(pde))
2244 return; 2230 return;
2245 2231
2246 l2 = (void *)AARCH64_PA_TO_KVA(l1pde_pa(pde)); 2232 l2 = (void *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
2247 idx = l2pde_index(va); 2233 idx = l2pde_index(va);
2248 pde = l2[idx]; 2234 pde = l2[idx];
2249 2235
2250 pr(" L2[%3d]=%016llx", idx, pde); 2236 pr(" L2[%3d]=%016llx", idx, pde);
2251 pmap_db_pte_print(pde, 2, pr); 2237 pmap_db_pte_print(pde, 2, pr);
2252 2238
2253 if (!l2pde_valid(pde) || l2pde_is_block(pde)) 2239 if (!l2pde_valid(pde) || l2pde_is_block(pde))
2254 return; 2240 return;
2255 2241
2256 l3 = (void *)AARCH64_PA_TO_KVA(l2pde_pa(pde)); 2242 l3 = (void *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
2257 idx = l3pte_index(va); 2243 idx = l3pte_index(va);
2258 pte = l3[idx]; 2244 pte = l3[idx];
2259 2245
2260 pr(" L3[%3d]=%016llx", idx, pte); 2246 pr(" L3[%3d]=%016llx", idx, pte);
2261 pmap_db_pte_print(pte, 3, pr); 2247 pmap_db_pte_print(pte, 3, pr);
2262 2248
2263 pa = l3pte_pa(pte); 2249 pa = l3pte_pa(pte);
2264 pg = PHYS_TO_VM_PAGE(pa); 2250 pg = PHYS_TO_VM_PAGE(pa);
2265 if (pg == NULL) { 2251 if (pg == NULL) {
2266 pr("No VM_PAGE\n"); 2252 pr("No VM_PAGE\n");
2267 } else { 2253 } else {
2268 pg_dump(pg, pr); 2254 pg_dump(pg, pr);
2269 md = VM_PAGE_TO_MD(pg); 2255 md = VM_PAGE_TO_MD(pg);
2270 pv_dump(md, pr); 2256 pv_dump(md, pr);
2271 } 2257 }
2272} 2258}
2273#endif /* DDB */ 2259#endif /* DDB */