| @@ -1,1425 +1,1424 @@ | | | @@ -1,1425 +1,1424 @@ |
1 | /* $NetBSD: pmap.c,v 1.58 2019/12/28 17:19:43 jmcneill Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.59 2019/12/30 15:42:39 skrll Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> | | 4 | * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | | 17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
18 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | | 18 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
19 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, | | 19 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, |
20 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | | 20 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
21 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | | 21 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
22 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 22 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | | 23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
24 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | | 24 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING |
25 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 25 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. | | 26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | #include <sys/cdefs.h> | | 29 | #include <sys/cdefs.h> |
30 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.58 2019/12/28 17:19:43 jmcneill Exp $"); | | 30 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.59 2019/12/30 15:42:39 skrll Exp $"); |
31 | | | 31 | |
32 | #include "opt_arm_debug.h" | | 32 | #include "opt_arm_debug.h" |
33 | #include "opt_ddb.h" | | 33 | #include "opt_ddb.h" |
34 | #include "opt_multiprocessor.h" | | 34 | #include "opt_multiprocessor.h" |
35 | #include "opt_pmap.h" | | 35 | #include "opt_pmap.h" |
36 | #include "opt_uvmhist.h" | | 36 | #include "opt_uvmhist.h" |
37 | | | 37 | |
38 | #include <sys/param.h> | | 38 | #include <sys/param.h> |
39 | #include <sys/types.h> | | 39 | #include <sys/types.h> |
40 | #include <sys/kmem.h> | | 40 | #include <sys/kmem.h> |
41 | #include <sys/vmem.h> | | 41 | #include <sys/vmem.h> |
42 | #include <sys/atomic.h> | | 42 | #include <sys/atomic.h> |
43 | #include <sys/asan.h> | | 43 | #include <sys/asan.h> |
44 | | | 44 | |
45 | #include <uvm/uvm.h> | | 45 | #include <uvm/uvm.h> |
46 | | | 46 | |
47 | #include <aarch64/pmap.h> | | 47 | #include <aarch64/pmap.h> |
48 | #include <aarch64/pte.h> | | 48 | #include <aarch64/pte.h> |
49 | #include <aarch64/armreg.h> | | 49 | #include <aarch64/armreg.h> |
50 | #include <aarch64/cpufunc.h> | | 50 | #include <aarch64/cpufunc.h> |
51 | #include <aarch64/machdep.h> | | 51 | #include <aarch64/machdep.h> |
52 | #ifdef DDB | | 52 | #ifdef DDB |
53 | #include <aarch64/db_machdep.h> | | 53 | #include <aarch64/db_machdep.h> |
54 | #include <ddb/db_access.h> | | 54 | #include <ddb/db_access.h> |
55 | #endif | | 55 | #endif |
56 | | | 56 | |
57 | //#define PMAP_DEBUG | | 57 | //#define PMAP_DEBUG |
58 | //#define PMAP_PV_DEBUG | | 58 | //#define PMAP_PV_DEBUG |
59 | | | 59 | |
60 | #ifdef VERBOSE_INIT_ARM | | 60 | #ifdef VERBOSE_INIT_ARM |
61 | #define VPRINTF(...) printf(__VA_ARGS__) | | 61 | #define VPRINTF(...) printf(__VA_ARGS__) |
62 | #else | | 62 | #else |
63 | #define VPRINTF(...) __nothing | | 63 | #define VPRINTF(...) __nothing |
64 | #endif | | 64 | #endif |
65 | | | 65 | |
66 | UVMHIST_DEFINE(pmaphist); | | 66 | UVMHIST_DEFINE(pmaphist); |
67 | #ifdef UVMHIST | | 67 | #ifdef UVMHIST |
68 | | | 68 | |
69 | #ifndef UVMHIST_PMAPHIST_SIZE | | 69 | #ifndef UVMHIST_PMAPHIST_SIZE |
70 | #define UVMHIST_PMAPHIST_SIZE (1024 * 4) | | 70 | #define UVMHIST_PMAPHIST_SIZE (1024 * 4) |
71 | #endif | | 71 | #endif |
72 | | | 72 | |
73 | struct kern_history_ent pmaphistbuf[UVMHIST_PMAPHIST_SIZE]; | | 73 | struct kern_history_ent pmaphistbuf[UVMHIST_PMAPHIST_SIZE]; |
74 | | | 74 | |
75 | static void | | 75 | static void |
76 | pmap_hist_init(void) | | 76 | pmap_hist_init(void) |
77 | { | | 77 | { |
78 | static bool inited = false; | | 78 | static bool inited = false; |
79 | if (inited == false) { | | 79 | if (inited == false) { |
80 | UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf); | | 80 | UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf); |
81 | inited = true; | | 81 | inited = true; |
82 | } | | 82 | } |
83 | } | | 83 | } |
84 | #define PMAP_HIST_INIT() pmap_hist_init() | | 84 | #define PMAP_HIST_INIT() pmap_hist_init() |
85 | | | 85 | |
86 | #else /* UVMHIST */ | | 86 | #else /* UVMHIST */ |
87 | | | 87 | |
88 | #define PMAP_HIST_INIT() ((void)0) | | 88 | #define PMAP_HIST_INIT() ((void)0) |
89 | | | 89 | |
90 | #endif /* UVMHIST */ | | 90 | #endif /* UVMHIST */ |
91 | | | 91 | |
92 | | | 92 | |
93 | #ifdef PMAPCOUNTERS | | 93 | #ifdef PMAPCOUNTERS |
94 | #define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0) | | 94 | #define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0) |
95 | #define PMAP_COUNTER(name, desc) \ | | 95 | #define PMAP_COUNTER(name, desc) \ |
96 | struct evcnt pmap_evcnt_##name = \ | | 96 | struct evcnt pmap_evcnt_##name = \ |
97 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \ | | 97 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \ |
98 | EVCNT_ATTACH_STATIC(pmap_evcnt_##name) | | 98 | EVCNT_ATTACH_STATIC(pmap_evcnt_##name) |
99 | | | 99 | |
100 | PMAP_COUNTER(pdp_alloc_boot, "page table page allocate (uvm_pageboot_alloc)"); | | 100 | PMAP_COUNTER(pdp_alloc_boot, "page table page allocate (uvm_pageboot_alloc)"); |
101 | PMAP_COUNTER(pdp_alloc, "page table page allocate (uvm_pagealloc)"); | | 101 | PMAP_COUNTER(pdp_alloc, "page table page allocate (uvm_pagealloc)"); |
102 | PMAP_COUNTER(pdp_free, "page table page free (uvm_pagefree)"); | | 102 | PMAP_COUNTER(pdp_free, "page table page free (uvm_pagefree)"); |
103 | | | 103 | |
104 | PMAP_COUNTER(pv_enter, "pv_entry allocate and link"); | | 104 | PMAP_COUNTER(pv_enter, "pv_entry allocate and link"); |
105 | PMAP_COUNTER(pv_remove, "pv_entry free and unlink"); | | 105 | PMAP_COUNTER(pv_remove, "pv_entry free and unlink"); |
106 | PMAP_COUNTER(pv_remove_nopv, "no pv_entry found when removing pv"); | | 106 | PMAP_COUNTER(pv_remove_nopv, "no pv_entry found when removing pv"); |
107 | | | 107 | |
108 | PMAP_COUNTER(activate, "pmap_activate call"); | | 108 | PMAP_COUNTER(activate, "pmap_activate call"); |
109 | PMAP_COUNTER(deactivate, "pmap_deactivate call"); | | 109 | PMAP_COUNTER(deactivate, "pmap_deactivate call"); |
110 | PMAP_COUNTER(create, "pmap_create call"); | | 110 | PMAP_COUNTER(create, "pmap_create call"); |
111 | PMAP_COUNTER(destroy, "pmap_destroy call"); | | 111 | PMAP_COUNTER(destroy, "pmap_destroy call"); |
112 | | | 112 | |
113 | PMAP_COUNTER(page_protect, "pmap_page_protect call"); | | 113 | PMAP_COUNTER(page_protect, "pmap_page_protect call"); |
114 | PMAP_COUNTER(protect, "pmap_protect call"); | | 114 | PMAP_COUNTER(protect, "pmap_protect call"); |
115 | PMAP_COUNTER(protect_remove_fallback, "pmap_protect with no-read"); | | 115 | PMAP_COUNTER(protect_remove_fallback, "pmap_protect with no-read"); |
116 | PMAP_COUNTER(protect_none, "pmap_protect non-existent pages"); | | 116 | PMAP_COUNTER(protect_none, "pmap_protect non-existent pages");
117 | PMAP_COUNTER(protect_managed, "pmap_protect managed pages"); | | 117 | PMAP_COUNTER(protect_managed, "pmap_protect managed pages"); |
118 | PMAP_COUNTER(protect_unmanaged, "pmap_protect unmanaged pages"); | | 118 | PMAP_COUNTER(protect_unmanaged, "pmap_protect unmanaged pages"); |
119 | | | 119 | |
120 | PMAP_COUNTER(clear_modify, "pmap_clear_modify call"); | | 120 | PMAP_COUNTER(clear_modify, "pmap_clear_modify call"); |
121 | PMAP_COUNTER(clear_modify_pages, "pmap_clear_modify pages"); | | 121 | PMAP_COUNTER(clear_modify_pages, "pmap_clear_modify pages"); |
122 | PMAP_COUNTER(clear_reference, "pmap_clear_reference call"); | | 122 | PMAP_COUNTER(clear_reference, "pmap_clear_reference call"); |
123 | PMAP_COUNTER(clear_reference_pages, "pmap_clear_reference pages"); | | 123 | PMAP_COUNTER(clear_reference_pages, "pmap_clear_reference pages"); |
124 | | | 124 | |
125 | PMAP_COUNTER(fixup_referenced, "page reference emulations"); | | 125 | PMAP_COUNTER(fixup_referenced, "page reference emulations"); |
126 | PMAP_COUNTER(fixup_modified, "page modification emulations"); | | 126 | PMAP_COUNTER(fixup_modified, "page modification emulations"); |
127 | | | 127 | |
128 | PMAP_COUNTER(kern_mappings_bad, "kernel pages mapped (bad color)"); | | 128 | PMAP_COUNTER(kern_mappings_bad, "kernel pages mapped (bad color)"); |
129 | PMAP_COUNTER(kern_mappings_bad_wired, "kernel pages mapped (wired bad color)"); | | 129 | PMAP_COUNTER(kern_mappings_bad_wired, "kernel pages mapped (wired bad color)"); |
130 | PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color, not wired)"); | | 130 | PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color, not wired)"); |
131 | PMAP_COUNTER(user_mappings_bad_wired, "user pages mapped (bad color, wired)"); | | 131 | PMAP_COUNTER(user_mappings_bad_wired, "user pages mapped (bad color, wired)");
132 | PMAP_COUNTER(kern_mappings, "kernel pages mapped"); | | 132 | PMAP_COUNTER(kern_mappings, "kernel pages mapped"); |
133 | PMAP_COUNTER(user_mappings, "user pages mapped"); | | 133 | PMAP_COUNTER(user_mappings, "user pages mapped"); |
134 | PMAP_COUNTER(user_mappings_changed, "user mapping changed"); | | 134 | PMAP_COUNTER(user_mappings_changed, "user mapping changed"); |
135 | PMAP_COUNTER(kern_mappings_changed, "kernel mapping changed"); | | 135 | PMAP_COUNTER(kern_mappings_changed, "kernel mapping changed"); |
136 | PMAP_COUNTER(uncached_mappings, "uncached pages mapped"); | | 136 | PMAP_COUNTER(uncached_mappings, "uncached pages mapped"); |
137 | PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped"); | | 137 | PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped"); |
138 | PMAP_COUNTER(managed_mappings, "managed pages mapped"); | | 138 | PMAP_COUNTER(managed_mappings, "managed pages mapped"); |
139 | PMAP_COUNTER(mappings, "pages mapped (including remapped)"); | | 139 | PMAP_COUNTER(mappings, "pages mapped (including remapped)"); |
140 | PMAP_COUNTER(remappings, "pages remapped"); | | 140 | PMAP_COUNTER(remappings, "pages remapped"); |
141 | | | 141 | |
142 | PMAP_COUNTER(pv_entry_cannotalloc, "pv_entry allocation failure"); | | 142 | PMAP_COUNTER(pv_entry_cannotalloc, "pv_entry allocation failure"); |
143 | | | 143 | |
144 | PMAP_COUNTER(unwire, "pmap_unwire call"); | | 144 | PMAP_COUNTER(unwire, "pmap_unwire call"); |
145 | PMAP_COUNTER(unwire_failure, "pmap_unwire failure"); | | 145 | PMAP_COUNTER(unwire_failure, "pmap_unwire failure"); |
146 | | | 146 | |
147 | #else /* PMAPCOUNTERS */ | | 147 | #else /* PMAPCOUNTERS */ |
148 | #define PMAP_COUNT(name) __nothing | | 148 | #define PMAP_COUNT(name) __nothing |
149 | #endif /* PMAPCOUNTERS */ | | 149 | #endif /* PMAPCOUNTERS */ |
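/*
 * Editor's note: an illustrative use of the two macros above; this block is
 * not part of the original source and assumes "options PMAPCOUNTERS".
 * PMAP_COUNTER() statically attaches a MISC evcnt in the "pmap" group, and
 * PMAP_COUNT(example_event) on a code path would bump it; the counters can
 * then be inspected with vmstat -e.
 */
#ifdef PMAPCOUNTERS
PMAP_COUNTER(example_event, "example event (editor illustration)");
/* ... later, on the code path being counted: PMAP_COUNT(example_event); */
#endif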
150 | | | 150 | |
151 | /* | | 151 | /* |
152 | * invalidate the TLB entry for the given ASID and VA. | | 152 | * invalidate the TLB entry for the given ASID and VA.
153 | * `ll' invalidates only the Last Level (usually L3) of the TLB entry | | 153 | * `ll' invalidates only the Last Level (usually L3) of the TLB entry
154 | */ | | 154 | */ |
155 | #define AARCH64_TLBI_BY_ASID_VA(asid, va, ll) \ | | 155 | #define AARCH64_TLBI_BY_ASID_VA(asid, va, ll) \ |
156 | do { \ | | 156 | do { \ |
157 | if ((ll)) { \ | | 157 | if ((ll)) { \ |
158 | if ((asid) == 0) \ | | 158 | if ((asid) == 0) \ |
159 | aarch64_tlbi_by_va_ll((va)); \ | | 159 | aarch64_tlbi_by_va_ll((va)); \ |
160 | else \ | | 160 | else \ |
161 | aarch64_tlbi_by_asid_va_ll((asid), (va)); \ | | 161 | aarch64_tlbi_by_asid_va_ll((asid), (va)); \ |
162 | } else { \ | | 162 | } else { \ |
163 | if ((asid) == 0) \ | | 163 | if ((asid) == 0) \ |
164 | aarch64_tlbi_by_va((va)); \ | | 164 | aarch64_tlbi_by_va((va)); \ |
165 | else \ | | 165 | else \ |
166 | aarch64_tlbi_by_asid_va((asid), (va)); \ | | 166 | aarch64_tlbi_by_asid_va((asid), (va)); \ |
167 | } \ | | 167 | } \ |
168 | } while (0/*CONSTCOND*/) | | 168 | } while (0/*CONSTCOND*/) |
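/*
 * Editor's note: a minimal usage sketch of AARCH64_TLBI_BY_ASID_VA(); not
 * part of the original source. ASID 0 denotes the kernel pmap, for which
 * the non-ASID-qualified TLB ops are used; ll=true is appropriate when only
 * a last-level (L3) entry was changed.
 */
static inline void
example_replace_pte(struct pmap *pm, pt_entry_t *ptep, pt_entry_t npte,
    vaddr_t va)
{
	atomic_swap_64(ptep, npte);		/* install the new pte */
	AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
}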
169 | | | 169 | |
170 | /* | | 170 | /* |
171 | * aarch64 requires write permission in the pte to invalidate the instruction | | 171 | * aarch64 requires write permission in the pte to invalidate the instruction
172 | * cache. the pte is temporarily changed to writable before cpu_icache_sync_range(). | | 172 | * cache. the pte is temporarily changed to writable before cpu_icache_sync_range().
173 | * this macro modifies the PTE (*ptep); the caller must update the PTE after this. | | 173 | * this macro modifies the PTE (*ptep); the caller must update the PTE after this.
174 | */ | | 174 | */ |
175 | #define PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, ll) \ | | 175 | #define PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, ll) \ |
176 | do { \ | | 176 | do { \ |
177 | pt_entry_t tpte; \ | | 177 | pt_entry_t tpte; \ |
178 | tpte = (pte) & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); \ | | 178 | tpte = (pte) & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); \ |
179 | tpte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); \ | | 179 | tpte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); \ |
180 | tpte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); \ | | 180 | tpte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); \ |
181 | atomic_swap_64((ptep), tpte); \ | | 181 | atomic_swap_64((ptep), tpte); \ |
182 | AARCH64_TLBI_BY_ASID_VA((pm)->pm_asid, (va), (ll)); \ | | 182 | AARCH64_TLBI_BY_ASID_VA((pm)->pm_asid, (va), (ll)); \ |
183 | cpu_icache_sync_range((va), PAGE_SIZE); \ | | 183 | cpu_icache_sync_range((va), PAGE_SIZE); \ |
184 | } while (0/*CONSTCOND*/) | | 184 | } while (0/*CONSTCOND*/) |
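/*
 * Editor's note: a sketch of the calling contract described above; not part
 * of the original source. PTE_ICACHE_SYNC_PAGE() leaves *ptep pointing at a
 * temporarily writable, non-executable mapping, so the caller must install
 * the intended pte and invalidate the TLB afterwards:
 */
static inline void
example_sync_icache_page(struct pmap *pm, pt_entry_t *ptep, pt_entry_t pte,
    vaddr_t va)
{
	PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, true);
	atomic_swap_64(ptep, pte);		/* restore the intended pte */
	AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
}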
185 | | | 185 | |
186 | struct pv_entry { | | 186 | struct pv_entry { |
187 | TAILQ_ENTRY(pv_entry) pv_link; | | 187 | TAILQ_ENTRY(pv_entry) pv_link; |
188 | struct pmap *pv_pmap; | | 188 | struct pmap *pv_pmap; |
189 | vaddr_t pv_va; | | 189 | vaddr_t pv_va; |
190 | paddr_t pv_pa; /* debug */ | | 190 | paddr_t pv_pa; /* debug */ |
191 | pt_entry_t *pv_ptep; /* for fast pte lookup */ | | 191 | pt_entry_t *pv_ptep; /* for fast pte lookup */ |
192 | }; | | 192 | }; |
193 | #define pv_next pv_link.tqe_next | | 193 | #define pv_next pv_link.tqe_next |
194 | | | 194 | |
195 | #define L3INDEXMASK (L3_SIZE * Ln_ENTRIES - 1) | | 195 | #define L3INDEXMASK (L3_SIZE * Ln_ENTRIES - 1) |
196 | #define PDPSWEEP_TRIGGER 512 | | 196 | #define PDPSWEEP_TRIGGER 512 |
197 | | | 197 | |
198 | static pt_entry_t *_pmap_pte_lookup_l3(struct pmap *, vaddr_t); | | 198 | static pt_entry_t *_pmap_pte_lookup_l3(struct pmap *, vaddr_t); |
199 | static pt_entry_t *_pmap_pte_lookup_bs(struct pmap *, vaddr_t, vsize_t *); | | 199 | static pt_entry_t *_pmap_pte_lookup_bs(struct pmap *, vaddr_t, vsize_t *); |
200 | static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool); | | 200 | static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool); |
201 | static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int); | | 201 | static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int); |
202 | static void _pmap_remove(struct pmap *, vaddr_t, vaddr_t, bool, | | 202 | static void _pmap_remove(struct pmap *, vaddr_t, vaddr_t, bool, |
203 | struct pv_entry **); | | 203 | struct pv_entry **); |
204 | static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool); | | 204 | static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool); |
205 | | | 205 | |
206 | static struct pmap kernel_pmap; | | 206 | static struct pmap kernel_pmap; |
207 | | | 207 | |
208 | struct pmap * const kernel_pmap_ptr = &kernel_pmap; | | 208 | struct pmap * const kernel_pmap_ptr = &kernel_pmap; |
209 | static vaddr_t pmap_maxkvaddr; | | 209 | static vaddr_t pmap_maxkvaddr; |
210 | | | 210 | |
211 | vaddr_t virtual_avail, virtual_end; | | 211 | vaddr_t virtual_avail, virtual_end; |
212 | vaddr_t virtual_devmap_addr; | | 212 | vaddr_t virtual_devmap_addr; |
213 | bool pmap_devmap_bootstrap_done = false; | | 213 | bool pmap_devmap_bootstrap_done = false; |
214 | | | 214 | |
215 | static struct pool_cache _pmap_cache; | | 215 | static struct pool_cache _pmap_cache; |
216 | static struct pool_cache _pmap_pv_pool; | | 216 | static struct pool_cache _pmap_pv_pool; |
217 | | | 217 | |
218 | | | 218 | |
219 | static inline void | | 219 | static inline void |
220 | pmap_pv_lock(struct vm_page_md *md) | | 220 | pmap_pv_lock(struct vm_page_md *md) |
221 | { | | 221 | { |
222 | | | 222 | |
223 | mutex_enter(&md->mdpg_pvlock); | | 223 | mutex_enter(&md->mdpg_pvlock); |
224 | } | | 224 | } |
225 | | | 225 | |
226 | static inline void | | 226 | static inline void |
227 | pmap_pv_unlock(struct vm_page_md *md) | | 227 | pmap_pv_unlock(struct vm_page_md *md) |
228 | { | | 228 | { |
229 | | | 229 | |
230 | mutex_exit(&md->mdpg_pvlock); | | 230 | mutex_exit(&md->mdpg_pvlock); |
231 | } | | 231 | } |
232 | | | 232 | |
233 | | | 233 | |
234 | static inline void | | 234 | static inline void |
235 | pm_lock(struct pmap *pm) | | 235 | pm_lock(struct pmap *pm) |
236 | { | | 236 | { |
237 | mutex_enter(&pm->pm_lock); | | 237 | mutex_enter(&pm->pm_lock); |
238 | } | | 238 | } |
239 | | | 239 | |
240 | static inline void | | 240 | static inline void |
241 | pm_unlock(struct pmap *pm) | | 241 | pm_unlock(struct pmap *pm) |
242 | { | | 242 | { |
243 | mutex_exit(&pm->pm_lock); | | 243 | mutex_exit(&pm->pm_lock); |
244 | } | | 244 | } |
245 | | | 245 | |
246 | #define IN_RANGE(va,sta,end) (((sta) <= (va)) && ((va) < (end))) | | 246 | #define IN_RANGE(va,sta,end) (((sta) <= (va)) && ((va) < (end))) |
247 | | | 247 | |
248 | #define IN_KSEG_ADDR(va) \ | | 248 | #define IN_KSEG_ADDR(va) \ |
249 | IN_RANGE((va), AARCH64_KSEG_START, AARCH64_KSEG_END) | | 249 | IN_RANGE((va), AARCH64_KSEG_START, AARCH64_KSEG_END) |
250 | | | 250 | |
251 | #define KASSERT_PM_ADDR(pm, va) \ | | 251 | #define KASSERT_PM_ADDR(pm, va) \ |
252 | do { \ | | 252 | do { \ |
253 | if ((pm) == pmap_kernel()) { \ | | 253 | if ((pm) == pmap_kernel()) { \ |
254 | KASSERTMSG(IN_RANGE((va), VM_MIN_KERNEL_ADDRESS, \ | | 254 | KASSERTMSG(IN_RANGE((va), VM_MIN_KERNEL_ADDRESS, \ |
255 | VM_MAX_KERNEL_ADDRESS), \ | | 255 | VM_MAX_KERNEL_ADDRESS), \ |
256 | "%s: kernel pm %p: va=%016lx" \ | | 256 | "%s: kernel pm %p: va=%016lx" \ |
257 | " is not kernel address\n", \ | | 257 | " is not kernel address\n", \ |
258 | __func__, (pm), (va)); \ | | 258 | __func__, (pm), (va)); \ |
259 | } else { \ | | 259 | } else { \ |
260 | KASSERTMSG(IN_RANGE((va), \ | | 260 | KASSERTMSG(IN_RANGE((va), \ |
261 | VM_MIN_ADDRESS, VM_MAX_ADDRESS), \ | | 261 | VM_MIN_ADDRESS, VM_MAX_ADDRESS), \ |
262 | "%s: user pm %p: va=%016lx" \ | | 262 | "%s: user pm %p: va=%016lx" \ |
263 | " is not user address\n", \ | | 263 | " is not user address\n", \ |
264 | __func__, (pm), (va)); \ | | 264 | __func__, (pm), (va)); \ |
265 | } \ | | 265 | } \ |
266 | } while (0 /* CONSTCOND */) | | 266 | } while (0 /* CONSTCOND */) |
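/*
 * Editor's note: illustrative helper, not in the original source. On
 * aarch64 the high VA bits (tested via TTBR_SEL_VA) select TTBR1 (kernel)
 * versus TTBR0 (user) translation; vtophys() and _pmap_pte_lookup_bs()
 * below perform the same test inline.
 */
static inline bool
example_va_is_kernel(vaddr_t va)
{
	return (va & TTBR_SEL_VA) != 0;
}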
267 | | | 267 | |
268 | | | 268 | |
269 | static const struct pmap_devmap *pmap_devmap_table; | | 269 | static const struct pmap_devmap *pmap_devmap_table; |
270 | | | 270 | |
271 | static vsize_t | | 271 | static vsize_t |
272 | pmap_map_chunk(vaddr_t va, paddr_t pa, vsize_t size, | | 272 | pmap_map_chunk(vaddr_t va, paddr_t pa, vsize_t size, |
273 | vm_prot_t prot, u_int flags) | | 273 | vm_prot_t prot, u_int flags) |
274 | { | | 274 | { |
275 | pt_entry_t attr; | | 275 | pt_entry_t attr; |
276 | psize_t blocksize; | | 276 | psize_t blocksize; |
277 | int rc; | | 277 | int rc; |
278 | | | 278 | |
279 | /* devmap always uses L2 mappings */ | | 279 | /* devmap always uses L2 mappings */
280 | blocksize = L2_SIZE; | | 280 | blocksize = L2_SIZE; |
281 | | | 281 | |
282 | attr = _pmap_pte_adjust_prot(L2_BLOCK, prot, VM_PROT_ALL, false); | | 282 | attr = _pmap_pte_adjust_prot(L2_BLOCK, prot, VM_PROT_ALL, false); |
283 | attr = _pmap_pte_adjust_cacheflags(attr, flags); | | 283 | attr = _pmap_pte_adjust_cacheflags(attr, flags); |
284 | /* user cannot execute, and kernel follows the prot */ | | 284 | /* user cannot execute, and kernel follows the prot */ |
285 | attr |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); | | 285 | attr |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); |
286 | if (prot & VM_PROT_EXECUTE) | | 286 | if (prot & VM_PROT_EXECUTE) |
287 | attr &= ~LX_BLKPAG_PXN; | | 287 | attr &= ~LX_BLKPAG_PXN; |
288 | | | 288 | |
289 | rc = pmapboot_enter(va, pa, size, blocksize, attr, | | 289 | rc = pmapboot_enter(va, pa, size, blocksize, attr, |
290 | PMAPBOOT_ENTER_NOOVERWRITE, bootpage_alloc, NULL); | | 290 | PMAPBOOT_ENTER_NOOVERWRITE, bootpage_alloc, NULL); |
291 | if (rc != 0) | | 291 | if (rc != 0) |
292 | panic("%s: pmapboot_enter failed. %lx is already mapped?\n", | | 292 | panic("%s: pmapboot_enter failed. %lx is already mapped?\n", |
293 | __func__, va); | | 293 | __func__, va); |
294 | | | 294 | |
295 | aarch64_tlbi_by_va(va); | | 295 | aarch64_tlbi_by_va(va); |
296 | | | 296 | |
297 | return ((va + size + blocksize - 1) & ~(blocksize - 1)) - va; | | 297 | return ((va + size + blocksize - 1) & ~(blocksize - 1)) - va; |
298 | } | | 298 | } |
299 | | | 299 | |
300 | void | | 300 | void |
301 | pmap_devmap_register(const struct pmap_devmap *table) | | 301 | pmap_devmap_register(const struct pmap_devmap *table) |
302 | { | | 302 | { |
303 | pmap_devmap_table = table; | | 303 | pmap_devmap_table = table; |
304 | } | | 304 | } |
305 | | | 305 | |
306 | void | | 306 | void |
307 | pmap_devmap_bootstrap(vaddr_t l0pt, const struct pmap_devmap *table) | | 307 | pmap_devmap_bootstrap(vaddr_t l0pt, const struct pmap_devmap *table) |
308 | { | | 308 | { |
309 | vaddr_t va; | | 309 | vaddr_t va; |
310 | int i; | | 310 | int i; |
311 | | | 311 | |
312 | pmap_devmap_register(table); | | 312 | pmap_devmap_register(table); |
313 | | | 313 | |
314 | VPRINTF("%s:\n", __func__); | | 314 | VPRINTF("%s:\n", __func__); |
315 | for (i = 0; table[i].pd_size != 0; i++) { | | 315 | for (i = 0; table[i].pd_size != 0; i++) { |
316 | VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n", | | 316 | VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n", |
317 | table[i].pd_pa, | | 317 | table[i].pd_pa, |
318 | table[i].pd_pa + table[i].pd_size - 1, | | 318 | table[i].pd_pa + table[i].pd_size - 1, |
319 | table[i].pd_va); | | 319 | table[i].pd_va); |
320 | va = table[i].pd_va; | | 320 | va = table[i].pd_va; |
321 | | | 321 | |
322 | KASSERT((VM_KERNEL_IO_ADDRESS <= va) && | | 322 | KASSERT((VM_KERNEL_IO_ADDRESS <= va) && |
323 | (va < (VM_KERNEL_IO_ADDRESS + VM_KERNEL_IO_SIZE))); | | 323 | (va < (VM_KERNEL_IO_ADDRESS + VM_KERNEL_IO_SIZE))); |
324 | | | 324 | |
325 | /* update and check virtual_devmap_addr */ | | 325 | /* update and check virtual_devmap_addr */ |
326 | if ((virtual_devmap_addr == 0) || | | 326 | if (virtual_devmap_addr == 0 || virtual_devmap_addr > va) { |
327 | (virtual_devmap_addr > va)) { | | | |
328 | virtual_devmap_addr = va; | | 327 | virtual_devmap_addr = va; |
329 | } | | 328 | } |
330 | | | 329 | |
331 | pmap_map_chunk( | | 330 | pmap_map_chunk( |
332 | table[i].pd_va, | | 331 | table[i].pd_va, |
333 | table[i].pd_pa, | | 332 | table[i].pd_pa, |
334 | table[i].pd_size, | | 333 | table[i].pd_size, |
335 | table[i].pd_prot, | | 334 | table[i].pd_prot, |
336 | table[i].pd_flags); | | 335 | table[i].pd_flags); |
337 | } | | 336 | } |
338 | | | 337 | |
339 | pmap_devmap_bootstrap_done = true; | | 338 | pmap_devmap_bootstrap_done = true; |
340 | } | | 339 | } |
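/*
 * Editor's note: a hypothetical devmap table of the shape consumed by
 * pmap_devmap_bootstrap() above; the physical address and flags value are
 * illustrative only, which is why the block is disabled with #if 0. Each
 * entry is mapped with L2 (2MB) blocks by pmap_map_chunk(), and an entry
 * with pd_size == 0 terminates the table.
 */
#if 0	/* editor's illustration */
static const struct pmap_devmap example_devmap[] = {
	{
		.pd_va = VM_KERNEL_IO_ADDRESS,	/* must fall in the IO range */
		.pd_pa = 0x09000000,		/* hypothetical device base */
		.pd_size = L2_SIZE,
		.pd_prot = VM_PROT_READ | VM_PROT_WRITE,
		.pd_flags = PMAP_DEV,		/* device memory attributes */
	},
	{ .pd_size = 0 },			/* terminator */
};
/* pmap_devmap_bootstrap(l0pt, example_devmap); */
#endif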
341 | | | 340 | |
342 | const struct pmap_devmap * | | 341 | const struct pmap_devmap * |
343 | pmap_devmap_find_va(vaddr_t va, vsize_t size) | | 342 | pmap_devmap_find_va(vaddr_t va, vsize_t size) |
344 | { | | 343 | { |
345 | paddr_t endva; | | 344 | paddr_t endva; |
346 | int i; | | 345 | int i; |
347 | | | 346 | |
348 | if (pmap_devmap_table == NULL) | | 347 | if (pmap_devmap_table == NULL) |
349 | return NULL; | | 348 | return NULL; |
350 | | | 349 | |
351 | endva = va + size; | | 350 | endva = va + size; |
352 | for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { | | 351 | for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { |
353 | if ((va >= pmap_devmap_table[i].pd_va) && | | 352 | if ((va >= pmap_devmap_table[i].pd_va) && |
354 | (endva <= pmap_devmap_table[i].pd_va + | | 353 | (endva <= pmap_devmap_table[i].pd_va + |
355 | pmap_devmap_table[i].pd_size)) | | 354 | pmap_devmap_table[i].pd_size)) |
356 | return &pmap_devmap_table[i]; | | 355 | return &pmap_devmap_table[i]; |
357 | } | | 356 | } |
358 | return NULL; | | 357 | return NULL; |
359 | } | | 358 | } |
360 | | | 359 | |
361 | const struct pmap_devmap * | | 360 | const struct pmap_devmap * |
362 | pmap_devmap_find_pa(paddr_t pa, psize_t size) | | 361 | pmap_devmap_find_pa(paddr_t pa, psize_t size) |
363 | { | | 362 | { |
364 | paddr_t endpa; | | 363 | paddr_t endpa; |
365 | int i; | | 364 | int i; |
366 | | | 365 | |
367 | if (pmap_devmap_table == NULL) | | 366 | if (pmap_devmap_table == NULL) |
368 | return NULL; | | 367 | return NULL; |
369 | | | 368 | |
370 | endpa = pa + size; | | 369 | endpa = pa + size; |
371 | for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { | | 370 | for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { |
372 | if (pa >= pmap_devmap_table[i].pd_pa && | | 371 | if (pa >= pmap_devmap_table[i].pd_pa && |
373 | (endpa <= pmap_devmap_table[i].pd_pa + | | 372 | (endpa <= pmap_devmap_table[i].pd_pa + |
374 | pmap_devmap_table[i].pd_size)) | | 373 | pmap_devmap_table[i].pd_size)) |
375 | return (&pmap_devmap_table[i]); | | 374 | return (&pmap_devmap_table[i]); |
376 | } | | 375 | } |
377 | return NULL; | | 376 | return NULL; |
378 | } | | 377 | } |
379 | | | 378 | |
380 | vaddr_t | | 379 | vaddr_t |
381 | pmap_devmap_phystov(paddr_t pa) | | 380 | pmap_devmap_phystov(paddr_t pa) |
382 | { | | 381 | { |
383 | const struct pmap_devmap *table; | | 382 | const struct pmap_devmap *table; |
384 | paddr_t offset; | | 383 | paddr_t offset; |
385 | | | 384 | |
386 | table = pmap_devmap_find_pa(pa, 0); | | 385 | table = pmap_devmap_find_pa(pa, 0); |
387 | if (table == NULL) | | 386 | if (table == NULL) |
388 | return 0; | | 387 | return 0; |
389 | | | 388 | |
390 | offset = pa - table->pd_pa; | | 389 | offset = pa - table->pd_pa; |
391 | return table->pd_va + offset; | | 390 | return table->pd_va + offset; |
392 | } | | 391 | } |
393 | | | 392 | |
394 | vaddr_t | | 393 | vaddr_t |
395 | pmap_devmap_vtophys(paddr_t va) | | 394 | pmap_devmap_vtophys(paddr_t va) |
396 | { | | 395 | { |
397 | const struct pmap_devmap *table; | | 396 | const struct pmap_devmap *table; |
398 | vaddr_t offset; | | 397 | vaddr_t offset; |
399 | | | 398 | |
400 | table = pmap_devmap_find_va(va, 0); | | 399 | table = pmap_devmap_find_va(va, 0); |
401 | if (table == NULL) | | 400 | if (table == NULL) |
402 | return 0; | | 401 | return 0; |
403 | | | 402 | |
404 | offset = va - table->pd_va; | | 403 | offset = va - table->pd_va; |
405 | return table->pd_pa + offset; | | 404 | return table->pd_pa + offset; |
406 | } | | 405 | } |
407 | | | 406 | |
408 | void | | 407 | void |
409 | pmap_bootstrap(vaddr_t vstart, vaddr_t vend) | | 408 | pmap_bootstrap(vaddr_t vstart, vaddr_t vend) |
410 | { | | 409 | { |
411 | struct pmap *kpm; | | 410 | struct pmap *kpm; |
412 | pd_entry_t *l0; | | 411 | pd_entry_t *l0; |
413 | paddr_t l0pa; | | 412 | paddr_t l0pa; |
414 | | | 413 | |
415 | PMAP_HIST_INIT(); /* init once */ | | 414 | PMAP_HIST_INIT(); /* init once */ |
416 | | | 415 | |
417 | UVMHIST_FUNC(__func__); | | 416 | UVMHIST_FUNC(__func__); |
418 | UVMHIST_CALLED(pmaphist); | | 417 | UVMHIST_CALLED(pmaphist); |
419 | | | 418 | |
420 | #if 0 | | 419 | #if 0 |
421 | /* uvmexp.ncolors = icachesize / icacheways / PAGE_SIZE; */ | | 420 | /* uvmexp.ncolors = icachesize / icacheways / PAGE_SIZE; */ |
422 | uvmexp.ncolors = aarch64_cache_vindexsize / PAGE_SIZE; | | 421 | uvmexp.ncolors = aarch64_cache_vindexsize / PAGE_SIZE; |
423 | #endif | | 422 | #endif |
424 | | | 423 | |
425 | /* does devmap already occupy the top of the va range? */ | | 424 | /* does devmap already occupy the top of the va range? */
426 | if ((virtual_devmap_addr != 0) && (virtual_devmap_addr < vend)) | | 425 | if (virtual_devmap_addr != 0 && virtual_devmap_addr < vend) |
427 | vend = virtual_devmap_addr; | | 426 | vend = virtual_devmap_addr; |
428 | | | 427 | |
429 | virtual_avail = vstart; | | 428 | virtual_avail = vstart; |
430 | virtual_end = vend; | | 429 | virtual_end = vend; |
431 | pmap_maxkvaddr = vstart; | | 430 | pmap_maxkvaddr = vstart; |
432 | | | 431 | |
433 | aarch64_tlbi_all(); | | 432 | aarch64_tlbi_all(); |
434 | | | 433 | |
435 | l0pa = reg_ttbr1_el1_read(); | | 434 | l0pa = reg_ttbr1_el1_read(); |
436 | l0 = (void *)AARCH64_PA_TO_KVA(l0pa); | | 435 | l0 = (void *)AARCH64_PA_TO_KVA(l0pa); |
437 | | | 436 | |
438 | memset(&kernel_pmap, 0, sizeof(kernel_pmap)); | | 437 | memset(&kernel_pmap, 0, sizeof(kernel_pmap)); |
439 | kpm = pmap_kernel(); | | 438 | kpm = pmap_kernel(); |
440 | kpm->pm_asid = 0; | | 439 | kpm->pm_asid = 0; |
441 | kpm->pm_refcnt = 1; | | 440 | kpm->pm_refcnt = 1; |
442 | kpm->pm_idlepdp = 0; | | 441 | kpm->pm_idlepdp = 0; |
443 | kpm->pm_l0table = l0; | | 442 | kpm->pm_l0table = l0; |
444 | kpm->pm_l0table_pa = l0pa; | | 443 | kpm->pm_l0table_pa = l0pa; |
445 | kpm->pm_activated = true; | | 444 | kpm->pm_activated = true; |
446 | TAILQ_INIT(&kpm->pm_vmlist); | | 445 | TAILQ_INIT(&kpm->pm_vmlist); |
447 | mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_VM); | | 446 | mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_VM); |
448 | | | 447 | |
449 | CTASSERT(sizeof(kpm->pm_stats.wired_count) == sizeof(long)); | | 448 | CTASSERT(sizeof(kpm->pm_stats.wired_count) == sizeof(long)); |
450 | CTASSERT(sizeof(kpm->pm_stats.resident_count) == sizeof(long)); | | 449 | CTASSERT(sizeof(kpm->pm_stats.resident_count) == sizeof(long)); |
451 | #define PMSTAT_INC_WIRED_COUNT(pm) \ | | 450 | #define PMSTAT_INC_WIRED_COUNT(pm) \ |
452 | atomic_inc_ulong(&(pm)->pm_stats.wired_count) | | 451 | atomic_inc_ulong(&(pm)->pm_stats.wired_count) |
453 | #define PMSTAT_DEC_WIRED_COUNT(pm) \ | | 452 | #define PMSTAT_DEC_WIRED_COUNT(pm) \ |
454 | atomic_dec_ulong(&(pm)->pm_stats.wired_count) | | 453 | atomic_dec_ulong(&(pm)->pm_stats.wired_count) |
455 | #define PMSTAT_INC_RESIDENT_COUNT(pm) \ | | 454 | #define PMSTAT_INC_RESIDENT_COUNT(pm) \ |
456 | atomic_inc_ulong(&(pm)->pm_stats.resident_count) | | 455 | atomic_inc_ulong(&(pm)->pm_stats.resident_count) |
457 | #define PMSTAT_DEC_RESIDENT_COUNT(pm) \ | | 456 | #define PMSTAT_DEC_RESIDENT_COUNT(pm) \ |
458 | atomic_dec_ulong(&(pm)->pm_stats.resident_count) | | 457 | atomic_dec_ulong(&(pm)->pm_stats.resident_count) |
459 | } | | 458 | } |
460 | | | 459 | |
461 | inline static int | | 460 | inline static int |
462 | _pmap_color(vaddr_t addr) /* or paddr_t */ | | 461 | _pmap_color(vaddr_t addr) /* or paddr_t */ |
463 | { | | 462 | { |
464 | return (addr >> PGSHIFT) & (uvmexp.ncolors - 1); | | 463 | return (addr >> PGSHIFT) & (uvmexp.ncolors - 1); |
465 | } | | 464 | } |
466 | | | 465 | |
467 | static int | | 466 | static int |
468 | _pmap_pmap_ctor(void *arg, void *v, int flags) | | 467 | _pmap_pmap_ctor(void *arg, void *v, int flags) |
469 | { | | 468 | { |
470 | memset(v, 0, sizeof(struct pmap)); | | 469 | memset(v, 0, sizeof(struct pmap)); |
471 | return 0; | | 470 | return 0; |
472 | } | | 471 | } |
473 | | | 472 | |
474 | static int | | 473 | static int |
475 | _pmap_pv_ctor(void *arg, void *v, int flags) | | 474 | _pmap_pv_ctor(void *arg, void *v, int flags) |
476 | { | | 475 | { |
477 | memset(v, 0, sizeof(struct pv_entry)); | | 476 | memset(v, 0, sizeof(struct pv_entry)); |
478 | return 0; | | 477 | return 0; |
479 | } | | 478 | } |
480 | | | 479 | |
481 | void | | 480 | void |
482 | pmap_init(void) | | 481 | pmap_init(void) |
483 | { | | 482 | { |
484 | struct vm_page *pg; | | 483 | struct vm_page *pg; |
485 | struct vm_page_md *md; | | 484 | struct vm_page_md *md; |
486 | uvm_physseg_t i; | | 485 | uvm_physseg_t i; |
487 | paddr_t pfn; | | 486 | paddr_t pfn; |
488 | | | 487 | |
489 | pool_cache_bootstrap(&_pmap_cache, sizeof(struct pmap), | | 488 | pool_cache_bootstrap(&_pmap_cache, sizeof(struct pmap), |
490 | 0, 0, 0, "pmappl", NULL, IPL_NONE, _pmap_pmap_ctor, NULL, NULL); | | 489 | 0, 0, 0, "pmappl", NULL, IPL_NONE, _pmap_pmap_ctor, NULL, NULL); |
491 | pool_cache_bootstrap(&_pmap_pv_pool, sizeof(struct pv_entry), | | 490 | pool_cache_bootstrap(&_pmap_pv_pool, sizeof(struct pv_entry), |
492 | 0, 0, 0, "pvpl", NULL, IPL_VM, _pmap_pv_ctor, NULL, NULL); | | 491 | 0, 0, 0, "pvpl", NULL, IPL_VM, _pmap_pv_ctor, NULL, NULL); |
493 | | | 492 | |
494 | /* | | 493 | /* |
495 | * initialize vm_page_md's mdpg_pvlock here. | | 494 | * initialize vm_page_md's mdpg_pvlock here.
496 | * With LOCKDEBUG, mutex_init() calls km_alloc, | | 495 | * With LOCKDEBUG, mutex_init() calls km_alloc,
497 | * but VM_MDPAGE_INIT() is called before kmem_vm_arena is initialized. | | 496 | * but VM_MDPAGE_INIT() is called before kmem_vm_arena is initialized.
498 | */ | | 497 | */ |
499 | for (i = uvm_physseg_get_first(); | | 498 | for (i = uvm_physseg_get_first(); |
500 | uvm_physseg_valid_p(i); | | 499 | uvm_physseg_valid_p(i); |
501 | i = uvm_physseg_get_next(i)) { | | 500 | i = uvm_physseg_get_next(i)) { |
502 | for (pfn = uvm_physseg_get_start(i); | | 501 | for (pfn = uvm_physseg_get_start(i); |
503 | pfn < uvm_physseg_get_end(i); | | 502 | pfn < uvm_physseg_get_end(i); |
504 | pfn++) { | | 503 | pfn++) { |
505 | pg = PHYS_TO_VM_PAGE(ptoa(pfn)); | | 504 | pg = PHYS_TO_VM_PAGE(ptoa(pfn)); |
506 | md = VM_PAGE_TO_MD(pg); | | 505 | md = VM_PAGE_TO_MD(pg); |
507 | mutex_init(&md->mdpg_pvlock, MUTEX_SPIN, IPL_VM); | | 506 | mutex_init(&md->mdpg_pvlock, MUTEX_SPIN, IPL_VM); |
508 | } | | 507 | } |
509 | } | | 508 | } |
510 | } | | 509 | } |
511 | | | 510 | |
512 | void | | 511 | void |
513 | pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) | | 512 | pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) |
514 | { | | 513 | { |
515 | *vstartp = virtual_avail; | | 514 | *vstartp = virtual_avail; |
516 | *vendp = virtual_end; | | 515 | *vendp = virtual_end; |
517 | } | | 516 | } |
518 | | | 517 | |
519 | vaddr_t | | 518 | vaddr_t |
520 | pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) | | 519 | pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) |
521 | { | | 520 | { |
522 | int npage; | | 521 | int npage; |
523 | paddr_t pa; | | 522 | paddr_t pa; |
524 | vaddr_t va; | | 523 | vaddr_t va; |
525 | psize_t bank_npage; | | 524 | psize_t bank_npage; |
526 | uvm_physseg_t bank; | | 525 | uvm_physseg_t bank; |
527 | | | 526 | |
528 | UVMHIST_FUNC(__func__); | | 527 | UVMHIST_FUNC(__func__); |
529 | UVMHIST_CALLED(pmaphist); | | 528 | UVMHIST_CALLED(pmaphist); |
530 | | | 529 | |
531 | UVMHIST_LOG(pmaphist, "size=%llu, *vstartp=%llx, *vendp=%llx", | | 530 | UVMHIST_LOG(pmaphist, "size=%llu, *vstartp=%llx, *vendp=%llx", |
532 | size, *vstartp, *vendp, 0); | | 531 | size, *vstartp, *vendp, 0); |
533 | | | 532 | |
534 | size = round_page(size); | | 533 | size = round_page(size); |
535 | npage = atop(size); | | 534 | npage = atop(size); |
536 | | | 535 | |
537 | for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank); | | 536 | for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank); |
538 | bank = uvm_physseg_get_next(bank)) { | | 537 | bank = uvm_physseg_get_next(bank)) { |
539 | | | 538 | |
540 | bank_npage = uvm_physseg_get_avail_end(bank) - | | 539 | bank_npage = uvm_physseg_get_avail_end(bank) - |
541 | uvm_physseg_get_avail_start(bank); | | 540 | uvm_physseg_get_avail_start(bank); |
542 | if (npage <= bank_npage) | | 541 | if (npage <= bank_npage) |
543 | break; | | 542 | break; |
544 | } | | 543 | } |
545 | | | 544 | |
546 | if (!uvm_physseg_valid_p(bank)) { | | 545 | if (!uvm_physseg_valid_p(bank)) { |
547 | panic("%s: no memory", __func__); | | 546 | panic("%s: no memory", __func__); |
548 | } | | 547 | } |
549 | | | 548 | |
550 | /* Steal pages */ | | 549 | /* Steal pages */ |
551 | pa = ptoa(uvm_physseg_get_avail_start(bank)); | | 550 | pa = ptoa(uvm_physseg_get_avail_start(bank)); |
552 | va = AARCH64_PA_TO_KVA(pa); | | 551 | va = AARCH64_PA_TO_KVA(pa); |
553 | uvm_physseg_unplug(atop(pa), npage); | | 552 | uvm_physseg_unplug(atop(pa), npage); |
554 | | | 553 | |
555 | for (; npage > 0; npage--, pa += PAGE_SIZE) | | 554 | for (; npage > 0; npage--, pa += PAGE_SIZE) |
556 | pmap_zero_page(pa); | | 555 | pmap_zero_page(pa); |
557 | | | 556 | |
558 | return va; | | 557 | return va; |
559 | } | | 558 | } |
560 | | | 559 | |
561 | void | | 560 | void |
562 | pmap_reference(struct pmap *pm) | | 561 | pmap_reference(struct pmap *pm) |
563 | { | | 562 | { |
564 | atomic_inc_uint(&pm->pm_refcnt); | | 563 | atomic_inc_uint(&pm->pm_refcnt); |
565 | } | | 564 | } |
566 | | | 565 | |
567 | paddr_t | | 566 | paddr_t |
568 | pmap_alloc_pdp(struct pmap *pm, struct vm_page **pgp, int flags, bool waitok) | | 567 | pmap_alloc_pdp(struct pmap *pm, struct vm_page **pgp, int flags, bool waitok) |
569 | { | | 568 | { |
570 | paddr_t pa; | | 569 | paddr_t pa; |
571 | struct vm_page *pg; | | 570 | struct vm_page *pg; |
572 | | | 571 | |
573 | UVMHIST_FUNC(__func__); | | 572 | UVMHIST_FUNC(__func__); |
574 | UVMHIST_CALLED(pmaphist); | | 573 | UVMHIST_CALLED(pmaphist); |
575 | | | 574 | |
576 | if (uvm.page_init_done) { | | 575 | if (uvm.page_init_done) { |
577 | int aflags = ((flags & PMAP_CANFAIL) ? 0 : UVM_PGA_USERESERVE) | | | 576 | int aflags = ((flags & PMAP_CANFAIL) ? 0 : UVM_PGA_USERESERVE) | |
578 | UVM_PGA_ZERO; | | 577 | UVM_PGA_ZERO; |
579 | retry: | | 578 | retry: |
580 | pg = uvm_pagealloc(NULL, 0, NULL, aflags); | | 579 | pg = uvm_pagealloc(NULL, 0, NULL, aflags); |
581 | if (pg == NULL) { | | 580 | if (pg == NULL) { |
582 | if (waitok) { | | 581 | if (waitok) { |
583 | uvm_wait("pmap_alloc_pdp"); | | 582 | uvm_wait("pmap_alloc_pdp"); |
584 | goto retry; | | 583 | goto retry; |
585 | } | | 584 | } |
586 | return POOL_PADDR_INVALID; | | 585 | return POOL_PADDR_INVALID; |
587 | } | | 586 | } |
588 | | | 587 | |
589 | TAILQ_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist); | | 588 | TAILQ_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist); |
590 | pg->flags &= ~PG_BUSY; /* never busy */ | | 589 | pg->flags &= ~PG_BUSY; /* never busy */ |
591 | pg->wire_count = 1; /* max = 1 + Ln_ENTRIES = 513 */ | | 590 | pg->wire_count = 1; /* max = 1 + Ln_ENTRIES = 513 */ |
592 | pa = VM_PAGE_TO_PHYS(pg); | | 591 | pa = VM_PAGE_TO_PHYS(pg); |
593 | PMAP_COUNT(pdp_alloc); | | 592 | PMAP_COUNT(pdp_alloc); |
594 | | | 593 | |
595 | VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = NULL; | | 594 | VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = NULL; |
596 | | | 595 | |
597 | } else { | | 596 | } else { |
598 | /* uvm_pageboot_alloc() returns AARCH64 KSEG address */ | | 597 | /* uvm_pageboot_alloc() returns AARCH64 KSEG address */ |
599 | pg = NULL; | | 598 | pg = NULL; |
600 | pa = AARCH64_KVA_TO_PA( | | 599 | pa = AARCH64_KVA_TO_PA( |
601 | uvm_pageboot_alloc(Ln_TABLE_SIZE)); | | 600 | uvm_pageboot_alloc(Ln_TABLE_SIZE)); |
602 | PMAP_COUNT(pdp_alloc_boot); | | 601 | PMAP_COUNT(pdp_alloc_boot); |
603 | } | | 602 | } |
604 | if (pgp != NULL) | | 603 | if (pgp != NULL) |
605 | *pgp = pg; | | 604 | *pgp = pg; |
606 | | | 605 | |
607 | UVMHIST_LOG(pmaphist, "pa=%llx, pg=%llx", | | 606 | UVMHIST_LOG(pmaphist, "pa=%llx, pg=%llx", |
608 | pa, pg, 0, 0); | | 607 | pa, pg, 0, 0); |
609 | | | 608 | |
610 | return pa; | | 609 | return pa; |
611 | } | | 610 | } |
612 | | | 611 | |
613 | static void | | 612 | static void |
614 | pmap_free_pdp(struct pmap *pm, struct vm_page *pg) | | 613 | pmap_free_pdp(struct pmap *pm, struct vm_page *pg) |
615 | { | | 614 | { |
616 | TAILQ_REMOVE(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist); | | 615 | TAILQ_REMOVE(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist); |
617 | pg->flags |= PG_BUSY; | | 616 | pg->flags |= PG_BUSY; |
618 | pg->wire_count = 0; | | 617 | pg->wire_count = 0; |
619 | VM_MDPAGE_INIT(pg); | | 618 | VM_MDPAGE_INIT(pg); |
620 | | | 619 | |
621 | uvm_pagefree(pg); | | 620 | uvm_pagefree(pg); |
622 | PMAP_COUNT(pdp_free); | | 621 | PMAP_COUNT(pdp_free); |
623 | } | | 622 | } |
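/*
 * Editor's note: the wire_count convention for page table pages as this
 * editor reads pmap_alloc_pdp() and _pmap_sweep_pdp(); the helper below is
 * an illustration, not part of the original source. A page table page
 * holds one reference for itself plus one per valid entry beneath it, so
 * 1 <= wire_count <= 1 + Ln_ENTRIES (513), and wire_count == 1 identifies
 * an empty, reclaimable page.
 */
static inline bool
example_pdp_is_empty(struct vm_page *pg)
{
	return pg->wire_count == 1;
}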
624 | | | 623 | |
625 | /* free empty page table pages */ | | 624 | /* free empty page table pages */ |
626 | static int | | 625 | static int |
627 | _pmap_sweep_pdp(struct pmap *pm) | | 626 | _pmap_sweep_pdp(struct pmap *pm) |
628 | { | | 627 | { |
629 | struct vm_page *pg, *tmp; | | 628 | struct vm_page *pg, *tmp; |
630 | pd_entry_t *ptep_in_parent, opte __diagused; | | 629 | pd_entry_t *ptep_in_parent, opte __diagused; |
631 | paddr_t pa, pdppa; | | 630 | paddr_t pa, pdppa; |
632 | int nsweep; | | 631 | int nsweep; |
633 | uint16_t wirecount __diagused; | | 632 | uint16_t wirecount __diagused; |
634 | | | 633 | |
635 | nsweep = 0; | | 634 | nsweep = 0; |
636 | TAILQ_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) { | | 635 | TAILQ_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) { |
637 | if (pg->wire_count != 1) | | 636 | if (pg->wire_count != 1) |
638 | continue; | | 637 | continue; |
639 | | | 638 | |
640 | pa = VM_PAGE_TO_PHYS(pg); | | 639 | pa = VM_PAGE_TO_PHYS(pg); |
641 | if (pa == pm->pm_l0table_pa) | | 640 | if (pa == pm->pm_l0table_pa) |
642 | continue; | | 641 | continue; |
643 | | | 642 | |
644 | ptep_in_parent = VM_PAGE_TO_MD(pg)->mdpg_ptep_parent; | | 643 | ptep_in_parent = VM_PAGE_TO_MD(pg)->mdpg_ptep_parent; |
645 | if (ptep_in_parent == NULL) { | | 644 | if (ptep_in_parent == NULL) { |
646 | /* no parent */ | | 645 | /* no parent */ |
647 | pmap_free_pdp(pm, pg); | | 646 | pmap_free_pdp(pm, pg); |
648 | nsweep++; | | 647 | nsweep++; |
649 | continue; | | 648 | continue; |
650 | } | | 649 | } |
651 | | | 650 | |
652 | /* unlink from parent */ | | 651 | /* unlink from parent */ |
653 | opte = atomic_swap_64(ptep_in_parent, 0); | | 652 | opte = atomic_swap_64(ptep_in_parent, 0); |
654 | KASSERT(lxpde_valid(opte)); | | 653 | KASSERT(lxpde_valid(opte)); |
655 | wirecount = atomic_add_32_nv(&pg->wire_count, -1); /* 1 -> 0 */ | | 654 | wirecount = atomic_add_32_nv(&pg->wire_count, -1); /* 1 -> 0 */ |
656 | KASSERT(wirecount == 0); | | 655 | KASSERT(wirecount == 0); |
657 | pmap_free_pdp(pm, pg); | | 656 | pmap_free_pdp(pm, pg); |
658 | nsweep++; | | 657 | nsweep++; |
659 | | | 658 | |
660 | /* L3->L2->L1. no need for L0 */ | | 659 | /* L3->L2->L1. no need for L0 */ |
661 | pdppa = AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep_in_parent)); | | 660 | pdppa = AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep_in_parent)); |
662 | if (pdppa == pm->pm_l0table_pa) | | 661 | if (pdppa == pm->pm_l0table_pa) |
663 | continue; | | 662 | continue; |
664 | | | 663 | |
665 | pg = PHYS_TO_VM_PAGE(pdppa); | | 664 | pg = PHYS_TO_VM_PAGE(pdppa); |
666 | KASSERT(pg != NULL); | | 665 | KASSERT(pg != NULL); |
667 | KASSERTMSG(pg->wire_count >= 1, | | 666 | KASSERTMSG(pg->wire_count >= 1, |
668 | "wire_count=%d", pg->wire_count); | | 667 | "wire_count=%d", pg->wire_count); |
669 | /* decrement wire_count of parent */ | | 668 | /* decrement wire_count of parent */ |
670 | wirecount = atomic_add_32_nv(&pg->wire_count, -1); | | 669 | wirecount = atomic_add_32_nv(&pg->wire_count, -1); |
671 | KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1), | | 670 | KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1), |
672 | "pm=%p[%d], pg=%p, wire_count=%d", | | 671 | "pm=%p[%d], pg=%p, wire_count=%d", |
673 | pm, pm->pm_asid, pg, pg->wire_count); | | 672 | pm, pm->pm_asid, pg, pg->wire_count); |
674 | } | | 673 | } |
675 | atomic_swap_uint(&pm->pm_idlepdp, 0); | | 674 | atomic_swap_uint(&pm->pm_idlepdp, 0); |
676 | | | 675 | |
677 | return nsweep; | | 676 | return nsweep; |
678 | } | | 677 | } |
679 | | | 678 | |
680 | static void | | 679 | static void |
681 | _pmap_free_pdp_all(struct pmap *pm) | | 680 | _pmap_free_pdp_all(struct pmap *pm) |
682 | { | | 681 | { |
683 | struct vm_page *pg, *tmp; | | 682 | struct vm_page *pg, *tmp; |
684 | | | 683 | |
685 | TAILQ_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) { | | 684 | TAILQ_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) { |
686 | pmap_free_pdp(pm, pg); | | 685 | pmap_free_pdp(pm, pg); |
687 | } | | 686 | } |
688 | } | | 687 | } |
689 | | | 688 | |
690 | vaddr_t | | 689 | vaddr_t |
691 | pmap_growkernel(vaddr_t maxkvaddr) | | 690 | pmap_growkernel(vaddr_t maxkvaddr) |
692 | { | | 691 | { |
693 | UVMHIST_FUNC(__func__); | | 692 | UVMHIST_FUNC(__func__); |
694 | UVMHIST_CALLED(pmaphist); | | 693 | UVMHIST_CALLED(pmaphist); |
695 | | | 694 | |
696 | UVMHIST_LOG(pmaphist, "maxkvaddr=%llx, pmap_maxkvaddr=%llx", | | 695 | UVMHIST_LOG(pmaphist, "maxkvaddr=%llx, pmap_maxkvaddr=%llx", |
697 | maxkvaddr, pmap_maxkvaddr, 0, 0); | | 696 | maxkvaddr, pmap_maxkvaddr, 0, 0); |
698 | | | 697 | |
699 | kasan_shadow_map((void *)pmap_maxkvaddr, | | 698 | kasan_shadow_map((void *)pmap_maxkvaddr, |
700 | (size_t)(maxkvaddr - pmap_maxkvaddr)); | | 699 | (size_t)(maxkvaddr - pmap_maxkvaddr)); |
701 | | | 700 | |
702 | pmap_maxkvaddr = maxkvaddr; | | 701 | pmap_maxkvaddr = maxkvaddr; |
703 | | | 702 | |
704 | return maxkvaddr; | | 703 | return maxkvaddr; |
705 | } | | 704 | } |
706 | | | 705 | |
707 | bool | | 706 | bool |
708 | pmap_extract_coherency(struct pmap *pm, vaddr_t va, paddr_t *pap, | | 707 | pmap_extract_coherency(struct pmap *pm, vaddr_t va, paddr_t *pap, |
709 | bool *coherencyp) | | 708 | bool *coherencyp) |
710 | { | | 709 | { |
711 | if (coherencyp) | | 710 | if (coherencyp) |
712 | *coherencyp = false; | | 711 | *coherencyp = false; |
713 | | | 712 | |
714 | return pmap_extract(pm, va, pap); | | 713 | return pmap_extract(pm, va, pap); |
715 | } | | 714 | } |
716 | | | 715 | |
717 | bool | | 716 | bool |
718 | pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap) | | 717 | pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap) |
719 | { | | 718 | { |
720 | pt_entry_t *ptep, pte; | | 719 | pt_entry_t *ptep, pte; |
721 | paddr_t pa; | | 720 | paddr_t pa; |
722 | vsize_t blocksize = 0; | | 721 | vsize_t blocksize = 0; |
723 | extern char __kernel_text[]; | | 722 | extern char __kernel_text[]; |
724 | extern char _end[]; | | 723 | extern char _end[]; |
725 | | | 724 | |
726 | if (IN_RANGE(va, (vaddr_t)__kernel_text, (vaddr_t)_end)) { | | 725 | if (IN_RANGE(va, (vaddr_t)__kernel_text, (vaddr_t)_end)) { |
727 | /* fast lookup */ | | 726 | /* fast lookup */
728 | pa = KERN_VTOPHYS(va); | | 727 | pa = KERN_VTOPHYS(va); |
729 | } else if (IN_KSEG_ADDR(va)) { | | 728 | } else if (IN_KSEG_ADDR(va)) { |
730 | /* fast lookup; should be used only if actually mapped? */ | | 729 | /* fast lookup; should be used only if actually mapped? */
731 | pa = AARCH64_KVA_TO_PA(va); | | 730 | pa = AARCH64_KVA_TO_PA(va); |
732 | } else { | | 731 | } else { |
733 | ptep = _pmap_pte_lookup_bs(pm, va, &blocksize); | | 732 | ptep = _pmap_pte_lookup_bs(pm, va, &blocksize); |
734 | if (ptep == NULL) | | 733 | if (ptep == NULL) |
735 | return false; | | 734 | return false; |
736 | pte = *ptep; | | 735 | pte = *ptep; |
737 | if (!lxpde_valid(pte)) | | 736 | if (!lxpde_valid(pte)) |
738 | return false; | | 737 | return false; |
739 | pa = lxpde_pa(pte) + (va & (blocksize - 1)); | | 738 | pa = lxpde_pa(pte) + (va & (blocksize - 1)); |
740 | } | | 739 | } |
741 | | | 740 | |
742 | if (pap != NULL) | | 741 | if (pap != NULL) |
743 | *pap = pa; | | 742 | *pap = pa; |
744 | return true; | | 743 | return true; |
745 | } | | 744 | } |
746 | | | 745 | |
747 | paddr_t | | 746 | paddr_t |
748 | vtophys(vaddr_t va) | | 747 | vtophys(vaddr_t va) |
749 | { | | 748 | { |
750 | struct pmap *pm; | | 749 | struct pmap *pm; |
751 | paddr_t pa; | | 750 | paddr_t pa; |
752 | | | 751 | |
753 | if (va & TTBR_SEL_VA) | | 752 | if (va & TTBR_SEL_VA) |
754 | pm = pmap_kernel(); | | 753 | pm = pmap_kernel(); |
755 | else | | 754 | else |
756 | pm = curlwp->l_proc->p_vmspace->vm_map.pmap; | | 755 | pm = curlwp->l_proc->p_vmspace->vm_map.pmap; |
757 | | | 756 | |
758 | if (pmap_extract(pm, va, &pa) == false) | | 757 | if (pmap_extract(pm, va, &pa) == false) |
759 | return VTOPHYS_FAILED; | | 758 | return VTOPHYS_FAILED; |
760 | return pa; | | 759 | return pa; |
761 | } | | 760 | } |
762 | | | 761 | |
763 | /* | | 762 | /* |
764 | * return a pointer to the pte, regardless of whether the entry is valid or not. | | 763 | * return a pointer to the pte, regardless of whether the entry is valid or not.
765 | */ | | 764 | */ |
766 | static pt_entry_t * | | 765 | static pt_entry_t * |
767 | _pmap_pte_lookup_bs(struct pmap *pm, vaddr_t va, vsize_t *bs) | | 766 | _pmap_pte_lookup_bs(struct pmap *pm, vaddr_t va, vsize_t *bs) |
768 | { | | 767 | { |
769 | pt_entry_t *ptep; | | 768 | pt_entry_t *ptep; |
770 | pd_entry_t *l0, *l1, *l2, *l3; | | 769 | pd_entry_t *l0, *l1, *l2, *l3; |
771 | pd_entry_t pde; | | 770 | pd_entry_t pde; |
772 | vsize_t blocksize; | | 771 | vsize_t blocksize; |
773 | unsigned int idx; | | 772 | unsigned int idx; |
774 | | | 773 | |
775 | if (((pm == pmap_kernel()) && ((va & TTBR_SEL_VA) == 0)) || | | 774 | if (((pm == pmap_kernel()) && ((va & TTBR_SEL_VA) == 0)) || |
776 | ((pm != pmap_kernel()) && ((va & TTBR_SEL_VA) != 0))) { | | 775 | ((pm != pmap_kernel()) && ((va & TTBR_SEL_VA) != 0))) { |
777 | blocksize = 0; | | 776 | blocksize = 0; |
778 | ptep = NULL; | | 777 | ptep = NULL; |
779 | goto done; | | 778 | goto done; |
780 | } | | 779 | } |
781 | | | 780 | |
782 | /* | | 781 | /* |
783 | * traverse L0 -> L1 -> L2 -> L3 | | 782 | * traverse L0 -> L1 -> L2 -> L3 |
784 | */ | | 783 | */ |
785 | blocksize = L0_SIZE; | | 784 | blocksize = L0_SIZE; |
786 | l0 = pm->pm_l0table; | | 785 | l0 = pm->pm_l0table; |
787 | idx = l0pde_index(va); | | 786 | idx = l0pde_index(va); |
788 | ptep = &l0[idx]; | | 787 | ptep = &l0[idx]; |
789 | pde = *ptep; | | 788 | pde = *ptep; |
790 | if (!l0pde_valid(pde)) | | 789 | if (!l0pde_valid(pde)) |
791 | goto done; | | 790 | goto done; |
792 | | | 791 | |
793 | blocksize = L1_SIZE; | | 792 | blocksize = L1_SIZE; |
794 | l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde)); | | 793 | l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde)); |
795 | idx = l1pde_index(va); | | 794 | idx = l1pde_index(va); |
796 | ptep = &l1[idx]; | | 795 | ptep = &l1[idx]; |
797 | pde = *ptep; | | 796 | pde = *ptep; |
798 | if (!l1pde_valid(pde) || l1pde_is_block(pde)) | | 797 | if (!l1pde_valid(pde) || l1pde_is_block(pde)) |
799 | goto done; | | 798 | goto done; |
800 | | | 799 | |
801 | blocksize = L2_SIZE; | | 800 | blocksize = L2_SIZE; |
802 | l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde)); | | 801 | l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde)); |
803 | idx = l2pde_index(va); | | 802 | idx = l2pde_index(va); |
804 | ptep = &l2[idx]; | | 803 | ptep = &l2[idx]; |
805 | pde = *ptep; | | 804 | pde = *ptep; |
806 | if (!l2pde_valid(pde) || l2pde_is_block(pde)) | | 805 | if (!l2pde_valid(pde) || l2pde_is_block(pde)) |
807 | goto done; | | 806 | goto done; |
808 | | | 807 | |
809 | blocksize = L3_SIZE; | | 808 | blocksize = L3_SIZE; |
810 | l3 = (pd_entry_t *)AARCH64_PA_TO_KVA(l2pde_pa(pde)); | | 809 | l3 = (pd_entry_t *)AARCH64_PA_TO_KVA(l2pde_pa(pde)); |
811 | idx = l3pte_index(va); | | 810 | idx = l3pte_index(va); |
812 | ptep = &l3[idx]; | | 811 | ptep = &l3[idx]; |
813 | | | 812 | |
814 | done: | | 813 | done: |
815 | if (bs != NULL) | | 814 | if (bs != NULL) |
816 | *bs = blocksize; | | 815 | *bs = blocksize; |
817 | return ptep; | | 816 | return ptep; |
818 | } | | 817 | } |
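/*
 * Editor's note: a distilled example of consuming _pmap_pte_lookup_bs();
 * it mirrors the logic of pmap_extract() above and is not part of the
 * original source. For a valid mapping the returned blocksize is the span
 * the entry covers (with 4KB granule: L1_SIZE = 1GB, L2_SIZE = 2MB or
 * L3_SIZE = 4KB), so the physical address is the block base plus the
 * offset of va within the block.
 */
static inline bool
example_va_to_pa(struct pmap *pm, vaddr_t va, paddr_t *pap)
{
	pt_entry_t *ptep, pte;
	vsize_t blocksize = 0;

	ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
	if (ptep == NULL)
		return false;
	pte = *ptep;
	if (!lxpde_valid(pte))
		return false;
	*pap = lxpde_pa(pte) + (va & (blocksize - 1));
	return true;
}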
819 | | | 818 | |
820 | static pt_entry_t * | | 819 | static pt_entry_t * |
821 | _pmap_pte_lookup_l3(struct pmap *pm, vaddr_t va) | | 820 | _pmap_pte_lookup_l3(struct pmap *pm, vaddr_t va) |
822 | { | | 821 | { |
823 | pt_entry_t *ptep; | | 822 | pt_entry_t *ptep; |
824 | vsize_t blocksize = 0; | | 823 | vsize_t blocksize = 0; |
825 | | | 824 | |
826 | ptep = _pmap_pte_lookup_bs(pm, va, &blocksize); | | 825 | ptep = _pmap_pte_lookup_bs(pm, va, &blocksize); |
827 | if ((ptep != NULL) && (blocksize == L3_SIZE)) | | 826 | if ((ptep != NULL) && (blocksize == L3_SIZE)) |
828 | return ptep; | | 827 | return ptep; |
829 | | | 828 | |
830 | return NULL; | | 829 | return NULL; |
831 | } | | 830 | } |
832 | | | 831 | |
833 | void | | 832 | void |
834 | pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) | | 833 | pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) |
835 | { | | 834 | { |
836 | pt_entry_t *ptep = NULL, pte; | | 835 | pt_entry_t *ptep = NULL, pte; |
837 | vaddr_t va; | | 836 | vaddr_t va; |
838 | vsize_t blocksize = 0; | | 837 | vsize_t blocksize = 0; |
839 | | | 838 | |
840 | pm_lock(pm); | | 839 | pm_lock(pm); |
841 | | | 840 | |
842 | for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) { | | 841 | for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) { |
843 | /* does va belong to the same L3 table as before? */ | | 842 | /* does va belong to the same L3 table as before? */
844 | if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0)) { | | 843 | if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0)) { |
845 | ptep++; | | 844 | ptep++; |
846 | } else { | | 845 | } else { |
847 | ptep = _pmap_pte_lookup_bs(pm, va, &blocksize); | | 846 | ptep = _pmap_pte_lookup_bs(pm, va, &blocksize); |
848 | if (ptep == NULL) | | 847 | if (ptep == NULL) |
849 | break; | | 848 | break; |
850 | } | | 849 | } |
851 | | | 850 | |
852 | pte = *ptep; | | 851 | pte = *ptep; |
853 | if (lxpde_valid(pte)) { | | 852 | if (lxpde_valid(pte)) { |
854 | vaddr_t eob = (va + blocksize) & ~(blocksize - 1); | | 853 | vaddr_t eob = (va + blocksize) & ~(blocksize - 1); |
855 | vsize_t len = ulmin(eva, eob) - va; | | 854 | vsize_t len = ulmin(eva, eob) - va;
856 | | | 855 | |
857 | if (l3pte_writable(pte)) { | | 856 | if (l3pte_writable(pte)) { |
858 | cpu_icache_sync_range(va, len); | | 857 | cpu_icache_sync_range(va, len); |
859 | } else { | | 858 | } else { |
860 | /* | | 859 | /* |
861 | * change to writable temporarily | | 860 | * change to writable temporarily
862 | * to do cpu_icache_sync_range() | | 861 | * to do cpu_icache_sync_range() |
863 | */ | | 862 | */ |
864 | pt_entry_t opte = pte; | | 863 | pt_entry_t opte = pte; |
865 | pte = pte & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); | | 864 | pte = pte & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); |
866 | pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); | | 865 | pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); |
867 | atomic_swap_64(ptep, pte); | | 866 | atomic_swap_64(ptep, pte); |
868 | AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); | | 867 | AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); |
869 | cpu_icache_sync_range(va, len); | | 868 | cpu_icache_sync_range(va, len); |
870 | atomic_swap_64(ptep, opte); | | 869 | atomic_swap_64(ptep, opte); |
871 | AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); | | 870 | AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true); |
872 | } | | 871 | } |
873 | } | | 872 | } |
874 | } | | 873 | } |
875 | | | 874 | |
876 | pm_unlock(pm); | | 875 | pm_unlock(pm); |
877 | } | | 876 | } |

/*
 * Routine:	pmap_procwr
 *
 * Function:
 *	Synchronize caches corresponding to [va, va+len) in process p.
 */
void
pmap_procwr(struct proc *p, vaddr_t va, int len)
{

        /* We only need to do anything if it is the current process. */
        if (p == curproc)
                cpu_icache_sync_range(va, len);
}

static pt_entry_t
_pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t protmask,
    bool user)
{
        vm_prot_t masked;
        pt_entry_t xn;

        masked = prot & protmask;
        pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_AP);

        /* keep the intended prot in the OS bits for ref/mod emulation */
        switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) {
        case 0:
        default:
                break;
        case VM_PROT_READ:
                pte |= LX_BLKPAG_OS_READ;
                break;
        case VM_PROT_WRITE:
        case VM_PROT_READ|VM_PROT_WRITE:
                pte |= (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE);
                break;
        }

        switch (masked & (VM_PROT_READ|VM_PROT_WRITE)) {
        case 0:
        default:
                /* not accessible: LX_BLKPAG_AF is left clear */
                pte |= LX_BLKPAG_AP_RO;
                break;
        case VM_PROT_READ:
                /* actual permission of pte */
                pte |= LX_BLKPAG_AF;
                pte |= LX_BLKPAG_AP_RO;
                break;
        case VM_PROT_WRITE:
        case VM_PROT_READ|VM_PROT_WRITE:
                /* actual permission of pte */
                pte |= LX_BLKPAG_AF;
                pte |= LX_BLKPAG_AP_RW;
                break;
        }

        /*
         * executable for kernel or user? first mark both non-executable,
         * then clear XN for whichever side is requested.
         */
        pte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN);
        xn = user ? LX_BLKPAG_UXN : LX_BLKPAG_PXN;
        if (prot & VM_PROT_EXECUTE)
                pte &= ~xn;

        return pte;
}
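
/*
 * Editorial example (a sketch, not part of the file proper): for ref/mod
 * emulation the caller passes the full protection in "prot" and the page's
 * referenced/modified summary in "protmask".  With prot = R|W on a page
 * that has been referenced but not yet modified (protmask = R):
 *
 *        pte = _pmap_pte_adjust_prot(pte,
 *            VM_PROT_READ | VM_PROT_WRITE,     // intended protection
 *            VM_PROT_READ,                     // referenced, not modified
 *            user);
 *
 * The OS bits record R|W, but the hardware bits are AF + AP_RO, so the
 * first store faults and the fixup path can mark the page modified and
 * upgrade the entry to AP_RW.
 */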

static pt_entry_t
_pmap_pte_adjust_cacheflags(pt_entry_t pte, u_int flags)
{

        pte &= ~LX_BLKPAG_ATTR_MASK;

        switch (flags & (PMAP_CACHE_MASK|PMAP_DEV_MASK)) {
        case PMAP_DEV_SO ... PMAP_DEV_SO | PMAP_CACHE_MASK:
                pte |= LX_BLKPAG_ATTR_DEVICE_MEM_SO;	/* Device-nGnRnE */
                break;
        case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:
                pte |= LX_BLKPAG_ATTR_DEVICE_MEM;	/* Device-nGnRE */
                break;
        case PMAP_NOCACHE:
        case PMAP_NOCACHE_OVR:
        case PMAP_WRITE_COMBINE:
                pte |= LX_BLKPAG_ATTR_NORMAL_NC;	/* mapped to non-cacheable */
                break;
        case PMAP_WRITE_BACK:
        case 0:
        default:
                pte |= LX_BLKPAG_ATTR_NORMAL_WB;
                break;
        }

        return pte;
}
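
/*
 * Editorial note: "case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:" uses the
 * GCC/Clang case-range extension, so a device flag combined with any
 * PMAP_CACHE_* bits still selects the device attribute.  In summary:
 *
 *        PMAP_DEV_SO                       -> Device-nGnRnE (strongly ordered)
 *        PMAP_DEV                          -> Device-nGnRE
 *        PMAP_NOCACHE/_OVR/WRITE_COMBINE   -> Normal, non-cacheable
 *        PMAP_WRITE_BACK, default          -> Normal, write-back
 */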

static struct pv_entry *
_pmap_remove_pv(struct vm_page *pg, struct pmap *pm, vaddr_t va, pt_entry_t pte)
{
        struct vm_page_md *md;
        struct pv_entry *pv;

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLED(pmaphist);

        UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pte=%llx",
            pg, pm, va, pte);

        md = VM_PAGE_TO_MD(pg);

        TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
                if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
                        TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link);
                        PMAP_COUNT(pv_remove);
                        break;
                }
        }
#ifdef PMAPCOUNTERS
        if (pv == NULL) {
                PMAP_COUNT(pv_remove_nopv);
        }
#endif

        return pv;
}

#if defined(PMAP_PV_DEBUG) || defined(DDB)

static char *
str_vmflags(uint32_t flags)
{
        static int idx = 0;
        static char buf[4][32];	/* XXX */
        char *p;

        p = buf[idx];
        idx = (idx + 1) & 3;

        p[0] = (flags & VM_PROT_READ) ? 'R' : '-';
        p[1] = (flags & VM_PROT_WRITE) ? 'W' : '-';
        p[2] = (flags & VM_PROT_EXECUTE) ? 'X' : '-';
        if (flags & PMAP_WIRED)
                memcpy(&p[3], ",WIRED\0", 7);
        else
                p[3] = '\0';

        return p;
}
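
/*
 * Editorial note: str_vmflags() rotates through four static buffers
 * ("idx = (idx + 1) & 3"), so up to four results can be live in a single
 * printf() call without clobbering each other -- enough for the debug
 * dumps below, though neither re-entrant nor MP-safe (hence the XXX).
 */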

static void
pg_dump(struct vm_page *pg, void (*pr)(const char *, ...) __printflike(1, 2))
{
        pr("pg=%p\n", pg);
        pr(" pg->uanon = %p\n", pg->uanon);
        pr(" pg->uobject = %p\n", pg->uobject);
        pr(" pg->offset = %zu\n", pg->offset);
        pr(" pg->flags = %u\n", pg->flags);
        pr(" pg->loan_count = %u\n", pg->loan_count);
        pr(" pg->wire_count = %u\n", pg->wire_count);
        pr(" pg->pqflags = %u\n", pg->pqflags);
        pr(" pg->phys_addr = %016lx\n", VM_PAGE_TO_PHYS(pg));
}

static void
pv_dump(struct vm_page_md *md, void (*pr)(const char *, ...) __printflike(1, 2))
{
        struct pv_entry *pv;
        int i;

        i = 0;

        pr("md=%p\n", md);
        pr(" md->mdpg_flags=%08x %s\n", md->mdpg_flags,
            str_vmflags(md->mdpg_flags));

        TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
                pr(" pv[%d] pv=%p\n",
                    i, pv);
                pr(" pv[%d].pv_pmap = %p (asid=%d)\n",
                    i, pv->pv_pmap, pv->pv_pmap->pm_asid);
                pr(" pv[%d].pv_va = %016lx (color=%d)\n",
                    i, pv->pv_va, _pmap_color(pv->pv_va));
                pr(" pv[%d].pv_pa = %016lx (color=%d)\n",
                    i, pv->pv_pa, _pmap_color(pv->pv_pa));
                pr(" pv[%d].pv_ptep = %p\n",
                    i, pv->pv_ptep);
                i++;
        }
}
#endif /* PMAP_PV_DEBUG || DDB */

static int
_pmap_enter_pv(struct vm_page *pg, struct pmap *pm, struct pv_entry **pvp,
    vaddr_t va, pt_entry_t *ptep, paddr_t pa, u_int flags)
{
        struct vm_page_md *md;
        struct pv_entry *pv;

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLED(pmaphist);

        UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pa=%llx", pg, pm, va, pa);
        UVMHIST_LOG(pmaphist, "ptep=%p, flags=%08x", ptep, flags, 0, 0);

        md = VM_PAGE_TO_MD(pg);

        /* is this pv already registered? */
        TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
                if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
                        break;
                }
        }

        if (pv == NULL) {
                /*
                 * create and link a new pv.
                 * the pv was preallocated at the beginning of _pmap_enter().
                 */
                pv = *pvp;
                if (pv == NULL)
                        return ENOMEM;
                *pvp = NULL;

                pv->pv_pmap = pm;
                pv->pv_va = va;
                pv->pv_pa = pa;
                pv->pv_ptep = ptep;

                TAILQ_INSERT_HEAD(&md->mdpg_pvhead, pv, pv_link);
                PMAP_COUNT(pv_enter);

#ifdef PMAP_PV_DEBUG
                if (!TAILQ_EMPTY(&md->mdpg_pvhead)) {
                        printf("pv %p alias added va=%016lx -> pa=%016lx\n",
                            pv, va, pa);
                        pv_dump(md, printf);
                }
#endif
        }

        return 0;
}

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
        int s;

        s = splvm();
        _pmap_enter(pmap_kernel(), va, pa, prot, flags | PMAP_WIRED, true);
        splx(s);
}
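
/*
 * Editorial example (kva and devpa are hypothetical values, not names from
 * this file): pmap_kenter_pa() enters a wired, unmanaged mapping into the
 * kernel pmap, with no pv tracking and no fault-time fixup.  A typical
 * sketch for a device page:
 *
 *        pmap_kenter_pa(kva, devpa, VM_PROT_READ | VM_PROT_WRITE, PMAP_DEV);
 *        ...                              // access the device at kva
 *        pmap_kremove(kva, PAGE_SIZE);    // undo the mapping
 */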

void
pmap_kremove(vaddr_t va, vsize_t size)
{
        struct pmap *kpm = pmap_kernel();
        int s;

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLED(pmaphist);

        UVMHIST_LOG(pmaphist, "va=%llx, size=%llx", va, size, 0, 0);

        KDASSERT((va & PGOFSET) == 0);
        KDASSERT((size & PGOFSET) == 0);

        KDASSERT(!IN_KSEG_ADDR(va));
        KDASSERT(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS));

        s = splvm();
        pm_lock(kpm);
        _pmap_remove(kpm, va, va + size, true, NULL);
        pm_unlock(kpm);
        splx(s);
}

static void
_pmap_protect_pv(struct vm_page *pg, struct pv_entry *pv, vm_prot_t prot)
{
        pt_entry_t *ptep, pte;
        vm_prot_t pteprot;
        uint32_t mdattr;
        const bool user = (pv->pv_pmap != pmap_kernel());

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLED(pmaphist);

        UVMHIST_LOG(pmaphist, "pg=%p, pv=%p, prot=%08x", pg, pv, prot, 0);

        /* get prot mask from referenced/modified */
        mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags &
            (VM_PROT_READ | VM_PROT_WRITE);

        pm_lock(pv->pv_pmap);

        ptep = pv->pv_ptep;
        pte = *ptep;

        /* get prot mask from pte */
        pteprot = 0;
        if (pte & LX_BLKPAG_AF)
                pteprot |= VM_PROT_READ;
        if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW)
                pteprot |= VM_PROT_WRITE;
        if (l3pte_executable(pte, user))
                pteprot |= VM_PROT_EXECUTE;

        /* new prot = prot & pteprot & mdattr */
        pte = _pmap_pte_adjust_prot(pte, prot & pteprot, mdattr, user);
        atomic_swap_64(ptep, pte);
        AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, pv->pv_va, true);

        pm_unlock(pv->pv_pmap);
}

void
pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
        pt_entry_t *ptep = NULL, pte;
        vaddr_t va;
        vsize_t blocksize = 0;
        const bool user = (pm != pmap_kernel());

        KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLED(pmaphist);

        UVMHIST_LOG(pmaphist, "pm=%p, sva=%016lx, eva=%016lx, prot=%08x",
            pm, sva, eva, prot);

        KASSERT_PM_ADDR(pm, sva);
        KASSERT(!IN_KSEG_ADDR(sva));

        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                PMAP_COUNT(protect_remove_fallback);
                pmap_remove(pm, sva, eva);
                return;
        }
        PMAP_COUNT(protect);

        KDASSERT((sva & PAGE_MASK) == 0);
        KDASSERT((eva & PAGE_MASK) == 0);

        pm_lock(pm);

        for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) {
#ifdef UVMHIST
                pt_entry_t opte;
#endif
                struct vm_page *pg;
                paddr_t pa;
                uint32_t mdattr;
                bool executable;

                /* does va belong to the same L3 table as before? */
                if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0))
                        ptep++;
                else
                        ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);

                pte = *ptep;
                if (!lxpde_valid(pte)) {
                        PMAP_COUNT(protect_none);
                        continue;
                }

                pa = lxpde_pa(pte);
                pg = PHYS_TO_VM_PAGE(pa);

                if (pg != NULL) {
                        /* get prot mask from referenced/modified */
                        mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags &
                            (VM_PROT_READ | VM_PROT_WRITE);
                        PMAP_COUNT(protect_managed);
                } else {
                        /* unmanaged page */
                        mdattr = VM_PROT_ALL;
                        PMAP_COUNT(protect_unmanaged);
                }

#ifdef UVMHIST
                opte = pte;
#endif
                executable = l3pte_executable(pte, user);
                pte = _pmap_pte_adjust_prot(pte, prot, mdattr, user);

                if (!executable && (prot & VM_PROT_EXECUTE)) {
                        /* non-exec -> exec */
                        UVMHIST_LOG(pmaphist, "icache_sync: "
                            "pm=%p, va=%016lx, pte: %016lx -> %016lx",
                            pm, va, opte, pte);
                        if (!l3pte_writable(pte)) {
                                PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, true);
                                atomic_swap_64(ptep, pte);
                                AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
                        } else {
                                atomic_swap_64(ptep, pte);
                                AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
                                cpu_icache_sync_range(va, PAGE_SIZE);
                        }
                } else {
                        atomic_swap_64(ptep, pte);
                        AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
                }
        }

        pm_unlock(pm);
}
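
/*
 * Editorial note: on the non-executable -> executable transition above, the
 * I-cache must be brought in sync with the D-cache before the executable
 * mapping is used.  When the new PTE is not writable,
 * PTE_ICACHE_SYNC_PAGE() performs the same temporary-writable dance as
 * pmap_icache_sync_range(); when it is writable, a plain
 * cpu_icache_sync_range(va, PAGE_SIZE) after the PTE swap is enough.
 */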

void
pmap_activate(struct lwp *l)
{
        struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
        uint64_t ttbr0, tcr;

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLED(pmaphist);

        if (pm == pmap_kernel())
                return;
        if (l != curlwp)
                return;

        KASSERT(pm->pm_l0table != NULL);

        UVMHIST_LOG(pmaphist, "lwp=%p (pid=%d)", l, l->l_proc->p_pid, 0, 0);

        /* Disable translation table walks using TTBR0 */
        tcr = reg_tcr_el1_read();
        reg_tcr_el1_write(tcr | TCR_EPD0);
        __asm __volatile("isb" ::: "memory");

        /* XXX */
        CTASSERT(PID_MAX <= 65535);	/* 16bit ASID */
        if (pm->pm_asid == -1)
                pm->pm_asid = l->l_proc->p_pid;

        ttbr0 = ((uint64_t)pm->pm_asid << 48) | pm->pm_l0table_pa;
        cpu_set_ttbr0(ttbr0);

        /* Re-enable translation table walks using TTBR0 */
        tcr = reg_tcr_el1_read();
        reg_tcr_el1_write(tcr & ~TCR_EPD0);
        __asm __volatile("isb" ::: "memory");

        pm->pm_activated = true;

        PMAP_COUNT(activate);
}
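
/*
 * Editorial note: TTBR0_EL1 carries both the ASID and the level-0 table
 * base, so a single register write switches the user address space
 * (layout simplified):
 *
 *        63        48 47                                    0
 *        +-----------+--------------------------------------+
 *        |   ASID    |    L0 translation table base (PA)    |
 *        +-----------+--------------------------------------+
 *
 * TTBR0 walks are disabled via TCR_EL1.EPD0 around the update so that no
 * speculative table walk can pair the old table with the new ASID.
 */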

void
pmap_deactivate(struct lwp *l)
{
        struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
        uint64_t tcr;

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLED(pmaphist);

        if (pm == pmap_kernel())
                return;

        UVMHIST_LOG(pmaphist, "lwp=%p, asid=%d", l, pm->pm_asid, 0, 0);

        /* Disable translation table walks using TTBR0 */
        tcr = reg_tcr_el1_read();
        reg_tcr_el1_write(tcr | TCR_EPD0);
        __asm __volatile("isb" ::: "memory");

        /* XXX */
        pm->pm_activated = false;

        PMAP_COUNT(deactivate);
}

struct pmap *
pmap_create(void)
{
        struct pmap *pm;

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLED(pmaphist);

        pm = pool_cache_get(&_pmap_cache, PR_WAITOK);
        memset(pm, 0, sizeof(*pm));
        pm->pm_refcnt = 1;
        pm->pm_idlepdp = 0;
        pm->pm_asid = -1;
        TAILQ_INIT(&pm->pm_vmlist);
        mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_VM);

        pm->pm_l0table_pa = pmap_alloc_pdp(pm, NULL, 0, true);
        KASSERT(pm->pm_l0table_pa != POOL_PADDR_INVALID);
        pm->pm_l0table = (pd_entry_t *)AARCH64_PA_TO_KVA(pm->pm_l0table_pa);
        KASSERT(((vaddr_t)pm->pm_l0table & (PAGE_SIZE - 1)) == 0);

        UVMHIST_LOG(pmaphist, "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx",
            pm, pm->pm_l0table, pm->pm_l0table_pa, 0);

        PMAP_COUNT(create);
        return pm;
}

void
pmap_destroy(struct pmap *pm)
{
        unsigned int refcnt;

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLED(pmaphist);

        UVMHIST_LOG(pmaphist,
            "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx, refcnt=%d",
            pm, pm->pm_l0table, pm->pm_l0table_pa, pm->pm_refcnt);

        if (pm == NULL)
                return;

        if (pm == pmap_kernel())
                panic("cannot destroy kernel pmap");

        refcnt = atomic_dec_uint_nv(&pm->pm_refcnt);
        if (refcnt > 0)
                return;

        aarch64_tlbi_by_asid(pm->pm_asid);

        _pmap_free_pdp_all(pm);
        mutex_destroy(&pm->pm_lock);

        pool_cache_put(&_pmap_cache, pm);

        PMAP_COUNT(destroy);
}

static inline void
_pmap_pdp_setparent(struct pmap *pm, struct vm_page *pg, pt_entry_t *ptep)
{
        if ((pm != pmap_kernel()) && (pg != NULL))
                VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = ptep;
}

/*
 * increment reference counter of the page descriptor page.