| @@ -1,1194 +1,1194 @@ | | | @@ -1,1194 +1,1194 @@ |
1 | /* $NetBSD: pmap.c,v 1.411 2020/04/19 21:24:36 ad Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.412 2020/04/21 06:45:16 skrll Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright 2003 Wasabi Systems, Inc. | | 4 | * Copyright 2003 Wasabi Systems, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Written by Steve C. Woodford for Wasabi Systems, Inc. | | 7 | * Written by Steve C. Woodford for Wasabi Systems, Inc. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the | | 15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. | | 16 | * documentation and/or other materials provided with the distribution. |
17 | * 3. All advertising materials mentioning features or use of this software | | 17 | * 3. All advertising materials mentioning features or use of this software |
18 | * must display the following acknowledgement: | | 18 | * must display the following acknowledgement: |
19 | * This product includes software developed for the NetBSD Project by | | 19 | * This product includes software developed for the NetBSD Project by |
20 | * Wasabi Systems, Inc. | | 20 | * Wasabi Systems, Inc. |
21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | | 21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
22 | * or promote products derived from this software without specific prior | | 22 | * or promote products derived from this software without specific prior |
23 | * written permission. | | 23 | * written permission. |
24 | * | | 24 | * |
25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | | 25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | | 28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
35 | * POSSIBILITY OF SUCH DAMAGE. | | 35 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ | | 36 | */ |
37 | | | 37 | |
38 | /* | | 38 | /* |
39 | * Copyright (c) 2002-2003 Wasabi Systems, Inc. | | 39 | * Copyright (c) 2002-2003 Wasabi Systems, Inc. |
40 | * Copyright (c) 2001 Richard Earnshaw | | 40 | * Copyright (c) 2001 Richard Earnshaw |
41 | * Copyright (c) 2001-2002 Christopher Gilbert | | 41 | * Copyright (c) 2001-2002 Christopher Gilbert |
42 | * All rights reserved. | | 42 | * All rights reserved. |
43 | * | | 43 | * |
44 | * 1. Redistributions of source code must retain the above copyright | | 44 | * 1. Redistributions of source code must retain the above copyright |
45 | * notice, this list of conditions and the following disclaimer. | | 45 | * notice, this list of conditions and the following disclaimer. |
46 | * 2. Redistributions in binary form must reproduce the above copyright | | 46 | * 2. Redistributions in binary form must reproduce the above copyright |
47 | * notice, this list of conditions and the following disclaimer in the | | 47 | * notice, this list of conditions and the following disclaimer in the |
48 | * documentation and/or other materials provided with the distribution. | | 48 | * documentation and/or other materials provided with the distribution. |
49 | * 3. The name of the company nor the name of the author may be used to | | 49 | * 3. The name of the company nor the name of the author may be used to |
50 | * endorse or promote products derived from this software without specific | | 50 | * endorse or promote products derived from this software without specific |
51 | * prior written permission. | | 51 | * prior written permission. |
52 | * | | 52 | * |
53 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED | | 53 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
54 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | | 54 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
55 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 55 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
56 | * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, | | 56 | * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, |
57 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | | 57 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
58 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | | 58 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
59 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 59 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
63 | * SUCH DAMAGE. | | 63 | * SUCH DAMAGE. |
64 | */ | | 64 | */ |
65 | | | 65 | |
66 | /*- | | 66 | /*- |
67 | * Copyright (c) 1999, 2020 The NetBSD Foundation, Inc. | | 67 | * Copyright (c) 1999, 2020 The NetBSD Foundation, Inc. |
68 | * All rights reserved. | | 68 | * All rights reserved. |
69 | * | | 69 | * |
70 | * This code is derived from software contributed to The NetBSD Foundation | | 70 | * This code is derived from software contributed to The NetBSD Foundation |
71 | * by Charles M. Hannum. | | 71 | * by Charles M. Hannum. |
72 | * | | 72 | * |
73 | * Redistribution and use in source and binary forms, with or without | | 73 | * Redistribution and use in source and binary forms, with or without |
74 | * modification, are permitted provided that the following conditions | | 74 | * modification, are permitted provided that the following conditions |
75 | * are met: | | 75 | * are met: |
76 | * 1. Redistributions of source code must retain the above copyright | | 76 | * 1. Redistributions of source code must retain the above copyright |
77 | * notice, this list of conditions and the following disclaimer. | | 77 | * notice, this list of conditions and the following disclaimer. |
78 | * 2. Redistributions in binary form must reproduce the above copyright | | 78 | * 2. Redistributions in binary form must reproduce the above copyright |
79 | * notice, this list of conditions and the following disclaimer in the | | 79 | * notice, this list of conditions and the following disclaimer in the |
80 | * documentation and/or other materials provided with the distribution. | | 80 | * documentation and/or other materials provided with the distribution. |
81 | * | | 81 | * |
82 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 82 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
83 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 83 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
84 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 84 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
85 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 85 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
86 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 86 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
87 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 87 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
88 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 88 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
89 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 89 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
90 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 90 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
91 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 91 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
92 | * POSSIBILITY OF SUCH DAMAGE. | | 92 | * POSSIBILITY OF SUCH DAMAGE. |
93 | */ | | 93 | */ |
94 | | | 94 | |
95 | /* | | 95 | /* |
96 | * Copyright (c) 1994-1998 Mark Brinicombe. | | 96 | * Copyright (c) 1994-1998 Mark Brinicombe. |
97 | * Copyright (c) 1994 Brini. | | 97 | * Copyright (c) 1994 Brini. |
98 | * All rights reserved. | | 98 | * All rights reserved. |
99 | * | | 99 | * |
100 | * This code is derived from software written for Brini by Mark Brinicombe | | 100 | * This code is derived from software written for Brini by Mark Brinicombe |
101 | * | | 101 | * |
102 | * Redistribution and use in source and binary forms, with or without | | 102 | * Redistribution and use in source and binary forms, with or without |
103 | * modification, are permitted provided that the following conditions | | 103 | * modification, are permitted provided that the following conditions |
104 | * are met: | | 104 | * are met: |
105 | * 1. Redistributions of source code must retain the above copyright | | 105 | * 1. Redistributions of source code must retain the above copyright |
106 | * notice, this list of conditions and the following disclaimer. | | 106 | * notice, this list of conditions and the following disclaimer. |
107 | * 2. Redistributions in binary form must reproduce the above copyright | | 107 | * 2. Redistributions in binary form must reproduce the above copyright |
108 | * notice, this list of conditions and the following disclaimer in the | | 108 | * notice, this list of conditions and the following disclaimer in the |
109 | * documentation and/or other materials provided with the distribution. | | 109 | * documentation and/or other materials provided with the distribution. |
110 | * 3. All advertising materials mentioning features or use of this software | | 110 | * 3. All advertising materials mentioning features or use of this software |
111 | * must display the following acknowledgement: | | 111 | * must display the following acknowledgement: |
112 | * This product includes software developed by Mark Brinicombe. | | 112 | * This product includes software developed by Mark Brinicombe. |
113 | * 4. The name of the author may not be used to endorse or promote products | | 113 | * 4. The name of the author may not be used to endorse or promote products |
114 | * derived from this software without specific prior written permission. | | 114 | * derived from this software without specific prior written permission. |
115 | * | | 115 | * |
116 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 116 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
117 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 117 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
118 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 118 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
119 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 119 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
120 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 120 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
121 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 121 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
122 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 122 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
123 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 123 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
124 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 124 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
125 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 125 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
126 | * RiscBSD kernel project | | 126 | * RiscBSD kernel project |
127 | * | | 127 | * |
128 | * pmap.c | | 128 | * pmap.c |
129 | * | | 129 | * |
130 | * Machine dependent vm stuff | | 130 | * Machine dependent vm stuff |
131 | * | | 131 | * |
132 | * Created : 20/09/94 | | 132 | * Created : 20/09/94 |
133 | */ | | 133 | */ |
134 | | | 134 | |
135 | /* | | 135 | /* |
136 | * armv6 and VIPT cache support by 3am Software Foundry, | | 136 | * armv6 and VIPT cache support by 3am Software Foundry, |
137 | * Copyright (c) 2007 Microsoft | | 137 | * Copyright (c) 2007 Microsoft |
138 | */ | | 138 | */ |
139 | | | 139 | |
140 | /* | | 140 | /* |
141 | * Performance improvements, UVM changes, overhauls and part-rewrites | | 141 | * Performance improvements, UVM changes, overhauls and part-rewrites |
142 | * were contributed by Neil A. Carson <neil@causality.com>. | | 142 | * were contributed by Neil A. Carson <neil@causality.com>. |
143 | */ | | 143 | */ |
144 | | | 144 | |
145 | /* | | 145 | /* |
146 | * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables | | 146 | * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables |
147 | * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi | | 147 | * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi |
148 | * Systems, Inc. | | 148 | * Systems, Inc. |
149 | * | | 149 | * |
150 | * There are still a few things outstanding at this time: | | 150 | * There are still a few things outstanding at this time: |
151 | * | | 151 | * |
152 | * - There are some unresolved issues for MP systems: | | 152 | * - There are some unresolved issues for MP systems: |
153 | * | | 153 | * |
154 | * o The L1 metadata needs a lock, or more specifically, some places | | 154 | * o The L1 metadata needs a lock, or more specifically, some places |
155 | * need to acquire an exclusive lock when modifying L1 translation | | 155 | * need to acquire an exclusive lock when modifying L1 translation |
156 | * table entries. | | 156 | * table entries. |
157 | * | | 157 | * |
158 | * o When one cpu modifies an L1 entry, and that L1 table is also | | 158 | * o When one cpu modifies an L1 entry, and that L1 table is also |
159 | * being used by another cpu, then the latter will need to be told | | 159 | * being used by another cpu, then the latter will need to be told |
160 | * that a tlb invalidation may be necessary. (But only if the old | | 160 | * that a tlb invalidation may be necessary. (But only if the old |
161 | * domain number in the L1 entry being over-written is currently | | 161 | * domain number in the L1 entry being over-written is currently |
162 | * the active domain on that cpu). I guess there are lots more tlb | | 162 | * the active domain on that cpu). I guess there are lots more tlb |
163 | * shootdown issues too... | | 163 | * shootdown issues too... |
164 | * | | 164 | * |
165 | * o If the vector_page is at 0x00000000 instead of in kernel VA space, | | 165 | * o If the vector_page is at 0x00000000 instead of in kernel VA space, |
166 | * then MP systems will lose big-time because of the MMU domain hack. | | 166 | * then MP systems will lose big-time because of the MMU domain hack. |
167 | * The only way this can be solved (apart from moving the vector | | 167 | * The only way this can be solved (apart from moving the vector |
168 | * page to 0xffff0000) is to reserve the first 1MB of user address | | 168 | * page to 0xffff0000) is to reserve the first 1MB of user address |
169 | * space for kernel use only. This would require re-linking all | | 169 | * space for kernel use only. This would require re-linking all |
170 | * applications so that the text section starts above this 1MB | | 170 | * applications so that the text section starts above this 1MB |
171 | * boundary. | | 171 | * boundary. |
172 | * | | 172 | * |
173 | * o Tracking which VM space is resident in the cache/tlb has not yet | | 173 | * o Tracking which VM space is resident in the cache/tlb has not yet |
174 | * been implemented for MP systems. | | 174 | * been implemented for MP systems. |
175 | * | | 175 | * |
176 | * o Finally, there is a pathological condition where two cpus running | | 176 | * o Finally, there is a pathological condition where two cpus running |
177 | * two separate processes (not lwps) which happen to share an L1 | | 177 | * two separate processes (not lwps) which happen to share an L1 |
178 | * can get into a fight over one or more L1 entries. This will result | | 178 | * can get into a fight over one or more L1 entries. This will result |
179 | * in a significant slow-down if both processes are in tight loops. | | 179 | * in a significant slow-down if both processes are in tight loops. |
180 | */ | | 180 | */ |
181 | | | 181 | |
182 | /* Include header files */ | | 182 | /* Include header files */ |
183 | | | 183 | |
184 | #include "opt_arm_debug.h" | | 184 | #include "opt_arm_debug.h" |
185 | #include "opt_cpuoptions.h" | | 185 | #include "opt_cpuoptions.h" |
186 | #include "opt_ddb.h" | | 186 | #include "opt_ddb.h" |
187 | #include "opt_lockdebug.h" | | 187 | #include "opt_lockdebug.h" |
188 | #include "opt_multiprocessor.h" | | 188 | #include "opt_multiprocessor.h" |
189 | | | 189 | |
190 | #ifdef MULTIPROCESSOR | | 190 | #ifdef MULTIPROCESSOR |
191 | #define _INTR_PRIVATE | | 191 | #define _INTR_PRIVATE |
192 | #endif | | 192 | #endif |
193 | | | 193 | |
194 | #include <sys/cdefs.h> | | 194 | #include <sys/cdefs.h> |
195 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.411 2020/04/19 21:24:36 ad Exp $"); | | 195 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.412 2020/04/21 06:45:16 skrll Exp $"); |
196 | | | 196 | |
197 | #include <sys/atomic.h> | | 197 | #include <sys/atomic.h> |
198 | #include <sys/param.h> | | 198 | #include <sys/param.h> |
199 | #include <sys/types.h> | | 199 | #include <sys/types.h> |
200 | #include <sys/atomic.h> | | 200 | #include <sys/atomic.h> |
201 | #include <sys/bus.h> | | 201 | #include <sys/bus.h> |
202 | #include <sys/cpu.h> | | 202 | #include <sys/cpu.h> |
203 | #include <sys/intr.h> | | 203 | #include <sys/intr.h> |
204 | #include <sys/kernel.h> | | 204 | #include <sys/kernel.h> |
205 | #include <sys/kernhist.h> | | 205 | #include <sys/kernhist.h> |
206 | #include <sys/kmem.h> | | 206 | #include <sys/kmem.h> |
207 | #include <sys/pool.h> | | 207 | #include <sys/pool.h> |
208 | #include <sys/proc.h> | | 208 | #include <sys/proc.h> |
209 | #include <sys/sysctl.h> | | 209 | #include <sys/sysctl.h> |
210 | #include <sys/systm.h> | | 210 | #include <sys/systm.h> |
211 | | | 211 | |
212 | #include <uvm/uvm.h> | | 212 | #include <uvm/uvm.h> |
213 | #include <uvm/pmap/pmap_pvt.h> | | 213 | #include <uvm/pmap/pmap_pvt.h> |
214 | | | 214 | |
215 | #include <arm/locore.h> | | 215 | #include <arm/locore.h> |
216 | | | 216 | |
217 | #ifdef DDB | | 217 | #ifdef DDB |
218 | #include <arm/db_machdep.h> | | 218 | #include <arm/db_machdep.h> |
219 | #endif | | 219 | #endif |
220 | | | 220 | |
221 | #ifdef VERBOSE_INIT_ARM | | 221 | #ifdef VERBOSE_INIT_ARM |
222 | #define VPRINTF(...) printf(__VA_ARGS__) | | 222 | #define VPRINTF(...) printf(__VA_ARGS__) |
223 | #else | | 223 | #else |
224 | #define VPRINTF(...) __nothing | | 224 | #define VPRINTF(...) __nothing |
225 | #endif | | 225 | #endif |
226 | | | 226 | |
227 | /* | | 227 | /* |
228 | * pmap_kernel() points here | | 228 | * pmap_kernel() points here |
229 | */ | | 229 | */ |
230 | static struct pmap kernel_pmap_store = { | | 230 | static struct pmap kernel_pmap_store = { |
231 | #ifndef ARM_MMU_EXTENDED | | 231 | #ifndef ARM_MMU_EXTENDED |
232 | .pm_activated = true, | | 232 | .pm_activated = true, |
233 | .pm_domain = PMAP_DOMAIN_KERNEL, | | 233 | .pm_domain = PMAP_DOMAIN_KERNEL, |
234 | .pm_cstate.cs_all = PMAP_CACHE_STATE_ALL, | | 234 | .pm_cstate.cs_all = PMAP_CACHE_STATE_ALL, |
235 | #endif | | 235 | #endif |
236 | }; | | 236 | }; |
237 | struct pmap * const kernel_pmap_ptr = &kernel_pmap_store; | | 237 | struct pmap * const kernel_pmap_ptr = &kernel_pmap_store; |
238 | #undef pmap_kernel | | 238 | #undef pmap_kernel |
239 | #define pmap_kernel() (&kernel_pmap_store) | | 239 | #define pmap_kernel() (&kernel_pmap_store) |
240 | #ifdef PMAP_NEED_ALLOC_POOLPAGE | | 240 | #ifdef PMAP_NEED_ALLOC_POOLPAGE |
241 | int arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT; | | 241 | int arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT; |
242 | #endif | | 242 | #endif |
243 | | | 243 | |
244 | /* | | 244 | /* |
245 | * Pool and cache that pmap structures are allocated from. | | 245 | * Pool and cache that pmap structures are allocated from. |
246 | * We use a cache to avoid clearing the pm_l2[] array (1KB) | | 246 | * We use a cache to avoid clearing the pm_l2[] array (1KB) |
247 | * in pmap_create(). | | 247 | * in pmap_create(). |
248 | */ | | 248 | */ |
249 | static struct pool_cache pmap_cache; | | 249 | static struct pool_cache pmap_cache; |
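Reviewer note: the win from a pool_cache(9) over a plain pool is that the constructor runs only when an object first comes out of the backing pool; objects recycled through the cache keep their constructed state, so pmap_create() gets a pre-zeroed pm_l2[] for free. A minimal sketch of that pattern, assuming the standard pool_cache(9) bootstrap interface; the ctor body and "pmappl" wait channel below are illustrative (the file's own constructor, pmap_pmap_ctor(), is only prototyped near the end of this hunk):

    static int
    example_pmap_ctor(void *arg, void *obj, int flags)
    {
            /* Runs once, when the object first leaves the backing
             * pool -- not on every pool_cache_get(). */
            memset(obj, 0, sizeof(struct pmap));
            return 0;
    }

    pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
        "pmappl", NULL, IPL_NONE, example_pmap_ctor, NULL, NULL);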
250 | | | 250 | |
251 | /* | | 251 | /* |
252 | * Pool of PV structures | | 252 | * Pool of PV structures |
253 | */ | | 253 | */ |
254 | static struct pool pmap_pv_pool; | | 254 | static struct pool pmap_pv_pool; |
255 | static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); | | 255 | static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); |
256 | static void pmap_bootstrap_pv_page_free(struct pool *, void *); | | 256 | static void pmap_bootstrap_pv_page_free(struct pool *, void *); |
257 | static struct pool_allocator pmap_bootstrap_pv_allocator = { | | 257 | static struct pool_allocator pmap_bootstrap_pv_allocator = { |
258 | pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free | | 258 | pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free |
259 | }; | | 259 | }; |
260 | | | 260 | |
261 | /* | | 261 | /* |
262 | * Pool and cache of l2_dtable structures. | | 262 | * Pool and cache of l2_dtable structures. |
263 | * We use a cache to avoid clearing the structures when they're | | 263 | * We use a cache to avoid clearing the structures when they're |
264 | * allocated. (196 bytes) | | 264 | * allocated. (196 bytes) |
265 | */ | | 265 | */ |
266 | static struct pool_cache pmap_l2dtable_cache; | | 266 | static struct pool_cache pmap_l2dtable_cache; |
267 | static vaddr_t pmap_kernel_l2dtable_kva; | | 267 | static vaddr_t pmap_kernel_l2dtable_kva; |
268 | | | 268 | |
269 | /* | | 269 | /* |
270 | * Pool and cache of L2 page descriptors. | | 270 | * Pool and cache of L2 page descriptors. |
271 | * We use a cache to avoid clearing the descriptor tables | | 271 | * We use a cache to avoid clearing the descriptor tables |
272 | * when they're allocated. (1KB) | | 272 | * when they're allocated. (1KB) |
273 | */ | | 273 | */ |
274 | static struct pool_cache pmap_l2ptp_cache; | | 274 | static struct pool_cache pmap_l2ptp_cache; |
275 | static vaddr_t pmap_kernel_l2ptp_kva; | | 275 | static vaddr_t pmap_kernel_l2ptp_kva; |
276 | static paddr_t pmap_kernel_l2ptp_phys; | | 276 | static paddr_t pmap_kernel_l2ptp_phys; |
277 | | | 277 | |
278 | #ifdef PMAPCOUNTERS | | 278 | #ifdef PMAPCOUNTERS |
279 | #define PMAP_EVCNT_INITIALIZER(name) \ | | 279 | #define PMAP_EVCNT_INITIALIZER(name) \ |
280 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) | | 280 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) |
281 | | | 281 | |
282 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 282 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
283 | static struct evcnt pmap_ev_vac_clean_one = | | 283 | static struct evcnt pmap_ev_vac_clean_one = |
284 | PMAP_EVCNT_INITIALIZER("clean page (1 color)"); | | 284 | PMAP_EVCNT_INITIALIZER("clean page (1 color)"); |
285 | static struct evcnt pmap_ev_vac_flush_one = | | 285 | static struct evcnt pmap_ev_vac_flush_one = |
286 | PMAP_EVCNT_INITIALIZER("flush page (1 color)"); | | 286 | PMAP_EVCNT_INITIALIZER("flush page (1 color)"); |
287 | static struct evcnt pmap_ev_vac_flush_lots = | | 287 | static struct evcnt pmap_ev_vac_flush_lots = |
288 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors)"); | | 288 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors)"); |
289 | static struct evcnt pmap_ev_vac_flush_lots2 = | | 289 | static struct evcnt pmap_ev_vac_flush_lots2 = |
290 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)"); | | 290 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)"); |
291 | EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one); | | 291 | EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one); |
292 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one); | | 292 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one); |
293 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots); | | 293 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots); |
294 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2); | | 294 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2); |
295 | | | 295 | |
296 | static struct evcnt pmap_ev_vac_color_new = | | 296 | static struct evcnt pmap_ev_vac_color_new = |
297 | PMAP_EVCNT_INITIALIZER("new page color"); | | 297 | PMAP_EVCNT_INITIALIZER("new page color"); |
298 | static struct evcnt pmap_ev_vac_color_reuse = | | 298 | static struct evcnt pmap_ev_vac_color_reuse = |
299 | PMAP_EVCNT_INITIALIZER("ok first page color"); | | 299 | PMAP_EVCNT_INITIALIZER("ok first page color"); |
300 | static struct evcnt pmap_ev_vac_color_ok = | | 300 | static struct evcnt pmap_ev_vac_color_ok = |
301 | PMAP_EVCNT_INITIALIZER("ok page color"); | | 301 | PMAP_EVCNT_INITIALIZER("ok page color"); |
302 | static struct evcnt pmap_ev_vac_color_blind = | | 302 | static struct evcnt pmap_ev_vac_color_blind = |
303 | PMAP_EVCNT_INITIALIZER("blind page color"); | | 303 | PMAP_EVCNT_INITIALIZER("blind page color"); |
304 | static struct evcnt pmap_ev_vac_color_change = | | 304 | static struct evcnt pmap_ev_vac_color_change = |
305 | PMAP_EVCNT_INITIALIZER("change page color"); | | 305 | PMAP_EVCNT_INITIALIZER("change page color"); |
306 | static struct evcnt pmap_ev_vac_color_erase = | | 306 | static struct evcnt pmap_ev_vac_color_erase = |
307 | PMAP_EVCNT_INITIALIZER("erase page color"); | | 307 | PMAP_EVCNT_INITIALIZER("erase page color"); |
308 | static struct evcnt pmap_ev_vac_color_none = | | 308 | static struct evcnt pmap_ev_vac_color_none = |
309 | PMAP_EVCNT_INITIALIZER("no page color"); | | 309 | PMAP_EVCNT_INITIALIZER("no page color"); |
310 | static struct evcnt pmap_ev_vac_color_restore = | | 310 | static struct evcnt pmap_ev_vac_color_restore = |
311 | PMAP_EVCNT_INITIALIZER("restore page color"); | | 311 | PMAP_EVCNT_INITIALIZER("restore page color"); |
312 | | | 312 | |
313 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); | | 313 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); |
314 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); | | 314 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); |
315 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); | | 315 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); |
316 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); | | 316 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); |
317 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); | | 317 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); |
318 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); | | 318 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); |
319 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); | | 319 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); |
320 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); | | 320 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); |
321 | #endif | | 321 | #endif |
322 | | | 322 | |
323 | static struct evcnt pmap_ev_mappings = | | 323 | static struct evcnt pmap_ev_mappings = |
324 | PMAP_EVCNT_INITIALIZER("pages mapped"); | | 324 | PMAP_EVCNT_INITIALIZER("pages mapped"); |
325 | static struct evcnt pmap_ev_unmappings = | | 325 | static struct evcnt pmap_ev_unmappings = |
326 | PMAP_EVCNT_INITIALIZER("pages unmapped"); | | 326 | PMAP_EVCNT_INITIALIZER("pages unmapped"); |
327 | static struct evcnt pmap_ev_remappings = | | 327 | static struct evcnt pmap_ev_remappings = |
328 | PMAP_EVCNT_INITIALIZER("pages remapped"); | | 328 | PMAP_EVCNT_INITIALIZER("pages remapped"); |
329 | | | 329 | |
330 | EVCNT_ATTACH_STATIC(pmap_ev_mappings); | | 330 | EVCNT_ATTACH_STATIC(pmap_ev_mappings); |
331 | EVCNT_ATTACH_STATIC(pmap_ev_unmappings); | | 331 | EVCNT_ATTACH_STATIC(pmap_ev_unmappings); |
332 | EVCNT_ATTACH_STATIC(pmap_ev_remappings); | | 332 | EVCNT_ATTACH_STATIC(pmap_ev_remappings); |
333 | | | 333 | |
334 | static struct evcnt pmap_ev_kernel_mappings = | | 334 | static struct evcnt pmap_ev_kernel_mappings = |
335 | PMAP_EVCNT_INITIALIZER("kernel pages mapped"); | | 335 | PMAP_EVCNT_INITIALIZER("kernel pages mapped"); |
336 | static struct evcnt pmap_ev_kernel_unmappings = | | 336 | static struct evcnt pmap_ev_kernel_unmappings = |
337 | PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); | | 337 | PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); |
338 | static struct evcnt pmap_ev_kernel_remappings = | | 338 | static struct evcnt pmap_ev_kernel_remappings = |
339 | PMAP_EVCNT_INITIALIZER("kernel pages remapped"); | | 339 | PMAP_EVCNT_INITIALIZER("kernel pages remapped"); |
340 | | | 340 | |
341 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); | | 341 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); |
342 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); | | 342 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); |
343 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); | | 343 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); |
344 | | | 344 | |
345 | static struct evcnt pmap_ev_kenter_mappings = | | 345 | static struct evcnt pmap_ev_kenter_mappings = |
346 | PMAP_EVCNT_INITIALIZER("kenter pages mapped"); | | 346 | PMAP_EVCNT_INITIALIZER("kenter pages mapped"); |
347 | static struct evcnt pmap_ev_kenter_unmappings = | | 347 | static struct evcnt pmap_ev_kenter_unmappings = |
348 | PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); | | 348 | PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); |
349 | static struct evcnt pmap_ev_kenter_remappings = | | 349 | static struct evcnt pmap_ev_kenter_remappings = |
350 | PMAP_EVCNT_INITIALIZER("kenter pages remapped"); | | 350 | PMAP_EVCNT_INITIALIZER("kenter pages remapped"); |
351 | static struct evcnt pmap_ev_pt_mappings = | | 351 | static struct evcnt pmap_ev_pt_mappings = |
352 | PMAP_EVCNT_INITIALIZER("page table pages mapped"); | | 352 | PMAP_EVCNT_INITIALIZER("page table pages mapped"); |
353 | | | 353 | |
354 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); | | 354 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); |
355 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); | | 355 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); |
356 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); | | 356 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); |
357 | EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); | | 357 | EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); |
358 | | | 358 | |
359 | static struct evcnt pmap_ev_fixup_mod = | | 359 | static struct evcnt pmap_ev_fixup_mod = |
360 | PMAP_EVCNT_INITIALIZER("page modification emulations"); | | 360 | PMAP_EVCNT_INITIALIZER("page modification emulations"); |
361 | static struct evcnt pmap_ev_fixup_ref = | | 361 | static struct evcnt pmap_ev_fixup_ref = |
362 | PMAP_EVCNT_INITIALIZER("page reference emulations"); | | 362 | PMAP_EVCNT_INITIALIZER("page reference emulations"); |
363 | static struct evcnt pmap_ev_fixup_exec = | | 363 | static struct evcnt pmap_ev_fixup_exec = |
364 | PMAP_EVCNT_INITIALIZER("exec pages fixed up"); | | 364 | PMAP_EVCNT_INITIALIZER("exec pages fixed up"); |
365 | static struct evcnt pmap_ev_fixup_pdes = | | 365 | static struct evcnt pmap_ev_fixup_pdes = |
366 | PMAP_EVCNT_INITIALIZER("pdes fixed up"); | | 366 | PMAP_EVCNT_INITIALIZER("pdes fixed up"); |
367 | #ifndef ARM_MMU_EXTENDED | | 367 | #ifndef ARM_MMU_EXTENDED |
368 | static struct evcnt pmap_ev_fixup_ptesync = | | 368 | static struct evcnt pmap_ev_fixup_ptesync = |
369 | PMAP_EVCNT_INITIALIZER("ptesync fixed"); | | 369 | PMAP_EVCNT_INITIALIZER("ptesync fixed"); |
370 | #endif | | 370 | #endif |
371 | | | 371 | |
372 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_mod); | | 372 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_mod); |
373 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_ref); | | 373 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_ref); |
374 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_exec); | | 374 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_exec); |
375 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_pdes); | | 375 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_pdes); |
376 | #ifndef ARM_MMU_EXTENDED | | 376 | #ifndef ARM_MMU_EXTENDED |
377 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_ptesync); | | 377 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_ptesync); |
378 | #endif | | 378 | #endif |
379 | | | 379 | |
380 | #ifdef PMAP_CACHE_VIPT | | 380 | #ifdef PMAP_CACHE_VIPT |
381 | static struct evcnt pmap_ev_exec_mappings = | | 381 | static struct evcnt pmap_ev_exec_mappings = |
382 | PMAP_EVCNT_INITIALIZER("exec pages mapped"); | | 382 | PMAP_EVCNT_INITIALIZER("exec pages mapped"); |
383 | static struct evcnt pmap_ev_exec_cached = | | 383 | static struct evcnt pmap_ev_exec_cached = |
384 | PMAP_EVCNT_INITIALIZER("exec pages cached"); | | 384 | PMAP_EVCNT_INITIALIZER("exec pages cached"); |
385 | | | 385 | |
386 | EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); | | 386 | EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); |
387 | EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); | | 387 | EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); |
388 | | | 388 | |
389 | static struct evcnt pmap_ev_exec_synced = | | 389 | static struct evcnt pmap_ev_exec_synced = |
390 | PMAP_EVCNT_INITIALIZER("exec pages synced"); | | 390 | PMAP_EVCNT_INITIALIZER("exec pages synced"); |
391 | static struct evcnt pmap_ev_exec_synced_map = | | 391 | static struct evcnt pmap_ev_exec_synced_map = |
392 | PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); | | 392 | PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); |
393 | static struct evcnt pmap_ev_exec_synced_unmap = | | 393 | static struct evcnt pmap_ev_exec_synced_unmap = |
394 | PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); | | 394 | PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); |
395 | static struct evcnt pmap_ev_exec_synced_remap = | | 395 | static struct evcnt pmap_ev_exec_synced_remap = |
396 | PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); | | 396 | PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); |
397 | static struct evcnt pmap_ev_exec_synced_clearbit = | | 397 | static struct evcnt pmap_ev_exec_synced_clearbit = |
398 | PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); | | 398 | PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); |
399 | #ifndef ARM_MMU_EXTENDED | | 399 | #ifndef ARM_MMU_EXTENDED |
400 | static struct evcnt pmap_ev_exec_synced_kremove = | | 400 | static struct evcnt pmap_ev_exec_synced_kremove = |
401 | PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); | | 401 | PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); |
402 | #endif | | 402 | #endif |
403 | | | 403 | |
404 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); | | 404 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); |
405 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); | | 405 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); |
406 | #ifndef ARM_MMU_EXTENDED | | 406 | #ifndef ARM_MMU_EXTENDED |
407 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); | | 407 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); |
408 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); | | 408 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); |
409 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); | | 409 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); |
410 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); | | 410 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); |
411 | #endif | | 411 | #endif |
412 | | | 412 | |
413 | static struct evcnt pmap_ev_exec_discarded_unmap = | | 413 | static struct evcnt pmap_ev_exec_discarded_unmap = |
414 | PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); | | 414 | PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); |
415 | static struct evcnt pmap_ev_exec_discarded_zero = | | 415 | static struct evcnt pmap_ev_exec_discarded_zero = |
416 | PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); | | 416 | PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); |
417 | static struct evcnt pmap_ev_exec_discarded_copy = | | 417 | static struct evcnt pmap_ev_exec_discarded_copy = |
418 | PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); | | 418 | PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); |
419 | static struct evcnt pmap_ev_exec_discarded_page_protect = | | 419 | static struct evcnt pmap_ev_exec_discarded_page_protect = |
420 | PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); | | 420 | PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); |
421 | static struct evcnt pmap_ev_exec_discarded_clearbit = | | 421 | static struct evcnt pmap_ev_exec_discarded_clearbit = |
422 | PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); | | 422 | PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); |
423 | static struct evcnt pmap_ev_exec_discarded_kremove = | | 423 | static struct evcnt pmap_ev_exec_discarded_kremove = |
424 | PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); | | 424 | PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); |
425 | #ifdef ARM_MMU_EXTENDED | | 425 | #ifdef ARM_MMU_EXTENDED |
426 | static struct evcnt pmap_ev_exec_discarded_modfixup = | | 426 | static struct evcnt pmap_ev_exec_discarded_modfixup = |
427 | PMAP_EVCNT_INITIALIZER("exec pages discarded (MF)"); | | 427 | PMAP_EVCNT_INITIALIZER("exec pages discarded (MF)"); |
428 | #endif | | 428 | #endif |
429 | | | 429 | |
430 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); | | 430 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); |
431 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); | | 431 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); |
432 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); | | 432 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); |
433 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); | | 433 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); |
434 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); | | 434 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); |
435 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); | | 435 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); |
436 | #ifdef ARM_MMU_EXTENDED | | 436 | #ifdef ARM_MMU_EXTENDED |
437 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_modfixup); | | 437 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_modfixup); |
438 | #endif | | 438 | #endif |
439 | #endif /* PMAP_CACHE_VIPT */ | | 439 | #endif /* PMAP_CACHE_VIPT */ |
440 | | | 440 | |
441 | static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); | | 441 | static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); |
442 | static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); | | 442 | static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); |
443 | static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); | | 443 | static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); |
444 | | | 444 | |
445 | EVCNT_ATTACH_STATIC(pmap_ev_updates); | | 445 | EVCNT_ATTACH_STATIC(pmap_ev_updates); |
446 | EVCNT_ATTACH_STATIC(pmap_ev_collects); | | 446 | EVCNT_ATTACH_STATIC(pmap_ev_collects); |
447 | EVCNT_ATTACH_STATIC(pmap_ev_activations); | | 447 | EVCNT_ATTACH_STATIC(pmap_ev_activations); |
448 | | | 448 | |
449 | #define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) | | 449 | #define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) |
450 | #else | | 450 | #else |
451 | #define PMAPCOUNT(x) ((void)0) | | 451 | #define PMAPCOUNT(x) ((void)0) |
452 | #endif | | 452 | #endif |
453 | | | 453 | |
454 | #ifdef ARM_MMU_EXTENDED | | 454 | #ifdef ARM_MMU_EXTENDED |
455 | void pmap_md_pdetab_activate(pmap_t, struct lwp *); | | 455 | void pmap_md_pdetab_activate(pmap_t, struct lwp *); |
456 | void pmap_md_pdetab_deactivate(pmap_t pm); | | 456 | void pmap_md_pdetab_deactivate(pmap_t pm); |
457 | #endif | | 457 | #endif |
458 | | | 458 | |
459 | /* | | 459 | /* |
460 | * pmap copy/zero page, and mem(5) hook point | | 460 | * pmap copy/zero page, and mem(5) hook point |
461 | */ | | 461 | */ |
462 | static pt_entry_t *csrc_pte, *cdst_pte; | | 462 | static pt_entry_t *csrc_pte, *cdst_pte; |
463 | static vaddr_t csrcp, cdstp; | | 463 | static vaddr_t csrcp, cdstp; |
464 | #ifdef MULTIPROCESSOR | | 464 | #ifdef MULTIPROCESSOR |
465 | static size_t cnptes; | | 465 | static size_t cnptes; |
466 | #define cpu_csrc_pte(o) (csrc_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT)) | | 466 | #define cpu_csrc_pte(o) (csrc_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT)) |
467 | #define cpu_cdst_pte(o) (cdst_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT)) | | 467 | #define cpu_cdst_pte(o) (cdst_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT)) |
468 | #define cpu_csrcp(o) (csrcp + L2_S_SIZE * cnptes * cpu_number() + (o)) | | 468 | #define cpu_csrcp(o) (csrcp + L2_S_SIZE * cnptes * cpu_number() + (o)) |
469 | #define cpu_cdstp(o) (cdstp + L2_S_SIZE * cnptes * cpu_number() + (o)) | | 469 | #define cpu_cdstp(o) (cdstp + L2_S_SIZE * cnptes * cpu_number() + (o)) |
470 | #else | | 470 | #else |
471 | #define cpu_csrc_pte(o) (csrc_pte + ((o) >> L2_S_SHIFT)) | | 471 | #define cpu_csrc_pte(o) (csrc_pte + ((o) >> L2_S_SHIFT)) |
472 | #define cpu_cdst_pte(o) (cdst_pte + ((o) >> L2_S_SHIFT)) | | 472 | #define cpu_cdst_pte(o) (cdst_pte + ((o) >> L2_S_SHIFT)) |
473 | #define cpu_csrcp(o) (csrcp + (o)) | | 473 | #define cpu_csrcp(o) (csrcp + (o)) |
474 | #define cpu_cdstp(o) (cdstp + (o)) | | 474 | #define cpu_cdstp(o) (cdstp + (o)) |
475 | #endif | | 475 | #endif |
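Reviewer note: under MULTIPROCESSOR each CPU gets its own slice of the copy/zero window, cnptes PTEs (cnptes * L2_S_SIZE bytes of KVA) apart, so concurrent pmap_copy_page()/pmap_zero_page() calls never touch each other's PTEs. A worked example of the indexing, assuming cnptes == 2 and 4KB small pages (L2_S_SIZE == 0x1000, L2_S_SHIFT == 12); those constants are assumptions of this sketch:

    /*
     * CPU 0: cpu_csrc_pte(o) == &csrc_pte[(o) >> 12]
     *        cpu_csrcp(o)    == csrcp + o
     * CPU 1: cpu_csrc_pte(o) == &csrc_pte[2 + ((o) >> 12)]
     *        cpu_csrcp(o)    == csrcp + 0x2000 + o
     * i.e. CPU n's window starts n * cnptes pages into the region.
     */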
476 | vaddr_t memhook; /* used by mem.c & others */ | | 476 | vaddr_t memhook; /* used by mem.c & others */ |
477 | kmutex_t memlock __cacheline_aligned; /* used by mem.c & others */ | | 477 | kmutex_t memlock __cacheline_aligned; /* used by mem.c & others */ |
478 | kmutex_t pmap_lock __cacheline_aligned; | | 478 | kmutex_t pmap_lock __cacheline_aligned; |
479 | kmutex_t kpm_lock __cacheline_aligned; | | 479 | kmutex_t kpm_lock __cacheline_aligned; |
480 | extern void *msgbufaddr; | | 480 | extern void *msgbufaddr; |
481 | int pmap_kmpages; | | 481 | int pmap_kmpages; |
482 | /* | | 482 | /* |
483 | * Flag to indicate if pmap_init() has done its thing | | 483 | * Flag to indicate if pmap_init() has done its thing |
484 | */ | | 484 | */ |
485 | bool pmap_initialized; | | 485 | bool pmap_initialized; |
486 | | | 486 | |
487 | #if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) | | 487 | #if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) |
488 | /* | | 488 | /* |
489 | * Virtual end of direct-mapped memory | | 489 | * Virtual end of direct-mapped memory |
490 | */ | | 490 | */ |
491 | vaddr_t pmap_directlimit; | | 491 | vaddr_t pmap_directlimit; |
492 | #endif | | 492 | #endif |
493 | | | 493 | |
494 | /* | | 494 | /* |
495 | * Misc. locking data structures | | 495 | * Misc. locking data structures |
496 | */ | | 496 | */ |
497 | | | 497 | |
498 | static inline void | | 498 | static inline void |
499 | pmap_acquire_pmap_lock(pmap_t pm) | | 499 | pmap_acquire_pmap_lock(pmap_t pm) |
500 | { | | 500 | { |
501 | #if defined(MULTIPROCESSOR) && defined(DDB) | | 501 | #if defined(MULTIPROCESSOR) && defined(DDB) |
502 | if (__predict_false(db_onproc != NULL)) | | 502 | if (__predict_false(db_onproc != NULL)) |
503 | return; | | 503 | return; |
504 | #endif | | 504 | #endif |
505 | | | 505 | |
506 | mutex_enter(&pm->pm_lock); | | 506 | mutex_enter(&pm->pm_lock); |
507 | } | | 507 | } |
508 | | | 508 | |
509 | static inline void | | 509 | static inline void |
510 | pmap_release_pmap_lock(pmap_t pm) | | 510 | pmap_release_pmap_lock(pmap_t pm) |
511 | { | | 511 | { |
512 | #if defined(MULTIPROCESSOR) && defined(DDB) | | 512 | #if defined(MULTIPROCESSOR) && defined(DDB) |
513 | if (__predict_false(db_onproc != NULL)) | | 513 | if (__predict_false(db_onproc != NULL)) |
514 | return; | | 514 | return; |
515 | #endif | | 515 | #endif |
516 | mutex_exit(&pm->pm_lock); | | 516 | mutex_exit(&pm->pm_lock); |
517 | } | | 517 | } |
518 | | | 518 | |
519 | static inline void | | 519 | static inline void |
520 | pmap_acquire_page_lock(struct vm_page_md *md) | | 520 | pmap_acquire_page_lock(struct vm_page_md *md) |
521 | { | | 521 | { |
522 | mutex_enter(&pmap_lock); | | 522 | mutex_enter(&pmap_lock); |
523 | } | | 523 | } |
524 | | | 524 | |
525 | static inline void | | 525 | static inline void |
526 | pmap_release_page_lock(struct vm_page_md *md) | | 526 | pmap_release_page_lock(struct vm_page_md *md) |
527 | { | | 527 | { |
528 | mutex_exit(&pmap_lock); | | 528 | mutex_exit(&pmap_lock); |
529 | } | | 529 | } |
530 | | | 530 | |
531 | #ifdef DIAGNOSTIC | | 531 | #ifdef DIAGNOSTIC |
532 | static inline int | | 532 | static inline int |
533 | pmap_page_locked_p(struct vm_page_md *md) | | 533 | pmap_page_locked_p(struct vm_page_md *md) |
534 | { | | 534 | { |
535 | return mutex_owned(&pmap_lock); | | 535 | return mutex_owned(&pmap_lock); |
536 | } | | 536 | } |
537 | #endif | | 537 | #endif |
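Reviewer note: pmap_page_locked_p() exists purely so DIAGNOSTIC kernels can assert the global pmap_lock is held before touching a page's pv state; KASSERT() compiles away otherwise, which matches the #ifdef above. A hedged usage sketch; example_pv_walk and the lock nesting shown are illustrative only, the file's real callers decide the actual ordering:

    static void
    example_pv_walk(pmap_t pm, struct vm_page_md *md)
    {
            pmap_acquire_pmap_lock(pm);     /* per-pmap pm_lock */
            pmap_acquire_page_lock(md);     /* global pmap_lock */
            KASSERT(pmap_page_locked_p(md));

            /* ... examine or modify md's pv list here ... */

            pmap_release_page_lock(md);
            pmap_release_pmap_lock(pm);
    }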
538 | | | 538 | |
539 | | | 539 | |
540 | /* | | 540 | /* |
541 | * Metadata for L1 translation tables. | | 541 | * Metadata for L1 translation tables. |
542 | */ | | 542 | */ |
543 | #ifndef ARM_MMU_EXTENDED | | 543 | #ifndef ARM_MMU_EXTENDED |
544 | struct l1_ttable { | | 544 | struct l1_ttable { |
545 | /* Entry on the L1 Table list */ | | 545 | /* Entry on the L1 Table list */ |
546 | SLIST_ENTRY(l1_ttable) l1_link; | | 546 | SLIST_ENTRY(l1_ttable) l1_link; |
547 | | | 547 | |
548 | /* Entry on the L1 Least Recently Used list */ | | 548 | /* Entry on the L1 Least Recently Used list */ |
549 | TAILQ_ENTRY(l1_ttable) l1_lru; | | 549 | TAILQ_ENTRY(l1_ttable) l1_lru; |
550 | | | 550 | |
551 | /* Track how many domains are allocated from this L1 */ | | 551 | /* Track how many domains are allocated from this L1 */ |
552 | volatile u_int l1_domain_use_count; | | 552 | volatile u_int l1_domain_use_count; |
553 | | | 553 | |
554 | /* | | 554 | /* |
555 | * A free-list of domain numbers for this L1. | | 555 | * A free-list of domain numbers for this L1. |
556 | * We avoid using ffs() and a bitmap to track domains since ffs() | | 556 | * We avoid using ffs() and a bitmap to track domains since ffs() |
557 | * is slow on ARM. | | 557 | * is slow on ARM. |
558 | */ | | 558 | */ |
559 | uint8_t l1_domain_first; | | 559 | uint8_t l1_domain_first; |
560 | uint8_t l1_domain_free[PMAP_DOMAINS]; | | 560 | uint8_t l1_domain_free[PMAP_DOMAINS]; |
561 | | | 561 | |
562 | /* Physical address of this L1 page table */ | | 562 | /* Physical address of this L1 page table */ |
563 | paddr_t l1_physaddr; | | 563 | paddr_t l1_physaddr; |
564 | | | 564 | |
565 | /* KVA of this L1 page table */ | | 565 | /* KVA of this L1 page table */ |
566 | pd_entry_t *l1_kva; | | 566 | pd_entry_t *l1_kva; |
567 | }; | | 567 | }; |
568 | | | 568 | |
569 | /* | | 569 | /* |
570 | * L1 Page Tables are tracked using a Least Recently Used list. | | 570 | * L1 Page Tables are tracked using a Least Recently Used list. |
571 | * - New L1s are allocated from the HEAD. | | 571 | * - New L1s are allocated from the HEAD. |
572 | * - Freed L1s are added to the TAIL. | | 572 | * - Freed L1s are added to the TAIL. |
573 | * - Recently accessed L1s (where an 'access' is some change to one of | | 573 | * - Recently accessed L1s (where an 'access' is some change to one of |
574 | * the userland pmaps which owns this L1) are moved to the TAIL. | | 574 | * the userland pmaps which owns this L1) are moved to the TAIL. |
575 | */ | | 575 | */ |
576 | static TAILQ_HEAD(, l1_ttable) l1_lru_list; | | 576 | static TAILQ_HEAD(, l1_ttable) l1_lru_list; |
577 | static kmutex_t l1_lru_lock __cacheline_aligned; | | 577 | static kmutex_t l1_lru_lock __cacheline_aligned; |
578 | | | 578 | |
579 | /* | | 579 | /* |
580 | * A list of all L1 tables | | 580 | * A list of all L1 tables |
581 | */ | | 581 | */ |
582 | static SLIST_HEAD(, l1_ttable) l1_list; | | 582 | static SLIST_HEAD(, l1_ttable) l1_list; |
583 | #endif /* ARM_MMU_EXTENDED */ | | 583 | #endif /* ARM_MMU_EXTENDED */ |
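Reviewer note: to make the HEAD/TAIL policy above concrete, here is a hedged sketch of the list motion under l1_lru_lock; it is not the file's actual pmap_alloc_l1()/pmap_free_l1() bodies, which also juggle the per-L1 domain free-list:

    mutex_enter(&l1_lru_lock);

    /* Allocate: take the least-recently-used L1 from the head. */
    struct l1_ttable *l1 = TAILQ_FIRST(&l1_lru_list);
    TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

    /* Free, or touch on access: requeue at the tail, so busy L1s
     * drift away from the allocation end of the list. */
    TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

    mutex_exit(&l1_lru_lock);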
584 | | | 584 | |
585 | /* | | 585 | /* |
586 | * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. | | 586 | * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. |
587 | * | | 587 | * |
588 | * This is normally 16MB worth of L2 page descriptors for any given pmap. | | 588 | * This is normally 16MB worth of L2 page descriptors for any given pmap. |
589 | * Reference counts are maintained for L2 descriptors so they can be | | 589 | * Reference counts are maintained for L2 descriptors so they can be |
590 | * freed when empty. | | 590 | * freed when empty. |
591 | */ | | 591 | */ |
592 | struct l2_bucket { | | 592 | struct l2_bucket { |
593 | pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ | | 593 | pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ |
594 | paddr_t l2b_pa; /* Physical address of same */ | | 594 | paddr_t l2b_pa; /* Physical address of same */ |
595 | u_short l2b_l1slot; /* This L2 table's L1 index */ | | 595 | u_short l2b_l1slot; /* This L2 table's L1 index */ |
596 | u_short l2b_occupancy; /* How many active descriptors */ | | 596 | u_short l2b_occupancy; /* How many active descriptors */ |
597 | }; | | 597 | }; |
598 | | | 598 | |
599 | struct l2_dtable { | | 599 | struct l2_dtable { |
600 | /* The number of L2 page descriptors allocated to this l2_dtable */ | | 600 | /* The number of L2 page descriptors allocated to this l2_dtable */ |
601 | u_int l2_occupancy; | | 601 | u_int l2_occupancy; |
602 | | | 602 | |
603 | /* List of L2 page descriptors */ | | 603 | /* List of L2 page descriptors */ |
604 | struct l2_bucket l2_bucket[L2_BUCKET_SIZE]; | | 604 | struct l2_bucket l2_bucket[L2_BUCKET_SIZE]; |
605 | }; | | 605 | }; |
606 | | | 606 | |
607 | /* | | 607 | /* |
608 | * Given an L1 table index, calculate the corresponding l2_dtable index | | 608 | * Given an L1 table index, calculate the corresponding l2_dtable index |
609 | * and bucket index within the l2_dtable. | | 609 | * and bucket index within the l2_dtable. |
610 | */ | | 610 | */ |
611 | #define L2_BUCKET_XSHIFT (L2_BUCKET_XLOG2 - L1_S_SHIFT) | | 611 | #define L2_BUCKET_XSHIFT (L2_BUCKET_XLOG2 - L1_S_SHIFT) |
612 | #define L2_BUCKET_XFRAME (~(vaddr_t)0 << L2_BUCKET_XLOG2) | | 612 | #define L2_BUCKET_XFRAME (~(vaddr_t)0 << L2_BUCKET_XLOG2) |
613 | #define L2_BUCKET_IDX(l1slot) ((l1slot) >> L2_BUCKET_XSHIFT) | | 613 | #define L2_BUCKET_IDX(l1slot) ((l1slot) >> L2_BUCKET_XSHIFT) |
614 | #define L2_IDX(l1slot) (L2_BUCKET_IDX(l1slot) >> L2_BUCKET_LOG2) | | 614 | #define L2_IDX(l1slot) (L2_BUCKET_IDX(l1slot) >> L2_BUCKET_LOG2) |
615 | #define L2_BUCKET(l1slot) (L2_BUCKET_IDX(l1slot) & (L2_BUCKET_SIZE - 1)) | | 615 | #define L2_BUCKET(l1slot) (L2_BUCKET_IDX(l1slot) & (L2_BUCKET_SIZE - 1)) |
616 | | | 616 | |
617 | __CTASSERT(0x100000000ULL == ((uint64_t)L2_SIZE * L2_BUCKET_SIZE * L1_S_SIZE)); | | 617 | __CTASSERT(0x100000000ULL == ((uint64_t)L2_SIZE * L2_BUCKET_SIZE * L1_S_SIZE)); |
618 | __CTASSERT(L2_BUCKET_XFRAME == ~(L2_BUCKET_XSIZE-1)); | | 618 | __CTASSERT(L2_BUCKET_XFRAME == ~(L2_BUCKET_XSIZE-1)); |
619 | | | 619 | |
620 | /* | | 620 | /* |
621 | * Given a virtual address, this macro returns the | | 621 | * Given a virtual address, this macro returns the |
622 | * virtual address required to drop into the next L2 bucket. | | 622 | * virtual address required to drop into the next L2 bucket. |
623 | */ | | 623 | */ |
624 | #define L2_NEXT_BUCKET_VA(va) (((va) & L2_BUCKET_XFRAME) + L2_BUCKET_XSIZE) | | 624 | #define L2_NEXT_BUCKET_VA(va) (((va) & L2_BUCKET_XFRAME) + L2_BUCKET_XSIZE) |
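Reviewer note: a worked example of the slot arithmetic, assuming the common small-page configuration where L1_S_SHIFT == 20 and L2_BUCKET_XLOG2 == 20 (so L2_BUCKET_XSHIFT == 0 and L2_BUCKET_XSIZE == 1MB) and L2_BUCKET_LOG2 == 4; only the constants are assumptions here, the macros are the file's own:

    /*
     * va = 0x12345678, l1slot = va >> L1_S_SHIFT = 0x123 (291):
     *   L2_BUCKET_IDX(291)    = 291 >> 0  = 291
     *   L2_IDX(291)           = 291 >> 4  = 18   -> pm->pm_l2[18]
     *   L2_BUCKET(291)        = 291 & 15  = 3    -> l2_bucket[3]
     *   L2_NEXT_BUCKET_VA(va) = (va & ~0xfffff) + 0x100000
     *                         = 0x12400000
     * Each l2_dtable thus spans 16 slots * 1MB = 16MB, matching the
     * comment above struct l2_bucket.
     */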
625 | | | 625 | |
626 | /* | | 626 | /* |
627 | * L2 allocation. | | 627 | * L2 allocation. |
628 | */ | | 628 | */ |
629 | #define pmap_alloc_l2_dtable() \ | | 629 | #define pmap_alloc_l2_dtable() \ |
630 | pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT) | | 630 | pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT) |
631 | #define pmap_free_l2_dtable(l2) \ | | 631 | #define pmap_free_l2_dtable(l2) \ |
632 | pool_cache_put(&pmap_l2dtable_cache, (l2)) | | 632 | pool_cache_put(&pmap_l2dtable_cache, (l2)) |
633 | #define pmap_alloc_l2_ptp(pap) \ | | 633 | #define pmap_alloc_l2_ptp(pap) \ |
634 | ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\ | | 634 | ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\ |
635 | PR_NOWAIT, (pap))) | | 635 | PR_NOWAIT, (pap))) |
636 | | | 636 | |
637 | /* | | 637 | /* |
638 | * We try to map the page tables write-through, if possible. However, not | | 638 | * We try to map the page tables write-through, if possible. However, not |
639 | * all CPUs have a write-through cache mode, so on those we have to sync | | 639 | * all CPUs have a write-through cache mode, so on those we have to sync |
640 | * the cache when we frob page tables. | | 640 | * the cache when we frob page tables. |
641 | * | | 641 | * |
642 | * We try to evaluate this at compile time, if possible. However, it's | | 642 | * We try to evaluate this at compile time, if possible. However, it's |
643 | * not always possible to do that, hence this run-time var. | | 643 | * not always possible to do that, hence this run-time var. |
644 | */ | | 644 | */ |
645 | int pmap_needs_pte_sync; | | 645 | int pmap_needs_pte_sync; |
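Reviewer note: "sync" here means cleaning the D-cache line that holds the PTE, so a table walker that does not snoop the D-cache sees the update; cpu_dcache_wb_range() is the standard NetBSD/arm write-back hook. A minimal sketch in the spirit of the PTE_SYNC() machinery defined in the headers (the helper name is illustrative):

    static inline void
    example_pte_sync(pt_entry_t *ptep)
    {
            if (pmap_needs_pte_sync)
                    cpu_dcache_wb_range((vaddr_t)ptep,
                        sizeof(pt_entry_t));
    }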
646 | | | 646 | |
647 | /* | | 647 | /* |
648 | * Real definition of pv_entry. | | 648 | * Real definition of pv_entry. |
649 | */ | | 649 | */ |
650 | struct pv_entry { | | 650 | struct pv_entry { |
651 | SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ | | 651 | SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ |
652 | pmap_t pv_pmap; /* pmap where mapping lies */ | | 652 | pmap_t pv_pmap; /* pmap where mapping lies */ |
653 | vaddr_t pv_va; /* virtual address for mapping */ | | 653 | vaddr_t pv_va; /* virtual address for mapping */ |
654 | u_int pv_flags; /* flags */ | | 654 | u_int pv_flags; /* flags */ |
655 | }; | | 655 | }; |
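Reviewer note: every managed page's vm_page_md carries a singly-linked list of these records, one per (pmap, va) mapping of the page, which is what the pmap_find_pv() prototyped below walks. A hedged sketch of such a lookup; 'pvh_list' as the SLIST head field in vm_page_md is an assumption of this sketch:

    static struct pv_entry *
    example_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va)
    {
            struct pv_entry *pv;

            /* 'pvh_list' head field is assumed, not quoted. */
            SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
                    if (pv->pv_pmap == pm && pv->pv_va == va)
                            return pv;
            }
            return NULL;
    }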
656 | | | 656 | |
657 | /* | | 657 | /* |
658 | * Macros to determine if a mapping might be resident in the | | 658 | * Macros to determine if a mapping might be resident in the |
659 | * instruction/data cache and/or TLB | | 659 | * instruction/data cache and/or TLB |
660 | */ | | 660 | */ |
661 | #if ARM_MMU_V7 > 0 && !defined(ARM_MMU_EXTENDED) | | 661 | #if ARM_MMU_V7 > 0 && !defined(ARM_MMU_EXTENDED) |
662 | /* | | 662 | /* |
663 | * Speculative loads by Cortex cores can cause TLB entries to be filled even if | | 663 | * Speculative loads by Cortex cores can cause TLB entries to be filled even if |
664 | * there are no explicit accesses, so there may be always be TLB entries to | | 664 | * there are no explicit accesses, so there may be always be TLB entries to |
665 | * flush. If we used ASIDs then this would not be a problem. | | 665 | * flush. If we used ASIDs then this would not be a problem. |
666 | */ | | 666 | */ |
667 | #define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) == PVF_EXEC) | | 667 | #define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) == PVF_EXEC) |
668 | #define PV_BEEN_REFD(f) (true) | | 668 | #define PV_BEEN_REFD(f) (true) |
669 | #else | | 669 | #else |
670 | #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) | | 670 | #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) |
671 | #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) | | 671 | #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) |
672 | #endif | | 672 | #endif |
673 | #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) | | 673 | #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) |
674 | #define PV_IS_KENTRY_P(f) (((f) & PVF_KENTRY) != 0) | | 674 | #define PV_IS_KENTRY_P(f) (((f) & PVF_KENTRY) != 0) |
675 | #define PV_IS_WRITE_P(f) (((f) & PVF_WRITE) != 0) | | 675 | #define PV_IS_WRITE_P(f) (((f) & PVF_WRITE) != 0) |

/*
 * Local prototypes
 */
static bool		pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t, size_t);
static void		pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
			    pt_entry_t **);
static bool		pmap_is_current(pmap_t) __unused;
static bool		pmap_is_cached(pmap_t);
static void		pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *,
			    pmap_t, vaddr_t, u_int);
static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t);
static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
static u_int		pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t,
			    u_int, u_int);

static void		pmap_pinit(pmap_t);
static int		pmap_pmap_ctor(void *, void *, int);

static void		pmap_alloc_l1(pmap_t);
static void		pmap_free_l1(pmap_t);
#ifndef ARM_MMU_EXTENDED
static void		pmap_use_l1(pmap_t);
#endif

static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
static void		pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
static int		pmap_l2ptp_ctor(void *, void *, int);
static int		pmap_l2dtable_ctor(void *, void *, int);

static void		pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
#ifdef PMAP_CACHE_VIVT
static void		pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
static void		pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
#endif

static void		pmap_clearbit(struct vm_page_md *, paddr_t, u_int);
#ifdef PMAP_CACHE_VIVT
static bool		pmap_clean_page(struct vm_page_md *, bool);
#endif
#ifdef PMAP_CACHE_VIPT
static void		pmap_syncicache_page(struct vm_page_md *, paddr_t);
enum pmap_flush_op {
	PMAP_FLUSH_PRIMARY,
	PMAP_FLUSH_SECONDARY,
	PMAP_CLEAN_PRIMARY
};
#ifndef ARM_MMU_EXTENDED
static void		pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op);
#endif
#endif
static void		pmap_page_remove(struct vm_page_md *, paddr_t);
static void		pmap_pv_remove(paddr_t);

#ifndef ARM_MMU_EXTENDED
static void		pmap_init_l1(struct l1_ttable *, pd_entry_t *);
#endif
static vaddr_t		kernel_pt_lookup(paddr_t);

#ifdef ARM_MMU_EXTENDED
static struct pool_cache pmap_l1tt_cache;

static int		pmap_l1tt_ctor(void *, void *, int);
static void *		pmap_l1tt_alloc(struct pool *, int);
static void		pmap_l1tt_free(struct pool *, void *);

static struct pool_allocator pmap_l1tt_allocator = {
	.pa_alloc = pmap_l1tt_alloc,
	.pa_free = pmap_l1tt_free,
	.pa_pagesz = L1TT_SIZE,
};
#endif

/*
 * Misc variables
 */
vaddr_t virtual_avail;
vaddr_t virtual_end;
vaddr_t pmap_curmaxkvaddr;

paddr_t avail_start;
paddr_t avail_end;

pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq);
pv_addr_t kernelpages;
pv_addr_t kernel_l1pt;
pv_addr_t systempage;

#ifdef PMAP_CACHE_VIPT
#define	PMAP_VALIDATE_MD_PAGE(md)	\
	KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \
	    "(md) %p: attrs=%#x urw=%u krw=%u", (md), \
	    (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings);
#endif /* PMAP_CACHE_VIPT */
/*
 * A bunch of routines to conditionally flush the caches/TLB depending
 * on whether the specified pmap actually needs to be flushed at any
 * given time.
 */
static inline void
pmap_tlb_flush_SE(pmap_t pm, vaddr_t va, u_int flags)
{
#ifdef ARM_MMU_EXTENDED
	pmap_tlb_invalidate_addr(pm, va);
#else
	if (pm->pm_cstate.cs_tlb_id != 0) {
		if (PV_BEEN_EXECD(flags)) {
			cpu_tlb_flushID_SE(va);
		} else if (PV_BEEN_REFD(flags)) {
			cpu_tlb_flushD_SE(va);
		}
	}
#endif /* ARM_MMU_EXTENDED */
}

#ifndef ARM_MMU_EXTENDED
static inline void
pmap_tlb_flushID(pmap_t pm)
{
	if (pm->pm_cstate.cs_tlb_id) {
		cpu_tlb_flushID();
#if ARM_MMU_V7 == 0
		/*
		 * Speculative loads by Cortex cores can fill TLB entries
		 * even without explicit accesses, so on ARMv7 there may
		 * always be TLB entries to flush (using ASIDs would avoid
		 * this).  Other CPUs do not speculate this way, so for
		 * them we can record that the TLB is now clean.
		 */
		pm->pm_cstate.cs_tlb = 0;
#endif /* ARM_MMU_V7 */
	}
}

static inline void
pmap_tlb_flushD(pmap_t pm)
{
	if (pm->pm_cstate.cs_tlb_d) {
		cpu_tlb_flushD();
#if ARM_MMU_V7 == 0
		/*
		 * Speculative loads by Cortex cores can fill TLB entries
		 * even without explicit accesses, so on ARMv7 there may
		 * always be TLB entries to flush (using ASIDs would avoid
		 * this).  Other CPUs do not speculate this way, so for
		 * them we can record that the TLB is now clean.
		 */
		pm->pm_cstate.cs_tlb_d = 0;
#endif /* ARM_MMU_V7 */
	}
}
#endif /* ARM_MMU_EXTENDED */

#ifdef PMAP_CACHE_VIVT
static inline void
pmap_cache_wbinv_page(pmap_t pm, vaddr_t va, bool do_inv, u_int flags)
{
	if (PV_BEEN_EXECD(flags) && pm->pm_cstate.cs_cache_id) {
		cpu_idcache_wbinv_range(va, PAGE_SIZE);
	} else if (PV_BEEN_REFD(flags) && pm->pm_cstate.cs_cache_d) {
		if (do_inv) {
			if (flags & PVF_WRITE)
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
			else
				cpu_dcache_inv_range(va, PAGE_SIZE);
		} else if (flags & PVF_WRITE) {
			cpu_dcache_wb_range(va, PAGE_SIZE);
		}
	}
}

static inline void
pmap_cache_wbinv_all(pmap_t pm, u_int flags)
{
	if (PV_BEEN_EXECD(flags)) {
		if (pm->pm_cstate.cs_cache_id) {
			cpu_idcache_wbinv_all();
			pm->pm_cstate.cs_cache = 0;
		}
	} else if (pm->pm_cstate.cs_cache_d) {
		cpu_dcache_wbinv_all();
		pm->pm_cstate.cs_cache_d = 0;
	}
}
#endif /* PMAP_CACHE_VIVT */

static inline uint8_t
pmap_domain(pmap_t pm)
{
#ifdef ARM_MMU_EXTENDED
	return pm == pmap_kernel() ? PMAP_DOMAIN_KERNEL : PMAP_DOMAIN_USER;
#else
	return pm->pm_domain;
#endif
}

static inline pd_entry_t *
pmap_l1_kva(pmap_t pm)
{
#ifdef ARM_MMU_EXTENDED
	return pm->pm_l1;
#else
	return pm->pm_l1->l1_kva;
#endif
}

static inline bool
pmap_is_current(pmap_t pm)
{
	if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm)
		return true;

	return false;
}

static inline bool
pmap_is_cached(pmap_t pm)
{
#ifdef ARM_MMU_EXTENDED
	if (pm == pmap_kernel())
		return true;
#ifdef MULTIPROCESSOR
	// Is this pmap active on any CPU?
	if (!kcpuset_iszero(pm->pm_active))
		return true;
#else
	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	// Is this pmap active?
	if (PMAP_PAI_ASIDVALID_P(PMAP_PAI(pm, ti), ti))
		return true;
#endif
#else
	struct cpu_info * const ci = curcpu();
	if (pm == pmap_kernel() || ci->ci_pmap_lastuser == NULL
	    || ci->ci_pmap_lastuser == pm)
		return true;
#endif /* ARM_MMU_EXTENDED */

	return false;
}

/*
 * PTE_SYNC_CURRENT:
 *
 *	Make sure the pte is written out to RAM.
 *	We need to do this if any of the following is true:
 *	  - We're dealing with the kernel pmap
 *	  - There is no pmap active in the cache/tlb.
 *	  - The specified pmap is 'active' in the cache/tlb.
 */

#ifdef PMAP_INCLUDE_PTE_SYNC
static inline void
pmap_pte_sync_current(pmap_t pm, pt_entry_t *ptep)
{
	if (PMAP_NEEDS_PTE_SYNC && pmap_is_cached(pm))
		PTE_SYNC(ptep);
	arm_dsb();
}

# define PTE_SYNC_CURRENT(pm, ptep)	pmap_pte_sync_current(pm, ptep)
#else
# define PTE_SYNC_CURRENT(pm, ptep)	__nothing
#endif
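
/*
 * Hedged usage sketch (mirrors the PTE-modifying paths in this file):
 * after rewriting a PTE in a pmap that may or may not be the current
 * one, the sync is made conditional on the pmap being visible to the
 * MMU at all:
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC_CURRENT(pm, ptep);
 *
 * For a pmap that is neither the kernel pmap nor cached/active, the
 * write-back can be deferred; pmap_free_l2_ptp() performs the catch-up
 * sync when such a table is recycled.
 */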

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm,
    vaddr_t va, u_int flags)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx",
	    (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va);
	UVMHIST_LOG(maphist, "...pv %#jx flags %#jx",
	    (uintptr_t)pv, flags, 0, 0);

	struct pv_entry **pvp;

	pv->pv_pmap = pm;
	pv->pv_va = va;
	pv->pv_flags = flags;

	pvp = &SLIST_FIRST(&md->pvh_list);
#ifdef PMAP_CACHE_VIPT
	/*
	 * Insert unmanaged entries, writeable first, at the head of
	 * the pv list.
	 */
	if (__predict_true(!PV_IS_KENTRY_P(flags))) {
		while (*pvp != NULL && PV_IS_KENTRY_P((*pvp)->pv_flags))
			pvp = &SLIST_NEXT(*pvp, pv_link);
	}
	if (!PV_IS_WRITE_P(flags)) {
		while (*pvp != NULL && PV_IS_WRITE_P((*pvp)->pv_flags))
			pvp = &SLIST_NEXT(*pvp, pv_link);
	}
#endif
	SLIST_NEXT(pv, pv_link) = *pvp;		/* add to ... */
	*pvp = pv;				/* ... locked list */
	md->pvh_attrs |= flags & (PVF_REF | PVF_MOD);
#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
	if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE)
		md->pvh_attrs |= PVF_KMOD;
	if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
		md->pvh_attrs |= PVF_DIRTY;
	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
#endif
	if (pm == pmap_kernel()) {
		PMAPCOUNT(kernel_mappings);
		if (flags & PVF_WRITE)
			md->krw_mappings++;
		else
			md->kro_mappings++;
	} else {
		if (flags & PVF_WRITE)
			md->urw_mappings++;
		else
			md->uro_mappings++;
	}

#ifdef PMAP_CACHE_VIPT
#ifndef ARM_MMU_EXTENDED
	/*
	 * Even though pmap_vac_me_harder will set PVF_WRITE for us,
	 * do it here as well to keep the mappings & PVF_WRITE consistent.
	 */
	if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) {
		md->pvh_attrs |= PVF_WRITE;
	}
#endif
	/*
	 * If this is an exec mapping and it's the first exec mapping
	 * for this page, make sure to sync the I-cache.
	 */
	if (PV_IS_EXEC_P(flags)) {
		if (!PV_IS_EXEC_P(md->pvh_attrs)) {
			pmap_syncicache_page(md, pa);
			PMAPCOUNT(exec_synced_map);
		}
		PMAPCOUNT(exec_mappings);
	}
#endif

	PMAPCOUNT(mappings);

	if (pv->pv_flags & PVF_WIRED)
		++pm->pm_stats.wired_count;
}
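
/*
 * Caller-side sketch (illustrative; the pv_entry pool name is assumed
 * from its use elsewhere in this file): the caller allocates the
 * pv_entry first, since pmap_enter_pv() itself cannot fail:
 *
 *	struct pv_entry *new_pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
 *	if (new_pv == NULL)
 *		... resolve the allocation failure before proceeding ...
 *	pmap_enter_pv(md, pa, new_pv, pm, va, flags);
 */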

/*
 * pmap_find_pv: Find a pv entry
 *
 * => caller should hold lock on vm_page
 */
static inline struct pv_entry *
pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va)
{
	struct pv_entry *pv;

	SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
		if (pm == pv->pv_pmap && va == pv->pv_va)
			break;
	}

	return pv;
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pv
 */
static struct pv_entry *
pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx",
	    (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va);

	struct pv_entry *pv, **prevptr;

	prevptr = &SLIST_FIRST(&md->pvh_list);	/* prev pv_entry ptr */
	pv = *prevptr;

	while (pv) {
		if (pv->pv_pmap == pm && pv->pv_va == va) {	/* match? */
			UVMHIST_LOG(maphist, "pm %#jx md %#jx flags %#jx",
			    (uintptr_t)pm, (uintptr_t)md, pv->pv_flags, 0);
			if (pv->pv_flags & PVF_WIRED) {
				--pm->pm_stats.wired_count;
			}
			*prevptr = SLIST_NEXT(pv, pv_link);	/* remove it! */
			if (pm == pmap_kernel()) {
				PMAPCOUNT(kernel_unmappings);
				if (pv->pv_flags & PVF_WRITE)
					md->krw_mappings--;
				else
					md->kro_mappings--;
			} else {
				if (pv->pv_flags & PVF_WRITE)
					md->urw_mappings--;
				else
					md->uro_mappings--;
			}

			PMAPCOUNT(unmappings);
#ifdef PMAP_CACHE_VIPT
			/*
			 * If this page has had an exec mapping, then if
			 * this was the last mapping, discard the contents,
			 * otherwise sync the i-cache for this page.
			 */
			if (PV_IS_EXEC_P(md->pvh_attrs)) {
				if (SLIST_EMPTY(&md->pvh_list)) {
					md->pvh_attrs &= ~PVF_EXEC;
					PMAPCOUNT(exec_discarded_unmap);
				} else if (pv->pv_flags & PVF_WRITE) {
					pmap_syncicache_page(md, pa);
					PMAPCOUNT(exec_synced_unmap);
				}
			}
#endif /* PMAP_CACHE_VIPT */
			break;
		}
		prevptr = &SLIST_NEXT(pv, pv_link);	/* previous pointer */
		pv = *prevptr;				/* advance */
	}

#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
	/*
	 * If we no longer have a WRITEABLE KENTRY at the head of list,
	 * clear the KMOD attribute from the page.
	 */
	if (SLIST_FIRST(&md->pvh_list) == NULL
	    || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE)
		md->pvh_attrs &= ~PVF_KMOD;

	/*
	 * If this was a writeable page and there are no more writeable
	 * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
	 * the contents to memory.
	 */
	if (arm_cache_prefer_mask != 0) {
		if (md->krw_mappings + md->urw_mappings == 0)
			md->pvh_attrs &= ~PVF_WRITE;
		PMAP_VALIDATE_MD_PAGE(md);
	}
	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
#endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */

	/* return removed pv */
	return pv;
}
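
/*
 * Caller-side sketch (illustrative; assumes the same pv pool as in the
 * pmap_enter_pv() note above): the removed pv_entry is handed back to
 * the caller, who owns freeing it once the locks are dropped:
 *
 *	struct pv_entry *old_pv = pmap_remove_pv(md, pa, pm, va);
 *	if (old_pv != NULL)
 *		pool_put(&pmap_pv_pool, old_pv);
 */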

/*
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => caller must call pmap_vac_me_harder() if writable status of a page
 *    may have changed.
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */
static u_int
pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va,
    u_int clr_mask, u_int set_mask)
{
	struct pv_entry *npv;
	u_int flags, oflags;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx",
	    (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va);
	UVMHIST_LOG(maphist, "... clr %#jx set %#jx", clr_mask, set_mask, 0, 0);

	KASSERT(!PV_IS_KENTRY_P(clr_mask));
	KASSERT(!PV_IS_KENTRY_P(set_mask));

	if ((npv = pmap_find_pv(md, pm, va)) == NULL) {
		UVMHIST_LOG(maphist, "<--- done (not found)", 0, 0, 0, 0);
		return 0;
	}

	/*
	 * There is at least one VA mapping this page.
	 */

	if (clr_mask & (PVF_REF | PVF_MOD)) {
		md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
		if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
			md->pvh_attrs |= PVF_DIRTY;
		KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));

/* ... [remainder of pmap_modify_pv() and the code up to pmap_use_l1() elided] ... */

static inline void
pmap_use_l1(pmap_t pm)
{
	struct l1_ttable *l1;

	/*
	 * Do nothing if we're in interrupt context.
	 * Access to an L1 by the kernel pmap must not affect
	 * the LRU list.
	 */
	if (cpu_intr_p() || pm == pmap_kernel())
		return;

	l1 = pm->pm_l1;

	/*
	 * If the L1 is not currently on the LRU list, just return
	 */
	if (l1->l1_domain_use_count == PMAP_DOMAINS)
		return;

	mutex_spin_enter(&l1_lru_lock);

	/*
	 * Check the use count again, now that we've acquired the lock
	 */
	if (l1->l1_domain_use_count == PMAP_DOMAINS) {
		mutex_spin_exit(&l1_lru_lock);
		return;
	}

	/*
	 * Move the L1 to the back of the LRU list
	 */
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mutex_spin_exit(&l1_lru_lock);
}
#endif /* !ARM_MMU_EXTENDED */

/*
 * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *)
 *
 * Free an L2 descriptor table.
 */
static inline void
#if defined(PMAP_INCLUDE_PTE_SYNC) && defined(PMAP_CACHE_VIVT)
pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa)
#else
pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa)
#endif
{
#if defined(PMAP_INCLUDE_PTE_SYNC) && defined(PMAP_CACHE_VIVT)
	/*
	 * Note: With a write-back cache, we may need to sync this
	 * L2 table before re-using it.
	 * This is because it may have belonged to a non-current
	 * pmap, in which case the cache syncs would have been
	 * skipped for the pages that were being unmapped.  If the
	 * L2 table were then to be immediately re-allocated to
	 * the *current* pmap, it may well contain stale mappings
	 * which have not yet been cleared by a cache write-back
	 * and so would still be visible to the mmu.
	 */
	if (need_sync)
		PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
#endif /* PMAP_INCLUDE_PTE_SYNC && PMAP_CACHE_VIVT */
	pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA, or NULL if no L2 bucket exists for the address.
 */
static inline struct l2_bucket *
pmap_get_l2_bucket(pmap_t pm, vaddr_t va)
{
	const size_t l1slot = l1pte_index(va);
	struct l2_dtable *l2;
	struct l2_bucket *l2b;

	if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL ||
	    (l2b = &l2->l2_bucket[L2_BUCKET(l1slot)])->l2b_kva == NULL)
		return NULL;

	return l2b;
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA.
 *
 * If no L2 bucket exists, perform the necessary allocations to put an L2
 * bucket/page table in place.
 *
 * Note that if a new L2 bucket/page was allocated, the caller *must*
 * increment the bucket occupancy counter appropriately *before*
 * releasing the pmap's lock to ensure no other thread or cpu deallocates
 * the bucket/page in the meantime.
 */
static struct l2_bucket *
pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va)
{
	const size_t l1slot = l1pte_index(va);
	struct l2_dtable *l2;

	if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
		if ((l2 = pmap_alloc_l2_dtable()) == NULL)
			return NULL;

		/*
		 * Link it into the parent pmap
		 */
		pm->pm_l2[L2_IDX(l1slot)] = l2;
	}

	struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated.  Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
		if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_pa)) == NULL) {
			/*
			 * Oops, no more L2 page tables available at this
			 * time.  We may need to deallocate the l2_dtable
			 * if we allocated a new one above.
			 */
			if (l2->l2_occupancy == 0) {
				pm->pm_l2[L2_IDX(l1slot)] = NULL;
				pmap_free_l2_dtable(l2);
			}
			return NULL;
		}

		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_l1slot = l1slot;

#ifdef ARM_MMU_EXTENDED
		/*
		 * We know there will be a mapping here, so simply
		 * enter this PTP into the L1 now.
		 */
		pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot;
		pd_entry_t npde = L1_C_PROTO | l2b->l2b_pa
		    | L1_C_DOM(pmap_domain(pm));
		KASSERT(*pdep == 0);
		l1pte_setone(pdep, npde);
		PDE_SYNC(pdep);
#endif
	}

	return l2b;
}
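
/*
 * Sketch of the caller contract described above (illustrative and
 * simplified; pmap_enter() is the real consumer, and it scales the
 * occupancy bump by the number of PTEs per page): a new mapping bumps
 * the bucket's occupancy while the pmap lock is still held, which is
 * what keeps pmap_free_l2_bucket() from reclaiming the PTP underneath
 * us:
 *
 *	struct l2_bucket * const l2b = pmap_alloc_l2_bucket(pm, va);
 *	if (l2b == NULL)
 *		... fail, or sleep and retry with the pmap unlocked ...
 *	pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
 *	if (*ptep == 0)			// slot was empty: new mapping
 *		l2b->l2b_occupancy++;	// (simplified)
 *	... install the PTE and PTE_SYNC() before unlocking ...
 */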

/*
 * One or more mappings in the specified L2 descriptor table have just been
 * invalidated.
 *
 * Garbage collect the metadata and descriptor table itself if necessary.
 *
 * The pmap lock must be acquired when this is called (not necessary
 * for the kernel pmap).
 */
static void
pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
{
	KDASSERT(count <= l2b->l2b_occupancy);

	/*
	 * Update the bucket's reference count according to how many
	 * PTEs the caller has just invalidated.
	 */
	l2b->l2b_occupancy -= count;

	/*
	 * Note:
	 *
	 * Level 2 page tables allocated to the kernel pmap are never freed
	 * as that would require checking all Level 1 page tables and
	 * removing any references to the Level 2 page table.  See also the
	 * comment elsewhere about never freeing bootstrap L2 descriptors.
	 *
	 * We make do with just invalidating the mapping in the L2 table.
	 *
	 * This isn't really a big deal in practice and, in fact, leads
	 * to a performance win over time as we don't need to continually
	 * alloc/free.
	 */
	if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
		return;

	/*
	 * There are no more valid mappings in this level 2 page table.
	 * Go ahead and NULL-out the pointer in the bucket, then
	 * free the page table.
	 */
	const size_t l1slot = l2b->l2b_l1slot;
	pt_entry_t * const ptep = l2b->l2b_kva;
	l2b->l2b_kva = NULL;

	pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot;
	pd_entry_t pde __diagused = *pdep;

#ifdef ARM_MMU_EXTENDED
	/*
	 * Invalidate the L1 slot.
	 */
	KASSERT((pde & L1_TYPE_MASK) == L1_TYPE_C);
#else
	/*
	 * If the L1 slot matches the pmap's domain number, then invalidate it.
	 */
	if ((pde & (L1_C_DOM_MASK|L1_TYPE_MASK))
	    == (L1_C_DOM(pmap_domain(pm))|L1_TYPE_C)) {
#endif
		l1pte_setone(pdep, 0);
		PDE_SYNC(pdep);
#ifndef ARM_MMU_EXTENDED
	}
#endif

	/*
	 * Release the L2 descriptor table back to the pool cache.
	 */
#if defined(PMAP_INCLUDE_PTE_SYNC) && defined(PMAP_CACHE_VIVT)
	pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_pa);
#else
	pmap_free_l2_ptp(ptep, l2b->l2b_pa);
#endif

	/*
	 * Update the reference count in the associated l2_dtable
	 */
	struct l2_dtable * const l2 = pm->pm_l2[L2_IDX(l1slot)];
	if (--l2->l2_occupancy > 0)
		return;

	/*
	 * There are no more valid mappings in any of the Level 1
	 * slots managed by this l2_dtable.  Go ahead and NULL-out
	 * the pointer in the parent pmap and free the l2_dtable.
	 */
	pm->pm_l2[L2_IDX(l1slot)] = NULL;
	pmap_free_l2_dtable(l2);
}

1637 | #if defined(ARM_MMU_EXTENDED) | | 1637 | #if defined(ARM_MMU_EXTENDED) |
1638 | /* | | 1638 | /* |
1639 | * Pool cache constructors for L1 translation tables | | 1639 | * Pool cache constructors for L1 translation tables |
1640 | */ | | 1640 | */ |
1641 | | | 1641 | |
1642 | static int | | 1642 | static int |
1643 | pmap_l1tt_ctor(void *arg, void *v, int flags) | | 1643 | pmap_l1tt_ctor(void *arg, void *v, int flags) |
1644 | { | | 1644 | { |
1645 | #ifndef PMAP_INCLUDE_PTE_SYNC | | 1645 | #ifndef PMAP_INCLUDE_PTE_SYNC |
1646 | #error not supported | | 1646 | #error not supported |
1647 | #endif | | 1647 | #endif |
1648 | | | 1648 | |
1649 | memset(v, 0, L1TT_SIZE); | | 1649 | memset(v, 0, L1TT_SIZE); |
1650 | PTE_SYNC_RANGE(v, L1TT_SIZE / sizeof(pt_entry_t)); | | 1650 | PTE_SYNC_RANGE(v, L1TT_SIZE / sizeof(pt_entry_t)); |
1651 | return 0; | | 1651 | return 0; |
1652 | } | | 1652 | } |
1653 | #endif | | 1653 | #endif |
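/*
 * Illustrative sketch (not part of the original source): a constructor
 * like pmap_l1tt_ctor() is handed to the pool cache at creation time and
 * runs once per object the cache constructs, so every L1 translation
 * table comes back zeroed and synced.  The cache name "l1ttpl" and the
 * variable names here are hypothetical; the real cache is set up during
 * pmap bootstrap.
 *
 *	pool_cache_t pc = pool_cache_init(L1TT_SIZE, L1TT_SIZE, 0, 0,
 *	    "l1ttpl", NULL, IPL_NONE, pmap_l1tt_ctor, NULL, NULL);
 *	void *l1tt = pool_cache_get(pc, PR_WAITOK);
 *	...
 *	pool_cache_put(pc, l1tt);
 */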
1654 | | | 1654 | |
1655 | /* | | 1655 | /* |
1656 | * Pool cache constructors for L2 descriptor tables, metadata and pmap | | 1656 | * Pool cache constructors for L2 descriptor tables, metadata and pmap |
1657 | * structures. | | 1657 | * structures. |
1658 | */ | | 1658 | */ |
1659 | static int | | 1659 | static int |
1660 | pmap_l2ptp_ctor(void *arg, void *v, int flags) | | 1660 | pmap_l2ptp_ctor(void *arg, void *v, int flags) |
1661 | { | | 1661 | { |
1662 | #ifndef PMAP_INCLUDE_PTE_SYNC | | 1662 | #ifndef PMAP_INCLUDE_PTE_SYNC |
1663 | vaddr_t va = (vaddr_t)v & ~PGOFSET; | | 1663 | vaddr_t va = (vaddr_t)v & ~PGOFSET; |
1664 | | | 1664 | |
1665 | /* | | 1665 | /* |
1666 | * The mappings for these page tables were initially made using | | 1666 | * The mappings for these page tables were initially made using |
1667 | * pmap_kenter_pa() by the pool subsystem. Therefore, the cache- | | 1667 | * pmap_kenter_pa() by the pool subsystem. Therefore, the cache- |
1668 | * mode will not be right for page table mappings. To avoid | | 1668 | * mode will not be right for page table mappings. To avoid |
1669 | * polluting the pmap_kenter_pa() code with a special case for | | 1669 | * polluting the pmap_kenter_pa() code with a special case for |
1670 | * page tables, we simply fix up the cache-mode here if it's not | | 1670 | * page tables, we simply fix up the cache-mode here if it's not |
1671 | * correct. | | 1671 | * correct. |
1672 | */ | | 1672 | */ |
1673 | if (pte_l2_s_cache_mode != pte_l2_s_cache_mode_pt) { | | 1673 | if (pte_l2_s_cache_mode != pte_l2_s_cache_mode_pt) { |
1674 | const struct l2_bucket * const l2b = | | 1674 | const struct l2_bucket * const l2b = |
1675 | pmap_get_l2_bucket(pmap_kernel(), va); | | 1675 | pmap_get_l2_bucket(pmap_kernel(), va); |
1676 | KASSERTMSG(l2b != NULL, "%#lx", va); | | 1676 | KASSERTMSG(l2b != NULL, "%#lx", va); |
1677 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 1677 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; |
1678 | const pt_entry_t opte = *ptep; | | 1678 | const pt_entry_t opte = *ptep; |
1679 | | | 1679 | |
1680 | if ((opte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { | | 1680 | if ((opte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { |
1681 | /* | | 1681 | /* |
1682 | * Page tables must have the cache-mode set correctly. | | 1682 | * Page tables must have the cache-mode set correctly. |
1683 | */ | | 1683 | */ |
1684 | const pt_entry_t npte = (opte & ~L2_S_CACHE_MASK) | | 1684 | const pt_entry_t npte = (opte & ~L2_S_CACHE_MASK) |
1685 | | pte_l2_s_cache_mode_pt; | | 1685 | | pte_l2_s_cache_mode_pt; |
1686 | l2pte_set(ptep, npte, opte); | | 1686 | l2pte_set(ptep, npte, opte); |
1687 | PTE_SYNC(ptep); | | 1687 | PTE_SYNC(ptep); |
1688 | cpu_tlb_flushD_SE(va); | | 1688 | cpu_tlb_flushD_SE(va); |
1689 | cpu_cpwait(); | | 1689 | cpu_cpwait(); |
1690 | } | | 1690 | } |
1691 | } | | 1691 | } |
1692 | #endif | | 1692 | #endif |
1693 | | | 1693 | |
1694 | memset(v, 0, L2_TABLE_SIZE_REAL); | | 1694 | memset(v, 0, L2_TABLE_SIZE_REAL); |
1695 | PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); | | 1695 | PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); |
1696 | return 0; | | 1696 | return 0; |
1697 | } | | 1697 | } |
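/*
 * The fixup above uses the canonical PTE-update sequence that recurs
 * throughout this file: install the entry, push it to memory, drop the
 * stale translation, then drain the coprocessor write buffer.
 *
 *	l2pte_set(ptep, npte, opte);	// install the new entry
 *	PTE_SYNC(ptep);			// make it visible to table walks
 *	cpu_tlb_flushD_SE(va);		// invalidate the cached translation
 *	cpu_cpwait();			// wait for the operations to drain
 */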
1698 | | | 1698 | |
1699 | static int | | 1699 | static int |
1700 | pmap_l2dtable_ctor(void *arg, void *v, int flags) | | 1700 | pmap_l2dtable_ctor(void *arg, void *v, int flags) |
1701 | { | | 1701 | { |
1702 | | | 1702 | |
1703 | memset(v, 0, sizeof(struct l2_dtable)); | | 1703 | memset(v, 0, sizeof(struct l2_dtable)); |
1704 | return 0; | | 1704 | return 0; |
1705 | } | | 1705 | } |
1706 | | | 1706 | |
1707 | static int | | 1707 | static int |
1708 | pmap_pmap_ctor(void *arg, void *v, int flags) | | 1708 | pmap_pmap_ctor(void *arg, void *v, int flags) |
1709 | { | | 1709 | { |
1710 | | | 1710 | |
1711 | memset(v, 0, sizeof(struct pmap)); | | 1711 | memset(v, 0, sizeof(struct pmap)); |
1712 | return 0; | | 1712 | return 0; |
1713 | } | | 1713 | } |
1714 | | | 1714 | |
1715 | static void | | 1715 | static void |
1716 | pmap_pinit(pmap_t pm) | | 1716 | pmap_pinit(pmap_t pm) |
1717 | { | | 1717 | { |
1718 | #ifndef ARM_HAS_VBAR | | 1718 | #ifndef ARM_HAS_VBAR |
1719 | struct l2_bucket *l2b; | | 1719 | struct l2_bucket *l2b; |
1720 | | | 1720 | |
1721 | if (vector_page < KERNEL_BASE) { | | 1721 | if (vector_page < KERNEL_BASE) { |
1722 | /* | | 1722 | /* |
1723 | * Map the vector page. | | 1723 | * Map the vector page. |
1724 | */ | | 1724 | */ |
1725 | pmap_enter(pm, vector_page, systempage.pv_pa, | | 1725 | pmap_enter(pm, vector_page, systempage.pv_pa, |
1726 | VM_PROT_READ | VM_PROT_EXECUTE, | | 1726 | VM_PROT_READ | VM_PROT_EXECUTE, |
1727 | VM_PROT_READ | VM_PROT_EXECUTE | PMAP_WIRED); | | 1727 | VM_PROT_READ | VM_PROT_EXECUTE | PMAP_WIRED); |
1728 | pmap_update(pm); | | 1728 | pmap_update(pm); |
1729 | | | 1729 | |
1730 | pm->pm_pl1vec = pmap_l1_kva(pm) + l1pte_index(vector_page); | | 1730 | pm->pm_pl1vec = pmap_l1_kva(pm) + l1pte_index(vector_page); |
1731 | l2b = pmap_get_l2_bucket(pm, vector_page); | | 1731 | l2b = pmap_get_l2_bucket(pm, vector_page); |
1732 | KASSERTMSG(l2b != NULL, "%#lx", vector_page); | | 1732 | KASSERTMSG(l2b != NULL, "%#lx", vector_page); |
1733 | pm->pm_l1vec = l2b->l2b_pa | L1_C_PROTO | | | 1733 | pm->pm_l1vec = l2b->l2b_pa | L1_C_PROTO | |
1734 | L1_C_DOM(pmap_domain(pm)); | | 1734 | L1_C_DOM(pmap_domain(pm)); |
1735 | } else | | 1735 | } else |
1736 | pm->pm_pl1vec = NULL; | | 1736 | pm->pm_pl1vec = NULL; |
1737 | #endif | | 1737 | #endif |
1738 | } | | 1738 | } |
1739 | | | 1739 | |
1740 | #ifdef PMAP_CACHE_VIVT | | 1740 | #ifdef PMAP_CACHE_VIVT |
1741 | /* | | 1741 | /* |
1742 | * Since we have a virtually indexed cache, we may need to inhibit caching if | | 1742 | * Since we have a virtually indexed cache, we may need to inhibit caching if |
1743 | * there is more than one mapping and at least one of them is writable. | | 1743 | * there is more than one mapping and at least one of them is writable. |
1744 | * Since we purge the cache on every context switch, we only need to check for | | 1744 | * Since we purge the cache on every context switch, we only need to check for |
1745 | * other mappings within the same pmap, or kernel_pmap. | | 1745 | * other mappings within the same pmap, or kernel_pmap. |
1746 | * This function is also called when a page is unmapped, to possibly reenable | | 1746 | * This function is also called when a page is unmapped, to possibly reenable |
1747 | * caching on any remaining mappings. | | 1747 | * caching on any remaining mappings. |
1748 | * | | 1748 | * |
1749 | * The code implements the following logic, where: | | 1749 | * The code implements the following logic, where: |
1750 | * | | 1750 | * |
1751 | * KW = # of kernel read/write pages | | 1751 | * KW = # of kernel read/write pages |
1752 | * KR = # of kernel read only pages | | 1752 | * KR = # of kernel read only pages |
1753 | * UW = # of user read/write pages | | 1753 | * UW = # of user read/write pages |
1754 | * UR = # of user read only pages | | 1754 | * UR = # of user read only pages |
1755 | * | | 1755 | * |
1756 | * KC = kernel mapping is cacheable | | 1756 | * KC = kernel mapping is cacheable |
1757 | * UC = user mapping is cacheable | | 1757 | * UC = user mapping is cacheable |
1758 | * | | 1758 | * |
1759 | * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0 | | 1759 | * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0 |
1760 | * +--------------------------------------------- | | 1760 | * +--------------------------------------------- |
1761 | * UW=0,UR=0 | --- KC=1 KC=1 KC=0 | | 1761 | * UW=0,UR=0 | --- KC=1 KC=1 KC=0 |
1762 | * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0 | | 1762 | * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0 |
1763 | * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 | | 1763 | * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 |
1764 | * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 | | 1764 | * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 |
1765 | */ | | 1765 | */ |
1766 | | | 1766 | |
1767 | static const int pmap_vac_flags[4][4] = { | | 1767 | static const int pmap_vac_flags[4][4] = { |
1768 | {-1, 0, 0, PVF_KNC}, | | 1768 | {-1, 0, 0, PVF_KNC}, |
1769 | {0, 0, PVF_NC, PVF_NC}, | | 1769 | {0, 0, PVF_NC, PVF_NC}, |
1770 | {0, PVF_NC, PVF_NC, PVF_NC}, | | 1770 | {0, PVF_NC, PVF_NC, PVF_NC}, |
1771 | {PVF_UNC, PVF_NC, PVF_NC, PVF_NC} | | 1771 | {PVF_UNC, PVF_NC, PVF_NC, PVF_NC} |
1772 | }; | | 1772 | }; |
1773 | | | 1773 | |
1774 | static inline int | | 1774 | static inline int |
1775 | pmap_get_vac_flags(const struct vm_page_md *md) | | 1775 | pmap_get_vac_flags(const struct vm_page_md *md) |
1776 | { | | 1776 | { |
1777 | int kidx, uidx; | | 1777 | int kidx, uidx; |
1778 | | | 1778 | |
1779 | kidx = 0; | | 1779 | kidx = 0; |
1780 | if (md->kro_mappings || md->krw_mappings > 1) | | 1780 | if (md->kro_mappings || md->krw_mappings > 1) |
1781 | kidx |= 1; | | 1781 | kidx |= 1; |
1782 | if (md->krw_mappings) | | 1782 | if (md->krw_mappings) |
1783 | kidx |= 2; | | 1783 | kidx |= 2; |
1784 | | | 1784 | |
1785 | uidx = 0; | | 1785 | uidx = 0; |
1786 | if (md->uro_mappings || md->urw_mappings > 1) | | 1786 | if (md->uro_mappings || md->urw_mappings > 1) |
1787 | uidx |= 1; | | 1787 | uidx |= 1; |
1788 | if (md->urw_mappings) | | 1788 | if (md->urw_mappings) |
1789 | uidx |= 2; | | 1789 | uidx |= 2; |
1790 | | | 1790 | |
1791 | return pmap_vac_flags[uidx][kidx]; | | 1791 | return pmap_vac_flags[uidx][kidx]; |
1792 | } | | 1792 | } |
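/*
 * Worked example (an illustration, not from the original source): a page
 * with one kernel r/w mapping and one user r/o mapping (KW=1, KR=0,
 * UW=0, UR=1) gives kidx = 2 (krw_mappings != 0, kro_mappings == 0) and
 * uidx = 1 (uro_mappings != 0, urw_mappings == 0), so
 *
 *	pmap_vac_flags[1][2] == PVF_NC
 *
 * and both mappings must be made non-cacheable, matching the
 * KW=1,KR=0 / UW=0,UR>0 cell of the table above.
 */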
1793 | | | 1793 | |
1794 | static inline void | | 1794 | static inline void |
1795 | pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 1795 | pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
1796 | { | | 1796 | { |
1797 | int nattr; | | 1797 | int nattr; |
1798 | | | 1798 | |
1799 | nattr = pmap_get_vac_flags(md); | | 1799 | nattr = pmap_get_vac_flags(md); |
1800 | | | 1800 | |
1801 | if (nattr < 0) { | | 1801 | if (nattr < 0) { |
1802 | md->pvh_attrs &= ~PVF_NC; | | 1802 | md->pvh_attrs &= ~PVF_NC; |
1803 | return; | | 1803 | return; |
1804 | } | | 1804 | } |
1805 | | | 1805 | |
1806 | if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0) | | 1806 | if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0) |
1807 | return; | | 1807 | return; |
1808 | | | 1808 | |
1809 | if (pm == pmap_kernel()) | | 1809 | if (pm == pmap_kernel()) |
1810 | pmap_vac_me_kpmap(md, pa, pm, va); | | 1810 | pmap_vac_me_kpmap(md, pa, pm, va); |
1811 | else | | 1811 | else |
1812 | pmap_vac_me_user(md, pa, pm, va); | | 1812 | pmap_vac_me_user(md, pa, pm, va); |
1813 | | | 1813 | |
1814 | md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr; | | 1814 | md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr; |
1815 | } | | 1815 | } |
1816 | | | 1816 | |
1817 | static void | | 1817 | static void |
1818 | pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 1818 | pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
1819 | { | | 1819 | { |
1820 | u_int u_cacheable, u_entries; | | 1820 | u_int u_cacheable, u_entries; |
1821 | struct pv_entry *pv; | | 1821 | struct pv_entry *pv; |
1822 | pmap_t last_pmap = pm; | | 1822 | pmap_t last_pmap = pm; |
1823 | | | 1823 | |
1824 | /* | | 1824 | /* |
1825 | * Pass one, see if there are both kernel and user pmaps for | | 1825 | * Pass one, see if there are both kernel and user pmaps for |
1826 | * this page. Calculate whether there are user-writable or | | 1826 | * this page. Calculate whether there are user-writable or |
1827 | * kernel-writable pages. | | 1827 | * kernel-writable pages. |
1828 | */ | | 1828 | */ |
1829 | u_cacheable = 0; | | 1829 | u_cacheable = 0; |
1830 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 1830 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
1831 | if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0) | | 1831 | if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0) |
1832 | u_cacheable++; | | 1832 | u_cacheable++; |
1833 | } | | 1833 | } |
1834 | | | 1834 | |
1835 | u_entries = md->urw_mappings + md->uro_mappings; | | 1835 | u_entries = md->urw_mappings + md->uro_mappings; |
1836 | | | 1836 | |
1837 | /* | | 1837 | /* |
1838 | * We know we have just been updating a kernel entry, so if | | 1838 | * We know we have just been updating a kernel entry, so if |
1839 | * all user pages are already cacheable, then there is nothing | | 1839 | * all user pages are already cacheable, then there is nothing |
1840 | * further to do. | | 1840 | * further to do. |
1841 | */ | | 1841 | */ |
1842 | if (md->k_mappings == 0 && u_cacheable == u_entries) | | 1842 | if (md->k_mappings == 0 && u_cacheable == u_entries) |
1843 | return; | | 1843 | return; |
1844 | | | 1844 | |
1845 | if (u_entries) { | | 1845 | if (u_entries) { |
1846 | /* | | 1846 | /* |
1847 | * Scan over the list again; for each entry that | | 1847 | * Scan over the list again; for each entry that |
1848 | * might not be set correctly, call pmap_vac_me_user | | 1848 | * might not be set correctly, call pmap_vac_me_user |
1849 | * to recalculate the settings. | | 1849 | * to recalculate the settings. |
1850 | */ | | 1850 | */ |
1851 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 1851 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
1852 | /* | | 1852 | /* |
1853 | * We know kernel mappings will get set | | 1853 | * We know kernel mappings will get set |
1854 | * correctly in other calls. We also know | | 1854 | * correctly in other calls. We also know |
1855 | * that if the pmap is the same as last_pmap | | 1855 | * that if the pmap is the same as last_pmap |
1856 | * then we've just handled this entry. | | 1856 | * then we've just handled this entry. |
1857 | */ | | 1857 | */ |
1858 | if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap) | | 1858 | if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap) |
1859 | continue; | | 1859 | continue; |
1860 | | | 1860 | |
1861 | /* | | 1861 | /* |
1862 | * If there are kernel entries and this page | | 1862 | * If there are kernel entries and this page |
1863 | * is writable but non-cacheable, then we can | | 1863 | * is writable but non-cacheable, then we can |
1864 | * skip this entry also. | | 1864 | * skip this entry also. |
1865 | */ | | 1865 | */ |
1866 | if (md->k_mappings && | | 1866 | if (md->k_mappings && |
1867 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == | | 1867 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == |
1868 | (PVF_NC | PVF_WRITE)) | | 1868 | (PVF_NC | PVF_WRITE)) |
1869 | continue; | | 1869 | continue; |
1870 | | | 1870 | |
1871 | /* | | 1871 | /* |
1872 | * Similarly if there are no kernel-writable | | 1872 | * Similarly if there are no kernel-writable |
1873 | * entries and the page is already | | 1873 | * entries and the page is already |
1874 | * read-only/cacheable. | | 1874 | * read-only/cacheable. |
1875 | */ | | 1875 | */ |
1876 | if (md->krw_mappings == 0 && | | 1876 | if (md->krw_mappings == 0 && |
1877 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) | | 1877 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) |
1878 | continue; | | 1878 | continue; |
1879 | | | 1879 | |
1880 | /* | | 1880 | /* |
1881 | * For some of the remaining cases, we know | | 1881 | * For some of the remaining cases, we know |
1882 | * that we must recalculate, but for others we | | 1882 | * that we must recalculate, but for others we |
1883 | * can't tell if they are correct or not, so | | 1883 | * can't tell if they are correct or not, so |
1884 | * we recalculate anyway. | | 1884 | * we recalculate anyway. |
1885 | */ | | 1885 | */ |
1886 | pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0); | | 1886 | pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0); |
1887 | } | | 1887 | } |
1888 | | | 1888 | |
1889 | if (md->k_mappings == 0) | | 1889 | if (md->k_mappings == 0) |
1890 | return; | | 1890 | return; |
1891 | } | | 1891 | } |
1892 | | | 1892 | |
1893 | pmap_vac_me_user(md, pa, pm, va); | | 1893 | pmap_vac_me_user(md, pa, pm, va); |
1894 | } | | 1894 | } |
1895 | | | 1895 | |
1896 | static void | | 1896 | static void |
1897 | pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 1897 | pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
1898 | { | | 1898 | { |
1899 | pmap_t kpmap = pmap_kernel(); | | 1899 | pmap_t kpmap = pmap_kernel(); |
1900 | struct pv_entry *pv, *npv = NULL; | | 1900 | struct pv_entry *pv, *npv = NULL; |
1901 | u_int entries = 0; | | 1901 | u_int entries = 0; |
1902 | u_int writable = 0; | | 1902 | u_int writable = 0; |
1903 | u_int cacheable_entries = 0; | | 1903 | u_int cacheable_entries = 0; |
1904 | u_int kern_cacheable = 0; | | 1904 | u_int kern_cacheable = 0; |
1905 | u_int other_writable = 0; | | 1905 | u_int other_writable = 0; |
1906 | | | 1906 | |
1907 | /* | | 1907 | /* |
1908 | * Count mappings and writable mappings in this pmap. | | 1908 | * Count mappings and writable mappings in this pmap. |
1909 | * Include kernel mappings as part of our own. | | 1909 | * Include kernel mappings as part of our own. |
1910 | * Keep a pointer to the first one. | | 1910 | * Keep a pointer to the first one. |
1911 | */ | | 1911 | */ |
1912 | npv = NULL; | | 1912 | npv = NULL; |
1913 | KASSERT(pmap_page_locked_p(md)); | | 1913 | KASSERT(pmap_page_locked_p(md)); |
1914 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 1914 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
1915 | /* Count mappings in the same pmap */ | | 1915 | /* Count mappings in the same pmap */ |
1916 | if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { | | 1916 | if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { |
1917 | if (entries++ == 0) | | 1917 | if (entries++ == 0) |
1918 | npv = pv; | | 1918 | npv = pv; |
1919 | | | 1919 | |
1920 | /* Cacheable mappings */ | | 1920 | /* Cacheable mappings */ |
1921 | if ((pv->pv_flags & PVF_NC) == 0) { | | 1921 | if ((pv->pv_flags & PVF_NC) == 0) { |
1922 | cacheable_entries++; | | 1922 | cacheable_entries++; |
1923 | if (kpmap == pv->pv_pmap) | | 1923 | if (kpmap == pv->pv_pmap) |
1924 | kern_cacheable++; | | 1924 | kern_cacheable++; |
1925 | } | | 1925 | } |
1926 | | | 1926 | |
1927 | /* Writable mappings */ | | 1927 | /* Writable mappings */ |
1928 | if (pv->pv_flags & PVF_WRITE) | | 1928 | if (pv->pv_flags & PVF_WRITE) |
1929 | ++writable; | | 1929 | ++writable; |
1930 | } else if (pv->pv_flags & PVF_WRITE) | | 1930 | } else if (pv->pv_flags & PVF_WRITE) |
1931 | other_writable = 1; | | 1931 | other_writable = 1; |
1932 | } | | 1932 | } |
1933 | | | 1933 | |
1934 | /* | | 1934 | /* |
1935 | * Enable or disable caching as necessary. | | 1935 | * Enable or disable caching as necessary. |
1936 | * Note: the first entry might be part of the kernel pmap, | | 1936 | * Note: the first entry might be part of the kernel pmap, |
1937 | * so we can't assume this is indicative of the state of the | | 1937 | * so we can't assume this is indicative of the state of the |
1938 | * other (maybe non-kpmap) entries. | | 1938 | * other (maybe non-kpmap) entries. |
1939 | */ | | 1939 | */ |
1940 | if ((entries > 1 && writable) || | | 1940 | if ((entries > 1 && writable) || |
1941 | (entries > 0 && pm == kpmap && other_writable)) { | | 1941 | (entries > 0 && pm == kpmap && other_writable)) { |
1942 | if (cacheable_entries == 0) { | | 1942 | if (cacheable_entries == 0) { |
1943 | return; | | 1943 | return; |
1944 | } | | 1944 | } |
1945 | | | 1945 | |
1946 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { | | 1946 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { |
1947 | if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || | | 1947 | if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || |
1948 | (pv->pv_flags & PVF_NC)) | | 1948 | (pv->pv_flags & PVF_NC)) |
1949 | continue; | | 1949 | continue; |
1950 | | | 1950 | |
1951 | pv->pv_flags |= PVF_NC; | | 1951 | pv->pv_flags |= PVF_NC; |
1952 | | | 1952 | |
1953 | struct l2_bucket * const l2b | | 1953 | struct l2_bucket * const l2b |
1954 | = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); | | 1954 | = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); |
1955 | KASSERTMSG(l2b != NULL, "%#lx", va); | | 1955 | KASSERTMSG(l2b != NULL, "%#lx", va); |
1956 | pt_entry_t * const ptep | | 1956 | pt_entry_t * const ptep |
1957 | = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 1957 | = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
1958 | const pt_entry_t opte = *ptep; | | 1958 | const pt_entry_t opte = *ptep; |
1959 | pt_entry_t npte = opte & ~L2_S_CACHE_MASK; | | 1959 | pt_entry_t npte = opte & ~L2_S_CACHE_MASK; |
1960 | | | 1960 | |
1961 | if ((va != pv->pv_va || pm != pv->pv_pmap) | | 1961 | if ((va != pv->pv_va || pm != pv->pv_pmap) |
1962 | && l2pte_valid_p(opte)) { | | 1962 | && l2pte_valid_p(opte)) { |
1963 | pmap_cache_wbinv_page(pv->pv_pmap, pv->pv_va, | | 1963 | pmap_cache_wbinv_page(pv->pv_pmap, pv->pv_va, |
1964 | true, pv->pv_flags); | | 1964 | true, pv->pv_flags); |
1965 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, | | 1965 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, |
1966 | pv->pv_flags); | | 1966 | pv->pv_flags); |
1967 | } | | 1967 | } |
1968 | | | 1968 | |
1969 | l2pte_set(ptep, npte, opte); | | 1969 | l2pte_set(ptep, npte, opte); |
1970 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); | | 1970 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); |
1971 | } | | 1971 | } |
1972 | cpu_cpwait(); | | 1972 | cpu_cpwait(); |
1973 | } else if (entries > cacheable_entries) { | | 1973 | } else if (entries > cacheable_entries) { |
1974 | /* | | 1974 | /* |
1975 | * Turn caching back on for some pages. If it is a kernel | | 1975 | * Turn caching back on for some pages. If it is a kernel |
1976 | * page, only do so if there are no other writable pages. | | 1976 | * page, only do so if there are no other writable pages. |
1977 | */ | | 1977 | */ |
1978 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { | | 1978 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { |
1979 | if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && | | 1979 | if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && |
1980 | (kpmap != pv->pv_pmap || other_writable))) | | 1980 | (kpmap != pv->pv_pmap || other_writable))) |
1981 | continue; | | 1981 | continue; |
1982 | | | 1982 | |
1983 | pv->pv_flags &= ~PVF_NC; | | 1983 | pv->pv_flags &= ~PVF_NC; |
1984 | | | 1984 | |
1985 | struct l2_bucket * const l2b | | 1985 | struct l2_bucket * const l2b |
1986 | = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); | | 1986 | = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); |
1987 | KASSERTMSG(l2b != NULL, "%#lx", va); | | 1987 | KASSERTMSG(l2b != NULL, "%#lx", va); |
1988 | pt_entry_t * const ptep | | 1988 | pt_entry_t * const ptep |
1989 | = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 1989 | = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
1990 | const pt_entry_t opte = *ptep; | | 1990 | const pt_entry_t opte = *ptep; |
1991 | pt_entry_t npte = (opte & ~L2_S_CACHE_MASK) | | 1991 | pt_entry_t npte = (opte & ~L2_S_CACHE_MASK) |
1992 | | pte_l2_s_cache_mode; | | 1992 | | pte_l2_s_cache_mode; |
1993 | | | 1993 | |
1994 | if (l2pte_valid_p(opte)) { | | 1994 | if (l2pte_valid_p(opte)) { |
1995 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, | | 1995 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, |
1996 | pv->pv_flags); | | 1996 | pv->pv_flags); |
1997 | } | | 1997 | } |
1998 | | | 1998 | |
1999 | l2pte_set(ptep, npte, opte); | | 1999 | l2pte_set(ptep, npte, opte); |
2000 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); | | 2000 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); |
2001 | } | | 2001 | } |
2002 | } | | 2002 | } |
2003 | } | | 2003 | } |
2004 | #endif | | 2004 | #endif |
2005 | | | 2005 | |
2006 | #ifdef PMAP_CACHE_VIPT | | 2006 | #ifdef PMAP_CACHE_VIPT |
2007 | static void | | 2007 | static void |
2008 | pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 2008 | pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
2009 | { | | 2009 | { |
2010 | | | 2010 | |
2011 | #ifndef ARM_MMU_EXTENDED | | 2011 | #ifndef ARM_MMU_EXTENDED |
2012 | struct pv_entry *pv; | | 2012 | struct pv_entry *pv; |
2013 | vaddr_t tst_mask; | | 2013 | vaddr_t tst_mask; |
2014 | bool bad_alias; | | 2014 | bool bad_alias; |
2015 | const u_int | | 2015 | const u_int |
2016 | rw_mappings = md->urw_mappings + md->krw_mappings, | | 2016 | rw_mappings = md->urw_mappings + md->krw_mappings, |
2017 | ro_mappings = md->uro_mappings + md->kro_mappings; | | 2017 | ro_mappings = md->uro_mappings + md->kro_mappings; |
2018 | | | 2018 | |
2019 | /* do we need to do anything? */ | | 2019 | /* do we need to do anything? */ |
2020 | if (arm_cache_prefer_mask == 0) | | 2020 | if (arm_cache_prefer_mask == 0) |
2021 | return; | | 2021 | return; |
2022 | | | 2022 | |
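/*
 * Illustrative note, assuming a typical configuration: with a 32KB
 * 4-way set-associative VIPT cache the way size is 8KB, so
 * arm_cache_prefer_mask covers VA bit 12 (0x1000).  Two mappings of the
 * same page at, say, va 0x1000 and va 0x2000 satisfy
 *
 *	((0x1000 ^ 0x2000) & arm_cache_prefer_mask) != 0
 *
 * i.e. they have different colors and can alias in the cache, whereas
 * va 0x1000 and va 0x3000 share a color and cannot.
 */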
2023 | UVMHIST_FUNC(__func__); | | 2023 | UVMHIST_FUNC(__func__); |
2024 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", | | 2024 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", |
2025 | (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); | | 2025 | (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); |
2026 | | | 2026 | |
2027 | KASSERT(!va || pm); | | 2027 | KASSERT(!va || pm); |
2028 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2028 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2029 | | | 2029 | |
2030 | /* Already a conflict? */ | | 2030 | /* Already a conflict? */ |
2031 | if (__predict_false(md->pvh_attrs & PVF_NC)) { | | 2031 | if (__predict_false(md->pvh_attrs & PVF_NC)) { |
2032 | /* just an add, things are already non-cached */ | | 2032 | /* just an add, things are already non-cached */ |
2033 | KASSERT(!(md->pvh_attrs & PVF_DIRTY)); | | 2033 | KASSERT(!(md->pvh_attrs & PVF_DIRTY)); |
2034 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 2034 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
2035 | bad_alias = false; | | 2035 | bad_alias = false; |
2036 | if (va) { | | 2036 | if (va) { |
2037 | PMAPCOUNT(vac_color_none); | | 2037 | PMAPCOUNT(vac_color_none); |
2038 | bad_alias = true; | | 2038 | bad_alias = true; |
2039 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2039 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2040 | goto fixup; | | 2040 | goto fixup; |
2041 | } | | 2041 | } |
2042 | pv = SLIST_FIRST(&md->pvh_list); | | 2042 | pv = SLIST_FIRST(&md->pvh_list); |
2043 | /* the list can't be empty, otherwise the page would be cacheable */ | | 2043 | /* the list can't be empty, otherwise the page would be cacheable */ |
2044 | if (md->pvh_attrs & PVF_KMPAGE) { | | 2044 | if (md->pvh_attrs & PVF_KMPAGE) { |
2045 | tst_mask = md->pvh_attrs; | | 2045 | tst_mask = md->pvh_attrs; |
2046 | } else { | | 2046 | } else { |
2047 | KASSERT(pv); | | 2047 | KASSERT(pv); |
2048 | tst_mask = pv->pv_va; | | 2048 | tst_mask = pv->pv_va; |
2049 | pv = SLIST_NEXT(pv, pv_link); | | 2049 | pv = SLIST_NEXT(pv, pv_link); |
2050 | } | | 2050 | } |
2051 | /* | | 2051 | /* |
2052 | * Only check for a bad alias if we have writable mappings. | | 2052 | * Only check for a bad alias if we have writable mappings. |
2053 | */ | | 2053 | */ |
2054 | tst_mask &= arm_cache_prefer_mask; | | 2054 | tst_mask &= arm_cache_prefer_mask; |
2055 | if (rw_mappings > 0) { | | 2055 | if (rw_mappings > 0) { |
2056 | for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) { | | 2056 | for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) { |
2057 | /* if there's a bad alias, stop checking. */ | | 2057 | /* if there's a bad alias, stop checking. */ |
2058 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) | | 2058 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) |
2059 | bad_alias = true; | | 2059 | bad_alias = true; |
2060 | } | | 2060 | } |
2061 | md->pvh_attrs |= PVF_WRITE; | | 2061 | md->pvh_attrs |= PVF_WRITE; |
2062 | if (!bad_alias) | | 2062 | if (!bad_alias) |
2063 | md->pvh_attrs |= PVF_DIRTY; | | 2063 | md->pvh_attrs |= PVF_DIRTY; |
2064 | } else { | | 2064 | } else { |
2065 | /* | | 2065 | /* |
2066 | * We have only read-only mappings. Let's see if there | | 2066 | * We have only read-only mappings. Let's see if there |
2067 | * are multiple colors in use or if we mapped a KMPAGE. | | 2067 | * are multiple colors in use or if we mapped a KMPAGE. |
2068 | * If the latter, we have a bad alias. If the former, | | 2068 | * If the latter, we have a bad alias. If the former, |
2069 | * we need to remember that. | | 2069 | * we need to remember that. |
2070 | */ | | 2070 | */ |
2071 | for (; pv; pv = SLIST_NEXT(pv, pv_link)) { | | 2071 | for (; pv; pv = SLIST_NEXT(pv, pv_link)) { |
2072 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) { | | 2072 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) { |
2073 | if (md->pvh_attrs & PVF_KMPAGE) | | 2073 | if (md->pvh_attrs & PVF_KMPAGE) |
2074 | bad_alias = true; | | 2074 | bad_alias = true; |
2075 | break; | | 2075 | break; |
2076 | } | | 2076 | } |
2077 | } | | 2077 | } |
2078 | md->pvh_attrs &= ~PVF_WRITE; | | 2078 | md->pvh_attrs &= ~PVF_WRITE; |
2079 | /* | | 2079 | /* |
2080 | * No KMPAGE and we exited early, so we must have | | 2080 | * No KMPAGE and we exited early, so we must have |
2081 | * multiple color mappings. | | 2081 | * multiple color mappings. |
2082 | */ | | 2082 | */ |
2083 | if (!bad_alias && pv != NULL) | | 2083 | if (!bad_alias && pv != NULL) |
2084 | md->pvh_attrs |= PVF_MULTCLR; | | 2084 | md->pvh_attrs |= PVF_MULTCLR; |
2085 | } | | 2085 | } |
2086 | | | 2086 | |
2087 | /* If no conflicting colors, set everything back to cached */ | | 2087 | /* If no conflicting colors, set everything back to cached */ |
2088 | if (!bad_alias) { | | 2088 | if (!bad_alias) { |
2089 | #ifdef DEBUG | | 2089 | #ifdef DEBUG |
2090 | if ((md->pvh_attrs & PVF_WRITE) | | 2090 | if ((md->pvh_attrs & PVF_WRITE) |
2091 | || ro_mappings < 2) { | | 2091 | || ro_mappings < 2) { |
2092 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) | | 2092 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) |
2093 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); | | 2093 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); |
2094 | } | | 2094 | } |
2095 | #endif | | 2095 | #endif |
2096 | md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC; | | 2096 | md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC; |
2097 | md->pvh_attrs |= tst_mask | PVF_COLORED; | | 2097 | md->pvh_attrs |= tst_mask | PVF_COLORED; |
2098 | /* | | 2098 | /* |
2099 | * Restore DIRTY bit if page is modified | | 2099 | * Restore DIRTY bit if page is modified |
2100 | */ | | 2100 | */ |
2101 | if (md->pvh_attrs & PVF_DMOD) | | 2101 | if (md->pvh_attrs & PVF_DMOD) |
2102 | md->pvh_attrs |= PVF_DIRTY; | | 2102 | md->pvh_attrs |= PVF_DIRTY; |
2103 | PMAPCOUNT(vac_color_restore); | | 2103 | PMAPCOUNT(vac_color_restore); |
2104 | } else { | | 2104 | } else { |
2105 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); | | 2105 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); |
2106 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); | | 2106 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); |
2107 | } | | 2107 | } |
2108 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2108 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2109 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2109 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2110 | } else if (!va) { | | 2110 | } else if (!va) { |
2111 | KASSERT(pmap_is_page_colored_p(md)); | | 2111 | KASSERT(pmap_is_page_colored_p(md)); |
2112 | KASSERT(!(md->pvh_attrs & PVF_WRITE) | | 2112 | KASSERT(!(md->pvh_attrs & PVF_WRITE) |
2113 | || (md->pvh_attrs & PVF_DIRTY)); | | 2113 | || (md->pvh_attrs & PVF_DIRTY)); |
2114 | if (rw_mappings == 0) { | | 2114 | if (rw_mappings == 0) { |
2115 | md->pvh_attrs &= ~PVF_WRITE; | | 2115 | md->pvh_attrs &= ~PVF_WRITE; |
2116 | if (ro_mappings == 1 | | 2116 | if (ro_mappings == 1 |
2117 | && (md->pvh_attrs & PVF_MULTCLR)) { | | 2117 | && (md->pvh_attrs & PVF_MULTCLR)) { |
2118 | /* | | 2118 | /* |
2119 | * If this is the last readonly mapping | | 2119 | * If this is the last readonly mapping |
2120 | * but it doesn't match the current color | | 2120 | * but it doesn't match the current color |
2121 | * for the page, change the current color | | 2121 | * for the page, change the current color |
2122 | * to match this last readonly mapping. | | 2122 | * to match this last readonly mapping. |
2123 | */ | | 2123 | */ |
2124 | pv = SLIST_FIRST(&md->pvh_list); | | 2124 | pv = SLIST_FIRST(&md->pvh_list); |
2125 | tst_mask = (md->pvh_attrs ^ pv->pv_va) | | 2125 | tst_mask = (md->pvh_attrs ^ pv->pv_va) |
2126 | & arm_cache_prefer_mask; | | 2126 | & arm_cache_prefer_mask; |
2127 | if (tst_mask) { | | 2127 | if (tst_mask) { |
2128 | md->pvh_attrs ^= tst_mask; | | 2128 | md->pvh_attrs ^= tst_mask; |
2129 | PMAPCOUNT(vac_color_change); | | 2129 | PMAPCOUNT(vac_color_change); |
2130 | } | | 2130 | } |
2131 | } | | 2131 | } |
2132 | } | | 2132 | } |
2133 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2133 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2134 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2134 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2135 | return; | | 2135 | return; |
2136 | } else if (!pmap_is_page_colored_p(md)) { | | 2136 | } else if (!pmap_is_page_colored_p(md)) { |
2137 | /* not colored so we just use its color */ | | 2137 | /* not colored so we just use its color */ |
2138 | KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY)); | | 2138 | KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY)); |
2139 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 2139 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
2140 | PMAPCOUNT(vac_color_new); | | 2140 | PMAPCOUNT(vac_color_new); |
2141 | md->pvh_attrs &= PAGE_SIZE - 1; | | 2141 | md->pvh_attrs &= PAGE_SIZE - 1; |
2142 | md->pvh_attrs |= PVF_COLORED | | 2142 | md->pvh_attrs |= PVF_COLORED |
2143 | | (va & arm_cache_prefer_mask) | | 2143 | | (va & arm_cache_prefer_mask) |
2144 | | (rw_mappings > 0 ? PVF_WRITE : 0); | | 2144 | | (rw_mappings > 0 ? PVF_WRITE : 0); |
2145 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2145 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2146 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2146 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2147 | return; | | 2147 | return; |
2148 | } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) { | | 2148 | } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) { |
2149 | bad_alias = false; | | 2149 | bad_alias = false; |
2150 | if (rw_mappings > 0) { | | 2150 | if (rw_mappings > 0) { |
2151 | /* | | 2151 | /* |
2152 | * We now have writeable mappings and if we have | | 2152 | * We now have writeable mappings and if we have |
2153 | * readonly mappings in more than one color, we have | | 2153 | * readonly mappings in more than one color, we have |
2154 | * an aliasing problem. Regardless, mark the page as | | 2154 | * an aliasing problem. Regardless, mark the page as |
2155 | * writeable. | | 2155 | * writeable. |
2156 | */ | | 2156 | */ |
2157 | if (md->pvh_attrs & PVF_MULTCLR) { | | 2157 | if (md->pvh_attrs & PVF_MULTCLR) { |
2158 | if (ro_mappings < 2) { | | 2158 | if (ro_mappings < 2) { |
2159 | /* | | 2159 | /* |
2160 | * If we have fewer than two | | 2160 | * If we have fewer than two |
2161 | * read-only mappings, just flush the | | 2161 | * read-only mappings, just flush the |
2162 | * non-primary colors from the cache. | | 2162 | * non-primary colors from the cache. |
2163 | */ | | 2163 | */ |
2164 | pmap_flush_page(md, pa, | | 2164 | pmap_flush_page(md, pa, |
2165 | PMAP_FLUSH_SECONDARY); | | 2165 | PMAP_FLUSH_SECONDARY); |
2166 | } else { | | 2166 | } else { |
2167 | bad_alias = true; | | 2167 | bad_alias = true; |
2168 | } | | 2168 | } |
2169 | } | | 2169 | } |
2170 | md->pvh_attrs |= PVF_WRITE; | | 2170 | md->pvh_attrs |= PVF_WRITE; |
2171 | } | | 2171 | } |
2172 | /* If no conflicting colors, set everything back to cached */ | | 2172 | /* If no conflicting colors, set everything back to cached */ |
2173 | if (!bad_alias) { | | 2173 | if (!bad_alias) { |
2174 | #ifdef DEBUG | | 2174 | #ifdef DEBUG |
2175 | if (rw_mappings > 0 | | 2175 | if (rw_mappings > 0 |
2176 | || (md->pvh_attrs & PMAP_KMPAGE)) { | | 2176 | || (md->pvh_attrs & PMAP_KMPAGE)) { |
2177 | tst_mask = md->pvh_attrs & arm_cache_prefer_mask; | | 2177 | tst_mask = md->pvh_attrs & arm_cache_prefer_mask; |
2178 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) | | 2178 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) |
2179 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); | | 2179 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); |
2180 | } | | 2180 | } |
2181 | #endif | | 2181 | #endif |
2182 | if (SLIST_EMPTY(&md->pvh_list)) | | 2182 | if (SLIST_EMPTY(&md->pvh_list)) |
2183 | PMAPCOUNT(vac_color_reuse); | | 2183 | PMAPCOUNT(vac_color_reuse); |
2184 | else | | 2184 | else |
2185 | PMAPCOUNT(vac_color_ok); | | 2185 | PMAPCOUNT(vac_color_ok); |
2186 | | | 2186 | |
2187 | /* matching color, just return */ | | 2187 | /* matching color, just return */ |
2188 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2188 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2189 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2189 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2190 | return; | | 2190 | return; |
2191 | } | | 2191 | } |
2192 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); | | 2192 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); |
2193 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); | | 2193 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); |
2194 | | | 2194 | |
2195 | /* color conflict. evict from cache. */ | | 2195 | /* color conflict. evict from cache. */ |
2196 | | | 2196 | |
2197 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); | | 2197 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); |
2198 | md->pvh_attrs &= ~PVF_COLORED; | | 2198 | md->pvh_attrs &= ~PVF_COLORED; |
2199 | md->pvh_attrs |= PVF_NC; | | 2199 | md->pvh_attrs |= PVF_NC; |
2200 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2200 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2201 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 2201 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
2202 | PMAPCOUNT(vac_color_erase); | | 2202 | PMAPCOUNT(vac_color_erase); |
2203 | } else if (rw_mappings == 0 | | 2203 | } else if (rw_mappings == 0 |
2204 | && (md->pvh_attrs & PVF_KMPAGE) == 0) { | | 2204 | && (md->pvh_attrs & PVF_KMPAGE) == 0) { |
2205 | KASSERT((md->pvh_attrs & PVF_WRITE) == 0); | | 2205 | KASSERT((md->pvh_attrs & PVF_WRITE) == 0); |
2206 | | | 2206 | |
2207 | /* | | 2207 | /* |
2208 | * If the page has dirty cache lines, clean it. | | 2208 | * If the page has dirty cache lines, clean it. |
2209 | */ | | 2209 | */ |
2210 | if (md->pvh_attrs & PVF_DIRTY) | | 2210 | if (md->pvh_attrs & PVF_DIRTY) |
2211 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); | | 2211 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); |
2212 | | | 2212 | |
2213 | /* | | 2213 | /* |
2214 | * If this is the first remapping (we know that there are no | | 2214 | * If this is the first remapping (we know that there are no |
2215 | * writeable mappings), then this is a simple color change. | | 2215 | * writeable mappings), then this is a simple color change. |
2216 | * Otherwise this is a secondary r/o mapping, which means | | 2216 | * Otherwise this is a secondary r/o mapping, which means |
2217 | * we don't have to do anything. | | 2217 | * we don't have to do anything. |
2218 | */ | | 2218 | */ |
2219 | if (ro_mappings == 1) { | | 2219 | if (ro_mappings == 1) { |
2220 | KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0); | | 2220 | KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0); |
2221 | md->pvh_attrs &= PAGE_SIZE - 1; | | 2221 | md->pvh_attrs &= PAGE_SIZE - 1; |
2222 | md->pvh_attrs |= (va & arm_cache_prefer_mask); | | 2222 | md->pvh_attrs |= (va & arm_cache_prefer_mask); |
2223 | PMAPCOUNT(vac_color_change); | | 2223 | PMAPCOUNT(vac_color_change); |
2224 | } else { | | 2224 | } else { |
2225 | PMAPCOUNT(vac_color_blind); | | 2225 | PMAPCOUNT(vac_color_blind); |
2226 | } | | 2226 | } |
2227 | md->pvh_attrs |= PVF_MULTCLR; | | 2227 | md->pvh_attrs |= PVF_MULTCLR; |
2228 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2228 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2229 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2229 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2230 | return; | | 2230 | return; |
2231 | } else { | | 2231 | } else { |
2232 | if (rw_mappings > 0) | | 2232 | if (rw_mappings > 0) |
2233 | md->pvh_attrs |= PVF_WRITE; | | 2233 | md->pvh_attrs |= PVF_WRITE; |
2234 | | | 2234 | |
2235 | /* color conflict. evict from cache. */ | | 2235 | /* color conflict. evict from cache. */ |
2236 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); | | 2236 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); |
2237 | | | 2237 | |
2238 | /* the list can't be empty because this was an enter/modify */ | | 2238 | /* the list can't be empty because this was an enter/modify */ |
2239 | pv = SLIST_FIRST(&md->pvh_list); | | 2239 | pv = SLIST_FIRST(&md->pvh_list); |
2240 | if ((md->pvh_attrs & PVF_KMPAGE) == 0) { | | 2240 | if ((md->pvh_attrs & PVF_KMPAGE) == 0) { |
2241 | KASSERT(pv); | | 2241 | KASSERT(pv); |
2242 | /* | | 2242 | /* |
2243 | * If there's only one mapped page, change color to the | | 2243 | * If there's only one mapped page, change color to the |
2244 | * page's new color and return. Restore the DIRTY bit | | 2244 | * page's new color and return. Restore the DIRTY bit |
2245 | * that was erased by pmap_flush_page. | | 2245 | * that was erased by pmap_flush_page. |
2246 | */ | | 2246 | */ |
2247 | if (SLIST_NEXT(pv, pv_link) == NULL) { | | 2247 | if (SLIST_NEXT(pv, pv_link) == NULL) { |
2248 | md->pvh_attrs &= PAGE_SIZE - 1; | | 2248 | md->pvh_attrs &= PAGE_SIZE - 1; |
2249 | md->pvh_attrs |= (va & arm_cache_prefer_mask); | | 2249 | md->pvh_attrs |= (va & arm_cache_prefer_mask); |
2250 | if (md->pvh_attrs & PVF_DMOD) | | 2250 | if (md->pvh_attrs & PVF_DMOD) |
2251 | md->pvh_attrs |= PVF_DIRTY; | | 2251 | md->pvh_attrs |= PVF_DIRTY; |
2252 | PMAPCOUNT(vac_color_change); | | 2252 | PMAPCOUNT(vac_color_change); |
2253 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2253 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2254 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2254 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2255 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 2255 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
2256 | return; | | 2256 | return; |
2257 | } | | 2257 | } |
2258 | } | | 2258 | } |
2259 | bad_alias = true; | | 2259 | bad_alias = true; |
2260 | md->pvh_attrs &= ~PVF_COLORED; | | 2260 | md->pvh_attrs &= ~PVF_COLORED; |
2261 | md->pvh_attrs |= PVF_NC; | | 2261 | md->pvh_attrs |= PVF_NC; |
2262 | PMAPCOUNT(vac_color_erase); | | 2262 | PMAPCOUNT(vac_color_erase); |
2263 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2263 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2264 | } | | 2264 | } |
2265 | | | 2265 | |
2266 | fixup: | | 2266 | fixup: |
2267 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2267 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2268 | | | 2268 | |
2269 | /* | | 2269 | /* |
2270 | * Turn caching on/off for all pages. | | 2270 | * Turn caching on/off for all pages. |
2271 | */ | | 2271 | */ |
2272 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 2272 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
2273 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pv->pv_pmap, | | 2273 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pv->pv_pmap, |
2274 | pv->pv_va); | | 2274 | pv->pv_va); |
2275 | KASSERTMSG(l2b != NULL, "%#lx", va); | | 2275 | KASSERTMSG(l2b != NULL, "%#lx", va); |
2276 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 2276 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
2277 | const pt_entry_t opte = *ptep; | | 2277 | const pt_entry_t opte = *ptep; |
2278 | pt_entry_t npte = opte & ~L2_S_CACHE_MASK; | | 2278 | pt_entry_t npte = opte & ~L2_S_CACHE_MASK; |
2279 | if (bad_alias) { | | 2279 | if (bad_alias) { |
2280 | pv->pv_flags |= PVF_NC; | | 2280 | pv->pv_flags |= PVF_NC; |
2281 | } else { | | 2281 | } else { |
2282 | pv->pv_flags &= ~PVF_NC; | | 2282 | pv->pv_flags &= ~PVF_NC; |
2283 | npte |= pte_l2_s_cache_mode; | | 2283 | npte |= pte_l2_s_cache_mode; |
2284 | } | | 2284 | } |
2285 | | | 2285 | |
2286 | if (opte == npte) /* only update if there's a change */ | | 2286 | if (opte == npte) /* only update if there's a change */ |
2287 | continue; | | 2287 | continue; |
2288 | | | 2288 | |
2289 | if (l2pte_valid_p(opte)) { | | 2289 | if (l2pte_valid_p(opte)) { |
2290 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, pv->pv_flags); | | 2290 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, pv->pv_flags); |
2291 | } | | 2291 | } |
2292 | | | 2292 | |
2293 | l2pte_set(ptep, npte, opte); | | 2293 | l2pte_set(ptep, npte, opte); |
2294 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); | | 2294 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); |
2295 | } | | 2295 | } |
2296 | #endif /* !ARM_MMU_EXTENDED */ | | 2296 | #endif /* !ARM_MMU_EXTENDED */ |
2297 | } | | 2297 | } |
2298 | #endif /* PMAP_CACHE_VIPT */ | | 2298 | #endif /* PMAP_CACHE_VIPT */ |
2299 | | | 2299 | |
2300 | | | 2300 | |
2301 | /* | | 2301 | /* |
2302 | * Modify pte bits for all ptes corresponding to the given physical address. | | 2302 | * Modify pte bits for all ptes corresponding to the given physical address. |
2303 | * We use `maskbits' rather than `clearbits' because we're always passing | | 2303 | * We use `maskbits' rather than `clearbits' because we're always passing |
2304 | * constants and the latter would require an extra inversion at run-time. | | 2304 | * constants and the latter would require an extra inversion at run-time. |
2305 | */ | | 2305 | */ |
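/*
 * For example (a sketch; real callers use their own wrappers),
 * write-protecting a page and clearing its modified bit is
 *
 *	pmap_clearbit(md, pa, PVF_WRITE | PVF_MOD);
 *
 * i.e. the constant names the bits to take away; with `clearbits'
 * semantics the function would have to apply ~clearbits internally,
 * which is the run-time inversion the comment above avoids.
 */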
2306 | static void | | 2306 | static void |
2307 | pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits) | | 2307 | pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits) |
2308 | { | | 2308 | { |
2309 | struct pv_entry *pv; | | 2309 | struct pv_entry *pv; |
2310 | #ifdef PMAP_CACHE_VIPT | | 2310 | #ifdef PMAP_CACHE_VIPT |
2311 | const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs); | | 2311 | const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs); |
2312 | bool need_syncicache = false; | | 2312 | bool need_syncicache = false; |
2313 | #ifdef ARM_MMU_EXTENDED | | 2313 | #ifdef ARM_MMU_EXTENDED |
2314 | const u_int execbits = (maskbits & PVF_EXEC) ? L2_XS_XN : 0; | | 2314 | const u_int execbits = (maskbits & PVF_EXEC) ? L2_XS_XN : 0; |
2315 | #else | | 2315 | #else |
2316 | const u_int execbits = 0; | | 2316 | const u_int execbits = 0; |
2317 | bool need_vac_me_harder = false; | | 2317 | bool need_vac_me_harder = false; |
2318 | #endif | | 2318 | #endif |
2319 | #else | | 2319 | #else |
2320 | const u_int execbits = 0; | | 2320 | const u_int execbits = 0; |
2321 | #endif | | 2321 | #endif |
2322 | | | 2322 | |
2323 | UVMHIST_FUNC(__func__); | | 2323 | UVMHIST_FUNC(__func__); |
2324 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx maskbits %#jx", | | 2324 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx maskbits %#jx", |
2325 | (uintptr_t)md, pa, maskbits, 0); | | 2325 | (uintptr_t)md, pa, maskbits, 0); |
2326 | | | 2326 | |
2327 | #ifdef PMAP_CACHE_VIPT | | 2327 | #ifdef PMAP_CACHE_VIPT |
2328 | /* | | 2328 | /* |
2329 | * If we might want to sync the I-cache and the page has been | | 2329 | * If we might want to sync the I-cache and the page has been |
2330 | * modified, then we know we definitely need to sync or discard it. | | 2330 | * modified, then we know we definitely need to sync or discard it. |
2331 | */ | | 2331 | */ |
2332 | if (want_syncicache) { | | 2332 | if (want_syncicache) { |
2333 | if (md->pvh_attrs & PVF_MOD) { | | 2333 | if (md->pvh_attrs & PVF_MOD) { |
2334 | need_syncicache = true; | | 2334 | need_syncicache = true; |
2335 | } | | 2335 | } |
2336 | } | | 2336 | } |
2337 | #endif | | 2337 | #endif |
2338 | KASSERT(pmap_page_locked_p(md)); | | 2338 | KASSERT(pmap_page_locked_p(md)); |
2339 | | | 2339 | |
2340 | /* | | 2340 | /* |
2341 | * Clear saved attributes (modify, reference) | | 2341 | * Clear saved attributes (modify, reference) |
2342 | */ | | 2342 | */ |
2343 | md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); | | 2343 | md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); |
2344 | | | 2344 | |
2345 | if (SLIST_EMPTY(&md->pvh_list)) { | | 2345 | if (SLIST_EMPTY(&md->pvh_list)) { |
2346 | #if defined(PMAP_CACHE_VIPT) | | 2346 | #if defined(PMAP_CACHE_VIPT) |
2347 | if (need_syncicache) { | | 2347 | if (need_syncicache) { |
2348 | /* | | 2348 | /* |
2349 | * No one has it mapped, so just discard it. The next | | 2349 | * No one has it mapped, so just discard it. The next |
2350 | * exec remapping will cause it to be synced. | | 2350 | * exec remapping will cause it to be synced. |
2351 | */ | | 2351 | */ |
2352 | md->pvh_attrs &= ~PVF_EXEC; | | 2352 | md->pvh_attrs &= ~PVF_EXEC; |
2353 | PMAPCOUNT(exec_discarded_clearbit); | | 2353 | PMAPCOUNT(exec_discarded_clearbit); |
2354 | } | | 2354 | } |
2355 | #endif | | 2355 | #endif |
2356 | return; | | 2356 | return; |
2357 | } | | 2357 | } |
2358 | | | 2358 | |
2359 | /* | | 2359 | /* |
2360 | * Loop over all current mappings, setting/clearing as appropriate | | 2360 | * Loop over all current mappings, setting/clearing as appropriate |
2361 | */ | | 2361 | */ |
2362 | for (pv = SLIST_FIRST(&md->pvh_list); pv != NULL;) { | | 2362 | for (pv = SLIST_FIRST(&md->pvh_list); pv != NULL;) { |
2363 | pmap_t pm = pv->pv_pmap; | | 2363 | pmap_t pm = pv->pv_pmap; |
2364 | const vaddr_t va = pv->pv_va; | | 2364 | const vaddr_t va = pv->pv_va; |
2365 | const u_int oflags = pv->pv_flags; | | 2365 | const u_int oflags = pv->pv_flags; |
2366 | #ifndef ARM_MMU_EXTENDED | | 2366 | #ifndef ARM_MMU_EXTENDED |
2367 | /* | | 2367 | /* |
2368 | * Kernel entries are unmanaged and as such not to be changed. | | 2368 | * Kernel entries are unmanaged and as such not to be changed. |
2369 | */ | | 2369 | */ |
2370 | if (PV_IS_KENTRY_P(oflags)) { | | 2370 | if (PV_IS_KENTRY_P(oflags)) { |
2371 | pv = SLIST_NEXT(pv, pv_link); | | 2371 | pv = SLIST_NEXT(pv, pv_link); |
2372 | continue; | | 2372 | continue; |
2373 | } | | 2373 | } |
2374 | #endif | | 2374 | #endif |
2375 | | | 2375 | |
2376 | /* | | 2376 | /* |
2377 | * Anything to do? | | | |
2378 | */ | | | |
2379 | if ((oflags & maskbits) == 0 && execbits == 0) { | | | |
2380 | pv = SLIST_NEXT(pv, pv_link); | | | |
2381 | continue; | | | |
2382 | } | | | |
2383 | | | | |
2384 | /* | | | |
2385 | * Try to get a hold on the pmap's lock. We must do this | | 2377 | * Try to get a hold on the pmap's lock. We must do this |
2386 | * while still holding the page locked, to know that the | | 2378 | * while still holding the page locked, to know that the |
2387 | * page is still associated with the pmap and the mapping is | | 2379 | * page is still associated with the pmap and the mapping is |
2388 | * in place. If a hold can't be had, unlock and wait for | | 2380 | * in place. If a hold can't be had, unlock and wait for |
2389 | * the pmap's lock to become available and retry. The pmap | | 2381 | * the pmap's lock to become available and retry. The pmap |
2390 | * must be ref'd over this dance to stop it disappearing | | 2382 | * must be ref'd over this dance to stop it disappearing |
2391 | * behind us. | | 2383 | * behind us. |
2392 | */ | | 2384 | */ |
2393 | if (!mutex_tryenter(&pm->pm_lock)) { | | 2385 | if (!mutex_tryenter(&pm->pm_lock)) { |
2394 | pmap_reference(pm); | | 2386 | pmap_reference(pm); |
2395 | pmap_release_page_lock(md); | | 2387 | pmap_release_page_lock(md); |
2396 | pmap_acquire_pmap_lock(pm); | | 2388 | pmap_acquire_pmap_lock(pm); |
2397 | /* nothing, just wait for it */ | | 2389 | /* nothing, just wait for it */ |
2398 | pmap_release_pmap_lock(pm); | | 2390 | pmap_release_pmap_lock(pm); |
2399 | pmap_destroy(pm); | | 2391 | pmap_destroy(pm); |
2400 | /* Restart from the beginning. */ | | 2392 | /* Restart from the beginning. */ |
2401 | pmap_acquire_page_lock(md); | | 2393 | pmap_acquire_page_lock(md); |
2402 | pv = SLIST_FIRST(&md->pvh_list); | | 2394 | pv = SLIST_FIRST(&md->pvh_list); |
2403 | continue; | | 2395 | continue; |
2404 | } | | 2396 | } |
2405 | pv->pv_flags &= ~maskbits; | | 2397 | pv->pv_flags &= ~maskbits; |
2406 | | | 2398 | |
2407 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va); | | 2399 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va); |
2408 | KASSERTMSG(l2b != NULL, "%#lx", va); | | 2400 | KASSERTMSG(l2b != NULL, "%#lx", va); |
2409 | | | 2401 | |
2410 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 2402 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; |
2411 | const pt_entry_t opte = *ptep; | | 2403 | const pt_entry_t opte = *ptep; |
2412 | pt_entry_t npte = opte | execbits; | | 2404 | pt_entry_t npte = opte | execbits; |
2413 | | | 2405 | |
2414 | #ifdef ARM_MMU_EXTENDED | | 2406 | #ifdef ARM_MMU_EXTENDED |
2415 | KASSERT((opte & L2_XS_nG) == (pm == pmap_kernel() ? 0 : L2_XS_nG)); | | 2407 | KASSERT((opte & L2_XS_nG) == (pm == pmap_kernel() ? 0 : L2_XS_nG)); |
2416 | #endif | | 2408 | #endif |
2417 | | | 2409 | |
2418 | UVMHIST_LOG(maphist, "pv %#jx pm %#jx va %#jx flag %#jx", | | 2410 | UVMHIST_LOG(maphist, "pv %#jx pm %#jx va %#jx flag %#jx", |
2419 | (uintptr_t)pv, (uintptr_t)pm, va, oflags); | | 2411 | (uintptr_t)pv, (uintptr_t)pm, va, oflags); |
2420 | | | 2412 | |
2421 | if (maskbits & (PVF_WRITE|PVF_MOD)) { | | 2413 | if (maskbits & (PVF_WRITE|PVF_MOD)) { |
2422 | #ifdef PMAP_CACHE_VIVT | | 2414 | #ifdef PMAP_CACHE_VIVT |
2423 | if ((oflags & PVF_NC)) { | | 2415 | if ((oflags & PVF_NC)) { |
2424 | /* | | 2416 | /* |
2425 | * Entry is not cacheable: | | 2417 | * Entry is not cacheable: |
2426 | * | | 2418 | * |
2427 | * Don't turn caching on again if this is a | | 2419 | * Don't turn caching on again if this is a |
2428 | * modified emulation. This would be | | 2420 | * modified emulation. This would be |
2429 | * inconsistent with the settings created by | | 2421 | * inconsistent with the settings created by |
2430 | * pmap_vac_me_harder(). Otherwise, it's safe | | 2422 | * pmap_vac_me_harder(). Otherwise, it's safe |
2431 | * to re-enable caching. | | 2423 | * to re-enable caching. |
2432 | * | | 2424 | * |
2433 | * There's no need to call pmap_vac_me_harder() | | 2425 | * There's no need to call pmap_vac_me_harder() |
2434 | * here: all pages are losing their write | | 2426 | * here: all pages are losing their write |
2435 | * permission. | | 2427 | * permission. |
2436 | */ | | 2428 | */ |
2437 | if (maskbits & PVF_WRITE) { | | 2429 | if (maskbits & PVF_WRITE) { |
2438 | npte |= pte_l2_s_cache_mode; | | 2430 | npte |= pte_l2_s_cache_mode; |
2439 | pv->pv_flags &= ~PVF_NC; | | 2431 | pv->pv_flags &= ~PVF_NC; |
2440 | } | | 2432 | } |
2441 | } else if (l2pte_writable_p(opte)) { | | 2433 | } else if (l2pte_writable_p(opte)) { |
2442 | /* | | 2434 | /* |
2443 | * Entry is writable/cacheable: check if the pmap | | 2435 | * Entry is writable/cacheable: check if the pmap |
2444 | * is current; if it is, flush the page, otherwise | | 2436 | * is current; if it is, flush the page, otherwise |
2445 | * it won't be in the cache. | | 2437 | * it won't be in the cache. |
2446 | */ | | 2438 | */ |
2447 | pmap_cache_wbinv_page(pm, va, | | 2439 | pmap_cache_wbinv_page(pm, va, |
2448 | (maskbits & PVF_REF) != 0, | | 2440 | (maskbits & PVF_REF) != 0, |
2449 | oflags|PVF_WRITE); | | 2441 | oflags|PVF_WRITE); |
2450 | } | | 2442 | } |
2451 | #endif | | 2443 | #endif |
2452 | | | 2444 | |
2453 | /* make the pte read only */ | | 2445 | /* make the pte read only */ |
2454 | npte = l2pte_set_readonly(npte); | | 2446 | npte = l2pte_set_readonly(npte); |
2455 | | | 2447 | |
2456 | if ((maskbits & oflags & PVF_WRITE)) { | | 2448 | if ((maskbits & oflags & PVF_WRITE)) { |
2457 | /* | | 2449 | /* |
2458 | * Keep alias accounting up to date | | 2450 | * Keep alias accounting up to date |
2459 | */ | | 2451 | */ |
2460 | if (pm == pmap_kernel()) { | | 2452 | if (pm == pmap_kernel()) { |
2461 | md->krw_mappings--; | | 2453 | md->krw_mappings--; |
2462 | md->kro_mappings++; | | 2454 | md->kro_mappings++; |
2463 | } else { | | 2455 | } else { |
2464 | md->urw_mappings--; | | 2456 | md->urw_mappings--; |
2465 | md->uro_mappings++; | | 2457 | md->uro_mappings++; |
2466 | } | | 2458 | } |
2467 | #ifdef PMAP_CACHE_VIPT | | 2459 | #ifdef PMAP_CACHE_VIPT |
2468 | if (arm_cache_prefer_mask != 0) { | | 2460 | if (arm_cache_prefer_mask != 0) { |
2469 | if (md->urw_mappings + md->krw_mappings == 0) { | | 2461 | if (md->urw_mappings + md->krw_mappings == 0) { |
2470 | md->pvh_attrs &= ~PVF_WRITE; | | 2462 | md->pvh_attrs &= ~PVF_WRITE; |
2471 | } else { | | 2463 | } else { |
2472 | PMAP_VALIDATE_MD_PAGE(md); | | 2464 | PMAP_VALIDATE_MD_PAGE(md); |
2473 | } | | 2465 | } |
2474 | } | | 2466 | } |
2475 | if (want_syncicache) | | 2467 | if (want_syncicache) |
2476 | need_syncicache = true; | | 2468 | need_syncicache = true; |
2477 | #ifndef ARM_MMU_EXTENDED | | 2469 | #ifndef ARM_MMU_EXTENDED |
2478 | need_vac_me_harder = true; | | 2470 | need_vac_me_harder = true; |
2479 | #endif | | 2471 | #endif |
2480 | #endif /* PMAP_CACHE_VIPT */ | | 2472 | #endif /* PMAP_CACHE_VIPT */ |
2481 | } | | 2473 | } |
2482 | } | | 2474 | } |
2483 | | | 2475 | |
2484 | if (maskbits & PVF_REF) { | | 2476 | if (maskbits & PVF_REF) { |
2485 | if (true | | 2477 | if (true |
2486 | #ifndef ARM_MMU_EXTENDED | | 2478 | #ifndef ARM_MMU_EXTENDED |
2487 | && (oflags & PVF_NC) == 0 | | 2479 | && (oflags & PVF_NC) == 0 |
2488 | #endif | | 2480 | #endif |
2489 | && (maskbits & (PVF_WRITE|PVF_MOD)) == 0 | | 2481 | && (maskbits & (PVF_WRITE|PVF_MOD)) == 0 |
2490 | && l2pte_valid_p(npte)) { | | 2482 | && l2pte_valid_p(npte)) { |
2491 | #ifdef PMAP_CACHE_VIVT | | 2483 | #ifdef PMAP_CACHE_VIVT |
2492 | /* | | 2484 | /* |
2493 | * Check npte here; we may have already | | 2485 | * Check npte here; we may have already |
2494 | * done the wbinv above, and the validity | | 2486 | * done the wbinv above, and the validity |
2495 | * of the PTE is the same for opte and | | 2487 | * of the PTE is the same for opte and |
2496 | * npte. | | 2488 | * npte. |
2497 | */ | | 2489 | */ |
2498 | pmap_cache_wbinv_page(pm, va, true, oflags); | | 2490 | pmap_cache_wbinv_page(pm, va, true, oflags); |
2499 | #endif | | 2491 | #endif |
2500 | } | | 2492 | } |
2501 | | | 2493 | |
2502 | /* | | 2494 | /* |
2503 | * Make the PTE invalid so that we will take a | | 2495 | * Make the PTE invalid so that we will take a |
2504 | * page fault the next time the mapping is | | 2496 | * page fault the next time the mapping is |
2505 | * referenced. | | 2497 | * referenced. |
2506 | */ | | 2498 | */ |
2507 | npte &= ~L2_TYPE_MASK; | | 2499 | npte &= ~L2_TYPE_MASK; |
2508 | npte |= L2_TYPE_INV; | | 2500 | npte |= L2_TYPE_INV; |
2509 | } | | 2501 | } |
2510 | | | 2502 | |
2511 | if (npte != opte) { | | 2503 | if (npte != opte) { |
2512 | l2pte_reset(ptep); | | 2504 | l2pte_reset(ptep); |
2513 | PTE_SYNC(ptep); | | 2505 | PTE_SYNC(ptep); |
2514 | | | 2506 | |
2515 | /* Flush the TLB entry if a current pmap. */ | | 2507 | /* Flush the TLB entry if a current pmap. */ |
2516 | pmap_tlb_flush_SE(pm, va, oflags); | | 2508 | pmap_tlb_flush_SE(pm, va, oflags); |
2517 | | | 2509 | |
2518 | l2pte_set(ptep, npte, 0); | | 2510 | l2pte_set(ptep, npte, 0); |
2519 | PTE_SYNC(ptep); | | 2511 | PTE_SYNC(ptep); |
2520 | } | | 2512 | } |
2521 | | | 2513 | |
2522 | pmap_release_pmap_lock(pm); | | 2514 | pmap_release_pmap_lock(pm); |
2523 | | | 2515 | |
2524 | UVMHIST_LOG(maphist, "pm %#jx va %#jx opte %#jx npte %#jx", | | 2516 | UVMHIST_LOG(maphist, "pm %#jx va %#jx opte %#jx npte %#jx", |
2525 | (uintptr_t)pm, va, opte, npte); | | 2517 | (uintptr_t)pm, va, opte, npte); |
2526 | | | 2518 | |
2527 | /* Move to next entry. */ | | 2519 | /* Move to next entry. */ |
2528 | pv = SLIST_NEXT(pv, pv_link); | | 2520 | pv = SLIST_NEXT(pv, pv_link); |
2529 | } | | 2521 | } |
2530 | | | 2522 | |
2531 | #if defined(PMAP_CACHE_VIPT) | | 2523 | #if defined(PMAP_CACHE_VIPT) |
2532 | /* | | 2524 | /* |
2533 | * If we need to sync the I-cache and we haven't done it yet, do it. | | 2525 | * If we need to sync the I-cache and we haven't done it yet, do it. |
2534 | */ | | 2526 | */ |
2535 | if (need_syncicache) { | | 2527 | if (need_syncicache) { |
2536 | pmap_syncicache_page(md, pa); | | 2528 | pmap_syncicache_page(md, pa); |
2537 | PMAPCOUNT(exec_synced_clearbit); | | 2529 | PMAPCOUNT(exec_synced_clearbit); |
2538 | } | | 2530 | } |
2539 | #ifndef ARM_MMU_EXTENDED | | 2531 | #ifndef ARM_MMU_EXTENDED |
2540 | /* | | 2532 | /* |
2541 | * If we are changing this to read-only, we need to call vac_me_harder | | 2533 | * If we are changing this to read-only, we need to call vac_me_harder |
2542 | * so we can change all the read-only pages to cacheable. We treat | | 2534 | * so we can change all the read-only pages to cacheable. We treat |
2543 | * this as a page deletion. | | 2535 | * this as a page deletion. |
2544 | */ | | 2536 | */ |
2545 | if (need_vac_me_harder) { | | 2537 | if (need_vac_me_harder) { |
2546 | if (md->pvh_attrs & PVF_NC) | | 2538 | if (md->pvh_attrs & PVF_NC) |
2547 | pmap_vac_me_harder(md, pa, NULL, 0); | | 2539 | pmap_vac_me_harder(md, pa, NULL, 0); |
2548 | } | | 2540 | } |
2549 | #endif /* !ARM_MMU_EXTENDED */ | | 2541 | #endif /* !ARM_MMU_EXTENDED */ |
2550 | #endif /* PMAP_CACHE_VIPT */ | | 2542 | #endif /* PMAP_CACHE_VIPT */ |
2551 | } | | 2543 | } |
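/*
 * Illustrative sketch (hypothetical helper, not part of this file): how the
 * downgrades performed by pmap_clearbit() above pair with referenced/
 * modified emulation.  Clearing PVF_REF leaves the PTE with L2_TYPE_INV, and
 * clearing PVF_WRITE/PVF_MOD leaves it read-only, so the next access traps;
 * a fixup routine (pmap_fault_fixup() in the real code) then records the
 * flag and restores the PTE.  The control flow below is a minimal model of
 * that fixup step; the exact revalidation bits are an assumption.
 */
static bool
ref_mod_emulation_sketch(u_int *attrs, pt_entry_t *ptep, bool write)
{
	pt_entry_t pte = *ptep;

	if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
		/* First reference since PVF_REF was cleared. */
		*attrs |= PVF_REF;
		pte = (pte & ~L2_TYPE_MASK) | L2_S_PROTO; /* revalidate */
	} else if (!write) {
		return false;		/* a genuine fault, not emulation */
	}
	if (write) {
		/* First write since PVF_MOD was cleared. */
		*attrs |= PVF_MOD;
		pte = l2pte_set_writable(pte);
	}
	*ptep = pte;			/* caller must PTE_SYNC + TLB flush */
	return true;
}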
2552 | | | 2544 | |
2553 | /* | | 2545 | /* |
2554 | * pmap_clean_page() | | 2546 | * pmap_clean_page() |
2555 | * | | 2547 | * |
2556 | * This is a local function used to work out the best strategy to clean | | 2548 | * This is a local function used to work out the best strategy to clean |
2557 | * a single page referenced by its entry in the PV table. It's used by | | 2549 | * a single page referenced by its entry in the PV table. It's used by |
2558 | * pmap_copy_page, pmap_zero_page and maybe some others later on. | | 2550 | * pmap_copy_page, pmap_zero_page and maybe some others later on. |
2559 | * | | 2551 | * |
2560 | * Its policy is effectively: | | 2552 | * Its policy is effectively: |
2561 | * o If there are no mappings, we don't bother doing anything with the cache. | | 2553 | * o If there are no mappings, we don't bother doing anything with the cache. |
2562 | * o If there is one mapping, we clean just that page. | | 2554 | * o If there is one mapping, we clean just that page. |
2563 | * o If there are multiple mappings, we clean the entire cache. | | 2555 | * o If there are multiple mappings, we clean the entire cache. |
2564 | * | | 2556 | * |
2565 | * So that some functions can be further optimised, it returns 0 if it didn't | | 2557 | * So that some functions can be further optimised, it returns 0 if it didn't |
2566 | * clean the entire cache, or 1 if it did. | | 2558 | * clean the entire cache, or 1 if it did. |
2567 | * | | 2559 | * |
2568 | * XXX One bug in this routine is that if the pv_entry has a single page | | 2560 | * XXX One bug in this routine is that if the pv_entry has a single page |
2569 | * mapped at 0x00000000 a whole cache clean will be performed rather than | | 2561 | * mapped at 0x00000000 a whole cache clean will be performed rather than |
2570 | * just the one page. This should not occur in everyday use, and if it | | 2562 | * just the one page. This should not occur in everyday use, and if it |
2571 | * does, the result is merely a less efficient clean for the page. | | 2563 | * does, the result is merely a less efficient clean for the page. |
2572 | */ | | 2564 | */ |
2573 | #ifdef PMAP_CACHE_VIVT | | 2565 | #ifdef PMAP_CACHE_VIVT |
2574 | static bool | | 2566 | static bool |
2575 | pmap_clean_page(struct vm_page_md *md, bool is_src) | | 2567 | pmap_clean_page(struct vm_page_md *md, bool is_src) |
2576 | { | | 2568 | { |
2577 | struct pv_entry *pv; | | 2569 | struct pv_entry *pv; |
2578 | pmap_t pm_to_clean = NULL; | | 2570 | pmap_t pm_to_clean = NULL; |
2579 | bool cache_needs_cleaning = false; | | 2571 | bool cache_needs_cleaning = false; |
2580 | vaddr_t page_to_clean = 0; | | 2572 | vaddr_t page_to_clean = 0; |
2581 | u_int flags = 0; | | 2573 | u_int flags = 0; |
2582 | | | 2574 | |
2583 | /* | | 2575 | /* |
2584 | * Since we flush the cache each time we change to a different | | 2576 | * Since we flush the cache each time we change to a different |
2585 | * user vmspace, we only need to flush the page if it is in the | | 2577 | * user vmspace, we only need to flush the page if it is in the |
2586 | * current pmap. | | 2578 | * current pmap. |
2587 | */ | | 2579 | */ |
2588 | KASSERT(pmap_page_locked_p(md)); | | 2580 | KASSERT(pmap_page_locked_p(md)); |
2589 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 2581 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
2590 | if (pmap_is_current(pv->pv_pmap)) { | | 2582 | if (pmap_is_current(pv->pv_pmap)) { |
2591 | flags |= pv->pv_flags; | | 2583 | flags |= pv->pv_flags; |
2592 | /* | | 2584 | /* |
2593 | * The page is mapped non-cacheable in | | 2585 | * The page is mapped non-cacheable in |
2594 | * this map. No need to flush the cache. | | 2586 | * this map. No need to flush the cache. |
2595 | */ | | 2587 | */ |
2596 | if (pv->pv_flags & PVF_NC) { | | 2588 | if (pv->pv_flags & PVF_NC) { |
2597 | #ifdef DIAGNOSTIC | | 2589 | #ifdef DIAGNOSTIC |
2598 | KASSERT(!cache_needs_cleaning); | | 2590 | KASSERT(!cache_needs_cleaning); |
2599 | #endif | | 2591 | #endif |
2600 | break; | | 2592 | break; |
2601 | } else if (is_src && (pv->pv_flags & PVF_WRITE) == 0) | | 2593 | } else if (is_src && (pv->pv_flags & PVF_WRITE) == 0) |
2602 | continue; | | 2594 | continue; |
2603 | if (cache_needs_cleaning) { | | 2595 | if (cache_needs_cleaning) { |
2604 | page_to_clean = 0; | | 2596 | page_to_clean = 0; |
2605 | break; | | 2597 | break; |
2606 | } else { | | 2598 | } else { |
2607 | page_to_clean = pv->pv_va; | | 2599 | page_to_clean = pv->pv_va; |
2608 | pm_to_clean = pv->pv_pmap; | | 2600 | pm_to_clean = pv->pv_pmap; |
2609 | } | | 2601 | } |
2610 | cache_needs_cleaning = true; | | 2602 | cache_needs_cleaning = true; |
2611 | } | | 2603 | } |
2612 | } | | 2604 | } |
2613 | | | 2605 | |
2614 | if (page_to_clean) { | | 2606 | if (page_to_clean) { |
2615 | pmap_cache_wbinv_page(pm_to_clean, page_to_clean, | | 2607 | pmap_cache_wbinv_page(pm_to_clean, page_to_clean, |
2616 | !is_src, flags | PVF_REF); | | 2608 | !is_src, flags | PVF_REF); |
2617 | } else if (cache_needs_cleaning) { | | 2609 | } else if (cache_needs_cleaning) { |
2618 | pmap_t const pm = curproc->p_vmspace->vm_map.pmap; | | 2610 | pmap_t const pm = curproc->p_vmspace->vm_map.pmap; |
2619 | | | 2611 | |
2620 | pmap_cache_wbinv_all(pm, flags); | | 2612 | pmap_cache_wbinv_all(pm, flags); |
2621 | return true; | | 2613 | return true; |
2622 | } | | 2614 | } |
2623 | return false; | | 2615 | return false; |
2624 | } | | 2616 | } |
2625 | #endif | | 2617 | #endif |
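/*
 * Illustrative sketch (hypothetical helper): the pmap_clean_page() policy
 * above reduced to its decision table.  Given how many mappings of the page
 * are live in the current VIVT cache, choose between doing nothing, a
 * single-page wbinv, and a whole-cache clean.
 */
enum clean_strategy_sketch { CLEAN_NONE, CLEAN_ONE_PAGE, CLEAN_WHOLE_CACHE };

static enum clean_strategy_sketch
pmap_clean_strategy_sketch(u_int live_mappings)
{
	if (live_mappings == 0)
		return CLEAN_NONE;	/* nothing of ours is in the cache */
	if (live_mappings == 1)
		return CLEAN_ONE_PAGE;	/* wbinv just that one VA */
	return CLEAN_WHOLE_CACHE;	/* cheaper than many page cleans */
}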
2626 | | | 2618 | |
2627 | #ifdef PMAP_CACHE_VIPT | | 2619 | #ifdef PMAP_CACHE_VIPT |
2628 | /* | | 2620 | /* |
2629 | * Sync a page with the I-cache. Since this is a VIPT cache, we must | | 2621 | * Sync a page with the I-cache. Since this is a VIPT cache, we must |
2630 | * pick the right cache alias to make sure we flush the right lines. | | 2622 | * pick the right cache alias to make sure we flush the right lines. |
2631 | */ | | 2623 | */ |
2632 | void | | 2624 | void |
2633 | pmap_syncicache_page(struct vm_page_md *md, paddr_t pa) | | 2625 | pmap_syncicache_page(struct vm_page_md *md, paddr_t pa) |
2634 | { | | 2626 | { |
2635 | pmap_t kpm = pmap_kernel(); | | 2627 | pmap_t kpm = pmap_kernel(); |
2636 | const size_t way_size = arm_pcache.icache_type == CACHE_TYPE_PIPT | | 2628 | const size_t way_size = arm_pcache.icache_type == CACHE_TYPE_PIPT |
2637 | ? PAGE_SIZE | | 2629 | ? PAGE_SIZE |
2638 | : arm_pcache.icache_way_size; | | 2630 | : arm_pcache.icache_way_size; |
2639 | | | 2631 | |
2640 | UVMHIST_FUNC(__func__); | | 2632 | UVMHIST_FUNC(__func__); |
2641 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx (attrs=%#jx)", | | 2633 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx (attrs=%#jx)", |
2642 | (uintptr_t)md, pa, md->pvh_attrs, 0); | | 2634 | (uintptr_t)md, pa, md->pvh_attrs, 0); |
2643 | | | 2635 | |
2644 | /* | | 2636 | /* |
2645 | * No need to clean the page if it's non-cached. | | 2637 | * No need to clean the page if it's non-cached. |
2646 | */ | | 2638 | */ |
2647 | #ifndef ARM_MMU_EXTENDED | | 2639 | #ifndef ARM_MMU_EXTENDED |
2648 | if (md->pvh_attrs & PVF_NC) | | 2640 | if (md->pvh_attrs & PVF_NC) |
2649 | return; | | 2641 | return; |
2650 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED); | | 2642 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED); |
2651 | #endif | | 2643 | #endif |
2652 | | | 2644 | |
2653 | pt_entry_t * const ptep = cpu_cdst_pte(0); | | 2645 | pt_entry_t * const ptep = cpu_cdst_pte(0); |
2654 | const vaddr_t dstp = cpu_cdstp(0); | | 2646 | const vaddr_t dstp = cpu_cdstp(0); |
2655 | #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS | | 2647 | #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS |
2656 | if (way_size <= PAGE_SIZE) { | | 2648 | if (way_size <= PAGE_SIZE) { |
2657 | bool ok = false; | | 2649 | bool ok = false; |
2658 | vaddr_t vdstp = pmap_direct_mapped_phys(pa, &ok, dstp); | | 2650 | vaddr_t vdstp = pmap_direct_mapped_phys(pa, &ok, dstp); |
2659 | if (ok) { | | 2651 | if (ok) { |
2660 | cpu_icache_sync_range(vdstp, way_size); | | 2652 | cpu_icache_sync_range(vdstp, way_size); |
2661 | return; | | 2653 | return; |
2662 | } | | 2654 | } |
2663 | } | | 2655 | } |
2664 | #endif | | 2656 | #endif |
2665 | | | 2657 | |
2666 | /* | | 2658 | /* |
2667 | * We don't worry about the color of the exec page: we map the | | 2659 | * We don't worry about the color of the exec page: we map the |
2668 | * same page at each page offset in the way and then icache_sync | | 2660 | * same page at each page offset in the way and then icache_sync |
2669 | * the entire way, making sure the page is cleaned. | | 2661 | * the entire way, making sure the page is cleaned. |
2670 | */ | | 2662 | */ |
2671 | const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode | | 2663 | const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode |
2672 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE); | | 2664 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE); |
2673 | | | 2665 | |
2674 | for (size_t i = 0, j = 0; i < way_size; | | 2666 | for (size_t i = 0, j = 0; i < way_size; |
2675 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { | | 2667 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { |
2676 | l2pte_reset(ptep + j); | | 2668 | l2pte_reset(ptep + j); |
2677 | PTE_SYNC(ptep + j); | | 2669 | PTE_SYNC(ptep + j); |
2678 | | | 2670 | |
2679 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); | | 2671 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); |
2680 | /* | | 2672 | /* |
2681 | * Set up a PTE to flush these cache lines. | | 2673 | * Set up a PTE to flush these cache lines. |
2682 | */ | | 2674 | */ |
2683 | l2pte_set(ptep + j, npte, 0); | | 2675 | l2pte_set(ptep + j, npte, 0); |
2684 | } | | 2676 | } |
2685 | PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE); | | 2677 | PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE); |
2686 | | | 2678 | |
2687 | /* | | 2679 | /* |
2688 | * Flush it. | | 2680 | * Flush it. |
2689 | */ | | 2681 | */ |
2690 | cpu_icache_sync_range(dstp, way_size); | | 2682 | cpu_icache_sync_range(dstp, way_size); |
2691 | | | 2683 | |
2692 | for (size_t i = 0, j = 0; i < way_size; | | 2684 | for (size_t i = 0, j = 0; i < way_size; |
2693 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { | | 2685 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { |
2694 | /* | | 2686 | /* |
2695 | * Unmap the page(s). | | 2687 | * Unmap the page(s). |
2696 | */ | | 2688 | */ |
2697 | l2pte_reset(ptep + j); | | 2689 | l2pte_reset(ptep + j); |
2698 | PTE_SYNC(ptep + j); | | 2690 | PTE_SYNC(ptep + j); |
2699 | | | 2691 | |
2700 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); | | 2692 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); |
2701 | } | | 2693 | } |
2702 | | | 2694 | |
2703 | md->pvh_attrs |= PVF_EXEC; | | 2695 | md->pvh_attrs |= PVF_EXEC; |
2704 | PMAPCOUNT(exec_synced); | | 2696 | PMAPCOUNT(exec_synced); |
2705 | } | | 2697 | } |
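/*
 * Illustrative sketch (hypothetical helper): the shape of the alias loop in
 * pmap_syncicache_page() above.  One way of a VIPT I-cache spans way_size
 * bytes, so mapping the same physical page at every page-sized offset of
 * that window and syncing the whole window covers every alias the CPU may
 * have fetched through.  The PTE index advances PAGE_SIZE / L2_S_SIZE slots
 * per page because one VM page can take several small-page PTEs.
 */
static void
icache_alias_walk_sketch(size_t way_size,
    void (*visit)(size_t page_off, size_t pte_idx))
{
	for (size_t i = 0, j = 0; i < way_size;
	     i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE)
		(*visit)(i, j);	/* map pa at dstp + i via ptep[j], then sync */
}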
2706 | | | 2698 | |
2707 | #ifndef ARM_MMU_EXTENDED | | 2699 | #ifndef ARM_MMU_EXTENDED |
2708 | void | | 2700 | void |
2709 | pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) | | 2701 | pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) |
2710 | { | | 2702 | { |
2711 | vsize_t va_offset, end_va; | | 2703 | vsize_t va_offset, end_va; |
2712 | bool wbinv_p; | | 2704 | bool wbinv_p; |
2713 | | | 2705 | |
2714 | if (arm_cache_prefer_mask == 0) | | 2706 | if (arm_cache_prefer_mask == 0) |
2715 | return; | | 2707 | return; |
2716 | | | 2708 | |
2717 | UVMHIST_FUNC(__func__); | | 2709 | UVMHIST_FUNC(__func__); |
2718 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx op %#jx", | | 2710 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx op %#jx", |
2719 | (uintptr_t)md, pa, flush, 0); | | 2711 | (uintptr_t)md, pa, flush, 0); |
2720 | | | 2712 | |
2721 | switch (flush) { | | 2713 | switch (flush) { |
2722 | case PMAP_FLUSH_PRIMARY: | | 2714 | case PMAP_FLUSH_PRIMARY: |
2723 | if (md->pvh_attrs & PVF_MULTCLR) { | | 2715 | if (md->pvh_attrs & PVF_MULTCLR) { |
2724 | va_offset = 0; | | 2716 | va_offset = 0; |
2725 | end_va = arm_cache_prefer_mask; | | 2717 | end_va = arm_cache_prefer_mask; |
2726 | md->pvh_attrs &= ~PVF_MULTCLR; | | 2718 | md->pvh_attrs &= ~PVF_MULTCLR; |
2727 | PMAPCOUNT(vac_flush_lots); | | 2719 | PMAPCOUNT(vac_flush_lots); |
2728 | } else { | | 2720 | } else { |
2729 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 2721 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
2730 | end_va = va_offset; | | 2722 | end_va = va_offset; |
2731 | PMAPCOUNT(vac_flush_one); | | 2723 | PMAPCOUNT(vac_flush_one); |
2732 | } | | 2724 | } |
2733 | /* | | 2725 | /* |
2734 | * Mark that the page is no longer dirty. | | 2726 | * Mark that the page is no longer dirty. |
2735 | */ | | 2727 | */ |
2736 | md->pvh_attrs &= ~PVF_DIRTY; | | 2728 | md->pvh_attrs &= ~PVF_DIRTY; |
2737 | wbinv_p = true; | | 2729 | wbinv_p = true; |
2738 | break; | | 2730 | break; |
2739 | case PMAP_FLUSH_SECONDARY: | | 2731 | case PMAP_FLUSH_SECONDARY: |
2740 | va_offset = 0; | | 2732 | va_offset = 0; |
2741 | end_va = arm_cache_prefer_mask; | | 2733 | end_va = arm_cache_prefer_mask; |
2742 | wbinv_p = true; | | 2734 | wbinv_p = true; |
2743 | md->pvh_attrs &= ~PVF_MULTCLR; | | 2735 | md->pvh_attrs &= ~PVF_MULTCLR; |
2744 | PMAPCOUNT(vac_flush_lots); | | 2736 | PMAPCOUNT(vac_flush_lots); |
2745 | break; | | 2737 | break; |
2746 | case PMAP_CLEAN_PRIMARY: | | 2738 | case PMAP_CLEAN_PRIMARY: |
2747 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 2739 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
2748 | end_va = va_offset; | | 2740 | end_va = va_offset; |
2749 | wbinv_p = false; | | 2741 | wbinv_p = false; |
2750 | /* | | 2742 | /* |
2751 | * Mark that the page is no longer dirty. | | 2743 | * Mark that the page is no longer dirty. |
2752 | */ | | 2744 | */ |
2753 | if ((md->pvh_attrs & PVF_DMOD) == 0) | | 2745 | if ((md->pvh_attrs & PVF_DMOD) == 0) |
2754 | md->pvh_attrs &= ~PVF_DIRTY; | | 2746 | md->pvh_attrs &= ~PVF_DIRTY; |
2755 | PMAPCOUNT(vac_clean_one); | | 2747 | PMAPCOUNT(vac_clean_one); |
2756 | break; | | 2748 | break; |
2757 | default: | | 2749 | default: |
2758 | return; | | 2750 | return; |
2759 | } | | 2751 | } |
2760 | | | 2752 | |
2761 | KASSERT(!(md->pvh_attrs & PVF_NC)); | | 2753 | KASSERT(!(md->pvh_attrs & PVF_NC)); |
2762 | | | 2754 | |
2763 | UVMHIST_LOG(maphist, "md %#jx (attrs=%#jx)", (uintptr_t)md, | | 2755 | UVMHIST_LOG(maphist, "md %#jx (attrs=%#jx)", (uintptr_t)md, |
2764 | md->pvh_attrs, 0, 0); | | 2756 | md->pvh_attrs, 0, 0); |
2765 | | | 2757 | |
2766 | const size_t scache_line_size = arm_scache.dcache_line_size; | | 2758 | const size_t scache_line_size = arm_scache.dcache_line_size; |
2767 | | | 2759 | |
2768 | for (; va_offset <= end_va; va_offset += PAGE_SIZE) { | | 2760 | for (; va_offset <= end_va; va_offset += PAGE_SIZE) { |
2769 | pt_entry_t * const ptep = cpu_cdst_pte(va_offset); | | 2761 | pt_entry_t * const ptep = cpu_cdst_pte(va_offset); |
2770 | const vaddr_t dstp = cpu_cdstp(va_offset); | | 2762 | const vaddr_t dstp = cpu_cdstp(va_offset); |
2771 | const pt_entry_t opte = *ptep; | | 2763 | const pt_entry_t opte = *ptep; |
2772 | | | 2764 | |
2773 | if (flush == PMAP_FLUSH_SECONDARY | | 2765 | if (flush == PMAP_FLUSH_SECONDARY |
2774 | && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) | | 2766 | && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) |
2775 | continue; | | 2767 | continue; |
2776 | | | 2768 | |
2777 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); | | 2769 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); |
2778 | /* | | 2770 | /* |
2779 | * Set up a PTE with the right coloring to flush | | 2771 | * Set up a PTE with the right coloring to flush |
2780 | * existing cache entries. | | 2772 | * existing cache entries. |
2781 | */ | | 2773 | */ |
2782 | const pt_entry_t npte = L2_S_PROTO | | 2774 | const pt_entry_t npte = L2_S_PROTO |
2783 | | pa | | 2775 | | pa |
2784 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) | | 2776 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
2785 | | pte_l2_s_cache_mode; | | 2777 | | pte_l2_s_cache_mode; |
2786 | l2pte_set(ptep, npte, opte); | | 2778 | l2pte_set(ptep, npte, opte); |
2787 | PTE_SYNC(ptep); | | 2779 | PTE_SYNC(ptep); |
2788 | | | 2780 | |
2789 | /* | | 2781 | /* |
2790 | * Flush it. Make sure to flush secondary cache too since | | 2782 | * Flush it. Make sure to flush secondary cache too since |
2791 | * bus_dma will ignore uncached pages. | | 2783 | * bus_dma will ignore uncached pages. |
2792 | */ | | 2784 | */ |
2793 | if (scache_line_size != 0) { | | 2785 | if (scache_line_size != 0) { |
2794 | cpu_dcache_wb_range(dstp, PAGE_SIZE); | | 2786 | cpu_dcache_wb_range(dstp, PAGE_SIZE); |
2795 | if (wbinv_p) { | | 2787 | if (wbinv_p) { |
2796 | cpu_sdcache_wbinv_range(dstp, pa, PAGE_SIZE); | | 2788 | cpu_sdcache_wbinv_range(dstp, pa, PAGE_SIZE); |
2797 | cpu_dcache_inv_range(dstp, PAGE_SIZE); | | 2789 | cpu_dcache_inv_range(dstp, PAGE_SIZE); |
2798 | } else { | | 2790 | } else { |
2799 | cpu_sdcache_wb_range(dstp, pa, PAGE_SIZE); | | 2791 | cpu_sdcache_wb_range(dstp, pa, PAGE_SIZE); |
2800 | } | | 2792 | } |
2801 | } else { | | 2793 | } else { |
2802 | if (wbinv_p) { | | 2794 | if (wbinv_p) { |
2803 | cpu_dcache_wbinv_range(dstp, PAGE_SIZE); | | 2795 | cpu_dcache_wbinv_range(dstp, PAGE_SIZE); |
2804 | } else { | | 2796 | } else { |
2805 | cpu_dcache_wb_range(dstp, PAGE_SIZE); | | 2797 | cpu_dcache_wb_range(dstp, PAGE_SIZE); |
2806 | } | | 2798 | } |
2807 | } | | 2799 | } |
2808 | | | 2800 | |
2809 | /* | | 2801 | /* |
2810 | * Restore the page table entry since we might have interrupted | | 2802 | * Restore the page table entry since we might have interrupted |
2811 | * pmap_zero_page or pmap_copy_page which was already using | | 2803 | * pmap_zero_page or pmap_copy_page which was already using |
2812 | * this pte. | | 2804 | * this pte. |
2813 | */ | | 2805 | */ |
2814 | if (opte) { | | 2806 | if (opte) { |
2815 | l2pte_set(ptep, opte, npte); | | 2807 | l2pte_set(ptep, opte, npte); |
2816 | } else { | | 2808 | } else { |
2817 | l2pte_reset(ptep); | | 2809 | l2pte_reset(ptep); |
2818 | } | | 2810 | } |
2819 | PTE_SYNC(ptep); | | 2811 | PTE_SYNC(ptep); |
2820 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); | | 2812 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); |
2821 | } | | 2813 | } |
2822 | } | | 2814 | } |
2823 | #endif /* !ARM_MMU_EXTENDED */ | | 2815 | #endif /* !ARM_MMU_EXTENDED */ |
2824 | #endif /* PMAP_CACHE_VIPT */ | | 2816 | #endif /* PMAP_CACHE_VIPT */ |
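/*
 * Illustrative sketch (hypothetical helper): how the va_offset loop in
 * pmap_flush_page() walks cache colors.  arm_cache_prefer_mask covers the
 * virtual-address bits that select a color, so stepping one page at a time
 * from 0 through the mask visits exactly one mapping window per color.
 */
static u_int
vipt_color_count_sketch(void)
{
	u_int colors = 0;

	if (arm_cache_prefer_mask == 0)
		return 1;	/* no aliasing: the cache behaves as PIPT */

	for (vsize_t off = 0; off <= arm_cache_prefer_mask; off += PAGE_SIZE)
		colors++;

	return colors;	/* == arm_cache_prefer_mask / PAGE_SIZE + 1 */
}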
2825 | | | 2817 | |
2826 | /* | | 2818 | /* |
2827 | * Routine: pmap_page_remove | | 2819 | * Routine: pmap_page_remove |
2828 | * Function: | | 2820 | * Function: |
2829 | * Removes this physical page from | | 2821 | * Removes this physical page from |
2830 | * all physical maps in which it resides. | | 2822 | * all physical maps in which it resides. |
2831 | * Reflects back modify bits to the pager. | | 2823 | * Reflects back modify bits to the pager. |
2832 | */ | | 2824 | */ |
2833 | static void | | 2825 | static void |
2834 | pmap_page_remove(struct vm_page_md *md, paddr_t pa) | | 2826 | pmap_page_remove(struct vm_page_md *md, paddr_t pa) |
2835 | { | | 2827 | { |
2836 | struct l2_bucket *l2b; | | 2828 | struct l2_bucket *l2b; |
2837 | struct pv_entry *pv; | | 2829 | struct pv_entry *pv; |
2838 | pt_entry_t *ptep; | | 2830 | pt_entry_t *ptep; |
2839 | #ifndef ARM_MMU_EXTENDED | | 2831 | #ifndef ARM_MMU_EXTENDED |
2840 | bool flush = false; | | 2832 | bool flush = false; |
2841 | #endif | | 2833 | #endif |
2842 | u_int flags = 0; | | 2834 | u_int flags = 0; |
2843 | | | 2835 | |
2844 | UVMHIST_FUNC(__func__); | | 2836 | UVMHIST_FUNC(__func__); |
2845 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx", (uintptr_t)md, pa, 0, 0); | | 2837 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx", (uintptr_t)md, pa, 0, 0); |
2846 | | | 2838 | |
2847 | struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list); | | 2839 | struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list); |
2848 | pmap_acquire_page_lock(md); | | 2840 | pmap_acquire_page_lock(md); |
2849 | if (*pvp == NULL) { | | 2841 | if (*pvp == NULL) { |
2850 | #ifdef PMAP_CACHE_VIPT | | 2842 | #ifdef PMAP_CACHE_VIPT |
2851 | /* | | 2843 | /* |
2852 | * We *know* the page contents are about to be replaced. | | 2844 | * We *know* the page contents are about to be replaced. |
2853 | * Discard the exec contents | | 2845 | * Discard the exec contents |
2854 | */ | | 2846 | */ |
2855 | if (PV_IS_EXEC_P(md->pvh_attrs)) | | 2847 | if (PV_IS_EXEC_P(md->pvh_attrs)) |
2856 | PMAPCOUNT(exec_discarded_page_protect); | | 2848 | PMAPCOUNT(exec_discarded_page_protect); |
2857 | md->pvh_attrs &= ~PVF_EXEC; | | 2849 | md->pvh_attrs &= ~PVF_EXEC; |
2858 | PMAP_VALIDATE_MD_PAGE(md); | | 2850 | PMAP_VALIDATE_MD_PAGE(md); |
2859 | #endif | | 2851 | #endif |
2860 | pmap_release_page_lock(md); | | 2852 | pmap_release_page_lock(md); |
2861 | return; | | 2853 | return; |
2862 | } | | 2854 | } |
2863 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 2855 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
2864 | KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); | | 2856 | KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); |
2865 | #endif | | 2857 | #endif |
2866 | | | 2858 | |
2867 | /* | | 2859 | /* |
2868 | * Clear alias counts | | 2860 | * Clear alias counts |
2869 | */ | | 2861 | */ |
2870 | #ifdef PMAP_CACHE_VIVT | | 2862 | #ifdef PMAP_CACHE_VIVT |
2871 | md->k_mappings = 0; | | 2863 | md->k_mappings = 0; |
2872 | #endif | | 2864 | #endif |
2873 | md->urw_mappings = md->uro_mappings = 0; | | 2865 | md->urw_mappings = md->uro_mappings = 0; |
2874 | | | 2866 | |
2875 | #ifdef PMAP_CACHE_VIVT | | 2867 | #ifdef PMAP_CACHE_VIVT |
2876 | pmap_clean_page(md, false); | | 2868 | pmap_clean_page(md, false); |
2877 | #endif | | 2869 | #endif |
2878 | | | 2870 | |
2879 | for (pv = *pvp; pv != NULL;) { | | 2871 | for (pv = *pvp; pv != NULL;) { |
2880 | pmap_t pm = pv->pv_pmap; | | 2872 | pmap_t pm = pv->pv_pmap; |
2881 | #ifndef ARM_MMU_EXTENDED | | 2873 | #ifndef ARM_MMU_EXTENDED |
2882 | if (flush == false && pmap_is_current(pm)) | | 2874 | if (flush == false && pmap_is_current(pm)) |
2883 | flush = true; | | 2875 | flush = true; |
2884 | #endif | | 2876 | #endif |
2885 | | | 2877 | |
2886 | #ifdef PMAP_CACHE_VIPT | | 2878 | #ifdef PMAP_CACHE_VIPT |
2887 | if (pm == pmap_kernel() && PV_IS_KENTRY_P(pv->pv_flags)) { | | 2879 | if (pm == pmap_kernel() && PV_IS_KENTRY_P(pv->pv_flags)) { |
2888 | /* If this was an unmanaged mapping, it must be ignored. */ | | 2880 | /* If this was an unmanaged mapping, it must be ignored. */ |
2889 | pvp = &SLIST_NEXT(pv, pv_link); | | 2881 | pvp = &SLIST_NEXT(pv, pv_link); |
2890 | pv = *pvp; | | 2882 | pv = *pvp; |
2891 | continue; | | 2883 | continue; |
2892 | } | | 2884 | } |
2893 | #endif | | 2885 | #endif |
2894 | | | 2886 | |
2895 | /* | | 2887 | /* |
2896 | * Try to get a hold on the pmap's lock. We must do this | | 2888 | * Try to get a hold on the pmap's lock. We must do this |
2897 | * while still holding the page locked, to know that the | | 2889 | * while still holding the page locked, to know that the |
2898 | * page is still associated with the pmap and the mapping is | | 2890 | * page is still associated with the pmap and the mapping is |
2899 | * in place. If a hold can't be had, unlock and wait for | | 2891 | * in place. If a hold can't be had, unlock and wait for |
2900 | * the pmap's lock to become available and retry. The pmap | | 2892 | * the pmap's lock to become available and retry. The pmap |
2901 | * must be ref'd over this dance to stop it disappearing | | 2893 | * must be ref'd over this dance to stop it disappearing |
2902 | * behind us. | | 2894 | * behind us. |
2903 | */ | | 2895 | */ |
2904 | if (!mutex_tryenter(&pm->pm_lock)) { | | 2896 | if (!mutex_tryenter(&pm->pm_lock)) { |
2905 | pmap_reference(pm); | | 2897 | pmap_reference(pm); |
2906 | pmap_release_page_lock(md); | | 2898 | pmap_release_page_lock(md); |
2907 | pmap_acquire_pmap_lock(pm); | | 2899 | pmap_acquire_pmap_lock(pm); |
2908 | /* nothing, just wait for it */ | | 2900 | /* nothing, just wait for it */ |
2909 | pmap_release_pmap_lock(pm); | | 2901 | pmap_release_pmap_lock(pm); |
2910 | pmap_destroy(pm); | | 2902 | pmap_destroy(pm); |
2911 | /* Restart from the beginning. */ | | 2903 | /* Restart from the beginning. */ |
2912 | pmap_acquire_page_lock(md); | | 2904 | pmap_acquire_page_lock(md); |
2913 | pvp = &SLIST_FIRST(&md->pvh_list); | | 2905 | pvp = &SLIST_FIRST(&md->pvh_list); |
2914 | pv = *pvp; | | 2906 | pv = *pvp; |
2915 | continue; | | 2907 | continue; |
2916 | } | | 2908 | } |
2917 | | | 2909 | |
2918 | if (pm == pmap_kernel()) { | | 2910 | if (pm == pmap_kernel()) { |
2919 | #ifdef PMAP_CACHE_VIPT | | 2911 | #ifdef PMAP_CACHE_VIPT |
2920 | if (pv->pv_flags & PVF_WRITE) | | 2912 | if (pv->pv_flags & PVF_WRITE) |
2921 | md->krw_mappings--; | | 2913 | md->krw_mappings--; |
2922 | else | | 2914 | else |
2923 | md->kro_mappings--; | | 2915 | md->kro_mappings--; |
2924 | #endif | | 2916 | #endif |
2925 | PMAPCOUNT(kernel_unmappings); | | 2917 | PMAPCOUNT(kernel_unmappings); |
2926 | } | | 2918 | } |
2927 | *pvp = SLIST_NEXT(pv, pv_link); /* remove from list */ | | 2919 | *pvp = SLIST_NEXT(pv, pv_link); /* remove from list */ |
2928 | PMAPCOUNT(unmappings); | | 2920 | PMAPCOUNT(unmappings); |
2929 | | | 2921 | |
2930 | pmap_release_page_lock(md); | | 2922 | pmap_release_page_lock(md); |
2931 | | | 2923 | |
2932 | l2b = pmap_get_l2_bucket(pm, pv->pv_va); | | 2924 | l2b = pmap_get_l2_bucket(pm, pv->pv_va); |
2933 | KASSERTMSG(l2b != NULL, "%#lx", pv->pv_va); | | 2925 | KASSERTMSG(l2b != NULL, "%#lx", pv->pv_va); |
2934 | | | 2926 | |
2935 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 2927 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
2936 | | | 2928 | |
2937 | /* | | 2929 | /* |
2938 | * Update statistics | | 2930 | * Update statistics |
2939 | */ | | 2931 | */ |
2940 | --pm->pm_stats.resident_count; | | 2932 | --pm->pm_stats.resident_count; |
2941 | | | 2933 | |
2942 | /* Wired bit */ | | 2934 | /* Wired bit */ |
2943 | if (pv->pv_flags & PVF_WIRED) | | 2935 | if (pv->pv_flags & PVF_WIRED) |
2944 | --pm->pm_stats.wired_count; | | 2936 | --pm->pm_stats.wired_count; |
2945 | | | 2937 | |
2946 | flags |= pv->pv_flags; | | 2938 | flags |= pv->pv_flags; |
2947 | | | 2939 | |
2948 | /* | | 2940 | /* |
2949 | * Invalidate the PTEs. | | 2941 | * Invalidate the PTEs. |
2950 | */ | | 2942 | */ |
2951 | l2pte_reset(ptep); | | 2943 | l2pte_reset(ptep); |
2952 | PTE_SYNC_CURRENT(pm, ptep); | | 2944 | PTE_SYNC_CURRENT(pm, ptep); |
2953 | | | 2945 | |
2954 | #ifdef ARM_MMU_EXTENDED | | 2946 | #ifdef ARM_MMU_EXTENDED |
2955 | pmap_tlb_invalidate_addr(pm, pv->pv_va); | | 2947 | pmap_tlb_invalidate_addr(pm, pv->pv_va); |
2956 | #endif | | 2948 | #endif |
2957 | | | 2949 | |
2958 | pmap_free_l2_bucket(pm, l2b, PAGE_SIZE / L2_S_SIZE); | | 2950 | pmap_free_l2_bucket(pm, l2b, PAGE_SIZE / L2_S_SIZE); |
2959 | | | 2951 | |
2960 | pmap_release_pmap_lock(pm); | | 2952 | pmap_release_pmap_lock(pm); |
2961 | | | 2953 | |
2962 | pool_put(&pmap_pv_pool, pv); | | 2954 | pool_put(&pmap_pv_pool, pv); |
2963 | pmap_acquire_page_lock(md); | | 2955 | pmap_acquire_page_lock(md); |
2964 | | | 2956 | |
2965 | /* | | 2957 | /* |
2966 | * Restart at the beginning of the list. | | 2958 | * Restart at the beginning of the list. |
2967 | */ | | 2959 | */ |
2968 | pvp = &SLIST_FIRST(&md->pvh_list); | | 2960 | pvp = &SLIST_FIRST(&md->pvh_list); |
2969 | pv = *pvp; | | 2961 | pv = *pvp; |
2970 | } | | 2962 | } |
2971 | /* | | 2963 | /* |
2972 | * If we reach the end of the list and there are still mappings, they | | 2964 | * If we reach the end of the list and there are still mappings, they |
2973 | * might be cacheable now, and they must be kernel mappings. | | 2965 | * might be cacheable now, and they must be kernel mappings. |
2974 | */ | | 2966 | */ |
2975 | if (!SLIST_EMPTY(&md->pvh_list)) { | | 2967 | if (!SLIST_EMPTY(&md->pvh_list)) { |
2976 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); | | 2968 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); |
2977 | } | | 2969 | } |
2978 | | | 2970 | |
2979 | #ifdef PMAP_CACHE_VIPT | | 2971 | #ifdef PMAP_CACHE_VIPT |
2980 | /* | | 2972 | /* |
2981 | * Its EXEC cache is now gone. | | 2973 | * Its EXEC cache is now gone. |
2982 | */ | | 2974 | */ |
2983 | if (PV_IS_EXEC_P(md->pvh_attrs)) | | 2975 | if (PV_IS_EXEC_P(md->pvh_attrs)) |
2984 | PMAPCOUNT(exec_discarded_page_protect); | | 2976 | PMAPCOUNT(exec_discarded_page_protect); |
2985 | md->pvh_attrs &= ~PVF_EXEC; | | 2977 | md->pvh_attrs &= ~PVF_EXEC; |
2986 | KASSERT(md->urw_mappings == 0); | | 2978 | KASSERT(md->urw_mappings == 0); |
2987 | KASSERT(md->uro_mappings == 0); | | 2979 | KASSERT(md->uro_mappings == 0); |
2988 | #ifndef ARM_MMU_EXTENDED | | 2980 | #ifndef ARM_MMU_EXTENDED |
2989 | if (arm_cache_prefer_mask != 0) { | | 2981 | if (arm_cache_prefer_mask != 0) { |
2990 | if (md->krw_mappings == 0) | | 2982 | if (md->krw_mappings == 0) |
2991 | md->pvh_attrs &= ~PVF_WRITE; | | 2983 | md->pvh_attrs &= ~PVF_WRITE; |
2992 | PMAP_VALIDATE_MD_PAGE(md); | | 2984 | PMAP_VALIDATE_MD_PAGE(md); |
2993 | } | | 2985 | } |
2994 | #endif /* ARM_MMU_EXTENDED */ | | 2986 | #endif /* ARM_MMU_EXTENDED */ |
2995 | #endif /* PMAP_CACHE_VIPT */ | | 2987 | #endif /* PMAP_CACHE_VIPT */ |
2996 | pmap_release_page_lock(md); | | 2988 | pmap_release_page_lock(md); |
2997 | | | 2989 | |
2998 | #ifndef ARM_MMU_EXTENDED | | 2990 | #ifndef ARM_MMU_EXTENDED |
2999 | if (flush) { | | 2991 | if (flush) { |
3000 | /* | | 2992 | /* |
3001 | * Note: We can't use pmap_tlb_flush{I,D}() here since that | | 2993 | * Note: We can't use pmap_tlb_flush{I,D}() here since that |
3002 | * would need a subsequent call to pmap_update() to ensure | | 2994 | * would need a subsequent call to pmap_update() to ensure |
3003 | * curpm->pm_cstate.cs_all is reset. Our callers are not | | 2995 | * curpm->pm_cstate.cs_all is reset. Our callers are not |
3004 | * required to do that (see pmap(9)), so we can't modify | | 2996 | * required to do that (see pmap(9)), so we can't modify |
3005 | * the current pmap's state. | | 2997 | * the current pmap's state. |
3006 | */ | | 2998 | */ |
3007 | if (PV_BEEN_EXECD(flags)) | | 2999 | if (PV_BEEN_EXECD(flags)) |
3008 | cpu_tlb_flushID(); | | 3000 | cpu_tlb_flushID(); |
3009 | else | | 3001 | else |
3010 | cpu_tlb_flushD(); | | 3002 | cpu_tlb_flushD(); |
3011 | } | | 3003 | } |
3012 | cpu_cpwait(); | | 3004 | cpu_cpwait(); |
3013 | #endif /* !ARM_MMU_EXTENDED */ | | 3005 | #endif /* !ARM_MMU_EXTENDED */ |
3014 | } | | 3006 | } |
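/*
 * Illustrative sketch of the lock-ordering dance pmap_page_remove() (and
 * pmap_clearbit() earlier) uses: with the page lock held we may only *try*
 * the pmap lock.  On failure we take a reference so the pmap can't vanish,
 * drop the page lock, block until the pmap lock is free, and restart the PV
 * walk from the top.  The function names are from this file; the helper
 * itself is hypothetical.
 */
static bool
pv_trylock_pmap_sketch(struct vm_page_md *md, pmap_t pm)
{
	if (mutex_tryenter(&pm->pm_lock))
		return true;		/* got it; keep walking the PV list */

	pmap_reference(pm);		/* keep pm alive over the dance */
	pmap_release_page_lock(md);
	pmap_acquire_pmap_lock(pm);	/* just wait for the holder ... */
	pmap_release_pmap_lock(pm);	/* ... we don't need to keep it */
	pmap_destroy(pm);		/* drop our reference */
	pmap_acquire_page_lock(md);
	return false;			/* caller must restart the walk */
}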
3015 | | | 3007 | |
3016 | /* | | 3008 | /* |
3017 | * pmap_t pmap_create(void) | | 3009 | * pmap_t pmap_create(void) |
3018 | * | | 3010 | * |
3019 | * Create a new pmap structure from scratch. | | 3011 | * Create a new pmap structure from scratch. |
3020 | */ | | 3012 | */ |
3021 | pmap_t | | 3013 | pmap_t |
3022 | pmap_create(void) | | 3014 | pmap_create(void) |
3023 | { | | 3015 | { |
3024 | pmap_t pm; | | 3016 | pmap_t pm; |
3025 | | | 3017 | |
3026 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); | | 3018 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); |
3027 | | | 3019 | |
3028 | mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_NONE); | | 3020 | mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_NONE); |
3029 | | | 3021 | |
3030 | pm->pm_refs = 1; | | 3022 | pm->pm_refs = 1; |
3031 | pm->pm_stats.wired_count = 0; | | 3023 | pm->pm_stats.wired_count = 0; |
3032 | pm->pm_stats.resident_count = 1; | | 3024 | pm->pm_stats.resident_count = 1; |
3033 | #ifdef ARM_MMU_EXTENDED | | 3025 | #ifdef ARM_MMU_EXTENDED |
3034 | #ifdef MULTIPROCESSOR | | 3026 | #ifdef MULTIPROCESSOR |
3035 | kcpuset_create(&pm->pm_active, true); | | 3027 | kcpuset_create(&pm->pm_active, true); |
3036 | kcpuset_create(&pm->pm_onproc, true); | | 3028 | kcpuset_create(&pm->pm_onproc, true); |
3037 | #endif | | 3029 | #endif |
3038 | #else | | 3030 | #else |
3039 | pm->pm_cstate.cs_all = 0; | | 3031 | pm->pm_cstate.cs_all = 0; |
3040 | #endif | | 3032 | #endif |
3041 | pmap_alloc_l1(pm); | | 3033 | pmap_alloc_l1(pm); |
3042 | | | 3034 | |
3043 | /* | | 3035 | /* |
3044 | * Note: The pool cache ensures that the pm_l2[] array is already | | 3036 | * Note: The pool cache ensures that the pm_l2[] array is already |
3045 | * initialised to zero. | | 3037 | * initialised to zero. |
3046 | */ | | 3038 | */ |
3047 | | | 3039 | |
3048 | pmap_pinit(pm); | | 3040 | pmap_pinit(pm); |
3049 | | | 3041 | |
3050 | return pm; | | 3042 | return pm; |
3051 | } | | 3043 | } |
3052 | | | 3044 | |
3053 | u_int | | 3045 | u_int |
3054 | arm32_mmap_flags(paddr_t pa) | | 3046 | arm32_mmap_flags(paddr_t pa) |
3055 | { | | 3047 | { |
3056 | /* | | 3048 | /* |
3057 | * The upper 8 bits of pmap_enter()'s flags are reserved for MD use, | | 3049 | * The upper 8 bits of pmap_enter()'s flags are reserved for MD use, |
3058 | * and we're using the upper bits of page numbers to pass flags around, | | 3050 | * and we're using the upper bits of page numbers to pass flags around, |
3059 | * so we might as well use the same bits. | | 3051 | * so we might as well use the same bits. |
3060 | */ | | 3052 | */ |
3061 | return (u_int)pa & PMAP_MD_MASK; | | 3053 | return (u_int)pa & PMAP_MD_MASK; |
3062 | } | | 3054 | } |
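/*
 * Illustrative sketch: since arm32_mmap_flags() just masks the value it is
 * given, MD flags can ride in the otherwise-unused upper bits of a physical
 * address cookie and survive the trip into pmap_enter()'s flags.  This
 * assumes ARM32_MMAP_WRITECOMBINE lies within PMAP_MD_MASK; the helper is
 * hypothetical.
 */
static u_int
mmap_flags_roundtrip_sketch(paddr_t pa)
{
	/* A driver mmap() hook tags the page it hands back ... */
	const paddr_t cookie = pa | ARM32_MMAP_WRITECOMBINE;

	/* ... and the MD bits are recovered intact later on. */
	return arm32_mmap_flags(cookie);  /* ARM32_MMAP_WRITECOMBINE is set */
}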
3063 | /* | | 3055 | /* |
3064 | * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, | | 3056 | * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, |
3065 | * u_int flags) | | 3057 | * u_int flags) |
3066 | * | | 3058 | * |
3067 | * Insert the given physical page (p) at | | 3059 | * Insert the given physical page (p) at |
3068 | * the specified virtual address (v) in the | | 3060 | * the specified virtual address (v) in the |
3069 | * target physical map with the protection requested. | | 3061 | * target physical map with the protection requested. |
3070 | * | | 3062 | * |
3071 | * NB: This is the only routine which MAY NOT lazy-evaluate | | 3063 | * NB: This is the only routine which MAY NOT lazy-evaluate |
3072 | * or lose information. That is, this routine must actually | | 3064 | * or lose information. That is, this routine must actually |
3073 | * insert this page into the given map NOW. | | 3065 | * insert this page into the given map NOW. |
3074 | */ | | 3066 | */ |
3075 | int | | 3067 | int |
3076 | pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 3068 | pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
3077 | { | | 3069 | { |
3078 | struct l2_bucket *l2b; | | 3070 | struct l2_bucket *l2b; |
3079 | struct vm_page *pg, *opg; | | 3071 | struct vm_page *pg, *opg; |
3080 | u_int nflags; | | 3072 | u_int nflags; |
3081 | u_int oflags; | | 3073 | u_int oflags; |
3082 | const bool kpm_p = (pm == pmap_kernel()); | | 3074 | const bool kpm_p = (pm == pmap_kernel()); |
3083 | #ifdef ARM_HAS_VBAR | | 3075 | #ifdef ARM_HAS_VBAR |
3084 | const bool vector_page_p = false; | | 3076 | const bool vector_page_p = false; |
3085 | #else | | 3077 | #else |
3086 | const bool vector_page_p = (va == vector_page); | | 3078 | const bool vector_page_p = (va == vector_page); |
3087 | #endif | | 3079 | #endif |
3088 | struct pmap_page *pp = pmap_pv_tracked(pa); | | 3080 | struct pmap_page *pp = pmap_pv_tracked(pa); |
3089 | struct pv_entry *new_pv = NULL; | | 3081 | struct pv_entry *new_pv = NULL; |
3090 | struct pv_entry *old_pv = NULL; | | 3082 | struct pv_entry *old_pv = NULL; |
3091 | int error = 0; | | 3083 | int error = 0; |
3092 | | | 3084 | |
3093 | UVMHIST_FUNC(__func__); | | 3085 | UVMHIST_FUNC(__func__); |
3094 | UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pa %#jx prot %#jx", | | 3086 | UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pa %#jx prot %#jx", |
3095 | (uintptr_t)pm, va, pa, prot); | | 3087 | (uintptr_t)pm, va, pa, prot); |
3096 | UVMHIST_LOG(maphist, " flag %#jx", flags, 0, 0, 0); | | 3088 | UVMHIST_LOG(maphist, " flag %#jx", flags, 0, 0, 0); |
3097 | | | 3089 | |
3098 | KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); | | 3090 | KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); |
3099 | KDASSERT(((va | pa) & PGOFSET) == 0); | | 3091 | KDASSERT(((va | pa) & PGOFSET) == 0); |
3100 | | | 3092 | |
3101 | /* | | 3093 | /* |
3102 | * Get a pointer to the page. Later on in this function, we | | 3094 | * Get a pointer to the page. Later on in this function, we |
3103 | * test for a managed page by checking pg != NULL. | | 3095 | * test for a managed page by checking pg != NULL. |
3104 | */ | | 3096 | */ |
3105 | pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; | | 3097 | pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; |
3106 | /* | | 3098 | /* |
3107 | * If we may need a new pv entry, allocate it now, as we can't do it | | 3099 | * If we may need a new pv entry, allocate it now, as we can't do it |
3108 | * with the kernel_pmap locked. | | 3100 | * with the kernel_pmap locked. |
3109 | */ | | 3101 | */ |
3110 | if (pg || pp) | | 3102 | if (pg || pp) |
3111 | new_pv = pool_get(&pmap_pv_pool, PR_NOWAIT); | | 3103 | new_pv = pool_get(&pmap_pv_pool, PR_NOWAIT); |
3112 | | | 3104 | |
3113 | nflags = 0; | | 3105 | nflags = 0; |
3114 | if (prot & VM_PROT_WRITE) | | 3106 | if (prot & VM_PROT_WRITE) |
3115 | nflags |= PVF_WRITE; | | 3107 | nflags |= PVF_WRITE; |
3116 | if (prot & VM_PROT_EXECUTE) | | 3108 | if (prot & VM_PROT_EXECUTE) |
3117 | nflags |= PVF_EXEC; | | 3109 | nflags |= PVF_EXEC; |
3118 | if (flags & PMAP_WIRED) | | 3110 | if (flags & PMAP_WIRED) |
3119 | nflags |= PVF_WIRED; | | 3111 | nflags |= PVF_WIRED; |
3120 | | | 3112 | |
3121 | pmap_acquire_pmap_lock(pm); | | 3113 | pmap_acquire_pmap_lock(pm); |
3122 | | | 3114 | |
3123 | /* | | 3115 | /* |
3124 | * Fetch the L2 bucket which maps this page, allocating one if | | 3116 | * Fetch the L2 bucket which maps this page, allocating one if |
3125 | * necessary for user pmaps. | | 3117 | * necessary for user pmaps. |
3126 | */ | | 3118 | */ |
3127 | if (kpm_p) { | | 3119 | if (kpm_p) { |
3128 | l2b = pmap_get_l2_bucket(pm, va); | | 3120 | l2b = pmap_get_l2_bucket(pm, va); |
3129 | } else { | | 3121 | } else { |
3130 | l2b = pmap_alloc_l2_bucket(pm, va); | | 3122 | l2b = pmap_alloc_l2_bucket(pm, va); |
3131 | } | | 3123 | } |
3132 | if (l2b == NULL) { | | 3124 | if (l2b == NULL) { |
3133 | if (flags & PMAP_CANFAIL) { | | 3125 | if (flags & PMAP_CANFAIL) { |
3134 | pmap_release_pmap_lock(pm); | | 3126 | pmap_release_pmap_lock(pm); |
3135 | error = ENOMEM; | | 3127 | error = ENOMEM; |
3136 | goto free_pv; | | 3128 | goto free_pv; |
3137 | } | | 3129 | } |
3138 | panic("pmap_enter: failed to allocate L2 bucket"); | | 3130 | panic("pmap_enter: failed to allocate L2 bucket"); |
3139 | } | | 3131 | } |
3140 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3132 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3141 | const pt_entry_t opte = *ptep; | | 3133 | const pt_entry_t opte = *ptep; |
3142 | pt_entry_t npte = pa; | | 3134 | pt_entry_t npte = pa; |
3143 | oflags = 0; | | 3135 | oflags = 0; |
3144 | | | 3136 | |
3145 | if (opte) { | | 3137 | if (opte) { |
3146 | /* | | 3138 | /* |
3147 | * There is already a mapping at this address. | | 3139 | * There is already a mapping at this address. |
3148 | * If the physical address is different, lookup the | | 3140 | * If the physical address is different, lookup the |
3149 | * vm_page. | | 3141 | * vm_page. |
3150 | */ | | 3142 | */ |
3151 | if (l2pte_pa(opte) != pa) { | | 3143 | if (l2pte_pa(opte) != pa) { |
3152 | KASSERT(!pmap_pv_tracked(pa)); | | 3144 | KASSERT(!pmap_pv_tracked(pa)); |
3153 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3145 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3154 | } else | | 3146 | } else |
3155 | opg = pg; | | 3147 | opg = pg; |
3156 | } else | | 3148 | } else |
3157 | opg = NULL; | | 3149 | opg = NULL; |
3158 | | | 3150 | |
3159 | if (pg || pp) { | | 3151 | if (pg || pp) { |
3160 | KASSERT((pg != NULL) != (pp != NULL)); | | 3152 | KASSERT((pg != NULL) != (pp != NULL)); |
3161 | struct vm_page_md *md = (pg != NULL) ? VM_PAGE_TO_MD(pg) : | | 3153 | struct vm_page_md *md = (pg != NULL) ? VM_PAGE_TO_MD(pg) : |
3162 | PMAP_PAGE_TO_MD(pp); | | 3154 | PMAP_PAGE_TO_MD(pp); |
3163 | | | 3155 | |
3164 | /* | | 3156 | /* |
3165 | * This is to be a managed mapping. | | 3157 | * This is to be a managed mapping. |
3166 | */ | | 3158 | */ |
3167 | pmap_acquire_page_lock(md); | | 3159 | pmap_acquire_page_lock(md); |
3168 | if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) { | | 3160 | if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) { |
3169 | /* | | 3161 | /* |
3170 | * - The access type indicates that we don't need | | 3162 | * - The access type indicates that we don't need |
3171 | * to do referenced emulation. | | 3163 | * to do referenced emulation. |
3172 | * OR | | 3164 | * OR |
3173 | * - The physical page has already been referenced | | 3165 | * - The physical page has already been referenced |
3174 | * so no need to re-do referenced emulation here. | | 3166 | * so no need to re-do referenced emulation here. |
3175 | */ | | 3167 | */ |
3176 | npte |= l2pte_set_readonly(L2_S_PROTO); | | 3168 | npte |= l2pte_set_readonly(L2_S_PROTO); |
3177 | | | 3169 | |
3178 | nflags |= PVF_REF; | | 3170 | nflags |= PVF_REF; |
3179 | | | 3171 | |
3180 | if ((prot & VM_PROT_WRITE) != 0 && | | 3172 | if ((prot & VM_PROT_WRITE) != 0 && |
3181 | ((flags & VM_PROT_WRITE) != 0 || | | 3173 | ((flags & VM_PROT_WRITE) != 0 || |
3182 | (md->pvh_attrs & PVF_MOD) != 0)) { | | 3174 | (md->pvh_attrs & PVF_MOD) != 0)) { |
3183 | /* | | 3175 | /* |
3184 | * This is a writable mapping, and the | | 3176 | * This is a writable mapping, and the |
3185 | * page's mod state indicates it has | | 3177 | * page's mod state indicates it has |
3186 | * already been modified. Make it | | 3178 | * already been modified. Make it |
3187 | * writable from the outset. | | 3179 | * writable from the outset. |
3188 | */ | | 3180 | */ |
3189 | npte = l2pte_set_writable(npte); | | 3181 | npte = l2pte_set_writable(npte); |
3190 | nflags |= PVF_MOD; | | 3182 | nflags |= PVF_MOD; |
3191 | } | | 3183 | } |
3192 | | | 3184 | |
3193 | #ifdef ARM_MMU_EXTENDED | | 3185 | #ifdef ARM_MMU_EXTENDED |
3194 | /* | | 3186 | /* |
3195 | * If the page has been cleaned, then the pvh_attrs | | 3187 | * If the page has been cleaned, then the pvh_attrs |
3196 | * will have PVF_EXEC set, so mark it executable so we | | 3188 | * will have PVF_EXEC set, so mark it executable so we |
3197 | * don't get an access fault when trying to execute | | 3189 | * don't get an access fault when trying to execute |
3198 | * from it. | | 3190 | * from it. |
3199 | */ | | 3191 | */ |
3200 | if (md->pvh_attrs & nflags & PVF_EXEC) { | | 3192 | if (md->pvh_attrs & nflags & PVF_EXEC) { |
3201 | npte &= ~L2_XS_XN; | | 3193 | npte &= ~L2_XS_XN; |
3202 | } | | 3194 | } |
3203 | #endif | | 3195 | #endif |
3204 | } else { | | 3196 | } else { |
3205 | /* | | 3197 | /* |
3206 | * Need to do page referenced emulation. | | 3198 | * Need to do page referenced emulation. |
3207 | */ | | 3199 | */ |
3208 | npte |= L2_TYPE_INV; | | 3200 | npte |= L2_TYPE_INV; |
3209 | } | | 3201 | } |
3210 | | | 3202 | |
3211 | if (flags & ARM32_MMAP_WRITECOMBINE) { | | 3203 | if (flags & ARM32_MMAP_WRITECOMBINE) { |
3212 | npte |= pte_l2_s_wc_mode; | | 3204 | npte |= pte_l2_s_wc_mode; |
3213 | } else | | 3205 | } else |
3214 | npte |= pte_l2_s_cache_mode; | | 3206 | npte |= pte_l2_s_cache_mode; |
3215 | | | 3207 | |
3216 | if (pg != NULL && pg == opg) { | | 3208 | if (pg != NULL && pg == opg) { |
3217 | /* | | 3209 | /* |
3218 | * We're changing the attrs of an existing mapping. | | 3210 | * We're changing the attrs of an existing mapping. |
3219 | */ | | 3211 | */ |
3220 | oflags = pmap_modify_pv(md, pa, pm, va, | | 3212 | oflags = pmap_modify_pv(md, pa, pm, va, |
3221 | PVF_WRITE | PVF_EXEC | PVF_WIRED | | | 3213 | PVF_WRITE | PVF_EXEC | PVF_WIRED | |
3222 | PVF_MOD | PVF_REF, nflags); | | 3214 | PVF_MOD | PVF_REF, nflags); |
3223 | | | 3215 | |
3224 | #ifdef PMAP_CACHE_VIVT | | 3216 | #ifdef PMAP_CACHE_VIVT |
3225 | /* | | 3217 | /* |
3226 | * We may need to flush the cache if we're | | 3218 | * We may need to flush the cache if we're |
3227 | * doing rw-ro... | | 3219 | * doing rw-ro... |
3228 | */ | | 3220 | */ |
3229 | if (pm->pm_cstate.cs_cache_d && | | 3221 | if (pm->pm_cstate.cs_cache_d && |
3230 | (oflags & PVF_NC) == 0 && | | 3222 | (oflags & PVF_NC) == 0 && |
3231 | l2pte_writable_p(opte) && | | 3223 | l2pte_writable_p(opte) && |
3232 | (prot & VM_PROT_WRITE) == 0) | | 3224 | (prot & VM_PROT_WRITE) == 0) |
3233 | cpu_dcache_wb_range(va, PAGE_SIZE); | | 3225 | cpu_dcache_wb_range(va, PAGE_SIZE); |
3234 | #endif | | 3226 | #endif |
3235 | } else { | | 3227 | } else { |
3236 | struct pv_entry *pv; | | 3228 | struct pv_entry *pv; |
3237 | /* | | 3229 | /* |
3238 | * New mapping, or changing the backing page | | 3230 | * New mapping, or changing the backing page |
3239 | * of an existing mapping. | | 3231 | * of an existing mapping. |
3240 | */ | | 3232 | */ |
3241 | if (opg) { | | 3233 | if (opg) { |
3242 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 3234 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
3243 | paddr_t opa = VM_PAGE_TO_PHYS(opg); | | 3235 | paddr_t opa = VM_PAGE_TO_PHYS(opg); |
3244 | | | 3236 | |
3245 | /* | | 3237 | /* |
3246 | * Replacing an existing mapping with a new one. | | 3238 | * Replacing an existing mapping with a new one. |
3247 | * It is part of our managed memory so we | | 3239 | * It is part of our managed memory so we |
3248 | * must remove it from the PV list | | 3240 | * must remove it from the PV list |
3249 | */ | | 3241 | */ |
3250 | pv = pmap_remove_pv(omd, opa, pm, va); | | 3242 | pv = pmap_remove_pv(omd, opa, pm, va); |
3251 | pmap_vac_me_harder(omd, opa, pm, 0); | | 3243 | pmap_vac_me_harder(omd, opa, pm, 0); |
3252 | oflags = pv->pv_flags; | | 3244 | oflags = pv->pv_flags; |
3253 | | | 3245 | |
3254 | #ifdef PMAP_CACHE_VIVT | | 3246 | #ifdef PMAP_CACHE_VIVT |
3255 | /* | | 3247 | /* |
3256 | * If the old mapping was valid (ref/mod | | 3248 | * If the old mapping was valid (ref/mod |
3257 | * emulation creates 'invalid' mappings | | 3249 | * emulation creates 'invalid' mappings |
3258 | * initially) then make sure to frob | | 3250 | * initially) then make sure to frob |
3259 | * the cache. | | 3251 | * the cache. |
3260 | */ | | 3252 | */ |
3261 | if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { | | 3253 | if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { |
3262 | pmap_cache_wbinv_page(pm, va, true, | | 3254 | pmap_cache_wbinv_page(pm, va, true, |
3263 | oflags); | | 3255 | oflags); |
3264 | } | | 3256 | } |
3265 | #endif | | 3257 | #endif |
3266 | } else { | | 3258 | } else { |
3267 | pv = new_pv; | | 3259 | pv = new_pv; |
3268 | new_pv = NULL; | | 3260 | new_pv = NULL; |
3269 | if (pv == NULL) { | | 3261 | if (pv == NULL) { |
3270 | pmap_release_page_lock(md); | | 3262 | pmap_release_page_lock(md); |
3271 | pmap_release_pmap_lock(pm); | | 3263 | pmap_release_pmap_lock(pm); |
3272 | if ((flags & PMAP_CANFAIL) == 0) | | 3264 | if ((flags & PMAP_CANFAIL) == 0) |
3273 | panic("pmap_enter: " | | 3265 | panic("pmap_enter: " |
3274 | "no pv entries"); | | 3266 | "no pv entries"); |
3275 | | | 3267 | |
3276 | pmap_free_l2_bucket(pm, l2b, 0); | | 3268 | pmap_free_l2_bucket(pm, l2b, 0); |
3277 | UVMHIST_LOG(maphist, " <-- done (ENOMEM)", | | 3269 | UVMHIST_LOG(maphist, " <-- done (ENOMEM)", |
3278 | 0, 0, 0, 0); | | 3270 | 0, 0, 0, 0); |
3279 | return ENOMEM; | | 3271 | return ENOMEM; |
3280 | } | | 3272 | } |
3281 | } | | 3273 | } |
3282 | | | 3274 | |
3283 | pmap_enter_pv(md, pa, pv, pm, va, nflags); | | 3275 | pmap_enter_pv(md, pa, pv, pm, va, nflags); |
3284 | } | | 3276 | } |
3285 | pmap_release_page_lock(md); | | 3277 | pmap_release_page_lock(md); |
3286 | } else { | | 3278 | } else { |
3287 | /* | | 3279 | /* |
3288 | * We're mapping an unmanaged page. | | 3280 | * We're mapping an unmanaged page. |
3289 | * These are always readable, and possibly writable, from | | 3281 | * These are always readable, and possibly writable, from |
3290 | * the get go as we don't need to track ref/mod status. | | 3282 | * the get go as we don't need to track ref/mod status. |
3291 | */ | | 3283 | */ |
3292 | npte |= l2pte_set_readonly(L2_S_PROTO); | | 3284 | npte |= l2pte_set_readonly(L2_S_PROTO); |
3293 | if (prot & VM_PROT_WRITE) | | 3285 | if (prot & VM_PROT_WRITE) |
3294 | npte = l2pte_set_writable(npte); | | 3286 | npte = l2pte_set_writable(npte); |
3295 | | | 3287 | |
3296 | /* | | 3288 | /* |
3297 | * Make sure the vector table is mapped cacheable | | 3289 | * Make sure the vector table is mapped cacheable |
3298 | */ | | 3290 | */ |
3299 | if ((vector_page_p && !kpm_p) | | 3291 | if ((vector_page_p && !kpm_p) |
3300 | || (flags & ARM32_MMAP_CACHEABLE)) { | | 3292 | || (flags & ARM32_MMAP_CACHEABLE)) { |
3301 | npte |= pte_l2_s_cache_mode; | | 3293 | npte |= pte_l2_s_cache_mode; |
3302 | #ifdef ARM_MMU_EXTENDED | | 3294 | #ifdef ARM_MMU_EXTENDED |
3303 | npte &= ~L2_XS_XN; /* and executable */ | | 3295 | npte &= ~L2_XS_XN; /* and executable */ |
3304 | #endif | | 3296 | #endif |
3305 | } else if (flags & ARM32_MMAP_WRITECOMBINE) { | | 3297 | } else if (flags & ARM32_MMAP_WRITECOMBINE) { |
3306 | npte |= pte_l2_s_wc_mode; | | 3298 | npte |= pte_l2_s_wc_mode; |
3307 | } | | 3299 | } |
3308 | if (opg) { | | 3300 | if (opg) { |
3309 | /* | | 3301 | /* |
3310 | * Looks like there's an existing 'managed' mapping | | 3302 | * Looks like there's an existing 'managed' mapping |
3311 | * at this address. | | 3303 | * at this address. |
3312 | */ | | 3304 | */ |
3313 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 3305 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
3314 | paddr_t opa = VM_PAGE_TO_PHYS(opg); | | 3306 | paddr_t opa = VM_PAGE_TO_PHYS(opg); |
3315 | | | 3307 | |
3316 | pmap_acquire_page_lock(omd); | | 3308 | pmap_acquire_page_lock(omd); |
3317 | old_pv = pmap_remove_pv(omd, opa, pm, va); | | 3309 | old_pv = pmap_remove_pv(omd, opa, pm, va); |
3318 | pmap_vac_me_harder(omd, opa, pm, 0); | | 3310 | pmap_vac_me_harder(omd, opa, pm, 0); |
3319 | oflags = old_pv->pv_flags; | | 3311 | oflags = old_pv->pv_flags; |
3320 | pmap_release_page_lock(omd); | | 3312 | pmap_release_page_lock(omd); |
3321 | | | 3313 | |
#ifdef PMAP_CACHE_VIVT
			if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) {
				pmap_cache_wbinv_page(pm, va, true, oflags);
			}
#endif
		}
	}

	/*
	 * Make sure userland mappings get the right permissions
	 */
	if (!vector_page_p && !kpm_p) {
		npte |= L2_S_PROT_U;
#ifdef ARM_MMU_EXTENDED
		npte |= L2_XS_nG;	/* user pages are not global */
#endif
	}

	/*
	 * Keep the stats up to date: a previously invalid PTE means this
	 * is a brand new mapping, which occupies PTE slots in the L2
	 * bucket and adds a resident page.
	 */
	if (opte == 0) {
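		/*
		 * l2b_occupancy counts L2 PTE slots in use; one VM page
		 * consumes PAGE_SIZE / L2_S_SIZE slots (more than one when
		 * the VM page size exceeds the 4KB small page).
		 */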
		l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE;
		pm->pm_stats.resident_count++;
	}

	UVMHIST_LOG(maphist, " opte %#jx npte %#jx", opte, npte, 0, 0);

#if defined(ARM_MMU_EXTENDED)
	/*
	 * If exec protection was requested but the page hasn't been synced,
	 * sync it now and allow execution from it.
	 */
	if ((nflags & PVF_EXEC) && (npte & L2_XS_XN)) {
		struct vm_page_md *md = VM_PAGE_TO_MD(pg);
		npte &= ~L2_XS_XN;
		pmap_syncicache_page(md, pa);
		PMAPCOUNT(exec_synced_map);
	}
#endif
	/*
	 * If this is just a wiring change, the two PTEs will be
	 * identical, so there's no need to update the page table.
	 */
	if (npte != opte) {
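		/*
		 * Break-before-make: clear the PTE and flush any stale TLB
		 * entry before installing the new PTE, so the MMU never
		 * walks a half-updated mapping.  PTE_SYNC() makes each PTE
		 * write visible to the hardware table walker.
		 */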
		l2pte_reset(ptep);
		PTE_SYNC(ptep);
		if (l2pte_valid_p(opte)) {
			pmap_tlb_flush_SE(pm, va, oflags);
		}
		l2pte_set(ptep, npte, 0);
		PTE_SYNC(ptep);
#ifndef ARM_MMU_EXTENDED
		bool is_cached = pmap_is_cached(pm);
		if (is_cached) {
			/*
			 * We only need to frob the cache/tlb if this pmap
			 * is current
			 */
			if (!vector_page_p && l2pte_valid_p(npte)) {
				/*
				 * This mapping is likely to be accessed as