| @@ -1,1194 +1,1194 @@ | | | @@ -1,1194 +1,1194 @@ |
1 | /* $NetBSD: pmap.c,v 1.425 2021/02/01 19:02:28 skrll Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.426 2021/03/14 10:36:46 skrll Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright 2003 Wasabi Systems, Inc. | | 4 | * Copyright 2003 Wasabi Systems, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Written by Steve C. Woodford for Wasabi Systems, Inc. | | 7 | * Written by Steve C. Woodford for Wasabi Systems, Inc. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the | | 15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. | | 16 | * documentation and/or other materials provided with the distribution. |
17 | * 3. All advertising materials mentioning features or use of this software | | 17 | * 3. All advertising materials mentioning features or use of this software |
18 | * must display the following acknowledgement: | | 18 | * must display the following acknowledgement: |
19 | * This product includes software developed for the NetBSD Project by | | 19 | * This product includes software developed for the NetBSD Project by |
20 | * Wasabi Systems, Inc. | | 20 | * Wasabi Systems, Inc. |
21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | | 21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
22 | * or promote products derived from this software without specific prior | | 22 | * or promote products derived from this software without specific prior |
23 | * written permission. | | 23 | * written permission. |
24 | * | | 24 | * |
25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | | 25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | | 28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
35 | * POSSIBILITY OF SUCH DAMAGE. | | 35 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ | | 36 | */ |
37 | | | 37 | |
38 | /* | | 38 | /* |
39 | * Copyright (c) 2002-2003 Wasabi Systems, Inc. | | 39 | * Copyright (c) 2002-2003 Wasabi Systems, Inc. |
40 | * Copyright (c) 2001 Richard Earnshaw | | 40 | * Copyright (c) 2001 Richard Earnshaw |
41 | * Copyright (c) 2001-2002 Christopher Gilbert | | 41 | * Copyright (c) 2001-2002 Christopher Gilbert |
42 | * All rights reserved. | | 42 | * All rights reserved. |
43 | * | | 43 | * |
44 | * 1. Redistributions of source code must retain the above copyright | | 44 | * 1. Redistributions of source code must retain the above copyright |
45 | * notice, this list of conditions and the following disclaimer. | | 45 | * notice, this list of conditions and the following disclaimer. |
46 | * 2. Redistributions in binary form must reproduce the above copyright | | 46 | * 2. Redistributions in binary form must reproduce the above copyright |
47 | * notice, this list of conditions and the following disclaimer in the | | 47 | * notice, this list of conditions and the following disclaimer in the |
48 | * documentation and/or other materials provided with the distribution. | | 48 | * documentation and/or other materials provided with the distribution. |
49 | * 3. The name of the company nor the name of the author may be used to | | 49 | * 3. The name of the company nor the name of the author may be used to |
50 | * endorse or promote products derived from this software without specific | | 50 | * endorse or promote products derived from this software without specific |
51 | * prior written permission. | | 51 | * prior written permission. |
52 | * | | 52 | * |
53 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED | | 53 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
54 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | | 54 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
55 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 55 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
56 | * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, | | 56 | * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, |
57 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | | 57 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
58 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | | 58 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
59 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 59 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
63 | * SUCH DAMAGE. | | 63 | * SUCH DAMAGE. |
64 | */ | | 64 | */ |
65 | | | 65 | |
66 | /*- | | 66 | /*- |
67 | * Copyright (c) 1999, 2020 The NetBSD Foundation, Inc. | | 67 | * Copyright (c) 1999, 2020 The NetBSD Foundation, Inc. |
68 | * All rights reserved. | | 68 | * All rights reserved. |
69 | * | | 69 | * |
70 | * This code is derived from software contributed to The NetBSD Foundation | | 70 | * This code is derived from software contributed to The NetBSD Foundation |
71 | * by Charles M. Hannum. | | 71 | * by Charles M. Hannum. |
72 | * | | 72 | * |
73 | * Redistribution and use in source and binary forms, with or without | | 73 | * Redistribution and use in source and binary forms, with or without |
74 | * modification, are permitted provided that the following conditions | | 74 | * modification, are permitted provided that the following conditions |
75 | * are met: | | 75 | * are met: |
76 | * 1. Redistributions of source code must retain the above copyright | | 76 | * 1. Redistributions of source code must retain the above copyright |
77 | * notice, this list of conditions and the following disclaimer. | | 77 | * notice, this list of conditions and the following disclaimer. |
78 | * 2. Redistributions in binary form must reproduce the above copyright | | 78 | * 2. Redistributions in binary form must reproduce the above copyright |
79 | * notice, this list of conditions and the following disclaimer in the | | 79 | * notice, this list of conditions and the following disclaimer in the |
80 | * documentation and/or other materials provided with the distribution. | | 80 | * documentation and/or other materials provided with the distribution. |
81 | * | | 81 | * |
82 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 82 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
83 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 83 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
84 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 84 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
85 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 85 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
86 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 86 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
87 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 87 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
88 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 88 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
89 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 89 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
90 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 90 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
91 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 91 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
92 | * POSSIBILITY OF SUCH DAMAGE. | | 92 | * POSSIBILITY OF SUCH DAMAGE. |
93 | */ | | 93 | */ |
94 | | | 94 | |
95 | /* | | 95 | /* |
96 | * Copyright (c) 1994-1998 Mark Brinicombe. | | 96 | * Copyright (c) 1994-1998 Mark Brinicombe. |
97 | * Copyright (c) 1994 Brini. | | 97 | * Copyright (c) 1994 Brini. |
98 | * All rights reserved. | | 98 | * All rights reserved. |
99 | * | | 99 | * |
100 | * This code is derived from software written for Brini by Mark Brinicombe | | 100 | * This code is derived from software written for Brini by Mark Brinicombe |
101 | * | | 101 | * |
102 | * Redistribution and use in source and binary forms, with or without | | 102 | * Redistribution and use in source and binary forms, with or without |
103 | * modification, are permitted provided that the following conditions | | 103 | * modification, are permitted provided that the following conditions |
104 | * are met: | | 104 | * are met: |
105 | * 1. Redistributions of source code must retain the above copyright | | 105 | * 1. Redistributions of source code must retain the above copyright |
106 | * notice, this list of conditions and the following disclaimer. | | 106 | * notice, this list of conditions and the following disclaimer. |
107 | * 2. Redistributions in binary form must reproduce the above copyright | | 107 | * 2. Redistributions in binary form must reproduce the above copyright |
108 | * notice, this list of conditions and the following disclaimer in the | | 108 | * notice, this list of conditions and the following disclaimer in the |
109 | * documentation and/or other materials provided with the distribution. | | 109 | * documentation and/or other materials provided with the distribution. |
110 | * 3. All advertising materials mentioning features or use of this software | | 110 | * 3. All advertising materials mentioning features or use of this software |
111 | * must display the following acknowledgement: | | 111 | * must display the following acknowledgement: |
112 | * This product includes software developed by Mark Brinicombe. | | 112 | * This product includes software developed by Mark Brinicombe. |
113 | * 4. The name of the author may not be used to endorse or promote products | | 113 | * 4. The name of the author may not be used to endorse or promote products |
114 | * derived from this software without specific prior written permission. | | 114 | * derived from this software without specific prior written permission. |
115 | * | | 115 | * |
116 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 116 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
117 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 117 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
118 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 118 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
119 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 119 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
120 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 120 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
121 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 121 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
122 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 122 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
123 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 123 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
124 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 124 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
125 | * | | 125 | * |
126 | * RiscBSD kernel project | | 126 | * RiscBSD kernel project |
127 | * | | 127 | * |
128 | * pmap.c | | 128 | * pmap.c |
129 | * | | 129 | * |
130 | * Machine dependent vm stuff | | 130 | * Machine dependent vm stuff |
131 | * | | 131 | * |
132 | * Created : 20/09/94 | | 132 | * Created : 20/09/94 |
133 | */ | | 133 | */ |
134 | | | 134 | |
135 | /* | | 135 | /* |
136 | * armv6 and VIPT cache support by 3am Software Foundry, | | 136 | * armv6 and VIPT cache support by 3am Software Foundry, |
137 | * Copyright (c) 2007 Microsoft | | 137 | * Copyright (c) 2007 Microsoft |
138 | */ | | 138 | */ |
139 | | | 139 | |
140 | /* | | 140 | /* |
141 | * Performance improvements, UVM changes, overhauls and part-rewrites | | 141 | * Performance improvements, UVM changes, overhauls and part-rewrites |
142 | * were contributed by Neil A. Carson <neil@causality.com>. | | 142 | * were contributed by Neil A. Carson <neil@causality.com>. |
143 | */ | | 143 | */ |
144 | | | 144 | |
145 | /* | | 145 | /* |
146 | * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables | | 146 | * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables |
147 | * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi | | 147 | * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi |
148 | * Systems, Inc. | | 148 | * Systems, Inc. |
149 | * | | 149 | * |
150 | * There are still a few things outstanding at this time: | | 150 | * There are still a few things outstanding at this time: |
151 | * | | 151 | * |
152 | * - There are some unresolved issues for MP systems: | | 152 | * - There are some unresolved issues for MP systems: |
153 | * | | 153 | * |
154 | * o The L1 metadata needs a lock, or more specifically, some places | | 154 | * o The L1 metadata needs a lock, or more specifically, some places |
155 | * need to acquire an exclusive lock when modifying L1 translation | | 155 | * need to acquire an exclusive lock when modifying L1 translation |
156 | * table entries. | | 156 | * table entries. |
157 | * | | 157 | * |
158 | * o When one cpu modifies an L1 entry, and that L1 table is also | | 158 | * o When one cpu modifies an L1 entry, and that L1 table is also |
159 | * being used by another cpu, then the latter will need to be told | | 159 | * being used by another cpu, then the latter will need to be told |
160 | * that a tlb invalidation may be necessary. (But only if the old | | 160 | * that a tlb invalidation may be necessary. (But only if the old |
161 | * domain number in the L1 entry being over-written is currently | | 161 | * domain number in the L1 entry being over-written is currently |
162 | * the active domain on that cpu). I guess there are lots more tlb | | 162 | * the active domain on that cpu). I guess there are lots more tlb |
163 | * shootdown issues too... | | 163 | * shootdown issues too... |
164 | * | | 164 | * |
165 | * o If the vector_page is at 0x00000000 instead of in kernel VA space, | | 165 | * o If the vector_page is at 0x00000000 instead of in kernel VA space, |
166 | * then MP systems will lose big-time because of the MMU domain hack. | | 166 | * then MP systems will lose big-time because of the MMU domain hack. |
167 | * The only way this can be solved (apart from moving the vector | | 167 | * The only way this can be solved (apart from moving the vector |
168 | * page to 0xffff0000) is to reserve the first 1MB of user address | | 168 | * page to 0xffff0000) is to reserve the first 1MB of user address |
169 | * space for kernel use only. This would require re-linking all | | 169 | * space for kernel use only. This would require re-linking all |
170 | * applications so that the text section starts above this 1MB | | 170 | * applications so that the text section starts above this 1MB |
171 | * boundary. | | 171 | * boundary. |
172 | * | | 172 | * |
173 | * o Tracking which VM space is resident in the cache/tlb has not yet | | 173 | * o Tracking which VM space is resident in the cache/tlb has not yet |
174 | * been implemented for MP systems. | | 174 | * been implemented for MP systems. |
175 | * | | 175 | * |
176 | * o Finally, there is a pathological condition where two cpus running | | 176 | * o Finally, there is a pathological condition where two cpus running |
177 | * two separate processes (not lwps) which happen to share an L1 | | 177 | * two separate processes (not lwps) which happen to share an L1 |
178 | * can get into a fight over one or more L1 entries. This will result | | 178 | * can get into a fight over one or more L1 entries. This will result |
179 | * in a significant slow-down if both processes are in tight loops. | | 179 | * in a significant slow-down if both processes are in tight loops. |
180 | */ | | 180 | */ |
181 | | | 181 | |
182 | /* Include header files */ | | 182 | /* Include header files */ |
183 | | | 183 | |
184 | #include "opt_arm_debug.h" | | 184 | #include "opt_arm_debug.h" |
185 | #include "opt_cpuoptions.h" | | 185 | #include "opt_cpuoptions.h" |
186 | #include "opt_ddb.h" | | 186 | #include "opt_ddb.h" |
187 | #include "opt_lockdebug.h" | | 187 | #include "opt_lockdebug.h" |
188 | #include "opt_multiprocessor.h" | | 188 | #include "opt_multiprocessor.h" |
189 | | | 189 | |
190 | #ifdef MULTIPROCESSOR | | 190 | #ifdef MULTIPROCESSOR |
191 | #define _INTR_PRIVATE | | 191 | #define _INTR_PRIVATE |
192 | #endif | | 192 | #endif |
193 | | | 193 | |
194 | #include <sys/cdefs.h> | | 194 | #include <sys/cdefs.h> |
195 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.425 2021/02/01 19:02:28 skrll Exp $"); | | 195 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.426 2021/03/14 10:36:46 skrll Exp $"); |
196 | | | 196 | |
197 | #include <sys/param.h> | | 197 | #include <sys/param.h> |
198 | #include <sys/types.h> | | 198 | #include <sys/types.h> |
199 | | | 199 | |
200 | #include <sys/asan.h> | | 200 | #include <sys/asan.h> |
201 | #include <sys/atomic.h> | | 201 | #include <sys/atomic.h> |
202 | #include <sys/bus.h> | | 202 | #include <sys/bus.h> |
203 | #include <sys/cpu.h> | | 203 | #include <sys/cpu.h> |
204 | #include <sys/intr.h> | | 204 | #include <sys/intr.h> |
205 | #include <sys/kernel.h> | | 205 | #include <sys/kernel.h> |
206 | #include <sys/kernhist.h> | | 206 | #include <sys/kernhist.h> |
207 | #include <sys/kmem.h> | | 207 | #include <sys/kmem.h> |
208 | #include <sys/pool.h> | | 208 | #include <sys/pool.h> |
209 | #include <sys/proc.h> | | 209 | #include <sys/proc.h> |
210 | #include <sys/sysctl.h> | | 210 | #include <sys/sysctl.h> |
211 | #include <sys/systm.h> | | 211 | #include <sys/systm.h> |
212 | | | 212 | |
213 | #include <uvm/uvm.h> | | 213 | #include <uvm/uvm.h> |
214 | #include <uvm/pmap/pmap_pvt.h> | | 214 | #include <uvm/pmap/pmap_pvt.h> |
215 | | | 215 | |
216 | #include <arm/locore.h> | | 216 | #include <arm/locore.h> |
217 | | | 217 | |
218 | #ifdef DDB | | 218 | #ifdef DDB |
219 | #include <arm/db_machdep.h> | | 219 | #include <arm/db_machdep.h> |
220 | #endif | | 220 | #endif |
221 | | | 221 | |
222 | #ifdef VERBOSE_INIT_ARM | | 222 | #ifdef VERBOSE_INIT_ARM |
223 | #define VPRINTF(...) printf(__VA_ARGS__) | | 223 | #define VPRINTF(...) printf(__VA_ARGS__) |
224 | #else | | 224 | #else |
225 | #define VPRINTF(...) __nothing | | 225 | #define VPRINTF(...) __nothing |
226 | #endif | | 226 | #endif |
227 | | | 227 | |
228 | /* | | 228 | /* |
229 | * pmap_kernel() points here | | 229 | * pmap_kernel() points here |
230 | */ | | 230 | */ |
231 | static struct pmap kernel_pmap_store = { | | 231 | static struct pmap kernel_pmap_store = { |
232 | #ifndef ARM_MMU_EXTENDED | | 232 | #ifndef ARM_MMU_EXTENDED |
233 | .pm_activated = true, | | 233 | .pm_activated = true, |
234 | .pm_domain = PMAP_DOMAIN_KERNEL, | | 234 | .pm_domain = PMAP_DOMAIN_KERNEL, |
235 | .pm_cstate.cs_all = PMAP_CACHE_STATE_ALL, | | 235 | .pm_cstate.cs_all = PMAP_CACHE_STATE_ALL, |
236 | #endif | | 236 | #endif |
237 | }; | | 237 | }; |
238 | struct pmap * const kernel_pmap_ptr = &kernel_pmap_store; | | 238 | struct pmap * const kernel_pmap_ptr = &kernel_pmap_store; |
239 | #undef pmap_kernel | | 239 | #undef pmap_kernel |
240 | #define pmap_kernel() (&kernel_pmap_store) | | 240 | #define pmap_kernel() (&kernel_pmap_store) |
241 | #ifdef PMAP_NEED_ALLOC_POOLPAGE | | 241 | #ifdef PMAP_NEED_ALLOC_POOLPAGE |
242 | int arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT; | | 242 | int arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT; |
243 | #endif | | 243 | #endif |
244 | | | 244 | |
245 | /* | | 245 | /* |
246 | * Pool and cache that pmap structures are allocated from. | | 246 | * Pool and cache that pmap structures are allocated from. |
247 | * We use a cache to avoid clearing the pm_l2[] array (1KB) | | 247 | * We use a cache to avoid clearing the pm_l2[] array (1KB) |
248 | * in pmap_create(). | | 248 | * in pmap_create(). |
249 | */ | | 249 | */ |
250 | static struct pool_cache pmap_cache; | | 250 | static struct pool_cache pmap_cache; |
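
The recycling trick described above relies on the cache constructor, pmap_pmap_ctor(), declared near the end of this excerpt: a pool-cache object is zeroed once, when first constructed, and pool_cache_get() hands back recycled objects without re-running the constructor, so a pmap whose pm_l2[] emptied out during teardown comes back still clean. A minimal sketch of that constructor (illustrative body, not quoted from the file):

    static int
    pmap_pmap_ctor(void *arg, void *v, int flags)
    {
        /* Runs once per object, not on every pool_cache_get(). */
        memset(v, 0, sizeof(struct pmap));
        return 0;
    }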
251 | | | 251 | |
252 | /* | | 252 | /* |
253 | * Pool of PV structures | | 253 | * Pool of PV structures |
254 | */ | | 254 | */ |
255 | static struct pool pmap_pv_pool; | | 255 | static struct pool pmap_pv_pool; |
256 | static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); | | 256 | static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); |
257 | static void pmap_bootstrap_pv_page_free(struct pool *, void *); | | 257 | static void pmap_bootstrap_pv_page_free(struct pool *, void *); |
258 | static struct pool_allocator pmap_bootstrap_pv_allocator = { | | 258 | static struct pool_allocator pmap_bootstrap_pv_allocator = { |
259 | pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free | | 259 | pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free |
260 | }; | | 260 | }; |
261 | | | 261 | |
262 | /* | | 262 | /* |
263 | * Pool and cache of l2_dtable structures. | | 263 | * Pool and cache of l2_dtable structures. |
264 | * We use a cache to avoid clearing the structures when they're | | 264 | * We use a cache to avoid clearing the structures when they're |
265 | * allocated. (196 bytes) | | 265 | * allocated. (196 bytes) |
266 | */ | | 266 | */ |
267 | static struct pool_cache pmap_l2dtable_cache; | | 267 | static struct pool_cache pmap_l2dtable_cache; |
268 | static vaddr_t pmap_kernel_l2dtable_kva; | | 268 | static vaddr_t pmap_kernel_l2dtable_kva; |
269 | | | 269 | |
270 | /* | | 270 | /* |
271 | * Pool and cache of L2 page descriptors. | | 271 | * Pool and cache of L2 page descriptors. |
272 | * We use a cache to avoid clearing the descriptor table | | 272 | * We use a cache to avoid clearing the descriptor table |
273 | * when they're allocated. (1KB) | | 273 | * when they're allocated. (1KB) |
274 | */ | | 274 | */ |
275 | static struct pool_cache pmap_l2ptp_cache; | | 275 | static struct pool_cache pmap_l2ptp_cache; |
276 | static vaddr_t pmap_kernel_l2ptp_kva; | | 276 | static vaddr_t pmap_kernel_l2ptp_kva; |
277 | static paddr_t pmap_kernel_l2ptp_phys; | | 277 | static paddr_t pmap_kernel_l2ptp_phys; |
278 | | | 278 | |
279 | #ifdef PMAPCOUNTERS | | 279 | #ifdef PMAPCOUNTERS |
280 | #define PMAP_EVCNT_INITIALIZER(name) \ | | 280 | #define PMAP_EVCNT_INITIALIZER(name) \ |
281 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) | | 281 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) |
282 | | | 282 | |
283 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 283 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
284 | static struct evcnt pmap_ev_vac_clean_one = | | 284 | static struct evcnt pmap_ev_vac_clean_one = |
285 | PMAP_EVCNT_INITIALIZER("clean page (1 color)"); | | 285 | PMAP_EVCNT_INITIALIZER("clean page (1 color)"); |
286 | static struct evcnt pmap_ev_vac_flush_one = | | 286 | static struct evcnt pmap_ev_vac_flush_one = |
287 | PMAP_EVCNT_INITIALIZER("flush page (1 color)"); | | 287 | PMAP_EVCNT_INITIALIZER("flush page (1 color)"); |
288 | static struct evcnt pmap_ev_vac_flush_lots = | | 288 | static struct evcnt pmap_ev_vac_flush_lots = |
289 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors)"); | | 289 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors)"); |
290 | static struct evcnt pmap_ev_vac_flush_lots2 = | | 290 | static struct evcnt pmap_ev_vac_flush_lots2 = |
291 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)"); | | 291 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)"); |
292 | EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one); | | 292 | EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one); |
293 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one); | | 293 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one); |
294 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots); | | 294 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots); |
295 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2); | | 295 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2); |
296 | | | 296 | |
297 | static struct evcnt pmap_ev_vac_color_new = | | 297 | static struct evcnt pmap_ev_vac_color_new = |
298 | PMAP_EVCNT_INITIALIZER("new page color"); | | 298 | PMAP_EVCNT_INITIALIZER("new page color"); |
299 | static struct evcnt pmap_ev_vac_color_reuse = | | 299 | static struct evcnt pmap_ev_vac_color_reuse = |
300 | PMAP_EVCNT_INITIALIZER("ok first page color"); | | 300 | PMAP_EVCNT_INITIALIZER("ok first page color"); |
301 | static struct evcnt pmap_ev_vac_color_ok = | | 301 | static struct evcnt pmap_ev_vac_color_ok = |
302 | PMAP_EVCNT_INITIALIZER("ok page color"); | | 302 | PMAP_EVCNT_INITIALIZER("ok page color"); |
303 | static struct evcnt pmap_ev_vac_color_blind = | | 303 | static struct evcnt pmap_ev_vac_color_blind = |
304 | PMAP_EVCNT_INITIALIZER("blind page color"); | | 304 | PMAP_EVCNT_INITIALIZER("blind page color"); |
305 | static struct evcnt pmap_ev_vac_color_change = | | 305 | static struct evcnt pmap_ev_vac_color_change = |
306 | PMAP_EVCNT_INITIALIZER("change page color"); | | 306 | PMAP_EVCNT_INITIALIZER("change page color"); |
307 | static struct evcnt pmap_ev_vac_color_erase = | | 307 | static struct evcnt pmap_ev_vac_color_erase = |
308 | PMAP_EVCNT_INITIALIZER("erase page color"); | | 308 | PMAP_EVCNT_INITIALIZER("erase page color"); |
309 | static struct evcnt pmap_ev_vac_color_none = | | 309 | static struct evcnt pmap_ev_vac_color_none = |
310 | PMAP_EVCNT_INITIALIZER("no page color"); | | 310 | PMAP_EVCNT_INITIALIZER("no page color"); |
311 | static struct evcnt pmap_ev_vac_color_restore = | | 311 | static struct evcnt pmap_ev_vac_color_restore = |
312 | PMAP_EVCNT_INITIALIZER("restore page color"); | | 312 | PMAP_EVCNT_INITIALIZER("restore page color"); |
313 | | | 313 | |
314 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); | | 314 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); |
315 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); | | 315 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); |
316 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); | | 316 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); |
317 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); | | 317 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); |
318 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); | | 318 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); |
319 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); | | 319 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); |
320 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); | | 320 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); |
321 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); | | 321 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); |
322 | #endif | | 322 | #endif |
323 | | | 323 | |
324 | static struct evcnt pmap_ev_mappings = | | 324 | static struct evcnt pmap_ev_mappings = |
325 | PMAP_EVCNT_INITIALIZER("pages mapped"); | | 325 | PMAP_EVCNT_INITIALIZER("pages mapped"); |
326 | static struct evcnt pmap_ev_unmappings = | | 326 | static struct evcnt pmap_ev_unmappings = |
327 | PMAP_EVCNT_INITIALIZER("pages unmapped"); | | 327 | PMAP_EVCNT_INITIALIZER("pages unmapped"); |
328 | static struct evcnt pmap_ev_remappings = | | 328 | static struct evcnt pmap_ev_remappings = |
329 | PMAP_EVCNT_INITIALIZER("pages remapped"); | | 329 | PMAP_EVCNT_INITIALIZER("pages remapped"); |
330 | | | 330 | |
331 | EVCNT_ATTACH_STATIC(pmap_ev_mappings); | | 331 | EVCNT_ATTACH_STATIC(pmap_ev_mappings); |
332 | EVCNT_ATTACH_STATIC(pmap_ev_unmappings); | | 332 | EVCNT_ATTACH_STATIC(pmap_ev_unmappings); |
333 | EVCNT_ATTACH_STATIC(pmap_ev_remappings); | | 333 | EVCNT_ATTACH_STATIC(pmap_ev_remappings); |
334 | | | 334 | |
335 | static struct evcnt pmap_ev_kernel_mappings = | | 335 | static struct evcnt pmap_ev_kernel_mappings = |
336 | PMAP_EVCNT_INITIALIZER("kernel pages mapped"); | | 336 | PMAP_EVCNT_INITIALIZER("kernel pages mapped"); |
337 | static struct evcnt pmap_ev_kernel_unmappings = | | 337 | static struct evcnt pmap_ev_kernel_unmappings = |
338 | PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); | | 338 | PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); |
339 | static struct evcnt pmap_ev_kernel_remappings = | | 339 | static struct evcnt pmap_ev_kernel_remappings = |
340 | PMAP_EVCNT_INITIALIZER("kernel pages remapped"); | | 340 | PMAP_EVCNT_INITIALIZER("kernel pages remapped"); |
341 | | | 341 | |
342 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); | | 342 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); |
343 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); | | 343 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); |
344 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); | | 344 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); |
345 | | | 345 | |
346 | static struct evcnt pmap_ev_kenter_mappings = | | 346 | static struct evcnt pmap_ev_kenter_mappings = |
347 | PMAP_EVCNT_INITIALIZER("kenter pages mapped"); | | 347 | PMAP_EVCNT_INITIALIZER("kenter pages mapped"); |
348 | static struct evcnt pmap_ev_kenter_unmappings = | | 348 | static struct evcnt pmap_ev_kenter_unmappings = |
349 | PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); | | 349 | PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); |
350 | static struct evcnt pmap_ev_kenter_remappings = | | 350 | static struct evcnt pmap_ev_kenter_remappings = |
351 | PMAP_EVCNT_INITIALIZER("kenter pages remapped"); | | 351 | PMAP_EVCNT_INITIALIZER("kenter pages remapped"); |
352 | static struct evcnt pmap_ev_pt_mappings = | | 352 | static struct evcnt pmap_ev_pt_mappings = |
353 | PMAP_EVCNT_INITIALIZER("page table pages mapped"); | | 353 | PMAP_EVCNT_INITIALIZER("page table pages mapped"); |
354 | | | 354 | |
355 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); | | 355 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); |
356 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); | | 356 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); |
357 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); | | 357 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); |
358 | EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); | | 358 | EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); |
359 | | | 359 | |
360 | static struct evcnt pmap_ev_fixup_mod = | | 360 | static struct evcnt pmap_ev_fixup_mod = |
361 | PMAP_EVCNT_INITIALIZER("page modification emulations"); | | 361 | PMAP_EVCNT_INITIALIZER("page modification emulations"); |
362 | static struct evcnt pmap_ev_fixup_ref = | | 362 | static struct evcnt pmap_ev_fixup_ref = |
363 | PMAP_EVCNT_INITIALIZER("page reference emulations"); | | 363 | PMAP_EVCNT_INITIALIZER("page reference emulations"); |
364 | static struct evcnt pmap_ev_fixup_exec = | | 364 | static struct evcnt pmap_ev_fixup_exec = |
365 | PMAP_EVCNT_INITIALIZER("exec pages fixed up"); | | 365 | PMAP_EVCNT_INITIALIZER("exec pages fixed up"); |
366 | static struct evcnt pmap_ev_fixup_pdes = | | 366 | static struct evcnt pmap_ev_fixup_pdes = |
367 | PMAP_EVCNT_INITIALIZER("pdes fixed up"); | | 367 | PMAP_EVCNT_INITIALIZER("pdes fixed up"); |
368 | #ifndef ARM_MMU_EXTENDED | | 368 | #ifndef ARM_MMU_EXTENDED |
369 | static struct evcnt pmap_ev_fixup_ptesync = | | 369 | static struct evcnt pmap_ev_fixup_ptesync = |
370 | PMAP_EVCNT_INITIALIZER("ptesync fixed"); | | 370 | PMAP_EVCNT_INITIALIZER("ptesync fixed"); |
371 | #endif | | 371 | #endif |
372 | | | 372 | |
373 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_mod); | | 373 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_mod); |
374 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_ref); | | 374 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_ref); |
375 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_exec); | | 375 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_exec); |
376 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_pdes); | | 376 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_pdes); |
377 | #ifndef ARM_MMU_EXTENDED | | 377 | #ifndef ARM_MMU_EXTENDED |
378 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_ptesync); | | 378 | EVCNT_ATTACH_STATIC(pmap_ev_fixup_ptesync); |
379 | #endif | | 379 | #endif |
380 | | | 380 | |
381 | #ifdef PMAP_CACHE_VIPT | | 381 | #ifdef PMAP_CACHE_VIPT |
382 | static struct evcnt pmap_ev_exec_mappings = | | 382 | static struct evcnt pmap_ev_exec_mappings = |
383 | PMAP_EVCNT_INITIALIZER("exec pages mapped"); | | 383 | PMAP_EVCNT_INITIALIZER("exec pages mapped"); |
384 | static struct evcnt pmap_ev_exec_cached = | | 384 | static struct evcnt pmap_ev_exec_cached = |
385 | PMAP_EVCNT_INITIALIZER("exec pages cached"); | | 385 | PMAP_EVCNT_INITIALIZER("exec pages cached"); |
386 | | | 386 | |
387 | EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); | | 387 | EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); |
388 | EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); | | 388 | EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); |
389 | | | 389 | |
390 | static struct evcnt pmap_ev_exec_synced = | | 390 | static struct evcnt pmap_ev_exec_synced = |
391 | PMAP_EVCNT_INITIALIZER("exec pages synced"); | | 391 | PMAP_EVCNT_INITIALIZER("exec pages synced"); |
392 | static struct evcnt pmap_ev_exec_synced_map = | | 392 | static struct evcnt pmap_ev_exec_synced_map = |
393 | PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); | | 393 | PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); |
394 | static struct evcnt pmap_ev_exec_synced_unmap = | | 394 | static struct evcnt pmap_ev_exec_synced_unmap = |
395 | PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); | | 395 | PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); |
396 | static struct evcnt pmap_ev_exec_synced_remap = | | 396 | static struct evcnt pmap_ev_exec_synced_remap = |
397 | PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); | | 397 | PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); |
398 | static struct evcnt pmap_ev_exec_synced_clearbit = | | 398 | static struct evcnt pmap_ev_exec_synced_clearbit = |
399 | PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); | | 399 | PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); |
400 | #ifndef ARM_MMU_EXTENDED | | 400 | #ifndef ARM_MMU_EXTENDED |
401 | static struct evcnt pmap_ev_exec_synced_kremove = | | 401 | static struct evcnt pmap_ev_exec_synced_kremove = |
402 | PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); | | 402 | PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); |
403 | #endif | | 403 | #endif |
404 | | | 404 | |
405 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); | | 405 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); |
406 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); | | 406 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); |
407 | #ifndef ARM_MMU_EXTENDED | | 407 | #ifndef ARM_MMU_EXTENDED |
408 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); | | 408 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); |
409 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); | | 409 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); |
410 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); | | 410 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); |
411 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); | | 411 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); |
412 | #endif | | 412 | #endif |
413 | | | 413 | |
414 | static struct evcnt pmap_ev_exec_discarded_unmap = | | 414 | static struct evcnt pmap_ev_exec_discarded_unmap = |
415 | PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); | | 415 | PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); |
416 | static struct evcnt pmap_ev_exec_discarded_zero = | | 416 | static struct evcnt pmap_ev_exec_discarded_zero = |
417 | PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); | | 417 | PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); |
418 | static struct evcnt pmap_ev_exec_discarded_copy = | | 418 | static struct evcnt pmap_ev_exec_discarded_copy = |
419 | PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); | | 419 | PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); |
420 | static struct evcnt pmap_ev_exec_discarded_page_protect = | | 420 | static struct evcnt pmap_ev_exec_discarded_page_protect = |
421 | PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); | | 421 | PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); |
422 | static struct evcnt pmap_ev_exec_discarded_clearbit = | | 422 | static struct evcnt pmap_ev_exec_discarded_clearbit = |
423 | PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); | | 423 | PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); |
424 | static struct evcnt pmap_ev_exec_discarded_kremove = | | 424 | static struct evcnt pmap_ev_exec_discarded_kremove = |
425 | PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); | | 425 | PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); |
426 | #ifdef ARM_MMU_EXTENDED | | 426 | #ifdef ARM_MMU_EXTENDED |
427 | static struct evcnt pmap_ev_exec_discarded_modfixup = | | 427 | static struct evcnt pmap_ev_exec_discarded_modfixup = |
428 | PMAP_EVCNT_INITIALIZER("exec pages discarded (MF)"); | | 428 | PMAP_EVCNT_INITIALIZER("exec pages discarded (MF)"); |
429 | #endif | | 429 | #endif |
430 | | | 430 | |
431 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); | | 431 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); |
432 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); | | 432 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); |
433 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); | | 433 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); |
434 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); | | 434 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); |
435 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); | | 435 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); |
436 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); | | 436 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); |
437 | #ifdef ARM_MMU_EXTENDED | | 437 | #ifdef ARM_MMU_EXTENDED |
438 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_modfixup); | | 438 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_modfixup); |
439 | #endif | | 439 | #endif |
440 | #endif /* PMAP_CACHE_VIPT */ | | 440 | #endif /* PMAP_CACHE_VIPT */ |
441 | | | 441 | |
442 | static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); | | 442 | static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); |
443 | static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); | | 443 | static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); |
444 | static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); | | 444 | static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); |
445 | | | 445 | |
446 | EVCNT_ATTACH_STATIC(pmap_ev_updates); | | 446 | EVCNT_ATTACH_STATIC(pmap_ev_updates); |
447 | EVCNT_ATTACH_STATIC(pmap_ev_collects); | | 447 | EVCNT_ATTACH_STATIC(pmap_ev_collects); |
448 | EVCNT_ATTACH_STATIC(pmap_ev_activations); | | 448 | EVCNT_ATTACH_STATIC(pmap_ev_activations); |
449 | | | 449 | |
450 | #define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) | | 450 | #define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) |
451 | #else | | 451 | #else |
452 | #define PMAPCOUNT(x) ((void)0) | | 452 | #define PMAPCOUNT(x) ((void)0) |
453 | #endif | | 453 | #endif |
454 | | | 454 | |
455 | #ifdef ARM_MMU_EXTENDED | | 455 | #ifdef ARM_MMU_EXTENDED |
456 | void pmap_md_pdetab_activate(pmap_t, struct lwp *); | | 456 | void pmap_md_pdetab_activate(pmap_t, struct lwp *); |
457 | void pmap_md_pdetab_deactivate(pmap_t pm); | | 457 | void pmap_md_pdetab_deactivate(pmap_t pm); |
458 | #endif | | 458 | #endif |
459 | | | 459 | |
460 | /* | | 460 | /* |
461 | * pmap copy/zero page, and mem(5) hook point | | 461 | * pmap copy/zero page, and mem(5) hook point |
462 | */ | | 462 | */ |
463 | static pt_entry_t *csrc_pte, *cdst_pte; | | 463 | static pt_entry_t *csrc_pte, *cdst_pte; |
464 | static vaddr_t csrcp, cdstp; | | 464 | static vaddr_t csrcp, cdstp; |
465 | #ifdef MULTIPROCESSOR | | 465 | #ifdef MULTIPROCESSOR |
466 | static size_t cnptes; | | 466 | static size_t cnptes; |
467 | #define cpu_csrc_pte(o) (csrc_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT)) | | 467 | #define cpu_csrc_pte(o) (csrc_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT)) |
468 | #define cpu_cdst_pte(o) (cdst_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT)) | | 468 | #define cpu_cdst_pte(o) (cdst_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT)) |
469 | #define cpu_csrcp(o) (csrcp + L2_S_SIZE * cnptes * cpu_number() + (o)) | | 469 | #define cpu_csrcp(o) (csrcp + L2_S_SIZE * cnptes * cpu_number() + (o)) |
470 | #define cpu_cdstp(o) (cdstp + L2_S_SIZE * cnptes * cpu_number() + (o)) | | 470 | #define cpu_cdstp(o) (cdstp + L2_S_SIZE * cnptes * cpu_number() + (o)) |
471 | #else | | 471 | #else |
472 | #define cpu_csrc_pte(o) (csrc_pte + ((o) >> L2_S_SHIFT)) | | 472 | #define cpu_csrc_pte(o) (csrc_pte + ((o) >> L2_S_SHIFT)) |
473 | #define cpu_cdst_pte(o) (cdst_pte + ((o) >> L2_S_SHIFT)) | | 473 | #define cpu_cdst_pte(o) (cdst_pte + ((o) >> L2_S_SHIFT)) |
474 | #define cpu_csrcp(o) (csrcp + (o)) | | 474 | #define cpu_csrcp(o) (csrcp + (o)) |
475 | #define cpu_cdstp(o) (cdstp + (o)) | | 475 | #define cpu_cdstp(o) (cdstp + (o)) |
476 | #endif | | 476 | #endif |
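
The csrc/cdst windows above give pmap_copy_page() and pmap_zero_page() a fixed kernel VA at which any physical page can be mapped temporarily; on MULTIPROCESSOR kernels each CPU indexes its own PTE slot through cpu_number(), so concurrent copies never collide. An illustrative outline of zeroing a page pa through the destination window (assumed shape only; the real routines also deal with cache colour on VIVT/VIPT caches):

    pt_entry_t * const ptep = cpu_cdst_pte(0);
    const vaddr_t va = cpu_cdstp(0);

    /* Point this CPU's window at the target page... */
    *ptep = L2_S_PROTO | pa |
        L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
    PTE_SYNC(ptep);             /* cf. pmap_needs_pte_sync below */
    cpu_tlb_flushD_SE(va);      /* drop any stale window translation */
    cpu_cpwait();

    /* ...then zero through the window. */
    bzero_page(va);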
477 | vaddr_t memhook; /* used by mem.c & others */ | | 477 | vaddr_t memhook; /* used by mem.c & others */ |
478 | kmutex_t memlock __cacheline_aligned; /* used by mem.c & others */ | | 478 | kmutex_t memlock __cacheline_aligned; /* used by mem.c & others */ |
479 | kmutex_t pmap_lock __cacheline_aligned; | | 479 | kmutex_t pmap_lock __cacheline_aligned; |
480 | kmutex_t kpm_lock __cacheline_aligned; | | 480 | kmutex_t kpm_lock __cacheline_aligned; |
481 | extern void *msgbufaddr; | | 481 | extern void *msgbufaddr; |
482 | int pmap_kmpages; | | 482 | int pmap_kmpages; |
483 | /* | | 483 | /* |
484 | * Flag to indicate if pmap_init() has done its thing | | 484 | * Flag to indicate if pmap_init() has done its thing |
485 | */ | | 485 | */ |
486 | bool pmap_initialized; | | 486 | bool pmap_initialized; |
487 | | | 487 | |
488 | #if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) | | 488 | #if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) |
489 | /* | | 489 | /* |
490 | * Virtual end of direct-mapped memory | | 490 | * Virtual end of direct-mapped memory |
491 | */ | | 491 | */ |
492 | vaddr_t pmap_directlimit; | | 492 | vaddr_t pmap_directlimit; |
493 | #endif | | 493 | #endif |
494 | | | 494 | |
495 | /* | | 495 | /* |
496 | * Misc. locking data structures | | 496 | * Misc. locking data structures |
497 | */ | | 497 | */ |
498 | | | 498 | |
499 | static inline void | | 499 | static inline void |
500 | pmap_acquire_pmap_lock(pmap_t pm) | | 500 | pmap_acquire_pmap_lock(pmap_t pm) |
501 | { | | 501 | { |
502 | #if defined(MULTIPROCESSOR) && defined(DDB) | | 502 | #if defined(MULTIPROCESSOR) && defined(DDB) |
503 | if (__predict_false(db_onproc != NULL)) | | 503 | if (__predict_false(db_onproc != NULL)) |
504 | return; | | 504 | return; |
505 | #endif | | 505 | #endif |
506 | | | 506 | |
507 | mutex_enter(&pm->pm_lock); | | 507 | mutex_enter(&pm->pm_lock); |
508 | } | | 508 | } |
509 | | | 509 | |
510 | static inline void | | 510 | static inline void |
511 | pmap_release_pmap_lock(pmap_t pm) | | 511 | pmap_release_pmap_lock(pmap_t pm) |
512 | { | | 512 | { |
513 | #if defined(MULTIPROCESSOR) && defined(DDB) | | 513 | #if defined(MULTIPROCESSOR) && defined(DDB) |
514 | if (__predict_false(db_onproc != NULL)) | | 514 | if (__predict_false(db_onproc != NULL)) |
515 | return; | | 515 | return; |
516 | #endif | | 516 | #endif |
517 | mutex_exit(&pm->pm_lock); | | 517 | mutex_exit(&pm->pm_lock); |
518 | } | | 518 | } |
519 | | | 519 | |
520 | static inline void | | 520 | static inline void |
521 | pmap_acquire_page_lock(struct vm_page_md *md) | | 521 | pmap_acquire_page_lock(struct vm_page_md *md) |
522 | { | | 522 | { |
523 | mutex_enter(&pmap_lock); | | 523 | mutex_enter(&pmap_lock); |
524 | } | | 524 | } |
525 | | | 525 | |
526 | static inline void | | 526 | static inline void |
527 | pmap_release_page_lock(struct vm_page_md *md) | | 527 | pmap_release_page_lock(struct vm_page_md *md) |
528 | { | | 528 | { |
529 | mutex_exit(&pmap_lock); | | 529 | mutex_exit(&pmap_lock); |
530 | } | | 530 | } |
531 | | | 531 | |
532 | #ifdef DIAGNOSTIC | | 532 | #ifdef DIAGNOSTIC |
533 | static inline int | | 533 | static inline int |
534 | pmap_page_locked_p(struct vm_page_md *md) | | 534 | pmap_page_locked_p(struct vm_page_md *md) |
535 | { | | 535 | { |
536 | return mutex_owned(&pmap_lock); | | 536 | return mutex_owned(&pmap_lock); |
537 | } | | 537 | } |
538 | #endif | | 538 | #endif |
539 | | | 539 | |
540 | | | 540 | |
541 | /* | | 541 | /* |
542 | * Metadata for L1 translation tables. | | 542 | * Metadata for L1 translation tables. |
543 | */ | | 543 | */ |
544 | #ifndef ARM_MMU_EXTENDED | | 544 | #ifndef ARM_MMU_EXTENDED |
545 | struct l1_ttable { | | 545 | struct l1_ttable { |
546 | /* Entry on the L1 Table list */ | | 546 | /* Entry on the L1 Table list */ |
547 | SLIST_ENTRY(l1_ttable) l1_link; | | 547 | SLIST_ENTRY(l1_ttable) l1_link; |
548 | | | 548 | |
549 | /* Entry on the L1 Least Recently Used list */ | | 549 | /* Entry on the L1 Least Recently Used list */ |
550 | TAILQ_ENTRY(l1_ttable) l1_lru; | | 550 | TAILQ_ENTRY(l1_ttable) l1_lru; |
551 | | | 551 | |
552 | /* Track how many domains are allocated from this L1 */ | | 552 | /* Track how many domains are allocated from this L1 */ |
553 | volatile u_int l1_domain_use_count; | | 553 | volatile u_int l1_domain_use_count; |
554 | | | 554 | |
555 | /* | | 555 | /* |
556 | * A free-list of domain numbers for this L1. | | 556 | * A free-list of domain numbers for this L1. |
557 | * We avoid using ffs() and a bitmap to track domains since ffs() | | 557 | * We avoid using ffs() and a bitmap to track domains since ffs() |
558 | * is slow on ARM. | | 558 | * is slow on ARM. |
559 | */ | | 559 | */ |
560 | uint8_t l1_domain_first; | | 560 | uint8_t l1_domain_first; |
561 | uint8_t l1_domain_free[PMAP_DOMAINS]; | | 561 | uint8_t l1_domain_free[PMAP_DOMAINS]; |
562 | | | 562 | |
563 | /* Physical address of this L1 page table */ | | 563 | /* Physical address of this L1 page table */ |
564 | paddr_t l1_physaddr; | | 564 | paddr_t l1_physaddr; |
565 | | | 565 | |
566 | /* KVA of this L1 page table */ | | 566 | /* KVA of this L1 page table */ |
567 | pd_entry_t *l1_kva; | | 567 | pd_entry_t *l1_kva; |
568 | }; | | 568 | }; |
569 | | | 569 | |
570 | /* | | 570 | /* |
571 | * L1 Page Tables are tracked using a Least Recently Used list. | | 571 | * L1 Page Tables are tracked using a Least Recently Used list. |
572 | * - New L1s are allocated from the HEAD. | | 572 | * - New L1s are allocated from the HEAD. |
573 | * - Freed L1s are added to the TAIL. | | 573 | * - Freed L1s are added to the TAIL. |
574 | * - Recently accessed L1s (where an 'access' is some change to one of | | 574 | * - Recently accessed L1s (where an 'access' is some change to one of |
575 | * the userland pmaps which own this L1) are moved to the TAIL. | | 575 | * the userland pmaps which own this L1) are moved to the TAIL. |
576 | */ | | 576 | */ |
577 | static TAILQ_HEAD(, l1_ttable) l1_lru_list; | | 577 | static TAILQ_HEAD(, l1_ttable) l1_lru_list; |
578 | static kmutex_t l1_lru_lock __cacheline_aligned; | | 578 | static kmutex_t l1_lru_lock __cacheline_aligned; |
579 | | | 579 | |
580 | /* | | 580 | /* |
581 | * A list of all L1 tables | | 581 | * A list of all L1 tables |
582 | */ | | 582 | */ |
583 | static SLIST_HEAD(, l1_ttable) l1_list; | | 583 | static SLIST_HEAD(, l1_ttable) l1_list; |
584 | #endif /* ARM_MMU_EXTENDED */ | | 584 | #endif /* ARM_MMU_EXTENDED */ |
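
The domain free list above is an intrusive linked list threaded through l1_domain_free[]: l1_domain_first names the first free domain number, and each array slot names the next one. Allocation is therefore a constant-time pop plus the LRU bookkeeping described above. A sketch of that step, under the locking declared here (the real pmap_alloc_l1(), declared at the end of this excerpt and defined later in the file, is the authoritative version):

    mutex_spin_enter(&l1_lru_lock);
    struct l1_ttable * const l1 = TAILQ_FIRST(&l1_lru_list);

    /* Pop the first free domain; its slot names the next free one. */
    const uint8_t domain = l1->l1_domain_first;
    l1->l1_domain_first = l1->l1_domain_free[domain];

    /* Keep the L1 on the LRU tail while it still has spare domains. */
    TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
    if (++l1->l1_domain_use_count < PMAP_DOMAINS)
        TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
    mutex_spin_exit(&l1_lru_lock);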
585 | | | 585 | |
586 | /* | | 586 | /* |
587 | * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. | | 587 | * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. |
588 | * | | 588 | * |
589 | * This is normally 16MB worth of L2 page descriptors for any given pmap. | | 589 | * This is normally 16MB worth of L2 page descriptors for any given pmap. |
590 | * Reference counts are maintained for L2 descriptors so they can be | | 590 | * Reference counts are maintained for L2 descriptors so they can be |
591 | * freed when empty. | | 591 | * freed when empty. |
592 | */ | | 592 | */ |
593 | struct l2_bucket { | | 593 | struct l2_bucket { |
594 | pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ | | 594 | pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ |
595 | paddr_t l2b_pa; /* Physical address of same */ | | 595 | paddr_t l2b_pa; /* Physical address of same */ |
596 | u_short l2b_l1slot; /* This L2 table's L1 index */ | | 596 | u_short l2b_l1slot; /* This L2 table's L1 index */ |
597 | u_short l2b_occupancy; /* How many active descriptors */ | | 597 | u_short l2b_occupancy; /* How many active descriptors */ |
598 | }; | | 598 | }; |
599 | | | 599 | |
600 | struct l2_dtable { | | 600 | struct l2_dtable { |
601 | /* The number of L2 page descriptors allocated to this l2_dtable */ | | 601 | /* The number of L2 page descriptors allocated to this l2_dtable */ |
602 | u_int l2_occupancy; | | 602 | u_int l2_occupancy; |
603 | | | 603 | |
604 | /* List of L2 page descriptors */ | | 604 | /* List of L2 page descriptors */ |
605 | struct l2_bucket l2_bucket[L2_BUCKET_SIZE]; | | 605 | struct l2_bucket l2_bucket[L2_BUCKET_SIZE]; |
606 | }; | | 606 | }; |
607 | | | 607 | |
608 | /* | | 608 | /* |
609 | * Given an L1 table index, calculate the corresponding l2_dtable index | | 609 | * Given an L1 table index, calculate the corresponding l2_dtable index |
610 | * and bucket index within the l2_dtable. | | 610 | * and bucket index within the l2_dtable. |
611 | */ | | 611 | */ |
612 | #define L2_BUCKET_XSHIFT (L2_BUCKET_XLOG2 - L1_S_SHIFT) | | 612 | #define L2_BUCKET_XSHIFT (L2_BUCKET_XLOG2 - L1_S_SHIFT) |
613 | #define L2_BUCKET_XFRAME (~(vaddr_t)0 << L2_BUCKET_XLOG2) | | 613 | #define L2_BUCKET_XFRAME (~(vaddr_t)0 << L2_BUCKET_XLOG2) |
614 | #define L2_BUCKET_IDX(l1slot) ((l1slot) >> L2_BUCKET_XSHIFT) | | 614 | #define L2_BUCKET_IDX(l1slot) ((l1slot) >> L2_BUCKET_XSHIFT) |
615 | #define L2_IDX(l1slot) (L2_BUCKET_IDX(l1slot) >> L2_BUCKET_LOG2) | | 615 | #define L2_IDX(l1slot) (L2_BUCKET_IDX(l1slot) >> L2_BUCKET_LOG2) |
616 | #define L2_BUCKET(l1slot) (L2_BUCKET_IDX(l1slot) & (L2_BUCKET_SIZE - 1)) | | 616 | #define L2_BUCKET(l1slot) (L2_BUCKET_IDX(l1slot) & (L2_BUCKET_SIZE - 1)) |
617 | | | 617 | |
618 | __CTASSERT(0x100000000ULL == ((uint64_t)L2_SIZE * L2_BUCKET_SIZE * L1_S_SIZE)); | | 618 | __CTASSERT(0x100000000ULL == ((uint64_t)L2_SIZE * L2_BUCKET_SIZE * L1_S_SIZE)); |
619 | __CTASSERT(L2_BUCKET_XFRAME == ~(L2_BUCKET_XSIZE-1)); | | 619 | __CTASSERT(L2_BUCKET_XFRAME == ~(L2_BUCKET_XSIZE-1)); |
620 | | | 620 | |
621 | /* | | 621 | /* |
622 | * Given a virtual address, this macro returns the | | 622 | * Given a virtual address, this macro returns the |
623 | * virtual address required to drop into the next L2 bucket. | | 623 | * virtual address required to drop into the next L2 bucket. |
624 | */ | | 624 | */ |
625 | #define L2_NEXT_BUCKET_VA(va) (((va) & L2_BUCKET_XFRAME) + L2_BUCKET_XSIZE) | | 625 | #define L2_NEXT_BUCKET_VA(va) (((va) & L2_BUCKET_XFRAME) + L2_BUCKET_XSIZE) |
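
Putting the macros together: the L1 slot of a VA (va >> L1_S_SHIFT) is first scaled to a bucket index, whose high bits pick the l2_dtable out of pm->pm_l2[] and whose low bits pick the bucket inside it. A lookup thus takes this shape (a sketch in the spirit of pmap_get_l2_bucket() later in the file):

    const size_t l1slot = va >> L1_S_SHIFT;  /* index of the 1MB L1 slot */
    struct l2_dtable * const l2 = pm->pm_l2[L2_IDX(l1slot)];
    struct l2_bucket *l2b;

    if (l2 == NULL ||
        (l2b = &l2->l2_bucket[L2_BUCKET(l1slot)])->l2b_kva == NULL)
        return NULL;            /* no L2 table covers this VA */
    return l2b;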
626 | | | 626 | |
627 | /* | | 627 | /* |
628 | * L2 allocation. | | 628 | * L2 allocation. |
629 | */ | | 629 | */ |
630 | #define pmap_alloc_l2_dtable() \ | | 630 | #define pmap_alloc_l2_dtable() \ |
631 | pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT) | | 631 | pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT) |
632 | #define pmap_free_l2_dtable(l2) \ | | 632 | #define pmap_free_l2_dtable(l2) \ |
633 | pool_cache_put(&pmap_l2dtable_cache, (l2)) | | 633 | pool_cache_put(&pmap_l2dtable_cache, (l2)) |
634 | #define pmap_alloc_l2_ptp(pap) \ | | 634 | #define pmap_alloc_l2_ptp(pap) \ |
635 | ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\ | | 635 | ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\ |
636 | PR_NOWAIT, (pap))) | | 636 | PR_NOWAIT, (pap))) |
637 | | | 637 | |
638 | /* | | 638 | /* |
639 | * We try to map the page tables write-through, if possible. However, not | | 639 | * We try to map the page tables write-through, if possible. However, not |
640 | * all CPUs have a write-through cache mode, so on those we have to sync | | 640 | * all CPUs have a write-through cache mode, so on those we have to sync |
641 | * the cache when we frob page tables. | | 641 | * the cache when we frob page tables. |
642 | * | | 642 | * |
643 | * We try to evaluate this at compile time, if possible. However, it's | | 643 | * We try to evaluate this at compile time, if possible. However, it's |
644 | * not always possible to do that, hence this run-time var. | | 644 | * not always possible to do that, hence this run-time var. |
645 | */ | | 645 | */ |
646 | int pmap_needs_pte_sync; | | 646 | int pmap_needs_pte_sync; |
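
Concretely, on such CPUs every store to a live page-table entry must be written back from the data cache before the table walker can see it. A minimal sketch of the gate this flag drives (hypothetical macro name; the real PTE_SYNC() lives in the arm32 pmap header and handles more cases):

    #define PTE_SYNC_SKETCH(ptep)                                   \
    do {                                                            \
        if (pmap_needs_pte_sync)                                    \
            cpu_dcache_wb_range((vaddr_t)(ptep),                    \
                sizeof(pt_entry_t));                                \
    } while (/*CONSTCOND*/ 0)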
647 | | | 647 | |
648 | /* | | 648 | /* |
649 | * Real definition of pv_entry. | | 649 | * Real definition of pv_entry. |
650 | */ | | 650 | */ |
651 | struct pv_entry { | | 651 | struct pv_entry { |
652 | SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ | | 652 | SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ |
653 | pmap_t pv_pmap; /* pmap where mapping lies */ | | 653 | pmap_t pv_pmap; /* pmap where mapping lies */ |
654 | vaddr_t pv_va; /* virtual address for mapping */ | | 654 | vaddr_t pv_va; /* virtual address for mapping */ |
655 | u_int pv_flags; /* flags */ | | 655 | u_int pv_flags; /* flags */ |
656 | }; | | 656 | }; |
657 | | | 657 | |
658 | /* | | 658 | /* |
659 | * Macros to determine if a mapping might be resident in the | | 659 | * Macros to determine if a mapping might be resident in the |
660 | * instruction/data cache and/or TLB | | 660 | * instruction/data cache and/or TLB |
661 | */ | | 661 | */ |
662 | #if ARM_MMU_V7 > 0 && !defined(ARM_MMU_EXTENDED) | | 662 | #if ARM_MMU_V7 > 0 && !defined(ARM_MMU_EXTENDED) |
663 | /* | | 663 | /* |
664 | * Speculative loads by Cortex cores can cause TLB entries to be filled even if | | 664 | * Speculative loads by Cortex cores can cause TLB entries to be filled even if |
665 | * there are no explicit accesses, so there may always be TLB entries to | | 665 | * there are no explicit accesses, so there may always be TLB entries to |
666 | * flush. If we used ASIDs then this would not be a problem. | | 666 | * flush. If we used ASIDs then this would not be a problem. |
667 | */ | | 667 | */ |
668 | #define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) == PVF_EXEC) | | 668 | #define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) == PVF_EXEC) |
669 | #define PV_BEEN_REFD(f) (true) | | 669 | #define PV_BEEN_REFD(f) (true) |
670 | #else | | 670 | #else |
671 | #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) | | 671 | #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) |
672 | #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) | | 672 | #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) |
673 | #endif | | 673 | #endif |
674 | #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) | | 674 | #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) |
675 | #define PV_IS_KENTRY_P(f) (((f) & PVF_KENTRY) != 0) | | 675 | #define PV_IS_KENTRY_P(f) (((f) & PVF_KENTRY) != 0) |
676 | #define PV_IS_WRITE_P(f) (((f) & PVF_WRITE) != 0) | | 676 | #define PV_IS_WRITE_P(f) (((f) & PVF_WRITE) != 0) |
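
/*
 * All of these predicates take a pv_flags/pvh_attrs word; e.g. the
 * TLB-flush helpers below use PV_BEEN_EXECD()/PV_BEEN_REFD() to choose
 * between an I+D flush and a D-only flush.
 */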
677 | | | 677 | |
678 | /* | | 678 | /* |
679 | * Local prototypes | | 679 | * Local prototypes |
680 | */ | | 680 | */ |
681 | static bool pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t, size_t); | | 681 | static bool pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t, size_t); |
682 | static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *, | | 682 | static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *, |
683 | pt_entry_t **); | | 683 | pt_entry_t **); |
684 | static bool pmap_is_current(pmap_t) __unused; | | 684 | static bool pmap_is_current(pmap_t) __unused; |
685 | static bool pmap_is_cached(pmap_t); | | 685 | static bool pmap_is_cached(pmap_t); |
686 | static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *, | | 686 | static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *, |
687 | pmap_t, vaddr_t, u_int); | | 687 | pmap_t, vaddr_t, u_int); |
688 | static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t); | | 688 | static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t); |
689 | static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 689 | static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
690 | static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t, | | 690 | static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t, |
691 | u_int, u_int); | | 691 | u_int, u_int); |
692 | | | 692 | |
693 | static void pmap_pinit(pmap_t); | | 693 | static void pmap_pinit(pmap_t); |
694 | static int pmap_pmap_ctor(void *, void *, int); | | 694 | static int pmap_pmap_ctor(void *, void *, int); |
695 | | | 695 | |
696 | static void pmap_alloc_l1(pmap_t); | | 696 | static void pmap_alloc_l1(pmap_t); |
697 | static void pmap_free_l1(pmap_t); | | 697 | static void pmap_free_l1(pmap_t); |
698 | #ifndef ARM_MMU_EXTENDED | | 698 | #ifndef ARM_MMU_EXTENDED |
699 | static void pmap_use_l1(pmap_t); | | 699 | static void pmap_use_l1(pmap_t); |
700 | #endif | | 700 | #endif |
701 | | | 701 | |
702 | static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t); | | 702 | static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t); |
703 | static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t); | | 703 | static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t); |
704 | static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); | | 704 | static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); |
705 | static int pmap_l2ptp_ctor(void *, void *, int); | | 705 | static int pmap_l2ptp_ctor(void *, void *, int); |
706 | static int pmap_l2dtable_ctor(void *, void *, int); | | 706 | static int pmap_l2dtable_ctor(void *, void *, int); |
707 | | | 707 | |
708 | static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 708 | static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
709 | #ifdef PMAP_CACHE_VIVT | | 709 | #ifdef PMAP_CACHE_VIVT |
710 | static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 710 | static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
711 | static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 711 | static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
712 | #endif | | 712 | #endif |
713 | | | 713 | |
714 | static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int); | | 714 | static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int); |
715 | #ifdef PMAP_CACHE_VIVT | | 715 | #ifdef PMAP_CACHE_VIVT |
716 | static bool pmap_clean_page(struct vm_page_md *, bool); | | 716 | static bool pmap_clean_page(struct vm_page_md *, bool); |
717 | #endif | | 717 | #endif |
718 | #ifdef PMAP_CACHE_VIPT | | 718 | #ifdef PMAP_CACHE_VIPT |
719 | static void pmap_syncicache_page(struct vm_page_md *, paddr_t); | | 719 | static void pmap_syncicache_page(struct vm_page_md *, paddr_t); |
720 | enum pmap_flush_op { | | 720 | enum pmap_flush_op { |
721 | PMAP_FLUSH_PRIMARY, | | 721 | PMAP_FLUSH_PRIMARY, |
722 | PMAP_FLUSH_SECONDARY, | | 722 | PMAP_FLUSH_SECONDARY, |
723 | PMAP_CLEAN_PRIMARY | | 723 | PMAP_CLEAN_PRIMARY |
724 | }; | | 724 | }; |
725 | #ifndef ARM_MMU_EXTENDED | | 725 | #ifndef ARM_MMU_EXTENDED |
726 | static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op); | | 726 | static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op); |
727 | #endif | | 727 | #endif |
728 | #endif | | 728 | #endif |
729 | static void pmap_page_remove(struct vm_page_md *, paddr_t); | | 729 | static void pmap_page_remove(struct vm_page_md *, paddr_t); |
730 | static void pmap_pv_remove(paddr_t); | | 730 | static void pmap_pv_remove(paddr_t); |
731 | | | 731 | |
732 | #ifndef ARM_MMU_EXTENDED | | 732 | #ifndef ARM_MMU_EXTENDED |
733 | static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); | | 733 | static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); |
734 | #endif | | 734 | #endif |
735 | static vaddr_t kernel_pt_lookup(paddr_t); | | 735 | static vaddr_t kernel_pt_lookup(paddr_t); |
736 | | | 736 | |
737 | #ifdef ARM_MMU_EXTENDED | | 737 | #ifdef ARM_MMU_EXTENDED |
738 | static struct pool_cache pmap_l1tt_cache; | | 738 | static struct pool_cache pmap_l1tt_cache; |
739 | | | 739 | |
740 | static int pmap_l1tt_ctor(void *, void *, int); | | 740 | static int pmap_l1tt_ctor(void *, void *, int); |
741 | static void * pmap_l1tt_alloc(struct pool *, int); | | 741 | static void * pmap_l1tt_alloc(struct pool *, int); |
742 | static void pmap_l1tt_free(struct pool *, void *); | | 742 | static void pmap_l1tt_free(struct pool *, void *); |
743 | | | 743 | |
744 | static struct pool_allocator pmap_l1tt_allocator = { | | 744 | static struct pool_allocator pmap_l1tt_allocator = { |
745 | .pa_alloc = pmap_l1tt_alloc, | | 745 | .pa_alloc = pmap_l1tt_alloc, |
746 | .pa_free = pmap_l1tt_free, | | 746 | .pa_free = pmap_l1tt_free, |
747 | .pa_pagesz = L1TT_SIZE, | | 747 | .pa_pagesz = L1TT_SIZE, |
748 | }; | | 748 | }; |
749 | #endif | | 749 | #endif |
750 | | | 750 | |
751 | /* | | 751 | /* |
752 | * Misc variables | | 752 | * Misc variables |
753 | */ | | 753 | */ |
754 | vaddr_t virtual_avail; | | 754 | vaddr_t virtual_avail; |
755 | vaddr_t virtual_end; | | 755 | vaddr_t virtual_end; |
756 | vaddr_t pmap_curmaxkvaddr; | | 756 | vaddr_t pmap_curmaxkvaddr; |
757 | | | 757 | |
758 | paddr_t avail_start; | | 758 | paddr_t avail_start; |
759 | paddr_t avail_end; | | 759 | paddr_t avail_end; |
760 | | | 760 | |
761 | pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); | | 761 | pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); |
762 | pv_addr_t kernelpages; | | 762 | pv_addr_t kernelpages; |
763 | pv_addr_t kernel_l1pt; | | 763 | pv_addr_t kernel_l1pt; |
764 | pv_addr_t systempage; | | 764 | pv_addr_t systempage; |
765 | | | 765 | |
766 | #ifdef PMAP_CACHE_VIPT | | 766 | #ifdef PMAP_CACHE_VIPT |
767 | #define PMAP_VALIDATE_MD_PAGE(md) \ | | 767 | #define PMAP_VALIDATE_MD_PAGE(md) \ |
768 | KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \ | | 768 | KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \ |
769 | "(md) %p: attrs=%#x urw=%u krw=%u", (md), \ | | 769 | "(md) %p: attrs=%#x urw=%u krw=%u", (md), \ |
770 | (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings); | | 770 | (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings); |
771 | #endif /* PMAP_CACHE_VIPT */ | | 771 | #endif /* PMAP_CACHE_VIPT */ |
772 | /* | | 772 | /* |
773 | * A bunch of routines to conditionally flush the caches/TLB depending | | 773 | * A bunch of routines to conditionally flush the caches/TLB depending |
774 | * on whether the specified pmap actually needs to be flushed at any | | 774 | * on whether the specified pmap actually needs to be flushed at any |
775 | * given time. | | 775 | * given time. |
776 | */ | | 776 | */ |
777 | static inline void | | 777 | static inline void |
778 | pmap_tlb_flush_SE(pmap_t pm, vaddr_t va, u_int flags) | | 778 | pmap_tlb_flush_SE(pmap_t pm, vaddr_t va, u_int flags) |
779 | { | | 779 | { |
780 | #ifdef ARM_MMU_EXTENDED | | 780 | #ifdef ARM_MMU_EXTENDED |
781 | pmap_tlb_invalidate_addr(pm, va); | | 781 | pmap_tlb_invalidate_addr(pm, va); |
782 | #else | | 782 | #else |
783 | if (pm->pm_cstate.cs_tlb_id != 0) { | | 783 | if (pm->pm_cstate.cs_tlb_id != 0) { |
784 | if (PV_BEEN_EXECD(flags)) { | | 784 | if (PV_BEEN_EXECD(flags)) { |
785 | cpu_tlb_flushID_SE(va); | | 785 | cpu_tlb_flushID_SE(va); |
786 | } else if (PV_BEEN_REFD(flags)) { | | 786 | } else if (PV_BEEN_REFD(flags)) { |
787 | cpu_tlb_flushD_SE(va); | | 787 | cpu_tlb_flushD_SE(va); |
788 | } | | 788 | } |
789 | } | | 789 | } |
790 | #endif /* ARM_MMU_EXTENDED */ | | 790 | #endif /* ARM_MMU_EXTENDED */ |
791 | } | | 791 | } |
792 | | | 792 | |
793 | #ifndef ARM_MMU_EXTENDED | | 793 | #ifndef ARM_MMU_EXTENDED |
794 | static inline void | | 794 | static inline void |
795 | pmap_tlb_flushID(pmap_t pm) | | 795 | pmap_tlb_flushID(pmap_t pm) |
796 | { | | 796 | { |
797 | if (pm->pm_cstate.cs_tlb_id) { | | 797 | if (pm->pm_cstate.cs_tlb_id) { |
798 | cpu_tlb_flushID(); | | 798 | cpu_tlb_flushID(); |
799 | #if ARM_MMU_V7 == 0 | | 799 | #if ARM_MMU_V7 == 0 |
800 | /* | | 800 | /* |
801 | * Speculative loads by Cortex cores can cause TLB entries to | | 801 | * Speculative loads by Cortex cores can cause TLB entries to |
802 | * be filled even if there are no explicit accesses, so there | | 802 | * be filled even if there are no explicit accesses, so there |
803 | * may always be TLB entries to flush. If we used ASIDs | | 803 | * may always be TLB entries to flush. If we used ASIDs |
804 | * then it would not be a problem. | | 804 | * then it would not be a problem. |
805 | * This is not true for other CPUs. | | 805 | * This is not true for other CPUs. |
806 | */ | | 806 | */ |
807 | pm->pm_cstate.cs_tlb = 0; | | 807 | pm->pm_cstate.cs_tlb = 0; |
808 | #endif /* ARM_MMU_V7 */ | | 808 | #endif /* ARM_MMU_V7 */ |
809 | } | | 809 | } |
810 | } | | 810 | } |
811 | | | 811 | |
812 | static inline void | | 812 | static inline void |
813 | pmap_tlb_flushD(pmap_t pm) | | 813 | pmap_tlb_flushD(pmap_t pm) |
814 | { | | 814 | { |
815 | if (pm->pm_cstate.cs_tlb_d) { | | 815 | if (pm->pm_cstate.cs_tlb_d) { |
816 | cpu_tlb_flushD(); | | 816 | cpu_tlb_flushD(); |
817 | #if ARM_MMU_V7 == 0 | | 817 | #if ARM_MMU_V7 == 0 |
818 | /* | | 818 | /* |
819 | * Speculative loads by Cortex cores can cause TLB entries to | | 819 | * Speculative loads by Cortex cores can cause TLB entries to |
820 | * be filled even if there are no explicit accesses, so there | | 820 | * be filled even if there are no explicit accesses, so there |
821 | * may always be TLB entries to flush. If we used ASIDs | | 821 | * may always be TLB entries to flush. If we used ASIDs |
822 | * then it would not be a problem. | | 822 | * then it would not be a problem. |
823 | * This is not true for other CPUs. | | 823 | * This is not true for other CPUs. |
824 | */ | | 824 | */ |
825 | pm->pm_cstate.cs_tlb_d = 0; | | 825 | pm->pm_cstate.cs_tlb_d = 0; |
826 | #endif /* ARM_MMU_V7 */ | | 826 | #endif /* ARM_MMU_V7 */ |
827 | } | | 827 | } |
828 | } | | 828 | } |
829 | #endif /* ARM_MMU_EXTENDED */ | | 829 | #endif /* ARM_MMU_EXTENDED */ |
830 | | | 830 | |
831 | #ifdef PMAP_CACHE_VIVT | | 831 | #ifdef PMAP_CACHE_VIVT |
832 | static inline void | | 832 | static inline void |
833 | pmap_cache_wbinv_page(pmap_t pm, vaddr_t va, bool do_inv, u_int flags) | | 833 | pmap_cache_wbinv_page(pmap_t pm, vaddr_t va, bool do_inv, u_int flags) |
834 | { | | 834 | { |
835 | if (PV_BEEN_EXECD(flags) && pm->pm_cstate.cs_cache_id) { | | 835 | if (PV_BEEN_EXECD(flags) && pm->pm_cstate.cs_cache_id) { |
836 | cpu_idcache_wbinv_range(va, PAGE_SIZE); | | 836 | cpu_idcache_wbinv_range(va, PAGE_SIZE); |
837 | } else if (PV_BEEN_REFD(flags) && pm->pm_cstate.cs_cache_d) { | | 837 | } else if (PV_BEEN_REFD(flags) && pm->pm_cstate.cs_cache_d) { |
838 | if (do_inv) { | | 838 | if (do_inv) { |
839 | if (flags & PVF_WRITE) | | 839 | if (flags & PVF_WRITE) |
840 | cpu_dcache_wbinv_range(va, PAGE_SIZE); | | 840 | cpu_dcache_wbinv_range(va, PAGE_SIZE); |
841 | else | | 841 | else |
842 | cpu_dcache_inv_range(va, PAGE_SIZE); | | 842 | cpu_dcache_inv_range(va, PAGE_SIZE); |
843 | } else if (flags & PVF_WRITE) { | | 843 | } else if (flags & PVF_WRITE) { |
844 | cpu_dcache_wb_range(va, PAGE_SIZE); | | 844 | cpu_dcache_wb_range(va, PAGE_SIZE); |
845 | } | | 845 | } |
846 | } | | 846 | } |
847 | } | | 847 | } |
848 | | | 848 | |
849 | static inline void | | 849 | static inline void |
850 | pmap_cache_wbinv_all(pmap_t pm, u_int flags) | | 850 | pmap_cache_wbinv_all(pmap_t pm, u_int flags) |
851 | { | | 851 | { |
852 | if (PV_BEEN_EXECD(flags)) { | | 852 | if (PV_BEEN_EXECD(flags)) { |
853 | if (pm->pm_cstate.cs_cache_id) { | | 853 | if (pm->pm_cstate.cs_cache_id) { |
854 | cpu_idcache_wbinv_all(); | | 854 | cpu_idcache_wbinv_all(); |
855 | pm->pm_cstate.cs_cache = 0; | | 855 | pm->pm_cstate.cs_cache = 0; |
856 | } | | 856 | } |
857 | } else if (pm->pm_cstate.cs_cache_d) { | | 857 | } else if (pm->pm_cstate.cs_cache_d) { |
858 | cpu_dcache_wbinv_all(); | | 858 | cpu_dcache_wbinv_all(); |
859 | pm->pm_cstate.cs_cache_d = 0; | | 859 | pm->pm_cstate.cs_cache_d = 0; |
860 | } | | 860 | } |
861 | } | | 861 | } |
862 | #endif /* PMAP_CACHE_VIVT */ | | 862 | #endif /* PMAP_CACHE_VIVT */ |
863 | | | 863 | |
864 | static inline uint8_t | | 864 | static inline uint8_t |
865 | pmap_domain(pmap_t pm) | | 865 | pmap_domain(pmap_t pm) |
866 | { | | 866 | { |
867 | #ifdef ARM_MMU_EXTENDED | | 867 | #ifdef ARM_MMU_EXTENDED |
868 | return pm == pmap_kernel() ? PMAP_DOMAIN_KERNEL : PMAP_DOMAIN_USER; | | 868 | return pm == pmap_kernel() ? PMAP_DOMAIN_KERNEL : PMAP_DOMAIN_USER; |
869 | #else | | 869 | #else |
870 | return pm->pm_domain; | | 870 | return pm->pm_domain; |
871 | #endif | | 871 | #endif |
872 | } | | 872 | } |
873 | | | 873 | |
874 | static inline pd_entry_t * | | 874 | static inline pd_entry_t * |
875 | pmap_l1_kva(pmap_t pm) | | 875 | pmap_l1_kva(pmap_t pm) |
876 | { | | 876 | { |
877 | #ifdef ARM_MMU_EXTENDED | | 877 | #ifdef ARM_MMU_EXTENDED |
878 | return pm->pm_l1; | | 878 | return pm->pm_l1; |
879 | #else | | 879 | #else |
880 | return pm->pm_l1->l1_kva; | | 880 | return pm->pm_l1->l1_kva; |
881 | #endif | | 881 | #endif |
882 | } | | 882 | } |
883 | | | 883 | |
884 | static inline bool | | 884 | static inline bool |
885 | pmap_is_current(pmap_t pm) | | 885 | pmap_is_current(pmap_t pm) |
886 | { | | 886 | { |
887 | if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm) | | 887 | if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm) |
888 | return true; | | 888 | return true; |
889 | | | 889 | |
890 | return false; | | 890 | return false; |
891 | } | | 891 | } |
892 | | | 892 | |
893 | static inline bool | | 893 | static inline bool |
894 | pmap_is_cached(pmap_t pm) | | 894 | pmap_is_cached(pmap_t pm) |
895 | { | | 895 | { |
896 | #ifdef ARM_MMU_EXTENDED | | 896 | #ifdef ARM_MMU_EXTENDED |
897 | if (pm == pmap_kernel()) | | 897 | if (pm == pmap_kernel()) |
898 | return true; | | 898 | return true; |
899 | #ifdef MULTIPROCESSOR | | 899 | #ifdef MULTIPROCESSOR |
900 | // Is this pmap active on any CPU? | | 900 | // Is this pmap active on any CPU? |
901 | if (!kcpuset_iszero(pm->pm_active)) | | 901 | if (!kcpuset_iszero(pm->pm_active)) |
902 | return true; | | 902 | return true; |
903 | #else | | 903 | #else |
904 | struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu()); | | 904 | struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu()); |
905 | // Is this pmap active? | | 905 | // Is this pmap active? |
906 | if (PMAP_PAI_ASIDVALID_P(PMAP_PAI(pm, ti), ti)) | | 906 | if (PMAP_PAI_ASIDVALID_P(PMAP_PAI(pm, ti), ti)) |
907 | return true; | | 907 | return true; |
908 | #endif | | 908 | #endif |
909 | #else | | 909 | #else |
910 | struct cpu_info * const ci = curcpu(); | | 910 | struct cpu_info * const ci = curcpu(); |
911 | if (pm == pmap_kernel() || ci->ci_pmap_lastuser == NULL | | 911 | if (pm == pmap_kernel() || ci->ci_pmap_lastuser == NULL |
912 | || ci->ci_pmap_lastuser == pm) | | 912 | || ci->ci_pmap_lastuser == pm) |
913 | return true; | | 913 | return true; |
914 | #endif /* ARM_MMU_EXTENDED */ | | 914 | #endif /* ARM_MMU_EXTENDED */ |
915 | | | 915 | |
916 | return false; | | 916 | return false; |
917 | } | | 917 | } |
918 | | | 918 | |
919 | /* | | 919 | /* |
920 | * PTE_SYNC_CURRENT: | | 920 | * PTE_SYNC_CURRENT: |
921 | * | | 921 | * |
922 | * Make sure the pte is written out to RAM. | | 922 | * Make sure the pte is written out to RAM. |
923 | * We need to do this in any of the following cases: | | 923 | * We need to do this in any of the following cases: |
924 | * - We're dealing with the kernel pmap | | 924 | * - We're dealing with the kernel pmap |
925 | * - There is no pmap active in the cache/tlb. | | 925 | * - There is no pmap active in the cache/tlb. |
926 | * - The specified pmap is 'active' in the cache/tlb. | | 926 | * - The specified pmap is 'active' in the cache/tlb. |
927 | */ | | 927 | */ |
928 | | | 928 | |
929 | #ifdef PMAP_INCLUDE_PTE_SYNC | | 929 | #ifdef PMAP_INCLUDE_PTE_SYNC |
930 | static inline void | | 930 | static inline void |
931 | pmap_pte_sync_current(pmap_t pm, pt_entry_t *ptep) | | 931 | pmap_pte_sync_current(pmap_t pm, pt_entry_t *ptep) |
932 | { | | 932 | { |
933 | if (PMAP_NEEDS_PTE_SYNC && pmap_is_cached(pm)) | | 933 | if (PMAP_NEEDS_PTE_SYNC && pmap_is_cached(pm)) |
934 | PTE_SYNC(ptep); | | 934 | PTE_SYNC(ptep); |
935 | dsb(sy); | | 935 | dsb(sy); |
936 | } | | 936 | } |
937 | | | 937 | |
938 | # define PTE_SYNC_CURRENT(pm, ptep) pmap_pte_sync_current(pm, ptep) | | 938 | # define PTE_SYNC_CURRENT(pm, ptep) pmap_pte_sync_current(pm, ptep) |
939 | #else | | 939 | #else |
940 | # define PTE_SYNC_CURRENT(pm, ptep) __nothing | | 940 | # define PTE_SYNC_CURRENT(pm, ptep) __nothing |
941 | #endif | | 941 | #endif |
942 | | | 942 | |
943 | /* | | 943 | /* |
944 | * main pv_entry manipulation functions: | | 944 | * main pv_entry manipulation functions: |
945 | * pmap_enter_pv: enter a mapping onto a vm_page list | | 945 | * pmap_enter_pv: enter a mapping onto a vm_page list |
946 | * pmap_remove_pv: remove a mapping from a vm_page list | | 946 | * pmap_remove_pv: remove a mapping from a vm_page list |
947 | * | | 947 | * |
948 | * NOTE: pmap_enter_pv expects to lock the pvh itself | | 948 | * NOTE: pmap_enter_pv expects to lock the pvh itself |
949 | * pmap_remove_pv expects the caller to lock the pvh before calling | | 949 | * pmap_remove_pv expects the caller to lock the pvh before calling |
950 | */ | | 950 | */ |
951 | | | 951 | |
952 | /* | | 952 | /* |
953 | * pmap_enter_pv: enter a mapping onto a vm_page list | | 953 | * pmap_enter_pv: enter a mapping onto a vm_page list |
954 | * | | 954 | * |
955 | * => caller should hold the proper lock on pmap_main_lock | | 955 | * => caller should hold the proper lock on pmap_main_lock |
956 | * => caller should have pmap locked | | 956 | * => caller should have pmap locked |
957 | * => we will gain the lock on the vm_page and allocate the new pv_entry | | 957 | * => we will gain the lock on the vm_page and allocate the new pv_entry |
958 | * => caller should adjust ptp's wire_count before calling | | 958 | * => caller should adjust ptp's wire_count before calling |
959 | * => caller should not adjust pmap's wire_count | | 959 | * => caller should not adjust pmap's wire_count |
960 | */ | | 960 | */ |
961 | static void | | 961 | static void |
962 | pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm, | | 962 | pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm, |
963 | vaddr_t va, u_int flags) | | 963 | vaddr_t va, u_int flags) |
964 | { | | 964 | { |
965 | UVMHIST_FUNC(__func__); | | 965 | UVMHIST_FUNC(__func__); |
966 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", | | 966 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", |
967 | (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); | | 967 | (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); |
968 | UVMHIST_LOG(maphist, "...pv %#jx flags %#jx", | | 968 | UVMHIST_LOG(maphist, "...pv %#jx flags %#jx", |
969 | (uintptr_t)pv, flags, 0, 0); | | 969 | (uintptr_t)pv, flags, 0, 0); |
970 | | | 970 | |
971 | struct pv_entry **pvp; | | 971 | struct pv_entry **pvp; |
972 | | | 972 | |
973 | pv->pv_pmap = pm; | | 973 | pv->pv_pmap = pm; |
974 | pv->pv_va = va; | | 974 | pv->pv_va = va; |
975 | pv->pv_flags = flags; | | 975 | pv->pv_flags = flags; |
976 | | | 976 | |
977 | pvp = &SLIST_FIRST(&md->pvh_list); | | 977 | pvp = &SLIST_FIRST(&md->pvh_list); |
978 | #ifdef PMAP_CACHE_VIPT | | 978 | #ifdef PMAP_CACHE_VIPT |
979 | /* | | 979 | /* |
980 | * Insert unmanaged entries, writeable first, at the head of | | 980 | * Insert unmanaged entries, writeable first, at the head of |
981 | * the pv list. | | 981 | * the pv list. |
982 | */ | | 982 | */ |
983 | if (__predict_true(!PV_IS_KENTRY_P(flags))) { | | 983 | if (__predict_true(!PV_IS_KENTRY_P(flags))) { |
984 | while (*pvp != NULL && PV_IS_KENTRY_P((*pvp)->pv_flags)) | | 984 | while (*pvp != NULL && PV_IS_KENTRY_P((*pvp)->pv_flags)) |
985 | pvp = &SLIST_NEXT(*pvp, pv_link); | | 985 | pvp = &SLIST_NEXT(*pvp, pv_link); |
986 | } | | 986 | } |
987 | if (!PV_IS_WRITE_P(flags)) { | | 987 | if (!PV_IS_WRITE_P(flags)) { |
988 | while (*pvp != NULL && PV_IS_WRITE_P((*pvp)->pv_flags)) | | 988 | while (*pvp != NULL && PV_IS_WRITE_P((*pvp)->pv_flags)) |
989 | pvp = &SLIST_NEXT(*pvp, pv_link); | | 989 | pvp = &SLIST_NEXT(*pvp, pv_link); |
990 | } | | 990 | } |
991 | #endif | | 991 | #endif |
992 | SLIST_NEXT(pv, pv_link) = *pvp; /* add to ... */ | | 992 | SLIST_NEXT(pv, pv_link) = *pvp; /* add to ... */ |
993 | *pvp = pv; /* ... locked list */ | | 993 | *pvp = pv; /* ... locked list */ |
994 | md->pvh_attrs |= flags & (PVF_REF | PVF_MOD); | | 994 | md->pvh_attrs |= flags & (PVF_REF | PVF_MOD); |
995 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 995 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
996 | if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE) | | 996 | if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE) |
997 | md->pvh_attrs |= PVF_KMOD; | | 997 | md->pvh_attrs |= PVF_KMOD; |
998 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) | | 998 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) |
999 | md->pvh_attrs |= PVF_DIRTY; | | 999 | md->pvh_attrs |= PVF_DIRTY; |
1000 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1000 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1001 | #endif | | 1001 | #endif |
1002 | if (pm == pmap_kernel()) { | | 1002 | if (pm == pmap_kernel()) { |
1003 | PMAPCOUNT(kernel_mappings); | | 1003 | PMAPCOUNT(kernel_mappings); |
1004 | if (flags & PVF_WRITE) | | 1004 | if (flags & PVF_WRITE) |
1005 | md->krw_mappings++; | | 1005 | md->krw_mappings++; |
1006 | else | | 1006 | else |
1007 | md->kro_mappings++; | | 1007 | md->kro_mappings++; |
1008 | } else { | | 1008 | } else { |
1009 | if (flags & PVF_WRITE) | | 1009 | if (flags & PVF_WRITE) |
1010 | md->urw_mappings++; | | 1010 | md->urw_mappings++; |
1011 | else | | 1011 | else |
1012 | md->uro_mappings++; | | 1012 | md->uro_mappings++; |
1013 | } | | 1013 | } |
1014 | | | 1014 | |
1015 | #ifdef PMAP_CACHE_VIPT | | 1015 | #ifdef PMAP_CACHE_VIPT |
1016 | #ifndef ARM_MMU_EXTENDED | | 1016 | #ifndef ARM_MMU_EXTENDED |
1017 | /* | | 1017 | /* |
1018 | * Even though pmap_vac_me_harder will set PVF_WRITE for us, | | 1018 | * Even though pmap_vac_me_harder will set PVF_WRITE for us, |
1019 | * do it here as well to keep the mappings & PVF_WRITE consistent. | | 1019 | * do it here as well to keep the mappings & PVF_WRITE consistent. |
1020 | */ | | 1020 | */ |
1021 | if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) { | | 1021 | if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) { |
1022 | md->pvh_attrs |= PVF_WRITE; | | 1022 | md->pvh_attrs |= PVF_WRITE; |
1023 | } | | 1023 | } |
1024 | #endif | | 1024 | #endif |
1025 | /* | | 1025 | /* |
1026 | * If this is an exec mapping and it's the first exec mapping | | 1026 | * If this is an exec mapping and it's the first exec mapping |
1027 | * for this page, make sure to sync the I-cache. | | 1027 | * for this page, make sure to sync the I-cache. |
1028 | */ | | 1028 | */ |
1029 | if (PV_IS_EXEC_P(flags)) { | | 1029 | if (PV_IS_EXEC_P(flags)) { |
1030 | if (!PV_IS_EXEC_P(md->pvh_attrs)) { | | 1030 | if (!PV_IS_EXEC_P(md->pvh_attrs)) { |
1031 | pmap_syncicache_page(md, pa); | | 1031 | pmap_syncicache_page(md, pa); |
1032 | PMAPCOUNT(exec_synced_map); | | 1032 | PMAPCOUNT(exec_synced_map); |
1033 | } | | 1033 | } |
1034 | PMAPCOUNT(exec_mappings); | | 1034 | PMAPCOUNT(exec_mappings); |
1035 | } | | 1035 | } |
1036 | #endif | | 1036 | #endif |
1037 | | | 1037 | |
1038 | PMAPCOUNT(mappings); | | 1038 | PMAPCOUNT(mappings); |
1039 | | | 1039 | |
1040 | if (pv->pv_flags & PVF_WIRED) | | 1040 | if (pv->pv_flags & PVF_WIRED) |
1041 | ++pm->pm_stats.wired_count; | | 1041 | ++pm->pm_stats.wired_count; |
1042 | } | | 1042 | } |
1043 | | | 1043 | |
1044 | /* | | 1044 | /* |
1045 | * | | 1045 | * |
1046 | * pmap_find_pv: Find a pv entry | | 1046 | * pmap_find_pv: Find a pv entry |
1047 | * | | 1047 | * |
1048 | * => caller should hold lock on vm_page | | 1048 | * => caller should hold lock on vm_page |
1049 | */ | | 1049 | */ |
1050 | static inline struct pv_entry * | | 1050 | static inline struct pv_entry * |
1051 | pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va) | | 1051 | pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va) |
1052 | { | | 1052 | { |
1053 | struct pv_entry *pv; | | 1053 | struct pv_entry *pv; |
1054 | | | 1054 | |
1055 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 1055 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
1056 | if (pm == pv->pv_pmap && va == pv->pv_va) | | 1056 | if (pm == pv->pv_pmap && va == pv->pv_va) |
1057 | break; | | 1057 | break; |
1058 | } | | 1058 | } |
1059 | | | 1059 | |
1060 | return pv; | | 1060 | return pv; |
1061 | } | | 1061 | } |
1062 | | | 1062 | |
1063 | /* | | 1063 | /* |
1064 | * pmap_remove_pv: try to remove a mapping from a pv_list | | 1064 | * pmap_remove_pv: try to remove a mapping from a pv_list |
1065 | * | | 1065 | * |
1066 | * => caller should hold proper lock on pmap_main_lock | | 1066 | * => caller should hold proper lock on pmap_main_lock |
1067 | * => pmap should be locked | | 1067 | * => pmap should be locked |
1068 | * => caller should hold lock on vm_page [so that attrs can be adjusted] | | 1068 | * => caller should hold lock on vm_page [so that attrs can be adjusted] |
1069 | * => caller should adjust ptp's wire_count and free PTP if needed | | 1069 | * => caller should adjust ptp's wire_count and free PTP if needed |
1070 | * => caller should NOT adjust pmap's wire_count | | 1070 | * => caller should NOT adjust pmap's wire_count |
1071 | * => we return the removed pv | | 1071 | * => we return the removed pv |
1072 | */ | | 1072 | */ |
1073 | static struct pv_entry * | | 1073 | static struct pv_entry * |
1074 | pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 1074 | pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
1075 | { | | 1075 | { |
1076 | UVMHIST_FUNC(__func__); | | 1076 | UVMHIST_FUNC(__func__); |
1077 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", | | 1077 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", |
1078 | (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); | | 1078 | (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); |
1079 | | | 1079 | |
1080 | struct pv_entry *pv, **prevptr; | | 1080 | struct pv_entry *pv, **prevptr; |
1081 | | | 1081 | |
1082 | prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */ | | 1082 | prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */ |
1083 | pv = *prevptr; | | 1083 | pv = *prevptr; |
1084 | | | 1084 | |
1085 | while (pv) { | | 1085 | while (pv) { |
1086 | if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */ | | 1086 | if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */ |
1087 | UVMHIST_LOG(maphist, "pm %#jx md %#jx flags %#jx", | | 1087 | UVMHIST_LOG(maphist, "pm %#jx md %#jx flags %#jx", |
1088 | (uintptr_t)pm, (uintptr_t)md, pv->pv_flags, 0); | | 1088 | (uintptr_t)pm, (uintptr_t)md, pv->pv_flags, 0); |
1089 | if (pv->pv_flags & PVF_WIRED) { | | 1089 | if (pv->pv_flags & PVF_WIRED) { |
1090 | --pm->pm_stats.wired_count; | | 1090 | --pm->pm_stats.wired_count; |
1091 | } | | 1091 | } |
1092 | *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */ | | 1092 | *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */ |
1093 | if (pm == pmap_kernel()) { | | 1093 | if (pm == pmap_kernel()) { |
1094 | PMAPCOUNT(kernel_unmappings); | | 1094 | PMAPCOUNT(kernel_unmappings); |
1095 | if (pv->pv_flags & PVF_WRITE) | | 1095 | if (pv->pv_flags & PVF_WRITE) |
1096 | md->krw_mappings--; | | 1096 | md->krw_mappings--; |
1097 | else | | 1097 | else |
1098 | md->kro_mappings--; | | 1098 | md->kro_mappings--; |
1099 | } else { | | 1099 | } else { |
1100 | if (pv->pv_flags & PVF_WRITE) | | 1100 | if (pv->pv_flags & PVF_WRITE) |
1101 | md->urw_mappings--; | | 1101 | md->urw_mappings--; |
1102 | else | | 1102 | else |
1103 | md->uro_mappings--; | | 1103 | md->uro_mappings--; |
1104 | } | | 1104 | } |
1105 | | | 1105 | |
1106 | PMAPCOUNT(unmappings); | | 1106 | PMAPCOUNT(unmappings); |
1107 | #ifdef PMAP_CACHE_VIPT | | 1107 | #ifdef PMAP_CACHE_VIPT |
1108 | /* | | 1108 | /* |
1109 | * If this page has had an exec mapping, then if | | 1109 | * If this page has had an exec mapping, then if |
1110 | * this was the last mapping, discard the contents, | | 1110 | * this was the last mapping, discard the contents, |
1111 | * otherwise sync the i-cache for this page. | | 1111 | * otherwise sync the i-cache for this page. |
1112 | */ | | 1112 | */ |
1113 | if (PV_IS_EXEC_P(md->pvh_attrs)) { | | 1113 | if (PV_IS_EXEC_P(md->pvh_attrs)) { |
1114 | if (SLIST_EMPTY(&md->pvh_list)) { | | 1114 | if (SLIST_EMPTY(&md->pvh_list)) { |
1115 | md->pvh_attrs &= ~PVF_EXEC; | | 1115 | md->pvh_attrs &= ~PVF_EXEC; |
1116 | PMAPCOUNT(exec_discarded_unmap); | | 1116 | PMAPCOUNT(exec_discarded_unmap); |
1117 | } else if (pv->pv_flags & PVF_WRITE) { | | 1117 | } else if (pv->pv_flags & PVF_WRITE) { |
1118 | pmap_syncicache_page(md, pa); | | 1118 | pmap_syncicache_page(md, pa); |
1119 | PMAPCOUNT(exec_synced_unmap); | | 1119 | PMAPCOUNT(exec_synced_unmap); |
1120 | } | | 1120 | } |
1121 | } | | 1121 | } |
1122 | #endif /* PMAP_CACHE_VIPT */ | | 1122 | #endif /* PMAP_CACHE_VIPT */ |
1123 | break; | | 1123 | break; |
1124 | } | | 1124 | } |
1125 | prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */ | | 1125 | prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */ |
1126 | pv = *prevptr; /* advance */ | | 1126 | pv = *prevptr; /* advance */ |
1127 | } | | 1127 | } |
1128 | | | 1128 | |
1129 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 1129 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
1130 | /* | | 1130 | /* |
1131 | * If we no longer have a WRITEABLE KENTRY at the head of the list, | | 1131 | * If we no longer have a WRITEABLE KENTRY at the head of the list, |
1132 | * clear the KMOD attribute from the page. | | 1132 | * clear the KMOD attribute from the page. |
1133 | */ | | 1133 | */ |
1134 | if (SLIST_FIRST(&md->pvh_list) == NULL | | 1134 | if (SLIST_FIRST(&md->pvh_list) == NULL |
1135 | || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) | | 1135 | || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) |
1136 | md->pvh_attrs &= ~PVF_KMOD; | | 1136 | md->pvh_attrs &= ~PVF_KMOD; |
1137 | | | 1137 | |
1138 | /* | | 1138 | /* |
1139 | * If this was a writeable page and there are no more writeable | | 1139 | * If this was a writeable page and there are no more writeable |
1140 | * mappings (ignoring KMPAGE), clear the WRITE flag and write back | | 1140 | * mappings (ignoring KMPAGE), clear the WRITE flag and write back |
1141 | * the contents to memory. | | 1141 | * the contents to memory. |
1142 | */ | | 1142 | */ |
1143 | if (arm_cache_prefer_mask != 0) { | | 1143 | if (arm_cache_prefer_mask != 0) { |
1144 | if (md->krw_mappings + md->urw_mappings == 0) | | 1144 | if (md->krw_mappings + md->urw_mappings == 0) |
1145 | md->pvh_attrs &= ~PVF_WRITE; | | 1145 | md->pvh_attrs &= ~PVF_WRITE; |
1146 | PMAP_VALIDATE_MD_PAGE(md); | | 1146 | PMAP_VALIDATE_MD_PAGE(md); |
1147 | } | | 1147 | } |
1148 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1148 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1149 | #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ | | 1149 | #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ |
1150 | | | 1150 | |
1151 | /* return removed pv */ | | 1151 | /* return removed pv */ |
1152 | return pv; | | 1152 | return pv; |
1153 | } | | 1153 | } |
1154 | | | 1154 | |
1155 | /* | | 1155 | /* |
1156 | * | | 1156 | * |
1157 | * pmap_modify_pv: Update pv flags | | 1157 | * pmap_modify_pv: Update pv flags |
1158 | * | | 1158 | * |
1159 | * => caller should hold lock on vm_page [so that attrs can be adjusted] | | 1159 | * => caller should hold lock on vm_page [so that attrs can be adjusted] |
1160 | * => caller should NOT adjust pmap's wire_count | | 1160 | * => caller should NOT adjust pmap's wire_count |
1161 | * => caller must call pmap_vac_me_harder() if writable status of a page | | 1161 | * => caller must call pmap_vac_me_harder() if writable status of a page |
1162 | * may have changed. | | 1162 | * may have changed. |
1163 | * => we return the old flags | | 1163 | * => we return the old flags |
1164 | * | | 1164 | * |
1165 | * Modify a physical-virtual mapping in the pv table | | 1165 | * Modify a physical-virtual mapping in the pv table |
1166 | */ | | 1166 | */ |
1167 | static u_int | | 1167 | static u_int |
1168 | pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va, | | 1168 | pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va, |
1169 | u_int clr_mask, u_int set_mask) | | 1169 | u_int clr_mask, u_int set_mask) |
1170 | { | | 1170 | { |
1171 | struct pv_entry *npv; | | 1171 | struct pv_entry *npv; |
1172 | u_int flags, oflags; | | 1172 | u_int flags, oflags; |
1173 | UVMHIST_FUNC(__func__); | | 1173 | UVMHIST_FUNC(__func__); |
1174 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", | | 1174 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", |
1175 | (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); | | 1175 | (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); |
1176 | UVMHIST_LOG(maphist, "... clr %#jx set %#jx", clr_mask, set_mask, 0, 0); | | 1176 | UVMHIST_LOG(maphist, "... clr %#jx set %#jx", clr_mask, set_mask, 0, 0); |
1177 | | | 1177 | |
1178 | KASSERT(!PV_IS_KENTRY_P(clr_mask)); | | 1178 | KASSERT(!PV_IS_KENTRY_P(clr_mask)); |
1179 | KASSERT(!PV_IS_KENTRY_P(set_mask)); | | 1179 | KASSERT(!PV_IS_KENTRY_P(set_mask)); |
1180 | | | 1180 | |
1181 | if ((npv = pmap_find_pv(md, pm, va)) == NULL) { | | 1181 | if ((npv = pmap_find_pv(md, pm, va)) == NULL) { |
1182 | UVMHIST_LOG(maphist, "<--- done (not found)", 0, 0, 0, 0); | | 1182 | UVMHIST_LOG(maphist, "<--- done (not found)", 0, 0, 0, 0); |
1183 | return 0; | | 1183 | return 0; |
1184 | } | | 1184 | } |
1185 | | | 1185 | |
1186 | /* | | 1186 | /* |
1187 | * There is at least one VA mapping this page. | | 1187 | * There is at least one VA mapping this page. |
1188 | */ | | 1188 | */ |
1189 | | | 1189 | |
1190 | if (clr_mask & (PVF_REF | PVF_MOD)) { | | 1190 | if (clr_mask & (PVF_REF | PVF_MOD)) { |
1191 | md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); | | 1191 | md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); |
1192 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 1192 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
1193 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) | | 1193 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) |
1194 | md->pvh_attrs |= PVF_DIRTY; | | 1194 | md->pvh_attrs |= PVF_DIRTY; |
| @@ -1841,4373 +1841,4405 @@ pmap_vac_me_kpmap(struct vm_page_md *md, | | | @@ -1841,4373 +1841,4405 @@ pmap_vac_me_kpmap(struct vm_page_md *md, |
1841 | * further to do. | | 1841 | * further to do. |
1842 | */ | | 1842 | */ |
1843 | if (md->k_mappings == 0 && u_cacheable == u_entries) | | 1843 | if (md->k_mappings == 0 && u_cacheable == u_entries) |
1844 | return; | | 1844 | return; |
1845 | | | 1845 | |
1846 | if (u_entries) { | | 1846 | if (u_entries) { |
1847 | /* | | 1847 | /* |
1848 | * Scan over the list again; for each entry that | | 1848 | * Scan over the list again; for each entry that |
1849 | * might not be set correctly, call pmap_vac_me_user | | 1849 | * might not be set correctly, call pmap_vac_me_user |
1850 | * to recalculate the settings. | | 1850 | * to recalculate the settings. |
1851 | */ | | 1851 | */ |
1852 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 1852 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
1853 | /* | | 1853 | /* |
1854 | * We know kernel mappings will get set | | 1854 | * We know kernel mappings will get set |
1855 | * correctly in other calls. We also know | | 1855 | * correctly in other calls. We also know |
1856 | * that if the pmap is the same as last_pmap | | 1856 | * that if the pmap is the same as last_pmap |
1857 | * then we've just handled this entry. | | 1857 | * then we've just handled this entry. |
1858 | */ | | 1858 | */ |
1859 | if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap) | | 1859 | if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap) |
1860 | continue; | | 1860 | continue; |
1861 | | | 1861 | |
1862 | /* | | 1862 | /* |
1863 | * If there are kernel entries and this page | | 1863 | * If there are kernel entries and this page |
1864 | * is writable but non-cacheable, then we can | | 1864 | * is writable but non-cacheable, then we can |
1865 | * skip this entry also. | | 1865 | * skip this entry also. |
1866 | */ | | 1866 | */ |
1867 | if (md->k_mappings && | | 1867 | if (md->k_mappings && |
1868 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == | | 1868 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == |
1869 | (PVF_NC | PVF_WRITE)) | | 1869 | (PVF_NC | PVF_WRITE)) |
1870 | continue; | | 1870 | continue; |
1871 | | | 1871 | |
1872 | /* | | 1872 | /* |
1873 | * Similarly if there are no kernel-writable | | 1873 | * Similarly if there are no kernel-writable |
1874 | * entries and the page is already | | 1874 | * entries and the page is already |
1875 | * read-only/cacheable. | | 1875 | * read-only/cacheable. |
1876 | */ | | 1876 | */ |
1877 | if (md->krw_mappings == 0 && | | 1877 | if (md->krw_mappings == 0 && |
1878 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) | | 1878 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) |
1879 | continue; | | 1879 | continue; |
1880 | | | 1880 | |
1881 | /* | | 1881 | /* |
1882 | * For some of the remaining cases, we know | | 1882 | * For some of the remaining cases, we know |
1883 | * that we must recalculate, but for others we | | 1883 | * that we must recalculate, but for others we |
1884 | * can't tell if they are correct or not, so | | 1884 | * can't tell if they are correct or not, so |
1885 | * we recalculate anyway. | | 1885 | * we recalculate anyway. |
1886 | */ | | 1886 | */ |
1887 | pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0); | | 1887 | pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0); |
1888 | } | | 1888 | } |
1889 | | | 1889 | |
1890 | if (md->k_mappings == 0) | | 1890 | if (md->k_mappings == 0) |
1891 | return; | | 1891 | return; |
1892 | } | | 1892 | } |
1893 | | | 1893 | |
1894 | pmap_vac_me_user(md, pa, pm, va); | | 1894 | pmap_vac_me_user(md, pa, pm, va); |
1895 | } | | 1895 | } |
1896 | | | 1896 | |
1897 | static void | | 1897 | static void |
1898 | pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 1898 | pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
1899 | { | | 1899 | { |
1900 | pmap_t kpmap = pmap_kernel(); | | 1900 | pmap_t kpmap = pmap_kernel(); |
1901 | struct pv_entry *pv, *npv = NULL; | | 1901 | struct pv_entry *pv, *npv = NULL; |
1902 | u_int entries = 0; | | 1902 | u_int entries = 0; |
1903 | u_int writable = 0; | | 1903 | u_int writable = 0; |
1904 | u_int cacheable_entries = 0; | | 1904 | u_int cacheable_entries = 0; |
1905 | u_int kern_cacheable = 0; | | 1905 | u_int kern_cacheable = 0; |
1906 | u_int other_writable = 0; | | 1906 | u_int other_writable = 0; |
1907 | | | 1907 | |
1908 | /* | | 1908 | /* |
1909 | * Count mappings and writable mappings in this pmap. | | 1909 | * Count mappings and writable mappings in this pmap. |
1910 | * Include kernel mappings as part of our own. | | 1910 | * Include kernel mappings as part of our own. |
1911 | * Keep a pointer to the first one. | | 1911 | * Keep a pointer to the first one. |
1912 | */ | | 1912 | */ |
1913 | npv = NULL; | | 1913 | npv = NULL; |
1914 | KASSERT(pmap_page_locked_p(md)); | | 1914 | KASSERT(pmap_page_locked_p(md)); |
1915 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 1915 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
1916 | /* Count mappings in the same pmap */ | | 1916 | /* Count mappings in the same pmap */ |
1917 | if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { | | 1917 | if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { |
1918 | if (entries++ == 0) | | 1918 | if (entries++ == 0) |
1919 | npv = pv; | | 1919 | npv = pv; |
1920 | | | 1920 | |
1921 | /* Cacheable mappings */ | | 1921 | /* Cacheable mappings */ |
1922 | if ((pv->pv_flags & PVF_NC) == 0) { | | 1922 | if ((pv->pv_flags & PVF_NC) == 0) { |
1923 | cacheable_entries++; | | 1923 | cacheable_entries++; |
1924 | if (kpmap == pv->pv_pmap) | | 1924 | if (kpmap == pv->pv_pmap) |
1925 | kern_cacheable++; | | 1925 | kern_cacheable++; |
1926 | } | | 1926 | } |
1927 | | | 1927 | |
1928 | /* Writable mappings */ | | 1928 | /* Writable mappings */ |
1929 | if (pv->pv_flags & PVF_WRITE) | | 1929 | if (pv->pv_flags & PVF_WRITE) |
1930 | ++writable; | | 1930 | ++writable; |
1931 | } else if (pv->pv_flags & PVF_WRITE) | | 1931 | } else if (pv->pv_flags & PVF_WRITE) |
1932 | other_writable = 1; | | 1932 | other_writable = 1; |
1933 | } | | 1933 | } |
1934 | | | 1934 | |
1935 | /* | | 1935 | /* |
1936 | * Enable or disable caching as necessary. | | 1936 | * Enable or disable caching as necessary. |
1937 | * Note: the first entry might be part of the kernel pmap, | | 1937 | * Note: the first entry might be part of the kernel pmap, |
1938 | * so we can't assume this is indicative of the state of the | | 1938 | * so we can't assume this is indicative of the state of the |
1939 | * other (maybe non-kpmap) entries. | | 1939 | * other (maybe non-kpmap) entries. |
1940 | */ | | 1940 | */ |
1941 | if ((entries > 1 && writable) || | | 1941 | if ((entries > 1 && writable) || |
1942 | (entries > 0 && pm == kpmap && other_writable)) { | | 1942 | (entries > 0 && pm == kpmap && other_writable)) { |
1943 | if (cacheable_entries == 0) { | | 1943 | if (cacheable_entries == 0) { |
1944 | return; | | 1944 | return; |
1945 | } | | 1945 | } |
1946 | | | 1946 | |
1947 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { | | 1947 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { |
1948 | if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || | | 1948 | if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || |
1949 | (pv->pv_flags & PVF_NC)) | | 1949 | (pv->pv_flags & PVF_NC)) |
1950 | continue; | | 1950 | continue; |
1951 | | | 1951 | |
1952 | pv->pv_flags |= PVF_NC; | | 1952 | pv->pv_flags |= PVF_NC; |
1953 | | | 1953 | |
1954 | struct l2_bucket * const l2b | | 1954 | struct l2_bucket * const l2b |
1955 | = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); | | 1955 | = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); |
1956 | KASSERTMSG(l2b != NULL, "%#lx", va); | | 1956 | KASSERTMSG(l2b != NULL, "%#lx", va); |
1957 | pt_entry_t * const ptep | | 1957 | pt_entry_t * const ptep |
1958 | = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 1958 | = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
1959 | const pt_entry_t opte = *ptep; | | 1959 | const pt_entry_t opte = *ptep; |
1960 | pt_entry_t npte = opte & ~L2_S_CACHE_MASK; | | 1960 | pt_entry_t npte = opte & ~L2_S_CACHE_MASK; |
1961 | | | 1961 | |
1962 | if ((va != pv->pv_va || pm != pv->pv_pmap) | | 1962 | if ((va != pv->pv_va || pm != pv->pv_pmap) |
1963 | && l2pte_valid_p(opte)) { | | 1963 | && l2pte_valid_p(opte)) { |
1964 | pmap_cache_wbinv_page(pv->pv_pmap, pv->pv_va, | | 1964 | pmap_cache_wbinv_page(pv->pv_pmap, pv->pv_va, |
1965 | true, pv->pv_flags); | | 1965 | true, pv->pv_flags); |
1966 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, | | 1966 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, |
1967 | pv->pv_flags); | | 1967 | pv->pv_flags); |
1968 | } | | 1968 | } |
1969 | | | 1969 | |
1970 | l2pte_set(ptep, npte, opte); | | 1970 | l2pte_set(ptep, npte, opte); |
1971 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); | | 1971 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); |
1972 | } | | 1972 | } |
1973 | cpu_cpwait(); | | 1973 | cpu_cpwait(); |
1974 | } else if (entries > cacheable_entries) { | | 1974 | } else if (entries > cacheable_entries) { |
1975 | /* | | 1975 | /* |
1976 | * Turn caching back on for some pages. If it is a kernel | | 1976 | * Turn caching back on for some pages. If it is a kernel |
1977 | * page, only do so if there are no other writable mappings. | | 1977 | * page, only do so if there are no other writable mappings. |
1978 | */ | | 1978 | */ |
1979 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { | | 1979 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { |
1980 | if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && | | 1980 | if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && |
1981 | (kpmap != pv->pv_pmap || other_writable))) | | 1981 | (kpmap != pv->pv_pmap || other_writable))) |
1982 | continue; | | 1982 | continue; |
1983 | | | 1983 | |
1984 | pv->pv_flags &= ~PVF_NC; | | 1984 | pv->pv_flags &= ~PVF_NC; |
1985 | | | 1985 | |
1986 | struct l2_bucket * const l2b | | 1986 | struct l2_bucket * const l2b |
1987 | = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); | | 1987 | = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); |
1988 | KASSERTMSG(l2b != NULL, "%#lx", va); | | 1988 | KASSERTMSG(l2b != NULL, "%#lx", va); |
1989 | pt_entry_t * const ptep | | 1989 | pt_entry_t * const ptep |
1990 | = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 1990 | = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
1991 | const pt_entry_t opte = *ptep; | | 1991 | const pt_entry_t opte = *ptep; |
1992 | pt_entry_t npte = (opte & ~L2_S_CACHE_MASK) | | 1992 | pt_entry_t npte = (opte & ~L2_S_CACHE_MASK) |
1993 | | pte_l2_s_cache_mode; | | 1993 | | pte_l2_s_cache_mode; |
1994 | | | 1994 | |
1995 | if (l2pte_valid_p(opte)) { | | 1995 | if (l2pte_valid_p(opte)) { |
1996 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, | | 1996 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, |
1997 | pv->pv_flags); | | 1997 | pv->pv_flags); |
1998 | } | | 1998 | } |
1999 | | | 1999 | |
2000 | l2pte_set(ptep, npte, opte); | | 2000 | l2pte_set(ptep, npte, opte); |
2001 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); | | 2001 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); |
2002 | } | | 2002 | } |
2003 | } | | 2003 | } |
2004 | } | | 2004 | } |
2005 | #endif | | 2005 | #endif |
2006 | | | 2006 | |
2007 | #ifdef PMAP_CACHE_VIPT | | 2007 | #ifdef PMAP_CACHE_VIPT |
2008 | static void | | 2008 | static void |
2009 | pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 2009 | pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
2010 | { | | 2010 | { |
2011 | | | 2011 | |
2012 | #ifndef ARM_MMU_EXTENDED | | 2012 | #ifndef ARM_MMU_EXTENDED |
2013 | struct pv_entry *pv; | | 2013 | struct pv_entry *pv; |
2014 | vaddr_t tst_mask; | | 2014 | vaddr_t tst_mask; |
2015 | bool bad_alias; | | 2015 | bool bad_alias; |
2016 | const u_int | | 2016 | const u_int |
2017 | rw_mappings = md->urw_mappings + md->krw_mappings, | | 2017 | rw_mappings = md->urw_mappings + md->krw_mappings, |
2018 | ro_mappings = md->uro_mappings + md->kro_mappings; | | 2018 | ro_mappings = md->uro_mappings + md->kro_mappings; |
2019 | | | 2019 | |
2020 | /* do we need to do anything? */ | | 2020 | /* do we need to do anything? */ |
2021 | if (arm_cache_prefer_mask == 0) | | 2021 | if (arm_cache_prefer_mask == 0) |
2022 | return; | | 2022 | return; |
2023 | | | 2023 | |
2024 | UVMHIST_FUNC(__func__); | | 2024 | UVMHIST_FUNC(__func__); |
2025 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", | | 2025 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", |
2026 | (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); | | 2026 | (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); |
2027 | | | 2027 | |
2028 | KASSERT(!va || pm); | | 2028 | KASSERT(!va || pm); |
2029 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2029 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2030 | | | 2030 | |
2031 | /* Already a conflict? */ | | 2031 | /* Already a conflict? */ |
2032 | if (__predict_false(md->pvh_attrs & PVF_NC)) { | | 2032 | if (__predict_false(md->pvh_attrs & PVF_NC)) { |
2033 | /* just an add, things are already non-cached */ | | 2033 | /* just an add, things are already non-cached */ |
2034 | KASSERT(!(md->pvh_attrs & PVF_DIRTY)); | | 2034 | KASSERT(!(md->pvh_attrs & PVF_DIRTY)); |
2035 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 2035 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
2036 | bad_alias = false; | | 2036 | bad_alias = false; |
2037 | if (va) { | | 2037 | if (va) { |
2038 | PMAPCOUNT(vac_color_none); | | 2038 | PMAPCOUNT(vac_color_none); |
2039 | bad_alias = true; | | 2039 | bad_alias = true; |
2040 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2040 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2041 | goto fixup; | | 2041 | goto fixup; |
2042 | } | | 2042 | } |
2043 | pv = SLIST_FIRST(&md->pvh_list); | | 2043 | pv = SLIST_FIRST(&md->pvh_list); |
2044 | /* the list can't be empty; if it were, the page would be cacheable */ | | 2044 | /* the list can't be empty; if it were, the page would be cacheable */ |
2045 | if (md->pvh_attrs & PVF_KMPAGE) { | | 2045 | if (md->pvh_attrs & PVF_KMPAGE) { |
2046 | tst_mask = md->pvh_attrs; | | 2046 | tst_mask = md->pvh_attrs; |
2047 | } else { | | 2047 | } else { |
2048 | KASSERT(pv); | | 2048 | KASSERT(pv); |
2049 | tst_mask = pv->pv_va; | | 2049 | tst_mask = pv->pv_va; |
2050 | pv = SLIST_NEXT(pv, pv_link); | | 2050 | pv = SLIST_NEXT(pv, pv_link); |
2051 | } | | 2051 | } |
2052 | /* | | 2052 | /* |
2053 | * Only check for a bad alias if we have writable mappings. | | 2053 | * Only check for a bad alias if we have writable mappings. |
2054 | */ | | 2054 | */ |
2055 | tst_mask &= arm_cache_prefer_mask; | | 2055 | tst_mask &= arm_cache_prefer_mask; |
2056 | if (rw_mappings > 0) { | | 2056 | if (rw_mappings > 0) { |
2057 | for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) { | | 2057 | for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) { |
2058 | /* if there's a bad alias, stop checking. */ | | 2058 | /* if there's a bad alias, stop checking. */ |
2059 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) | | 2059 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) |
2060 | bad_alias = true; | | 2060 | bad_alias = true; |
2061 | } | | 2061 | } |
2062 | md->pvh_attrs |= PVF_WRITE; | | 2062 | md->pvh_attrs |= PVF_WRITE; |
2063 | if (!bad_alias) | | 2063 | if (!bad_alias) |
2064 | md->pvh_attrs |= PVF_DIRTY; | | 2064 | md->pvh_attrs |= PVF_DIRTY; |
2065 | } else { | | 2065 | } else { |
2066 | /* | | 2066 | /* |
2067 | * We have only read-only mappings. Let's see if there | | 2067 | * We have only read-only mappings. Let's see if there |
2068 | * are multiple colors in use or if we mapped a KMPAGE. | | 2068 | * are multiple colors in use or if we mapped a KMPAGE. |
2069 | * If the latter, we have a bad alias. If the former, | | 2069 | * If the latter, we have a bad alias. If the former, |
2070 | * we need to remember that. | | 2070 | * we need to remember that. |
2071 | */ | | 2071 | */ |
2072 | for (; pv; pv = SLIST_NEXT(pv, pv_link)) { | | 2072 | for (; pv; pv = SLIST_NEXT(pv, pv_link)) { |
2073 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) { | | 2073 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) { |
2074 | if (md->pvh_attrs & PVF_KMPAGE) | | 2074 | if (md->pvh_attrs & PVF_KMPAGE) |
2075 | bad_alias = true; | | 2075 | bad_alias = true; |
2076 | break; | | 2076 | break; |
2077 | } | | 2077 | } |
2078 | } | | 2078 | } |
2079 | md->pvh_attrs &= ~PVF_WRITE; | | 2079 | md->pvh_attrs &= ~PVF_WRITE; |
2080 | /* | | 2080 | /* |
2081 | * No KMPAGE and we exited early, so we must have | | 2081 | * No KMPAGE and we exited early, so we must have |
2082 | * multiple color mappings. | | 2082 | * multiple color mappings. |
2083 | */ | | 2083 | */ |
2084 | if (!bad_alias && pv != NULL) | | 2084 | if (!bad_alias && pv != NULL) |
2085 | md->pvh_attrs |= PVF_MULTCLR; | | 2085 | md->pvh_attrs |= PVF_MULTCLR; |
2086 | } | | 2086 | } |
2087 | | | 2087 | |
2088 | /* If no conflicting colors, set everything back to cached */ | | 2088 | /* If no conflicting colors, set everything back to cached */ |
2089 | if (!bad_alias) { | | 2089 | if (!bad_alias) { |
2090 | #ifdef DEBUG | | 2090 | #ifdef DEBUG |
2091 | if ((md->pvh_attrs & PVF_WRITE) | | 2091 | if ((md->pvh_attrs & PVF_WRITE) |
2092 | || ro_mappings < 2) { | | 2092 | || ro_mappings < 2) { |
2093 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) | | 2093 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) |
2094 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); | | 2094 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); |
2095 | } | | 2095 | } |
2096 | #endif | | 2096 | #endif |
2097 | md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC; | | 2097 | md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC; |
2098 | md->pvh_attrs |= tst_mask | PVF_COLORED; | | 2098 | md->pvh_attrs |= tst_mask | PVF_COLORED; |
2099 | /* | | 2099 | /* |
2100 | * Restore DIRTY bit if page is modified | | 2100 | * Restore DIRTY bit if page is modified |
2101 | */ | | 2101 | */ |
2102 | if (md->pvh_attrs & PVF_DMOD) | | 2102 | if (md->pvh_attrs & PVF_DMOD) |
2103 | md->pvh_attrs |= PVF_DIRTY; | | 2103 | md->pvh_attrs |= PVF_DIRTY; |
2104 | PMAPCOUNT(vac_color_restore); | | 2104 | PMAPCOUNT(vac_color_restore); |
2105 | } else { | | 2105 | } else { |
2106 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); | | 2106 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); |
2107 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); | | 2107 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); |
2108 | } | | 2108 | } |
2109 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2109 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2110 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2110 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2111 | } else if (!va) { | | 2111 | } else if (!va) { |
2112 | KASSERT(pmap_is_page_colored_p(md)); | | 2112 | KASSERT(pmap_is_page_colored_p(md)); |
2113 | KASSERT(!(md->pvh_attrs & PVF_WRITE) | | 2113 | KASSERT(!(md->pvh_attrs & PVF_WRITE) |
2114 | || (md->pvh_attrs & PVF_DIRTY)); | | 2114 | || (md->pvh_attrs & PVF_DIRTY)); |
2115 | if (rw_mappings == 0) { | | 2115 | if (rw_mappings == 0) { |
2116 | md->pvh_attrs &= ~PVF_WRITE; | | 2116 | md->pvh_attrs &= ~PVF_WRITE; |
2117 | if (ro_mappings == 1 | | 2117 | if (ro_mappings == 1 |
2118 | && (md->pvh_attrs & PVF_MULTCLR)) { | | 2118 | && (md->pvh_attrs & PVF_MULTCLR)) { |
2119 | /* | | 2119 | /* |
2120 | * If this is the last readonly mapping | | 2120 | * If this is the last readonly mapping |
2121 | * but it doesn't match the current color | | 2121 | * but it doesn't match the current color |
2122 | * for the page, change the current color | | 2122 | * for the page, change the current color |
2123 | * to match this last readonly mapping. | | 2123 | * to match this last readonly mapping. |
2124 | */ | | 2124 | */ |
2125 | pv = SLIST_FIRST(&md->pvh_list); | | 2125 | pv = SLIST_FIRST(&md->pvh_list); |
2126 | tst_mask = (md->pvh_attrs ^ pv->pv_va) | | 2126 | tst_mask = (md->pvh_attrs ^ pv->pv_va) |
2127 | & arm_cache_prefer_mask; | | 2127 | & arm_cache_prefer_mask; |
2128 | if (tst_mask) { | | 2128 | if (tst_mask) { |
2129 | md->pvh_attrs ^= tst_mask; | | 2129 | md->pvh_attrs ^= tst_mask; |
2130 | PMAPCOUNT(vac_color_change); | | 2130 | PMAPCOUNT(vac_color_change); |
2131 | } | | 2131 | } |
2132 | } | | 2132 | } |
2133 | } | | 2133 | } |
2134 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2134 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2135 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2135 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2136 | return; | | 2136 | return; |
2137 | } else if (!pmap_is_page_colored_p(md)) { | | 2137 | } else if (!pmap_is_page_colored_p(md)) { |
2138 | /* not colored so we just use its color */ | | 2138 | /* not colored so we just use its color */ |
2139 | KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY)); | | 2139 | KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY)); |
2140 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 2140 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
2141 | PMAPCOUNT(vac_color_new); | | 2141 | PMAPCOUNT(vac_color_new); |
2142 | md->pvh_attrs &= PAGE_SIZE - 1; | | 2142 | md->pvh_attrs &= PAGE_SIZE - 1; |
2143 | md->pvh_attrs |= PVF_COLORED | | 2143 | md->pvh_attrs |= PVF_COLORED |
2144 | | (va & arm_cache_prefer_mask) | | 2144 | | (va & arm_cache_prefer_mask) |
2145 | | (rw_mappings > 0 ? PVF_WRITE : 0); | | 2145 | | (rw_mappings > 0 ? PVF_WRITE : 0); |
2146 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2146 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2147 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2147 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2148 | return; | | 2148 | return; |
2149 | } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) { | | 2149 | } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) { |
2150 | bad_alias = false; | | 2150 | bad_alias = false; |
2151 | if (rw_mappings > 0) { | | 2151 | if (rw_mappings > 0) { |
2152 | /* | | 2152 | /* |
2153 | 		 * We now have writeable mappings, and if we have | | 2153 | 		 * We now have writeable mappings, and if we have
2154 | 		 * readonly mappings in more than one color, we have | | 2154 | 		 * readonly mappings in more than one color, we have
2155 | 		 * an aliasing problem. Regardless, mark the page as | | 2155 | 		 * an aliasing problem. Regardless, mark the page as
2156 | * writeable. | | 2156 | * writeable. |
2157 | */ | | 2157 | */ |
2158 | if (md->pvh_attrs & PVF_MULTCLR) { | | 2158 | if (md->pvh_attrs & PVF_MULTCLR) { |
2159 | if (ro_mappings < 2) { | | 2159 | if (ro_mappings < 2) { |
2160 | /* | | 2160 | /* |
2161 | 					 * If we have fewer than two | | 2161 | 					 * If we have fewer than two
2162 | * read-only mappings, just flush the | | 2162 | * read-only mappings, just flush the |
2163 | * non-primary colors from the cache. | | 2163 | * non-primary colors from the cache. |
2164 | */ | | 2164 | */ |
2165 | pmap_flush_page(md, pa, | | 2165 | pmap_flush_page(md, pa, |
2166 | PMAP_FLUSH_SECONDARY); | | 2166 | PMAP_FLUSH_SECONDARY); |
2167 | } else { | | 2167 | } else { |
2168 | bad_alias = true; | | 2168 | bad_alias = true; |
2169 | } | | 2169 | } |
2170 | } | | 2170 | } |
2171 | md->pvh_attrs |= PVF_WRITE; | | 2171 | md->pvh_attrs |= PVF_WRITE; |
2172 | } | | 2172 | } |
2173 | /* If no conflicting colors, set everything back to cached */ | | 2173 | /* If no conflicting colors, set everything back to cached */ |
2174 | if (!bad_alias) { | | 2174 | if (!bad_alias) { |
2175 | #ifdef DEBUG | | 2175 | #ifdef DEBUG |
2176 | if (rw_mappings > 0 | | 2176 | if (rw_mappings > 0 |
2177 | 			    || (md->pvh_attrs & PVF_KMPAGE)) { | | 2177 | 			    || (md->pvh_attrs & PVF_KMPAGE)) {
2178 | tst_mask = md->pvh_attrs & arm_cache_prefer_mask; | | 2178 | tst_mask = md->pvh_attrs & arm_cache_prefer_mask; |
2179 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) | | 2179 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) |
2180 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); | | 2180 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); |
2181 | } | | 2181 | } |
2182 | #endif | | 2182 | #endif |
2183 | if (SLIST_EMPTY(&md->pvh_list)) | | 2183 | if (SLIST_EMPTY(&md->pvh_list)) |
2184 | PMAPCOUNT(vac_color_reuse); | | 2184 | PMAPCOUNT(vac_color_reuse); |
2185 | else | | 2185 | else |
2186 | PMAPCOUNT(vac_color_ok); | | 2186 | PMAPCOUNT(vac_color_ok); |
2187 | | | 2187 | |
2188 | /* matching color, just return */ | | 2188 | /* matching color, just return */ |
2189 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2189 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2190 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2190 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2191 | return; | | 2191 | return; |
2192 | } | | 2192 | } |
2193 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); | | 2193 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); |
2194 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); | | 2194 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); |
2195 | | | 2195 | |
2196 | /* color conflict. evict from cache. */ | | 2196 | /* color conflict. evict from cache. */ |
2197 | | | 2197 | |
2198 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); | | 2198 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); |
2199 | md->pvh_attrs &= ~PVF_COLORED; | | 2199 | md->pvh_attrs &= ~PVF_COLORED; |
2200 | md->pvh_attrs |= PVF_NC; | | 2200 | md->pvh_attrs |= PVF_NC; |
2201 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2201 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2202 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 2202 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
2203 | PMAPCOUNT(vac_color_erase); | | 2203 | PMAPCOUNT(vac_color_erase); |
2204 | } else if (rw_mappings == 0 | | 2204 | } else if (rw_mappings == 0 |
2205 | && (md->pvh_attrs & PVF_KMPAGE) == 0) { | | 2205 | && (md->pvh_attrs & PVF_KMPAGE) == 0) { |
2206 | KASSERT((md->pvh_attrs & PVF_WRITE) == 0); | | 2206 | KASSERT((md->pvh_attrs & PVF_WRITE) == 0); |
2207 | | | 2207 | |
2208 | /* | | 2208 | /* |
2209 | * If the page has dirty cache lines, clean it. | | 2209 | * If the page has dirty cache lines, clean it. |
2210 | */ | | 2210 | */ |
2211 | if (md->pvh_attrs & PVF_DIRTY) | | 2211 | if (md->pvh_attrs & PVF_DIRTY) |
2212 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); | | 2212 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); |
2213 | | | 2213 | |
2214 | /* | | 2214 | /* |
2215 | * If this is the first remapping (we know that there are no | | 2215 | * If this is the first remapping (we know that there are no |
2216 | * writeable mappings), then this is a simple color change. | | 2216 | * writeable mappings), then this is a simple color change. |
2217 | 		 * Otherwise this is a secondary r/o mapping, which means | | 2217 | 		 * Otherwise this is a secondary r/o mapping, which means
2218 | * we don't have to do anything. | | 2218 | * we don't have to do anything. |
2219 | */ | | 2219 | */ |
2220 | if (ro_mappings == 1) { | | 2220 | if (ro_mappings == 1) { |
2221 | KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0); | | 2221 | KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0); |
2222 | md->pvh_attrs &= PAGE_SIZE - 1; | | 2222 | md->pvh_attrs &= PAGE_SIZE - 1; |
2223 | md->pvh_attrs |= (va & arm_cache_prefer_mask); | | 2223 | md->pvh_attrs |= (va & arm_cache_prefer_mask); |
2224 | PMAPCOUNT(vac_color_change); | | 2224 | PMAPCOUNT(vac_color_change); |
2225 | } else { | | 2225 | } else { |
2226 | PMAPCOUNT(vac_color_blind); | | 2226 | PMAPCOUNT(vac_color_blind); |
2227 | } | | 2227 | } |
2228 | md->pvh_attrs |= PVF_MULTCLR; | | 2228 | md->pvh_attrs |= PVF_MULTCLR; |
2229 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2229 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2230 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2230 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2231 | return; | | 2231 | return; |
2232 | } else { | | 2232 | } else { |
2233 | if (rw_mappings > 0) | | 2233 | if (rw_mappings > 0) |
2234 | md->pvh_attrs |= PVF_WRITE; | | 2234 | md->pvh_attrs |= PVF_WRITE; |
2235 | | | 2235 | |
2236 | /* color conflict. evict from cache. */ | | 2236 | /* color conflict. evict from cache. */ |
2237 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); | | 2237 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); |
2238 | | | 2238 | |
2239 | 		/* the list can't be empty because this was an enter/modify */ | | 2239 | 		/* the list can't be empty because this was an enter/modify */
2240 | pv = SLIST_FIRST(&md->pvh_list); | | 2240 | pv = SLIST_FIRST(&md->pvh_list); |
2241 | if ((md->pvh_attrs & PVF_KMPAGE) == 0) { | | 2241 | if ((md->pvh_attrs & PVF_KMPAGE) == 0) { |
2242 | KASSERT(pv); | | 2242 | KASSERT(pv); |
2243 | /* | | 2243 | /* |
2244 | 			 * If there's only one mapping, change the color to the | | 2244 | 			 * If there's only one mapping, change the color to the
2245 | * page's new color and return. Restore the DIRTY bit | | 2245 | * page's new color and return. Restore the DIRTY bit |
2246 | * that was erased by pmap_flush_page. | | 2246 | * that was erased by pmap_flush_page. |
2247 | */ | | 2247 | */ |
2248 | if (SLIST_NEXT(pv, pv_link) == NULL) { | | 2248 | if (SLIST_NEXT(pv, pv_link) == NULL) { |
2249 | md->pvh_attrs &= PAGE_SIZE - 1; | | 2249 | md->pvh_attrs &= PAGE_SIZE - 1; |
2250 | md->pvh_attrs |= (va & arm_cache_prefer_mask); | | 2250 | md->pvh_attrs |= (va & arm_cache_prefer_mask); |
2251 | if (md->pvh_attrs & PVF_DMOD) | | 2251 | if (md->pvh_attrs & PVF_DMOD) |
2252 | md->pvh_attrs |= PVF_DIRTY; | | 2252 | md->pvh_attrs |= PVF_DIRTY; |
2253 | PMAPCOUNT(vac_color_change); | | 2253 | PMAPCOUNT(vac_color_change); |
2254 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2254 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2255 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2255 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2256 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 2256 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
2257 | return; | | 2257 | return; |
2258 | } | | 2258 | } |
2259 | } | | 2259 | } |
2260 | bad_alias = true; | | 2260 | bad_alias = true; |
2261 | md->pvh_attrs &= ~PVF_COLORED; | | 2261 | md->pvh_attrs &= ~PVF_COLORED; |
2262 | md->pvh_attrs |= PVF_NC; | | 2262 | md->pvh_attrs |= PVF_NC; |
2263 | PMAPCOUNT(vac_color_erase); | | 2263 | PMAPCOUNT(vac_color_erase); |
2264 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2264 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2265 | } | | 2265 | } |
2266 | | | 2266 | |
2267 | fixup: | | 2267 | fixup: |
2268 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2268 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2269 | | | 2269 | |
2270 | /* | | 2270 | /* |
2271 | 	 * Turn caching on/off for all pages. | | 2271 | 	 * Turn caching on/off for all pages.
2272 | */ | | 2272 | */ |
2273 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 2273 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
2274 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pv->pv_pmap, | | 2274 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pv->pv_pmap, |
2275 | pv->pv_va); | | 2275 | pv->pv_va); |
2276 | KASSERTMSG(l2b != NULL, "%#lx", va); | | 2276 | KASSERTMSG(l2b != NULL, "%#lx", va); |
2277 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 2277 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
2278 | const pt_entry_t opte = *ptep; | | 2278 | const pt_entry_t opte = *ptep; |
2279 | pt_entry_t npte = opte & ~L2_S_CACHE_MASK; | | 2279 | pt_entry_t npte = opte & ~L2_S_CACHE_MASK; |
2280 | if (bad_alias) { | | 2280 | if (bad_alias) { |
2281 | pv->pv_flags |= PVF_NC; | | 2281 | pv->pv_flags |= PVF_NC; |
2282 | } else { | | 2282 | } else { |
2283 | pv->pv_flags &= ~PVF_NC; | | 2283 | pv->pv_flags &= ~PVF_NC; |
2284 | npte |= pte_l2_s_cache_mode; | | 2284 | npte |= pte_l2_s_cache_mode; |
2285 | } | | 2285 | } |
2286 | | | 2286 | |
2287 | 		if (opte == npte)	/* only update if there's a change */ | | 2287 | 		if (opte == npte)	/* only update if there's a change */
2288 | continue; | | 2288 | continue; |
2289 | | | 2289 | |
2290 | if (l2pte_valid_p(opte)) { | | 2290 | if (l2pte_valid_p(opte)) { |
2291 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, pv->pv_flags); | | 2291 | pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, pv->pv_flags); |
2292 | } | | 2292 | } |
2293 | | | 2293 | |
2294 | l2pte_set(ptep, npte, opte); | | 2294 | l2pte_set(ptep, npte, opte); |
2295 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); | | 2295 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); |
2296 | } | | 2296 | } |
2297 | #endif /* !ARM_MMU_EXTENDED */ | | 2297 | #endif /* !ARM_MMU_EXTENDED */ |
2298 | } | | 2298 | } |
2299 | #endif /* PMAP_CACHE_VIPT */ | | 2299 | #endif /* PMAP_CACHE_VIPT */ |
2300 | | | 2300 | |
2301 | | | 2301 | |
2302 | /* | | 2302 | /* |
2303 | * Modify pte bits for all ptes corresponding to the given physical address. | | 2303 | * Modify pte bits for all ptes corresponding to the given physical address. |
2304 | * We use `maskbits' rather than `clearbits' because we're always passing | | 2304 | * We use `maskbits' rather than `clearbits' because we're always passing |
2305 | * constants and the latter would require an extra inversion at run-time. | | 2305 | * constants and the latter would require an extra inversion at run-time. |
2306 | */ | | 2306 | */ |
2307 | static void | | 2307 | static void |
2308 | pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits) | | 2308 | pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits) |
2309 | { | | 2309 | { |
2310 | struct pv_entry *pv; | | 2310 | struct pv_entry *pv; |
2311 | #ifdef PMAP_CACHE_VIPT | | 2311 | #ifdef PMAP_CACHE_VIPT |
2312 | const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs); | | 2312 | const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs); |
2313 | bool need_syncicache = false; | | 2313 | bool need_syncicache = false; |
2314 | #ifdef ARM_MMU_EXTENDED | | 2314 | #ifdef ARM_MMU_EXTENDED |
2315 | const u_int execbits = (maskbits & PVF_EXEC) ? L2_XS_XN : 0; | | 2315 | const u_int execbits = (maskbits & PVF_EXEC) ? L2_XS_XN : 0; |
2316 | #else | | 2316 | #else |
2317 | const u_int execbits = 0; | | 2317 | const u_int execbits = 0; |
2318 | bool need_vac_me_harder = false; | | 2318 | bool need_vac_me_harder = false; |
2319 | #endif | | 2319 | #endif |
2320 | #else | | 2320 | #else |
2321 | const u_int execbits = 0; | | 2321 | const u_int execbits = 0; |
2322 | #endif | | 2322 | #endif |
2323 | | | 2323 | |
2324 | UVMHIST_FUNC(__func__); | | 2324 | UVMHIST_FUNC(__func__); |
2325 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx maskbits %#jx", | | 2325 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx maskbits %#jx", |
2326 | (uintptr_t)md, pa, maskbits, 0); | | 2326 | (uintptr_t)md, pa, maskbits, 0); |
2327 | | | 2327 | |
2328 | #ifdef PMAP_CACHE_VIPT | | 2328 | #ifdef PMAP_CACHE_VIPT |
2329 | /* | | 2329 | /* |
2330 | * If we might want to sync the I-cache and we've modified it, | | 2330 | * If we might want to sync the I-cache and we've modified it, |
2331 | * then we know we definitely need to sync or discard it. | | 2331 | * then we know we definitely need to sync or discard it. |
2332 | */ | | 2332 | */ |
2333 | if (want_syncicache) { | | 2333 | if (want_syncicache) { |
2334 | if (md->pvh_attrs & PVF_MOD) { | | 2334 | if (md->pvh_attrs & PVF_MOD) { |
2335 | need_syncicache = true; | | 2335 | need_syncicache = true; |
2336 | } | | 2336 | } |
2337 | } | | 2337 | } |
2338 | #endif | | 2338 | #endif |
2339 | KASSERT(pmap_page_locked_p(md)); | | 2339 | KASSERT(pmap_page_locked_p(md)); |
2340 | | | 2340 | |
2341 | /* | | 2341 | /* |
2342 | * Clear saved attributes (modify, reference) | | 2342 | * Clear saved attributes (modify, reference) |
2343 | */ | | 2343 | */ |
2344 | md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); | | 2344 | md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); |
2345 | | | 2345 | |
2346 | if (SLIST_EMPTY(&md->pvh_list)) { | | 2346 | if (SLIST_EMPTY(&md->pvh_list)) { |
2347 | #if defined(PMAP_CACHE_VIPT) | | 2347 | #if defined(PMAP_CACHE_VIPT) |
2348 | if (need_syncicache) { | | 2348 | if (need_syncicache) { |
2349 | /* | | 2349 | /* |
2350 | * No one has it mapped, so just discard it. The next | | 2350 | * No one has it mapped, so just discard it. The next |
2351 | * exec remapping will cause it to be synced. | | 2351 | * exec remapping will cause it to be synced. |
2352 | */ | | 2352 | */ |
2353 | md->pvh_attrs &= ~PVF_EXEC; | | 2353 | md->pvh_attrs &= ~PVF_EXEC; |
2354 | PMAPCOUNT(exec_discarded_clearbit); | | 2354 | PMAPCOUNT(exec_discarded_clearbit); |
2355 | } | | 2355 | } |
2356 | #endif | | 2356 | #endif |
2357 | return; | | 2357 | return; |
2358 | } | | 2358 | } |
2359 | | | 2359 | |
2360 | /* | | 2360 | /* |
2361 | * Loop over all current mappings setting/clearing as appropriate | | 2361 | * Loop over all current mappings setting/clearing as appropriate |
2362 | */ | | 2362 | */ |
2363 | for (pv = SLIST_FIRST(&md->pvh_list); pv != NULL;) { | | 2363 | for (pv = SLIST_FIRST(&md->pvh_list); pv != NULL;) { |
2364 | pmap_t pm = pv->pv_pmap; | | 2364 | pmap_t pm = pv->pv_pmap; |
2365 | const vaddr_t va = pv->pv_va; | | 2365 | const vaddr_t va = pv->pv_va; |
2366 | const u_int oflags = pv->pv_flags; | | 2366 | const u_int oflags = pv->pv_flags; |
2367 | #ifndef ARM_MMU_EXTENDED | | 2367 | #ifndef ARM_MMU_EXTENDED |
2368 | /* | | 2368 | /* |
2369 | * Kernel entries are unmanaged and as such not to be changed. | | 2369 | * Kernel entries are unmanaged and as such not to be changed. |
2370 | */ | | 2370 | */ |
2371 | if (PV_IS_KENTRY_P(oflags)) { | | 2371 | if (PV_IS_KENTRY_P(oflags)) { |
2372 | pv = SLIST_NEXT(pv, pv_link); | | 2372 | pv = SLIST_NEXT(pv, pv_link); |
2373 | continue; | | 2373 | continue; |
2374 | } | | 2374 | } |
2375 | #endif | | 2375 | #endif |
2376 | | | 2376 | |
2377 | /* | | 2377 | /* |
2378 | * Try to get a hold on the pmap's lock. We must do this | | 2378 | * Try to get a hold on the pmap's lock. We must do this |
2379 | * while still holding the page locked, to know that the | | 2379 | * while still holding the page locked, to know that the |
2380 | * page is still associated with the pmap and the mapping is | | 2380 | * page is still associated with the pmap and the mapping is |
2381 | * in place. If a hold can't be had, unlock and wait for | | 2381 | * in place. If a hold can't be had, unlock and wait for |
2382 | * the pmap's lock to become available and retry. The pmap | | 2382 | * the pmap's lock to become available and retry. The pmap |
2383 | * must be ref'd over this dance to stop it disappearing | | 2383 | * must be ref'd over this dance to stop it disappearing |
2384 | * behind us. | | 2384 | * behind us. |
2385 | */ | | 2385 | */ |
2386 | if (!mutex_tryenter(&pm->pm_lock)) { | | 2386 | if (!mutex_tryenter(&pm->pm_lock)) { |
2387 | pmap_reference(pm); | | 2387 | pmap_reference(pm); |
2388 | pmap_release_page_lock(md); | | 2388 | pmap_release_page_lock(md); |
2389 | pmap_acquire_pmap_lock(pm); | | 2389 | pmap_acquire_pmap_lock(pm); |
2390 | /* nothing, just wait for it */ | | 2390 | /* nothing, just wait for it */ |
2391 | pmap_release_pmap_lock(pm); | | 2391 | pmap_release_pmap_lock(pm); |
2392 | pmap_destroy(pm); | | 2392 | pmap_destroy(pm); |
2393 | /* Restart from the beginning. */ | | 2393 | /* Restart from the beginning. */ |
2394 | pmap_acquire_page_lock(md); | | 2394 | pmap_acquire_page_lock(md); |
2395 | pv = SLIST_FIRST(&md->pvh_list); | | 2395 | pv = SLIST_FIRST(&md->pvh_list); |
2396 | continue; | | 2396 | continue; |
2397 | } | | 2397 | } |
2398 | pv->pv_flags &= ~maskbits; | | 2398 | pv->pv_flags &= ~maskbits; |
2399 | | | 2399 | |
2400 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va); | | 2400 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va); |
2401 | KASSERTMSG(l2b != NULL, "%#lx", va); | | 2401 | KASSERTMSG(l2b != NULL, "%#lx", va); |
2402 | | | 2402 | |
2403 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 2403 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; |
2404 | const pt_entry_t opte = *ptep; | | 2404 | const pt_entry_t opte = *ptep; |
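 | 		/* | |  | 		/*
 | 		 * Start from the old PTE; execbits adds the XN bit when | |  | 		 * Start from the old PTE; execbits adds the XN bit when
 | 		 * PVF_EXEC is being revoked (ARM_MMU_EXTENDED only). | |  | 		 * PVF_EXEC is being revoked (ARM_MMU_EXTENDED only).
 | 		 */ | |  | 		 */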
2405 | pt_entry_t npte = opte | execbits; | | 2405 | pt_entry_t npte = opte | execbits; |
2406 | | | 2406 | |
2407 | #ifdef ARM_MMU_EXTENDED | | 2407 | #ifdef ARM_MMU_EXTENDED |
2408 | KASSERT((opte & L2_XS_nG) == (pm == pmap_kernel() ? 0 : L2_XS_nG)); | | 2408 | KASSERT((opte & L2_XS_nG) == (pm == pmap_kernel() ? 0 : L2_XS_nG)); |
2409 | #endif | | 2409 | #endif |
2410 | | | 2410 | |
2411 | UVMHIST_LOG(maphist, "pv %#jx pm %#jx va %#jx flag %#jx", | | 2411 | UVMHIST_LOG(maphist, "pv %#jx pm %#jx va %#jx flag %#jx", |
2412 | (uintptr_t)pv, (uintptr_t)pm, va, oflags); | | 2412 | (uintptr_t)pv, (uintptr_t)pm, va, oflags); |
2413 | | | 2413 | |
2414 | if (maskbits & (PVF_WRITE|PVF_MOD)) { | | 2414 | if (maskbits & (PVF_WRITE|PVF_MOD)) { |
2415 | #ifdef PMAP_CACHE_VIVT | | 2415 | #ifdef PMAP_CACHE_VIVT |
2416 | if ((oflags & PVF_NC)) { | | 2416 | if ((oflags & PVF_NC)) { |
2417 | /* | | 2417 | /* |
2418 | * Entry is not cacheable: | | 2418 | * Entry is not cacheable: |
2419 | * | | 2419 | * |
2420 | * Don't turn caching on again if this is a | | 2420 | * Don't turn caching on again if this is a |
2421 | * modified emulation. This would be | | 2421 | * modified emulation. This would be |
2422 | 				 * inconsistent with the settings created by | | 2422 | 				 * inconsistent with the settings created by
2423 | 				 * pmap_vac_me_harder(). Otherwise, it's safe | | 2423 | 				 * pmap_vac_me_harder(). Otherwise, it's safe
2424 | 				 * to re-enable caching. | | 2424 | 				 * to re-enable caching.
2425 | * | | 2425 | * |
2426 | * There's no need to call pmap_vac_me_harder() | | 2426 | * There's no need to call pmap_vac_me_harder() |
2427 | * here: all pages are losing their write | | 2427 | * here: all pages are losing their write |
2428 | * permission. | | 2428 | * permission. |
2429 | */ | | 2429 | */ |
2430 | if (maskbits & PVF_WRITE) { | | 2430 | if (maskbits & PVF_WRITE) { |
2431 | npte |= pte_l2_s_cache_mode; | | 2431 | npte |= pte_l2_s_cache_mode; |
2432 | pv->pv_flags &= ~PVF_NC; | | 2432 | pv->pv_flags &= ~PVF_NC; |
2433 | } | | 2433 | } |
2434 | } else if (l2pte_writable_p(opte)) { | | 2434 | } else if (l2pte_writable_p(opte)) { |
2435 | /* | | 2435 | /* |
2436 | 				 * Entry is writable/cacheable: check if the | | 2436 | 				 * Entry is writable/cacheable: check if the
2437 | 				 * pmap is current; if it is, flush the page, | | 2437 | 				 * pmap is current; if it is, flush the page,
2438 | 				 * otherwise it won't be in the cache. | | 2438 | 				 * otherwise it won't be in the cache.
2439 | */ | | 2439 | */ |
2440 | pmap_cache_wbinv_page(pm, va, | | 2440 | pmap_cache_wbinv_page(pm, va, |
2441 | (maskbits & PVF_REF) != 0, | | 2441 | (maskbits & PVF_REF) != 0, |
2442 | oflags|PVF_WRITE); | | 2442 | oflags|PVF_WRITE); |
2443 | } | | 2443 | } |
2444 | #endif | | 2444 | #endif |
2445 | | | 2445 | |
2446 | /* make the pte read only */ | | 2446 | /* make the pte read only */ |
2447 | npte = l2pte_set_readonly(npte); | | 2447 | npte = l2pte_set_readonly(npte); |
2448 | | | 2448 | |
2449 | if ((maskbits & oflags & PVF_WRITE)) { | | 2449 | if ((maskbits & oflags & PVF_WRITE)) { |
2450 | /* | | 2450 | /* |
2451 | * Keep alias accounting up to date | | 2451 | * Keep alias accounting up to date |
2452 | */ | | 2452 | */ |
2453 | if (pm == pmap_kernel()) { | | 2453 | if (pm == pmap_kernel()) { |
2454 | md->krw_mappings--; | | 2454 | md->krw_mappings--; |
2455 | md->kro_mappings++; | | 2455 | md->kro_mappings++; |
2456 | } else { | | 2456 | } else { |
2457 | md->urw_mappings--; | | 2457 | md->urw_mappings--; |
2458 | md->uro_mappings++; | | 2458 | md->uro_mappings++; |
2459 | } | | 2459 | } |
2460 | #ifdef PMAP_CACHE_VIPT | | 2460 | #ifdef PMAP_CACHE_VIPT |
2461 | if (arm_cache_prefer_mask != 0) { | | 2461 | if (arm_cache_prefer_mask != 0) { |
2462 | if (md->urw_mappings + md->krw_mappings == 0) { | | 2462 | if (md->urw_mappings + md->krw_mappings == 0) { |
2463 | md->pvh_attrs &= ~PVF_WRITE; | | 2463 | md->pvh_attrs &= ~PVF_WRITE; |
2464 | } else { | | 2464 | } else { |
2465 | PMAP_VALIDATE_MD_PAGE(md); | | 2465 | PMAP_VALIDATE_MD_PAGE(md); |
2466 | } | | 2466 | } |
2467 | } | | 2467 | } |
2468 | if (want_syncicache) | | 2468 | if (want_syncicache) |
2469 | need_syncicache = true; | | 2469 | need_syncicache = true; |
2470 | #ifndef ARM_MMU_EXTENDED | | 2470 | #ifndef ARM_MMU_EXTENDED |
2471 | need_vac_me_harder = true; | | 2471 | need_vac_me_harder = true; |
2472 | #endif | | 2472 | #endif |
2473 | #endif /* PMAP_CACHE_VIPT */ | | 2473 | #endif /* PMAP_CACHE_VIPT */ |
2474 | } | | 2474 | } |
2475 | } | | 2475 | } |
2476 | | | 2476 | |
2477 | if (maskbits & PVF_REF) { | | 2477 | if (maskbits & PVF_REF) { |
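 | 			/* The bare "true" lets the #ifdef'd terms chain with &&. */ | |  | 			/* The bare "true" lets the #ifdef'd terms chain with &&. */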
2478 | if (true | | 2478 | if (true |
2479 | #ifndef ARM_MMU_EXTENDED | | 2479 | #ifndef ARM_MMU_EXTENDED |
2480 | && (oflags & PVF_NC) == 0 | | 2480 | && (oflags & PVF_NC) == 0 |
2481 | #endif | | 2481 | #endif |
2482 | && (maskbits & (PVF_WRITE|PVF_MOD)) == 0 | | 2482 | && (maskbits & (PVF_WRITE|PVF_MOD)) == 0 |
2483 | && l2pte_valid_p(npte)) { | | 2483 | && l2pte_valid_p(npte)) { |
2484 | #ifdef PMAP_CACHE_VIVT | | 2484 | #ifdef PMAP_CACHE_VIVT |
2485 | /* | | 2485 | /* |
2486 | * Check npte here; we may have already | | 2486 | * Check npte here; we may have already |
2487 | * done the wbinv above, and the validity | | 2487 | * done the wbinv above, and the validity |
2488 | * of the PTE is the same for opte and | | 2488 | * of the PTE is the same for opte and |
2489 | * npte. | | 2489 | * npte. |
2490 | */ | | 2490 | */ |
2491 | pmap_cache_wbinv_page(pm, va, true, oflags); | | 2491 | pmap_cache_wbinv_page(pm, va, true, oflags); |
2492 | #endif | | 2492 | #endif |
2493 | } | | 2493 | } |
2494 | | | 2494 | |
2495 | /* | | 2495 | /* |
2496 | * Make the PTE invalid so that we will take a | | 2496 | * Make the PTE invalid so that we will take a |
2497 | * page fault the next time the mapping is | | 2497 | * page fault the next time the mapping is |
2498 | * referenced. | | 2498 | * referenced. |
2499 | */ | | 2499 | */ |
2500 | npte &= ~L2_TYPE_MASK; | | 2500 | npte &= ~L2_TYPE_MASK; |
2501 | npte |= L2_TYPE_INV; | | 2501 | npte |= L2_TYPE_INV; |
2502 | } | | 2502 | } |
2503 | | | 2503 | |
2504 | if (npte != opte) { | | 2504 | if (npte != opte) { |
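 | 			/* | |  | 			/*
 | 			 * Break-before-make: clear the PTE and flush the | |  | 			 * Break-before-make: clear the PTE and flush the
 | 			 * TLB entry before installing the new PTE, so the | |  | 			 * TLB entry before installing the new PTE, so the
 | 			 * MMU never sees a stale translation. | |  | 			 * MMU never sees a stale translation.
 | 			 */ | |  | 			 */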
2505 | l2pte_reset(ptep); | | 2505 | l2pte_reset(ptep); |
2506 | PTE_SYNC(ptep); | | 2506 | PTE_SYNC(ptep); |
2507 | | | 2507 | |
2508 | /* Flush the TLB entry if a current pmap. */ | | 2508 | /* Flush the TLB entry if a current pmap. */ |
2509 | pmap_tlb_flush_SE(pm, va, oflags); | | 2509 | pmap_tlb_flush_SE(pm, va, oflags); |
2510 | | | 2510 | |
2511 | l2pte_set(ptep, npte, 0); | | 2511 | l2pte_set(ptep, npte, 0); |
2512 | PTE_SYNC(ptep); | | 2512 | PTE_SYNC(ptep); |
2513 | } | | 2513 | } |
2514 | | | 2514 | |
2515 | pmap_release_pmap_lock(pm); | | 2515 | pmap_release_pmap_lock(pm); |
2516 | | | 2516 | |
2517 | UVMHIST_LOG(maphist, "pm %#jx va %#jx opte %#jx npte %#jx", | | 2517 | UVMHIST_LOG(maphist, "pm %#jx va %#jx opte %#jx npte %#jx", |
2518 | (uintptr_t)pm, va, opte, npte); | | 2518 | (uintptr_t)pm, va, opte, npte); |
2519 | | | 2519 | |
2520 | /* Move to next entry. */ | | 2520 | /* Move to next entry. */ |
2521 | pv = SLIST_NEXT(pv, pv_link); | | 2521 | pv = SLIST_NEXT(pv, pv_link); |
2522 | } | | 2522 | } |
2523 | | | 2523 | |
2524 | #if defined(PMAP_CACHE_VIPT) | | 2524 | #if defined(PMAP_CACHE_VIPT) |
2525 | /* | | 2525 | /* |
2526 | * If we need to sync the I-cache and we haven't done it yet, do it. | | 2526 | * If we need to sync the I-cache and we haven't done it yet, do it. |
2527 | */ | | 2527 | */ |
2528 | if (need_syncicache) { | | 2528 | if (need_syncicache) { |
2529 | pmap_syncicache_page(md, pa); | | 2529 | pmap_syncicache_page(md, pa); |
2530 | PMAPCOUNT(exec_synced_clearbit); | | 2530 | PMAPCOUNT(exec_synced_clearbit); |
2531 | } | | 2531 | } |
2532 | #ifndef ARM_MMU_EXTENDED | | 2532 | #ifndef ARM_MMU_EXTENDED |
2533 | /* | | 2533 | /* |
2534 | * If we are changing this to read-only, we need to call vac_me_harder | | 2534 | * If we are changing this to read-only, we need to call vac_me_harder |
2535 | * so we can change all the read-only pages to cacheable. We pretend | | 2535 | * so we can change all the read-only pages to cacheable. We pretend |
2536 | 	 * this is a page deletion. | | 2536 | 	 * this is a page deletion.
2537 | */ | | 2537 | */ |
2538 | if (need_vac_me_harder) { | | 2538 | if (need_vac_me_harder) { |
2539 | if (md->pvh_attrs & PVF_NC) | | 2539 | if (md->pvh_attrs & PVF_NC) |
2540 | pmap_vac_me_harder(md, pa, NULL, 0); | | 2540 | pmap_vac_me_harder(md, pa, NULL, 0); |
2541 | } | | 2541 | } |
2542 | #endif /* !ARM_MMU_EXTENDED */ | | 2542 | #endif /* !ARM_MMU_EXTENDED */ |
2543 | #endif /* PMAP_CACHE_VIPT */ | | 2543 | #endif /* PMAP_CACHE_VIPT */ |
2544 | } | | 2544 | } |
2545 | | | 2545 | |
2546 | /* | | 2546 | /* |
2547 | * pmap_clean_page() | | 2547 | * pmap_clean_page() |
2548 | * | | 2548 | * |
2549 | * This is a local function used to work out the best strategy to clean | | 2549 | * This is a local function used to work out the best strategy to clean |
2550 | * a single page referenced by its entry in the PV table. It's used by | | 2550 | * a single page referenced by its entry in the PV table. It's used by |
2551 | * pmap_copy_page, pmap_zero_page and maybe some others later on. | | 2551 | * pmap_copy_page, pmap_zero_page and maybe some others later on. |
2552 | * | | 2552 | * |
2553 | * Its policy is effectively: | | 2553 | * Its policy is effectively: |
2554 | * o If there are no mappings, we don't bother doing anything with the cache. | | 2554 | * o If there are no mappings, we don't bother doing anything with the cache. |
2555 | * o If there is one mapping, we clean just that page. | | 2555 | * o If there is one mapping, we clean just that page. |
2556 | * o If there are multiple mappings, we clean the entire cache. | | 2556 | * o If there are multiple mappings, we clean the entire cache. |
2557 | * | | 2557 | * |
2558 | * So that some functions can be further optimised, it returns 0 if it didn't | | 2558 | * So that some functions can be further optimised, it returns 0 if it didn't |
2559 | * clean the entire cache, or 1 if it did. | | 2559 | * clean the entire cache, or 1 if it did. |
2560 | * | | 2560 | * |
2561 | * XXX One bug in this routine is that if the pv_entry has a single page | | 2561 | * XXX One bug in this routine is that if the pv_entry has a single page |
2562 | * mapped at 0x00000000 a whole cache clean will be performed rather than | | 2562 | * mapped at 0x00000000 a whole cache clean will be performed rather than |
2563 |  * just the 1 page. Since this should not occur in everyday use, if it does | | 2563 |  * just the 1 page. Since this should not occur in everyday use, if it does
2564 |  * happen it will merely result in a less efficient clean of the page. | | 2564 |  * happen it will merely result in a less efficient clean of the page.
2565 | */ | | 2565 | */ |
2566 | #ifdef PMAP_CACHE_VIVT | | 2566 | #ifdef PMAP_CACHE_VIVT |
2567 | static bool | | 2567 | static bool |
2568 | pmap_clean_page(struct vm_page_md *md, bool is_src) | | 2568 | pmap_clean_page(struct vm_page_md *md, bool is_src) |
2569 | { | | 2569 | { |
2570 | struct pv_entry *pv; | | 2570 | struct pv_entry *pv; |
2571 | pmap_t pm_to_clean = NULL; | | 2571 | pmap_t pm_to_clean = NULL; |
2572 | bool cache_needs_cleaning = false; | | 2572 | bool cache_needs_cleaning = false; |
2573 | vaddr_t page_to_clean = 0; | | 2573 | vaddr_t page_to_clean = 0; |
2574 | u_int flags = 0; | | 2574 | u_int flags = 0; |
2575 | | | 2575 | |
2576 | /* | | 2576 | /* |
2577 | * Since we flush the cache each time we change to a different | | 2577 | * Since we flush the cache each time we change to a different |
2578 | * user vmspace, we only need to flush the page if it is in the | | 2578 | * user vmspace, we only need to flush the page if it is in the |
2579 | * current pmap. | | 2579 | * current pmap. |
2580 | */ | | 2580 | */ |
2581 | KASSERT(pmap_page_locked_p(md)); | | 2581 | KASSERT(pmap_page_locked_p(md)); |
2582 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 2582 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
2583 | if (pmap_is_current(pv->pv_pmap)) { | | 2583 | if (pmap_is_current(pv->pv_pmap)) { |
2584 | flags |= pv->pv_flags; | | 2584 | flags |= pv->pv_flags; |
2585 | /* | | 2585 | /* |
2586 | * The page is mapped non-cacheable in | | 2586 | * The page is mapped non-cacheable in |
2587 | * this map. No need to flush the cache. | | 2587 | * this map. No need to flush the cache. |
2588 | */ | | 2588 | */ |
2589 | if (pv->pv_flags & PVF_NC) { | | 2589 | if (pv->pv_flags & PVF_NC) { |
2590 | #ifdef DIAGNOSTIC | | 2590 | #ifdef DIAGNOSTIC |
2591 | KASSERT(!cache_needs_cleaning); | | 2591 | KASSERT(!cache_needs_cleaning); |
2592 | #endif | | 2592 | #endif |
2593 | break; | | 2593 | break; |
2594 | } else if (is_src && (pv->pv_flags & PVF_WRITE) == 0) | | 2594 | } else if (is_src && (pv->pv_flags & PVF_WRITE) == 0) |
2595 | continue; | | 2595 | continue; |
2596 | if (cache_needs_cleaning) { | | 2596 | if (cache_needs_cleaning) { |
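 | 				/* | |  | 				/*
 | 				 * A second current mapping: fall back to | |  | 				 * A second current mapping: fall back to
 | 				 * cleaning the whole cache below. | |  | 				 * cleaning the whole cache below.
 | 				 */ | |  | 				 */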
2597 | page_to_clean = 0; | | 2597 | page_to_clean = 0; |
2598 | break; | | 2598 | break; |
2599 | } else { | | 2599 | } else { |
2600 | page_to_clean = pv->pv_va; | | 2600 | page_to_clean = pv->pv_va; |
2601 | pm_to_clean = pv->pv_pmap; | | 2601 | pm_to_clean = pv->pv_pmap; |
2602 | } | | 2602 | } |
2603 | cache_needs_cleaning = true; | | 2603 | cache_needs_cleaning = true; |
2604 | } | | 2604 | } |
2605 | } | | 2605 | } |
2606 | | | 2606 | |
2607 | if (page_to_clean) { | | 2607 | if (page_to_clean) { |
2608 | pmap_cache_wbinv_page(pm_to_clean, page_to_clean, | | 2608 | pmap_cache_wbinv_page(pm_to_clean, page_to_clean, |
2609 | !is_src, flags | PVF_REF); | | 2609 | !is_src, flags | PVF_REF); |
2610 | } else if (cache_needs_cleaning) { | | 2610 | } else if (cache_needs_cleaning) { |
2611 | pmap_t const pm = curproc->p_vmspace->vm_map.pmap; | | 2611 | pmap_t const pm = curproc->p_vmspace->vm_map.pmap; |
2612 | | | 2612 | |
2613 | pmap_cache_wbinv_all(pm, flags); | | 2613 | pmap_cache_wbinv_all(pm, flags); |
2614 | return true; | | 2614 | return true; |
2615 | } | | 2615 | } |
2616 | return false; | | 2616 | return false; |
2617 | } | | 2617 | } |
2618 | #endif | | 2618 | #endif |
2619 | | | 2619 | |
2620 | #ifdef PMAP_CACHE_VIPT | | 2620 | #ifdef PMAP_CACHE_VIPT |
2621 | /* | | 2621 | /* |
2622 | * Sync a page with the I-cache. Since this is a VIPT, we must pick the | | 2622 | * Sync a page with the I-cache. Since this is a VIPT, we must pick the |
2623 | * right cache alias to make sure we flush the right stuff. | | 2623 | * right cache alias to make sure we flush the right stuff. |
2624 | */ | | 2624 | */ |
2625 | void | | 2625 | void |
2626 | pmap_syncicache_page(struct vm_page_md *md, paddr_t pa) | | 2626 | pmap_syncicache_page(struct vm_page_md *md, paddr_t pa) |
2627 | { | | 2627 | { |
2628 | pmap_t kpm = pmap_kernel(); | | 2628 | pmap_t kpm = pmap_kernel(); |
2629 | const size_t way_size = arm_pcache.icache_type == CACHE_TYPE_PIPT | | 2629 | const size_t way_size = arm_pcache.icache_type == CACHE_TYPE_PIPT |
2630 | ? PAGE_SIZE | | 2630 | ? PAGE_SIZE |
2631 | : arm_pcache.icache_way_size; | | 2631 | : arm_pcache.icache_way_size; |
2632 | | | 2632 | |
2633 | UVMHIST_FUNC(__func__); | | 2633 | UVMHIST_FUNC(__func__); |
2634 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx (attrs=%#jx)", | | 2634 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx (attrs=%#jx)", |
2635 | (uintptr_t)md, pa, md->pvh_attrs, 0); | | 2635 | (uintptr_t)md, pa, md->pvh_attrs, 0); |
2636 | | | 2636 | |
2637 | /* | | 2637 | /* |
2638 | * No need to clean the page if it's non-cached. | | 2638 | * No need to clean the page if it's non-cached. |
2639 | */ | | 2639 | */ |
2640 | #ifndef ARM_MMU_EXTENDED | | 2640 | #ifndef ARM_MMU_EXTENDED |
2641 | if (md->pvh_attrs & PVF_NC) | | 2641 | if (md->pvh_attrs & PVF_NC) |
2642 | return; | | 2642 | return; |
2643 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED); | | 2643 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED); |
2644 | #endif | | 2644 | #endif |
2645 | | | 2645 | |
2646 | pt_entry_t * const ptep = cpu_cdst_pte(0); | | 2646 | pt_entry_t * const ptep = cpu_cdst_pte(0); |
2647 | const vaddr_t dstp = cpu_cdstp(0); | | 2647 | const vaddr_t dstp = cpu_cdstp(0); |
2648 | #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS | | 2648 | #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS |
2649 | if (way_size <= PAGE_SIZE) { | | 2649 | if (way_size <= PAGE_SIZE) { |
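 | 		/* | |  | 		/*
 | 		 * The way fits in a single page, so sync it through | |  | 		 * The way fits in a single page, so sync it through
 | 		 * the direct map and avoid the temporary mapping below. | |  | 		 * the direct map and avoid the temporary mapping below.
 | 		 */ | |  | 		 */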
2650 | bool ok = false; | | 2650 | bool ok = false; |
2651 | vaddr_t vdstp = pmap_direct_mapped_phys(pa, &ok, dstp); | | 2651 | vaddr_t vdstp = pmap_direct_mapped_phys(pa, &ok, dstp); |
2652 | if (ok) { | | 2652 | if (ok) { |
2653 | cpu_icache_sync_range(vdstp, way_size); | | 2653 | cpu_icache_sync_range(vdstp, way_size); |
2654 | return; | | 2654 | return; |
2655 | } | | 2655 | } |
2656 | } | | 2656 | } |
2657 | #endif | | 2657 | #endif |
2658 | | | 2658 | |
2659 | /* | | 2659 | /* |
2660 | 	 * We don't worry about the color of the exec page: we map the | | 2660 | 	 * We don't worry about the color of the exec page: we map the
2661 | 	 * same physical page at every page offset in the way and then | | 2661 | 	 * same physical page at every page offset in the way and then
2662 | 	 * icache_sync the entire way so every alias is cleaned. | | 2662 | 	 * icache_sync the entire way so every alias is cleaned.
2663 | */ | | 2663 | */ |
2664 | const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode | | 2664 | const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode |
2665 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE); | | 2665 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE); |
2666 | | | 2666 | |
2667 | for (size_t i = 0, j = 0; i < way_size; | | 2667 | for (size_t i = 0, j = 0; i < way_size; |
2668 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { | | 2668 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { |
2669 | l2pte_reset(ptep + j); | | 2669 | l2pte_reset(ptep + j); |
2670 | PTE_SYNC(ptep + j); | | 2670 | PTE_SYNC(ptep + j); |
2671 | | | 2671 | |
2672 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); | | 2672 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); |
2673 | /* | | 2673 | /* |
2674 | 		 * Set up a PTE to flush these cache lines. | | 2674 | 		 * Set up a PTE to flush these cache lines.
2675 | */ | | 2675 | */ |
2676 | l2pte_set(ptep + j, npte, 0); | | 2676 | l2pte_set(ptep + j, npte, 0); |
2677 | } | | 2677 | } |
2678 | PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE); | | 2678 | PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE); |
2679 | | | 2679 | |
2680 | /* | | 2680 | /* |
2681 | * Flush it. | | 2681 | * Flush it. |
2682 | */ | | 2682 | */ |
2683 | cpu_icache_sync_range(dstp, way_size); | | 2683 | cpu_icache_sync_range(dstp, way_size); |
2684 | | | 2684 | |
2685 | for (size_t i = 0, j = 0; i < way_size; | | 2685 | for (size_t i = 0, j = 0; i < way_size; |
2686 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { | | 2686 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { |
2687 | /* | | 2687 | /* |
2688 | * Unmap the page(s). | | 2688 | * Unmap the page(s). |
2689 | */ | | 2689 | */ |
2690 | l2pte_reset(ptep + j); | | 2690 | l2pte_reset(ptep + j); |
2691 | PTE_SYNC(ptep + j); | | 2691 | PTE_SYNC(ptep + j); |
2692 | | | 2692 | |
2693 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); | | 2693 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); |
2694 | } | | 2694 | } |
2695 | | | 2695 | |
2696 | md->pvh_attrs |= PVF_EXEC; | | 2696 | md->pvh_attrs |= PVF_EXEC; |
2697 | PMAPCOUNT(exec_synced); | | 2697 | PMAPCOUNT(exec_synced); |
2698 | } | | 2698 | } |
2699 | | | 2699 | |
2700 | #ifndef ARM_MMU_EXTENDED | | 2700 | #ifndef ARM_MMU_EXTENDED |
2701 | void | | 2701 | void |
2702 | pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) | | 2702 | pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) |
2703 | { | | 2703 | { |
2704 | vsize_t va_offset, end_va; | | 2704 | vsize_t va_offset, end_va; |
2705 | bool wbinv_p; | | 2705 | bool wbinv_p; |
2706 | | | 2706 | |
2707 | if (arm_cache_prefer_mask == 0) | | 2707 | if (arm_cache_prefer_mask == 0) |
2708 | return; | | 2708 | return; |
2709 | | | 2709 | |
2710 | UVMHIST_FUNC(__func__); | | 2710 | UVMHIST_FUNC(__func__); |
2711 | 	UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx flush %#jx", | | 2711 | 	UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx flush %#jx",
2712 | 	    (uintptr_t)md, pa, flush, 0); | | 2712 | 	    (uintptr_t)md, pa, flush, 0);
2713 | | | 2713 | |
2714 | switch (flush) { | | 2714 | switch (flush) { |
2715 | case PMAP_FLUSH_PRIMARY: | | 2715 | case PMAP_FLUSH_PRIMARY: |
2716 | if (md->pvh_attrs & PVF_MULTCLR) { | | 2716 | if (md->pvh_attrs & PVF_MULTCLR) { |
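 | 			/* | |  | 			/*
 | 			 * The page has been mapped at more than one | |  | 			 * The page has been mapped at more than one
 | 			 * color, so flush every possible alias. | |  | 			 * color, so flush every possible alias.
 | 			 */ | |  | 			 */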
2717 | va_offset = 0; | | 2717 | va_offset = 0; |
2718 | end_va = arm_cache_prefer_mask; | | 2718 | end_va = arm_cache_prefer_mask; |
2719 | md->pvh_attrs &= ~PVF_MULTCLR; | | 2719 | md->pvh_attrs &= ~PVF_MULTCLR; |
2720 | PMAPCOUNT(vac_flush_lots); | | 2720 | PMAPCOUNT(vac_flush_lots); |
2721 | } else { | | 2721 | } else { |
2722 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 2722 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
2723 | end_va = va_offset; | | 2723 | end_va = va_offset; |
2724 | PMAPCOUNT(vac_flush_one); | | 2724 | PMAPCOUNT(vac_flush_one); |
2725 | } | | 2725 | } |
2726 | /* | | 2726 | /* |
2727 | * Mark that the page is no longer dirty. | | 2727 | * Mark that the page is no longer dirty. |
2728 | */ | | 2728 | */ |
2729 | md->pvh_attrs &= ~PVF_DIRTY; | | 2729 | md->pvh_attrs &= ~PVF_DIRTY; |
2730 | wbinv_p = true; | | 2730 | wbinv_p = true; |
2731 | break; | | 2731 | break; |
2732 | case PMAP_FLUSH_SECONDARY: | | 2732 | case PMAP_FLUSH_SECONDARY: |
2733 | va_offset = 0; | | 2733 | va_offset = 0; |
2734 | end_va = arm_cache_prefer_mask; | | 2734 | end_va = arm_cache_prefer_mask; |
2735 | wbinv_p = true; | | 2735 | wbinv_p = true; |
2736 | md->pvh_attrs &= ~PVF_MULTCLR; | | 2736 | md->pvh_attrs &= ~PVF_MULTCLR; |
2737 | PMAPCOUNT(vac_flush_lots); | | 2737 | PMAPCOUNT(vac_flush_lots); |
2738 | break; | | 2738 | break; |
2739 | case PMAP_CLEAN_PRIMARY: | | 2739 | case PMAP_CLEAN_PRIMARY: |
2740 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 2740 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
2741 | end_va = va_offset; | | 2741 | end_va = va_offset; |
2742 | wbinv_p = false; | | 2742 | wbinv_p = false; |
2743 | /* | | 2743 | /* |
2744 | * Mark that the page is no longer dirty. | | 2744 | * Mark that the page is no longer dirty. |
2745 | */ | | 2745 | */ |
2746 | if ((md->pvh_attrs & PVF_DMOD) == 0) | | 2746 | if ((md->pvh_attrs & PVF_DMOD) == 0) |
2747 | md->pvh_attrs &= ~PVF_DIRTY; | | 2747 | md->pvh_attrs &= ~PVF_DIRTY; |
2748 | PMAPCOUNT(vac_clean_one); | | 2748 | PMAPCOUNT(vac_clean_one); |
2749 | break; | | 2749 | break; |
2750 | default: | | 2750 | default: |
2751 | return; | | 2751 | return; |
2752 | } | | 2752 | } |
2753 | | | 2753 | |
2754 | KASSERT(!(md->pvh_attrs & PVF_NC)); | | 2754 | KASSERT(!(md->pvh_attrs & PVF_NC)); |
2755 | | | 2755 | |
2756 | UVMHIST_LOG(maphist, "md %#jx (attrs=%#jx)", (uintptr_t)md, | | 2756 | UVMHIST_LOG(maphist, "md %#jx (attrs=%#jx)", (uintptr_t)md, |
2757 | md->pvh_attrs, 0, 0); | | 2757 | md->pvh_attrs, 0, 0); |
2758 | | | 2758 | |
2759 | const size_t scache_line_size = arm_scache.dcache_line_size; | | 2759 | const size_t scache_line_size = arm_scache.dcache_line_size; |
2760 | | | 2760 | |
2761 | for (; va_offset <= end_va; va_offset += PAGE_SIZE) { | | 2761 | for (; va_offset <= end_va; va_offset += PAGE_SIZE) { |
2762 | pt_entry_t * const ptep = cpu_cdst_pte(va_offset); | | 2762 | pt_entry_t * const ptep = cpu_cdst_pte(va_offset); |
2763 | const vaddr_t dstp = cpu_cdstp(va_offset); | | 2763 | const vaddr_t dstp = cpu_cdstp(va_offset); |
2764 | const pt_entry_t opte = *ptep; | | 2764 | const pt_entry_t opte = *ptep; |
2765 | | | 2765 | |
2766 | if (flush == PMAP_FLUSH_SECONDARY | | 2766 | if (flush == PMAP_FLUSH_SECONDARY |
2767 | && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) | | 2767 | && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) |
2768 | continue; | | 2768 | continue; |
2769 | | | 2769 | |
2770 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); | | 2770 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); |
2771 | /* | | 2771 | /* |
2772 | * Set up a PTE with the right coloring to flush | | 2772 | * Set up a PTE with the right coloring to flush |
2773 | * existing cache entries. | | 2773 | * existing cache entries. |
2774 | */ | | 2774 | */ |
2775 | const pt_entry_t npte = L2_S_PROTO | | 2775 | const pt_entry_t npte = L2_S_PROTO |
2776 | | pa | | 2776 | | pa |
2777 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) | | 2777 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
2778 | | pte_l2_s_cache_mode; | | 2778 | | pte_l2_s_cache_mode; |
2779 | l2pte_set(ptep, npte, opte); | | 2779 | l2pte_set(ptep, npte, opte); |
2780 | PTE_SYNC(ptep); | | 2780 | PTE_SYNC(ptep); |
2781 | | | 2781 | |
2782 | /* | | 2782 | /* |
2783 | * Flush it. Make sure to flush secondary cache too since | | 2783 | * Flush it. Make sure to flush secondary cache too since |
2784 | * bus_dma will ignore uncached pages. | | 2784 | * bus_dma will ignore uncached pages. |
2785 | */ | | 2785 | */ |
2786 | if (scache_line_size != 0) { | | 2786 | if (scache_line_size != 0) { |
2787 | cpu_dcache_wb_range(dstp, PAGE_SIZE); | | 2787 | cpu_dcache_wb_range(dstp, PAGE_SIZE); |
2788 | if (wbinv_p) { | | 2788 | if (wbinv_p) { |
2789 | cpu_sdcache_wbinv_range(dstp, pa, PAGE_SIZE); | | 2789 | cpu_sdcache_wbinv_range(dstp, pa, PAGE_SIZE); |
2790 | cpu_dcache_inv_range(dstp, PAGE_SIZE); | | 2790 | cpu_dcache_inv_range(dstp, PAGE_SIZE); |
2791 | } else { | | 2791 | } else { |
2792 | cpu_sdcache_wb_range(dstp, pa, PAGE_SIZE); | | 2792 | cpu_sdcache_wb_range(dstp, pa, PAGE_SIZE); |
2793 | } | | 2793 | } |
2794 | } else { | | 2794 | } else { |
2795 | if (wbinv_p) { | | 2795 | if (wbinv_p) { |
2796 | cpu_dcache_wbinv_range(dstp, PAGE_SIZE); | | 2796 | cpu_dcache_wbinv_range(dstp, PAGE_SIZE); |
2797 | } else { | | 2797 | } else { |
2798 | cpu_dcache_wb_range(dstp, PAGE_SIZE); | | 2798 | cpu_dcache_wb_range(dstp, PAGE_SIZE); |
2799 | } | | 2799 | } |
2800 | } | | 2800 | } |
2801 | | | 2801 | |
2802 | /* | | 2802 | /* |
2803 | * Restore the page table entry since we might have interrupted | | 2803 | * Restore the page table entry since we might have interrupted |
2804 | * pmap_zero_page or pmap_copy_page which was already using | | 2804 | * pmap_zero_page or pmap_copy_page which was already using |
2805 | * this pte. | | 2805 | * this pte. |
2806 | */ | | 2806 | */ |
2807 | if (opte) { | | 2807 | if (opte) { |
2808 | l2pte_set(ptep, opte, npte); | | 2808 | l2pte_set(ptep, opte, npte); |
2809 | } else { | | 2809 | } else { |
2810 | l2pte_reset(ptep); | | 2810 | l2pte_reset(ptep); |
2811 | } | | 2811 | } |
2812 | PTE_SYNC(ptep); | | 2812 | PTE_SYNC(ptep); |
2813 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); | | 2813 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); |
2814 | } | | 2814 | } |
2815 | } | | 2815 | } |
2816 | #endif /* ARM_MMU_EXTENDED */ | | 2816 | #endif /* ARM_MMU_EXTENDED */ |
2817 | #endif /* PMAP_CACHE_VIPT */ | | 2817 | #endif /* PMAP_CACHE_VIPT */ |
2818 | | | 2818 | |
2819 | /* | | 2819 | /* |
2820 | * Routine: pmap_page_remove | | 2820 | * Routine: pmap_page_remove |
2821 | * Function: | | 2821 | * Function: |
2822 | * Removes this physical page from | | 2822 | * Removes this physical page from |
2823 | * all physical maps in which it resides. | | 2823 | * all physical maps in which it resides. |
2824 | * Reflects back modify bits to the pager. | | 2824 | * Reflects back modify bits to the pager. |
2825 | */ | | 2825 | */ |
2826 | static void | | 2826 | static void |
2827 | pmap_page_remove(struct vm_page_md *md, paddr_t pa) | | 2827 | pmap_page_remove(struct vm_page_md *md, paddr_t pa) |
2828 | { | | 2828 | { |
2829 | struct l2_bucket *l2b; | | 2829 | struct l2_bucket *l2b; |
2830 | struct pv_entry *pv; | | 2830 | struct pv_entry *pv; |
2831 | pt_entry_t *ptep; | | 2831 | pt_entry_t *ptep; |
2832 | #ifndef ARM_MMU_EXTENDED | | 2832 | #ifndef ARM_MMU_EXTENDED |
2833 | bool flush = false; | | 2833 | bool flush = false; |
2834 | #endif | | 2834 | #endif |
2835 | u_int flags = 0; | | 2835 | u_int flags = 0; |
2836 | | | 2836 | |
2837 | UVMHIST_FUNC(__func__); | | 2837 | UVMHIST_FUNC(__func__); |
2838 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx", (uintptr_t)md, pa, 0, 0); | | 2838 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx", (uintptr_t)md, pa, 0, 0); |
2839 | | | 2839 | |
| | | 2840 | kpreempt_disable(); |
2840 | pmap_acquire_page_lock(md); | | 2841 | pmap_acquire_page_lock(md); |
2841 | struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list); | | 2842 | struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list); |
2842 | if (*pvp == NULL) { | | 2843 | if (*pvp == NULL) { |
2843 | #ifdef PMAP_CACHE_VIPT | | 2844 | #ifdef PMAP_CACHE_VIPT |
2844 | /* | | 2845 | /* |
2845 | * We *know* the page contents are about to be replaced. | | 2846 | * We *know* the page contents are about to be replaced. |
2846 | 	 * Discard the exec contents. | | 2850 | 	 * Discard the exec contents.
2847 | */ | | 2848 | */ |
2848 | if (PV_IS_EXEC_P(md->pvh_attrs)) | | 2849 | if (PV_IS_EXEC_P(md->pvh_attrs)) |
2849 | PMAPCOUNT(exec_discarded_page_protect); | | 2850 | PMAPCOUNT(exec_discarded_page_protect); |
2850 | md->pvh_attrs &= ~PVF_EXEC; | | 2851 | md->pvh_attrs &= ~PVF_EXEC; |
2851 | PMAP_VALIDATE_MD_PAGE(md); | | 2852 | PMAP_VALIDATE_MD_PAGE(md); |
2852 | #endif | | 2853 | #endif |
2853 | pmap_release_page_lock(md); | | 2854 | pmap_release_page_lock(md); |
| | | 2855 | kpreempt_enable(); |
| | | 2856 | |
2854 | return; | | 2857 | return; |
2855 | } | | 2858 | } |
2856 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 2859 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
2857 | KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); | | 2860 | KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); |
2858 | #endif | | 2861 | #endif |
2859 | | | 2862 | |
2860 | /* | | 2863 | /* |
2861 | * Clear alias counts | | 2864 | * Clear alias counts |
2862 | */ | | 2865 | */ |
2863 | #ifdef PMAP_CACHE_VIVT | | 2866 | #ifdef PMAP_CACHE_VIVT |
2864 | md->k_mappings = 0; | | 2867 | md->k_mappings = 0; |
2865 | #endif | | 2868 | #endif |
2866 | md->urw_mappings = md->uro_mappings = 0; | | 2869 | md->urw_mappings = md->uro_mappings = 0; |
2867 | | | 2870 | |
2868 | #ifdef PMAP_CACHE_VIVT | | 2871 | #ifdef PMAP_CACHE_VIVT |
2869 | pmap_clean_page(md, false); | | 2872 | pmap_clean_page(md, false); |
2870 | #endif | | 2873 | #endif |
2871 | | | 2874 | |
2872 | for (pv = *pvp; pv != NULL;) { | | 2875 | for (pv = *pvp; pv != NULL;) { |
2873 | pmap_t pm = pv->pv_pmap; | | 2876 | pmap_t pm = pv->pv_pmap; |
2874 | #ifndef ARM_MMU_EXTENDED | | 2877 | #ifndef ARM_MMU_EXTENDED |
2875 | if (flush == false && pmap_is_current(pm)) | | 2878 | if (flush == false && pmap_is_current(pm)) |
2876 | flush = true; | | 2879 | flush = true; |
2877 | #endif | | 2880 | #endif |
2878 | | | 2881 | |
2879 | #ifdef PMAP_CACHE_VIPT | | 2882 | #ifdef PMAP_CACHE_VIPT |
2880 | if (pm == pmap_kernel() && PV_IS_KENTRY_P(pv->pv_flags)) { | | 2883 | if (pm == pmap_kernel() && PV_IS_KENTRY_P(pv->pv_flags)) { |
2881 | 			/* If this was an unmanaged mapping, it must be ignored. */ | | 2884 | 			/* If this was an unmanaged mapping, it must be ignored. */
2882 | pvp = &SLIST_NEXT(pv, pv_link); | | 2885 | pvp = &SLIST_NEXT(pv, pv_link); |
2883 | pv = *pvp; | | 2886 | pv = *pvp; |
2884 | continue; | | 2887 | continue; |
2885 | } | | 2888 | } |
2886 | #endif | | 2889 | #endif |
2887 | | | 2890 | |
2888 | /* | | 2891 | /* |
2889 | * Try to get a hold on the pmap's lock. We must do this | | 2892 | * Try to get a hold on the pmap's lock. We must do this |
2890 | * while still holding the page locked, to know that the | | 2893 | * while still holding the page locked, to know that the |
2891 | * page is still associated with the pmap and the mapping is | | 2894 | * page is still associated with the pmap and the mapping is |
2892 | * in place. If a hold can't be had, unlock and wait for | | 2895 | * in place. If a hold can't be had, unlock and wait for |
2893 | * the pmap's lock to become available and retry. The pmap | | 2896 | * the pmap's lock to become available and retry. The pmap |
2894 | * must be ref'd over this dance to stop it disappearing | | 2897 | * must be ref'd over this dance to stop it disappearing |
2895 | * behind us. | | 2898 | * behind us. |
2896 | */ | | 2899 | */ |
2897 | if (!mutex_tryenter(&pm->pm_lock)) { | | 2900 | if (!mutex_tryenter(&pm->pm_lock)) { |
2898 | pmap_reference(pm); | | 2901 | pmap_reference(pm); |
2899 | pmap_release_page_lock(md); | | 2902 | pmap_release_page_lock(md); |
2900 | pmap_acquire_pmap_lock(pm); | | 2903 | pmap_acquire_pmap_lock(pm); |
2901 | /* nothing, just wait for it */ | | 2904 | /* nothing, just wait for it */ |
2902 | pmap_release_pmap_lock(pm); | | 2905 | pmap_release_pmap_lock(pm); |
2903 | pmap_destroy(pm); | | 2906 | pmap_destroy(pm); |
2904 | /* Restart from the beginning. */ | | 2907 | /* Restart from the beginning. */ |
2905 | pmap_acquire_page_lock(md); | | 2908 | pmap_acquire_page_lock(md); |
2906 | pvp = &SLIST_FIRST(&md->pvh_list); | | 2909 | pvp = &SLIST_FIRST(&md->pvh_list); |
2907 | pv = *pvp; | | 2910 | pv = *pvp; |
2908 | continue; | | 2911 | continue; |
2909 | } | | 2912 | } |
2910 | | | 2913 | |
2911 | if (pm == pmap_kernel()) { | | 2914 | if (pm == pmap_kernel()) { |
2912 | #ifdef PMAP_CACHE_VIPT | | 2915 | #ifdef PMAP_CACHE_VIPT |
2913 | if (pv->pv_flags & PVF_WRITE) | | 2916 | if (pv->pv_flags & PVF_WRITE) |
2914 | md->krw_mappings--; | | 2917 | md->krw_mappings--; |
2915 | else | | 2918 | else |
2916 | md->kro_mappings--; | | 2919 | md->kro_mappings--; |
2917 | #endif | | 2920 | #endif |
2918 | PMAPCOUNT(kernel_unmappings); | | 2921 | PMAPCOUNT(kernel_unmappings); |
2919 | } | | 2922 | } |
2920 | *pvp = SLIST_NEXT(pv, pv_link); /* remove from list */ | | 2923 | *pvp = SLIST_NEXT(pv, pv_link); /* remove from list */ |
2921 | PMAPCOUNT(unmappings); | | 2924 | PMAPCOUNT(unmappings); |
2922 | | | 2925 | |
2923 | pmap_release_page_lock(md); | | 2926 | pmap_release_page_lock(md); |
2924 | | | 2927 | |
2925 | l2b = pmap_get_l2_bucket(pm, pv->pv_va); | | 2928 | l2b = pmap_get_l2_bucket(pm, pv->pv_va); |
2926 | KASSERTMSG(l2b != NULL, "%#lx", pv->pv_va); | | 2929 | KASSERTMSG(l2b != NULL, "%#lx", pv->pv_va); |
2927 | | | 2930 | |
2928 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 2931 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
2929 | | | 2932 | |
2930 | /* | | 2933 | /* |
2931 | * Update statistics | | 2934 | * Update statistics |
2932 | */ | | 2935 | */ |
2933 | --pm->pm_stats.resident_count; | | 2936 | --pm->pm_stats.resident_count; |
2934 | | | 2937 | |
2935 | /* Wired bit */ | | 2938 | /* Wired bit */ |
2936 | if (pv->pv_flags & PVF_WIRED) | | 2939 | if (pv->pv_flags & PVF_WIRED) |
2937 | --pm->pm_stats.wired_count; | | 2940 | --pm->pm_stats.wired_count; |
2938 | | | 2941 | |
2939 | flags |= pv->pv_flags; | | 2942 | flags |= pv->pv_flags; |
2940 | | | 2943 | |
2941 | /* | | 2944 | /* |
2942 | * Invalidate the PTEs. | | 2945 | * Invalidate the PTEs. |
2943 | */ | | 2946 | */ |
2944 | l2pte_reset(ptep); | | 2947 | l2pte_reset(ptep); |
2945 | PTE_SYNC_CURRENT(pm, ptep); | | 2948 | PTE_SYNC_CURRENT(pm, ptep); |
2946 | | | 2949 | |
2947 | #ifdef ARM_MMU_EXTENDED | | 2950 | #ifdef ARM_MMU_EXTENDED |
2948 | pmap_tlb_invalidate_addr(pm, pv->pv_va); | | 2951 | pmap_tlb_invalidate_addr(pm, pv->pv_va); |
2949 | #endif | | 2952 | #endif |
2950 | | | 2953 | |
2951 | pmap_free_l2_bucket(pm, l2b, PAGE_SIZE / L2_S_SIZE); | | 2954 | pmap_free_l2_bucket(pm, l2b, PAGE_SIZE / L2_S_SIZE); |
2952 | | | 2955 | |
2953 | pmap_release_pmap_lock(pm); | | 2956 | pmap_release_pmap_lock(pm); |
2954 | | | 2957 | |
2955 | pool_put(&pmap_pv_pool, pv); | | 2958 | pool_put(&pmap_pv_pool, pv); |
2956 | pmap_acquire_page_lock(md); | | 2959 | pmap_acquire_page_lock(md); |
2957 | | | 2960 | |
2958 | /* | | 2961 | /* |
2959 | * Restart at the beginning of the list. | | 2962 | * Restart at the beginning of the list. |
2960 | */ | | 2963 | */ |
2961 | pvp = &SLIST_FIRST(&md->pvh_list); | | 2964 | pvp = &SLIST_FIRST(&md->pvh_list); |
2962 | pv = *pvp; | | 2965 | pv = *pvp; |
2963 | } | | 2966 | } |
2964 | /* | | 2967 | /* |
2965 | * If we reach the end of the list and there are still mappings, they | | 2968 | * If we reach the end of the list and there are still mappings, they
2966 | * might now be cacheable, and they must all be kernel mappings. | | 2969 | * might now be cacheable, and they must all be kernel mappings.
2967 | */ | | 2970 | */ |
2968 | if (!SLIST_EMPTY(&md->pvh_list)) { | | 2971 | if (!SLIST_EMPTY(&md->pvh_list)) { |
2969 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); | | 2972 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); |
2970 | } | | 2973 | } |
2971 | | | 2974 | |
2972 | #ifdef PMAP_CACHE_VIPT | | 2975 | #ifdef PMAP_CACHE_VIPT |
2973 | /* | | 2976 | /* |
2974 | * Its EXEC cache is now gone. | | 2977 | * Its EXEC cache is now gone. |
2975 | */ | | 2978 | */ |
2976 | if (PV_IS_EXEC_P(md->pvh_attrs)) | | 2979 | if (PV_IS_EXEC_P(md->pvh_attrs)) |
2977 | PMAPCOUNT(exec_discarded_page_protect); | | 2980 | PMAPCOUNT(exec_discarded_page_protect); |
2978 | md->pvh_attrs &= ~PVF_EXEC; | | 2981 | md->pvh_attrs &= ~PVF_EXEC; |
2979 | KASSERT(md->urw_mappings == 0); | | 2982 | KASSERT(md->urw_mappings == 0); |
2980 | KASSERT(md->uro_mappings == 0); | | 2983 | KASSERT(md->uro_mappings == 0); |
2981 | #ifndef ARM_MMU_EXTENDED | | 2984 | #ifndef ARM_MMU_EXTENDED |
2982 | if (arm_cache_prefer_mask != 0) { | | 2985 | if (arm_cache_prefer_mask != 0) { |
2983 | if (md->krw_mappings == 0) | | 2986 | if (md->krw_mappings == 0) |
2984 | md->pvh_attrs &= ~PVF_WRITE; | | 2987 | md->pvh_attrs &= ~PVF_WRITE; |
2985 | PMAP_VALIDATE_MD_PAGE(md); | | 2988 | PMAP_VALIDATE_MD_PAGE(md); |
2986 | } | | 2989 | } |
2987 | #endif /* !ARM_MMU_EXTENDED */ | | 2990 | #endif /* !ARM_MMU_EXTENDED */
2988 | #endif /* PMAP_CACHE_VIPT */ | | 2991 | #endif /* PMAP_CACHE_VIPT */ |
2989 | pmap_release_page_lock(md); | | 2992 | pmap_release_page_lock(md); |
2990 | | | 2993 | |
2991 | #ifndef ARM_MMU_EXTENDED | | 2994 | #ifndef ARM_MMU_EXTENDED |
2992 | if (flush) { | | 2995 | if (flush) { |
2993 | /* | | 2996 | /* |
2994 | * Note: We can't use pmap_tlb_flush{I,D}() here since that | | 2997 | * Note: We can't use pmap_tlb_flush{I,D}() here since that |
2995 | * would need a subsequent call to pmap_update() to ensure | | 2998 | * would need a subsequent call to pmap_update() to ensure |
2996 | * curpm->pm_cstate.cs_all is reset. Our callers are not | | 2999 | * curpm->pm_cstate.cs_all is reset. Our callers are not |
2997 | * required to do that (see pmap(9)), so we can't modify | | 3000 | * required to do that (see pmap(9)), so we can't modify |
2998 | * the current pmap's state. | | 3001 | * the current pmap's state. |
2999 | */ | | 3002 | */ |
3000 | if (PV_BEEN_EXECD(flags)) | | 3003 | if (PV_BEEN_EXECD(flags)) |
3001 | cpu_tlb_flushID(); | | 3004 | cpu_tlb_flushID(); |
3002 | else | | 3005 | else |
3003 | cpu_tlb_flushD(); | | 3006 | cpu_tlb_flushD(); |
3004 | } | | 3007 | } |
3005 | cpu_cpwait(); | | 3008 | cpu_cpwait(); |
3006 | #endif /* !ARM_MMU_EXTENDED */ | | 3009 | #endif /* !ARM_MMU_EXTENDED */
| | | 3010 | |
| | | 3011 | kpreempt_enable(); |
3007 | } | | 3012 | } |
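/*
 * Editorial sketch, not part of pmap.c: a minimal userland model of
 * the trylock-or-restart dance used by pmap_page_remove() above.  The
 * walker holds the "page" lock and may only take a "pmap" lock by
 * trylock; on failure it references the pmap, drops the page lock,
 * waits for the pmap lock to come free, and restarts the walk from
 * the head of the list.  All names below (struct object, object_ref,
 * lock_pmap_for_walk, ...) are illustrative assumptions standing in
 * for the kernel's page/pmap locks and pmap_reference()/pmap_destroy().
 */
#if 0	/* illustrative only, never compiled */
#include <pthread.h>
#include <stdatomic.h>

struct object {
	pthread_mutex_t	lock;
	atomic_int	refs;
};

static void
object_ref(struct object *o)
{
	atomic_fetch_add(&o->refs, 1);
}

static void
object_unref(struct object *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1) {
		/* last reference went away: free the object here */
	}
}

/*
 * Called with page->lock held.  On success both locks are held and
 * *restartp is 0.  On the slow path only page->lock is held on return
 * and *restartp is 1: the list may have changed while unlocked, so
 * the caller must restart its walk from the head.
 */
static void
lock_pmap_for_walk(struct object *page, struct object *pm, int *restartp)
{
	*restartp = 0;
	if (pthread_mutex_trylock(&pm->lock) == 0)
		return;				/* fast path: got it */
	object_ref(pm);				/* keep pm alive over the dance */
	pthread_mutex_unlock(&page->lock);
	pthread_mutex_lock(&pm->lock);		/* just wait for it... */
	pthread_mutex_unlock(&pm->lock);	/* ...and let it go again */
	object_unref(pm);
	pthread_mutex_lock(&page->lock);	/* retake the page lock */
	*restartp = 1;
}
#endif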
3008 | | | 3013 | |
3009 | /* | | 3014 | /* |
3010 | * pmap_t pmap_create(void) | | 3015 | * pmap_t pmap_create(void) |
3011 | * | | 3016 | * |
3012 | * Create a new pmap structure from scratch. | | 3017 | * Create a new pmap structure from scratch. |
3013 | */ | | 3018 | */ |
3014 | pmap_t | | 3019 | pmap_t |
3015 | pmap_create(void) | | 3020 | pmap_create(void) |
3016 | { | | 3021 | { |
3017 | pmap_t pm; | | 3022 | pmap_t pm; |
3018 | | | 3023 | |
3019 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); | | 3024 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); |
3020 | | | 3025 | |
3021 | mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_NONE); | | 3026 | mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_NONE); |
3022 | | | 3027 | |
3023 | pm->pm_refs = 1; | | 3028 | pm->pm_refs = 1; |
3024 | pm->pm_stats.wired_count = 0; | | 3029 | pm->pm_stats.wired_count = 0; |
3025 | pm->pm_stats.resident_count = 1; | | 3030 | pm->pm_stats.resident_count = 1; |
3026 | #ifdef ARM_MMU_EXTENDED | | 3031 | #ifdef ARM_MMU_EXTENDED |
3027 | #ifdef MULTIPROCESSOR | | 3032 | #ifdef MULTIPROCESSOR |
3028 | kcpuset_create(&pm->pm_active, true); | | 3033 | kcpuset_create(&pm->pm_active, true); |
3029 | kcpuset_create(&pm->pm_onproc, true); | | 3034 | kcpuset_create(&pm->pm_onproc, true); |
3030 | #endif | | 3035 | #endif |
3031 | #else | | 3036 | #else |
3032 | pm->pm_cstate.cs_all = 0; | | 3037 | pm->pm_cstate.cs_all = 0; |
3033 | #endif | | 3038 | #endif |
3034 | pmap_alloc_l1(pm); | | 3039 | pmap_alloc_l1(pm); |
3035 | | | 3040 | |
3036 | /* | | 3041 | /* |
3037 | * Note: The pool cache ensures that the pm_l2[] array is already | | 3042 | * Note: The pool cache ensures that the pm_l2[] array is already |
3038 | * initialised to zero. | | 3043 | * initialised to zero. |
3039 | */ | | 3044 | */ |
3040 | | | 3045 | |
3041 | pmap_pinit(pm); | | 3046 | pmap_pinit(pm); |
3042 | | | 3047 | |
3043 | return pm; | | 3048 | return pm; |
3044 | } | | 3049 | } |
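/*
 * Editorial sketch, not part of pmap.c: the lifetime contract around
 * pmap_create() is plain reference counting.  pm_refs starts at 1 for
 * the creator; pmap_reference() raises it (as pmap_page_remove() does
 * over its lock dance above) and pmap_destroy() drops it, tearing the
 * pmap down only when the count reaches zero.
 */
#if 0	/* illustrative only */
	pmap_t pm = pmap_create();	/* pm_refs == 1 */
	pmap_reference(pm);		/* pm_refs == 2: loaned to a walker */
	pmap_destroy(pm);		/* pm_refs == 1: still alive */
	pmap_destroy(pm);		/* pm_refs == 0: L1/L2 state freed */
#endif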
3045 | | | 3050 | |
3046 | u_int | | 3051 | u_int |
3047 | arm32_mmap_flags(paddr_t pa) | | 3052 | arm32_mmap_flags(paddr_t pa) |
3048 | { | | 3053 | { |
3049 | /* | | 3054 | /* |
3050 | * The upper 8 bits of pmap_enter()'s flags are reserved for MD stuff, | | 3055 | * The upper 8 bits of pmap_enter()'s flags are reserved for MD stuff,
3051 | * and we're using the upper bits of page numbers to pass flags around, | | 3056 | * and we're using the upper bits of page numbers to pass flags around,
3052 | * so we might as well use the same bits. | | 3057 | * so we might as well use the same bits.
3053 | */ | | 3058 | */ |
3054 | return (u_int)pa & PMAP_MD_MASK; | | 3059 | return (u_int)pa & PMAP_MD_MASK; |
3055 | } | | 3060 | } |
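/*
 * Editorial sketch, not part of pmap.c: how a machine-dependent
 * driver might use the convention described above.  foo_mmap() and
 * foo_offset_to_pa() are hypothetical, and the exact packing shown
 * (flag OR'd in before arm_btop()) is an assumption; the point is
 * only that an MD flag rides in the upper bits of the mmap cookie
 * and is recovered later via "pa & PMAP_MD_MASK".
 */
#if 0	/* illustrative only */
static paddr_t
foo_mmap(dev_t dev, off_t off, int prot)
{
	paddr_t pa = foo_offset_to_pa(off);	/* hypothetical helper */

	/* ask for a write-combined mapping when this page is entered */
	return arm_btop(pa | ARM32_MMAP_WRITECOMBINE);
}
#endif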
3056 | /* | | 3061 | /* |
3057 | * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, | | 3062 | * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, |
3058 | * u_int flags) | | 3063 | * u_int flags) |
3059 | * | | 3064 | * |
3060 | * Insert the given physical page (p) at | | 3065 | * Insert the given physical page (p) at |
3061 | * the specified virtual address (v) in the | | 3066 | * the specified virtual address (v) in the |
3062 | * target physical map with the protection requested. | | 3067 | * target physical map with the protection requested. |
3063 | * | | 3068 | * |
3064 | * NB: This is the only routine which MAY NOT lazy-evaluate | | 3069 | * NB: This is the only routine which MAY NOT lazy-evaluate |
3065 | * or lose information. That is, this routine must actually | | 3070 | * or lose information. That is, this routine must actually |
3066 | * insert this page into the given map NOW. | | 3071 | * insert this page into the given map NOW. |
3067 | */ | | 3072 | */ |
3068 | int | | 3073 | int |
3069 | pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 3074 | pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
3070 | { | | 3075 | { |
3071 | struct l2_bucket *l2b; | | 3076 | struct l2_bucket *l2b; |
3072 | struct vm_page *pg, *opg; | | 3077 | struct vm_page *pg, *opg; |
3073 | u_int nflags; | | 3078 | u_int nflags; |
3074 | u_int oflags; | | 3079 | u_int oflags; |
3075 | const bool kpm_p = (pm == pmap_kernel()); | | 3080 | const bool kpm_p = (pm == pmap_kernel()); |
3076 | #ifdef ARM_HAS_VBAR | | 3081 | #ifdef ARM_HAS_VBAR |
3077 | const bool vector_page_p = false; | | 3082 | const bool vector_page_p = false; |
3078 | #else | | 3083 | #else |
3079 | const bool vector_page_p = (va == vector_page); | | 3084 | const bool vector_page_p = (va == vector_page); |
3080 | #endif | | 3085 | #endif |
3081 | struct pmap_page *pp = pmap_pv_tracked(pa); | | 3086 | struct pmap_page *pp = pmap_pv_tracked(pa); |
3082 | struct pv_entry *new_pv = NULL; | | 3087 | struct pv_entry *new_pv = NULL; |
3083 | struct pv_entry *old_pv = NULL; | | 3088 | struct pv_entry *old_pv = NULL; |
3084 | int error = 0; | | 3089 | int error = 0; |
3085 | | | 3090 | |
3086 | UVMHIST_FUNC(__func__); | | 3091 | UVMHIST_FUNC(__func__); |
3087 | UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pa %#jx prot %#jx", | | 3092 | UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pa %#jx prot %#jx", |
3088 | (uintptr_t)pm, va, pa, prot); | | 3093 | (uintptr_t)pm, va, pa, prot); |
3089 | UVMHIST_LOG(maphist, " flag %#jx", flags, 0, 0, 0); | | 3094 | UVMHIST_LOG(maphist, " flag %#jx", flags, 0, 0, 0); |
3090 | | | 3095 | |
3091 | KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); | | 3096 | KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); |
3092 | KDASSERT(((va | pa) & PGOFSET) == 0); | | 3097 | KDASSERT(((va | pa) & PGOFSET) == 0); |
3093 | | | 3098 | |
3094 | /* | | 3099 | /* |
3095 | * Get a pointer to the page. Later on in this function, we | | 3100 | * Get a pointer to the page. Later on in this function, we |
3096 | * test for a managed page by checking pg != NULL. | | 3101 | * test for a managed page by checking pg != NULL. |
3097 | */ | | 3102 | */ |
3098 | pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; | | 3103 | pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; |
3099 | /* | | 3104 | /* |
3100 | * If we may need a new pv entry, allocate it now, as we can't do it | | 3105 | * If we may need a new pv entry, allocate it now, as we can't do it
3101 | * with the kernel_pmap locked | | 3106 | * with the kernel_pmap locked |
3102 | */ | | 3107 | */ |
3103 | if (pg || pp) | | 3108 | if (pg || pp) |
3104 | new_pv = pool_get(&pmap_pv_pool, PR_NOWAIT); | | 3109 | new_pv = pool_get(&pmap_pv_pool, PR_NOWAIT); |
3105 | | | 3110 | |
3106 | nflags = 0; | | 3111 | nflags = 0; |
3107 | if (prot & VM_PROT_WRITE) | | 3112 | if (prot & VM_PROT_WRITE) |
3108 | nflags |= PVF_WRITE; | | 3113 | nflags |= PVF_WRITE; |
3109 | if (prot & VM_PROT_EXECUTE) | | 3114 | if (prot & VM_PROT_EXECUTE) |
3110 | nflags |= PVF_EXEC; | | 3115 | nflags |= PVF_EXEC; |
3111 | if (flags & PMAP_WIRED) | | 3116 | if (flags & PMAP_WIRED) |
3112 | nflags |= PVF_WIRED; | | 3117 | nflags |= PVF_WIRED; |
3113 | | | 3118 | |
| | | 3119 | kpreempt_disable(); |
3114 | pmap_acquire_pmap_lock(pm); | | 3120 | pmap_acquire_pmap_lock(pm); |
3115 | | | 3121 | |
3116 | /* | | 3122 | /* |
3117 | * Fetch the L2 bucket which maps this page, allocating one if | | 3123 | * Fetch the L2 bucket which maps this page, allocating one if |
3118 | * necessary for user pmaps. | | 3124 | * necessary for user pmaps. |
3119 | */ | | 3125 | */ |
3120 | if (kpm_p) { | | 3126 | if (kpm_p) { |
3121 | l2b = pmap_get_l2_bucket(pm, va); | | 3127 | l2b = pmap_get_l2_bucket(pm, va); |
3122 | } else { | | 3128 | } else { |
3123 | l2b = pmap_alloc_l2_bucket(pm, va); | | 3129 | l2b = pmap_alloc_l2_bucket(pm, va); |
3124 | } | | 3130 | } |
3125 | if (l2b == NULL) { | | 3131 | if (l2b == NULL) { |
3126 | if (flags & PMAP_CANFAIL) { | | 3132 | if (flags & PMAP_CANFAIL) { |
3127 | pmap_release_pmap_lock(pm); | | 3133 | pmap_release_pmap_lock(pm); |
| | | 3134 | kpreempt_enable(); |
| | | 3135 | |
3128 | error = ENOMEM; | | 3136 | error = ENOMEM; |
3129 | goto free_pv; | | 3137 | goto free_pv; |
3130 | } | | 3138 | } |
3131 | panic("pmap_enter: failed to allocate L2 bucket"); | | 3139 | panic("pmap_enter: failed to allocate L2 bucket"); |
3132 | } | | 3140 | } |
3133 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3141 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3134 | const pt_entry_t opte = *ptep; | | 3142 | const pt_entry_t opte = *ptep; |
3135 | pt_entry_t npte = pa; | | 3143 | pt_entry_t npte = pa; |
3136 | oflags = 0; | | 3144 | oflags = 0; |
3137 | | | 3145 | |
3138 | if (opte) { | | 3146 | if (opte) { |
3139 | /* | | 3147 | /* |
3140 | * There is already a mapping at this address. | | 3148 | * There is already a mapping at this address. |
3141 | * If the physical address is different, lookup the | | 3149 | * If the physical address is different, lookup the |
3142 | * vm_page. | | 3150 | * vm_page. |
3143 | */ | | 3151 | */ |
3144 | if (l2pte_pa(opte) != pa) { | | 3152 | if (l2pte_pa(opte) != pa) { |
3145 | KASSERT(!pmap_pv_tracked(pa)); | | 3153 | KASSERT(!pmap_pv_tracked(pa)); |
3146 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3154 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3147 | } else | | 3155 | } else |
3148 | opg = pg; | | 3156 | opg = pg; |
3149 | } else | | 3157 | } else |
3150 | opg = NULL; | | 3158 | opg = NULL; |
3151 | | | 3159 | |
3152 | if (pg || pp) { | | 3160 | if (pg || pp) { |
3153 | KASSERT((pg != NULL) != (pp != NULL)); | | 3161 | KASSERT((pg != NULL) != (pp != NULL)); |
3154 | struct vm_page_md *md = (pg != NULL) ? VM_PAGE_TO_MD(pg) : | | 3162 | struct vm_page_md *md = (pg != NULL) ? VM_PAGE_TO_MD(pg) : |
3155 | PMAP_PAGE_TO_MD(pp); | | 3163 | PMAP_PAGE_TO_MD(pp); |
3156 | | | 3164 | |
3157 | UVMHIST_LOG(maphist, " pg %#jx pp %#jx pvh_attrs %#jx " | | 3165 | UVMHIST_LOG(maphist, " pg %#jx pp %#jx pvh_attrs %#jx " |
3158 | "nflags %#jx", (uintptr_t)pg, (uintptr_t)pp, | | 3166 | "nflags %#jx", (uintptr_t)pg, (uintptr_t)pp, |
3159 | md->pvh_attrs, nflags); | | 3167 | md->pvh_attrs, nflags); |
3160 | | | 3168 | |
3161 | /* | | 3169 | /* |
3162 | * This is to be a managed mapping. | | 3170 | * This is to be a managed mapping. |
3163 | */ | | 3171 | */ |
3164 | pmap_acquire_page_lock(md); | | 3172 | pmap_acquire_page_lock(md); |
3165 | if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) { | | 3173 | if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) { |
3166 | /* | | 3174 | /* |
3167 | * - The access type indicates that we don't need | | 3175 | * - The access type indicates that we don't need |
3168 | * to do referenced emulation. | | 3176 | * to do referenced emulation. |
3169 | * OR | | 3177 | * OR |
3170 | * - The physical page has already been referenced | | 3178 | * - The physical page has already been referenced |
3171 | * so no need to re-do referenced emulation here. | | 3179 | * so no need to re-do referenced emulation here. |
3172 | */ | | 3180 | */ |
3173 | npte |= l2pte_set_readonly(L2_S_PROTO); | | 3181 | npte |= l2pte_set_readonly(L2_S_PROTO); |
3174 | | | 3182 | |
3175 | nflags |= PVF_REF; | | 3183 | nflags |= PVF_REF; |
3176 | | | 3184 | |
3177 | if ((prot & VM_PROT_WRITE) != 0 && | | 3185 | if ((prot & VM_PROT_WRITE) != 0 && |
3178 | ((flags & VM_PROT_WRITE) != 0 || | | 3186 | ((flags & VM_PROT_WRITE) != 0 || |
3179 | (md->pvh_attrs & PVF_MOD) != 0)) { | | 3187 | (md->pvh_attrs & PVF_MOD) != 0)) { |
3180 | /* | | 3188 | /* |
3181 | * This is a writable mapping, and the | | 3189 | * This is a writable mapping, and the |
3182 | * page's mod state indicates it has | | 3190 | * page's mod state indicates it has |
3183 | * already been modified. Make it | | 3191 | * already been modified. Make it |
3184 | * writable from the outset. | | 3192 | * writable from the outset. |
3185 | */ | | 3193 | */ |
3186 | npte = l2pte_set_writable(npte); | | 3194 | npte = l2pte_set_writable(npte); |
3187 | nflags |= PVF_MOD; | | 3195 | nflags |= PVF_MOD; |
3188 | } | | 3196 | } |
3189 | | | 3197 | |
3190 | #ifdef ARM_MMU_EXTENDED | | 3198 | #ifdef ARM_MMU_EXTENDED |
3191 | /* | | 3199 | /* |
3192 | * If the page has been cleaned, then the pvh_attrs | | 3200 | * If the page has been cleaned, then the pvh_attrs |
3193 | * will have PVF_EXEC set, so mark it execute so we | | 3201 | * will have PVF_EXEC set, so mark it execute so we |
3194 | * don't get an access fault when trying to execute | | 3202 | * don't get an access fault when trying to execute |
3195 | * from it. | | 3203 | * from it. |
3196 | */ | | 3204 | */ |
3197 | if (md->pvh_attrs & nflags & PVF_EXEC) { | | 3205 | if (md->pvh_attrs & nflags & PVF_EXEC) { |
3198 | npte &= ~L2_XS_XN; | | 3206 | npte &= ~L2_XS_XN; |
3199 | } | | 3207 | } |
3200 | #endif | | 3208 | #endif |
3201 | } else { | | 3209 | } else { |
3202 | /* | | 3210 | /* |
3203 | * Need to do page referenced emulation. | | 3211 | * Need to do page referenced emulation. |
3204 | */ | | 3212 | */ |
3205 | npte |= L2_TYPE_INV; | | 3213 | npte |= L2_TYPE_INV; |
3206 | } | | 3214 | } |
3207 | | | 3215 | |
3208 | if (flags & ARM32_MMAP_WRITECOMBINE) { | | 3216 | if (flags & ARM32_MMAP_WRITECOMBINE) { |
3209 | npte |= pte_l2_s_wc_mode; | | 3217 | npte |= pte_l2_s_wc_mode; |
3210 | } else | | 3218 | } else |
3211 | npte |= pte_l2_s_cache_mode; | | 3219 | npte |= pte_l2_s_cache_mode; |
3212 | | | 3220 | |
3213 | if (pg != NULL && pg == opg) { | | 3221 | if (pg != NULL && pg == opg) { |
3214 | /* | | 3222 | /* |
3215 | * We're changing the attrs of an existing mapping. | | 3223 | * We're changing the attrs of an existing mapping. |
3216 | */ | | 3224 | */ |
3217 | oflags = pmap_modify_pv(md, pa, pm, va, | | 3225 | oflags = pmap_modify_pv(md, pa, pm, va, |
3218 | PVF_WRITE | PVF_EXEC | PVF_WIRED | | | 3226 | PVF_WRITE | PVF_EXEC | PVF_WIRED | |
3219 | PVF_MOD | PVF_REF, nflags); | | 3227 | PVF_MOD | PVF_REF, nflags); |
3220 | | | 3228 | |
3221 | #ifdef PMAP_CACHE_VIVT | | 3229 | #ifdef PMAP_CACHE_VIVT |
3222 | /* | | 3230 | /* |
3223 | * We may need to flush the cache if we're | | 3231 | * We may need to flush the cache if we're |
3224 | * doing rw-ro... | | 3232 | * doing rw-ro... |
3225 | */ | | 3233 | */ |
3226 | if (pm->pm_cstate.cs_cache_d && | | 3234 | if (pm->pm_cstate.cs_cache_d && |
3227 | (oflags & PVF_NC) == 0 && | | 3235 | (oflags & PVF_NC) == 0 && |
3228 | l2pte_writable_p(opte) && | | 3236 | l2pte_writable_p(opte) && |
3229 | (prot & VM_PROT_WRITE) == 0) | | 3237 | (prot & VM_PROT_WRITE) == 0) |
3230 | cpu_dcache_wb_range(va, PAGE_SIZE); | | 3238 | cpu_dcache_wb_range(va, PAGE_SIZE); |
3231 | #endif | | 3239 | #endif |
3232 | } else { | | 3240 | } else { |
3233 | struct pv_entry *pv; | | 3241 | struct pv_entry *pv; |
3234 | /* | | 3242 | /* |
3235 | * New mapping, or changing the backing page | | 3243 | * New mapping, or changing the backing page |
3236 | * of an existing mapping. | | 3244 | * of an existing mapping. |
3237 | */ | | 3245 | */ |
3238 | if (opg) { | | 3246 | if (opg) { |
3239 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 3247 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
3240 | paddr_t opa = VM_PAGE_TO_PHYS(opg); | | 3248 | paddr_t opa = VM_PAGE_TO_PHYS(opg); |
3241 | | | 3249 | |
3242 | /* | | 3250 | /* |
3243 | * Replacing an existing mapping with a new one. | | 3251 | * Replacing an existing mapping with a new one. |
3244 | * It is part of our managed memory so we | | 3252 | * It is part of our managed memory so we |
3245 | * must remove it from the PV list | | 3253 | * must remove it from the PV list |
3246 | */ | | 3254 | */ |
3247 | pv = pmap_remove_pv(omd, opa, pm, va); | | 3255 | pv = pmap_remove_pv(omd, opa, pm, va); |
3248 | pmap_vac_me_harder(omd, opa, pm, 0); | | 3256 | pmap_vac_me_harder(omd, opa, pm, 0); |
3249 | oflags = pv->pv_flags; | | 3257 | oflags = pv->pv_flags; |
3250 | | | 3258 | |
3251 | #ifdef PMAP_CACHE_VIVT | | 3259 | #ifdef PMAP_CACHE_VIVT |
3252 | /* | | 3260 | /* |
3253 | * If the old mapping was valid (ref/mod | | 3261 | * If the old mapping was valid (ref/mod |
3254 | * emulation creates 'invalid' mappings | | 3262 | * emulation creates 'invalid' mappings |
3255 | * initially) then make sure to frob | | 3263 | * initially) then make sure to frob |
3256 | * the cache. | | 3264 | * the cache. |
3257 | */ | | 3265 | */ |
3258 | if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { | | 3266 | if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { |
3259 | pmap_cache_wbinv_page(pm, va, true, | | 3267 | pmap_cache_wbinv_page(pm, va, true, |
3260 | oflags); | | 3268 | oflags); |
3261 | } | | 3269 | } |
3262 | #endif | | 3270 | #endif |
3263 | } else { | | 3271 | } else { |
3264 | pv = new_pv; | | 3272 | pv = new_pv; |
3265 | new_pv = NULL; | | 3273 | new_pv = NULL; |
3266 | if (pv == NULL) { | | 3274 | if (pv == NULL) { |
3267 | pmap_release_page_lock(md); | | 3275 | pmap_release_page_lock(md); |
3268 | pmap_release_pmap_lock(pm); | | 3276 | pmap_release_pmap_lock(pm); |
3269 | if ((flags & PMAP_CANFAIL) == 0) | | 3277 | if ((flags & PMAP_CANFAIL) == 0) |
3270 | panic("pmap_enter: " | | 3278 | panic("pmap_enter: " |
3271 | "no pv entries"); | | 3279 | "no pv entries"); |
3272 | | | 3280 | |
3273 | pmap_free_l2_bucket(pm, l2b, 0); | | 3281 | pmap_free_l2_bucket(pm, l2b, 0); |
3274 | UVMHIST_LOG(maphist, " <-- done (ENOMEM)", | | 3282 | UVMHIST_LOG(maphist, " <-- done (ENOMEM)", |
3275 | 0, 0, 0, 0); | | 3283 | 0, 0, 0, 0); |
3276 | return ENOMEM; | | 3284 | return ENOMEM; |
3277 | } | | 3285 | } |
3278 | } | | 3286 | } |
3279 | | | 3287 | |
3280 | pmap_enter_pv(md, pa, pv, pm, va, nflags); | | 3288 | pmap_enter_pv(md, pa, pv, pm, va, nflags); |
3281 | } | | 3289 | } |
3282 | pmap_release_page_lock(md); | | 3290 | pmap_release_page_lock(md); |
3283 | } else { | | 3291 | } else { |
3284 | /* | | 3292 | /* |
3285 | * We're mapping an unmanaged page. | | 3293 | * We're mapping an unmanaged page. |
3286 | * These are always readable, and possibly writable, from | | 3294 | * These are always readable, and possibly writable, from |
3287 | * the get-go, as we don't need to track ref/mod status. | | 3295 | * the get-go, as we don't need to track ref/mod status.
3288 | */ | | 3296 | */ |
3289 | npte |= l2pte_set_readonly(L2_S_PROTO); | | 3297 | npte |= l2pte_set_readonly(L2_S_PROTO); |
3290 | if (prot & VM_PROT_WRITE) | | 3298 | if (prot & VM_PROT_WRITE) |
3291 | npte = l2pte_set_writable(npte); | | 3299 | npte = l2pte_set_writable(npte); |
3292 | | | 3300 | |
3293 | /* | | 3301 | /* |
3294 | * Make sure the vector table is mapped cacheable | | 3302 | * Make sure the vector table is mapped cacheable |
3295 | */ | | 3303 | */ |
3296 | if ((vector_page_p && !kpm_p) | | 3304 | if ((vector_page_p && !kpm_p) |
3297 | || (flags & ARM32_MMAP_CACHEABLE)) { | | 3305 | || (flags & ARM32_MMAP_CACHEABLE)) { |
3298 | npte |= pte_l2_s_cache_mode; | | 3306 | npte |= pte_l2_s_cache_mode; |
3299 | #ifdef ARM_MMU_EXTENDED | | 3307 | #ifdef ARM_MMU_EXTENDED |
3300 | npte &= ~L2_XS_XN; /* and executable */ | | 3308 | npte &= ~L2_XS_XN; /* and executable */ |
3301 | #endif | | 3309 | #endif |
3302 | } else if (flags & ARM32_MMAP_WRITECOMBINE) { | | 3310 | } else if (flags & ARM32_MMAP_WRITECOMBINE) { |
3303 | npte |= pte_l2_s_wc_mode; | | 3311 | npte |= pte_l2_s_wc_mode; |
3304 | } | | 3312 | } |
3305 | if (opg) { | | 3313 | if (opg) { |
3306 | /* | | 3314 | /* |
3307 | * Looks like there's an existing 'managed' mapping | | 3315 | * Looks like there's an existing 'managed' mapping |
3308 | * at this address. | | 3316 | * at this address. |
3309 | */ | | 3317 | */ |
3310 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 3318 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
3311 | paddr_t opa = VM_PAGE_TO_PHYS(opg); | | 3319 | paddr_t opa = VM_PAGE_TO_PHYS(opg); |
3312 | | | 3320 | |
3313 | pmap_acquire_page_lock(omd); | | 3321 | pmap_acquire_page_lock(omd); |
3314 | old_pv = pmap_remove_pv(omd, opa, pm, va); | | 3322 | old_pv = pmap_remove_pv(omd, opa, pm, va); |
3315 | pmap_vac_me_harder(omd, opa, pm, 0); | | 3323 | pmap_vac_me_harder(omd, opa, pm, 0); |
3316 | oflags = old_pv->pv_flags; | | 3324 | oflags = old_pv->pv_flags; |
3317 | pmap_release_page_lock(omd); | | 3325 | pmap_release_page_lock(omd); |
3318 | | | 3326 | |
3319 | #ifdef PMAP_CACHE_VIVT | | 3327 | #ifdef PMAP_CACHE_VIVT |
3320 | if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { | | 3328 | if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { |
3321 | pmap_cache_wbinv_page(pm, va, true, oflags); | | 3329 | pmap_cache_wbinv_page(pm, va, true, oflags); |
3322 | } | | 3330 | } |
3323 | #endif | | 3331 | #endif |
3324 | } | | 3332 | } |
3325 | } | | 3333 | } |
3326 | | | 3334 | |
3327 | /* | | 3335 | /* |
3328 | * Make sure userland mappings get the right permissions | | 3336 | * Make sure userland mappings get the right permissions |
3329 | */ | | 3337 | */ |
3330 | if (!vector_page_p && !kpm_p) { | | 3338 | if (!vector_page_p && !kpm_p) { |
3331 | npte |= L2_S_PROT_U; | | 3339 | npte |= L2_S_PROT_U; |
3332 | #ifdef ARM_MMU_EXTENDED | | 3340 | #ifdef ARM_MMU_EXTENDED |
3333 | npte |= L2_XS_nG; /* user pages are not global */ | | 3341 | npte |= L2_XS_nG; /* user pages are not global */ |
3334 | #endif | | 3342 | #endif |
3335 | } | | 3343 | } |
3336 | | | 3344 | |
3337 | /* | | 3345 | /* |
3338 | * Keep the stats up to date | | 3346 | * Keep the stats up to date |
3339 | */ | | 3347 | */ |
3340 | if (opte == 0) { | | 3348 | if (opte == 0) { |
3341 | l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE; | | 3349 | l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE; |
3342 | pm->pm_stats.resident_count++; | | 3350 | pm->pm_stats.resident_count++; |
3343 | } | | 3351 | } |
3344 | | | 3352 | |
3345 | UVMHIST_LOG(maphist, " opte %#jx npte %#jx", opte, npte, 0, 0); | | 3353 | UVMHIST_LOG(maphist, " opte %#jx npte %#jx", opte, npte, 0, 0); |
3346 | | | 3354 | |
3347 | #if defined(ARM_MMU_EXTENDED) | | 3355 | #if defined(ARM_MMU_EXTENDED) |
3348 | /* | | 3356 | /* |
3349 | * If exec protection was requested but the page hasn't been synced, | | 3357 | * If exec protection was requested but the page hasn't been synced, |
3350 | * sync it now and allow execution from it. | | 3358 | * sync it now and allow execution from it. |
3351 | */ | | 3359 | */ |
3352 | if ((nflags & PVF_EXEC) && (npte & L2_XS_XN)) { | | 3360 | if ((nflags & PVF_EXEC) && (npte & L2_XS_XN)) { |
3353 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3361 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3354 | npte &= ~L2_XS_XN; | | 3362 | npte &= ~L2_XS_XN; |
3355 | pmap_syncicache_page(md, pa); | | 3363 | pmap_syncicache_page(md, pa); |
3356 | PMAPCOUNT(exec_synced_map); | | 3364 | PMAPCOUNT(exec_synced_map); |
3357 | } | | 3365 | } |
3358 | #endif | | 3366 | #endif |
3359 | /* | | 3367 | /* |
3360 | * If this is just a wiring change, the two PTEs will be | | 3368 | * If this is just a wiring change, the two PTEs will be |
3361 | * identical, so there's no need to update the page table. | | 3369 | * identical, so there's no need to update the page table. |
3362 | */ | | 3370 | */ |
3363 | if (npte != opte) { | | 3371 | if (npte != opte) { |
3364 | l2pte_reset(ptep); | | 3372 | l2pte_reset(ptep); |
3365 | PTE_SYNC(ptep); | | 3373 | PTE_SYNC(ptep); |
3366 | if (l2pte_valid_p(opte)) { | | 3374 | if (l2pte_valid_p(opte)) { |
3367 | pmap_tlb_flush_SE(pm, va, oflags); | | 3375 | pmap_tlb_flush_SE(pm, va, oflags); |
3368 | } | | 3376 | } |
3369 | l2pte_set(ptep, npte, 0); | | 3377 | l2pte_set(ptep, npte, 0); |
3370 | PTE_SYNC(ptep); | | 3378 | PTE_SYNC(ptep); |
3371 | #ifndef ARM_MMU_EXTENDED | | 3379 | #ifndef ARM_MMU_EXTENDED |
3372 | bool is_cached = pmap_is_cached(pm); | | 3380 | bool is_cached = pmap_is_cached(pm); |
3373 | if (is_cached) { | | 3381 | if (is_cached) { |
3374 | /* | | 3382 | /* |
3375 | * We only need to frob the cache/tlb if this pmap | | 3383 | * We only need to frob the cache/tlb if this pmap |
3376 | * is current | | 3384 | * is current |
3377 | */ | | 3385 | */ |
3378 | if (!vector_page_p && l2pte_valid_p(npte)) { | | 3386 | if (!vector_page_p && l2pte_valid_p(npte)) { |
3379 | /* | | 3387 | /* |
3380 | * This mapping is likely to be accessed as | | 3388 | * This mapping is likely to be accessed as |
3381 | * soon as we return to userland. Fix up the | | 3389 | * soon as we return to userland. Fix up the |
3382 | * L1 entry to avoid taking another | | 3390 | * L1 entry to avoid taking another |
3383 | * page/domain fault. | | 3391 | * page/domain fault. |
3384 | */ | | 3392 | */ |
3385 | pd_entry_t *pdep = pmap_l1_kva(pm) | | 3393 | pd_entry_t *pdep = pmap_l1_kva(pm) |
3386 | + l1pte_index(va); | | 3394 | + l1pte_index(va); |
3387 | pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa | | 3395 | pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa |
3388 | | L1_C_DOM(pmap_domain(pm)); | | 3396 | | L1_C_DOM(pmap_domain(pm)); |
3389 | if (*pdep != pde) { | | 3397 | if (*pdep != pde) { |
3390 | l1pte_setone(pdep, pde); | | 3398 | l1pte_setone(pdep, pde); |
3391 | PDE_SYNC(pdep); | | 3399 | PDE_SYNC(pdep); |
3392 | } | | 3400 | } |
3393 | } | | 3401 | } |
3394 | } | | 3402 | } |
3395 | | | 3403 | |
3396 | UVMHIST_LOG(maphist, " is_cached %jd cs 0x%08jx", | | 3404 | UVMHIST_LOG(maphist, " is_cached %jd cs 0x%08jx", |
3397 | is_cached, pm->pm_cstate.cs_all, 0, 0); | | 3405 | is_cached, pm->pm_cstate.cs_all, 0, 0); |
3398 | | | 3406 | |
3399 | if (pg != NULL) { | | 3407 | if (pg != NULL) { |
3400 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3408 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3401 | | | 3409 | |
3402 | pmap_acquire_page_lock(md); | | 3410 | pmap_acquire_page_lock(md); |
3403 | pmap_vac_me_harder(md, pa, pm, va); | | 3411 | pmap_vac_me_harder(md, pa, pm, va); |
3404 | pmap_release_page_lock(md); | | 3412 | pmap_release_page_lock(md); |
3405 | } | | 3413 | } |
3406 | #endif | | 3414 | #endif |
3407 | } | | 3415 | } |
3408 | #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) | | 3416 | #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) |
3409 | if (pg) { | | 3417 | if (pg) { |
3410 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3418 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3411 | | | 3419 | |
3412 | pmap_acquire_page_lock(md); | | 3420 | pmap_acquire_page_lock(md); |
3413 | #ifndef ARM_MMU_EXTENDED | | 3421 | #ifndef ARM_MMU_EXTENDED |
3414 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 3422 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
3415 | #endif | | 3423 | #endif |
3416 | PMAP_VALIDATE_MD_PAGE(md); | | 3424 | PMAP_VALIDATE_MD_PAGE(md); |
3417 | pmap_release_page_lock(md); | | 3425 | pmap_release_page_lock(md); |
3418 | } | | 3426 | } |
3419 | #endif | | 3427 | #endif |
3420 | | | 3428 | |
3421 | pmap_release_pmap_lock(pm); | | 3429 | pmap_release_pmap_lock(pm); |
3422 | | | 3430 | kpreempt_enable(); |
3423 | | | 3431 | |
3424 | if (old_pv) | | 3432 | if (old_pv) |
3425 | pool_put(&pmap_pv_pool, old_pv); | | 3433 | pool_put(&pmap_pv_pool, old_pv); |
3426 | free_pv: | | 3434 | free_pv: |
3427 | if (new_pv) | | 3435 | if (new_pv) |
3428 | pool_put(&pmap_pv_pool, new_pv); | | 3436 | pool_put(&pmap_pv_pool, new_pv); |
| | | 3437 | |
3429 | return error; | | 3438 | return error; |
3430 | } | | 3439 | } |
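/*
 * Editorial sketch, not part of pmap.c: the caller-side contract of
 * pmap_enter() as implemented above.  Without PMAP_CANFAIL the
 * function panics rather than fail; with it, ENOMEM may come back and
 * the caller is expected to back off and retry.  "map" here is an
 * illustrative stand-in for whatever owns the target pmap.
 */
#if 0	/* illustrative only */
	int error = pmap_enter(map->pmap, va, pa,
	    VM_PROT_READ | VM_PROT_WRITE,	/* protection wanted */
	    VM_PROT_READ | PMAP_CANFAIL);	/* access type + may fail */
	if (error == ENOMEM) {
		/* no pv entries or L2 bucket: wait for memory, retry */
	}
	pmap_update(map->pmap);	/* flush any deferred TLB state */
#endif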
3431 | | | 3440 | |
3432 | /* | | 3441 | /* |
3433 | * pmap_remove() | | 3442 | * pmap_remove() |
3434 | * | | 3443 | * |
3435 | * pmap_remove is responsible for nuking a number of mappings for a range | | 3444 | * pmap_remove is responsible for nuking a number of mappings for a range |
3436 | * of virtual address space in the current pmap. To do this efficiently | | 3445 | * of virtual address space in the current pmap. To do this efficiently |
3437 | * is interesting, because in a number of cases a wide virtual address | | 3446 | * is interesting, because in a number of cases a wide virtual address |
3438 | * range may be supplied that contains few actual mappings. So, the | | 3447 | * range may be supplied that contains few actual mappings. So, the |
3439 | * optimisations are: | | 3448 | * optimisations are: |
3440 | * 1. Skip over hunks of address space for which no L1 or L2 entry exists. | | 3449 | * 1. Skip over hunks of address space for which no L1 or L2 entry exists. |
3441 | * 2. Build up a list of pages we've hit, up to a maximum, so we can | | 3450 | * 2. Build up a list of pages we've hit, up to a maximum, so we can |
3442 | * maybe do just a partial cache clean. This path of execution is | | 3451 | * maybe do just a partial cache clean. This path of execution is |
3443 | * complicated by the fact that the cache must be flushed _before_ | | 3452 | * complicated by the fact that the cache must be flushed _before_ |
3444 | * the PTE is nuked, being a VAC :-) | | 3453 | * the PTE is nuked, being a VAC :-) |
3445 | * 3. If we're called after UVM calls pmap_remove_all(), we can defer | | 3454 | * 3. If we're called after UVM calls pmap_remove_all(), we can defer |
3446 | * all invalidations until pmap_update(), since pmap_remove_all() has | | 3455 | * all invalidations until pmap_update(), since pmap_remove_all() has |
3447 | * already flushed the cache. | | 3456 | * already flushed the cache. |
3448 | * 4. Maybe later fast-case a single page, but I don't think this is | | 3457 | * 4. Maybe later fast-case a single page, but I don't think this is |
3449 | * going to make _that_ much difference overall. | | 3458 | * going to make _that_ much difference overall. |
3450 | */ | | 3459 | */ |
3451 | | | 3460 | |
3452 | #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 | | 3461 | #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 |
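/*
 * Editorial sketch, not part of pmap.c: the skeleton of the
 * clean-list heuristic implemented below.  Up to
 * PMAP_REMOVE_CLEAN_LIST_SIZE entries are collected for a cheap
 * per-page clean; crossing the threshold once write-backs the whole
 * cache and parks the index above the limit, so every later page in
 * the range just has its PTE zapped.  whole_cache_wbinv() is a
 * hypothetical stand-in for pmap_cache_wbinv_all().
 */
#if 0	/* illustrative only */
	if (idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
		cleanlist[idx++] = entry;	/* clean this page later */
	} else if (idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
		whole_cache_wbinv();		/* give up on per-page cleaning */
		idx++;				/* park: stop collecting */
	} else {
		/* cache already fully written back: just reset the PTE */
	}
#endif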
3453 | | | 3462 | |
3454 | void | | 3463 | void |
3455 | pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva) | | 3464 | pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva) |
3456 | { | | 3465 | { |
3457 | SLIST_HEAD(,pv_entry) opv_list; | | 3466 | SLIST_HEAD(,pv_entry) opv_list; |
3458 | struct pv_entry *pv, *npv; | | 3467 | struct pv_entry *pv, *npv; |
3459 | UVMHIST_FUNC(__func__); | | 3468 | UVMHIST_FUNC(__func__); |
3460 | UVMHIST_CALLARGS(maphist, " (pm=%#jx, sva=%#jx, eva=%#jx)", | | 3469 | UVMHIST_CALLARGS(maphist, " (pm=%#jx, sva=%#jx, eva=%#jx)", |
3461 | (uintptr_t)pm, sva, eva, 0); | | 3470 | (uintptr_t)pm, sva, eva, 0); |
3462 | | | 3471 | |
3463 | #ifdef PMAP_FAULTINFO | | 3472 | #ifdef PMAP_FAULTINFO |
3464 | curpcb->pcb_faultinfo.pfi_faultaddr = 0; | | 3473 | curpcb->pcb_faultinfo.pfi_faultaddr = 0; |
3465 | curpcb->pcb_faultinfo.pfi_repeats = 0; | | 3474 | curpcb->pcb_faultinfo.pfi_repeats = 0; |
3466 | curpcb->pcb_faultinfo.pfi_faultptep = NULL; | | 3475 | curpcb->pcb_faultinfo.pfi_faultptep = NULL; |
3467 | #endif | | 3476 | #endif |
3468 | | | 3477 | |
3469 | SLIST_INIT(&opv_list); | | 3478 | SLIST_INIT(&opv_list); |
3470 | /* | | 3479 | /* |
3471 | * we lock in the pmap => pv_head direction | | 3480 | * we lock in the pmap => pv_head direction |
3472 | */ | | 3481 | */ |
| | | 3482 | kpreempt_disable(); |
3473 | pmap_acquire_pmap_lock(pm); | | 3483 | pmap_acquire_pmap_lock(pm); |
3474 | | | 3484 | |
3475 | #ifndef ARM_MMU_EXTENDED | | 3485 | #ifndef ARM_MMU_EXTENDED |
3476 | u_int cleanlist_idx, total, cnt; | | 3486 | u_int cleanlist_idx, total, cnt; |
3477 | struct { | | 3487 | struct { |
3478 | vaddr_t va; | | 3488 | vaddr_t va; |
3479 | pt_entry_t *ptep; | | 3489 | pt_entry_t *ptep; |
3480 | } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; | | 3490 | } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; |
3481 | | | 3491 | |
3482 | if (pm->pm_remove_all || !pmap_is_cached(pm)) { | | 3492 | if (pm->pm_remove_all || !pmap_is_cached(pm)) { |
3483 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; | | 3493 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; |
3484 | if (pm->pm_cstate.cs_tlb == 0) | | 3494 | if (pm->pm_cstate.cs_tlb == 0) |
3485 | pm->pm_remove_all = true; | | 3495 | pm->pm_remove_all = true; |
3486 | } else | | 3496 | } else |
3487 | cleanlist_idx = 0; | | 3497 | cleanlist_idx = 0; |
3488 | total = 0; | | 3498 | total = 0; |
3489 | #endif | | 3499 | #endif |
3490 | | | 3500 | |
3491 | while (sva < eva) { | | 3501 | while (sva < eva) { |
3492 | /* | | 3502 | /* |
3493 | * Do one L2 bucket's worth at a time. | | 3503 | * Do one L2 bucket's worth at a time. |
3494 | */ | | 3504 | */ |
3495 | vaddr_t next_bucket = L2_NEXT_BUCKET_VA(sva); | | 3505 | vaddr_t next_bucket = L2_NEXT_BUCKET_VA(sva); |
3496 | if (next_bucket > eva) | | 3506 | if (next_bucket > eva) |
3497 | next_bucket = eva; | | 3507 | next_bucket = eva; |
3498 | | | 3508 | |
3499 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, sva); | | 3509 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, sva); |
3500 | if (l2b == NULL) { | | 3510 | if (l2b == NULL) { |
3501 | sva = next_bucket; | | 3511 | sva = next_bucket; |
3502 | continue; | | 3512 | continue; |
3503 | } | | 3513 | } |
3504 | | | 3514 | |
3505 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 3515 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
3506 | u_int mappings = 0; | | 3516 | u_int mappings = 0; |
3507 | | | 3517 | |
3508 | for (; sva < next_bucket; | | 3518 | for (; sva < next_bucket;
3509 | sva += PAGE_SIZE, ptep += PAGE_SIZE / L2_S_SIZE) { | | 3519 | sva += PAGE_SIZE, ptep += PAGE_SIZE / L2_S_SIZE) { |
3510 | pt_entry_t opte = *ptep; | | 3520 | pt_entry_t opte = *ptep; |
3511 | | | 3521 | |
3512 | if (opte == 0) { | | 3522 | if (opte == 0) { |
3513 | /* Nothing here, move along */ | | 3523 | /* Nothing here, move along */ |
3514 | continue; | | 3524 | continue; |
3515 | } | | 3525 | } |
3516 | | | 3526 | |
3517 | u_int flags = PVF_REF; | | 3527 | u_int flags = PVF_REF; |
3518 | paddr_t pa = l2pte_pa(opte); | | 3528 | paddr_t pa = l2pte_pa(opte); |
3519 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); | | 3529 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); |
3520 | | | 3530 | |
3521 | /* | | 3531 | /* |
3522 | * Update flags. In many circumstances we | | 3532 | * Update flags. In many circumstances we
3523 | * could cluster these updates and handle | | 3533 | * could cluster these updates and handle
3524 | * several sequential pages in one go. | | 3534 | * several sequential pages in one go.
3525 | */ | | 3535 | */ |
3526 | if (pg != NULL) { | | 3536 | if (pg != NULL) { |
3527 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3537 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3528 | | | 3538 | |
3529 | pmap_acquire_page_lock(md); | | 3539 | pmap_acquire_page_lock(md); |
3530 | pv = pmap_remove_pv(md, pa, pm, sva); | | 3540 | pv = pmap_remove_pv(md, pa, pm, sva); |
3531 | pmap_vac_me_harder(md, pa, pm, 0); | | 3541 | pmap_vac_me_harder(md, pa, pm, 0); |
3532 | pmap_release_page_lock(md); | | 3542 | pmap_release_page_lock(md); |
3533 | if (pv != NULL) { | | 3543 | if (pv != NULL) { |
3534 | if (pm->pm_remove_all == false) { | | 3544 | if (pm->pm_remove_all == false) { |
3535 | flags = pv->pv_flags; | | 3545 | flags = pv->pv_flags; |
3536 | } | | 3546 | } |
3537 | SLIST_INSERT_HEAD(&opv_list, | | 3547 | SLIST_INSERT_HEAD(&opv_list, |
3538 | pv, pv_link); | | 3548 | pv, pv_link); |
3539 | } | | 3549 | } |
3540 | } | | 3550 | } |
3541 | mappings += PAGE_SIZE / L2_S_SIZE; | | 3551 | mappings += PAGE_SIZE / L2_S_SIZE; |
3542 | | | 3552 | |
3543 | if (!l2pte_valid_p(opte)) { | | 3553 | if (!l2pte_valid_p(opte)) { |
3544 | /* | | 3554 | /* |
3545 | * Ref/Mod emulation is still active for this | | 3555 | * Ref/Mod emulation is still active for this |
3546 | * mapping, therefore it has not yet been | | 3556 | * mapping, therefore it has not yet been
3547 | * accessed. No need to frob the cache/tlb. | | 3557 | * accessed. No need to frob the cache/tlb. |
3548 | */ | | 3558 | */ |
3549 | l2pte_reset(ptep); | | 3559 | l2pte_reset(ptep); |
3550 | PTE_SYNC_CURRENT(pm, ptep); | | 3560 | PTE_SYNC_CURRENT(pm, ptep); |
3551 | continue; | | 3561 | continue; |
3552 | } | | 3562 | } |
3553 | | | 3563 | |
3554 | #ifdef ARM_MMU_EXTENDED | | 3564 | #ifdef ARM_MMU_EXTENDED |
3555 | l2pte_reset(ptep); | | 3565 | l2pte_reset(ptep); |
3556 | PTE_SYNC(ptep); | | 3566 | PTE_SYNC(ptep); |
3557 | if (__predict_false(pm->pm_remove_all == false)) { | | 3567 | if (__predict_false(pm->pm_remove_all == false)) { |
3558 | pmap_tlb_flush_SE(pm, sva, flags); | | 3568 | pmap_tlb_flush_SE(pm, sva, flags); |
3559 | } | | 3569 | } |
3560 | #else | | 3570 | #else |
3561 | if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3571 | if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3562 | /* Add to the clean list. */ | | 3572 | /* Add to the clean list. */ |
3563 | cleanlist[cleanlist_idx].ptep = ptep; | | 3573 | cleanlist[cleanlist_idx].ptep = ptep; |
3564 | cleanlist[cleanlist_idx].va = | | 3574 | cleanlist[cleanlist_idx].va = |
3565 | sva | (flags & PVF_EXEC); | | 3575 | sva | (flags & PVF_EXEC); |
3566 | cleanlist_idx++; | | 3576 | cleanlist_idx++; |
3567 | } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3577 | } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3568 | /* Nuke everything if needed. */ | | 3578 | /* Nuke everything if needed. */ |
3569 | #ifdef PMAP_CACHE_VIVT | | 3579 | #ifdef PMAP_CACHE_VIVT |
3570 | pmap_cache_wbinv_all(pm, PVF_EXEC); | | 3580 | pmap_cache_wbinv_all(pm, PVF_EXEC); |
3571 | #endif | | 3581 | #endif |
3572 | /* | | 3582 | /* |
3573 | * Roll back the previous PTE list, | | 3583 | * Roll back the previous PTE list, |
3574 | * and zero out the current PTE. | | 3584 | * and zero out the current PTE. |
3575 | */ | | 3585 | */ |
3576 | for (cnt = 0; | | 3586 | for (cnt = 0; |
3577 | cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { | | 3587 | cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { |
3578 | l2pte_reset(cleanlist[cnt].ptep); | | 3588 | l2pte_reset(cleanlist[cnt].ptep); |
3579 | PTE_SYNC(cleanlist[cnt].ptep); | | 3589 | PTE_SYNC(cleanlist[cnt].ptep); |
3580 | } | | 3590 | } |
3581 | l2pte_reset(ptep); | | 3591 | l2pte_reset(ptep); |
3582 | PTE_SYNC(ptep); | | 3592 | PTE_SYNC(ptep); |
3583 | cleanlist_idx++; | | 3593 | cleanlist_idx++; |
3584 | pm->pm_remove_all = true; | | 3594 | pm->pm_remove_all = true; |
3585 | } else { | | 3595 | } else { |
3586 | l2pte_reset(ptep); | | 3596 | l2pte_reset(ptep); |
3587 | PTE_SYNC(ptep); | | 3597 | PTE_SYNC(ptep); |
3588 | if (pm->pm_remove_all == false) { | | 3598 | if (pm->pm_remove_all == false) { |
3589 | pmap_tlb_flush_SE(pm, sva, flags); | | 3599 | pmap_tlb_flush_SE(pm, sva, flags); |
3590 | } | | 3600 | } |
3591 | } | | 3601 | } |
3592 | #endif | | 3602 | #endif |
3593 | } | | 3603 | } |
3594 | | | 3604 | |
3595 | #ifndef ARM_MMU_EXTENDED | | 3605 | #ifndef ARM_MMU_EXTENDED |
3596 | /* | | 3606 | /* |
3597 | * Deal with any leftovers | | 3607 | * Deal with any leftovers
3598 | */ | | 3608 | */ |
3599 | if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3609 | if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3600 | total += cleanlist_idx; | | 3610 | total += cleanlist_idx; |
3601 | for (cnt = 0; cnt < cleanlist_idx; cnt++) { | | 3611 | for (cnt = 0; cnt < cleanlist_idx; cnt++) { |
3602 | l2pte_reset(cleanlist[cnt].ptep); | | 3612 | l2pte_reset(cleanlist[cnt].ptep); |
3603 | PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); | | 3613 | PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); |
3604 | vaddr_t va = cleanlist[cnt].va; | | 3614 | vaddr_t va = cleanlist[cnt].va; |
3605 | if (pm->pm_cstate.cs_all != 0) { | | 3615 | if (pm->pm_cstate.cs_all != 0) { |
3606 | vaddr_t clva = va & ~PAGE_MASK; | | 3616 | vaddr_t clva = va & ~PAGE_MASK; |
3607 | u_int flags = va & PVF_EXEC; | | 3617 | u_int flags = va & PVF_EXEC; |
3608 | #ifdef PMAP_CACHE_VIVT | | 3618 | #ifdef PMAP_CACHE_VIVT |
3609 | pmap_cache_wbinv_page(pm, clva, true, | | 3619 | pmap_cache_wbinv_page(pm, clva, true, |
3610 | PVF_REF | PVF_WRITE | flags); | | 3620 | PVF_REF | PVF_WRITE | flags); |
3611 | #endif | | 3621 | #endif |
3612 | pmap_tlb_flush_SE(pm, clva, | | 3622 | pmap_tlb_flush_SE(pm, clva, |
3613 | PVF_REF | flags); | | 3623 | PVF_REF | flags); |
3614 | } | | 3624 | } |
3615 | } | | 3625 | } |
3616 | | | 3626 | |
3617 | /* | | 3627 | /* |
3618 | * If it looks like we're removing a whole bunch | | 3628 | * If it looks like we're removing a whole bunch |
3619 | * of mappings, it's faster to just write-back | | 3629 | * of mappings, it's faster to just write-back |
3620 | * the whole cache now and defer TLB flushes until | | 3630 | * the whole cache now and defer TLB flushes until |
3621 | * pmap_update() is called. | | 3631 | * pmap_update() is called. |
3622 | */ | | 3632 | */ |
3623 | if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) | | 3633 | if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) |
3624 | cleanlist_idx = 0; | | 3634 | cleanlist_idx = 0; |
3625 | else { | | 3635 | else { |
3626 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; | | 3636 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; |
3627 | #ifdef PMAP_CACHE_VIVT | | 3637 | #ifdef PMAP_CACHE_VIVT |
3628 | pmap_cache_wbinv_all(pm, PVF_EXEC); | | 3638 | pmap_cache_wbinv_all(pm, PVF_EXEC); |
3629 | #endif | | 3639 | #endif |
3630 | pm->pm_remove_all = true; | | 3640 | pm->pm_remove_all = true; |
3631 | } | | 3641 | } |
3632 | } | | 3642 | } |
3633 | #endif /* ARM_MMU_EXTENDED */ | | 3643 | #endif /* ARM_MMU_EXTENDED */ |
3634 | | | 3644 | |
3635 | pmap_free_l2_bucket(pm, l2b, mappings); | | 3645 | pmap_free_l2_bucket(pm, l2b, mappings); |
3636 | pm->pm_stats.resident_count -= mappings / (PAGE_SIZE / L2_S_SIZE); | | 3646 | pm->pm_stats.resident_count -= mappings / (PAGE_SIZE / L2_S_SIZE);
3637 | } | | 3647 | } |
3638 | | | 3648 | |
3639 | pmap_release_pmap_lock(pm); | | 3649 | pmap_release_pmap_lock(pm); |
| | | 3650 | kpreempt_enable(); |
| | | 3651 | |
3640 | SLIST_FOREACH_SAFE(pv, &opv_list, pv_link, npv) { | | 3652 | SLIST_FOREACH_SAFE(pv, &opv_list, pv_link, npv) { |
3641 | pool_put(&pmap_pv_pool, pv); | | 3653 | pool_put(&pmap_pv_pool, pv); |
3642 | } | | 3654 | } |
3643 | } | | 3655 | } |
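/*
 * Editorial sketch, not part of pmap.c: the deferred-free pattern
 * that opv_list implements above.  Dead pv entries are collected
 * while the pmap and page locks are held and handed back to the pool
 * only after everything is unlocked, so pool_put() never runs under
 * a pmap lock.  lock()/unlock() are illustrative placeholders.
 */
#if 0	/* illustrative only */
	SLIST_HEAD(, pv_entry) dead = SLIST_HEAD_INITIALIZER(dead);
	struct pv_entry *pv, *npv;

	lock();
	/* ... SLIST_INSERT_HEAD(&dead, pv, pv_link); for each victim ... */
	unlock();

	SLIST_FOREACH_SAFE(pv, &dead, pv_link, npv)
		pool_put(&pmap_pv_pool, pv);
#endif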
3644 | | | 3656 | |
3645 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3657 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3646 | static struct pv_entry * | | 3658 | static struct pv_entry * |
3647 | pmap_kremove_pg(struct vm_page *pg, vaddr_t va) | | 3659 | pmap_kremove_pg(struct vm_page *pg, vaddr_t va) |
3648 | { | | 3660 | { |
3649 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3661 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3650 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 3662 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
3651 | struct pv_entry *pv; | | 3663 | struct pv_entry *pv; |
3652 | | | 3664 | |
3653 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); | | 3665 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); |
3654 | KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); | | 3666 | KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); |
3655 | KASSERT(pmap_page_locked_p(md)); | | 3667 | KASSERT(pmap_page_locked_p(md)); |
3656 | | | 3668 | |
3657 | pv = pmap_remove_pv(md, pa, pmap_kernel(), va); | | 3669 | pv = pmap_remove_pv(md, pa, pmap_kernel(), va); |
3658 | KASSERTMSG(pv, "pg %p (pa %#lx) va %#lx", pg, pa, va); | | 3670 | KASSERTMSG(pv, "pg %p (pa %#lx) va %#lx", pg, pa, va);
3659 | KASSERT(PV_IS_KENTRY_P(pv->pv_flags)); | | 3671 | KASSERT(PV_IS_KENTRY_P(pv->pv_flags)); |
3660 | | | 3672 | |
3661 | /* | | 3673 | /* |
3662 | * We are removing a writable mapping to a cached exec page. If | | 3674 | * We are removing a writable mapping to a cached exec page. If
3663 | * it's the last mapping, clear the page's exec status; otherwise | | 3675 | * it's the last mapping, clear the page's exec status; otherwise
3664 | * sync the page to the icache. | | 3676 | * sync the page to the icache.
3665 | */ | | 3677 | */ |
3666 | if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC | | 3678 | if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC |
3667 | && (pv->pv_flags & PVF_WRITE) != 0) { | | 3679 | && (pv->pv_flags & PVF_WRITE) != 0) { |
3668 | if (SLIST_EMPTY(&md->pvh_list)) { | | 3680 | if (SLIST_EMPTY(&md->pvh_list)) { |
3669 | md->pvh_attrs &= ~PVF_EXEC; | | 3681 | md->pvh_attrs &= ~PVF_EXEC; |
3670 | PMAPCOUNT(exec_discarded_kremove); | | 3682 | PMAPCOUNT(exec_discarded_kremove); |
3671 | } else { | | 3683 | } else { |
3672 | pmap_syncicache_page(md, pa); | | 3684 | pmap_syncicache_page(md, pa); |
3673 | PMAPCOUNT(exec_synced_kremove); | | 3685 | PMAPCOUNT(exec_synced_kremove); |
3674 | } | | 3686 | } |
3675 | } | | 3687 | } |
3676 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); | | 3688 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); |
3677 | | | 3689 | |
3678 | return pv; | | 3690 | return pv; |
3679 | } | | 3691 | } |
3680 | #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ | | 3692 | #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ |
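/*
 * Editorial sketch, not part of pmap.c: the VIPT icache rule that
 * pmap_kremove_pg() applies above, in isolation.  When the last
 * mapping of a writable exec page goes away nothing can execute from
 * it any more, so its exec status is simply dropped; while other
 * mappings remain, the icache must instead be brought back in sync
 * with the dcache.
 */
#if 0	/* illustrative only */
	if (SLIST_EMPTY(&md->pvh_list))
		md->pvh_attrs &= ~PVF_EXEC;	/* last mapping: drop exec */
	else
		pmap_syncicache_page(md, pa);	/* others remain: resync */
#endif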
3681 | | | 3693 | |
3682 | /* | | 3694 | /* |
3683 | * pmap_kenter_pa: enter an unmanaged, wired kernel mapping | | 3695 | * pmap_kenter_pa: enter an unmanaged, wired kernel mapping |
3684 | * | | 3696 | * |
3685 | * We assume there is already sufficient KVM space available | | 3697 | * We assume there is already sufficient KVM space available |
3686 | * to do this, as we can't allocate L2 descriptor tables/metadata | | 3698 | * to do this, as we can't allocate L2 descriptor tables/metadata |
3687 | * from here. | | 3699 | * from here. |
3688 | */ | | 3700 | */ |
3689 | void | | 3701 | void |
3690 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 3702 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
3691 | { | | 3703 | { |
3692 | #ifdef PMAP_CACHE_VIVT | | 3704 | #ifdef PMAP_CACHE_VIVT |
3693 | struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL; | | 3705 | struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL; |
3694 | #endif | | 3706 | #endif |
3695 | #ifdef PMAP_CACHE_VIPT | | 3707 | #ifdef PMAP_CACHE_VIPT |
3696 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); | | 3708 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); |
3697 | struct vm_page *opg; | | 3709 | struct vm_page *opg; |
3698 | #ifndef ARM_MMU_EXTENDED | | 3710 | #ifndef ARM_MMU_EXTENDED |
3699 | struct pv_entry *pv = NULL; | | 3711 | struct pv_entry *pv = NULL; |
3700 | #endif | | 3712 | #endif |
3701 | #endif | | 3713 | #endif |
3702 | struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL; | | 3714 | struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL; |
3703 | | | 3715 | |
3704 | UVMHIST_FUNC(__func__); | | 3716 | UVMHIST_FUNC(__func__); |
3705 | | | 3717 | |
3706 | if (pmap_initialized) { | | 3718 | if (pmap_initialized) { |
3707 | UVMHIST_CALLARGS(maphist, | | 3719 | UVMHIST_CALLARGS(maphist, |
3708 | "va=%#jx, pa=%#jx, prot=%#jx, flags=%#jx", va, pa, prot, | | 3720 | "va=%#jx, pa=%#jx, prot=%#jx, flags=%#jx", va, pa, prot, |
3709 | flags); | | 3721 | flags); |
3710 | } | | 3722 | } |
3711 | | | 3723 | |
| | | 3724 | kpreempt_disable(); |
3712 | pmap_t kpm = pmap_kernel(); | | 3725 | pmap_t kpm = pmap_kernel(); |
3713 | pmap_acquire_pmap_lock(kpm); | | 3726 | pmap_acquire_pmap_lock(kpm); |
3714 | struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); | | 3727 | struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); |
3715 | const size_t l1slot __diagused = l1pte_index(va); | | 3728 | const size_t l1slot __diagused = l1pte_index(va); |
3716 | KASSERTMSG(l2b != NULL, | | 3729 | KASSERTMSG(l2b != NULL, |
3717 | "va %#lx pa %#lx prot %d maxkvaddr %#lx: l2 %p l2b %p kva %p", | | 3730 | "va %#lx pa %#lx prot %d maxkvaddr %#lx: l2 %p l2b %p kva %p", |
3718 | va, pa, prot, pmap_curmaxkvaddr, kpm->pm_l2[L2_IDX(l1slot)], | | 3731 | va, pa, prot, pmap_curmaxkvaddr, kpm->pm_l2[L2_IDX(l1slot)], |
3719 | kpm->pm_l2[L2_IDX(l1slot)] | | 3732 | kpm->pm_l2[L2_IDX(l1slot)] |
3720 | ? &kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)] | | 3733 | ? &kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)] |
3721 | : NULL, | | 3734 | : NULL, |
3722 | kpm->pm_l2[L2_IDX(l1slot)] | | 3735 | kpm->pm_l2[L2_IDX(l1slot)] |
3723 | ? kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)].l2b_kva | | 3736 | ? kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)].l2b_kva |
3724 | : NULL); | | 3737 | : NULL); |
3725 | KASSERT(l2b->l2b_kva != NULL); | | 3738 | KASSERT(l2b->l2b_kva != NULL); |
3726 | | | 3739 | |
3727 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3740 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3728 | const pt_entry_t opte = *ptep; | | 3741 | const pt_entry_t opte = *ptep; |
3729 | | | 3742 | |
3730 | if (opte == 0) { | | 3743 | if (opte == 0) { |
3731 | PMAPCOUNT(kenter_mappings); | | 3744 | PMAPCOUNT(kenter_mappings); |
3732 | l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE; | | 3745 | l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE; |
3733 | } else { | | 3746 | } else { |
3734 | PMAPCOUNT(kenter_remappings); | | 3747 | PMAPCOUNT(kenter_remappings); |
3735 | #ifdef PMAP_CACHE_VIPT | | 3748 | #ifdef PMAP_CACHE_VIPT |
3736 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3749 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3737 | #if !defined(ARM_MMU_EXTENDED) || defined(DIAGNOSTIC) | | 3750 | #if !defined(ARM_MMU_EXTENDED) || defined(DIAGNOSTIC) |
3738 | struct vm_page_md *omd __diagused = VM_PAGE_TO_MD(opg); | | 3751 | struct vm_page_md *omd __diagused = VM_PAGE_TO_MD(opg); |
3739 | #endif | | 3752 | #endif |
3740 | if (opg && arm_cache_prefer_mask != 0) { | | 3753 | if (opg && arm_cache_prefer_mask != 0) { |
3741 | KASSERT(opg != pg); | | 3754 | KASSERT(opg != pg); |
3742 | KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0); | | 3755 | KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0); |
3743 | KASSERT((flags & PMAP_KMPAGE) == 0); | | 3756 | KASSERT((flags & PMAP_KMPAGE) == 0); |
3744 | #ifndef ARM_MMU_EXTENDED | | 3757 | #ifndef ARM_MMU_EXTENDED |
3745 | pmap_acquire_page_lock(omd); | | 3758 | pmap_acquire_page_lock(omd); |
3746 | pv = pmap_kremove_pg(opg, va); | | 3759 | pv = pmap_kremove_pg(opg, va); |
3747 | pmap_release_page_lock(omd); | | 3760 | pmap_release_page_lock(omd); |
3748 | #endif | | 3761 | #endif |
3749 | } | | 3762 | } |
3750 | #endif | | 3763 | #endif |
3751 | if (l2pte_valid_p(opte)) { | | 3764 | if (l2pte_valid_p(opte)) { |
3752 | l2pte_reset(ptep); | | 3765 | l2pte_reset(ptep); |
3753 | PTE_SYNC(ptep); | | 3766 | PTE_SYNC(ptep); |
3754 | #ifdef PMAP_CACHE_VIVT | | 3767 | #ifdef PMAP_CACHE_VIVT |
3755 | cpu_dcache_wbinv_range(va, PAGE_SIZE); | | 3768 | cpu_dcache_wbinv_range(va, PAGE_SIZE); |
3756 | #endif | | 3769 | #endif |
3757 | cpu_tlb_flushD_SE(va); | | 3770 | cpu_tlb_flushD_SE(va); |
3758 | cpu_cpwait(); | | 3771 | cpu_cpwait(); |
3759 | } | | 3772 | } |
3760 | } | | 3773 | } |
3761 | pmap_release_pmap_lock(kpm); | | 3774 | pmap_release_pmap_lock(kpm); |
3762 | pt_entry_t npte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot); | | 3775 | pt_entry_t npte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot); |
3763 | | | 3776 | |
3764 | if (flags & PMAP_PTE) { | | 3777 | if (flags & PMAP_PTE) { |
3765 | KASSERT((flags & PMAP_CACHE_MASK) == 0); | | 3778 | KASSERT((flags & PMAP_CACHE_MASK) == 0); |
3766 | if (!(flags & PMAP_NOCACHE)) | | 3779 | if (!(flags & PMAP_NOCACHE)) |
3767 | npte |= pte_l2_s_cache_mode_pt; | | 3780 | npte |= pte_l2_s_cache_mode_pt; |
3768 | } else { | | 3781 | } else { |
3769 | switch (flags & (PMAP_CACHE_MASK | PMAP_DEV_MASK)) { | | 3782 | switch (flags & (PMAP_CACHE_MASK | PMAP_DEV_MASK)) { |
3770 | case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK: | | 3783 | case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK: |
3771 | break; | | 3784 | break; |
3772 | case PMAP_NOCACHE: | | 3785 | case PMAP_NOCACHE: |
3773 | npte |= pte_l2_s_nocache_mode; | | 3786 | npte |= pte_l2_s_nocache_mode; |
3774 | break; | | 3787 | break; |
3775 | case PMAP_WRITE_COMBINE: | | 3788 | case PMAP_WRITE_COMBINE: |
3776 | npte |= pte_l2_s_wc_mode; | | 3789 | npte |= pte_l2_s_wc_mode; |
3777 | break; | | 3790 | break; |
3778 | default: | | 3791 | default: |
3779 | npte |= pte_l2_s_cache_mode; | | 3792 | npte |= pte_l2_s_cache_mode; |
3780 | break; | | 3793 | break; |
3781 | } | | 3794 | } |
3782 | } | | 3795 | } |
3783 | #ifdef ARM_MMU_EXTENDED | | 3796 | #ifdef ARM_MMU_EXTENDED |
3784 | if (prot & VM_PROT_EXECUTE) | | 3797 | if (prot & VM_PROT_EXECUTE) |
3785 | npte &= ~L2_XS_XN; | | 3798 | npte &= ~L2_XS_XN; |
3786 | #endif | | 3799 | #endif |
3787 | l2pte_set(ptep, npte, 0); | | 3800 | l2pte_set(ptep, npte, 0); |
3788 | PTE_SYNC(ptep); | | 3801 | PTE_SYNC(ptep); |
3789 | | | 3802 | |
3790 | if (pg) { | | 3803 | if (pg) { |
3791 | if (flags & PMAP_KMPAGE) { | | 3804 | if (flags & PMAP_KMPAGE) { |
3792 | KASSERT(md->urw_mappings == 0); | | 3805 | KASSERT(md->urw_mappings == 0); |
3793 | KASSERT(md->uro_mappings == 0); | | 3806 | KASSERT(md->uro_mappings == 0); |
3794 | KASSERT(md->krw_mappings == 0); | | 3807 | KASSERT(md->krw_mappings == 0); |
3795 | KASSERT(md->kro_mappings == 0); | | 3808 | KASSERT(md->kro_mappings == 0); |
3796 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3809 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3797 | KASSERT(pv == NULL); | | 3810 | KASSERT(pv == NULL); |
3798 | KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0); | | 3811 | KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0); |
3799 | KASSERT((md->pvh_attrs & PVF_NC) == 0); | | 3812 | KASSERT((md->pvh_attrs & PVF_NC) == 0); |
3800 | /* if there is a color conflict, evict from cache. */ | | 3813 | /* if there is a color conflict, evict from cache. */ |
3801 | if (pmap_is_page_colored_p(md) | | 3814 | if (pmap_is_page_colored_p(md) |
3802 | && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) { | | 3815 | && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) { |
3803 | PMAPCOUNT(vac_color_change); | | 3816 | PMAPCOUNT(vac_color_change); |
3804 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); | | 3817 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); |
3805 | } else if (md->pvh_attrs & PVF_MULTCLR) { | | 3818 | } else if (md->pvh_attrs & PVF_MULTCLR) { |
3806 | /* | | 3819 | /* |
3807 | * If this page has multiple colors, expunge | | 3820 | * If this page has multiple colors, expunge |
3808 | * them. | | 3821 | * them. |
3809 | */ | | 3822 | */ |
3810 | PMAPCOUNT(vac_flush_lots2); | | 3823 | PMAPCOUNT(vac_flush_lots2); |
3811 | pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY); | | 3824 | pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY); |
3812 | } | | 3825 | } |
3813 | /* | | 3826 | /* |
3814 | * Since this is a KMPAGE, there can be no contention | | 3827 | * Since this is a KMPAGE, there can be no contention |
3815 | * for this page so don't lock it. | | 3828 | * for this page so don't lock it. |
3816 | */ | | 3829 | */ |
3817 | md->pvh_attrs &= PAGE_SIZE - 1; | | 3830 | md->pvh_attrs &= PAGE_SIZE - 1; |
3818 | md->pvh_attrs |= PVF_KMPAGE | PVF_COLORED | PVF_DIRTY | | 3831 | md->pvh_attrs |= PVF_KMPAGE | PVF_COLORED | PVF_DIRTY |
3819 | | (va & arm_cache_prefer_mask); | | 3832 | | (va & arm_cache_prefer_mask); |
3820 | #else /* !PMAP_CACHE_VIPT || ARM_MMU_EXTENDED */ | | 3833 | #else /* !PMAP_CACHE_VIPT || ARM_MMU_EXTENDED */ |
3821 | md->pvh_attrs |= PVF_KMPAGE; | | 3834 | md->pvh_attrs |= PVF_KMPAGE; |
3822 | #endif | | 3835 | #endif |
3823 | atomic_inc_32(&pmap_kmpages); | | 3836 | atomic_inc_32(&pmap_kmpages); |
3824 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3837 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3825 | } else if (arm_cache_prefer_mask != 0) { | | 3838 | } else if (arm_cache_prefer_mask != 0) { |
3826 | if (pv == NULL) { | | 3839 | if (pv == NULL) { |
3827 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); | | 3840 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); |
3828 | KASSERT(pv != NULL); | | 3841 | KASSERT(pv != NULL); |
3829 | } | | 3842 | } |
3830 | pmap_acquire_page_lock(md); | | 3843 | pmap_acquire_page_lock(md); |
3831 | pmap_enter_pv(md, pa, pv, pmap_kernel(), va, | | 3844 | pmap_enter_pv(md, pa, pv, pmap_kernel(), va, |
3832 | PVF_WIRED | PVF_KENTRY | | 3845 | PVF_WIRED | PVF_KENTRY |
3833 | | (prot & VM_PROT_WRITE ? PVF_WRITE : 0)); | | 3846 | | (prot & VM_PROT_WRITE ? PVF_WRITE : 0)); |
3834 | if ((prot & VM_PROT_WRITE) | | 3847 | if ((prot & VM_PROT_WRITE) |
3835 | && !(md->pvh_attrs & PVF_NC)) | | 3848 | && !(md->pvh_attrs & PVF_NC)) |
3836 | md->pvh_attrs |= PVF_DIRTY; | | 3849 | md->pvh_attrs |= PVF_DIRTY; |
3837 | KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 3850 | KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
3838 | pmap_vac_me_harder(md, pa, pmap_kernel(), va); | | 3851 | pmap_vac_me_harder(md, pa, pmap_kernel(), va); |
3839 | pmap_release_page_lock(md); | | 3852 | pmap_release_page_lock(md); |
3840 | #endif | | 3853 | #endif |
3841 | } | | 3854 | } |
3842 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3855 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3843 | } else { | | 3856 | } else { |
3844 | if (pv != NULL) | | 3857 | if (pv != NULL) |
3845 | pool_put(&pmap_pv_pool, pv); | | 3858 | pool_put(&pmap_pv_pool, pv); |
3846 | #endif | | 3859 | #endif |
3847 | } | | 3860 | } |
| | | 3861 | kpreempt_enable(); |
| | | 3862 | |
3848 | if (pmap_initialized) { | | 3863 | if (pmap_initialized) { |
3849 | UVMHIST_LOG(maphist, " <-- done (ptep %#jx: %#jx -> %#jx)", | | 3864 | UVMHIST_LOG(maphist, " <-- done (ptep %#jx: %#jx -> %#jx)", |
3850 | (uintptr_t)ptep, opte, npte, 0); | | 3865 | (uintptr_t)ptep, opte, npte, 0); |
3851 | } | | 3866 | } |
3852 | | | 3867 | |
3853 | } | | 3868 | } |
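
Revision 1.426 (the right-hand column) brackets pmap_kenter_pa() with kpreempt_disable()/kpreempt_enable() so the kernel-pmap update cannot migrate between CPUs partway through. Independent of that change, the function replaces a live PTE with the break-before-make protocol used throughout this file: zap the old entry, sync it out, shoot down the single TLB entry, then install and sync the new one. A minimal sketch of that protocol follows; the helper name is invented for illustration and is not part of pmap.c, but every call in it appears in the code above.

/*
 * Sketch only: break-before-make replacement of a (possibly live)
 * PTE, as done by pmap_kenter_pa() above.
 */
static inline void
pmap_replace_pte_sketch(pt_entry_t *ptep, pt_entry_t npte, vaddr_t va)
{
	if (l2pte_valid_p(*ptep)) {
		l2pte_reset(ptep);	/* invalidate the old entry */
		PTE_SYNC(ptep);		/* push the zap to the MMU */
#ifdef PMAP_CACHE_VIVT
		cpu_dcache_wbinv_range(va, PAGE_SIZE);
#endif
		cpu_tlb_flushD_SE(va);	/* drop the stale TLB entry */
		cpu_cpwait();
	}
	l2pte_set(ptep, npte, 0);	/* install the new mapping */
	PTE_SYNC(ptep);
}
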
3854 | | | 3869 | |
3855 | void | | 3870 | void |
3856 | pmap_kremove(vaddr_t va, vsize_t len) | | 3871 | pmap_kremove(vaddr_t va, vsize_t len) |
3857 | { | | 3872 | { |
3858 | #ifdef UVMHIST | | 3873 | #ifdef UVMHIST |
3859 | u_int total_mappings = 0; | | 3874 | u_int total_mappings = 0; |
3860 | #endif | | 3875 | #endif |
3861 | | | 3876 | |
3862 | PMAPCOUNT(kenter_unmappings); | | 3877 | PMAPCOUNT(kenter_unmappings); |
3863 | | | 3878 | |
3864 | UVMHIST_FUNC(__func__); | | 3879 | UVMHIST_FUNC(__func__); |
3865 | UVMHIST_CALLARGS(maphist, " (va=%#jx, len=%#jx)", va, len, 0, 0); | | 3880 | UVMHIST_CALLARGS(maphist, " (va=%#jx, len=%#jx)", va, len, 0, 0); |
3866 | | | 3881 | |
3867 | const vaddr_t eva = va + len; | | 3882 | const vaddr_t eva = va + len; |
3868 | pmap_t kpm = pmap_kernel(); | | 3883 | pmap_t kpm = pmap_kernel(); |
3869 | | | 3884 | |
| | | 3885 | kpreempt_disable(); |
3870 | pmap_acquire_pmap_lock(kpm); | | 3886 | pmap_acquire_pmap_lock(kpm); |
3871 | | | 3887 | |
3872 | while (va < eva) { | | 3888 | while (va < eva) { |
3873 | vaddr_t next_bucket = L2_NEXT_BUCKET_VA(va); | | 3889 | vaddr_t next_bucket = L2_NEXT_BUCKET_VA(va); |
3874 | if (next_bucket > eva) | | 3890 | if (next_bucket > eva) |
3875 | next_bucket = eva; | | 3891 | next_bucket = eva; |
3876 | | | 3892 | |
3877 | struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); | | 3893 | struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); |
3878 | KDASSERT(l2b != NULL); | | 3894 | KDASSERT(l2b != NULL); |
3879 | | | 3895 | |
3880 | pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3896 | pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)]; |
3881 | pt_entry_t *ptep = sptep; | | 3897 | pt_entry_t *ptep = sptep; |
3882 | u_int mappings = 0; | | 3898 | u_int mappings = 0; |
3883 | | | 3899 | |
3884 | while (va < next_bucket) { | | 3900 | while (va < next_bucket) { |
3885 | const pt_entry_t opte = *ptep; | | 3901 | const pt_entry_t opte = *ptep; |
3886 | struct vm_page *opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3902 | struct vm_page *opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3887 | if (opg != NULL) { | | 3903 | if (opg != NULL) { |
3888 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 3904 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
3889 | | | 3905 | |
3890 | if (omd->pvh_attrs & PVF_KMPAGE) { | | 3906 | if (omd->pvh_attrs & PVF_KMPAGE) { |
3891 | KASSERT(omd->urw_mappings == 0); | | 3907 | KASSERT(omd->urw_mappings == 0); |
3892 | KASSERT(omd->uro_mappings == 0); | | 3908 | KASSERT(omd->uro_mappings == 0); |
3893 | KASSERT(omd->krw_mappings == 0); | | 3909 | KASSERT(omd->krw_mappings == 0); |
3894 | KASSERT(omd->kro_mappings == 0); | | 3910 | KASSERT(omd->kro_mappings == 0); |
3895 | omd->pvh_attrs &= ~PVF_KMPAGE; | | 3911 | omd->pvh_attrs &= ~PVF_KMPAGE; |
3896 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3912 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3897 | if (arm_cache_prefer_mask != 0) { | | 3913 | if (arm_cache_prefer_mask != 0) { |
3898 | omd->pvh_attrs &= ~PVF_WRITE; | | 3914 | omd->pvh_attrs &= ~PVF_WRITE; |
3899 | } | | 3915 | } |
3900 | #endif | | 3916 | #endif |
3901 | atomic_dec_32(&pmap_kmpages); | | 3917 | atomic_dec_32(&pmap_kmpages); |
3902 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3918 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3903 | } else if (arm_cache_prefer_mask != 0) { | | 3919 | } else if (arm_cache_prefer_mask != 0) { |
3904 | pmap_acquire_page_lock(omd); | | 3920 | pmap_acquire_page_lock(omd); |
3905 | pool_put(&pmap_pv_pool, | | 3921 | pool_put(&pmap_pv_pool, |
3906 | pmap_kremove_pg(opg, va)); | | 3922 | pmap_kremove_pg(opg, va)); |
3907 | pmap_release_page_lock(omd); | | 3923 | pmap_release_page_lock(omd); |
3908 | #endif | | 3924 | #endif |
3909 | } | | 3925 | } |
3910 | } | | 3926 | } |
3911 | if (l2pte_valid_p(opte)) { | | 3927 | if (l2pte_valid_p(opte)) { |
3912 | l2pte_reset(ptep); | | 3928 | l2pte_reset(ptep); |
3913 | PTE_SYNC(ptep); | | 3929 | PTE_SYNC(ptep); |
3914 | #ifdef PMAP_CACHE_VIVT | | 3930 | #ifdef PMAP_CACHE_VIVT |
3915 | cpu_dcache_wbinv_range(va, PAGE_SIZE); | | 3931 | cpu_dcache_wbinv_range(va, PAGE_SIZE); |
3916 | #endif | | 3932 | #endif |
3917 | cpu_tlb_flushD_SE(va); | | 3933 | cpu_tlb_flushD_SE(va); |
3918 | | | 3934 | |
3919 | mappings += PAGE_SIZE / L2_S_SIZE; | | 3935 | mappings += PAGE_SIZE / L2_S_SIZE; |
3920 | } | | 3936 | } |
3921 | va += PAGE_SIZE; | | 3937 | va += PAGE_SIZE; |
3922 | ptep += PAGE_SIZE / L2_S_SIZE; | | 3938 | ptep += PAGE_SIZE / L2_S_SIZE; |
3923 | } | | 3939 | } |
3924 | KDASSERTMSG(mappings <= l2b->l2b_occupancy, "%u %u", | | 3940 | KDASSERTMSG(mappings <= l2b->l2b_occupancy, "%u %u", |
3925 | mappings, l2b->l2b_occupancy); | | 3941 | mappings, l2b->l2b_occupancy); |
3926 | l2b->l2b_occupancy -= mappings; | | 3942 | l2b->l2b_occupancy -= mappings; |
3927 | //PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); | | 3943 | //PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); |
3928 | #ifdef UVMHIST | | 3944 | #ifdef UVMHIST |
3929 | total_mappings += mappings; | | 3945 | total_mappings += mappings; |
3930 | #endif | | 3946 | #endif |
3931 | } | | 3947 | } |
3932 | pmap_release_pmap_lock(kpm); | | 3948 | pmap_release_pmap_lock(kpm); |
3933 | cpu_cpwait(); | | 3949 | cpu_cpwait(); |
| | | 3950 | kpreempt_enable(); |
| | | 3951 | |
3934 | UVMHIST_LOG(maphist, " <--- done (%ju mappings removed)", | | 3952 | UVMHIST_LOG(maphist, " <--- done (%ju mappings removed)", |
3935 | total_mappings, 0, 0, 0); | | 3953 | total_mappings, 0, 0, 0); |
3936 | } | | 3954 | } |
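
pmap_kremove() above, like pmap_protect() further down, walks its range bucket-wise: the L2 bucket is looked up once per bucket-sized span of VA, and the PTE pointer is then stepped page by page within it. A stripped-down sketch of that walk (the function name is invented; the helpers are the ones used above):

/*
 * Sketch: the bucket-wise VA walk shared by pmap_kremove() and
 * pmap_protect().  One pmap_get_l2_bucket() lookup serves a whole
 * bucket's worth of pages.
 */
static void
pmap_range_walk_sketch(pmap_t pm, vaddr_t sva, vaddr_t eva)
{
	while (sva < eva) {
		vaddr_t next_bucket = L2_NEXT_BUCKET_VA(sva);
		if (next_bucket > eva)
			next_bucket = eva;

		struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, sva);
		if (l2b == NULL) {
			sva = next_bucket;	/* span is unmapped */
			continue;
		}

		pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)];
		while (sva < next_bucket) {
			/* per-page work on *ptep goes here */
			sva += PAGE_SIZE;
			ptep += PAGE_SIZE / L2_S_SIZE;
		}
	}
}
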
3937 | | | 3955 | |
3938 | bool | | 3956 | bool |
3939 | pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) | | 3957 | pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) |
3940 | { | | 3958 | { |
3941 | | | 3959 | |
3942 | return pmap_extract_coherency(pm, va, pap, NULL); | | 3960 | return pmap_extract_coherency(pm, va, pap, NULL); |
3943 | } | | 3961 | } |
3944 | | | 3962 | |
3945 | bool | | 3963 | bool |
3946 | pmap_extract_coherency(pmap_t pm, vaddr_t va, paddr_t *pap, bool *coherentp) | | 3964 | pmap_extract_coherency(pmap_t pm, vaddr_t va, paddr_t *pap, bool *coherentp) |
3947 | { | | 3965 | { |
3948 | struct l2_dtable *l2; | | 3966 | struct l2_dtable *l2; |
3949 | pd_entry_t *pdep, pde; | | 3967 | pd_entry_t *pdep, pde; |
3950 | pt_entry_t *ptep, pte; | | 3968 | pt_entry_t *ptep, pte; |
3951 | paddr_t pa; | | 3969 | paddr_t pa; |
3952 | u_int l1slot; | | 3970 | u_int l1slot; |
3953 | bool coherent; | | 3971 | bool coherent; |
3954 | | | 3972 | |
| | | 3973 | kpreempt_disable(); |
3955 | pmap_acquire_pmap_lock(pm); | | 3974 | pmap_acquire_pmap_lock(pm); |
3956 | | | 3975 | |
3957 | l1slot = l1pte_index(va); | | 3976 | l1slot = l1pte_index(va); |
3958 | pdep = pmap_l1_kva(pm) + l1slot; | | 3977 | pdep = pmap_l1_kva(pm) + l1slot; |
3959 | pde = *pdep; | | 3978 | pde = *pdep; |
3960 | | | 3979 | |
3961 | if (l1pte_section_p(pde)) { | | 3980 | if (l1pte_section_p(pde)) { |
3962 | /* | | 3981 | /* |
3963 | * Section mappings should only happen for pmap_kernel() | | 3982 | * Section mappings should only happen for pmap_kernel()
3964 | */ | | 3983 | */ |
3965 | KDASSERT(pm == pmap_kernel()); | | 3984 | KDASSERT(pm == pmap_kernel()); |
3966 | pmap_release_pmap_lock(pm); | | 3985 | pmap_release_pmap_lock(pm); |
3967 | #if (ARM_MMU_V6 + ARM_MMU_V7) > 0 | | 3986 | #if (ARM_MMU_V6 + ARM_MMU_V7) > 0 |
3968 | if (l1pte_supersection_p(pde)) { | | 3987 | if (l1pte_supersection_p(pde)) { |
3969 | pa = (pde & L1_SS_FRAME) | (va & L1_SS_OFFSET); | | 3988 | pa = (pde & L1_SS_FRAME) | (va & L1_SS_OFFSET); |
3970 | } else | | 3989 | } else |
3971 | #endif | | 3990 | #endif |
3972 | pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET); | | 3991 | pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET); |
3973 | coherent = (pde & L1_S_CACHE_MASK) == 0; | | 3992 | coherent = (pde & L1_S_CACHE_MASK) == 0; |
3974 | } else { | | 3993 | } else { |
3975 | /* | | 3994 | /* |
3976 | * Note that we can't rely on the validity of the L1 | | 3995 | * Note that we can't rely on the validity of the L1 |
3977 | * descriptor as an indication that a mapping exists. | | 3996 | * descriptor as an indication that a mapping exists. |
3978 | * We have to look it up in the L2 dtable. | | 3997 | * We have to look it up in the L2 dtable. |
3979 | */ | | 3998 | */ |
3980 | l2 = pm->pm_l2[L2_IDX(l1slot)]; | | 3999 | l2 = pm->pm_l2[L2_IDX(l1slot)]; |
3981 | | | 4000 | |
3982 | if (l2 == NULL || | | 4001 | if (l2 == NULL || |
3983 | (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) { | | 4002 | (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) { |
3984 | pmap_release_pmap_lock(pm); | | 4003 | pmap_release_pmap_lock(pm); |
| | | 4004 | kpreempt_enable(); |
| | | 4005 | |
3985 | return false; | | 4006 | return false; |
3986 | } | | 4007 | } |
3987 | | | 4008 | |
3988 | pte = ptep[l2pte_index(va)]; | | 4009 | pte = ptep[l2pte_index(va)]; |
3989 | pmap_release_pmap_lock(pm); | | 4010 | pmap_release_pmap_lock(pm); |
| | | 4011 | kpreempt_enable(); |
3990 | | | 4012 | |
3991 | if (pte == 0) | | 4013 | if (pte == 0) |
3992 | return false; | | 4014 | return false; |
3993 | | | 4015 | |
3994 | switch (pte & L2_TYPE_MASK) { | | 4016 | switch (pte & L2_TYPE_MASK) { |
3995 | case L2_TYPE_L: | | 4017 | case L2_TYPE_L: |
3996 | pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); | | 4018 | pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); |
3997 | coherent = (pte & L2_L_CACHE_MASK) == 0; | | 4019 | coherent = (pte & L2_L_CACHE_MASK) == 0; |
3998 | break; | | 4020 | break; |
3999 | | | 4021 | |
4000 | default: | | 4022 | default: |
4001 | pa = (pte & ~PAGE_MASK) | (va & PAGE_MASK); | | 4023 | pa = (pte & ~PAGE_MASK) | (va & PAGE_MASK); |
4002 | coherent = (pte & L2_S_CACHE_MASK) == 0; | | 4024 | coherent = (pte & L2_S_CACHE_MASK) == 0; |
4003 | break; | | 4025 | break; |
4004 | } | | 4026 | } |
4005 | } | | 4027 | } |
4006 | | | 4028 | |
4007 | if (pap != NULL) | | 4029 | if (pap != NULL) |
4008 | *pap = pa; | | 4030 | *pap = pa; |
4009 | | | 4031 | |
4010 | if (coherentp != NULL) | | 4032 | if (coherentp != NULL) |
4011 | *coherentp = (pm == pmap_kernel() && coherent); | | 4033 | *coherentp = (pm == pmap_kernel() && coherent); |
4012 | | | 4034 | |
4013 | return true; | | 4035 | return true; |
4014 | } | | 4036 | } |
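
The physical address reconstruction in pmap_extract_coherency() is pure mask arithmetic: the descriptor type decides how many low bits of the VA pass through as the page offset. A condensed sketch (illustrative function name; the frame/offset macros are those used above):

/*
 * Sketch: VA -> PA recovery by descriptor type, condensed from
 * pmap_extract_coherency() above.
 */
static paddr_t
pmap_pa_from_desc_sketch(pd_entry_t pde, pt_entry_t pte, vaddr_t va)
{
	if (l1pte_section_p(pde)) {
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (l1pte_supersection_p(pde))	/* 16MB supersection */
			return (pde & L1_SS_FRAME) | (va & L1_SS_OFFSET);
#endif
		return (pde & L1_S_FRAME) | (va & L1_S_OFFSET); /* 1MB */
	}
	if ((pte & L2_TYPE_MASK) == L2_TYPE_L)		/* 64KB large page */
		return (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
	return (pte & ~PAGE_MASK) | (va & PAGE_MASK);	/* 4KB small page */
}
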
4015 | | | 4037 | |
4016 | /* | | 4038 | /* |
4017 | * pmap_pv_remove: remove an unmanaged pv-tracked page from all pmaps | | 4039 | * pmap_pv_remove: remove an unmanaged pv-tracked page from all pmaps |
4018 | * that map it | | 4040 | * that map it |
4019 | */ | | 4041 | */ |
4020 | | | 4042 | |
4021 | static void | | 4043 | static void |
4022 | pmap_pv_remove(paddr_t pa) | | 4044 | pmap_pv_remove(paddr_t pa) |
4023 | { | | 4045 | { |
4024 | struct pmap_page *pp; | | 4046 | struct pmap_page *pp; |
4025 | | | 4047 | |
| | | 4048 | KASSERT(kpreempt_disabled()); |
4026 | pp = pmap_pv_tracked(pa); | | 4049 | pp = pmap_pv_tracked(pa); |
4027 | if (pp == NULL) | | 4050 | if (pp == NULL) |
4028 | panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR, | | 4051 | panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR, |
4029 | pa); | | 4052 | pa); |
4030 | | | 4053 | |
4031 | struct vm_page_md *md = PMAP_PAGE_TO_MD(pp); | | 4054 | struct vm_page_md *md = PMAP_PAGE_TO_MD(pp); |
4032 | pmap_page_remove(md, pa); | | 4055 | pmap_page_remove(md, pa); |
4033 | } | | 4056 | } |
4034 | | | 4057 | |
4035 | void | | 4058 | void |
4036 | pmap_pv_protect(paddr_t pa, vm_prot_t prot) | | 4059 | pmap_pv_protect(paddr_t pa, vm_prot_t prot) |
4037 | { | | 4060 | { |
4038 | | | 4061 | |
4039 | /* the only case is remove at the moment */ | | 4062 | /* the only case is remove at the moment */ |
4040 | KASSERT(prot == VM_PROT_NONE); | | 4063 | KASSERT(prot == VM_PROT_NONE); |
4041 | pmap_pv_remove(pa); | | 4064 | pmap_pv_remove(pa); |
4042 | } | | 4065 | } |
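
From the caller's side, the pv-tracked interface above currently supports exactly one operation, so the only valid call shape today is removal (any other protection trips the KASSERT):

	/* Drop every mapping of an unmanaged, pv-tracked page. */
	pmap_pv_protect(pa, VM_PROT_NONE);
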
4043 | | | 4066 | |
4044 | void | | 4067 | void |
4045 | pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) | | 4068 | pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) |
4046 | { | | 4069 | { |
4047 | struct l2_bucket *l2b; | | 4070 | struct l2_bucket *l2b; |
4048 | vaddr_t next_bucket; | | 4071 | vaddr_t next_bucket; |
4049 | | | 4072 | |
4050 | UVMHIST_FUNC(__func__); | | 4073 | UVMHIST_FUNC(__func__); |
4051 | UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx...%#jx prot %#jx", | | 4074 | UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx...%#jx prot %#jx",
4052 | (uintptr_t)pm, sva, eva, prot); | | 4075 | (uintptr_t)pm, sva, eva, prot); |
4053 | | | 4076 | |
4054 | if ((prot & VM_PROT_READ) == 0) { | | 4077 | if ((prot & VM_PROT_READ) == 0) { |
4055 | pmap_remove(pm, sva, eva); | | 4078 | pmap_remove(pm, sva, eva); |
4056 | return; | | 4079 | return; |
4057 | } | | 4080 | } |
4058 | | | 4081 | |
4059 | if (prot & VM_PROT_WRITE) { | | 4082 | if (prot & VM_PROT_WRITE) { |
4060 | /* | | 4083 | /* |
4061 | * If this is a read->write transition, just ignore it and let | | 4084 | * If this is a read->write transition, just ignore it and let |
4062 | * uvm_fault() take care of it later. | | 4085 | * uvm_fault() take care of it later. |
4063 | */ | | 4086 | */ |
4064 | return; | | 4087 | return; |
4065 | } | | 4088 | } |
4066 | | | 4089 | |
| | | 4090 | kpreempt_disable(); |
4067 | pmap_acquire_pmap_lock(pm); | | 4091 | pmap_acquire_pmap_lock(pm); |
4068 | | | 4092 | |
4069 | #ifndef ARM_MMU_EXTENDED | | 4093 | #ifndef ARM_MMU_EXTENDED |
4070 | const bool flush = eva - sva >= PAGE_SIZE * 4; | | 4094 | const bool flush = eva - sva >= PAGE_SIZE * 4; |
4071 | u_int flags = 0; | | 4095 | u_int flags = 0; |
4072 | #endif | | 4096 | #endif |
4073 | u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); | | 4097 | u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); |
4074 | | | 4098 | |
4075 | while (sva < eva) { | | 4099 | while (sva < eva) { |
4076 | next_bucket = L2_NEXT_BUCKET_VA(sva); | | 4100 | next_bucket = L2_NEXT_BUCKET_VA(sva); |
4077 | if (next_bucket > eva) | | 4101 | if (next_bucket > eva) |
4078 | next_bucket = eva; | | 4102 | next_bucket = eva; |
4079 | | | 4103 | |
4080 | l2b = pmap_get_l2_bucket(pm, sva); | | 4104 | l2b = pmap_get_l2_bucket(pm, sva); |
4081 | if (l2b == NULL) { | | 4105 | if (l2b == NULL) { |
4082 | sva = next_bucket; | | 4106 | sva = next_bucket; |
4083 | continue; | | 4107 | continue; |
4084 | } | | 4108 | } |
4085 | | | 4109 | |
4086 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 4110 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
4087 | | | 4111 | |
4088 | while (sva < next_bucket) { | | 4112 | while (sva < next_bucket) { |
4089 | const pt_entry_t opte = *ptep; | | 4113 | const pt_entry_t opte = *ptep; |
4090 | if (l2pte_valid_p(opte) && l2pte_writable_p(opte)) { | | 4114 | if (l2pte_valid_p(opte) && l2pte_writable_p(opte)) { |
4091 | struct vm_page *pg; | | 4115 | struct vm_page *pg; |
4092 | #ifndef ARM_MMU_EXTENDED | | 4116 | #ifndef ARM_MMU_EXTENDED |
4093 | u_int f; | | 4117 | u_int f; |
4094 | #endif | | 4118 | #endif |
4095 | | | 4119 | |
4096 | #ifdef PMAP_CACHE_VIVT | | 4120 | #ifdef PMAP_CACHE_VIVT |
4097 | /* | | 4121 | /* |
4098 | * OK, at this point, we know we're doing | | 4122 | * OK, at this point, we know we're doing |
4099 | * write-protect operation. If the pmap is | | 4123 | * write-protect operation. If the pmap is |
4100 | * active, write-back the page. | | 4124 | * active, write-back the page. |
4101 | */ | | 4125 | */ |
4102 | pmap_cache_wbinv_page(pm, sva, false, | | 4126 | pmap_cache_wbinv_page(pm, sva, false, |
4103 | PVF_REF | PVF_WRITE); | | 4127 | PVF_REF | PVF_WRITE); |
4104 | #endif | | 4128 | #endif |
4105 | | | 4129 | |
4106 | pg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 4130 | pg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
4107 | pt_entry_t npte = l2pte_set_readonly(opte); | | 4131 | pt_entry_t npte = l2pte_set_readonly(opte); |
4108 | l2pte_reset(ptep); | | 4132 | l2pte_reset(ptep); |
4109 | PTE_SYNC(ptep); | | 4133 | PTE_SYNC(ptep); |
4110 | #ifdef ARM_MMU_EXTENDED | | 4134 | #ifdef ARM_MMU_EXTENDED |
4111 | pmap_tlb_flush_SE(pm, sva, PVF_REF); | | 4135 | pmap_tlb_flush_SE(pm, sva, PVF_REF); |
4112 | #endif | | 4136 | #endif |
4113 | l2pte_set(ptep, npte, 0); | | 4137 | l2pte_set(ptep, npte, 0); |
4114 | PTE_SYNC(ptep); | | 4138 | PTE_SYNC(ptep); |
4115 | | | 4139 | |
4116 | if (pg != NULL) { | | 4140 | if (pg != NULL) { |
4117 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4141 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4118 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 4142 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
4119 | | | 4143 | |
4120 | pmap_acquire_page_lock(md); | | 4144 | pmap_acquire_page_lock(md); |
4121 | #ifndef ARM_MMU_EXTENDED | | 4145 | #ifndef ARM_MMU_EXTENDED |
4122 | f = | | 4146 | f = |
4123 | #endif | | 4147 | #endif |
4124 | pmap_modify_pv(md, pa, pm, sva, | | 4148 | pmap_modify_pv(md, pa, pm, sva, |
4125 | clr_mask, 0); | | 4149 | clr_mask, 0); |
4126 | pmap_vac_me_harder(md, pa, pm, sva); | | 4150 | pmap_vac_me_harder(md, pa, pm, sva); |
4127 | pmap_release_page_lock(md); | | 4151 | pmap_release_page_lock(md); |
4128 | #ifndef ARM_MMU_EXTENDED | | 4152 | #ifndef ARM_MMU_EXTENDED |
4129 | } else { | | 4153 | } else { |
4130 | f = PVF_REF | PVF_EXEC; | | 4154 | f = PVF_REF | PVF_EXEC; |
4131 | } | | 4155 | } |
4132 | | | 4156 | |
4133 | if (flush) { | | 4157 | if (flush) { |
4134 | flags |= f; | | 4158 | flags |= f; |
4135 | } else { | | 4159 | } else { |
4136 | pmap_tlb_flush_SE(pm, sva, f); | | 4160 | pmap_tlb_flush_SE(pm, sva, f); |
4137 | #endif | | 4161 | #endif |
4138 | } | | 4162 | } |
4139 | } | | 4163 | } |
4140 | | | 4164 | |
4141 | sva += PAGE_SIZE; | | 4165 | sva += PAGE_SIZE; |
4142 | ptep += PAGE_SIZE / L2_S_SIZE; | | 4166 | ptep += PAGE_SIZE / L2_S_SIZE; |
4143 | } | | 4167 | } |
4144 | } | | 4168 | } |
4145 | | | 4169 | |
4146 | #ifndef ARM_MMU_EXTENDED | | 4170 | #ifndef ARM_MMU_EXTENDED |
4147 | if (flush) { | | 4171 | if (flush) { |
4148 | if (PV_BEEN_EXECD(flags)) { | | 4172 | if (PV_BEEN_EXECD(flags)) { |
4149 | pmap_tlb_flushID(pm); | | 4173 | pmap_tlb_flushID(pm); |
4150 | } else if (PV_BEEN_REFD(flags)) { | | 4174 | } else if (PV_BEEN_REFD(flags)) { |
4151 | pmap_tlb_flushD(pm); | | 4175 | pmap_tlb_flushD(pm); |
4152 | } | | 4176 | } |
4153 | } | | 4177 | } |
4154 | #endif | | 4178 | #endif |
4155 | | | 4179 | |
4156 | pmap_release_pmap_lock(pm); | | 4180 | pmap_release_pmap_lock(pm); |
| | | 4181 | kpreempt_enable(); |
4157 | } | | 4182 | } |
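
On the non-ARM_MMU_EXTENDED path, pmap_protect() defers TLB maintenance for large ranges: flags accumulates the PVF_EXEC/PVF_REF bits of each touched mapping, and once the range spans at least four pages a single whole-pmap flush replaces the per-entry shootdowns. The decision compresses to roughly this sketch (invented helper name; flags as accumulated in the loop above):

/* Sketch: deferred-flush heuristic from pmap_protect() above. */
static inline void
pmap_protect_flush_sketch(pmap_t pm, vaddr_t sva, vaddr_t eva, u_int flags)
{
	if (eva - sva >= PAGE_SIZE * 4) {	/* big range: flush once */
		if (PV_BEEN_EXECD(flags))
			pmap_tlb_flushID(pm);	/* exec pages changed: I+D */
		else if (PV_BEEN_REFD(flags))
			pmap_tlb_flushD(pm);	/* data only */
	} else {
		pmap_tlb_flush_SE(pm, sva, flags); /* small range: per page */
	}
}
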
4158 | | | 4183 | |
4159 | void | | 4184 | void |
4160 | pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) | | 4185 | pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) |
4161 | { | | 4186 | { |
4162 | struct l2_bucket *l2b; | | 4187 | struct l2_bucket *l2b; |
4163 | pt_entry_t *ptep; | | 4188 | pt_entry_t *ptep; |
4164 | vaddr_t next_bucket; | | 4189 | vaddr_t next_bucket; |
4165 | vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; | | 4190 | vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; |
4166 | | | 4191 | |
4167 | UVMHIST_FUNC(__func__); | | 4192 | UVMHIST_FUNC(__func__); |
4168 | UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx...%#jx", | | 4193 | UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx...%#jx",
4169 | (uintptr_t)pm, sva, eva, 0); | | 4194 | (uintptr_t)pm, sva, eva, 0); |
4170 | | | 4195 | |
4171 | pmap_acquire_pmap_lock(pm); | | 4196 | pmap_acquire_pmap_lock(pm); |
4172 | | | 4197 | |
4173 | while (sva < eva) { | | 4198 | while (sva < eva) { |
4174 | next_bucket = L2_NEXT_BUCKET_VA(sva); | | 4199 | next_bucket = L2_NEXT_BUCKET_VA(sva); |
4175 | if (next_bucket > eva) | | 4200 | if (next_bucket > eva) |
4176 | next_bucket = eva; | | 4201 | next_bucket = eva; |
4177 | | | 4202 | |
4178 | l2b = pmap_get_l2_bucket(pm, sva); | | 4203 | l2b = pmap_get_l2_bucket(pm, sva); |
4179 | if (l2b == NULL) { | | 4204 | if (l2b == NULL) { |
4180 | sva = next_bucket; | | 4205 | sva = next_bucket; |
4181 | continue; | | 4206 | continue; |
4182 | } | | 4207 | } |
4183 | | | 4208 | |
4184 | for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 4209 | for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
4185 | sva < next_bucket; | | 4210 | sva < next_bucket; |
4186 | sva += page_size, | | 4211 | sva += page_size, |
4187 | ptep += PAGE_SIZE / L2_S_SIZE, | | 4212 | ptep += PAGE_SIZE / L2_S_SIZE, |
4188 | page_size = PAGE_SIZE) { | | 4213 | page_size = PAGE_SIZE) { |
4189 | if (l2pte_valid_p(*ptep)) { | | 4214 | if (l2pte_valid_p(*ptep)) { |
4190 | cpu_icache_sync_range(sva, | | 4215 | cpu_icache_sync_range(sva, |
4191 | uimin(page_size, eva - sva)); | | 4216 | uimin(page_size, eva - sva)); |
4192 | } | | 4217 | } |
4193 | } | | 4218 | } |
4194 | } | | 4219 | } |
4195 | | | 4220 | |
4196 | pmap_release_pmap_lock(pm); | | 4221 | pmap_release_pmap_lock(pm); |
4197 | } | | 4222 | } |
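
The page_size initialisation in pmap_icache_sync_range() makes the first iteration cover only the remainder of a possibly unaligned first page; the loop header then resets it to PAGE_SIZE. A worked example, assuming PAGE_SIZE == 0x1000 and an unaligned start address:

/*
 * With sva == 0x10c40:
 *
 *	page_size = trunc_page(sva) + PAGE_SIZE - sva
 *	          = 0x10000 + 0x1000 - 0x10c40
 *	          = 0x3c0
 *
 * so the first cpu_icache_sync_range() call covers just the 0x3c0
 * bytes up to the next page boundary (or less, if eva is closer,
 * via the uimin() clamp above).
 */
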
4198 | | | 4223 | |
4199 | void | | 4224 | void |
4200 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) | | 4225 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) |
4201 | { | | 4226 | { |
4202 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4227 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4203 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 4228 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
4204 | | | 4229 | |
4205 | UVMHIST_FUNC(__func__); | | 4230 | UVMHIST_FUNC(__func__); |
4206 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx prot %#jx", | | 4231 | UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx prot %#jx", |
4207 | (uintptr_t)md, pa, prot, 0); | | 4232 | (uintptr_t)md, pa, prot, 0); |
4208 | | | 4233 | |
4209 | switch (prot) { | | 4234 | switch (prot) {
4210 | case VM_PROT_READ|VM_PROT_WRITE: | | 4235 | case VM_PROT_READ|VM_PROT_WRITE: |
4211 | #if defined(ARM_MMU_EXTENDED) | | 4236 | #if defined(ARM_MMU_EXTENDED) |
4212 | pmap_acquire_page_lock(md); | | 4237 | pmap_acquire_page_lock(md); |
4213 | pmap_clearbit(md, pa, PVF_EXEC); | | 4238 | pmap_clearbit(md, pa, PVF_EXEC); |
4214 | pmap_release_page_lock(md); | | 4239 | pmap_release_page_lock(md); |
4215 | break; | | 4240 | break; |
4216 | #endif | | 4241 | #endif |
4217 | case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: | | 4242 | case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: |
4218 | break; | | 4243 | break; |
4219 | | | 4244 | |
4220 | case VM_PROT_READ: | | 4245 | case VM_PROT_READ: |
4221 | #if defined(ARM_MMU_EXTENDED) | | 4246 | #if defined(ARM_MMU_EXTENDED) |
4222 | pmap_acquire_page_lock(md); | | 4247 | pmap_acquire_page_lock(md); |
4223 | pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC); | | 4248 | pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC); |
4224 | pmap_release_page_lock(md); | | 4249 | pmap_release_page_lock(md); |
4225 | break; | | 4250 | break; |
4226 | #endif | | 4251 | #endif |
4227 | case VM_PROT_READ|VM_PROT_EXECUTE: | | 4252 | case VM_PROT_READ|VM_PROT_EXECUTE: |
4228 | pmap_acquire_page_lock(md); | | 4253 | pmap_acquire_page_lock(md); |
4229 | pmap_clearbit(md, pa, PVF_WRITE); | | 4254 | pmap_clearbit(md, pa, PVF_WRITE); |
4230 | pmap_release_page_lock(md); | | 4255 | pmap_release_page_lock(md); |
4231 | break; | | 4256 | break; |
4232 | | | 4257 | |
4233 | default: | | 4258 | default: |
4234 | pmap_page_remove(md, pa); | | 4259 | pmap_page_remove(md, pa); |
4235 | break; | | 4260 | break; |
4236 | } | | 4261 | } |
4237 | } | | 4262 | } |
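
A summary of what pmap_page_protect() clears for each protection combination; this restates the switch above rather than adding behaviour:

/*
 * pmap_page_protect() above, per prot (ARM_MMU_EXTENDED build):
 *	READ|WRITE|EXECUTE  -> nothing cleared
 *	READ|WRITE          -> clear PVF_EXEC
 *	READ|EXECUTE        -> clear PVF_WRITE
 *	READ                -> clear PVF_WRITE|PVF_EXEC
 *	anything else       -> pmap_page_remove() (drop all mappings)
 * Without ARM_MMU_EXTENDED the READ|WRITE and READ cases fall
 * through to their |EXECUTE counterparts, since there is no XN bit
 * with which to revoke execute permission separately.
 */
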
4238 | | | 4263 | |
4239 | /* | | 4264 | /* |
4240 | * pmap_clear_modify: | | 4265 | * pmap_clear_modify: |
4241 | * | | 4266 | * |
4242 | * Clear the "modified" attribute for a page. | | 4267 | * Clear the "modified" attribute for a page. |
4243 | */ | | 4268 | */ |
4244 | bool | | 4269 | bool |
4245 | pmap_clear_modify(struct vm_page *pg) | | 4270 | pmap_clear_modify(struct vm_page *pg) |
4246 | { | | 4271 | { |
4247 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4272 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4248 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 4273 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
4249 | bool rv; | | 4274 | bool rv; |
4250 | | | 4275 | |
4251 | pmap_acquire_page_lock(md); | | 4276 | pmap_acquire_page_lock(md); |
4252 | | | 4277 | |
4253 | if (md->pvh_attrs & PVF_MOD) { | | 4278 | if (md->pvh_attrs & PVF_MOD) { |
4254 | rv = true; | | 4279 | rv = true; |
4255 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 4280 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
4256 | /* | | 4281 | /* |
4257 | * If we are going to clear the modified bit and there are | | 4282 | * If we are going to clear the modified bit and there are |
4258 | * no other modified bits set, flush the page to memory and | | 4283 | * no other modified bits set, flush the page to memory and |
4259 | * mark it clean. | | 4284 | * mark it clean. |
4260 | */ | | 4285 | */ |
4261 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD) | | 4286 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD) |
4262 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); | | 4287 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); |
4263 | #endif | | 4288 | #endif |
4264 | pmap_clearbit(md, pa, PVF_MOD); | | 4289 | pmap_clearbit(md, pa, PVF_MOD); |
4265 | } else { | | 4290 | } else { |
4266 | rv = false; | | 4291 | rv = false; |
4267 | } | | 4292 | } |
4268 | pmap_release_page_lock(md); | | 4293 | pmap_release_page_lock(md); |
4269 | | | 4294 | |
4270 | return rv; | | 4295 | return rv; |
4271 | } | | 4296 | } |
4272 | | | 4297 | |
4273 | /* | | 4298 | /* |
4274 | * pmap_clear_reference: | | 4299 | * pmap_clear_reference: |
4275 | * | | 4300 | * |
4276 | * Clear the "referenced" attribute for a page. | | 4301 | * Clear the "referenced" attribute for a page. |
4277 | */ | | 4302 | */ |
4278 | bool | | 4303 | bool |
4279 | pmap_clear_reference(struct vm_page *pg) | | 4304 | pmap_clear_reference(struct vm_page *pg) |
4280 | { | | 4305 | { |
4281 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4306 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4282 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 4307 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
4283 | bool rv; | | 4308 | bool rv; |
4284 | | | 4309 | |
4285 | pmap_acquire_page_lock(md); | | 4310 | pmap_acquire_page_lock(md); |
4286 | | | 4311 | |
4287 | if (md->pvh_attrs & PVF_REF) { | | 4312 | if (md->pvh_attrs & PVF_REF) { |
4288 | rv = true; | | 4313 | rv = true; |
4289 | pmap_clearbit(md, pa, PVF_REF); | | 4314 | pmap_clearbit(md, pa, PVF_REF); |
4290 | } else { | | 4315 | } else { |
4291 | rv = false; | | 4316 | rv = false; |
4292 | } | | 4317 | } |
4293 | pmap_release_page_lock(md); | | 4318 | pmap_release_page_lock(md); |
4294 | | | 4319 | |
4295 | return rv; | | 4320 | return rv; |
4296 | } | | 4321 | } |
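
pmap_clear_modify() and pmap_clear_reference() are two instances of the same test-and-clear idiom: report whether the attribute was set, and clear it under the page lock so the answer and the clear are atomic with respect to other updaters. A minimal sketch (invented name; pmap_clearbit() and the locking calls are the ones used above):

/* Sketch: the test-and-clear idiom behind both functions above. */
static bool
pmap_testclear_attr_sketch(struct vm_page_md *md, paddr_t pa, u_int bit)
{
	bool rv;

	pmap_acquire_page_lock(md);
	rv = (md->pvh_attrs & bit) != 0;
	if (rv)
		pmap_clearbit(md, pa, bit);	/* also fixes up PTEs */
	pmap_release_page_lock(md);

	return rv;
}
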
4297 | | | 4322 | |
4298 | /* | | 4323 | /* |
4299 | * pmap_is_modified: | | 4324 | * pmap_is_modified: |
4300 | * | | 4325 | * |
4301 | * Test if a page has the "modified" attribute. | | 4326 | * Test if a page has the "modified" attribute. |
4302 | */ | | 4327 | */ |
4303 | /* See <arm/arm32/pmap.h> */ | | 4328 | /* See <arm/arm32/pmap.h> */ |
4304 | | | 4329 | |
4305 | /* | | 4330 | /* |
4306 | * pmap_is_referenced: | | 4331 | * pmap_is_referenced: |
4307 | * | | 4332 | * |
4308 | * Test if a page has the "referenced" attribute. | | 4333 | * Test if a page has the "referenced" attribute. |
4309 | */ | | 4334 | */ |
4310 | /* See <arm/arm32/pmap.h> */ | | 4335 | /* See <arm/arm32/pmap.h> */ |
4311 | | | 4336 | |
4312 | #if defined(ARM_MMU_EXTENDED) && 0 | | 4337 | #if defined(ARM_MMU_EXTENDED) && 0 |
4313 | int | | 4338 | int |
4314 | pmap_prefetchabt_fixup(void *v) | | 4339 | pmap_prefetchabt_fixup(void *v) |
4315 | { | | 4340 | { |
4316 | struct trapframe * const tf = v; | | 4341 | struct trapframe * const tf = v; |
4317 | vaddr_t va = trunc_page(tf->tf_pc); | | 4342 | vaddr_t va = trunc_page(tf->tf_pc); |
4318 | int rv = ABORT_FIXUP_FAILED; | | 4343 | int rv = ABORT_FIXUP_FAILED; |
4319 | | | 4344 | |
4320 | if (!TRAP_USERMODE(tf) && va < VM_MAXUSER_ADDRESS) | | 4345 | if (!TRAP_USERMODE(tf) && va < VM_MAXUSER_ADDRESS) |
4321 | return rv; | | 4346 | return rv; |
4322 | | | 4347 | |
4323 | kpreempt_disable(); | | 4348 | kpreempt_disable(); |
4324 | pmap_t pm = curcpu()->ci_pmap_cur; | | 4349 | pmap_t pm = curcpu()->ci_pmap_cur; |
4325 | const size_t l1slot = l1pte_index(va); | | 4350 | const size_t l1slot = l1pte_index(va); |
4326 | struct l2_dtable * const l2 = pm->pm_l2[L2_IDX(l1slot)]; | | 4351 | struct l2_dtable * const l2 = pm->pm_l2[L2_IDX(l1slot)]; |
4327 | if (l2 == NULL) | | 4352 | if (l2 == NULL) |
4328 | goto out; | | 4353 | goto out; |
4329 | | | 4354 | |
4330 | struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; | | 4355 | struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; |
4331 | if (l2b->l2b_kva == NULL) | | 4356 | if (l2b->l2b_kva == NULL) |
4332 | goto out; | | 4357 | goto out; |
4333 | | | 4358 | |
4334 | /* | | 4359 | /* |
4335 | * Check the PTE itself. | | 4360 | * Check the PTE itself. |
4336 | */ | | 4361 | */ |
4337 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 4362 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; |
4338 | const pt_entry_t opte = *ptep; | | 4363 | const pt_entry_t opte = *ptep; |
4339 | if ((opte & L2_S_PROT_U) == 0 || (opte & L2_XS_XN) == 0) | | 4364 | if ((opte & L2_S_PROT_U) == 0 || (opte & L2_XS_XN) == 0) |
4340 | goto out; | | 4365 | goto out; |
4341 | | | 4366 | |
4342 | paddr_t pa = l2pte_pa(opte); | | 4367 | paddr_t pa = l2pte_pa(opte); |
4343 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); | | 4368 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); |
4344 | KASSERT(pg != NULL); | | 4369 | KASSERT(pg != NULL); |
4345 | | | 4370 | |
4346 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 4371 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
4347 | | | 4372 | |
4348 | pmap_acquire_page_lock(md); | | 4373 | pmap_acquire_page_lock(md); |
4349 | struct pv_entry * const pv = pmap_find_pv(md, pm, va); | | 4374 | struct pv_entry * const pv = pmap_find_pv(md, pm, va); |
4350 | KASSERT(pv != NULL); | | 4375 | KASSERT(pv != NULL); |
4351 | | | 4376 | |
4352 | if (PV_IS_EXEC_P(pv->pv_flags)) { | | 4377 | if (PV_IS_EXEC_P(pv->pv_flags)) { |
4353 | l2pte_reset(ptep); | | 4378 | l2pte_reset(ptep); |
4354 | PTE_SYNC(ptep); | | 4379 | PTE_SYNC(ptep); |
4355 | pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); | | 4380 | pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); |
4356 | if (!PV_IS_EXEC_P(md->pvh_attrs)) { | | 4381 | if (!PV_IS_EXEC_P(md->pvh_attrs)) { |
4357 | pmap_syncicache_page(md, pa); | | 4382 | pmap_syncicache_page(md, pa); |
4358 | } | | 4383 | } |
4359 | rv = ABORT_FIXUP_RETURN; | | 4384 | rv = ABORT_FIXUP_RETURN; |
4360 | l2pte_set(ptep, opte & ~L2_XS_XN, 0); | | 4385 | l2pte_set(ptep, opte & ~L2_XS_XN, 0); |
4361 | PTE_SYNC(ptep); | | 4386 | PTE_SYNC(ptep); |
4362 | } | | 4387 | } |
4363 | pmap_release_page_lock(md); | | 4388 | pmap_release_page_lock(md); |
4364 | | | 4389 | |
4365 | out: | | 4390 | out: |
4366 | kpreempt_enable(); | | 4391 | kpreempt_enable(); |
| | | 4392 | |
4367 | return rv; | | 4393 | return rv; |
4368 | } | | 4394 | } |
4369 | #endif | | 4395 | #endif |
4370 | | | 4396 | |
4371 | int | | 4397 | int |
4372 | pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) | | 4398 | pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) |
4373 | { | | 4399 | { |
4374 | struct l2_dtable *l2; | | 4400 | struct l2_dtable *l2; |
4375 | struct l2_bucket *l2b; | | 4401 | struct l2_bucket *l2b; |
4376 | paddr_t pa; | | 4402 | paddr_t pa; |
4377 | const size_t l1slot = l1pte_index(va); | | 4403 | const size_t l1slot = l1pte_index(va); |
4378 | int rv = 0; | | 4404 | int rv = 0; |
4379 | | | 4405 | |
4380 | UVMHIST_FUNC(__func__); | | 4406 | UVMHIST_FUNC(__func__); |
4381 | UVMHIST_CALLARGS(maphist, "pm=%#jx, va=%#jx, ftype=%#jx, user=%jd", | | 4407 | UVMHIST_CALLARGS(maphist, "pm=%#jx, va=%#jx, ftype=%#jx, user=%jd", |
4382 | (uintptr_t)pm, va, ftype, user); | | 4408 | (uintptr_t)pm, va, ftype, user); |
4383 | | | 4409 | |
4384 | va = trunc_page(va); | | 4410 | va = trunc_page(va); |
4385 | | | 4411 | |
4386 | KASSERT(!user || (pm != pmap_kernel())); | | 4412 | KASSERT(!user || (pm != pmap_kernel())); |
4387 | | | 4413 | |
4388 | #ifdef ARM_MMU_EXTENDED | | 4414 | #ifdef ARM_MMU_EXTENDED |
4389 | UVMHIST_LOG(maphist, " ti=%#jx pai=%#jx asid=%#jx", | | 4415 | UVMHIST_LOG(maphist, " ti=%#jx pai=%#jx asid=%#jx", |
4390 | (uintptr_t)cpu_tlb_info(curcpu()), | | 4416 | (uintptr_t)cpu_tlb_info(curcpu()), |
4391 | (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())), | | 4417 | (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())), |
4392 | (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0); | | 4418 | (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0); |
4393 | #endif | | 4419 | #endif |
4394 | | | 4420 | |
| | | 4421 | kpreempt_disable(); |
4395 | pmap_acquire_pmap_lock(pm); | | 4422 | pmap_acquire_pmap_lock(pm); |
4396 | | | 4423 | |
4397 | /* | | 4424 | /* |
4398 | * If there is no l2_dtable for this address, then the process | | 4425 | * If there is no l2_dtable for this address, then the process |
4399 | * has no business accessing it. | | 4426 | * has no business accessing it. |
4400 | * | | 4427 | * |
4401 | * Note: This will catch userland processes trying to access | | 4428 | * Note: This will catch userland processes trying to access |
4402 | * kernel addresses. | | 4429 | * kernel addresses. |
4403 | */ | | 4430 | */ |
4404 | l2 = pm->pm_l2[L2_IDX(l1slot)]; | | 4431 | l2 = pm->pm_l2[L2_IDX(l1slot)]; |
4405 | if (l2 == NULL) { | | 4432 | if (l2 == NULL) { |
4406 | UVMHIST_LOG(maphist, " no l2 for l1slot %#jx", l1slot, 0, 0, 0); | | 4433 | UVMHIST_LOG(maphist, " no l2 for l1slot %#jx", l1slot, 0, 0, 0); |
4407 | goto out; | | 4434 | goto out; |
4408 | } | | 4435 | } |
4409 | | | 4436 | |
4410 | /* | | 4437 | /* |
4411 | * Likewise if there is no L2 descriptor table | | 4438 | * Likewise if there is no L2 descriptor table |
4412 | */ | | 4439 | */ |
4413 | l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; | | 4440 | l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; |
4414 | if (l2b->l2b_kva == NULL) { | | 4441 | if (l2b->l2b_kva == NULL) { |
4415 | UVMHIST_LOG(maphist, " <-- done (no ptep for l1slot %#jx)", | | 4442 | UVMHIST_LOG(maphist, " <-- done (no ptep for l1slot %#jx)", |
4416 | l1slot, 0, 0, 0); | | 4443 | l1slot, 0, 0, 0); |
4417 | goto out; | | 4444 | goto out; |
4418 | } | | 4445 | } |
4419 | | | 4446 | |
4420 | /* | | 4447 | /* |
4421 | * Check the PTE itself. | | 4448 | * Check the PTE itself. |
4422 | */ | | 4449 | */ |
4423 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 4450 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; |
4424 | pt_entry_t const opte = *ptep; | | 4451 | pt_entry_t const opte = *ptep; |
4425 | if (opte == 0 || (opte & L2_TYPE_MASK) == L2_TYPE_L) { | | 4452 | if (opte == 0 || (opte & L2_TYPE_MASK) == L2_TYPE_L) { |
4426 | UVMHIST_LOG(maphist, " <-- done (empty pte)", | | 4453 | UVMHIST_LOG(maphist, " <-- done (empty pte)", |
4427 | 0, 0, 0, 0); | | 4454 | 0, 0, 0, 0); |
4428 | goto out; | | 4455 | goto out; |
4429 | } | | 4456 | } |
4430 | | | 4457 | |
4431 | #ifndef ARM_HAS_VBAR | | 4458 | #ifndef ARM_HAS_VBAR |
4432 | /* | | 4459 | /* |
4433 | * Catch a userland access to the vector page mapped at 0x0 | | 4460 | * Catch a userland access to the vector page mapped at 0x0 |
4434 | */ | | 4461 | */ |
4435 | if (user && (opte & L2_S_PROT_U) == 0) { | | 4462 | if (user && (opte & L2_S_PROT_U) == 0) { |
4436 | UVMHIST_LOG(maphist, " <-- done (vector_page)", 0, 0, 0, 0); | | 4463 | UVMHIST_LOG(maphist, " <-- done (vector_page)", 0, 0, 0, 0); |
4437 | goto out; | | 4464 | goto out; |
4438 | } | | 4465 | } |
4439 | #endif | | 4466 | #endif |
4440 | | | 4467 | |
4441 | pa = l2pte_pa(opte); | | 4468 | pa = l2pte_pa(opte); |
4442 | UVMHIST_LOG(maphist, " pa %#jx opte %#jx ", pa, opte, 0, 0); | | 4469 | UVMHIST_LOG(maphist, " pa %#jx opte %#jx ", pa, opte, 0, 0); |
4443 | | | 4470 | |
4444 | if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(opte)) { | | 4471 | if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(opte)) { |
4445 | /* | | 4472 | /* |
4446 | * This looks like a good candidate for "page modified" | | 4473 | * This looks like a good candidate for "page modified" |
4447 | * emulation... | | 4474 | * emulation... |
4448 | */ | | 4475 | */ |
4449 | struct pv_entry *pv; | | 4476 | struct pv_entry *pv; |
4450 | struct vm_page *pg; | | 4477 | struct vm_page *pg; |
4451 | | | 4478 | |
4452 | /* Extract the physical address of the page */ | | 4479 | /* Extract the physical address of the page */ |
4453 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { | | 4480 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { |
4454 | UVMHIST_LOG(maphist, " <-- done (mod/ref unmanaged page)", 0, 0, 0, 0); | | 4481 | UVMHIST_LOG(maphist, " <-- done (mod/ref unmanaged page)", 0, 0, 0, 0); |
4455 | goto out; | | 4482 | goto out; |
4456 | } | | 4483 | } |
4457 | | | 4484 | |
4458 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4485 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4459 | | | 4486 | |
4460 | /* Get the current flags for this page. */ | | 4487 | /* Get the current flags for this page. */ |
4461 | pmap_acquire_page_lock(md); | | 4488 | pmap_acquire_page_lock(md); |
4462 | pv = pmap_find_pv(md, pm, va); | | 4489 | pv = pmap_find_pv(md, pm, va); |
4463 | if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) { | | 4490 | if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) { |
4464 | pmap_release_page_lock(md); | | 4491 | pmap_release_page_lock(md); |
4465 | UVMHIST_LOG(maphist, " <-- done (mod/ref emul: no PV)", 0, 0, 0, 0); | | 4492 | UVMHIST_LOG(maphist, " <-- done (mod/ref emul: no PV)", 0, 0, 0, 0); |
4466 | goto out; | | 4493 | goto out; |
4467 | } | | 4494 | } |
4468 | | | 4495 | |
4469 | /* | | 4496 | /* |
4470 | * Do the flags say this page is writable? If not then it | | 4497 | * Do the flags say this page is writable? If not then it |
4471 | * is a genuine write fault. If yes then the write fault is | | 4498 | * is a genuine write fault. If yes then the write fault is |
4472 | * our fault as we did not reflect the write access in the | | 4499 | * our fault as we did not reflect the write access in the |
4473 | * PTE. Now that we know a write has occurred, we can correct | | 4500 | * PTE. Now that we know a write has occurred, we can correct
4474 | * this and also set the modified bit. | | 4501 | * this and also set the modified bit.
4475 | */ | | 4502 | */ |
4476 | if ((pv->pv_flags & PVF_WRITE) == 0) { | | 4503 | if ((pv->pv_flags & PVF_WRITE) == 0) { |
4477 | pmap_release_page_lock(md); | | 4504 | pmap_release_page_lock(md); |
4478 | UVMHIST_LOG(maphist, " <-- done (write fault)", 0, 0, 0, 0); | | 4505 | UVMHIST_LOG(maphist, " <-- done (write fault)", 0, 0, 0, 0); |
4479 | goto out; | | 4506 | goto out; |
4480 | } | | 4507 | } |
4481 | | | 4508 | |
4482 | md->pvh_attrs |= PVF_REF | PVF_MOD; | | 4509 | md->pvh_attrs |= PVF_REF | PVF_MOD; |
4483 | pv->pv_flags |= PVF_REF | PVF_MOD; | | 4510 | pv->pv_flags |= PVF_REF | PVF_MOD; |
4484 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 4511 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
4485 | /* | | 4512 | /* |
4486 | * If there are cacheable mappings for this page, mark it dirty. | | 4513 | * If there are cacheable mappings for this page, mark it dirty. |
4487 | */ | | 4514 | */ |
4488 | if ((md->pvh_attrs & PVF_NC) == 0) | | 4515 | if ((md->pvh_attrs & PVF_NC) == 0) |
4489 | md->pvh_attrs |= PVF_DIRTY; | | 4516 | md->pvh_attrs |= PVF_DIRTY; |
4490 | #endif | | 4517 | #endif |
4491 | #ifdef ARM_MMU_EXTENDED | | 4518 | #ifdef ARM_MMU_EXTENDED |
4492 | if (md->pvh_attrs & PVF_EXEC) { | | 4519 | if (md->pvh_attrs & PVF_EXEC) { |
4493 | md->pvh_attrs &= ~PVF_EXEC; | | 4520 | md->pvh_attrs &= ~PVF_EXEC; |
4494 | PMAPCOUNT(exec_discarded_modfixup); | | 4521 | PMAPCOUNT(exec_discarded_modfixup); |
4495 | } | | 4522 | } |
4496 | #endif | | 4523 | #endif |
4497 | pmap_release_page_lock(md); | | 4524 | pmap_release_page_lock(md); |
4498 | | | 4525 | |
4499 | /* | | 4526 | /* |
4500 | * Re-enable write permissions for the page. No need to call | | 4527 | * Re-enable write permissions for the page. No need to call |
4501 | * pmap_vac_me_harder(), since this is just a | | 4528 | * pmap_vac_me_harder(), since this is just a |
4502 | * modified-emulation fault, and the PVF_WRITE bit isn't | | 4529 | * modified-emulation fault, and the PVF_WRITE bit isn't |
4503 | * changing. We've already set the cacheable bits based on | | 4530 | * changing. We've already set the cacheable bits based on |
4504 | * the assumption that we can write to this page. | | 4531 | * the assumption that we can write to this page. |
4505 | */ | | 4532 | */ |
4506 | const pt_entry_t npte = | | 4533 | const pt_entry_t npte = |
4507 | l2pte_set_writable((opte & ~L2_TYPE_MASK) | L2_S_PROTO) | | 4534 | l2pte_set_writable((opte & ~L2_TYPE_MASK) | L2_S_PROTO) |
4508 | #ifdef ARM_MMU_EXTENDED | | 4535 | #ifdef ARM_MMU_EXTENDED |
4509 | | (pm != pmap_kernel() ? L2_XS_nG : 0) | | 4536 | | (pm != pmap_kernel() ? L2_XS_nG : 0) |
4510 | #endif | | 4537 | #endif |
4511 | | 0; | | 4538 | | 0; |
4512 | l2pte_reset(ptep); | | 4539 | l2pte_reset(ptep); |
4513 | PTE_SYNC(ptep); | | 4540 | PTE_SYNC(ptep); |
4514 | pmap_tlb_flush_SE(pm, va, | | 4541 | pmap_tlb_flush_SE(pm, va, |
4515 | (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); | | 4542 | (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); |
4516 | l2pte_set(ptep, npte, 0); | | 4543 | l2pte_set(ptep, npte, 0); |
4517 | PTE_SYNC(ptep); | | 4544 | PTE_SYNC(ptep); |
4518 | PMAPCOUNT(fixup_mod); | | 4545 | PMAPCOUNT(fixup_mod); |
4519 | rv = 1; | | 4546 | rv = 1; |
4520 | UVMHIST_LOG(maphist, " <-- done (mod/ref emul: changed pte " | | 4547 | UVMHIST_LOG(maphist, " <-- done (mod/ref emul: changed pte " |
4521 | "from %#jx to %#jx)", opte, npte, 0, 0); | | 4548 | "from %#jx to %#jx)", opte, npte, 0, 0); |
4522 | } else if ((opte & L2_TYPE_MASK) == L2_TYPE_INV) { | | 4549 | } else if ((opte & L2_TYPE_MASK) == L2_TYPE_INV) { |
4523 | /* | | 4550 | /* |
4524 | * This looks like a good candidate for "page referenced" | | 4551 | * This looks like a good candidate for "page referenced" |
4525 | * emulation. | | 4552 | * emulation. |
4526 | */ | | 4553 | */ |
4527 | struct vm_page *pg; | | 4554 | struct vm_page *pg; |
4528 | | | 4555 | |
4529 | /* Extract the physical address of the page */ | | 4556 | /* Extract the physical address of the page */ |
4530 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { | | 4557 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { |
4531 | UVMHIST_LOG(maphist, " <-- done (ref emul: unmanaged page)", 0, 0, 0, 0); | | 4558 | UVMHIST_LOG(maphist, " <-- done (ref emul: unmanaged page)", 0, 0, 0, 0); |
4532 | goto out; | | 4559 | goto out; |
4533 | } | | 4560 | } |
4534 | | | 4561 | |
4535 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4562 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4536 | | | 4563 | |
4537 | /* Get the current flags for this page. */ | | 4564 | /* Get the current flags for this page. */ |
4538 | pmap_acquire_page_lock(md); | | 4565 | pmap_acquire_page_lock(md); |
4539 | struct pv_entry *pv = pmap_find_pv(md, pm, va); | | 4566 | struct pv_entry *pv = pmap_find_pv(md, pm, va); |
4540 | if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) { | | 4567 | if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) { |
4541 | pmap_release_page_lock(md); | | 4568 | pmap_release_page_lock(md); |
4542 | UVMHIST_LOG(maphist, " <-- done (ref emul no PV)", 0, 0, 0, 0); | | 4569 | UVMHIST_LOG(maphist, " <-- done (ref emul no PV)", 0, 0, 0, 0); |
4543 | goto out; | | 4570 | goto out; |
4544 | } | | 4571 | } |
4545 | | | 4572 | |
4546 | md->pvh_attrs |= PVF_REF; | | 4573 | md->pvh_attrs |= PVF_REF; |
4547 | pv->pv_flags |= PVF_REF; | | 4574 | pv->pv_flags |= PVF_REF; |
4548 | | | 4575 | |
4549 | pt_entry_t npte = | | 4576 | pt_entry_t npte = |
4550 | l2pte_set_readonly((opte & ~L2_TYPE_MASK) | L2_S_PROTO); | | 4577 | l2pte_set_readonly((opte & ~L2_TYPE_MASK) | L2_S_PROTO); |
4551 | #ifdef ARM_MMU_EXTENDED | | 4578 | #ifdef ARM_MMU_EXTENDED |
4552 | if (pm != pmap_kernel()) { | | 4579 | if (pm != pmap_kernel()) { |
4553 | npte |= L2_XS_nG; | | 4580 | npte |= L2_XS_nG; |
4554 | } | | 4581 | } |
4555 | /* | | 4582 | /* |
4556 | * If we got called from prefetch abort, then ftype will have | | 4583 | * If we got called from prefetch abort, then ftype will have |
4557 | * VM_PROT_EXECUTE set. Now see if we have no-execute set in | | 4584 | * VM_PROT_EXECUTE set. Now see if we have no-execute set in |
4558 | * the PTE. | | 4585 | * the PTE. |
4559 | */ | | 4586 | */ |
4560 | if (user && (ftype & VM_PROT_EXECUTE) && (npte & L2_XS_XN)) { | | 4587 | if (user && (ftype & VM_PROT_EXECUTE) && (npte & L2_XS_XN)) { |
4561 | /* | | 4588 | /* |
4562 | * Is this a mapping of an executable page? | | 4589 | * Is this a mapping of an executable page? |
4563 | */ | | 4590 | */ |
4564 | if ((pv->pv_flags & PVF_EXEC) == 0) { | | 4591 | if ((pv->pv_flags & PVF_EXEC) == 0) { |
4565 | pmap_release_page_lock(md); | | 4592 | pmap_release_page_lock(md); |
4566 | UVMHIST_LOG(maphist, " <-- done (ref emul: no exec)", | | 4593 | UVMHIST_LOG(maphist, " <-- done (ref emul: no exec)", |
4567 | 0, 0, 0, 0); | | 4594 | 0, 0, 0, 0); |
4568 | goto out; | | 4595 | goto out; |
4569 | } | | 4596 | } |
4570 | /* | | 4597 | /* |
4571 | * If we haven't synced the page, do so now. | | 4598 | * If we haven't synced the page, do so now. |
4572 | */ | | 4599 | */ |
4573 | if ((md->pvh_attrs & PVF_EXEC) == 0) { | | 4600 | if ((md->pvh_attrs & PVF_EXEC) == 0) { |
4574 | UVMHIST_LOG(maphist, " ref emul: syncicache " | | 4601 | UVMHIST_LOG(maphist, " ref emul: syncicache " |
4575 | "page #%#jx", pa, 0, 0, 0); | | 4602 | "page #%#jx", pa, 0, 0, 0); |
4576 | pmap_syncicache_page(md, pa); | | 4603 | pmap_syncicache_page(md, pa); |
4577 | PMAPCOUNT(fixup_exec); | | 4604 | PMAPCOUNT(fixup_exec); |
4578 | } | | 4605 | } |
4579 | npte &= ~L2_XS_XN; | | 4606 | npte &= ~L2_XS_XN; |
4580 | } | | 4607 | } |
4581 | #endif /* ARM_MMU_EXTENDED */ | | 4608 | #endif /* ARM_MMU_EXTENDED */ |
4582 | pmap_release_page_lock(md); | | 4609 | pmap_release_page_lock(md); |
4583 | l2pte_reset(ptep); | | 4610 | l2pte_reset(ptep); |
4584 | PTE_SYNC(ptep); | | 4611 | PTE_SYNC(ptep); |
4585 | pmap_tlb_flush_SE(pm, va, | | 4612 | pmap_tlb_flush_SE(pm, va, |
4586 | (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); | | 4613 | (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); |
4587 | l2pte_set(ptep, npte, 0); | | 4614 | l2pte_set(ptep, npte, 0); |
4588 | PTE_SYNC(ptep); | | 4615 | PTE_SYNC(ptep); |
4589 | PMAPCOUNT(fixup_ref); | | 4616 | PMAPCOUNT(fixup_ref); |
4590 | rv = 1; | | 4617 | rv = 1; |
4591 | UVMHIST_LOG(maphist, " <-- done (ref emul: changed pte from " | | 4618 | UVMHIST_LOG(maphist, " <-- done (ref emul: changed pte from " |
4592 | "%#jx to %#jx)", opte, npte, 0, 0); | | 4619 | "%#jx to %#jx)", opte, npte, 0, 0); |
4593 | #ifdef ARM_MMU_EXTENDED | | 4620 | #ifdef ARM_MMU_EXTENDED |
4594 | } else if (user && (ftype & VM_PROT_EXECUTE) && (opte & L2_XS_XN)) { | | 4621 | } else if (user && (ftype & VM_PROT_EXECUTE) && (opte & L2_XS_XN)) { |
4595 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); | | 4622 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); |
4596 | if (pg == NULL) { | | 4623 | if (pg == NULL) { |
4597 | UVMHIST_LOG(maphist, " <-- done (unmanaged page)", 0, 0, 0, 0); | | 4624 | UVMHIST_LOG(maphist, " <-- done (unmanaged page)", 0, 0, 0, 0); |
4598 | goto out; | | 4625 | goto out; |
4599 | } | | 4626 | } |
4600 | | | 4627 | |
4601 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 4628 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
4602 | | | 4629 | |
4603 | /* Get the current flags for this page. */ | | 4630 | /* Get the current flags for this page. */ |
4604 | pmap_acquire_page_lock(md); | | 4631 | pmap_acquire_page_lock(md); |
4605 | struct pv_entry * const pv = pmap_find_pv(md, pm, va); | | 4632 | struct pv_entry * const pv = pmap_find_pv(md, pm, va); |
4606 | if (pv == NULL || (pv->pv_flags & PVF_EXEC) == 0) { | | 4633 | if (pv == NULL || (pv->pv_flags & PVF_EXEC) == 0) { |
4607 | pmap_release_page_lock(md); | | 4634 | pmap_release_page_lock(md); |
4608 | UVMHIST_LOG(maphist, " <-- done (no PV or not EXEC)", 0, 0, 0, 0); | | 4635 | UVMHIST_LOG(maphist, " <-- done (no PV or not EXEC)", 0, 0, 0, 0); |
4609 | goto out; | | 4636 | goto out; |
4610 | } | | 4637 | } |
4611 | | | 4638 | |
4612 | /* | | 4639 | /* |
4613 | * If we haven't synced the page, do so now. | | 4640 | * If we haven't synced the page, do so now. |
4614 | */ | | 4641 | */ |
4615 | if ((md->pvh_attrs & PVF_EXEC) == 0) { | | 4642 | if ((md->pvh_attrs & PVF_EXEC) == 0) { |
4616 | UVMHIST_LOG(maphist, "syncicache page #%#jx", | | 4643 | UVMHIST_LOG(maphist, "syncicache page #%#jx", |
4617 | pa, 0, 0, 0); | | 4644 | pa, 0, 0, 0); |
4618 | pmap_syncicache_page(md, pa); | | 4645 | pmap_syncicache_page(md, pa); |
4619 | } | | 4646 | } |
4620 | pmap_release_page_lock(md); | | 4647 | pmap_release_page_lock(md); |
4621 | /* | | 4648 | /* |
4622 | * Turn off no-execute. | | 4649 | * Turn off no-execute. |
4623 | */ | | 4650 | */ |
4624 | KASSERT(opte & L2_XS_nG); | | 4651 | KASSERT(opte & L2_XS_nG); |
4625 | l2pte_reset(ptep); | | 4652 | l2pte_reset(ptep); |
4626 | PTE_SYNC(ptep); | | 4653 | PTE_SYNC(ptep); |
4627 | pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); | | 4654 | pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); |
4628 | l2pte_set(ptep, opte & ~L2_XS_XN, 0); | | 4655 | l2pte_set(ptep, opte & ~L2_XS_XN, 0); |
4629 | PTE_SYNC(ptep); | | 4656 | PTE_SYNC(ptep); |
4630 | rv = 1; | | 4657 | rv = 1; |
4631 | PMAPCOUNT(fixup_exec); | | 4658 | PMAPCOUNT(fixup_exec); |
4632 | UVMHIST_LOG(maphist, "exec: changed pte from %#jx to %#jx", | | 4659 | UVMHIST_LOG(maphist, "exec: changed pte from %#jx to %#jx", |
4633 | opte, opte & ~L2_XS_XN, 0, 0); | | 4660 | opte, opte & ~L2_XS_XN, 0, 0); |
4634 | #endif | | 4661 | #endif |
4635 | } | | 4662 | } |
4636 | | | 4663 | |
4637 | #ifndef ARM_MMU_EXTENDED | | 4664 | #ifndef ARM_MMU_EXTENDED |
4638 | /* | | 4665 | /* |
4639 | * We know there is a valid mapping here, so simply | | 4666 | * We know there is a valid mapping here, so simply |
4640 | * fix up the L1 if necessary. | | 4667 | * fix up the L1 if necessary. |
4641 | */ | | 4668 | */ |
4642 | pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot; | | 4669 | pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot; |
4643 | pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa | L1_C_DOM(pmap_domain(pm)); | | 4670 | pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa | L1_C_DOM(pmap_domain(pm)); |
4644 | if (*pdep != pde) { | | 4671 | if (*pdep != pde) { |
4645 | l1pte_setone(pdep, pde); | | 4672 | l1pte_setone(pdep, pde); |
4646 | PDE_SYNC(pdep); | | 4673 | PDE_SYNC(pdep); |
4647 | rv = 1; | | 4674 | rv = 1; |
4648 | PMAPCOUNT(fixup_pdes); | | 4675 | PMAPCOUNT(fixup_pdes); |
4649 | } | | 4676 | } |
4650 | #endif | | 4677 | #endif |
4651 | | | 4678 | |
4652 | #ifdef CPU_SA110 | | 4679 | #ifdef CPU_SA110 |
4653 | /* | | 4680 | /* |
4654 | * There are bugs in the rev K SA110. This is a check for one | | 4681 | * There are bugs in the rev K SA110. This is a check for one |
4655 | * of them. | | 4682 | * of them. |
4656 | */ | | 4683 | */ |
4657 | if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && | | 4684 | if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && |
4658 | curcpu()->ci_arm_cpurev < 3) { | | 4685 | curcpu()->ci_arm_cpurev < 3) { |
4659 | /* Always current pmap */ | | 4686 | /* Always current pmap */ |
4660 | if (l2pte_valid_p(opte)) { | | 4687 | if (l2pte_valid_p(opte)) { |
4661 | extern int kernel_debug; | | 4688 | extern int kernel_debug; |
4662 | if (kernel_debug & 1) { | | 4689 | if (kernel_debug & 1) { |
4663 | struct proc *p = curlwp->l_proc; | | 4690 | struct proc *p = curlwp->l_proc; |
4664 | printf("prefetch_abort: page is already " | | 4691 | printf("prefetch_abort: page is already " |
4665 | "mapped - pte=%p *pte=%08x\n", ptep, opte); | | 4692 | "mapped - pte=%p *pte=%08x\n", ptep, opte); |
4666 | printf("prefetch_abort: pc=%08lx proc=%p " | | 4693 | printf("prefetch_abort: pc=%08lx proc=%p " |
4667 | "process=%s\n", va, p, p->p_comm); | | 4694 | "process=%s\n", va, p, p->p_comm); |
4668 | printf("prefetch_abort: far=%08x fs=%x\n", | | 4695 | printf("prefetch_abort: far=%08x fs=%x\n", |
4669 | cpu_faultaddress(), cpu_faultstatus()); | | 4696 | cpu_faultaddress(), cpu_faultstatus()); |
4670 | } | | 4697 | } |
4671 | #ifdef DDB | | 4698 | #ifdef DDB |
4672 | if (kernel_debug & 2) | | 4699 | if (kernel_debug & 2) |
4673 | Debugger(); | | 4700 | Debugger(); |
4674 | #endif | | 4701 | #endif |
4675 | rv = 1; | | 4702 | rv = 1; |
4676 | } | | 4703 | } |
4677 | } | | 4704 | } |
4678 | #endif /* CPU_SA110 */ | | 4705 | #endif /* CPU_SA110 */ |
4679 | | | 4706 | |
4680 | #ifndef ARM_MMU_EXTENDED | | 4707 | #ifndef ARM_MMU_EXTENDED |
4681 | /* | | 4708 | /* |
4682 | * If 'rv == 0' at this point, it generally indicates that there is a | | 4709 | * If 'rv == 0' at this point, it generally indicates that there is a |
4683 | * stale TLB entry for the faulting address. That might be due to | | 4710 | * stale TLB entry for the faulting address. That might be due to
4684 | * pmap_needs_pte_sync being set incorrectly, so set it and retry. | | 4711 | * pmap_needs_pte_sync being set incorrectly, so set it and retry.
4685 | */ | | 4712 | */ |
4686 | if (rv == 0 | | 4713 | if (rv == 0 |
4687 | && pm->pm_l1->l1_domain_use_count == 1 | | 4714 | && pm->pm_l1->l1_domain_use_count == 1 |
4688 | && pmap_needs_pte_sync == 0) { | | 4715 | && pmap_needs_pte_sync == 0) { |
4689 | pmap_needs_pte_sync = 1; | | 4716 | pmap_needs_pte_sync = 1; |
4690 | PTE_SYNC(ptep); | | 4717 | PTE_SYNC(ptep); |
4691 | PMAPCOUNT(fixup_ptesync); | | 4718 | PMAPCOUNT(fixup_ptesync); |
4692 | rv = 1; | | 4719 | rv = 1; |
4693 | } | | 4720 | } |
4694 | #endif | | 4721 | #endif |
4695 | | | 4722 | |
4696 | #ifndef MULTIPROCESSOR | | 4723 | #ifndef MULTIPROCESSOR |
4697 | #if defined(DEBUG) || 1 | | 4724 | #if defined(DEBUG) || 1 |
4698 | /* | | 4725 | /* |
4699 | * If 'rv == 0' at this point, it generally indicates that there is a | | 4726 | * If 'rv == 0' at this point, it generally indicates that there is a |
4700 | * stale TLB entry for the faulting address. This happens when two or | | 4727 | * stale TLB entry for the faulting address. This happens when two or |
4701 | * more processes are sharing an L1. Since we don't flush the TLB on | | 4728 | * more processes are sharing an L1. Since we don't flush the TLB on |
4702 | * a context switch between such processes, we can take domain faults | | 4729 | * a context switch between such processes, we can take domain faults |
4703 | * for mappings which exist at the same VA in both processes. EVEN IF | | 4730 | * for mappings which exist at the same VA in both processes. EVEN IF |
4704 | * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for | | 4731 | * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for |
4705 | * example. | | 4732 | * example. |
4706 | * | | 4733 | * |
4707 | * This is extremely likely to happen if pmap_enter() updated the L1 | | 4734 | * This is extremely likely to happen if pmap_enter() updated the L1 |
4708 | * entry for a recently entered mapping. In this case, the TLB is | | 4735 | * entry for a recently entered mapping. In this case, the TLB is |
4709 | * flushed for the new mapping, but there may still be TLB entries for | | 4736 | * flushed for the new mapping, but there may still be TLB entries for |
4710 | * other mappings belonging to other processes in the 1MB range | | 4737 | * other mappings belonging to other processes in the 1MB range |
4711 | * covered by the L1 entry. | | 4738 | * covered by the L1 entry. |
4712 | * | | 4739 | * |
4713 | * Since 'rv == 0', we know that the L1 already contains the correct | | 4740 | * Since 'rv == 0', we know that the L1 already contains the correct |
4714 | * value, so the fault must be due to a stale TLB entry. | | 4741 | * value, so the fault must be due to a stale TLB entry. |
4715 | * | | 4742 | * |
4716 | * Since we always need to flush the TLB anyway in the case where we | | 4743 | * Since we always need to flush the TLB anyway in the case where we |
4717 | * fixed up the L1, or frobbed the L2 PTE, we effectively deal with | | 4744 | * fixed up the L1, or frobbed the L2 PTE, we effectively deal with |
4718 | * stale TLB entries dynamically. | | 4745 | * stale TLB entries dynamically. |
4719 | * | | 4746 | * |
4720 | * However, the above condition can ONLY happen if the current L1 is | | 4747 | * However, the above condition can ONLY happen if the current L1 is |
4721 | * being shared. If it happens when the L1 is unshared, it indicates | | 4748 | * being shared. If it happens when the L1 is unshared, it indicates |
4722 | * that other parts of the pmap are not doing their job WRT managing | | 4749 | * that other parts of the pmap are not doing their job WRT managing |
4723 | * the TLB. | | 4750 | * the TLB. |
4724 | */ | | 4751 | */ |
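/*
 * Concrete timeline of the shared-L1 case described above
 * (illustrative sketch only):
 *
 *	1. processes A and B share one L1, and both map a page at
 *	   the same va V.
 *	2. A faults on V; pmap_enter() fixes up the shared L1 slot
 *	   and flushes A's TLB entry for V.
 *	3. we switch to B without a TLB flush (same L1), so B still
 *	   holds a stale TLB entry in that 1MB range and takes a
 *	   domain fault.
 *	4. we arrive here with rv == 0: the L1 and L2 are already
 *	   correct, and only the TLB flush further below is needed.
 */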
4725 | if (rv == 0 | | 4752 | if (rv == 0 |
4726 | #ifndef ARM_MMU_EXTENDED | | 4753 | #ifndef ARM_MMU_EXTENDED |
4727 | && pm->pm_l1->l1_domain_use_count == 1 | | 4754 | && pm->pm_l1->l1_domain_use_count == 1 |
4728 | #endif | | 4755 | #endif |
4729 | && true) { | | 4756 | && true) { |
4730 | #ifdef DEBUG | | 4757 | #ifdef DEBUG |
4731 | extern int last_fault_code; | | 4758 | extern int last_fault_code; |
4732 | #else | | 4759 | #else |
4733 | int last_fault_code = ftype & VM_PROT_EXECUTE | | 4760 | int last_fault_code = ftype & VM_PROT_EXECUTE |
4734 | ? armreg_ifsr_read() | | 4761 | ? armreg_ifsr_read() |
4735 | : armreg_dfsr_read(); | | 4762 | : armreg_dfsr_read(); |
4736 | #endif | | 4763 | #endif |
4737 | printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", | | 4764 | printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", |
4738 | pm, va, ftype); | | 4765 | pm, va, ftype); |
4739 | printf("fixup: l2 %p, l2b %p, ptep %p, pte %#x\n", | | 4766 | printf("fixup: l2 %p, l2b %p, ptep %p, pte %#x\n", |
4740 | l2, l2b, ptep, opte); | | 4767 | l2, l2b, ptep, opte); |
4741 | | | 4768 | |
4742 | #ifndef ARM_MMU_EXTENDED | | 4769 | #ifndef ARM_MMU_EXTENDED |
4743 | printf("fixup: pdep %p, pde %#x, fsr %#x\n", | | 4770 | printf("fixup: pdep %p, pde %#x, fsr %#x\n", |
4744 | pdep, pde, last_fault_code); | | 4771 | pdep, pde, last_fault_code); |
4745 | #else | | 4772 | #else |
4746 | printf("fixup: pdep %p, pde %#x, ttbcr %#x\n", | | 4773 | printf("fixup: pdep %p, pde %#x, ttbcr %#x\n", |
4747 | &pmap_l1_kva(pm)[l1slot], pmap_l1_kva(pm)[l1slot], | | 4774 | &pmap_l1_kva(pm)[l1slot], pmap_l1_kva(pm)[l1slot], |
4748 | armreg_ttbcr_read()); | | 4775 | armreg_ttbcr_read()); |
4749 | printf("fixup: fsr %#x cpm %p casid %#x contextidr %#x dacr %#x\n", | | 4776 | printf("fixup: fsr %#x cpm %p casid %#x contextidr %#x dacr %#x\n", |
4750 | last_fault_code, curcpu()->ci_pmap_cur, | | 4777 | last_fault_code, curcpu()->ci_pmap_cur, |
4751 | curcpu()->ci_pmap_asid_cur, | | 4778 | curcpu()->ci_pmap_asid_cur, |
4752 | armreg_contextidr_read(), armreg_dacr_read()); | | 4779 | armreg_contextidr_read(), armreg_dacr_read()); |
4753 | #ifdef _ARM_ARCH_7 | | 4780 | #ifdef _ARM_ARCH_7 |
4754 | if (ftype & VM_PROT_WRITE) | | 4781 | if (ftype & VM_PROT_WRITE) |
4755 | armreg_ats1cuw_write(va); | | 4782 | armreg_ats1cuw_write(va); |
4756 | else | | 4783 | else |
4757 | armreg_ats1cur_write(va); | | 4784 | armreg_ats1cur_write(va); |
4758 | isb(); | | 4785 | isb(); |
4759 | printf("fixup: par %#x\n", armreg_par_read()); | | 4786 | printf("fixup: par %#x\n", armreg_par_read()); |
4760 | #endif | | 4787 | #endif |
4761 | #endif | | 4788 | #endif |
4762 | #ifdef DDB | | 4789 | #ifdef DDB |
4763 | extern int kernel_debug; | | 4790 | extern int kernel_debug; |
4764 | | | 4791 | |
4765 | if (kernel_debug & 2) { | | 4792 | if (kernel_debug & 2) { |
4766 | pmap_release_pmap_lock(pm); | | 4793 | pmap_release_pmap_lock(pm); |
4767 | #ifdef UVMHIST | | 4794 | #ifdef UVMHIST |
4768 | KERNHIST_DUMP(maphist); | | 4795 | KERNHIST_DUMP(maphist); |
4769 | #endif | | 4796 | #endif |
4770 | cpu_Debugger(); | | 4797 | cpu_Debugger(); |
4771 | pmap_acquire_pmap_lock(pm); | | 4798 | pmap_acquire_pmap_lock(pm); |
4772 | } | | 4799 | } |
4773 | #endif | | 4800 | #endif |
4774 | } | | 4801 | } |
4775 | #endif | | 4802 | #endif |
4776 | #endif | | 4803 | #endif |
4777 | | | 4804 | |
4778 | #ifndef ARM_MMU_EXTENDED | | 4805 | #ifndef ARM_MMU_EXTENDED |
4779 | /* Flush the TLB in the shared L1 case - see comment above */ | | 4806 | /* Flush the TLB in the shared L1 case - see comment above */ |
4780 | pmap_tlb_flush_SE(pm, va, | | 4807 | pmap_tlb_flush_SE(pm, va, |
4781 | (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); | | 4808 | (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); |
4782 | #endif | | 4809 | #endif |
4783 | | | 4810 | |
4784 | rv = 1; | | 4811 | rv = 1; |
4785 | | | 4812 | |
4786 | out: | | 4813 | out: |
4787 | pmap_release_pmap_lock(pm); | | 4814 | pmap_release_pmap_lock(pm); |
| | | 4815 | kpreempt_enable(); |
4788 | | | 4816 | |
4789 | return rv; | | 4817 | return rv; |
4790 | } | | 4818 | } |
4791 | | | 4819 | |
4792 | /* | | 4820 | /* |
4793 | * Routine: pmap_procwr | | 4821 | * Routine: pmap_procwr |
4794 | * | | 4822 | * |
4795 | * Function: | | 4823 | * Function: |
4796 | * Synchronize caches corresponding to [addr, addr+len) in p. | | 4824 | * Synchronize caches corresponding to [addr, addr+len) in p. |
4797 | * | | 4825 | * |
4798 | */ | | 4826 | */ |
4799 | void | | 4827 | void |
4800 | pmap_procwr(struct proc *p, vaddr_t va, int len) | | 4828 | pmap_procwr(struct proc *p, vaddr_t va, int len) |
4801 | { | | 4829 | { |
4802 | #ifndef ARM_MMU_EXTENDED | | 4830 | #ifndef ARM_MMU_EXTENDED |
4803 | | | 4831 | |
4804 | /* We only need to do anything if it is the current process. */ | | 4832 | /* We only need to do anything if it is the current process. */ |
4805 | if (p == curproc) | | 4833 | if (p == curproc) |
4806 | cpu_icache_sync_range(va, len); | | 4834 | cpu_icache_sync_range(va, len); |
4807 | #endif | | 4835 | #endif |
4808 | } | | 4836 | } |
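/*
 * Illustrative sketch (hypothetical caller, not part of the build):
 * a debugger-style path that patches another process's text is
 * expected to call pmap_procwr() afterwards so the I-cache sees the
 * new instructions:
 *
 *	error = process_write_text(p, addr, &insn, sizeof(insn));
 *	if (error == 0)
 *		pmap_procwr(p, addr, sizeof(insn));
 *
 * process_write_text() is a stand-in name for whatever write path
 * the caller actually uses.
 */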
4809 | | | 4837 | |
4810 | /* | | 4838 | /* |
4811 | * Routine: pmap_unwire | | 4839 | * Routine: pmap_unwire |
4812 | * Function: Clear the wired attribute for a map/virtual-address pair. | | 4840 | * Function: Clear the wired attribute for a map/virtual-address pair. |
4813 | * | | 4841 | * |
4814 | * In/out conditions: | | 4842 | * In/out conditions: |
4815 | * The mapping must already exist in the pmap. | | 4843 | * The mapping must already exist in the pmap. |
4816 | */ | | 4844 | */ |
4817 | void | | 4845 | void |
4818 | pmap_unwire(pmap_t pm, vaddr_t va) | | 4846 | pmap_unwire(pmap_t pm, vaddr_t va) |
4819 | { | | 4847 | { |
4820 | struct l2_bucket *l2b; | | 4848 | struct l2_bucket *l2b; |
4821 | pt_entry_t *ptep, pte; | | 4849 | pt_entry_t *ptep, pte; |
4822 | struct vm_page *pg; | | 4850 | struct vm_page *pg; |
4823 | paddr_t pa; | | 4851 | paddr_t pa; |
4824 | | | 4852 | |
4825 | UVMHIST_FUNC(__func__); | | 4853 | UVMHIST_FUNC(__func__); |
4826 | UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx", (uintptr_t)pm, va, 0, 0); | | 4854 | UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx", (uintptr_t)pm, va, 0, 0); |
4827 | | | 4855 | |
| | | 4856 | kpreempt_disable(); |
4828 | pmap_acquire_pmap_lock(pm); | | 4857 | pmap_acquire_pmap_lock(pm); |
4829 | | | 4858 | |
4830 | l2b = pmap_get_l2_bucket(pm, va); | | 4859 | l2b = pmap_get_l2_bucket(pm, va); |
4831 | KDASSERT(l2b != NULL); | | 4860 | KDASSERT(l2b != NULL); |
4832 | | | 4861 | |
4833 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 4862 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
4834 | pte = *ptep; | | 4863 | pte = *ptep; |
4835 | | | 4864 | |
4836 | /* Extract the physical address of the page */ | | 4865 | /* Extract the physical address of the page */ |
4837 | pa = l2pte_pa(pte); | | 4866 | pa = l2pte_pa(pte); |
4838 | | | 4867 | |
4839 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { | | 4868 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { |
4840 | /* Update the wired bit in the pv entry for this page. */ | | 4869 | /* Update the wired bit in the pv entry for this page. */ |
4841 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4870 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4842 | | | 4871 | |
4843 | pmap_acquire_page_lock(md); | | 4872 | pmap_acquire_page_lock(md); |
4844 | (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0); | | 4873 | (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0); |
4845 | pmap_release_page_lock(md); | | 4874 | pmap_release_page_lock(md); |
4846 | } | | 4875 | } |
4847 | | | 4876 | |
4848 | pmap_release_pmap_lock(pm); | | 4877 | pmap_release_pmap_lock(pm); |
| | | 4878 | kpreempt_enable(); |
4849 | | | 4879 | |
4850 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); | | 4880 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); |
4851 | } | | 4881 | } |
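/*
 * Illustrative sketch (assumption about usage): unwiring a wired
 * range is just pmap_unwire() applied per page, since the wired
 * attribute lives in the per-mapping pv entry:
 *
 *	for (vaddr_t v = sva; v < eva; v += PAGE_SIZE)
 *		pmap_unwire(pm, v);
 *
 * Per the in/out conditions above, a mapping must already exist at
 * each va; UVM guarantees this when it unwires a map entry.
 */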
4852 | | | 4882 | |
4853 | #ifdef ARM_MMU_EXTENDED | | 4883 | #ifdef ARM_MMU_EXTENDED |
4854 | void | | 4884 | void |
4855 | pmap_md_pdetab_activate(pmap_t pm, struct lwp *l) | | 4885 | pmap_md_pdetab_activate(pmap_t pm, struct lwp *l) |
4856 | { | | 4886 | { |
4857 | UVMHIST_FUNC(__func__); | | 4887 | UVMHIST_FUNC(__func__); |
4858 | struct cpu_info * const ci = curcpu(); | | 4888 | struct cpu_info * const ci = curcpu(); |
4859 | struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci)); | | 4889 | struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci)); |
4860 | | | 4890 | |
4861 | UVMHIST_CALLARGS(maphist, "pm %#jx (pm->pm_l1_pa %08jx asid %ju)", | | 4891 | UVMHIST_CALLARGS(maphist, "pm %#jx (pm->pm_l1_pa %08jx asid %ju)", |
4862 | (uintptr_t)pm, pm->pm_l1_pa, pai->pai_asid, 0); | | 4892 | (uintptr_t)pm, pm->pm_l1_pa, pai->pai_asid, 0); |
4863 | | | 4893 | |
4864 | /* | | 4894 | /* |
4865 | * Assume that TTBR1 has only global mappings and TTBR0 only | | 4895 | * Assume that TTBR1 has only global mappings and TTBR0 only |
4866 | * has non-global mappings. To prevent speculation from doing | | 4896 | * has non-global mappings. To prevent speculation from doing |
4867 | * evil things we disable translation table walks using TTBR0 | | 4897 | * evil things we disable translation table walks using TTBR0 |
4868 | * before setting the CONTEXTIDR (ASID) or new TTBR0 value. | | 4898 | * before setting the CONTEXTIDR (ASID) or new TTBR0 value. |
4869 | * Once both are set, table walks are reenabled. | | 4899 | * Once both are set, table walks are reenabled. |
4870 | */ | | 4900 | */ |
4871 | const uint32_t old_ttbcr = armreg_ttbcr_read(); | | 4901 | const uint32_t old_ttbcr = armreg_ttbcr_read(); |
4872 | armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0); | | 4902 | armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0); |
4873 | isb(); | | 4903 | isb(); |
4874 | | | 4904 | |
4875 | pmap_tlb_asid_acquire(pm, l); | | 4905 | pmap_tlb_asid_acquire(pm, l); |
4876 | | | 4906 | |
4877 | cpu_setttb(pm->pm_l1_pa, pai->pai_asid); | | 4907 | cpu_setttb(pm->pm_l1_pa, pai->pai_asid); |
4878 | /* | | 4908 | /* |
4879 | * Now we can reenable table walks since the CONTEXTIDR and TTBR0 | | 4909 | * Now we can reenable table walks since the CONTEXTIDR and TTBR0
4880 | * have been updated. | | 4910 | * have been updated. |
4881 | */ | | 4911 | */ |
4882 | isb(); | | 4912 | isb(); |
4883 | | | 4913 | |
4884 | if (pm != pmap_kernel()) { | | 4914 | if (pm != pmap_kernel()) { |
4885 | armreg_ttbcr_write(old_ttbcr & ~TTBCR_S_PD0); | | 4915 | armreg_ttbcr_write(old_ttbcr & ~TTBCR_S_PD0); |
4886 | } | | 4916 | } |
4887 | cpu_cpwait(); | | 4917 | cpu_cpwait(); |
4888 | | | 4918 | |
4889 | KASSERTMSG(ci->ci_pmap_asid_cur == pai->pai_asid, "%u vs %u", | | 4919 | KASSERTMSG(ci->ci_pmap_asid_cur == pai->pai_asid, "%u vs %u", |
4890 | ci->ci_pmap_asid_cur, pai->pai_asid); | | 4920 | ci->ci_pmap_asid_cur, pai->pai_asid); |
4891 | ci->ci_pmap_cur = pm; | | 4921 | ci->ci_pmap_cur = pm; |
4892 | } | | 4922 | } |
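/*
 * Condensed sketch of the activate sequence above (illustrative
 * only; register names follow the code above):
 *
 *	TTBCR |= TTBCR_S_PD0;	   // stop table walks via TTBR0
 *	isb();
 *	CONTEXTIDR = asid;	   // both set by cpu_setttb(), after
 *	TTBR0 = pm->pm_l1_pa;	   // pmap_tlb_asid_acquire()
 *	isb();
 *	TTBCR &= ~TTBCR_S_PD0;	   // re-enable walks (user pmaps only)
 *
 * Any window in which the new ASID is visible together with the old
 * TTBR0 (or vice versa) would let a speculative walk load stale
 * translations into the TLB under the new ASID.
 */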
4893 | | | 4923 | |
4894 | void | | 4924 | void |
4895 | pmap_md_pdetab_deactivate(pmap_t pm) | | 4925 | pmap_md_pdetab_deactivate(pmap_t pm) |
4896 | { | | 4926 | { |
4897 | | | 4927 | |
4898 | UVMHIST_FUNC(__func__); | | 4928 | UVMHIST_FUNC(__func__); |
4899 | UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0); | | 4929 | UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0); |
4900 | | | 4930 | |
4901 | kpreempt_disable(); | | 4931 | kpreempt_disable(); |
4902 | struct cpu_info * const ci = curcpu(); | | 4932 | struct cpu_info * const ci = curcpu(); |
4903 | /* | | 4933 | /* |
4904 | * Disable translation table walks from TTBR0 while no pmap has been | | 4934 | * Disable translation table walks from TTBR0 while no pmap has been |
4905 | * activated. | | 4935 | * activated. |
4906 | */ | | 4936 | */ |
4907 | const uint32_t old_ttbcr = armreg_ttbcr_read(); | | 4937 | const uint32_t old_ttbcr = armreg_ttbcr_read(); |
4908 | armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0); | | 4938 | armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0); |
4909 | isb(); | | 4939 | isb(); |
4910 | pmap_tlb_asid_deactivate(pm); | | 4940 | pmap_tlb_asid_deactivate(pm); |
4911 | cpu_setttb(pmap_kernel()->pm_l1_pa, KERNEL_PID); | | 4941 | cpu_setttb(pmap_kernel()->pm_l1_pa, KERNEL_PID); |
4912 | isb(); | | 4942 | isb(); |
4913 | | | 4943 | |
4914 | ci->ci_pmap_cur = pmap_kernel(); | | 4944 | ci->ci_pmap_cur = pmap_kernel(); |
4915 | KASSERTMSG(ci->ci_pmap_asid_cur == KERNEL_PID, "ci_pmap_asid_cur %u", | | 4945 | KASSERTMSG(ci->ci_pmap_asid_cur == KERNEL_PID, "ci_pmap_asid_cur %u", |
4916 | ci->ci_pmap_asid_cur); | | 4946 | ci->ci_pmap_asid_cur); |
4917 | kpreempt_enable(); | | 4947 | kpreempt_enable(); |
4918 | } | | 4948 | } |
4919 | #endif | | 4949 | #endif |
4920 | | | 4950 | |
4921 | void | | 4951 | void |
4922 | pmap_activate(struct lwp *l) | | 4952 | pmap_activate(struct lwp *l) |
4923 | { | | 4953 | { |
4924 | extern int block_userspace_access; | | 4954 | extern int block_userspace_access; |
4925 | pmap_t npm = l->l_proc->p_vmspace->vm_map.pmap; | | 4955 | pmap_t npm = l->l_proc->p_vmspace->vm_map.pmap; |
4926 | | | 4956 | |
4927 | UVMHIST_FUNC(__func__); | | 4957 | UVMHIST_FUNC(__func__); |
4928 | UVMHIST_CALLARGS(maphist, "l=%#jx pm=%#jx", (uintptr_t)l, | | 4958 | UVMHIST_CALLARGS(maphist, "l=%#jx pm=%#jx", (uintptr_t)l, |
4929 | (uintptr_t)npm, 0, 0); | | 4959 | (uintptr_t)npm, 0, 0); |
4930 | | | 4960 | |
4931 | struct cpu_info * const ci = curcpu(); | | 4961 | struct cpu_info * const ci = curcpu(); |
4932 | | | 4962 | |
4933 | /* | | 4963 | /* |
4934 | * If activating a non-current lwp or the current lwp is | | 4964 | * If activating a non-current lwp or the current lwp is |
4935 | * already active, just return. | | 4965 | * already active, just return. |
4936 | */ | | 4966 | */ |
4937 | if (false | | 4967 | if (false |
4938 | || l != curlwp | | 4968 | || l != curlwp |
4939 | #ifdef ARM_MMU_EXTENDED | | 4969 | #ifdef ARM_MMU_EXTENDED |
4940 | || (ci->ci_pmap_cur == npm && | | 4970 | || (ci->ci_pmap_cur == npm && |
4941 | (npm == pmap_kernel() | | 4971 | (npm == pmap_kernel() |
4942 | /* || PMAP_PAI_ASIDVALID_P(pai, cpu_tlb_info(ci)) */)) | | 4972 | /* || PMAP_PAI_ASIDVALID_P(pai, cpu_tlb_info(ci)) */)) |
4943 | #else | | 4973 | #else |
4944 | || npm->pm_activated == true | | 4974 | || npm->pm_activated == true |
4945 | #endif | | 4975 | #endif |
4946 | || false) { | | 4976 | || false) { |
4947 | UVMHIST_LOG(maphist, " <-- (same pmap)", (uintptr_t)curlwp, | | 4977 | UVMHIST_LOG(maphist, " <-- (same pmap)", (uintptr_t)curlwp, |
4948 | (uintptr_t)l, 0, 0); | | 4978 | (uintptr_t)l, 0, 0); |
4949 | return; | | 4979 | return; |
4950 | } | | 4980 | } |
4951 | | | 4981 | |
4952 | #ifndef ARM_MMU_EXTENDED | | 4982 | #ifndef ARM_MMU_EXTENDED |
4953 | const uint32_t ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | | 4983 | const uint32_t ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
4954 | | (DOMAIN_CLIENT << (pmap_domain(npm) * 2)); | | 4984 | | (DOMAIN_CLIENT << (pmap_domain(npm) * 2)); |
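/*
 * Worked example (illustrative, assuming DOMAIN_CLIENT == 1 and
 * PMAP_DOMAIN_KERNEL == 0): for pmap_domain(npm) == 5,
 *
 *	ndacr = (1 << 0) | (1 << 10) = 0x00000401
 *
 * i.e. client access for the kernel domain and npm's domain only;
 * accesses through L1 slots owned by any other domain fault, which
 * is what lets the fixup path above catch stale mappings.
 */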
4955 | | | 4985 | |
4956 | /* | | 4986 | /* |
4957 | * If TTB and DACR are unchanged, short-circuit all the | | 4987 | * If TTB and DACR are unchanged, short-circuit all the |
4958 | * TLB/cache management stuff. | | 4988 | * TLB/cache management stuff. |
4959 | */ | | 4989 | */ |
4960 | pmap_t opm = ci->ci_lastlwp | | 4990 | pmap_t opm = ci->ci_lastlwp |
4961 | ? ci->ci_lastlwp->l_proc->p_vmspace->vm_map.pmap | | 4991 | ? ci->ci_lastlwp->l_proc->p_vmspace->vm_map.pmap |
4962 | : NULL; | | 4992 | : NULL; |
4963 | if (opm != NULL) { | | 4993 | if (opm != NULL) { |
4964 | uint32_t odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | | 4994 | uint32_t odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
4965 | | (DOMAIN_CLIENT << (pmap_domain(opm) * 2)); | | 4995 | | (DOMAIN_CLIENT << (pmap_domain(opm) * 2)); |
4966 | | | 4996 | |
4967 | if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr) | | 4997 | if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr) |
4968 | goto all_done; | | 4998 | goto all_done; |
4969 | } | | 4999 | } |
4970 | #endif /* !ARM_MMU_EXTENDED */ | | 5000 | #endif /* !ARM_MMU_EXTENDED */ |
4971 | | | 5001 | |
4972 | PMAPCOUNT(activations); | | 5002 | PMAPCOUNT(activations); |
4973 | block_userspace_access = 1; | | 5003 | block_userspace_access = 1; |
4974 | | | 5004 | |
4975 | #ifndef ARM_MMU_EXTENDED | | 5005 | #ifndef ARM_MMU_EXTENDED |
4976 | /* | | 5006 | /* |
4977 | * If switching to a user vmspace which is different to the | | 5007 | * If switching to a user vmspace which is different to the |
4978 | * most recent one, and the most recent one is potentially | | 5008 | * most recent one, and the most recent one is potentially |
4979 | * live in the cache, we must write-back and invalidate the | | 5009 | * live in the cache, we must write-back and invalidate the |
4980 | * entire cache. | | 5010 | * entire cache. |
4981 | */ | | 5011 | */ |
4982 | pmap_t rpm = ci->ci_pmap_lastuser; | | 5012 | pmap_t rpm = ci->ci_pmap_lastuser; |
4983 | | | 5013 | |
4984 | /* | | 5014 | /* |
4985 | * XXXSCW: There's a corner case here which can leave turds in the | | 5015 | * XXXSCW: There's a corner case here which can leave turds in the |
4986 | * cache as reported in kern/41058. They're probably left over during | | 5016 | * cache as reported in kern/41058. They're probably left over during |
4987 | * tear-down and switching away from an exiting process. Until the root | | 5017 | * tear-down and switching away from an exiting process. Until the root |
4988 | * cause is identified and fixed, zap the cache when switching pmaps. | | 5018 | * cause is identified and fixed, zap the cache when switching pmaps. |
4989 | * This will result in a few unnecessary cache flushes, but that's | | 5019 | * This will result in a few unnecessary cache flushes, but that's |
4990 | * better than silently corrupting data. | | 5020 | * better than silently corrupting data. |
4991 | */ | | 5021 | */ |
4992 | #if 0 | | 5022 | #if 0 |
4993 | if (npm != pmap_kernel() && rpm && npm != rpm && | | 5023 | if (npm != pmap_kernel() && rpm && npm != rpm && |
4994 | rpm->pm_cstate.cs_cache) { | | 5024 | rpm->pm_cstate.cs_cache) { |
4995 | rpm->pm_cstate.cs_cache = 0; | | 5025 | rpm->pm_cstate.cs_cache = 0; |
4996 | #ifdef PMAP_CACHE_VIVT | | 5026 | #ifdef PMAP_CACHE_VIVT |
4997 | cpu_idcache_wbinv_all(); | | 5027 | cpu_idcache_wbinv_all(); |
4998 | #endif | | 5028 | #endif |
4999 | } | | 5029 | } |
5000 | #else | | 5030 | #else |
5001 | if (rpm) { | | 5031 | if (rpm) { |
5002 | rpm->pm_cstate.cs_cache = 0; | | 5032 | rpm->pm_cstate.cs_cache = 0; |
5003 | if (npm == pmap_kernel()) | | 5033 | if (npm == pmap_kernel()) |
5004 | ci->ci_pmap_lastuser = NULL; | | 5034 | ci->ci_pmap_lastuser = NULL; |
5005 | #ifdef PMAP_CACHE_VIVT | | 5035 | #ifdef PMAP_CACHE_VIVT |
5006 | cpu_idcache_wbinv_all(); | | 5036 | cpu_idcache_wbinv_all(); |
5007 | #endif | | 5037 | #endif |
5008 | } | | 5038 | } |
5009 | #endif | | 5039 | #endif |
5010 | | | 5040 | |
5011 | /* No interrupts while we frob the TTB/DACR */ | | 5041 | /* No interrupts while we frob the TTB/DACR */ |
5012 | uint32_t oldirqstate = disable_interrupts(IF32_bits); | | 5042 | uint32_t oldirqstate = disable_interrupts(IF32_bits); |
5013 | #endif /* !ARM_MMU_EXTENDED */ | | 5043 | #endif /* !ARM_MMU_EXTENDED */ |
5014 | | | 5044 | |
5015 | #ifndef ARM_HAS_VBAR | | 5045 | #ifndef ARM_HAS_VBAR |
5016 | /* | | 5046 | /* |
5017 | * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1 | | 5047 | * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1 |
5018 | * entry corresponding to 'vector_page' in the incoming L1 table | | 5048 | * entry corresponding to 'vector_page' in the incoming L1 table |
5019 | * before switching to it otherwise subsequent interrupts/exceptions | | 5049 | * before switching to it otherwise subsequent interrupts/exceptions |
5020 | * (including domain faults!) will jump into hyperspace. | | 5050 | * (including domain faults!) will jump into hyperspace. |
5021 | */ | | 5051 | */ |
5022 | if (npm->pm_pl1vec != NULL) { | | 5052 | if (npm->pm_pl1vec != NULL) { |
5023 | cpu_tlb_flushID_SE((u_int)vector_page); | | 5053 | cpu_tlb_flushID_SE((u_int)vector_page); |
5024 | cpu_cpwait(); | | 5054 | cpu_cpwait(); |
5025 | *npm->pm_pl1vec = npm->pm_l1vec; | | 5055 | *npm->pm_pl1vec = npm->pm_l1vec; |
5026 | PTE_SYNC(npm->pm_pl1vec); | | 5056 | PTE_SYNC(npm->pm_pl1vec); |
5027 | } | | 5057 | } |
5028 | #endif | | 5058 | #endif |
5029 | | | 5059 | |
5030 | #ifdef ARM_MMU_EXTENDED | | 5060 | #ifdef ARM_MMU_EXTENDED |
5031 | pmap_md_pdetab_activate(npm, l); | | 5061 | pmap_md_pdetab_activate(npm, l); |
5032 | #else | | 5062 | #else |
5033 | cpu_domains(ndacr); | | 5063 | cpu_domains(ndacr); |
5034 | if (npm == pmap_kernel() || npm == rpm) { | | 5064 | if (npm == pmap_kernel() || npm == rpm) { |
5035 | /* | | 5065 | /* |
5036 | * Switching to a kernel thread, or back to the | | 5066 | * Switching to a kernel thread, or back to the |
5037 | * same user vmspace as before... Simply update | | 5067 | * same user vmspace as before... Simply update |
5038 | * the TTB (no TLB flush required) | | 5068 | * the TTB (no TLB flush required) |
5039 | */ | | 5069 | */ |
5040 | cpu_setttb(npm->pm_l1->l1_physaddr, false); | | 5070 | cpu_setttb(npm->pm_l1->l1_physaddr, false); |
5041 | cpu_cpwait(); | | 5071 | cpu_cpwait(); |
5042 | } else { | | 5072 | } else { |
5043 | /* | | 5073 | /* |
5044 | * Otherwise, update TTB and flush TLB | | 5074 | * Otherwise, update TTB and flush TLB |
5045 | */ | | 5075 | */ |
5046 | cpu_context_switch(npm->pm_l1->l1_physaddr); | | 5076 | cpu_context_switch(npm->pm_l1->l1_physaddr); |
5047 | if (rpm != NULL) | | 5077 | if (rpm != NULL) |
5048 | rpm->pm_cstate.cs_tlb = 0; | | 5078 | rpm->pm_cstate.cs_tlb = 0; |
5049 | } | | 5079 | } |
5050 | | | 5080 | |
5051 | restore_interrupts(oldirqstate); | | 5081 | restore_interrupts(oldirqstate); |
5052 | #endif /* ARM_MMU_EXTENDED */ | | 5082 | #endif /* ARM_MMU_EXTENDED */ |
5053 | | | 5083 | |
5054 | block_userspace_access = 0; | | 5084 | block_userspace_access = 0; |
5055 | | | 5085 | |
5056 | #ifndef ARM_MMU_EXTENDED | | 5086 | #ifndef ARM_MMU_EXTENDED |
5057 | all_done: | | 5087 | all_done: |
5058 | /* | | 5088 | /* |
5059 | * The new pmap is resident. Make sure it's marked | | 5089 | * The new pmap is resident. Make sure it's marked |
5060 | * as resident in the cache/TLB. | | 5090 | * as resident in the cache/TLB. |
5061 | */ | | 5091 | */ |
5062 | npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; | | 5092 | npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; |
5063 | if (npm != pmap_kernel()) | | 5093 | if (npm != pmap_kernel()) |
5064 | ci->ci_pmap_lastuser = npm; | | 5094 | ci->ci_pmap_lastuser = npm; |
5065 | | | 5095 | |
5066 | /* The old pmap is no longer active */ | | 5096 | /* The old pmap is no longer active */
5067 | if (opm != npm) { | | 5097 | if (opm != npm) { |
5068 | if (opm != NULL) | | 5098 | if (opm != NULL) |
5069 | opm->pm_activated = false; | | 5099 | opm->pm_activated = false; |
5070 | | | 5100 | |
5071 | /* But the new one is */ | | 5101 | /* But the new one is */ |
5072 | npm->pm_activated = true; | | 5102 | npm->pm_activated = true; |
5073 | } | | 5103 | } |
5074 | ci->ci_pmap_cur = npm; | | 5104 | ci->ci_pmap_cur = npm; |
5075 | #endif | | 5105 | #endif |
5076 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); | | 5106 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); |
5077 | } | | 5107 | } |
5078 | | | 5108 | |
5079 | void | | 5109 | void |
5080 | pmap_deactivate(struct lwp *l) | | 5110 | pmap_deactivate(struct lwp *l) |
5081 | { | | 5111 | { |
5082 | pmap_t pm = l->l_proc->p_vmspace->vm_map.pmap; | | 5112 | pmap_t pm = l->l_proc->p_vmspace->vm_map.pmap; |
5083 | | | 5113 | |
5084 | UVMHIST_FUNC(__func__); | | 5114 | UVMHIST_FUNC(__func__); |
5085 | UVMHIST_CALLARGS(maphist, "l=%#jx (pm=%#jx)", (uintptr_t)l, | | 5115 | UVMHIST_CALLARGS(maphist, "l=%#jx (pm=%#jx)", (uintptr_t)l, |
5086 | (uintptr_t)pm, 0, 0); | | 5116 | (uintptr_t)pm, 0, 0); |
5087 | | | 5117 | |
5088 | #ifdef ARM_MMU_EXTENDED | | 5118 | #ifdef ARM_MMU_EXTENDED |
5089 | pmap_md_pdetab_deactivate(pm); | | 5119 | pmap_md_pdetab_deactivate(pm); |
5090 | #else | | 5120 | #else |
5091 | /* | | 5121 | /* |
5092 | * If the process is exiting, make sure pmap_activate() does | | 5122 | * If the process is exiting, make sure pmap_activate() does |
5093 | * a full MMU context-switch and cache flush, which we might | | 5123 | * a full MMU context-switch and cache flush, which we might |
5094 | * otherwise skip. See PR port-arm/38950. | | 5124 | * otherwise skip. See PR port-arm/38950. |
5095 | */ | | 5125 | */ |
5096 | if (l->l_proc->p_sflag & PS_WEXIT) | | 5126 | if (l->l_proc->p_sflag & PS_WEXIT) |
5097 | curcpu()->ci_lastlwp = NULL; | | 5127 | curcpu()->ci_lastlwp = NULL; |
5098 | | | 5128 | |
5099 | pm->pm_activated = false; | | 5129 | pm->pm_activated = false; |
5100 | #endif | | 5130 | #endif |
5101 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); | | 5131 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); |
5102 | } | | 5132 | } |
5103 | | | 5133 | |
5104 | void | | 5134 | void |
5105 | pmap_update(pmap_t pm) | | 5135 | pmap_update(pmap_t pm) |
5106 | { | | 5136 | { |
5107 | | | 5137 | |
5108 | UVMHIST_FUNC(__func__); | | 5138 | UVMHIST_FUNC(__func__); |
5109 | UVMHIST_CALLARGS(maphist, "pm=%#jx remove_all %jd", (uintptr_t)pm, | | 5139 | UVMHIST_CALLARGS(maphist, "pm=%#jx remove_all %jd", (uintptr_t)pm, |
5110 | pm->pm_remove_all, 0, 0); | | 5140 | pm->pm_remove_all, 0, 0); |
5111 | | | 5141 | |
5112 | #ifndef ARM_MMU_EXTENDED | | 5142 | #ifndef ARM_MMU_EXTENDED |
5113 | if (pm->pm_remove_all) { | | 5143 | if (pm->pm_remove_all) { |
5114 | /* | | 5144 | /* |
5115 | * Finish up the pmap_remove_all() optimisation by flushing | | 5145 | * Finish up the pmap_remove_all() optimisation by flushing |
5116 | * the TLB. | | 5146 | * the TLB. |
5117 | */ | | 5147 | */ |
5118 | pmap_tlb_flushID(pm); | | 5148 | pmap_tlb_flushID(pm); |
5119 | pm->pm_remove_all = false; | | 5149 | pm->pm_remove_all = false; |
5120 | } | | 5150 | } |
5121 | | | 5151 | |
5122 | if (pmap_is_current(pm)) { | | 5152 | if (pmap_is_current(pm)) { |
5123 | /* | | 5153 | /* |
5124 | * If we're dealing with a current userland pmap, move its L1 | | 5154 | * If we're dealing with a current userland pmap, move its L1 |
5125 | * to the end of the LRU. | | 5155 | * to the end of the LRU. |
5126 | */ | | 5156 | */ |
5127 | if (pm != pmap_kernel()) | | 5157 | if (pm != pmap_kernel()) |
5128 | pmap_use_l1(pm); | | 5158 | pmap_use_l1(pm); |
5129 | | | 5159 | |
5130 | /* | | 5160 | /* |
5131 | * We can assume we're done with frobbing the cache/tlb for | | 5161 | * We can assume we're done with frobbing the cache/tlb for |
5132 | * now. Make sure any future pmap ops don't skip cache/tlb | | 5162 | * now. Make sure any future pmap ops don't skip cache/tlb |
5133 | * flushes. | | 5163 | * flushes. |
5134 | */ | | 5164 | */ |
5135 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; | | 5165 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; |
5136 | } | | 5166 | } |
5137 | #else | | 5167 | #else |
5138 | | | 5168 | |
5139 | kpreempt_disable(); | | 5169 | kpreempt_disable(); |
5140 | #if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1 | | 5170 | #if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1 |
5141 | u_int pending = atomic_swap_uint(&pm->pm_shootdown_pending, 0); | | 5171 | u_int pending = atomic_swap_uint(&pm->pm_shootdown_pending, 0);
5142 | if (pending && pmap_tlb_shootdown_bystanders(pm)) { | | 5172 | if (pending && pmap_tlb_shootdown_bystanders(pm)) {
5143 | PMAPCOUNT(shootdown_ipis); | | 5173 | PMAPCOUNT(shootdown_ipis);
5144 | } | | 5174 | } |
5145 | #endif | | 5175 | #endif |
5146 | | | 5176 | |
5147 | /* | | 5177 | /* |
5148 | * If pmap_remove_all was called, we deactivated ourselves and released | | 5178 | * If pmap_remove_all was called, we deactivated ourselves and released |
5149 | * our ASID. Now we have to reactivate ourselves. | | 5179 | * our ASID. Now we have to reactivate ourselves. |
5150 | */ | | 5180 | */ |
5151 | if (__predict_false(pm->pm_remove_all)) { | | 5181 | if (__predict_false(pm->pm_remove_all)) { |
5152 | pm->pm_remove_all = false; | | 5182 | pm->pm_remove_all = false; |
5153 | | | 5183 | |
5154 | KASSERT(pm != pmap_kernel()); | | 5184 | KASSERT(pm != pmap_kernel()); |
5155 | pmap_md_pdetab_activate(pm, curlwp); | | 5185 | pmap_md_pdetab_activate(pm, curlwp); |
5156 | } | | 5186 | } |
5157 | | | 5187 | |
5158 | if (arm_has_mpext_p) | | 5188 | if (arm_has_mpext_p) |
5159 | armreg_bpiallis_write(0); | | 5189 | armreg_bpiallis_write(0); |
5160 | else | | 5190 | else |
5161 | armreg_bpiall_write(0); | | 5191 | armreg_bpiall_write(0); |
5162 | | | 5192 | |
5163 | kpreempt_enable(); | | 5193 | kpreempt_enable(); |
5164 | | | 5194 | |
5165 | KASSERTMSG(pm == pmap_kernel() | | 5195 | KASSERTMSG(pm == pmap_kernel() |
5166 | || curcpu()->ci_pmap_cur != pm | | 5196 | || curcpu()->ci_pmap_cur != pm |
5167 | || pm->pm_pai[0].pai_asid == curcpu()->ci_pmap_asid_cur, | | 5197 | || pm->pm_pai[0].pai_asid == curcpu()->ci_pmap_asid_cur, |
5168 | "pmap/asid %p/%#x != %s cur pmap/asid %p/%#x", pm, | | 5198 | "pmap/asid %p/%#x != %s cur pmap/asid %p/%#x", pm, |
5169 | pm->pm_pai[0].pai_asid, curcpu()->ci_data.cpu_name, | | 5199 | pm->pm_pai[0].pai_asid, curcpu()->ci_data.cpu_name, |
5170 | curcpu()->ci_pmap_cur, curcpu()->ci_pmap_asid_cur); | | 5200 | curcpu()->ci_pmap_cur, curcpu()->ci_pmap_asid_cur); |
5171 | #endif | | 5201 | #endif |
5172 | | | 5202 | |
5173 | PMAPCOUNT(updates); | | 5203 | PMAPCOUNT(updates); |
5174 | | | 5204 | |
5175 | /* | | 5205 | /* |
5176 | * make sure TLB/cache operations have completed. | | 5206 | * make sure TLB/cache operations have completed. |
5177 | */ | | 5207 | */ |
5178 | cpu_cpwait(); | | 5208 | cpu_cpwait(); |
5179 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); | | 5209 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); |
5180 | } | | 5210 | } |
5181 | | | 5211 | |
5182 | bool | | 5212 | bool |
5183 | pmap_remove_all(pmap_t pm) | | 5213 | pmap_remove_all(pmap_t pm) |
5184 | { | | 5214 | { |
5185 | | | 5215 | |
5186 | UVMHIST_FUNC(__func__); | | 5216 | UVMHIST_FUNC(__func__); |
5187 | UVMHIST_CALLARGS(maphist, "(pm=%#jx)", (uintptr_t)pm, 0, 0, 0); | | 5217 | UVMHIST_CALLARGS(maphist, "(pm=%#jx)", (uintptr_t)pm, 0, 0, 0); |
5188 | | | 5218 | |
5189 | KASSERT(pm != pmap_kernel()); | | 5219 | KASSERT(pm != pmap_kernel()); |
5190 | | | 5220 | |
| | | 5221 | kpreempt_disable(); |
5191 | /* | | 5222 | /* |
5192 | * The vmspace described by this pmap is about to be torn down. | | 5223 | * The vmspace described by this pmap is about to be torn down. |
5193 | * Until pmap_update() is called, UVM will only make calls | | 5224 | * Until pmap_update() is called, UVM will only make calls |
5194 | * to pmap_remove(). We can make life much simpler by flushing | | 5225 | * to pmap_remove(). We can make life much simpler by flushing |
5195 | * the cache now, and deferring TLB invalidation to pmap_update(). | | 5226 | * the cache now, and deferring TLB invalidation to pmap_update(). |
5196 | */ | | 5227 | */ |
5197 | #ifdef PMAP_CACHE_VIVT | | 5228 | #ifdef PMAP_CACHE_VIVT |
5198 | pmap_cache_wbinv_all(pm, PVF_EXEC); | | 5229 | pmap_cache_wbinv_all(pm, PVF_EXEC); |
5199 | #endif | | 5230 | #endif |
5200 | #ifdef ARM_MMU_EXTENDED | | 5231 | #ifdef ARM_MMU_EXTENDED |
5201 | #ifdef MULTIPROCESSOR | | 5232 | #ifdef MULTIPROCESSOR |
5202 | struct cpu_info * const ci = curcpu(); | | 5233 | struct cpu_info * const ci = curcpu(); |
5203 | // This should be the last CPU with this pmap onproc | | 5234 | // This should be the last CPU with this pmap onproc |
5204 | KASSERT(!kcpuset_isotherset(pm->pm_onproc, cpu_index(ci))); | | 5235 | KASSERT(!kcpuset_isotherset(pm->pm_onproc, cpu_index(ci))); |
5205 | if (kcpuset_isset(pm->pm_onproc, cpu_index(ci))) | | 5236 | if (kcpuset_isset(pm->pm_onproc, cpu_index(ci))) |
5206 | #endif | | 5237 | #endif |
5207 | pmap_tlb_asid_deactivate(pm); | | 5238 | pmap_tlb_asid_deactivate(pm); |
5208 | #ifdef MULTIPROCESSOR | | 5239 | #ifdef MULTIPROCESSOR |
5209 | KASSERT(kcpuset_iszero(pm->pm_onproc)); | | 5240 | KASSERT(kcpuset_iszero(pm->pm_onproc)); |
5210 | #endif | | 5241 | #endif |
5211 | | | 5242 | |
5212 | pmap_tlb_asid_release_all(pm); | | 5243 | pmap_tlb_asid_release_all(pm); |
5213 | #endif | | 5244 | #endif |
5214 | pm->pm_remove_all = true; | | 5245 | pm->pm_remove_all = true; |
| | | 5246 | kpreempt_enable(); |
5215 | | | 5247 | |
5216 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); | | 5248 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); |
5217 | return false; | | 5249 | return false; |
5218 | } | | 5250 | } |
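/*
 * Illustrative sketch of the teardown protocol implied above
 * (assumption about the UVM caller):
 *
 *	if (!pmap_remove_all(pm))
 *		pmap_remove(pm, sva, eva);	// once per map entry
 *	pmap_update(pm);			// TLB work finishes here
 *
 * Returning false tells UVM it must still remove the mappings one
 * map entry at a time; only the cache clean and (for the extended
 * MMU) the ASID release have been done at this point.
 */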
5219 | | | 5251 | |
5220 | /* | | 5252 | /* |
5221 | * Retire the given physical map from service. | | 5253 | * Retire the given physical map from service. |
5222 | * Should only be called if the map contains no valid mappings. | | 5254 | * Should only be called if the map contains no valid mappings. |
5223 | */ | | 5255 | */ |
5224 | void | | 5256 | void |
5225 | pmap_destroy(pmap_t pm) | | 5257 | pmap_destroy(pmap_t pm) |
5226 | { | | 5258 | { |
5227 | UVMHIST_FUNC(__func__); | | 5259 | UVMHIST_FUNC(__func__); |
5228 | UVMHIST_CALLARGS(maphist, "pm=%#jx remove_all %jd", (uintptr_t)pm, | | 5260 | UVMHIST_CALLARGS(maphist, "pm=%#jx remove_all %jd", (uintptr_t)pm, |
5229 | pm ? pm->pm_remove_all : 0, 0, 0); | | 5261 | pm ? pm->pm_remove_all : 0, 0, 0); |
5230 | | | 5262 | |
5231 | if (pm == NULL) | | 5263 | if (pm == NULL) |
5232 | return; | | 5264 | return; |
5233 | | | 5265 | |
5234 | if (pm->pm_remove_all) { | | 5266 | if (pm->pm_remove_all) { |
5235 | #ifdef ARM_MMU_EXTENDED | | 5267 | #ifdef ARM_MMU_EXTENDED |
5236 | pmap_tlb_asid_release_all(pm); | | 5268 | pmap_tlb_asid_release_all(pm); |
5237 | #else | | 5269 | #else |
5238 | pmap_tlb_flushID(pm); | | 5270 | pmap_tlb_flushID(pm); |
5239 | #endif | | 5271 | #endif |
5240 | pm->pm_remove_all = false; | | 5272 | pm->pm_remove_all = false; |
5241 | } | | 5273 | } |
5242 | | | 5274 | |
5243 | /* | | 5275 | /* |
5244 | * Drop reference count | | 5276 | * Drop reference count |
5245 | */ | | 5277 | */ |
5246 | if (atomic_dec_uint_nv(&pm->pm_refs) > 0) { | | 5278 | if (atomic_dec_uint_nv(&pm->pm_refs) > 0) { |
5247 | #ifndef ARM_MMU_EXTENDED | | 5279 | #ifndef ARM_MMU_EXTENDED |
5248 | if (pmap_is_current(pm)) { | | 5280 | if (pmap_is_current(pm)) { |
5249 | if (pm != pmap_kernel()) | | 5281 | if (pm != pmap_kernel()) |
5250 | pmap_use_l1(pm); | | 5282 | pmap_use_l1(pm); |
5251 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; | | 5283 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; |
5252 | } | | 5284 | } |
5253 | #endif | | 5285 | #endif |
5254 | return; | | 5286 | return; |
5255 | } | | 5287 | } |
5256 | | | 5288 | |
5257 | /* | | 5289 | /* |
5258 | * reference count is zero, free pmap resources and then free pmap. | | 5290 | * reference count is zero, free pmap resources and then free pmap. |
5259 | */ | | 5291 | */ |
5260 | | | 5292 | |
5261 | #ifndef ARM_HAS_VBAR | | 5293 | #ifndef ARM_HAS_VBAR |
5262 | if (vector_page < KERNEL_BASE) { | | 5294 | if (vector_page < KERNEL_BASE) { |
5263 | KDASSERT(!pmap_is_current(pm)); | | 5295 | KDASSERT(!pmap_is_current(pm)); |
5264 | | | 5296 | |
5265 | /* Remove the vector page mapping */ | | 5297 | /* Remove the vector page mapping */ |
5266 | pmap_remove(pm, vector_page, vector_page + PAGE_SIZE); | | 5298 | pmap_remove(pm, vector_page, vector_page + PAGE_SIZE); |
5267 | pmap_update(pm); | | 5299 | pmap_update(pm); |
5268 | } | | 5300 | } |
5269 | #endif | | 5301 | #endif |
5270 | | | 5302 | |
5271 | pmap_free_l1(pm); | | 5303 | pmap_free_l1(pm); |
5272 | | | 5304 | |
5273 | #ifdef ARM_MMU_EXTENDED | | 5305 | #ifdef ARM_MMU_EXTENDED |
5274 | #ifdef MULTIPROCESSOR | | 5306 | #ifdef MULTIPROCESSOR |
5275 | kcpuset_destroy(pm->pm_active); | | 5307 | kcpuset_destroy(pm->pm_active); |
5276 | kcpuset_destroy(pm->pm_onproc); | | 5308 | kcpuset_destroy(pm->pm_onproc); |
5277 | #endif | | 5309 | #endif |
5278 | #else | | 5310 | #else |
5279 | struct cpu_info * const ci = curcpu(); | | 5311 | struct cpu_info * const ci = curcpu(); |
5280 | if (ci->ci_pmap_lastuser == pm) | | 5312 | if (ci->ci_pmap_lastuser == pm) |
5281 | ci->ci_pmap_lastuser = NULL; | | 5313 | ci->ci_pmap_lastuser = NULL; |
5282 | #endif | | 5314 | #endif |
5283 | | | 5315 | |
5284 | mutex_destroy(&pm->pm_lock); | | 5316 | mutex_destroy(&pm->pm_lock); |
5285 | pool_cache_put(&pmap_cache, pm); | | 5317 | pool_cache_put(&pmap_cache, pm); |
5286 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); | | 5318 | UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); |
5287 | } | | 5319 | } |
5288 | | | 5320 | |
5289 | | | 5321 | |
5290 | /* | | 5322 | /* |
5291 | * void pmap_reference(pmap_t pm) | | 5323 | * void pmap_reference(pmap_t pm) |
5292 | * | | 5324 | * |
5293 | * Add a reference to the specified pmap. | | 5325 | * Add a reference to the specified pmap. |
5294 | */ | | 5326 | */ |
5295 | void | | 5327 | void |
5296 | pmap_reference(pmap_t pm) | | 5328 | pmap_reference(pmap_t pm) |
5297 | { | | 5329 | { |
5298 | | | 5330 | |
5299 | if (pm == NULL) | | 5331 | if (pm == NULL) |
5300 | return; | | 5332 | return; |
5301 | | | 5333 | |
5302 | #ifndef ARM_MMU_EXTENDED | | 5334 | #ifndef ARM_MMU_EXTENDED |
5303 | pmap_use_l1(pm); | | 5335 | pmap_use_l1(pm); |
5304 | #endif | | 5336 | #endif |
5305 | | | 5337 | |
5306 | atomic_inc_uint(&pm->pm_refs); | | 5338 | atomic_inc_uint(&pm->pm_refs); |
5307 | } | | 5339 | } |
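/*
 * Illustrative sketch: references are symmetric, so a hypothetical
 * caller that stashes a pmap pointer beyond its own lifetime pairs
 * the two calls:
 *
 *	pmap_reference(pm);		// pm_refs++
 *	...use pm...
 *	pmap_destroy(pm);		// pm_refs--; frees on last ref
 */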
5308 | | | 5340 | |
5309 | #if (ARM_MMU_V6 + ARM_MMU_V7) > 0 | | 5341 | #if (ARM_MMU_V6 + ARM_MMU_V7) > 0 |
5310 | | | 5342 | |
5311 | static struct evcnt pmap_prefer_nochange_ev = | | 5343 | static struct evcnt pmap_prefer_nochange_ev = |
5312 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange"); | | 5344 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange"); |
5313 | static struct evcnt pmap_prefer_change_ev = | | 5345 | static struct evcnt pmap_prefer_change_ev = |
5314 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change"); | | 5346 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change"); |
5315 | | | 5347 | |
5316 | EVCNT_ATTACH_STATIC(pmap_prefer_change_ev); | | 5348 | EVCNT_ATTACH_STATIC(pmap_prefer_change_ev); |
5317 | EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev); | | 5349 | EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev); |
5318 | | | 5350 | |
5319 | void | | 5351 | void |
5320 | pmap_prefer(vaddr_t hint, vaddr_t *vap, int td) | | 5352 | pmap_prefer(vaddr_t hint, vaddr_t *vap, int td) |
5321 | { | | 5353 | { |
5322 | vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1); | | 5354 | vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1); |
5323 | vaddr_t va = *vap; | | 5355 | vaddr_t va = *vap; |
5324 | vaddr_t diff = (hint - va) & mask; | | 5356 | vaddr_t diff = (hint - va) & mask; |
5325 | if (diff == 0) { | | 5357 | if (diff == 0) { |
5326 | pmap_prefer_nochange_ev.ev_count++; | | 5358 | pmap_prefer_nochange_ev.ev_count++; |
5327 | } else { | | 5359 | } else { |
5328 | pmap_prefer_change_ev.ev_count++; | | 5360 | pmap_prefer_change_ev.ev_count++; |
5329 | if (__predict_false(td)) | | 5361 | if (__predict_false(td)) |
5330 | va -= mask + 1; | | 5362 | va -= mask + 1; |
5331 | *vap = va + diff; | | 5363 | *vap = va + diff; |
5332 | } | | 5364 | } |
5333 | } | | 5365 | } |
5334 | #endif /* ARM_MMU_V6 | ARM_MMU_V7 */ | | 5366 | #endif /* ARM_MMU_V6 | ARM_MMU_V7 */ |
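/*
 * Worked example (illustrative values): with arm_cache_prefer_mask
 * == 0x3000 (four page colors) and PAGE_SIZE == 0x1000, mask is
 * 0x3fff.  For hint == 0x12000 (color 2) and *vap == 0x41000
 * (color 1):
 *
 *	diff = (0x12000 - 0x41000) & 0x3fff = 0x1000
 *	*vap = 0x41000 + 0x1000 = 0x42000	// color 2, matches hint
 *
 * With td (top-down) set, va is first moved down by mask + 1
 * (0x4000), giving *vap = 0x3e000: still color 2, but at or below
 * the original va as a top-down allocation wants.
 */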
5335 | | | 5367 | |
5336 | /* | | 5368 | /* |
5337 | * pmap_zero_page() | | 5369 | * pmap_zero_page() |
5338 | * | | 5370 | * |
5339 | * Zero a given physical page by mapping it at a page hook point. | | 5371 | * Zero a given physical page by mapping it at a page hook point. |
5340 | * In doing the zero page op, the page we zero is mapped cacheable, | | 5372 | * In doing the zero page op, the page we zero is mapped cacheable,
5341 | * since on StrongARM accesses to non-cached pages are non-burst, | | 5373 | * since on StrongARM accesses to non-cached pages are non-burst,
5342 | * making writes of _any_ bulk data very slow. | | 5374 | * making writes of _any_ bulk data very slow.
5343 | */ | | 5375 | */ |
5344 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 | | 5376 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 |
5345 | void | | 5377 | void |
5346 | pmap_zero_page_generic(paddr_t pa) | | 5378 | pmap_zero_page_generic(paddr_t pa) |
5347 | { | | 5379 | { |
5348 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) | | 5380 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) |
5349 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); | | 5381 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); |
5350 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 5382 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
5351 | #endif | | 5383 | #endif |
5352 | #if defined(PMAP_CACHE_VIPT) | | 5384 | #if defined(PMAP_CACHE_VIPT) |
5353 | /* Choose the last page color it had, if any */ | | 5385 | /* Choose the last page color it had, if any */ |
5354 | const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 5386 | const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
5355 | #else | | 5387 | #else |
5356 | const vsize_t va_offset = 0; | | 5388 | const vsize_t va_offset = 0; |
5357 | #endif | | 5389 | #endif |
5358 | #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) | | 5390 | #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) |
5359 | /* | | 5391 | /* |
5360 | * Is this page mapped at its natural color? | | 5392 | * Is this page mapped at its natural color? |
5361 | * If we have all of memory mapped, then just convert PA to VA. | | 5393 | * If we have all of memory mapped, then just convert PA to VA. |
5362 | */ | | 5394 | */ |
5363 | bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT | | 5395 | bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT |
5364 | || va_offset == (pa & arm_cache_prefer_mask); | | 5396 | || va_offset == (pa & arm_cache_prefer_mask); |
5365 | const vaddr_t vdstp = okcolor | | 5397 | const vaddr_t vdstp = okcolor |
5366 | ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset)) | | 5398 | ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset)) |
5367 | : cpu_cdstp(va_offset); | | 5399 | : cpu_cdstp(va_offset); |
5368 | #else | | 5400 | #else |
5369 | const bool okcolor = false; | | 5401 | const bool okcolor = false; |
5370 | const vaddr_t vdstp = cpu_cdstp(va_offset); | | 5402 | const vaddr_t vdstp = cpu_cdstp(va_offset); |
5371 | #endif | | 5403 | #endif |
5372 | pt_entry_t * const ptep = cpu_cdst_pte(va_offset); | | 5404 | pt_entry_t * const ptep = cpu_cdst_pte(va_offset); |
5373 | | | 5405 | |
5374 | | | 5406 | |
5375 | #ifdef DEBUG | | 5407 | #ifdef DEBUG |
5376 | if (!SLIST_EMPTY(&md->pvh_list)) | | 5408 | if (!SLIST_EMPTY(&md->pvh_list)) |
5377 | panic("pmap_zero_page: page has mappings"); | | 5409 | panic("pmap_zero_page: page has mappings"); |
5378 | #endif | | 5410 | #endif |
5379 | | | 5411 | |
5380 | KDASSERT((pa & PGOFSET) == 0); | | 5412 | KDASSERT((pa & PGOFSET) == 0); |
5381 | | | 5413 | |
5382 | if (!okcolor) { | | 5414 | if (!okcolor) { |
5383 | /* | | 5415 | /* |
5384 | * Hook in the page, zero it, and purge the cache for that | | 5416 | * Hook in the page, zero it, and purge the cache for that |
5385 | * zeroed page. Invalidate the TLB as needed. | | 5417 | * zeroed page. Invalidate the TLB as needed. |
5386 | */ | | 5418 | */ |
5387 | const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode | | 5419 | const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode |
5388 | | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE); | | 5420 | | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE); |
5389 | l2pte_set(ptep, npte, 0); | | 5421 | l2pte_set(ptep, npte, 0); |
5390 | PTE_SYNC(ptep); | | 5422 | PTE_SYNC(ptep); |
5391 | cpu_tlb_flushD_SE(vdstp); | | 5423 | cpu_tlb_flushD_SE(vdstp); |
5392 | cpu_cpwait(); | | 5424 | cpu_cpwait(); |
5393 | #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) \ | | 5425 | #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) \ |
5394 | && !defined(ARM_MMU_EXTENDED) | | 5426 | && !defined(ARM_MMU_EXTENDED) |
5395 | /* | | 5427 | /* |
5396 | * If we are direct-mapped and our color isn't ok, then before | | 5428 | * If we are direct-mapped and our color isn't ok, then before |
5397 | * we bzero the page invalidate its contents from the cache and | | 5429 | * we bzero the page invalidate its contents from the cache and |
5398 | * reset the color to its natural color. | | 5430 | * reset the color to its natural color. |
5399 | */ | | 5431 | */ |
5400 | cpu_dcache_inv_range(vdstp, PAGE_SIZE); | | 5432 | cpu_dcache_inv_range(vdstp, PAGE_SIZE); |
5401 | md->pvh_attrs &= ~arm_cache_prefer_mask; | | 5433 | md->pvh_attrs &= ~arm_cache_prefer_mask; |
5402 | md->pvh_attrs |= (pa & arm_cache_prefer_mask); | | 5434 | md->pvh_attrs |= (pa & arm_cache_prefer_mask); |
5403 | #endif | | 5435 | #endif |
5404 | } | | 5436 | } |
5405 | bzero_page(vdstp); | | 5437 | bzero_page(vdstp); |
5406 | if (!okcolor) { | | 5438 | if (!okcolor) { |
5407 | /* | | 5439 | /* |
5408 | * Unmap the page. | | 5440 | * Unmap the page. |
5409 | */ | | 5441 | */ |
5410 | l2pte_reset(ptep); | | 5442 | l2pte_reset(ptep); |
5411 | PTE_SYNC(ptep); | | 5443 | PTE_SYNC(ptep); |
5412 | cpu_tlb_flushD_SE(vdstp); | | 5444 | cpu_tlb_flushD_SE(vdstp); |
5413 | #ifdef PMAP_CACHE_VIVT | | 5445 | #ifdef PMAP_CACHE_VIVT |
5414 | cpu_dcache_wbinv_range(vdstp, PAGE_SIZE); | | 5446 | cpu_dcache_wbinv_range(vdstp, PAGE_SIZE); |
5415 | #endif | | 5447 | #endif |
5416 | } | | 5448 | } |
5417 | #ifdef PMAP_CACHE_VIPT | | 5449 | #ifdef PMAP_CACHE_VIPT |
5418 | /* | | 5450 | /* |
5419 | * This page is now cache resident so it now has a page color. | | 5451 | * This page is now cache resident so it now has a page color. |
5420 | * Any contents have been obliterated so clear the EXEC flag. | | 5452 | * Any contents have been obliterated so clear the EXEC flag. |
5421 | */ | | 5453 | */ |
5422 | #ifndef ARM_MMU_EXTENDED | | 5454 | #ifndef ARM_MMU_EXTENDED |
5423 | if (!pmap_is_page_colored_p(md)) { | | 5455 | if (!pmap_is_page_colored_p(md)) { |
5424 | PMAPCOUNT(vac_color_new); | | 5456 | PMAPCOUNT(vac_color_new); |
5425 | md->pvh_attrs |= PVF_COLORED; | | 5457 | md->pvh_attrs |= PVF_COLORED; |
5426 | } | | 5458 | } |
5427 | md->pvh_attrs |= PVF_DIRTY; | | 5459 | md->pvh_attrs |= PVF_DIRTY; |
5428 | #endif | | 5460 | #endif |
5429 | if (PV_IS_EXEC_P(md->pvh_attrs)) { | | 5461 | if (PV_IS_EXEC_P(md->pvh_attrs)) { |
5430 | md->pvh_attrs &= ~PVF_EXEC; | | 5462 | md->pvh_attrs &= ~PVF_EXEC; |
5431 | PMAPCOUNT(exec_discarded_zero); | | 5463 | PMAPCOUNT(exec_discarded_zero); |
5432 | } | | 5464 | } |
5433 | #endif | | 5465 | #endif |
5434 | } | | 5466 | } |
5435 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ | | 5467 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
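/*
 * Worked example for the color check in pmap_zero_page_generic()
 * (illustrative values): with arm_cache_prefer_mask == 0x3000, a
 * page at pa 0x00206000 has natural color 0x2000.  If md->pvh_attrs
 * last recorded va_offset 0x2000, the direct map (where available)
 * can be used as-is (okcolor); a recorded offset of, say, 0x1000
 * forces the cdstp hook mapping at that color plus the cache
 * clean-up seen above.
 */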
5436 | | | 5468 | |
5437 | #if ARM_MMU_XSCALE == 1 | | 5469 | #if ARM_MMU_XSCALE == 1 |
5438 | void | | 5470 | void |
5439 | pmap_zero_page_xscale(paddr_t pa) | | 5471 | pmap_zero_page_xscale(paddr_t pa) |
5440 | { | | 5472 | { |
5441 | #ifdef DEBUG | | 5473 | #ifdef DEBUG |
5442 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); | | 5474 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); |
5443 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 5475 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
5444 | | | 5476 | |
5445 | if (!SLIST_EMPTY(&md->pvh_list)) | | 5477 | if (!SLIST_EMPTY(&md->pvh_list)) |
5446 | panic("pmap_zero_page: page has mappings"); | | 5478 | panic("pmap_zero_page: page has mappings"); |
5447 | #endif | | 5479 | #endif |
5448 | | | 5480 | |
5449 | KDASSERT((pa & PGOFSET) == 0); | | 5481 | KDASSERT((pa & PGOFSET) == 0); |
5450 | | | 5482 | |
5451 | /* | | 5483 | /* |
5452 | * Hook in the page, zero it, and purge the cache for that | | 5484 | * Hook in the page, zero it, and purge the cache for that |
5453 | * zeroed page. Invalidate the TLB as needed. | | 5485 | * zeroed page. Invalidate the TLB as needed. |
5454 | */ | | 5486 | */ |
5455 | | | 5487 | |
5456 | pt_entry_t npte = L2_S_PROTO | pa | | | 5488 | pt_entry_t npte = L2_S_PROTO | pa | |
5457 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | | | 5489 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | |
5458 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ | | 5490 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ |
5459 | l2pte_set(cdst_pte, npte, 0); | | 5491 | l2pte_set(cdst_pte, npte, 0); |
5460 | PTE_SYNC(cdst_pte); | | 5492 | PTE_SYNC(cdst_pte); |
5461 | cpu_tlb_flushD_SE(cdstp); | | 5493 | cpu_tlb_flushD_SE(cdstp); |
5462 | cpu_cpwait(); | | 5494 | cpu_cpwait(); |
5463 | bzero_page(cdstp); | | 5495 | bzero_page(cdstp); |
5464 | xscale_cache_clean_minidata(); | | 5496 | xscale_cache_clean_minidata(); |
5465 | l2pte_reset(cdst_pte); | | 5497 | l2pte_reset(cdst_pte); |
5466 | PTE_SYNC(cdst_pte); | | 5498 | PTE_SYNC(cdst_pte); |
5467 | } | | 5499 | } |
5468 | #endif /* ARM_MMU_XSCALE == 1 */ | | 5500 | #endif /* ARM_MMU_XSCALE == 1 */ |
5469 | | | 5501 | |
5470 | /* pmap_pageidlezero() | | 5502 | /* pmap_pageidlezero() |
5471 | * | | 5503 | * |
5472 | * The same as above, except that we assume that the page is not | | 5504 | * The same as above, except that we assume that the page is not |
5473 | * mapped. This means we never have to flush the cache first. Called | | 5505 | * mapped. This means we never have to flush the cache first. Called |
5474 | * from the idle loop. | | 5506 | * from the idle loop. |
5475 | */ | | 5507 | */ |
5476 | bool | | 5508 | bool |
5477 | pmap_pageidlezero(paddr_t pa) | | 5509 | pmap_pageidlezero(paddr_t pa) |
5478 | { | | 5510 | { |
5479 | bool rv = true; | | 5511 | bool rv = true; |
5480 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) | | 5512 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) |
5481 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); | | 5513 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); |
5482 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 5514 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
5483 | #endif | | 5515 | #endif |
5484 | #ifdef PMAP_CACHE_VIPT | | 5516 | #ifdef PMAP_CACHE_VIPT |
5485 | /* Choose the last page color it had, if any */ | | 5517 | /* Choose the last page color it had, if any */ |
5486 | const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 5518 | const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
5487 | #else | | 5519 | #else |
5488 | const vsize_t va_offset = 0; | | 5520 | const vsize_t va_offset = 0; |
5489 | #endif | | 5521 | #endif |
5490 | #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS | | 5522 | #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS |
5491 | bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT | | 5523 | bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT |
5492 | || va_offset == (pa & arm_cache_prefer_mask); | | 5524 | || va_offset == (pa & arm_cache_prefer_mask); |
5493 | const vaddr_t vdstp = okcolor | | 5525 | const vaddr_t vdstp = okcolor |
5494 | ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset)) | | 5526 | ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset)) |
5495 | : cpu_cdstp(va_offset); | | 5527 | : cpu_cdstp(va_offset); |
5496 | #else | | 5528 | #else |
5497 | const bool okcolor = false; | | 5529 | const bool okcolor = false; |
5498 | const vaddr_t vdstp = cpu_cdstp(va_offset); | | 5530 | const vaddr_t vdstp = cpu_cdstp(va_offset); |
5499 | #endif | | 5531 | #endif |
5500 | pt_entry_t * const ptep = cpu_cdst_pte(va_offset); | | 5532 | pt_entry_t * const ptep = cpu_cdst_pte(va_offset); |
5501 | | | 5533 | |
5502 | | | 5534 | |
5503 | #ifdef DEBUG | | 5535 | #ifdef DEBUG |
5504 | if (!SLIST_EMPTY(&md->pvh_list)) | | 5536 | if (!SLIST_EMPTY(&md->pvh_list)) |
5505 | panic("pmap_pageidlezero: page has mappings"); | | 5537 | panic("pmap_pageidlezero: page has mappings"); |
5506 | #endif | | 5538 | #endif |
5507 | | | 5539 | |
5508 | KDASSERT((pa & PGOFSET) == 0); | | 5540 | KDASSERT((pa & PGOFSET) == 0); |
5509 | | | 5541 | |
5510 | if (!okcolor) { | | 5542 | if (!okcolor) { |
5511 | /* | | 5543 | /* |
5512 | * Hook in the page, zero it, and purge the cache for that | | 5544 | * Hook in the page, zero it, and purge the cache for that |
5513 | * zeroed page. Invalidate the TLB as needed. | | 5545 | * zeroed page. Invalidate the TLB as needed. |
5514 | */ | | 5546 | */ |
5515 | const pt_entry_t npte = L2_S_PROTO | pa | | | 5547 | const pt_entry_t npte = L2_S_PROTO | pa | |
5516 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; | | 5548 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; |
5517 | l2pte_set(ptep, npte, 0); | | 5549 | l2pte_set(ptep, npte, 0); |
5518 | PTE_SYNC(ptep); | | 5550 | PTE_SYNC(ptep); |
5519 | cpu_tlb_flushD_SE(vdstp); | | 5551 | cpu_tlb_flushD_SE(vdstp); |
5520 | cpu_cpwait(); | | 5552 | cpu_cpwait(); |
5521 | } | | 5553 | } |
5522 | | | 5554 | |
5523 | uint64_t *ptr = (uint64_t *)vdstp; | | 5555 | uint64_t *ptr = (uint64_t *)vdstp; |
5524 | for (size_t i = 0; i < PAGE_SIZE / sizeof(*ptr); i++) { | | 5556 | for (size_t i = 0; i < PAGE_SIZE / sizeof(*ptr); i++) { |
5525 | if (sched_curcpu_runnable_p() != 0) { | | 5557 | if (sched_curcpu_runnable_p() != 0) { |
5526 | /* | | 5558 | /* |
5527 | * A process has become ready. Abort now, | | 5559 | * A process has become ready. Abort now, |
5528 | * so we don't keep it waiting while we | | 5560 | * so we don't keep it waiting while we |
5529 | * do slow memory access to finish this | | 5561 | * do slow memory access to finish this |
5530 | * page. | | 5562 | * page. |
5531 | */ | | 5563 | */ |
5532 | rv = false; | | 5564 | rv = false; |
5533 | break; | | 5565 | break; |
5534 | } | | 5566 | } |
5535 | *ptr++ = 0; | | 5567 | *ptr++ = 0; |
5536 | } | | 5568 | } |
5537 | | | 5569 | |
5538 | #ifdef PMAP_CACHE_VIVT | | 5570 | #ifdef PMAP_CACHE_VIVT |
5539 | if (rv) | | 5571 | if (rv) |
5540 | /* | | 5572 | /* |
5541 | * If we aborted, we'll re-zero this page again later, so don't | | 5573 | * If we aborted, we'll re-zero this page again later, so don't |
5542 | * purge it from the cache unless we finished the whole page. | | 5574 | * purge it from the cache unless we finished the whole page. |
5543 | */ | | 5575 | */ |
5544 | cpu_dcache_wbinv_range(vdstp, PAGE_SIZE); | | 5576 | cpu_dcache_wbinv_range(vdstp, PAGE_SIZE); |
5545 | #elif defined(PMAP_CACHE_VIPT) | | 5577 | #elif defined(PMAP_CACHE_VIPT) |
5546 | /* | | 5578 | /* |
5547 | * This page is now cache-resident, so it has a page color. | | 5579 | * This page is now cache-resident, so it has a page color. |
5548 | * Any contents have been obliterated so clear the EXEC flag. | | 5580 | * Any contents have been obliterated so clear the EXEC flag. |
5549 | */ | | 5581 | */ |
5550 | #ifndef ARM_MMU_EXTENDED | | 5582 | #ifndef ARM_MMU_EXTENDED |
5551 | if (!pmap_is_page_colored_p(md)) { | | 5583 | if (!pmap_is_page_colored_p(md)) { |
5552 | PMAPCOUNT(vac_color_new); | | 5584 | PMAPCOUNT(vac_color_new); |
5553 | md->pvh_attrs |= PVF_COLORED; | | 5585 | md->pvh_attrs |= PVF_COLORED; |
5554 | } | | 5586 | } |
5555 | #endif | | 5587 | #endif |
5556 | if (PV_IS_EXEC_P(md->pvh_attrs)) { | | 5588 | if (PV_IS_EXEC_P(md->pvh_attrs)) { |
5557 | md->pvh_attrs &= ~PVF_EXEC; | | 5589 | md->pvh_attrs &= ~PVF_EXEC; |
5558 | PMAPCOUNT(exec_discarded_zero); | | 5590 | PMAPCOUNT(exec_discarded_zero); |
5559 | } | | 5591 | } |
5560 | #endif | | 5592 | #endif |
5561 | /* | | 5593 | /* |
5562 | * Unmap the page. | | 5594 | * Unmap the page. |
5563 | */ | | 5595 | */ |
5564 | if (!okcolor) { | | 5596 | if (!okcolor) { |
5565 | l2pte_reset(ptep); | | 5597 | l2pte_reset(ptep); |
5566 | PTE_SYNC(ptep); | | 5598 | PTE_SYNC(ptep); |
5567 | cpu_tlb_flushD_SE(vdstp); | | 5599 | cpu_tlb_flushD_SE(vdstp); |
5568 | } | | 5600 | } |
5569 | | | 5601 | |
5570 | return rv; | | 5602 | return rv; |
5571 | } | | 5603 | } |
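/*
 * A minimal, self-contained sketch of the abort-early zeroing idea used
 * above, outside any kernel context.  The work_is_pending() callback
 * stands in for sched_curcpu_runnable_p() and is purely hypothetical.
 */
#if 0	/* illustrative only */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
zero_page_preemptably(uint64_t *p, size_t nwords,
    bool (*work_is_pending)(void))
{
	for (size_t i = 0; i < nwords; i++) {
		if (work_is_pending())
			return false;	/* abort; caller re-zeroes later */
		p[i] = 0;
	}
	return true;			/* page fully zeroed */
}
#endif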
5572 | | | 5604 | |
5573 | /* | | 5605 | /* |
5574 | * pmap_copy_page() | | 5606 | * pmap_copy_page() |
5575 | * | | 5607 | * |
5576 | * Copy one physical page into another, by mapping the pages into | | 5608 | * Copy one physical page into another, by mapping the pages into |
5577 | * hook points. The same comment regarding cacheability as in | | 5609 | * hook points. The same comment regarding cacheability as in |
5578 | * pmap_zero_page also applies here. | | 5610 | * pmap_zero_page also applies here. |
5579 | */ | | 5611 | */ |
5580 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 | | 5612 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 |
5581 | void | | 5613 | void |
5582 | pmap_copy_page_generic(paddr_t src, paddr_t dst) | | 5614 | pmap_copy_page_generic(paddr_t src, paddr_t dst) |
5583 | { | | 5615 | { |
5584 | struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src); | | 5616 | struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src); |
5585 | struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); | | 5617 | struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); |
5586 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) | | 5618 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) |
5587 | struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst); | | 5619 | struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst); |
5588 | struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg); | | 5620 | struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg); |
5589 | #endif | | 5621 | #endif |
5590 | #ifdef PMAP_CACHE_VIPT | | 5622 | #ifdef PMAP_CACHE_VIPT |
5591 | const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask; | | 5623 | const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask; |
5592 | const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask; | | 5624 | const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask; |
5593 | #else | | 5625 | #else |
5594 | const vsize_t src_va_offset = 0; | | 5626 | const vsize_t src_va_offset = 0; |
5595 | const vsize_t dst_va_offset = 0; | | 5627 | const vsize_t dst_va_offset = 0; |
5596 | #endif | | 5628 | #endif |
5597 | #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) | | 5629 | #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) |
5598 | /* | | 5630 | /* |
5599 | * Is this page mapped at its natural color? | | 5631 | * Is this page mapped at its natural color? |
5600 | * If we have all of memory mapped, then just convert PA to VA. | | 5632 | * If we have all of memory mapped, then just convert PA to VA. |
5601 | */ | | 5633 | */ |
5602 | bool src_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT | | 5634 | bool src_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT |
5603 | || src_va_offset == (src & arm_cache_prefer_mask); | | 5635 | || src_va_offset == (src & arm_cache_prefer_mask); |
5604 | bool dst_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT | | 5636 | bool dst_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT |
5605 | || dst_va_offset == (dst & arm_cache_prefer_mask); | | 5637 | || dst_va_offset == (dst & arm_cache_prefer_mask); |
5606 | const vaddr_t vsrcp = src_okcolor | | 5638 | const vaddr_t vsrcp = src_okcolor |
5607 | ? pmap_direct_mapped_phys(src, &src_okcolor, | | 5639 | ? pmap_direct_mapped_phys(src, &src_okcolor, |
5608 | cpu_csrcp(src_va_offset)) | | 5640 | cpu_csrcp(src_va_offset)) |
5609 | : cpu_csrcp(src_va_offset); | | 5641 | : cpu_csrcp(src_va_offset); |
5610 | const vaddr_t vdstp = pmap_direct_mapped_phys(dst, &dst_okcolor, | | 5642 | const vaddr_t vdstp = pmap_direct_mapped_phys(dst, &dst_okcolor, |
5611 | cpu_cdstp(dst_va_offset)); | | 5643 | cpu_cdstp(dst_va_offset)); |
5612 | #else | | 5644 | #else |
5613 | const bool src_okcolor = false; | | 5645 | const bool src_okcolor = false; |
5614 | const bool dst_okcolor = false; | | 5646 | const bool dst_okcolor = false; |
5615 | const vaddr_t vsrcp = cpu_csrcp(src_va_offset); | | 5647 | const vaddr_t vsrcp = cpu_csrcp(src_va_offset); |
5616 | const vaddr_t vdstp = cpu_cdstp(dst_va_offset); | | 5648 | const vaddr_t vdstp = cpu_cdstp(dst_va_offset); |
5617 | #endif | | 5649 | #endif |
5618 | pt_entry_t * const src_ptep = cpu_csrc_pte(src_va_offset); | | 5650 | pt_entry_t * const src_ptep = cpu_csrc_pte(src_va_offset); |
5619 | pt_entry_t * const dst_ptep = cpu_cdst_pte(dst_va_offset); | | 5651 | pt_entry_t * const dst_ptep = cpu_cdst_pte(dst_va_offset); |
5620 | | | 5652 | |
5621 | #ifdef DEBUG | | 5653 | #ifdef DEBUG |
5622 | if (!SLIST_EMPTY(&dst_md->pvh_list)) | | 5654 | if (!SLIST_EMPTY(&dst_md->pvh_list)) |
5623 | panic("pmap_copy_page: dst page has mappings"); | | 5655 | panic("pmap_copy_page: dst page has mappings"); |
5624 | #endif | | 5656 | #endif |
5625 | | | 5657 | |
5626 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 5658 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
5627 | KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC)); | | 5659 | KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC)); |
5628 | #endif | | 5660 | #endif |
5629 | KDASSERT((src & PGOFSET) == 0); | | 5661 | KDASSERT((src & PGOFSET) == 0); |
5630 | KDASSERT((dst & PGOFSET) == 0); | | 5662 | KDASSERT((dst & PGOFSET) == 0); |
5631 | | | 5663 | |
5632 | /* | | 5664 | /* |
5633 | * Clean the source page. Hold the source page's lock for | | 5665 | * Clean the source page. Hold the source page's lock for |
5634 | * the duration of the copy so that no other mappings can | | 5666 | * the duration of the copy so that no other mappings can |
5635 | * be created while we have a potentially aliased mapping. | | 5667 | * be created while we have a potentially aliased mapping. |
5636 | */ | | 5668 | */ |
5637 | #ifdef PMAP_CACHE_VIVT | | 5669 | #ifdef PMAP_CACHE_VIVT |
5638 | pmap_acquire_page_lock(src_md); | | 5670 | pmap_acquire_page_lock(src_md); |
5639 | (void) pmap_clean_page(src_md, true); | | 5671 | (void) pmap_clean_page(src_md, true); |
5640 | pmap_release_page_lock(src_md); | | 5672 | pmap_release_page_lock(src_md); |
5641 | #endif | | 5673 | #endif |
5642 | | | 5674 | |
5643 | /* | | 5675 | /* |
5644 | * Map the pages into the page hook points, copy them, and purge | | 5676 | * Map the pages into the page hook points, copy them, and purge |
5645 | * the cache for the appropriate page. Invalidate the TLB | | 5677 | * the cache for the appropriate page. Invalidate the TLB |
5646 | * as required. | | 5678 | * as required. |
5647 | */ | | 5679 | */ |
5648 | if (!src_okcolor) { | | 5680 | if (!src_okcolor) { |
5649 | const pt_entry_t nsrc_pte = L2_S_PROTO | | 5681 | const pt_entry_t nsrc_pte = L2_S_PROTO |
5650 | | src | | 5682 | | src |
5651 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 5683 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
5652 | | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode) | | 5684 | | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode) |
5653 | #else // defined(PMAP_CACHE_VIVT) || defined(ARM_MMU_EXTENDED) | | 5685 | #else // defined(PMAP_CACHE_VIVT) || defined(ARM_MMU_EXTENDED) |
5654 | | pte_l2_s_cache_mode | | 5686 | | pte_l2_s_cache_mode |
5655 | #endif | | 5687 | #endif |
5656 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ); | | 5688 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ); |
5657 | l2pte_set(src_ptep, nsrc_pte, 0); | | 5689 | l2pte_set(src_ptep, nsrc_pte, 0); |
5658 | PTE_SYNC(src_ptep); | | 5690 | PTE_SYNC(src_ptep); |
5659 | cpu_tlb_flushD_SE(vsrcp); | | 5691 | cpu_tlb_flushD_SE(vsrcp); |
5660 | cpu_cpwait(); | | 5692 | cpu_cpwait(); |
5661 | } | | 5693 | } |
5662 | if (!dst_okcolor) { | | 5694 | if (!dst_okcolor) { |
5663 | const pt_entry_t ndst_pte = L2_S_PROTO | dst | | | 5695 | const pt_entry_t ndst_pte = L2_S_PROTO | dst | |
5664 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; | | 5696 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; |
5665 | l2pte_set(dst_ptep, ndst_pte, 0); | | 5697 | l2pte_set(dst_ptep, ndst_pte, 0); |
5666 | PTE_SYNC(dst_ptep); | | 5698 | PTE_SYNC(dst_ptep); |
5667 | cpu_tlb_flushD_SE(vdstp); | | 5699 | cpu_tlb_flushD_SE(vdstp); |
5668 | cpu_cpwait(); | | 5700 | cpu_cpwait(); |
5669 | #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) | | 5701 | #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) |
5670 | /* | | 5702 | /* |
5671 | * If we are direct-mapped and the color isn't ok, then, before | | 5703 | * If we are direct-mapped and the color isn't ok, then, before |
5672 | * we bcopy into the new page, invalidate its contents from the | | 5704 | * we bcopy into the new page, invalidate its contents from the |
5673 | * cache and reset it to its natural color. | | 5705 | * cache and reset it to its natural color. |
5674 | */ | | 5706 | */ |
5675 | cpu_dcache_inv_range(vdstp, PAGE_SIZE); | | 5707 | cpu_dcache_inv_range(vdstp, PAGE_SIZE); |
5676 | dst_md->pvh_attrs &= ~arm_cache_prefer_mask; | | 5708 | dst_md->pvh_attrs &= ~arm_cache_prefer_mask; |
5677 | dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask); | | 5709 | dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask); |
5678 | #endif | | 5710 | #endif |
5679 | } | | 5711 | } |
5680 | bcopy_page(vsrcp, vdstp); | | 5712 | bcopy_page(vsrcp, vdstp); |
5681 | #ifdef PMAP_CACHE_VIVT | | 5713 | #ifdef PMAP_CACHE_VIVT |
5682 | cpu_dcache_inv_range(vsrcp, PAGE_SIZE); | | 5714 | cpu_dcache_inv_range(vsrcp, PAGE_SIZE); |
5683 | cpu_dcache_wbinv_range(vdstp, PAGE_SIZE); | | 5715 | cpu_dcache_wbinv_range(vdstp, PAGE_SIZE); |
5684 | #endif | | 5716 | #endif |
5685 | /* | | 5717 | /* |
5686 | * Unmap the pages. | | 5718 | * Unmap the pages. |
5687 | */ | | 5719 | */ |
5688 | if (!src_okcolor) { | | 5720 | if (!src_okcolor) { |
5689 | l2pte_reset(src_ptep); | | 5721 | l2pte_reset(src_ptep); |
5690 | PTE_SYNC(src_ptep); | | 5722 | PTE_SYNC(src_ptep); |
5691 | cpu_tlb_flushD_SE(vsrcp); | | 5723 | cpu_tlb_flushD_SE(vsrcp); |
5692 | cpu_cpwait(); | | 5724 | cpu_cpwait(); |
5693 | } | | 5725 | } |
5694 | if (!dst_okcolor) { | | 5726 | if (!dst_okcolor) { |
5695 | l2pte_reset(dst_ptep); | | 5727 | l2pte_reset(dst_ptep); |
5696 | PTE_SYNC(dst_ptep); | | 5728 | PTE_SYNC(dst_ptep); |
5697 | cpu_tlb_flushD_SE(vdstp); | | 5729 | cpu_tlb_flushD_SE(vdstp); |
5698 | cpu_cpwait(); | | 5730 | cpu_cpwait(); |
5699 | } | | 5731 | } |
5700 | #ifdef PMAP_CACHE_VIPT | | 5732 | #ifdef PMAP_CACHE_VIPT |
5701 | /* | | 5733 | /* |
5702 | * Now that the destination page is in the cache, mark it as colored. | | 5734 | * Now that the destination page is in the cache, mark it as colored. |
5703 | * If this was an exec page, discard its exec status. | | 5735 | * If this was an exec page, discard its exec status. |
5704 | */ | | 5736 | */ |
5705 | pmap_acquire_page_lock(dst_md); | | 5737 | pmap_acquire_page_lock(dst_md); |
5706 | #ifndef ARM_MMU_EXTENDED | | 5738 | #ifndef ARM_MMU_EXTENDED |
5707 | if (arm_pcache.cache_type == CACHE_TYPE_PIPT) { | | 5739 | if (arm_pcache.cache_type == CACHE_TYPE_PIPT) { |
5708 | dst_md->pvh_attrs &= ~arm_cache_prefer_mask; | | 5740 | dst_md->pvh_attrs &= ~arm_cache_prefer_mask; |
5709 | dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask); | | 5741 | dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask); |
5710 | } | | 5742 | } |
5711 | if (!pmap_is_page_colored_p(dst_md)) { | | 5743 | if (!pmap_is_page_colored_p(dst_md)) { |
5712 | PMAPCOUNT(vac_color_new); | | 5744 | PMAPCOUNT(vac_color_new); |
5713 | dst_md->pvh_attrs |= PVF_COLORED; | | 5745 | dst_md->pvh_attrs |= PVF_COLORED; |
5714 | } | | 5746 | } |
5715 | dst_md->pvh_attrs |= PVF_DIRTY; | | 5747 | dst_md->pvh_attrs |= PVF_DIRTY; |
5716 | #endif | | 5748 | #endif |
5717 | if (PV_IS_EXEC_P(dst_md->pvh_attrs)) { | | 5749 | if (PV_IS_EXEC_P(dst_md->pvh_attrs)) { |
5718 | dst_md->pvh_attrs &= ~PVF_EXEC; | | 5750 | dst_md->pvh_attrs &= ~PVF_EXEC; |
5719 | PMAPCOUNT(exec_discarded_copy); | | 5751 | PMAPCOUNT(exec_discarded_copy); |
5720 | } | | 5752 | } |
5721 | pmap_release_page_lock(dst_md); | | 5753 | pmap_release_page_lock(dst_md); |
5722 | #endif | | 5754 | #endif |
5723 | } | | 5755 | } |
5724 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ | | 5756 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ |
5725 | | | 5757 | |
5726 | #if ARM_MMU_XSCALE == 1 | | 5758 | #if ARM_MMU_XSCALE == 1 |
5727 | void | | 5759 | void |
5728 | pmap_copy_page_xscale(paddr_t src, paddr_t dst) | | 5760 | pmap_copy_page_xscale(paddr_t src, paddr_t dst) |
5729 | { | | 5761 | { |
5730 | struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); | | 5762 | struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); |
5731 | struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); | | 5763 | struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); |
5732 | #ifdef DEBUG | | 5764 | #ifdef DEBUG |
5733 | struct vm_page_md *dst_md = VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst)); | | 5765 | struct vm_page_md *dst_md = VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst)); |
5734 | | | 5766 | |
5735 | if (!SLIST_EMPTY(&dst_md->pvh_list)) | | 5767 | if (!SLIST_EMPTY(&dst_md->pvh_list)) |
5736 | panic("pmap_copy_page: dst page has mappings"); | | 5768 | panic("pmap_copy_page: dst page has mappings"); |
5737 | #endif | | 5769 | #endif |
5738 | | | 5770 | |
5739 | KDASSERT((src & PGOFSET) == 0); | | 5771 | KDASSERT((src & PGOFSET) == 0); |
5740 | KDASSERT((dst & PGOFSET) == 0); | | 5772 | KDASSERT((dst & PGOFSET) == 0); |
5741 | | | 5773 | |
5742 | /* | | 5774 | /* |
5743 | * Clean the source page. Hold the source page's lock for | | 5775 | * Clean the source page. Hold the source page's lock for |
5744 | * the duration of the copy so that no other mappings can | | 5776 | * the duration of the copy so that no other mappings can |
5745 | * be created while we have a potentially aliased mapping. | | 5777 | * be created while we have a potentially aliased mapping. |
5746 | */ | | 5778 | */ |
5747 | #ifdef PMAP_CACHE_VIVT | | 5779 | #ifdef PMAP_CACHE_VIVT |
5748 | pmap_acquire_page_lock(src_md); | | 5780 | pmap_acquire_page_lock(src_md); |
5749 | (void) pmap_clean_page(src_md, true); | | 5781 | (void) pmap_clean_page(src_md, true); |
5750 | pmap_release_page_lock(src_md); | | 5782 | pmap_release_page_lock(src_md); |
5751 | #endif | | 5783 | #endif |
5752 | | | 5784 | |
5753 | /* | | 5785 | /* |
5754 | * Map the pages into the page hook points, copy them, and purge | | 5786 | * Map the pages into the page hook points, copy them, and purge |
5755 | * the cache for the appropriate page. Invalidate the TLB | | 5787 | * the cache for the appropriate page. Invalidate the TLB |
5756 | * as required. | | 5788 | * as required. |
5757 | */ | | 5789 | */ |
5758 | const pt_entry_t nsrc_pte = L2_S_PROTO | src | | 5790 | const pt_entry_t nsrc_pte = L2_S_PROTO | src |
5759 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | | 5791 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
5760 | | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ | | 5792 | | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ |
5761 | l2pte_set(csrc_pte, nsrc_pte, 0); | | 5793 | l2pte_set(csrc_pte, nsrc_pte, 0); |
5762 | PTE_SYNC(csrc_pte); | | 5794 | PTE_SYNC(csrc_pte); |
5763 | | | 5795 | |
5764 | const pt_entry_t ndst_pte = L2_S_PROTO | dst | | 5796 | const pt_entry_t ndst_pte = L2_S_PROTO | dst |
5765 | | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | | 5797 | | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
5766 | | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ | | 5798 | | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ |
5767 | l2pte_set(cdst_pte, ndst_pte, 0); | | 5799 | l2pte_set(cdst_pte, ndst_pte, 0); |
5768 | PTE_SYNC(cdst_pte); | | 5800 | PTE_SYNC(cdst_pte); |
5769 | | | 5801 | |
5770 | cpu_tlb_flushD_SE(csrcp); | | 5802 | cpu_tlb_flushD_SE(csrcp); |
5771 | cpu_tlb_flushD_SE(cdstp); | | 5803 | cpu_tlb_flushD_SE(cdstp); |
5772 | cpu_cpwait(); | | 5804 | cpu_cpwait(); |
5773 | bcopy_page(csrcp, cdstp); | | 5805 | bcopy_page(csrcp, cdstp); |
5774 | xscale_cache_clean_minidata(); | | 5806 | xscale_cache_clean_minidata(); |
5775 | l2pte_reset(csrc_pte); | | 5807 | l2pte_reset(csrc_pte); |
5776 | l2pte_reset(cdst_pte); | | 5808 | l2pte_reset(cdst_pte); |
5777 | PTE_SYNC(csrc_pte); | | 5809 | PTE_SYNC(csrc_pte); |
5778 | PTE_SYNC(cdst_pte); | | 5810 | PTE_SYNC(cdst_pte); |
5779 | } | | 5811 | } |
5780 | #endif /* ARM_MMU_XSCALE == 1 */ | | 5812 | #endif /* ARM_MMU_XSCALE == 1 */ |
5781 | | | 5813 | |
5782 | /* | | 5814 | /* |
5783 | * void pmap_virtual_space(vaddr_t *start, vaddr_t *end) | | 5815 | * void pmap_virtual_space(vaddr_t *start, vaddr_t *end) |
5784 | * | | 5816 | * |
5785 | * Return the start and end addresses of the kernel's virtual space. | | 5817 | * Return the start and end addresses of the kernel's virtual space. |
5786 | * These values are set up in pmap_bootstrap and are updated as pages | | 5818 | * These values are set up in pmap_bootstrap and are updated as pages |
5787 | * are allocated. | | 5819 | * are allocated. |
5788 | */ | | 5820 | */ |
5789 | void | | 5821 | void |
5790 | pmap_virtual_space(vaddr_t *start, vaddr_t *end) | | 5822 | pmap_virtual_space(vaddr_t *start, vaddr_t *end) |
5791 | { | | 5823 | { |
5792 | *start = virtual_avail; | | 5824 | *start = virtual_avail; |
5793 | *end = virtual_end; | | 5825 | *end = virtual_end; |
5794 | } | | 5826 | } |
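/*
 * A small usage sketch, assuming a machine-dependent bootstrap caller;
 * the md_kvm_init() name is hypothetical.
 */
#if 0	/* illustrative only */
static void
md_kvm_init(void)
{
	vaddr_t start, end;

	pmap_virtual_space(&start, &end);
	/* hand [start, end) to the kernel VM bootstrap here */
}
#endif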
5795 | | | 5827 | |
5796 | /* | | 5828 | /* |
5797 | * Helper function for pmap_grow_l2_bucket() | | 5829 | * Helper function for pmap_grow_l2_bucket() |
5798 | */ | | 5830 | */ |
5799 | static inline int | | 5831 | static inline int |
5800 | pmap_grow_map(vaddr_t va, paddr_t *pap) | | 5832 | pmap_grow_map(vaddr_t va, paddr_t *pap) |
5801 | { | | 5833 | { |
5802 | paddr_t pa; | | 5834 | paddr_t pa; |
5803 | | | 5835 | |
5804 | KASSERT((va & PGOFSET) == 0); | | 5836 | KASSERT((va & PGOFSET) == 0); |
5805 | | | 5837 | |
5806 | if (uvm.page_init_done == false) { | | 5838 | if (uvm.page_init_done == false) { |
5807 | #ifdef PMAP_STEAL_MEMORY | | 5839 | #ifdef PMAP_STEAL_MEMORY |
5808 | pv_addr_t pv; | | 5840 | pv_addr_t pv; |
5809 | pmap_boot_pagealloc(PAGE_SIZE, | | 5841 | pmap_boot_pagealloc(PAGE_SIZE, |
5810 | #ifdef PMAP_CACHE_VIPT | | 5842 | #ifdef PMAP_CACHE_VIPT |
5811 | arm_cache_prefer_mask, | | 5843 | arm_cache_prefer_mask, |
5812 | va & arm_cache_prefer_mask, | | 5844 | va & arm_cache_prefer_mask, |
5813 | #else | | 5845 | #else |
5814 | 0, 0, | | 5846 | 0, 0, |
5815 | #endif | | 5847 | #endif |
5816 | &pv); | | 5848 | &pv); |
5817 | pa = pv.pv_pa; | | 5849 | pa = pv.pv_pa; |
5818 | #else | | 5850 | #else |
5819 | if (uvm_page_physget(&pa) == false) | | 5851 | if (uvm_page_physget(&pa) == false) |
5820 | return 1; | | 5852 | return 1; |
5821 | #endif /* PMAP_STEAL_MEMORY */ | | 5853 | #endif /* PMAP_STEAL_MEMORY */ |
5822 | } else { | | 5854 | } else { |
5823 | struct vm_page *pg; | | 5855 | struct vm_page *pg; |
5824 | pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); | | 5856 | pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); |
5825 | if (pg == NULL) | | 5857 | if (pg == NULL) |
5826 | return 1; | | 5858 | return 1; |
5827 | pa = VM_PAGE_TO_PHYS(pg); | | 5859 | pa = VM_PAGE_TO_PHYS(pg); |
5828 | /* | | 5860 | /* |
5829 | * This new page must not have any mappings. | | 5861 | * This new page must not have any mappings. |
5830 | */ | | 5862 | */ |
5831 | struct vm_page_md *md __diagused = VM_PAGE_TO_MD(pg); | | 5863 | struct vm_page_md *md __diagused = VM_PAGE_TO_MD(pg); |
5832 | KASSERT(SLIST_EMPTY(&md->pvh_list)); | | 5864 | KASSERT(SLIST_EMPTY(&md->pvh_list)); |
5833 | } | | 5865 | } |
5834 | | | 5866 | |
5835 | /* | | 5867 | /* |
5836 | * Enter it via pmap_kenter_pa and let that routine do the hard work. | | 5868 | * Enter it via pmap_kenter_pa and let that routine do the hard work. |
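	 * PMAP_PTE asks for the page-table cache mode; the assertions
	 * below check that the new mapping actually got it.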
5837 | */ | | 5869 | */ |
5838 | pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, | | 5870 | pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, |
5839 | PMAP_KMPAGE | PMAP_PTE); | | 5871 | PMAP_KMPAGE | PMAP_PTE); |
5840 | | | 5872 | |
5841 | if (pap) | | 5873 | if (pap) |
5842 | *pap = pa; | | 5874 | *pap = pa; |
5843 | | | 5875 | |
5844 | PMAPCOUNT(pt_mappings); | | 5876 | PMAPCOUNT(pt_mappings); |
5845 | | | 5877 | |
5846 | const pmap_t kpm __diagused = pmap_kernel(); | | 5878 | const pmap_t kpm __diagused = pmap_kernel(); |
5847 | struct l2_bucket * const l2b __diagused = pmap_get_l2_bucket(kpm, va); | | 5879 | struct l2_bucket * const l2b __diagused = pmap_get_l2_bucket(kpm, va); |
5848 | KASSERT(l2b != NULL); | | 5880 | KASSERT(l2b != NULL); |
5849 | | | 5881 | |
5850 | pt_entry_t * const ptep __diagused = &l2b->l2b_kva[l2pte_index(va)]; | | 5882 | pt_entry_t * const ptep __diagused = &l2b->l2b_kva[l2pte_index(va)]; |
5851 | const pt_entry_t pte __diagused = *ptep; | | 5883 | const pt_entry_t pte __diagused = *ptep; |
5852 | KASSERT(l2pte_valid_p(pte)); | | 5884 | KASSERT(l2pte_valid_p(pte)); |
5853 | KASSERT((pte & L2_S_CACHE_MASK) == pte_l2_s_cache_mode_pt); | | 5885 | KASSERT((pte & L2_S_CACHE_MASK) == pte_l2_s_cache_mode_pt); |
5854 | | | 5886 | |
5855 | memset((void *)va, 0, PAGE_SIZE); | | 5887 | memset((void *)va, 0, PAGE_SIZE); |
5856 | | | 5888 | |
5857 | return 0; | | 5889 | return 0; |
5858 | } | | 5890 | } |
5859 | | | 5891 | |
5860 | /* | | 5892 | /* |
5861 | * This is the same as pmap_alloc_l2_bucket(), except that it is only | | 5893 | * This is the same as pmap_alloc_l2_bucket(), except that it is only |
5862 | * used by pmap_growkernel(). | | 5894 | * used by pmap_growkernel(). |
5863 | */ | | 5895 | */ |
5864 | static inline struct l2_bucket * | | 5896 | static inline struct l2_bucket * |
5865 | pmap_grow_l2_bucket(pmap_t pm, vaddr_t va) | | 5897 | pmap_grow_l2_bucket(pmap_t pm, vaddr_t va) |
5866 | { | | 5898 | { |
5867 | const size_t l1slot = l1pte_index(va); | | 5899 | const size_t l1slot = l1pte_index(va); |
5868 | struct l2_dtable *l2; | | 5900 | struct l2_dtable *l2; |
5869 | vaddr_t nva; | | 5901 | vaddr_t nva; |
5870 | | | 5902 | |
5871 | CTASSERT((PAGE_SIZE % L2_TABLE_SIZE_REAL) == 0); | | 5903 | CTASSERT((PAGE_SIZE % L2_TABLE_SIZE_REAL) == 0); |
5872 | if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL) { | | 5904 | if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL) { |
5873 | /* | | 5905 | /* |
5874 | * No mapping at this address, as there is | | 5906 | * No mapping at this address, as there is |
5875 | * no entry in the L1 table. | | 5907 | * no entry in the L1 table. |
5876 | * Need to allocate a new l2_dtable. | | 5908 | * Need to allocate a new l2_dtable. |
5877 | */ | | 5909 | */ |
5878 | nva = pmap_kernel_l2dtable_kva; | | 5910 | nva = pmap_kernel_l2dtable_kva; |
5879 | if ((nva & PGOFSET) == 0) { | | 5911 | if ((nva & PGOFSET) == 0) { |
5880 | /* | | 5912 | /* |
5881 | * Need to allocate a backing page | | 5913 | * Need to allocate a backing page |
5882 | */ | | 5914 | */ |
5883 | if (pmap_grow_map(nva, NULL)) | | 5915 | if (pmap_grow_map(nva, NULL)) |
5884 | return NULL; | | 5916 | return NULL; |
5885 | } | | 5917 | } |
5886 | | | 5918 | |
5887 | l2 = (struct l2_dtable *)nva; | | 5919 | l2 = (struct l2_dtable *)nva; |
5888 | nva += sizeof(struct l2_dtable); | | 5920 | nva += sizeof(struct l2_dtable); |
5889 | | | 5921 | |
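		/*
		 * nva was just advanced by sizeof(struct l2_dtable); if
		 * its offset within the page got *smaller*, the advance
		 * wrapped past a page boundary and the tail of the new
		 * l2_dtable lies on the next, not-yet-mapped page.
		 */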
5890 | if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) { | | 5922 | if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) { |
5891 | /* | | 5923 | /* |
5892 | * The new l2_dtable straddles a page boundary. | | 5924 | * The new l2_dtable straddles a page boundary. |
5893 | * Map in another page to cover it. | | 5925 | * Map in another page to cover it. |
5894 | */ | | 5926 | */ |
5895 | if (pmap_grow_map(nva & ~PGOFSET, NULL)) | | 5927 | if (pmap_grow_map(nva & ~PGOFSET, NULL)) |
5896 | return NULL; | | 5928 | return NULL; |
5897 | } | | 5929 | } |
5898 | | | 5930 | |
5899 | pmap_kernel_l2dtable_kva = nva; | | 5931 | pmap_kernel_l2dtable_kva = nva; |
5900 | | | 5932 | |
5901 | /* | | 5933 | /* |
5902 | * Link it into the parent pmap | | 5934 | * Link it into the parent pmap |
5903 | */ | | 5935 | */ |
5904 | pm->pm_l2[L2_IDX(l1slot)] = l2; | | 5936 | pm->pm_l2[L2_IDX(l1slot)] = l2; |
5905 | } | | 5937 | } |
5906 | | | 5938 | |
5907 | struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; | | 5939 | struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; |
5908 | | | 5940 | |
5909 | /* | | 5941 | /* |
5910 | * Fetch pointer to the L2 page table associated with the address. | | 5942 | * Fetch pointer to the L2 page table associated with the address. |
5911 | */ | | 5943 | */ |
5912 | if (l2b->l2b_kva == NULL) { | | 5944 | if (l2b->l2b_kva == NULL) { |
5913 | pt_entry_t *ptep; | | 5945 | pt_entry_t *ptep; |
5914 | | | 5946 | |
5915 | /* | | 5947 | /* |
5916 | * No L2 page table has been allocated. Chances are, this | | 5948 | * No L2 page table has been allocated. Chances are, this |
5917 | * is because we just allocated the l2_dtable, above. | | 5949 | * is because we just allocated the l2_dtable, above. |
5918 | */ | | 5950 | */ |
5919 | nva = pmap_kernel_l2ptp_kva; | | 5951 | nva = pmap_kernel_l2ptp_kva; |
5920 | ptep = (pt_entry_t *)nva; | | 5952 | ptep = (pt_entry_t *)nva; |
5921 | if ((nva & PGOFSET) == 0) { | | 5953 | if ((nva & PGOFSET) == 0) { |
5922 | /* | | 5954 | /* |
5923 | * Need to allocate a backing page | | 5955 | * Need to allocate a backing page |
5924 | */ | | 5956 | */ |
5925 | if (pmap_grow_map(nva, &pmap_kernel_l2ptp_phys)) | | 5957 | if (pmap_grow_map(nva, &pmap_kernel_l2ptp_phys)) |
5926 | return NULL; | | 5958 | return NULL; |
5927 | PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); | | 5959 | PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); |
5928 | } | | 5960 | } |
5929 | | | 5961 | |
5930 | l2->l2_occupancy++; | | 5962 | l2->l2_occupancy++; |
5931 | l2b->l2b_kva = ptep; | | 5963 | l2b->l2b_kva = ptep; |
5932 | l2b->l2b_l1slot = l1slot; | | 5964 | l2b->l2b_l1slot = l1slot; |
5933 | l2b->l2b_pa = pmap_kernel_l2ptp_phys; | | 5965 | l2b->l2b_pa = pmap_kernel_l2ptp_phys; |
5934 | | | 5966 | |
5935 | pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; | | 5967 | pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; |
5936 | pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; | | 5968 | pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; |
5937 | } | | 5969 | } |
5938 | | | 5970 | |
5939 | return l2b; | | 5971 | return l2b; |
5940 | } | | 5972 | } |
5941 | | | 5973 | |
5942 | vaddr_t | | 5974 | vaddr_t |
5943 | pmap_growkernel(vaddr_t maxkvaddr) | | 5975 | pmap_growkernel(vaddr_t maxkvaddr) |
5944 | { | | 5976 | { |
5945 | UVMHIST_FUNC(__func__); | | 5977 | UVMHIST_FUNC(__func__); |
5946 | UVMHIST_CALLARGS(maphist, "growing kernel from %#jx to %#jx\n", | | 5978 | UVMHIST_CALLARGS(maphist, "growing kernel from %#jx to %#jx\n", |
5947 | pmap_curmaxkvaddr, maxkvaddr, 0, 0); | | 5979 | pmap_curmaxkvaddr, maxkvaddr, 0, 0); |
5948 | | | 5980 | |
5949 | pmap_t kpm = pmap_kernel(); | | 5981 | pmap_t kpm = pmap_kernel(); |
5950 | #ifndef ARM_MMU_EXTENDED | | 5982 | #ifndef ARM_MMU_EXTENDED |
5951 | struct l1_ttable *l1; | | 5983 | struct l1_ttable *l1; |
5952 | #endif | | 5984 | #endif |
5953 | int s; | | 5985 | int s; |
5954 | | | 5986 | |
5955 | if (maxkvaddr <= pmap_curmaxkvaddr) | | 5987 | if (maxkvaddr <= pmap_curmaxkvaddr) |
5956 | goto out; /* we are OK */ | | 5988 | goto out; /* we are OK */ |
5957 | | | 5989 | |
5958 | KDASSERT(maxkvaddr <= virtual_end); | | 5990 | KDASSERT(maxkvaddr <= virtual_end); |
5959 | | | 5991 | |
5960 | /* | | 5992 | /* |
5961 | * whoops! we need to add kernel PTPs | | 5993 | * whoops! we need to add kernel PTPs |
5962 | */ | | 5994 | */ |
5963 | | | 5995 | |
5964 | vaddr_t pmap_maxkvaddr = pmap_curmaxkvaddr; | | 5996 | vaddr_t pmap_maxkvaddr = pmap_curmaxkvaddr; |
5965 | | | 5997 | |
5966 | s = splvm(); /* to be safe */ | | 5998 | s = splvm(); /* to be safe */ |
5967 | mutex_enter(&kpm_lock); | | 5999 | mutex_enter(&kpm_lock); |
5968 | | | 6000 | |
5969 | /* Map 1MB at a time */ | | 6001 | /* Map 1MB at a time */ |
5970 | size_t l1slot = l1pte_index(pmap_maxkvaddr); | | 6002 | size_t l1slot = l1pte_index(pmap_maxkvaddr); |
5971 | #ifdef ARM_MMU_EXTENDED | | 6003 | #ifdef ARM_MMU_EXTENDED |
5972 | pd_entry_t * const spdep = &kpm->pm_l1[l1slot]; | | 6004 | pd_entry_t * const spdep = &kpm->pm_l1[l1slot]; |
5973 | pd_entry_t *pdep = spdep; | | 6005 | pd_entry_t *pdep = spdep; |
5974 | #endif | | 6006 | #endif |
5975 | for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE, | | 6007 | for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE, |
5976 | #ifdef ARM_MMU_EXTENDED | | 6008 | #ifdef ARM_MMU_EXTENDED |
5977 | pdep++, | | 6009 | pdep++, |
5978 | #endif | | 6010 | #endif |
5979 | l1slot++) { | | 6011 | l1slot++) { |
5980 | struct l2_bucket *l2b = | | 6012 | struct l2_bucket *l2b = |
5981 | pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); | | 6013 | pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); |
5982 | KASSERT(l2b != NULL); | | 6014 | KASSERT(l2b != NULL); |
5983 | | | 6015 | |
5984 | const pd_entry_t npde = L1_C_PROTO | l2b->l2b_pa | | 6016 | const pd_entry_t npde = L1_C_PROTO | l2b->l2b_pa |
5985 | | L1_C_DOM(PMAP_DOMAIN_KERNEL); | | 6017 | | L1_C_DOM(PMAP_DOMAIN_KERNEL); |
5986 | #ifdef ARM_MMU_EXTENDED | | 6018 | #ifdef ARM_MMU_EXTENDED |
5987 | KASSERT(*pdep == 0); | | 6019 | KASSERT(*pdep == 0); |
5988 | l1pte_setone(pdep, npde); | | 6020 | l1pte_setone(pdep, npde); |
5989 | #else | | 6021 | #else |
5990 | /* Distribute new L1 entry to all other L1s */ | | 6022 | /* Distribute new L1 entry to all other L1s */ |
5991 | SLIST_FOREACH(l1, &l1_list, l1_link) { | | 6023 | SLIST_FOREACH(l1, &l1_list, l1_link) { |
5992 | pd_entry_t * const pdep = &l1->l1_kva[l1slot]; | | 6024 | pd_entry_t * const pdep = &l1->l1_kva[l1slot]; |
5993 | l1pte_setone(pdep, npde); | | 6025 | l1pte_setone(pdep, npde); |
5994 | PDE_SYNC(pdep); | | 6026 | PDE_SYNC(pdep); |
5995 | } | | 6027 | } |
5996 | #endif | | 6028 | #endif |
5997 | } | | 6029 | } |
5998 | #ifdef ARM_MMU_EXTENDED | | 6030 | #ifdef ARM_MMU_EXTENDED |
5999 | PDE_SYNC_RANGE(spdep, pdep - spdep); | | 6031 | PDE_SYNC_RANGE(spdep, pdep - spdep); |
6000 | #endif | | 6032 | #endif |
6001 | | | 6033 | |
6002 | #ifdef PMAP_CACHE_VIVT | | 6034 | #ifdef PMAP_CACHE_VIVT |
6003 | /* | | 6035 | /* |
6004 | * Flush out the cache. This is expensive, but growkernel | | 6036 | * Flush out the cache. This is expensive, but growkernel |
6005 | * happens rarely enough that we can afford it. | | 6037 | * happens rarely enough that we can afford it. |
6006 | */ | | 6038 | */ |
6007 | cpu_dcache_wbinv_all(); | | 6039 | cpu_dcache_wbinv_all(); |
6008 | cpu_tlb_flushD(); | | 6040 | cpu_tlb_flushD(); |
6009 | cpu_cpwait(); | | 6041 | cpu_cpwait(); |
6010 | #endif | | 6042 | #endif |
6011 | | | 6043 | |
6012 | mutex_exit(&kpm_lock); | | 6044 | mutex_exit(&kpm_lock); |
6013 | splx(s); | | 6045 | splx(s); |
6014 | | | 6046 | |
6015 | kasan_shadow_map((void *)pmap_maxkvaddr, | | 6047 | kasan_shadow_map((void *)pmap_maxkvaddr, |
6016 | (size_t)(pmap_curmaxkvaddr - pmap_maxkvaddr)); | | 6048 | (size_t)(pmap_curmaxkvaddr - pmap_maxkvaddr)); |
6017 | | | 6049 | |
6018 | out: | | 6050 | out: |
6019 | return pmap_curmaxkvaddr; | | 6051 | return pmap_curmaxkvaddr; |
6020 | } | | 6052 | } |
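/*
 * A minimal model of the growth arithmetic above, assuming nothing
 * beyond the loop itself: the new limit is the old one advanced one L1
 * section (L1_S_SIZE, 1MB) at a time until it covers maxkvaddr.
 */
#if 0	/* illustrative only */
static vaddr_t
grown_limit(vaddr_t cur, vaddr_t maxkvaddr)
{
	while (cur < maxkvaddr)
		cur += L1_S_SIZE;	/* one section per iteration */
	return cur;
}
#endif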
6021 | | | 6053 | |
6022 | /************************ Utility routines ****************************/ | | 6054 | /************************ Utility routines ****************************/ |
6023 | | | 6055 | |
6024 | #ifndef ARM_HAS_VBAR | | 6056 | #ifndef ARM_HAS_VBAR |
6025 | /* | | 6057 | /* |
6026 | * vector_page_setprot: | | 6058 | * vector_page_setprot: |
6027 | * | | 6059 | * |
6028 | * Manipulate the protection of the vector page. | | 6060 | * Manipulate the protection of the vector page. |
6029 | */ | | 6061 | */ |
6030 | void | | 6062 | void |
6031 | vector_page_setprot(int prot) | | 6063 | vector_page_setprot(int prot) |
6032 | { | | 6064 | { |
6033 | struct l2_bucket *l2b; | | 6065 | struct l2_bucket *l2b; |
6034 | pt_entry_t *ptep; | | 6066 | pt_entry_t *ptep; |
6035 | | | 6067 | |
6036 | #if defined(CPU_ARMV7) || defined(CPU_ARM11) | | 6068 | #if defined(CPU_ARMV7) || defined(CPU_ARM11) |
6037 | /* | | 6069 | /* |
6038 | * If we are using VBAR for the in-kernel vectors, then they are | | 6070 | * If we are using VBAR for the in-kernel vectors, then they are |
6039 | * already mapped in the kernel text, so there is nothing to do here. | | 6071 | * already mapped in the kernel text, so there is nothing to do here. |
6040 | */ | | 6072 | */ |
6041 | if (vector_page != ARM_VECTORS_LOW && vector_page != ARM_VECTORS_HIGH) { | | 6073 | if (vector_page != ARM_VECTORS_LOW && vector_page != ARM_VECTORS_HIGH) { |
6042 | KASSERT((armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0); | | 6074 | KASSERT((armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0); |
6043 | return; | | 6075 | return; |
6044 | } | | 6076 | } |
6045 | #endif | | 6077 | #endif |
6046 | | | 6078 | |
6047 | l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); | | 6079 | l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); |
6048 | KASSERT(l2b != NULL); | | 6080 | KASSERT(l2b != NULL); |
6049 | | | 6081 | |
6050 | ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; | | 6082 | ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; |
6051 | | | 6083 | |
6052 | const pt_entry_t opte = *ptep; | | 6084 | const pt_entry_t opte = *ptep; |
6053 | #ifdef ARM_MMU_EXTENDED | | 6085 | #ifdef ARM_MMU_EXTENDED |
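	/*
	 * Clear L2_XS_XN along with the protection bits so the vector
	 * page does not end up marked execute-never on MMUs that
	 * support XN.
	 */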
6054 | const pt_entry_t npte = (opte & ~(L2_S_PROT_MASK|L2_XS_XN)) | | 6086 | const pt_entry_t npte = (opte & ~(L2_S_PROT_MASK|L2_XS_XN)) |
6055 | | L2_S_PROT(PTE_KERNEL, prot); | | 6087 | | L2_S_PROT(PTE_KERNEL, prot); |
6056 | #else | | 6088 | #else |
6057 | const pt_entry_t npte = (opte & ~L2_S_PROT_MASK) | | 6089 | const pt_entry_t npte = (opte & ~L2_S_PROT_MASK) |
6058 | | L2_S_PROT(PTE_KERNEL, prot); | | 6090 | | L2_S_PROT(PTE_KERNEL, prot); |
6059 | #endif | | 6091 | #endif |
6060 | l2pte_set(ptep, npte, opte); | | 6092 | l2pte_set(ptep, npte, opte); |
6061 | PTE_SYNC(ptep); | | 6093 | PTE_SYNC(ptep); |
6062 | cpu_tlb_flushD_SE(vector_page); | | 6094 | cpu_tlb_flushD_SE(vector_page); |
6063 | cpu_cpwait(); | | 6095 | cpu_cpwait(); |
6064 | } | | 6096 | } |
6065 | #endif | | 6097 | #endif |
6066 | | | 6098 | |
6067 | /* | | 6099 | /* |
6068 | * Fetch pointers to the PDE/PTE for the given pmap/VA pair. | | 6100 | * Fetch pointers to the PDE/PTE for the given pmap/VA pair. |
6069 | * Returns true if the mapping exists, else false. | | 6101 | * Returns true if the mapping exists, else false. |
6070 | * | | 6102 | * |
6071 | * NOTE: This function is only used by a couple of arm-specific modules. | | 6103 | * NOTE: This function is only used by a couple of arm-specific modules. |
6072 | * It is not safe to take any pmap locks here, since we could be right | | 6104 | * It is not safe to take any pmap locks here, since we could be right |
6073 | * in the middle of debugging the pmap anyway... | | 6105 | * in the middle of debugging the pmap anyway... |
6074 | * | | 6106 | * |
6075 | * It is possible for this routine to return false even though a valid | | 6107 | * It is possible for this routine to return false even though a valid |
6076 | * mapping does exist. This is because we don't lock, so the metadata | | 6108 | * mapping does exist. This is because we don't lock, so the metadata |
6077 | * state may be inconsistent. | | 6109 | * state may be inconsistent. |
6078 | * | | 6110 | * |
6079 | * NOTE: We can return a NULL *ptp in the case where the L1 pde is | | 6111 | * NOTE: We can return a NULL *ptp in the case where the L1 pde is |
6080 | * a "section" mapping. | | 6112 | * a "section" mapping. |
6081 | */ | | 6113 | */ |
6082 | bool | | 6114 | bool |
6083 | pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp) | | 6115 | pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp) |
6084 | { | | 6116 | { |
6085 | struct l2_dtable *l2; | | 6117 | struct l2_dtable *l2; |
6086 | pd_entry_t *pdep, pde; | | 6118 | pd_entry_t *pdep, pde; |
6087 | pt_entry_t *ptep; | | 6119 | pt_entry_t *ptep; |
6088 | u_short l1slot; | | 6120 | u_short l1slot; |
6089 | | | 6121 | |
6090 | if (pm->pm_l1 == NULL) | | 6122 | if (pm->pm_l1 == NULL) |
6091 | return false; | | 6123 | return false; |
6092 | | | 6124 | |
6093 | l1slot = l1pte_index(va); | | 6125 | l1slot = l1pte_index(va); |
6094 | *pdp = pdep = pmap_l1_kva(pm) + l1slot; | | 6126 | *pdp = pdep = pmap_l1_kva(pm) + l1slot; |
6095 | pde = *pdep; | | 6127 | pde = *pdep; |
6096 | | | 6128 | |
6097 | if (l1pte_section_p(pde)) { | | 6129 | if (l1pte_section_p(pde)) { |
6098 | *ptp = NULL; | | 6130 | *ptp = NULL; |
6099 | return true; | | 6131 | return true; |
6100 | } | | 6132 | } |
6101 | | | 6133 | |
6102 | l2 = pm->pm_l2[L2_IDX(l1slot)]; | | 6134 | l2 = pm->pm_l2[L2_IDX(l1slot)]; |
6103 | if (l2 == NULL || | | 6135 | if (l2 == NULL || |
6104 | (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) { | | 6136 | (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) { |
6105 | return false; | | 6137 | return false; |
6106 | } | | 6138 | } |
6107 | | | 6139 | |
6108 | *ptp = &ptep[l2pte_index(va)]; | | 6140 | *ptp = &ptep[l2pte_index(va)]; |
6109 | return true; | | 6141 | return true; |
6110 | } | | 6142 | } |
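/*
 * A caller-side sketch of the section-mapping contract described above;
 * the va_is_section_mapped() helper is hypothetical.
 */
#if 0	/* illustrative only */
static bool
va_is_section_mapped(pmap_t pm, vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (!pmap_get_pde_pte(pm, va, &pdep, &ptep))
		return false;	/* no L1/L2 metadata for this VA */
	return ptep == NULL;	/* NULL *ptp => L1 section mapping */
}
#endif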
6111 | | | 6143 | |
6112 | bool | | 6144 | bool |
6113 | pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp) | | 6145 | pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp) |
6114 | { | | 6146 | { |
6115 | | | 6147 | |
6116 | if (pm->pm_l1 == NULL) | | 6148 | if (pm->pm_l1 == NULL) |
6117 | return false; | | 6149 | return false; |
6118 | | | 6150 | |
6119 | *pdp = pmap_l1_kva(pm) + l1pte_index(va); | | 6151 | *pdp = pmap_l1_kva(pm) + l1pte_index(va); |
6120 | | | 6152 | |
6121 | return true; | | 6153 | return true; |
6122 | } | | 6154 | } |
6123 | | | 6155 | |
6124 | /************************ Bootstrapping routines ****************************/ | | 6156 | /************************ Bootstrapping routines ****************************/ |
6125 | | | 6157 | |
6126 | #ifndef ARM_MMU_EXTENDED | | 6158 | #ifndef ARM_MMU_EXTENDED |
6127 | static void | | 6159 | static void |
6128 | pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) | | 6160 | pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) |
6129 | { | | 6161 | { |
6130 | int i; | | 6162 | int i; |
6131 | | | 6163 | |
6132 | l1->l1_kva = l1pt; | | 6164 | l1->l1_kva = l1pt; |
6133 | l1->l1_domain_use_count = 0; | | 6165 | l1->l1_domain_use_count = 0; |
6134 | l1->l1_domain_first = 0; | | 6166 | l1->l1_domain_first = 0; |
6135 | | | 6167 | |
6136 | for (i = 0; i < PMAP_DOMAINS; i++) | | 6168 | for (i = 0; i < PMAP_DOMAINS; i++) |
6137 | l1->l1_domain_free[i] = i + 1; | | 6169 | l1->l1_domain_free[i] = i + 1; |
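	/*
	 * The loop above threads l1_domain_free[] into a simple free
	 * list: slot i links to i + 1, with l1_domain_first starting
	 * the chain at 0, so domains are handed out in ascending order.
	 */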
6138 | | | 6170 | |
6139 | /* | | 6171 | /* |
6140 | * Copy the kernel's L1 entries to each new L1. | | 6172 | * Copy the kernel's L1 entries to each new L1. |
6141 | */ | | 6173 | */ |
6142 | if (pmap_initialized) | | 6174 | if (pmap_initialized) |
6143 | memcpy(l1pt, pmap_l1_kva(pmap_kernel()), L1_TABLE_SIZE); | | 6175 | memcpy(l1pt, pmap_l1_kva(pmap_kernel()), L1_TABLE_SIZE); |
6144 | | | 6176 | |
6145 | if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt, | | 6177 | if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt, |
6146 | &l1->l1_physaddr) == false) | | 6178 | &l1->l1_physaddr) == false) |
6147 | panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); | | 6179 | panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); |
6148 | | | 6180 | |
6149 | SLIST_INSERT_HEAD(&l1_list, l1, l1_link); | | 6181 | SLIST_INSERT_HEAD(&l1_list, l1, l1_link); |
6150 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); | | 6182 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); |
6151 | } | | 6183 | } |
6152 | #endif /* !ARM_MMU_EXTENDED */ | | 6184 | #endif /* !ARM_MMU_EXTENDED */ |
6153 | | | 6185 | |
6154 | /* | | 6186 | /* |
6155 | * pmap_bootstrap() is called from the board-specific initarm() routine | | 6187 | * pmap_bootstrap() is called from the board-specific initarm() routine |
6156 | * once the kernel L1/L2 descriptor tables have been set up. | | 6188 | * once the kernel L1/L2 descriptor tables have been set up. |
6157 | * | | 6189 | * |
6158 | * This is a somewhat convoluted process since pmap bootstrap is, effectively, | | 6190 | * This is a somewhat convoluted process since pmap bootstrap is, effectively, |
6159 | * spread over a number of disparate files/functions. | | 6191 | * spread over a number of disparate files/functions. |
6160 | * | | 6192 | * |
6161 | * We are passed the following parameters: | | 6193 | * We are passed the following parameters: |
6162 | * - vstart | | 6194 | * - vstart |
6163 | * 1MB-aligned start of managed kernel virtual memory. | | 6195 | * 1MB-aligned start of managed kernel virtual memory. |
6164 | * - vend | | 6196 | * - vend |
6165 | * 1MB-aligned end of managed kernel virtual memory. | | 6197 | * 1MB-aligned end of managed kernel virtual memory. |
6166 | * | | 6198 | * |
6167 | * We use 'kernel_l1pt' to build the metadata (struct l1_ttable and | | 6199 | * We use 'kernel_l1pt' to build the metadata (struct l1_ttable and |
6168 | * struct l2_dtable) necessary to track kernel mappings. | | 6200 | * struct l2_dtable) necessary to track kernel mappings. |
6169 | */ | | 6201 | */ |
6170 | #define PMAP_STATIC_L2_SIZE 16 | | 6202 | #define PMAP_STATIC_L2_SIZE 16 |
6171 | void | | 6203 | void |
6172 | pmap_bootstrap(vaddr_t vstart, vaddr_t vend) | | 6204 | pmap_bootstrap(vaddr_t vstart, vaddr_t vend) |
6173 | { | | 6205 | { |
6174 | static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; | | 6206 | static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; |
6175 | #ifndef ARM_MMU_EXTENDED | | 6207 | #ifndef ARM_MMU_EXTENDED |
6176 | static struct l1_ttable static_l1; | | 6208 | static struct l1_ttable static_l1; |
6177 | struct l1_ttable *l1 = &static_l1; | | 6209 | struct l1_ttable *l1 = &static_l1; |
6178 | #endif | | 6210 | #endif |
6179 | struct l2_dtable *l2; | | 6211 | struct l2_dtable *l2; |
6180 | struct l2_bucket *l2b; | | 6212 | struct l2_bucket *l2b; |
6181 | pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va; | | 6213 | pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va; |
6182 | pmap_t pm = pmap_kernel(); | | 6214 | pmap_t pm = pmap_kernel(); |
6183 | pt_entry_t *ptep; | | 6215 | pt_entry_t *ptep; |
6184 | paddr_t pa; | | 6216 | paddr_t pa; |
6185 | vsize_t size; | | 6217 | vsize_t size; |
6186 | int nptes, l2idx, l2next = 0; | | 6218 | int nptes, l2idx, l2next = 0; |
6187 | | | 6219 | |
6188 | #ifdef ARM_MMU_EXTENDED | | 6220 | #ifdef ARM_MMU_EXTENDED |
6189 | KASSERT(pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt); | | 6221 | KASSERT(pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt); |
6190 | KASSERT(pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt); | | 6222 | KASSERT(pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt); |
6191 | #endif | | 6223 | #endif |
6192 | | | 6224 | |
6193 | VPRINTF("kpm "); | | 6225 | VPRINTF("kpm "); |
6194 | /* | | 6226 | /* |
6195 | * Initialise the kernel pmap object | | 6227 | * Initialise the kernel pmap object |
6196 | */ | | 6228 | */ |
6197 | curcpu()->ci_pmap_cur = pm; | | 6229 | curcpu()->ci_pmap_cur = pm; |
6198 | #ifdef ARM_MMU_EXTENDED | | 6230 | #ifdef ARM_MMU_EXTENDED |
6199 | pm->pm_l1 = l1pt; | | 6231 | pm->pm_l1 = l1pt; |
6200 | pm->pm_l1_pa = kernel_l1pt.pv_pa; | | 6232 | pm->pm_l1_pa = kernel_l1pt.pv_pa; |
6201 | VPRINTF("tlb0 "); | | 6233 | VPRINTF("tlb0 "); |
6202 | pmap_tlb_info_init(&pmap_tlb0_info); | | 6234 | pmap_tlb_info_init(&pmap_tlb0_info); |
6203 | #ifdef MULTIPROCESSOR | | 6235 | #ifdef MULTIPROCESSOR |
6204 | VPRINTF("kcpusets "); | | 6236 | VPRINTF("kcpusets "); |
6205 | pm->pm_onproc = kcpuset_running; | | 6237 | pm->pm_onproc = kcpuset_running; |
6206 | pm->pm_active = kcpuset_running; | | 6238 | pm->pm_active = kcpuset_running; |
6207 | #endif | | 6239 | #endif |
6208 | #else | | 6240 | #else |
6209 | pm->pm_l1 = l1; | | 6241 | pm->pm_l1 = l1; |
6210 | #endif | | 6242 | #endif |
6211 | | | 6243 | |
6212 | VPRINTF("locks "); | | 6244 | VPRINTF("locks "); |
6213 | /* | | 6245 | /* |