@@ -1,1218 +1,1218 @@
-/* $NetBSD: pmap.c,v 1.319 2015/04/11 15:21:33 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.320 2015/04/13 16:19:42 matt Exp $ */

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001-2002 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * armv6 and VIPT cache support by 3am Software Foundry,
 * Copyright (c) 2007 Microsoft
 */

/*
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil@causality.com>.
 */

/*
 * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables
 * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi
 * Systems, Inc.
 *
 * There are still a few things outstanding at this time:
 *
 *   - There are some unresolved issues for MP systems:
 *
 *     o The L1 metadata needs a lock, or more specifically, some places
 *       need to acquire an exclusive lock when modifying L1 translation
 *       table entries.
 *
 *     o When one cpu modifies an L1 entry, and that L1 table is also
 *       being used by another cpu, then the latter will need to be told
 *       that a tlb invalidation may be necessary. (But only if the old
 *       domain number in the L1 entry being over-written is currently
 *       the active domain on that cpu). I guess there are lots more tlb
 *       shootdown issues too...
 *
 *     o If the vector_page is at 0x00000000 instead of in kernel VA space,
 *       then MP systems will lose big-time because of the MMU domain hack.
 *       The only way this can be solved (apart from moving the vector
 *       page to 0xffff0000) is to reserve the first 1MB of user address
 *       space for kernel use only. This would require re-linking all
 *       applications so that the text section starts above this 1MB
 *       boundary.
 *
 *     o Tracking which VM space is resident in the cache/tlb has not yet
 *       been implemented for MP systems.
 *
 *     o Finally, there is a pathological condition where two cpus running
 *       two separate processes (not lwps) which happen to share an L1
 *       can get into a fight over one or more L1 entries. This will result
 *       in a significant slow-down if both processes are in tight loops.
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG           - Build in pmap_debug_level code
 */

/* Include header files */

#include "opt_arm_debug.h"
#include "opt_cpuoptions.h"
#include "opt_pmap_debug.h"
#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"

#ifdef MULTIPROCESSOR
#define _INTR_PRIVATE
#endif

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/intr.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/cdefs.h>
#include <sys/cpu.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/atomic.h>
#include <sys/kernhist.h>

#include <uvm/uvm.h>

#include <arm/locore.h>

-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.319 2015/04/11 15:21:33 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.320 2015/04/13 16:19:42 matt Exp $");

//#define PMAP_DEBUG
#ifdef PMAP_DEBUG

/* XXX need to get rid of all refs to this */
int pmap_debug_level = 0;

/*
 * for switching to potentially finer grained debugging
 */
#define PDB_FOLLOW      0x0001
#define PDB_INIT        0x0002
#define PDB_ENTER       0x0004
#define PDB_REMOVE      0x0008
#define PDB_CREATE      0x0010
#define PDB_PTPAGE      0x0020
#define PDB_GROWKERN    0x0040
#define PDB_BITS        0x0080
#define PDB_COLLECT     0x0100
#define PDB_PROTECT     0x0200
#define PDB_MAP_L1      0x0400
#define PDB_BOOTSTRAP   0x1000
#define PDB_PARANOIA    0x2000
#define PDB_WIRING      0x4000
#define PDB_PVDUMP      0x8000
#define PDB_VAC         0x10000
#define PDB_KENTER      0x20000
#define PDB_KREMOVE     0x40000
#define PDB_EXEC        0x80000

int debugmap = 1;
int pmapdebug = 0;
#define NPDEBUG(_lev_,_stat_) \
        if (pmapdebug & (_lev_)) \
                ((_stat_))

#else   /* PMAP_DEBUG */
#define NPDEBUG(_lev_,_stat_)   /* Nothing */
#endif  /* PMAP_DEBUG */
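
A quick usage sketch (the call site and message below are hypothetical, not taken from this file): callers wrap diagnostics in NPDEBUG so that setting, say, pmapdebug = PDB_ENTER | PDB_REMOVE at run time enables only enter/remove tracing, while kernels built without PMAP_DEBUG compile the statement away entirely.

        NPDEBUG(PDB_ENTER,
            printf("pmap_enter: pm=%p va=%#lx pa=%#lx\n", pm, va, pa));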

/*
 * pmap_kernel() points here
 */
static struct pmap      kernel_pmap_store = {
#ifndef ARM_MMU_EXTENDED
        .pm_activated = true,
        .pm_domain = PMAP_DOMAIN_KERNEL,
        .pm_cstate.cs_all = PMAP_CACHE_STATE_ALL,
#endif
};
struct pmap * const     kernel_pmap_ptr = &kernel_pmap_store;
#undef pmap_kernel
#define pmap_kernel()   (&kernel_pmap_store)
#ifdef PMAP_NEED_ALLOC_POOLPAGE
int                     arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT;
#endif

/*
 * Pool and cache that pmap structures are allocated from.
 * We use a cache to avoid clearing the pm_l2[] array (1KB)
 * in pmap_create().
 */
static struct pool_cache pmap_cache;
static LIST_HEAD(, pmap) pmap_pmaps;
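
The trick behind the cache: a pool cache constructor runs only when a fresh object enters the cache, and objects released with pool_cache_put() come back still constructed, so pmap_create() never has to re-zero pm_l2[]. A minimal sketch of that setup, following the usual pool_cache_bootstrap() pattern (the exact arguments used by this file may differ):

        static int
        pmap_pmap_ctor(void *arg, void *obj, int flags)
        {
                /* runs once per object construction, not per pmap_create() */
                memset(obj, 0, sizeof(struct pmap));
                return 0;
        }

        pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
            "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL);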

/*
 * Pool of PV structures
 */
static struct pool pmap_pv_pool;
static void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
static void pmap_bootstrap_pv_page_free(struct pool *, void *);
static struct pool_allocator pmap_bootstrap_pv_allocator = {
        pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
};

/*
 * Pool and cache of l2_dtable structures.
 * We use a cache to avoid clearing the structures when they're
 * allocated. (196 bytes)
 */
static struct pool_cache pmap_l2dtable_cache;
static vaddr_t pmap_kernel_l2dtable_kva;

/*
 * Pool and cache of L2 page descriptors.
 * We use a cache to avoid clearing the descriptor table
 * when they're allocated. (1KB)
 */
static struct pool_cache pmap_l2ptp_cache;
static vaddr_t pmap_kernel_l2ptp_kva;
static paddr_t pmap_kernel_l2ptp_phys;

#ifdef PMAPCOUNTERS
#define PMAP_EVCNT_INITIALIZER(name) \
        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)

#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
static struct evcnt pmap_ev_vac_clean_one =
    PMAP_EVCNT_INITIALIZER("clean page (1 color)");
static struct evcnt pmap_ev_vac_flush_one =
    PMAP_EVCNT_INITIALIZER("flush page (1 color)");
static struct evcnt pmap_ev_vac_flush_lots =
    PMAP_EVCNT_INITIALIZER("flush page (2+ colors)");
static struct evcnt pmap_ev_vac_flush_lots2 =
    PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)");
EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one);
EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one);
EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots);
EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2);

static struct evcnt pmap_ev_vac_color_new =
    PMAP_EVCNT_INITIALIZER("new page color");
static struct evcnt pmap_ev_vac_color_reuse =
    PMAP_EVCNT_INITIALIZER("ok first page color");
static struct evcnt pmap_ev_vac_color_ok =
    PMAP_EVCNT_INITIALIZER("ok page color");
static struct evcnt pmap_ev_vac_color_blind =
    PMAP_EVCNT_INITIALIZER("blind page color");
static struct evcnt pmap_ev_vac_color_change =
    PMAP_EVCNT_INITIALIZER("change page color");
static struct evcnt pmap_ev_vac_color_erase =
    PMAP_EVCNT_INITIALIZER("erase page color");
static struct evcnt pmap_ev_vac_color_none =
    PMAP_EVCNT_INITIALIZER("no page color");
static struct evcnt pmap_ev_vac_color_restore =
    PMAP_EVCNT_INITIALIZER("restore page color");

EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore);
#endif

static struct evcnt pmap_ev_mappings =
    PMAP_EVCNT_INITIALIZER("pages mapped");
static struct evcnt pmap_ev_unmappings =
    PMAP_EVCNT_INITIALIZER("pages unmapped");
static struct evcnt pmap_ev_remappings =
    PMAP_EVCNT_INITIALIZER("pages remapped");

EVCNT_ATTACH_STATIC(pmap_ev_mappings);
EVCNT_ATTACH_STATIC(pmap_ev_unmappings);
EVCNT_ATTACH_STATIC(pmap_ev_remappings);

static struct evcnt pmap_ev_kernel_mappings =
    PMAP_EVCNT_INITIALIZER("kernel pages mapped");
static struct evcnt pmap_ev_kernel_unmappings =
    PMAP_EVCNT_INITIALIZER("kernel pages unmapped");
static struct evcnt pmap_ev_kernel_remappings =
    PMAP_EVCNT_INITIALIZER("kernel pages remapped");

EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings);
EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings);
EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings);

static struct evcnt pmap_ev_kenter_mappings =
    PMAP_EVCNT_INITIALIZER("kenter pages mapped");
static struct evcnt pmap_ev_kenter_unmappings =
    PMAP_EVCNT_INITIALIZER("kenter pages unmapped");
static struct evcnt pmap_ev_kenter_remappings =
    PMAP_EVCNT_INITIALIZER("kenter pages remapped");
static struct evcnt pmap_ev_pt_mappings =
    PMAP_EVCNT_INITIALIZER("page table pages mapped");

EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings);
EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings);
EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings);
EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings);

static struct evcnt pmap_ev_fixup_mod =
    PMAP_EVCNT_INITIALIZER("page modification emulations");
static struct evcnt pmap_ev_fixup_ref =
    PMAP_EVCNT_INITIALIZER("page reference emulations");
static struct evcnt pmap_ev_fixup_exec =
    PMAP_EVCNT_INITIALIZER("exec pages fixed up");
static struct evcnt pmap_ev_fixup_pdes =
    PMAP_EVCNT_INITIALIZER("pdes fixed up");
#ifndef ARM_MMU_EXTENDED
static struct evcnt pmap_ev_fixup_ptesync =
    PMAP_EVCNT_INITIALIZER("ptesync fixed");
#endif

EVCNT_ATTACH_STATIC(pmap_ev_fixup_mod);
EVCNT_ATTACH_STATIC(pmap_ev_fixup_ref);
EVCNT_ATTACH_STATIC(pmap_ev_fixup_exec);
EVCNT_ATTACH_STATIC(pmap_ev_fixup_pdes);
#ifndef ARM_MMU_EXTENDED
EVCNT_ATTACH_STATIC(pmap_ev_fixup_ptesync);
#endif

#ifdef PMAP_CACHE_VIPT
static struct evcnt pmap_ev_exec_mappings =
    PMAP_EVCNT_INITIALIZER("exec pages mapped");
static struct evcnt pmap_ev_exec_cached =
    PMAP_EVCNT_INITIALIZER("exec pages cached");

EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings);
EVCNT_ATTACH_STATIC(pmap_ev_exec_cached);

static struct evcnt pmap_ev_exec_synced =
    PMAP_EVCNT_INITIALIZER("exec pages synced");
static struct evcnt pmap_ev_exec_synced_map =
    PMAP_EVCNT_INITIALIZER("exec pages synced (MP)");
#ifndef ARM_MMU_EXTENDED
static struct evcnt pmap_ev_exec_synced_unmap =
    PMAP_EVCNT_INITIALIZER("exec pages synced (UM)");
static struct evcnt pmap_ev_exec_synced_remap =
    PMAP_EVCNT_INITIALIZER("exec pages synced (RM)");
static struct evcnt pmap_ev_exec_synced_clearbit =
    PMAP_EVCNT_INITIALIZER("exec pages synced (DG)");
static struct evcnt pmap_ev_exec_synced_kremove =
    PMAP_EVCNT_INITIALIZER("exec pages synced (KU)");
#endif

EVCNT_ATTACH_STATIC(pmap_ev_exec_synced);
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map);
#ifndef ARM_MMU_EXTENDED
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap);
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap);
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit);
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove);
#endif

static struct evcnt pmap_ev_exec_discarded_unmap =
    PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)");
static struct evcnt pmap_ev_exec_discarded_zero =
    PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)");
static struct evcnt pmap_ev_exec_discarded_copy =
    PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)");
static struct evcnt pmap_ev_exec_discarded_page_protect =
    PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)");
static struct evcnt pmap_ev_exec_discarded_clearbit =
    PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)");
static struct evcnt pmap_ev_exec_discarded_kremove =
    PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)");
#ifdef ARM_MMU_EXTENDED
static struct evcnt pmap_ev_exec_discarded_modfixup =
    PMAP_EVCNT_INITIALIZER("exec pages discarded (MF)");
#endif

EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap);
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero);
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy);
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect);
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit);
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove);
#ifdef ARM_MMU_EXTENDED
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_modfixup);
#endif
#endif /* PMAP_CACHE_VIPT */

static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates");
static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects");
static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations");

EVCNT_ATTACH_STATIC(pmap_ev_updates);
EVCNT_ATTACH_STATIC(pmap_ev_collects);
EVCNT_ATTACH_STATIC(pmap_ev_activations);

#define PMAPCOUNT(x)    ((void)(pmap_ev_##x.ev_count++))
#else
#define PMAPCOUNT(x)    ((void)0)
#endif
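
Because of the ## token paste, the macro argument names the counter directly; for example the line below (an illustrative call, though this is how the macro is used throughout the file) bumps pmap_ev_mappings, and the whole expression collapses to ((void)0) in kernels built without PMAPCOUNTERS:

        PMAPCOUNT(mappings);    /* increments pmap_ev_mappings.ev_count */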

/*
 * pmap copy/zero page, and mem(5) hook point
 */
static pt_entry_t *csrc_pte, *cdst_pte;
static vaddr_t csrcp, cdstp;
#ifdef MULTIPROCESSOR
static size_t cnptes;
#define cpu_csrc_pte(o) (csrc_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT))
#define cpu_cdst_pte(o) (cdst_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT))
#define cpu_csrcp(o)    (csrcp + L2_S_SIZE * cnptes * cpu_number() + (o))
#define cpu_cdstp(o)    (cdstp + L2_S_SIZE * cnptes * cpu_number() + (o))
#else
#define cpu_csrc_pte(o) (csrc_pte + ((o) >> L2_S_SHIFT))
#define cpu_cdst_pte(o) (cdst_pte + ((o) >> L2_S_SHIFT))
#define cpu_csrcp(o)    (csrcp + (o))
#define cpu_cdstp(o)    (cdstp + (o))
#endif
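
These macros give each CPU a private pair of kernel VAs through which arbitrary physical pages can be copied or zeroed. A condensed sketch of how the zero-page path uses its destination window (cache-color selection, cpu_cpwait() and assertions omitted; treat the exact PTE bits as illustrative):

        /* point this CPU's destination window at the victim page ... */
        *cpu_cdst_pte(0) = L2_S_PROTO | pa |
            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
        PTE_SYNC(cpu_cdst_pte(0));
        cpu_tlb_flushD_SE(cpu_cdstp(0));
        /* ... then zero the page through the window */
        memset((void *)cpu_cdstp(0), 0, PAGE_SIZE);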
vaddr_t memhook;                        /* used by mem.c & others */
kmutex_t memlock __cacheline_aligned;   /* used by mem.c & others */
kmutex_t pmap_lock __cacheline_aligned;
extern void *msgbufaddr;
int pmap_kmpages;
/*
 * Flag to indicate if pmap_init() has done its thing
 */
bool pmap_initialized;

#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
/*
 * Start of direct-mapped memory
 */
vaddr_t pmap_directbase = KERNEL_BASE;
#endif

/*
 * Misc. locking data structures
 */

static inline void
pmap_acquire_pmap_lock(pmap_t pm)
{
        if (pm == pmap_kernel()) {
#ifdef MULTIPROCESSOR
                KERNEL_LOCK(1, NULL);
#endif
        } else {
                mutex_enter(pm->pm_lock);
        }
}

static inline void
pmap_release_pmap_lock(pmap_t pm)
{
        if (pm == pmap_kernel()) {
#ifdef MULTIPROCESSOR
                KERNEL_UNLOCK_ONE(NULL);
#endif
        } else {
                mutex_exit(pm->pm_lock);
        }
}
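
The asymmetry is deliberate: user pmaps each carry their own mutex, while the kernel pmap piggy-backs on the big kernel lock on MP kernels and compiles to nothing on uniprocessor ones. Callers simply bracket any walk or modification of a pmap's L1/L2 state, as in this illustrative fragment:

        pmap_acquire_pmap_lock(pm);
        /* ... look up or rewrite this pmap's L1/L2 entries ... */
        pmap_release_pmap_lock(pm);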

static inline void
pmap_acquire_page_lock(struct vm_page_md *md)
{
        mutex_enter(&pmap_lock);
}

static inline void
pmap_release_page_lock(struct vm_page_md *md)
{
        mutex_exit(&pmap_lock);
}

#ifdef DIAGNOSTIC
static inline int
pmap_page_locked_p(struct vm_page_md *md)
{
        return mutex_owned(&pmap_lock);
}
#endif


/*
 * Metadata for L1 translation tables.
 */
#ifndef ARM_MMU_EXTENDED
struct l1_ttable {
        /* Entry on the L1 Table list */
        SLIST_ENTRY(l1_ttable) l1_link;

        /* Entry on the L1 Least Recently Used list */
        TAILQ_ENTRY(l1_ttable) l1_lru;

        /* Track how many domains are allocated from this L1 */
        volatile u_int l1_domain_use_count;

        /*
         * A free-list of domain numbers for this L1.
         * We avoid using ffs() and a bitmap to track domains since ffs()
         * is slow on ARM.
         */
        uint8_t l1_domain_first;
        uint8_t l1_domain_free[PMAP_DOMAINS];

        /* Physical address of this L1 page table */
        paddr_t l1_physaddr;

        /* KVA of this L1 page table */
        pd_entry_t *l1_kva;
};
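
The free-list is an implicit singly linked list threaded through the array: l1_domain_first names a free domain, and l1_domain_free[d] names the next free domain after d. Allocation and release are O(1) pops and pushes, roughly as below (a sketch of the intent, not the verbatim allocator, which also biases the stored numbers):

        /* allocate a domain from l1: pop the head of the free-list */
        u_int domain = l1->l1_domain_first;
        l1->l1_domain_first = l1->l1_domain_free[domain];
        l1->l1_domain_use_count++;

        /* return it: push the domain back on the head */
        l1->l1_domain_free[domain] = l1->l1_domain_first;
        l1->l1_domain_first = domain;
        l1->l1_domain_use_count--;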

/*
 * L1 Page Tables are tracked using a Least Recently Used list.
 *  - New L1s are allocated from the HEAD.
 *  - Freed L1s are added to the TAIL.
 *  - Recently accessed L1s (where an 'access' is some change to one of
 *    the userland pmaps which owns this L1) are moved to the TAIL.
 */
static TAILQ_HEAD(, l1_ttable) l1_lru_list;
static kmutex_t l1_lru_lock __cacheline_aligned;
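
Allocation always takes the head, which is the least recently touched L1, and anything touched or freed goes back to the tail. In TAILQ terms the policy looks like this sketch:

        struct l1_ttable *l1 = TAILQ_FIRST(&l1_lru_list);   /* coldest L1 */
        TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
        /* ... hand out a domain from l1 ... */
        TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);        /* now recently used */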

/*
 * A list of all L1 tables
 */
static SLIST_HEAD(, l1_ttable) l1_list;
#endif /* ARM_MMU_EXTENDED */

/*
 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
 *
 * This is normally 16MB worth of L2 page descriptors for any given pmap.
 * Reference counts are maintained for L2 descriptors so they can be
 * freed when empty.
 */
struct l2_bucket {
        pt_entry_t *l2b_kva;            /* KVA of L2 Descriptor Table */
        paddr_t l2b_pa;                 /* Physical address of same */
        u_short l2b_l1slot;             /* This L2 table's L1 index */
        u_short l2b_occupancy;          /* How many active descriptors */
};

struct l2_dtable {
        /* The number of L2 page descriptors allocated to this l2_dtable */
        u_int l2_occupancy;

        /* List of L2 page descriptors */
        struct l2_bucket l2_bucket[L2_BUCKET_SIZE];
};

/*
 * Given an L1 table index, calculate the corresponding l2_dtable index
 * and bucket index within the l2_dtable.
 */
#define L2_BUCKET_XSHIFT        (L2_BUCKET_XLOG2 - L1_S_SHIFT)
#define L2_BUCKET_XFRAME        (~(vaddr_t)0 << L2_BUCKET_XLOG2)
#define L2_BUCKET_IDX(l1slot)   ((l1slot) >> L2_BUCKET_XSHIFT)
#define L2_IDX(l1slot)          (L2_BUCKET_IDX(l1slot) >> L2_BUCKET_LOG2)
#define L2_BUCKET(l1slot)       (L2_BUCKET_IDX(l1slot) & (L2_BUCKET_SIZE - 1))

__CTASSERT(0x100000000ULL == ((uint64_t)L2_SIZE * L2_BUCKET_SIZE * L1_S_SIZE));
__CTASSERT(L2_BUCKET_XFRAME == ~(L2_BUCKET_XSIZE-1));
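
Put together, a PTE lookup is a two-level radix walk keyed on the VA's L1 slot. Ignoring the NULL checks the real lookup has to make for not-yet-allocated tables, the chain looks like this sketch:

        const size_t l1slot = l1pte_index(va);
        struct l2_dtable *l2 = pm->pm_l2[L2_IDX(l1slot)];
        struct l2_bucket *l2b = &l2->l2_bucket[L2_BUCKET(l1slot)];
        pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)];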

/*
 * Given a virtual address, this macro returns the
 * virtual address required to drop into the next L2 bucket.
 */
#define L2_NEXT_BUCKET_VA(va)   (((va) & L2_BUCKET_XFRAME) + L2_BUCKET_XSIZE)
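
Range operations use this to advance bucket by bucket, so each l2_bucket is looked up once per pass rather than once per page; the remove/protect paths follow this general loop shape:

        while (sva < eva) {
                vaddr_t next_bucket = L2_NEXT_BUCKET_VA(sva);
                if (next_bucket > eva)
                        next_bucket = eva;
                /* ... process the PTEs covering [sva, next_bucket) ... */
                sva = next_bucket;
        }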

/*
 * L2 allocation.
 */
#define pmap_alloc_l2_dtable()          \
        pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT)
#define pmap_free_l2_dtable(l2)         \
        pool_cache_put(&pmap_l2dtable_cache, (l2))
#define pmap_alloc_l2_ptp(pap)          \
        ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\
        PR_NOWAIT, (pap)))

/*
 * We try to map the page tables write-through, if possible. However, not
 * all CPUs have a write-through cache mode, so on those we have to sync
 * the cache when we frob page tables.
 *
 * We try to evaluate this at compile time, if possible. However, it's
 * not always possible to do that, hence this run-time var.
 */
int     pmap_needs_pte_sync;
677 | | | 677 | |
678 | /* | | 678 | /* |
679 | * Real definition of pv_entry. | | 679 | * Real definition of pv_entry. |
680 | */ | | 680 | */ |
681 | struct pv_entry { | | 681 | struct pv_entry { |
682 | SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ | | 682 | SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ |
683 | pmap_t pv_pmap; /* pmap where mapping lies */ | | 683 | pmap_t pv_pmap; /* pmap where mapping lies */ |
684 | vaddr_t pv_va; /* virtual address for mapping */ | | 684 | vaddr_t pv_va; /* virtual address for mapping */ |
685 | u_int pv_flags; /* flags */ | | 685 | u_int pv_flags; /* flags */ |
686 | }; | | 686 | }; |
687 | | | 687 | |
688 | /* | | 688 | /* |
689 | * Macros to determine if a mapping might be resident in the | | 689 | * Macros to determine if a mapping might be resident in the |
690 | * instruction/data cache and/or TLB | | 690 | * instruction/data cache and/or TLB |
691 | */ | | 691 | */ |
692 | #if ARM_MMU_V7 > 0 && !defined(ARM_MMU_EXTENDED) | | 692 | #if ARM_MMU_V7 > 0 && !defined(ARM_MMU_EXTENDED) |
693 | /* | | 693 | /* |
694 | * Speculative loads by Cortex cores can cause TLB entries to be filled even if | | 694 | * Speculative loads by Cortex cores can cause TLB entries to be filled even if |
695 | * there are no explicit accesses, so there may always be TLB entries to | | 695 | * there are no explicit accesses, so there may always be TLB entries to |
696 | * flush. If we used ASIDs then this would not be a problem. | | 696 | * flush. If we used ASIDs then this would not be a problem. |
697 | */ | | 697 | */ |
698 | #define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) == PVF_EXEC) | | 698 | #define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) == PVF_EXEC) |
699 | #define PV_BEEN_REFD(f) (true) | | 699 | #define PV_BEEN_REFD(f) (true) |
700 | #else | | 700 | #else |
701 | #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) | | 701 | #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) |
702 | #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) | | 702 | #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) |
703 | #endif | | 703 | #endif |
704 | #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) | | 704 | #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) |
705 | #define PV_IS_KENTRY_P(f) (((f) & PVF_KENTRY) != 0) | | 705 | #define PV_IS_KENTRY_P(f) (((f) & PVF_KENTRY) != 0) |
706 | #define PV_IS_WRITE_P(f) (((f) & PVF_WRITE) != 0) | | 706 | #define PV_IS_WRITE_P(f) (((f) & PVF_WRITE) != 0) |
707 | | | 707 | |
708 | /* | | 708 | /* |
709 | * Local prototypes | | 709 | * Local prototypes |
710 | */ | | 710 | */ |
711 | static bool pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t, size_t); | | 711 | static bool pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t, size_t); |
712 | static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *, | | 712 | static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *, |
713 | pt_entry_t **); | | 713 | pt_entry_t **); |
714 | static bool pmap_is_current(pmap_t) __unused; | | 714 | static bool pmap_is_current(pmap_t) __unused; |
715 | static bool pmap_is_cached(pmap_t); | | 715 | static bool pmap_is_cached(pmap_t); |
716 | static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *, | | 716 | static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *, |
717 | pmap_t, vaddr_t, u_int); | | 717 | pmap_t, vaddr_t, u_int); |
718 | static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t); | | 718 | static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t); |
719 | static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 719 | static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
720 | static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t, | | 720 | static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t, |
721 | u_int, u_int); | | 721 | u_int, u_int); |
722 | | | 722 | |
723 | static void pmap_pinit(pmap_t); | | 723 | static void pmap_pinit(pmap_t); |
724 | static int pmap_pmap_ctor(void *, void *, int); | | 724 | static int pmap_pmap_ctor(void *, void *, int); |
725 | | | 725 | |
726 | static void pmap_alloc_l1(pmap_t); | | 726 | static void pmap_alloc_l1(pmap_t); |
727 | static void pmap_free_l1(pmap_t); | | 727 | static void pmap_free_l1(pmap_t); |
728 | #ifndef ARM_MMU_EXTENDED | | 728 | #ifndef ARM_MMU_EXTENDED |
729 | static void pmap_use_l1(pmap_t); | | 729 | static void pmap_use_l1(pmap_t); |
730 | #endif | | 730 | #endif |
731 | | | 731 | |
732 | static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t); | | 732 | static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t); |
733 | static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t); | | 733 | static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t); |
734 | static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); | | 734 | static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); |
735 | static int pmap_l2ptp_ctor(void *, void *, int); | | 735 | static int pmap_l2ptp_ctor(void *, void *, int); |
736 | static int pmap_l2dtable_ctor(void *, void *, int); | | 736 | static int pmap_l2dtable_ctor(void *, void *, int); |
737 | | | 737 | |
738 | static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 738 | static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
739 | #ifdef PMAP_CACHE_VIVT | | 739 | #ifdef PMAP_CACHE_VIVT |
740 | static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 740 | static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
741 | static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 741 | static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
742 | #endif | | 742 | #endif |
743 | | | 743 | |
744 | static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int); | | 744 | static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int); |
745 | #ifdef PMAP_CACHE_VIVT | | 745 | #ifdef PMAP_CACHE_VIVT |
746 | static bool pmap_clean_page(struct vm_page_md *, bool); | | 746 | static bool pmap_clean_page(struct vm_page_md *, bool); |
747 | #endif | | 747 | #endif |
748 | #ifdef PMAP_CACHE_VIPT | | 748 | #ifdef PMAP_CACHE_VIPT |
749 | static void pmap_syncicache_page(struct vm_page_md *, paddr_t); | | 749 | static void pmap_syncicache_page(struct vm_page_md *, paddr_t); |
750 | enum pmap_flush_op { | | 750 | enum pmap_flush_op { |
751 | PMAP_FLUSH_PRIMARY, | | 751 | PMAP_FLUSH_PRIMARY, |
752 | PMAP_FLUSH_SECONDARY, | | 752 | PMAP_FLUSH_SECONDARY, |
753 | PMAP_CLEAN_PRIMARY | | 753 | PMAP_CLEAN_PRIMARY |
754 | }; | | 754 | }; |
755 | #ifndef ARM_MMU_EXTENDED | | 755 | #ifndef ARM_MMU_EXTENDED |
756 | static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op); | | 756 | static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op); |
757 | #endif | | 757 | #endif |
758 | #endif | | 758 | #endif |
759 | static void pmap_page_remove(struct vm_page_md *, paddr_t); | | 759 | static void pmap_page_remove(struct vm_page_md *, paddr_t); |
760 | | | 760 | |
761 | #ifndef ARM_MMU_EXTENDED | | 761 | #ifndef ARM_MMU_EXTENDED |
762 | static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); | | 762 | static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); |
763 | #endif | | 763 | #endif |
764 | static vaddr_t kernel_pt_lookup(paddr_t); | | 764 | static vaddr_t kernel_pt_lookup(paddr_t); |
765 | | | 765 | |
766 | | | 766 | |
767 | /* | | 767 | /* |
768 | * Misc variables | | 768 | * Misc variables |
769 | */ | | 769 | */ |
770 | vaddr_t virtual_avail; | | 770 | vaddr_t virtual_avail; |
771 | vaddr_t virtual_end; | | 771 | vaddr_t virtual_end; |
772 | vaddr_t pmap_curmaxkvaddr; | | 772 | vaddr_t pmap_curmaxkvaddr; |
773 | | | 773 | |
774 | paddr_t avail_start; | | 774 | paddr_t avail_start; |
775 | paddr_t avail_end; | | 775 | paddr_t avail_end; |
776 | | | 776 | |
777 | pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); | | 777 | pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); |
778 | pv_addr_t kernelpages; | | 778 | pv_addr_t kernelpages; |
779 | pv_addr_t kernel_l1pt; | | 779 | pv_addr_t kernel_l1pt; |
780 | pv_addr_t systempage; | | 780 | pv_addr_t systempage; |
781 | | | 781 | |
782 | /* Function to set the debug level of the pmap code */ | | 782 | /* Function to set the debug level of the pmap code */ |
783 | | | 783 | |
784 | #ifdef PMAP_DEBUG | | 784 | #ifdef PMAP_DEBUG |
785 | void | | 785 | void |
786 | pmap_debug(int level) | | 786 | pmap_debug(int level) |
787 | { | | 787 | { |
788 | pmap_debug_level = level; | | 788 | pmap_debug_level = level; |
789 | printf("pmap_debug: level=%d\n", pmap_debug_level); | | 789 | printf("pmap_debug: level=%d\n", pmap_debug_level); |
790 | } | | 790 | } |
791 | #endif /* PMAP_DEBUG */ | | 791 | #endif /* PMAP_DEBUG */ |
792 | | | 792 | |
793 | #ifdef PMAP_CACHE_VIPT | | 793 | #ifdef PMAP_CACHE_VIPT |
794 | #define PMAP_VALIDATE_MD_PAGE(md) \ | | 794 | #define PMAP_VALIDATE_MD_PAGE(md) \ |
795 | KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \ | | 795 | KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \ |
796 | "(md) %p: attrs=%#x urw=%u krw=%u", (md), \ | | 796 | "(md) %p: attrs=%#x urw=%u krw=%u", (md), \ |
797 | (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings); | | 797 | (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings); |
798 | #endif /* PMAP_CACHE_VIPT */ | | 798 | #endif /* PMAP_CACHE_VIPT */ |
799 | /* | | 799 | /* |
800 | * A bunch of routines to conditionally flush the caches/TLB depending | | 800 | * A bunch of routines to conditionally flush the caches/TLB depending |
801 | * on whether the specified pmap actually needs to be flushed at any | | 801 | * on whether the specified pmap actually needs to be flushed at any |
802 | * given time. | | 802 | * given time. |
803 | */ | | 803 | */ |
804 | static inline void | | 804 | static inline void |
805 | pmap_tlb_flush_SE(pmap_t pm, vaddr_t va, u_int flags) | | 805 | pmap_tlb_flush_SE(pmap_t pm, vaddr_t va, u_int flags) |
806 | { | | 806 | { |
807 | #ifdef ARM_MMU_EXTENDED | | 807 | #ifdef ARM_MMU_EXTENDED |
808 | pmap_tlb_invalidate_addr(pm, va); | | 808 | pmap_tlb_invalidate_addr(pm, va); |
809 | #else | | 809 | #else |
810 | if (pm->pm_cstate.cs_tlb_id != 0) { | | 810 | if (pm->pm_cstate.cs_tlb_id != 0) { |
811 | if (PV_BEEN_EXECD(flags)) { | | 811 | if (PV_BEEN_EXECD(flags)) { |
812 | cpu_tlb_flushID_SE(va); | | 812 | cpu_tlb_flushID_SE(va); |
813 | } else if (PV_BEEN_REFD(flags)) { | | 813 | } else if (PV_BEEN_REFD(flags)) { |
814 | cpu_tlb_flushD_SE(va); | | 814 | cpu_tlb_flushD_SE(va); |
815 | } | | 815 | } |
816 | } | | 816 | } |
817 | #endif /* ARM_MMU_EXTENDED */ | | 817 | #endif /* ARM_MMU_EXTENDED */ |
818 | } | | 818 | } |
819 | | | 819 | |
820 | static inline void | | 820 | static inline void |
821 | pmap_tlb_flushID(pmap_t pm) | | 821 | pmap_tlb_flushID(pmap_t pm) |
822 | { | | 822 | { |
823 | #ifdef ARM_MMU_EXTENDED | | 823 | #ifdef ARM_MMU_EXTENDED |
824 | pmap_tlb_asid_release_all(pm); | | 824 | pmap_tlb_asid_release_all(pm); |
825 | #else | | 825 | #else |
826 | if (pm->pm_cstate.cs_tlb_id) { | | 826 | if (pm->pm_cstate.cs_tlb_id) { |
827 | cpu_tlb_flushID(); | | 827 | cpu_tlb_flushID(); |
828 | #if ARM_MMU_V7 == 0 | | 828 | #if ARM_MMU_V7 == 0 |
829 | /* | | 829 | /* |
830 | * Speculative loads by Cortex cores can cause TLB entries to | | 830 | * Speculative loads by Cortex cores can cause TLB entries to |
831 | * be filled even if there are no explicit accesses, so there | | 831 | * be filled even if there are no explicit accesses, so there |
832 | * may always be TLB entries to flush. If we used ASIDs | | 832 | * may always be TLB entries to flush. If we used ASIDs |
833 | * then it would not be a problem. | | 833 | * then it would not be a problem. |
834 | * This is not true for other CPUs. | | 834 | * This is not true for other CPUs. |
835 | */ | | 835 | */ |
836 | pm->pm_cstate.cs_tlb = 0; | | 836 | pm->pm_cstate.cs_tlb = 0; |
837 | #endif /* ARM_MMU_V7 */ | | 837 | #endif /* ARM_MMU_V7 */ |
838 | } | | 838 | } |
839 | #endif /* ARM_MMU_EXTENDED */ | | 839 | #endif /* ARM_MMU_EXTENDED */ |
840 | } | | 840 | } |
841 | | | 841 | |
842 | #ifndef ARM_MMU_EXTENDED | | 842 | #ifndef ARM_MMU_EXTENDED |
843 | static inline void | | 843 | static inline void |
844 | pmap_tlb_flushD(pmap_t pm) | | 844 | pmap_tlb_flushD(pmap_t pm) |
845 | { | | 845 | { |
846 | if (pm->pm_cstate.cs_tlb_d) { | | 846 | if (pm->pm_cstate.cs_tlb_d) { |
847 | cpu_tlb_flushD(); | | 847 | cpu_tlb_flushD(); |
848 | #if ARM_MMU_V7 == 0 | | 848 | #if ARM_MMU_V7 == 0 |
849 | /* | | 849 | /* |
850 | * Speculative loads by Cortex cores can cause TLB entries to | | 850 | * Speculative loads by Cortex cores can cause TLB entries to |
851 | * be filled even if there are no explicit accesses, so there | | 851 | * be filled even if there are no explicit accesses, so there |
852 | * may always be TLB entries to flush. If we used ASIDs | | 852 | * may always be TLB entries to flush. If we used ASIDs |
853 | * then it would not be a problem. | | 853 | * then it would not be a problem. |
854 | * This is not true for other CPUs. | | 854 | * This is not true for other CPUs. |
855 | */ | | 855 | */ |
856 | pm->pm_cstate.cs_tlb_d = 0; | | 856 | pm->pm_cstate.cs_tlb_d = 0; |
857 | #endif /* ARM_MMU_V7 */ | | 857 | #endif /* ARM_MMU_V7 */ |
858 | } | | 858 | } |
859 | } | | 859 | } |
860 | #endif /* ARM_MMU_EXTENDED */ | | 860 | #endif /* ARM_MMU_EXTENDED */ |
861 | | | 861 | |
862 | #ifdef PMAP_CACHE_VIVT | | 862 | #ifdef PMAP_CACHE_VIVT |
863 | static inline void | | 863 | static inline void |
864 | pmap_cache_wbinv_page(pmap_t pm, vaddr_t va, bool do_inv, u_int flags) | | 864 | pmap_cache_wbinv_page(pmap_t pm, vaddr_t va, bool do_inv, u_int flags) |
865 | { | | 865 | { |
866 | if (PV_BEEN_EXECD(flags) && pm->pm_cstate.cs_cache_id) { | | 866 | if (PV_BEEN_EXECD(flags) && pm->pm_cstate.cs_cache_id) { |
867 | cpu_idcache_wbinv_range(va, PAGE_SIZE); | | 867 | cpu_idcache_wbinv_range(va, PAGE_SIZE); |
868 | } else if (PV_BEEN_REFD(flags) && pm->pm_cstate.cs_cache_d) { | | 868 | } else if (PV_BEEN_REFD(flags) && pm->pm_cstate.cs_cache_d) { |
869 | if (do_inv) { | | 869 | if (do_inv) { |
870 | if (flags & PVF_WRITE) | | 870 | if (flags & PVF_WRITE) |
871 | cpu_dcache_wbinv_range(va, PAGE_SIZE); | | 871 | cpu_dcache_wbinv_range(va, PAGE_SIZE); |
872 | else | | 872 | else |
873 | cpu_dcache_inv_range(va, PAGE_SIZE); | | 873 | cpu_dcache_inv_range(va, PAGE_SIZE); |
874 | } else if (flags & PVF_WRITE) { | | 874 | } else if (flags & PVF_WRITE) { |
875 | cpu_dcache_wb_range(va, PAGE_SIZE); | | 875 | cpu_dcache_wb_range(va, PAGE_SIZE); |
876 | } | | 876 | } |
877 | } | | 877 | } |
878 | } | | 878 | } |
879 | | | 879 | |
880 | static inline void | | 880 | static inline void |
881 | pmap_cache_wbinv_all(pmap_t pm, u_int flags) | | 881 | pmap_cache_wbinv_all(pmap_t pm, u_int flags) |
882 | { | | 882 | { |
883 | if (PV_BEEN_EXECD(flags)) { | | 883 | if (PV_BEEN_EXECD(flags)) { |
884 | if (pm->pm_cstate.cs_cache_id) { | | 884 | if (pm->pm_cstate.cs_cache_id) { |
885 | cpu_idcache_wbinv_all(); | | 885 | cpu_idcache_wbinv_all(); |
886 | pm->pm_cstate.cs_cache = 0; | | 886 | pm->pm_cstate.cs_cache = 0; |
887 | } | | 887 | } |
888 | } else if (pm->pm_cstate.cs_cache_d) { | | 888 | } else if (pm->pm_cstate.cs_cache_d) { |
889 | cpu_dcache_wbinv_all(); | | 889 | cpu_dcache_wbinv_all(); |
890 | pm->pm_cstate.cs_cache_d = 0; | | 890 | pm->pm_cstate.cs_cache_d = 0; |
891 | } | | 891 | } |
892 | } | | 892 | } |
893 | #endif /* PMAP_CACHE_VIVT */ | | 893 | #endif /* PMAP_CACHE_VIVT */ |
894 | | | 894 | |
895 | static inline uint8_t | | 895 | static inline uint8_t |
896 | pmap_domain(pmap_t pm) | | 896 | pmap_domain(pmap_t pm) |
897 | { | | 897 | { |
898 | #ifdef ARM_MMU_EXTENDED | | 898 | #ifdef ARM_MMU_EXTENDED |
899 | return pm == pmap_kernel() ? PMAP_DOMAIN_KERNEL : PMAP_DOMAIN_USER; | | 899 | return pm == pmap_kernel() ? PMAP_DOMAIN_KERNEL : PMAP_DOMAIN_USER; |
900 | #else | | 900 | #else |
901 | return pm->pm_domain; | | 901 | return pm->pm_domain; |
902 | #endif | | 902 | #endif |
903 | } | | 903 | } |
904 | | | 904 | |
905 | static inline pd_entry_t * | | 905 | static inline pd_entry_t * |
906 | pmap_l1_kva(pmap_t pm) | | 906 | pmap_l1_kva(pmap_t pm) |
907 | { | | 907 | { |
908 | #ifdef ARM_MMU_EXTENDED | | 908 | #ifdef ARM_MMU_EXTENDED |
909 | return pm->pm_l1; | | 909 | return pm->pm_l1; |
910 | #else | | 910 | #else |
911 | return pm->pm_l1->l1_kva; | | 911 | return pm->pm_l1->l1_kva; |
912 | #endif | | 912 | #endif |
913 | } | | 913 | } |
914 | | | 914 | |
915 | static inline bool | | 915 | static inline bool |
916 | pmap_is_current(pmap_t pm) | | 916 | pmap_is_current(pmap_t pm) |
917 | { | | 917 | { |
918 | if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm) | | 918 | if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm) |
919 | return true; | | 919 | return true; |
920 | | | 920 | |
921 | return false; | | 921 | return false; |
922 | } | | 922 | } |
923 | | | 923 | |
924 | static inline bool | | 924 | static inline bool |
925 | pmap_is_cached(pmap_t pm) | | 925 | pmap_is_cached(pmap_t pm) |
926 | { | | 926 | { |
927 | #ifdef ARM_MMU_EXTENDED | | 927 | #ifdef ARM_MMU_EXTENDED |
928 | if (pm == pmap_kernel()) | | 928 | if (pm == pmap_kernel()) |
929 | return true; | | 929 | return true; |
930 | #ifdef MULTIPROCESSOR | | 930 | #ifdef MULTIPROCESSOR |
931 | // Is this pmap active on any CPU? | | 931 | // Is this pmap active on any CPU? |
932 | if (!kcpuset_iszero(pm->pm_active)) | | 932 | if (!kcpuset_iszero(pm->pm_active)) |
933 | return true; | | 933 | return true; |
934 | #else | | 934 | #else |
935 | struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu()); | | 935 | struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu()); |
936 | // Is this pmap active? | | 936 | // Is this pmap active? |
937 | if (PMAP_PAI_ASIDVALID_P(PMAP_PAI(pm, ti), ti)) | | 937 | if (PMAP_PAI_ASIDVALID_P(PMAP_PAI(pm, ti), ti)) |
938 | return true; | | 938 | return true; |
939 | #endif | | 939 | #endif |
940 | #else | | 940 | #else |
941 | struct cpu_info * const ci = curcpu(); | | 941 | struct cpu_info * const ci = curcpu(); |
942 | if (pm == pmap_kernel() || ci->ci_pmap_lastuser == NULL | | 942 | if (pm == pmap_kernel() || ci->ci_pmap_lastuser == NULL |
943 | || ci->ci_pmap_lastuser == pm) | | 943 | || ci->ci_pmap_lastuser == pm) |
944 | return true; | | 944 | return true; |
945 | #endif /* ARM_MMU_EXTENDED */ | | 945 | #endif /* ARM_MMU_EXTENDED */ |
946 | | | 946 | |
947 | return false; | | 947 | return false; |
948 | } | | 948 | } |
949 | | | 949 | |
950 | /* | | 950 | /* |
951 | * PTE_SYNC_CURRENT: | | 951 | * PTE_SYNC_CURRENT: |
952 | * | | 952 | * |
953 | * Make sure the pte is written out to RAM. | | 953 | * Make sure the pte is written out to RAM. |
954 | * We need to do this for one of the following cases: | | 954 | * We need to do this for one of the following cases: |
955 | * - We're dealing with the kernel pmap | | 955 | * - We're dealing with the kernel pmap |
956 | * - There is no pmap active in the cache/tlb. | | 956 | * - There is no pmap active in the cache/tlb. |
957 | * - The specified pmap is 'active' in the cache/tlb. | | 957 | * - The specified pmap is 'active' in the cache/tlb. |
958 | */ | | 958 | */ |
959 | | | 959 | |
960 | static inline void | | 960 | static inline void |
961 | pmap_pte_sync_current(pmap_t pm, pt_entry_t *ptep) | | 961 | pmap_pte_sync_current(pmap_t pm, pt_entry_t *ptep) |
962 | { | | 962 | { |
963 | if (PMAP_NEEDS_PTE_SYNC && pmap_is_cached(pm)) | | 963 | if (PMAP_NEEDS_PTE_SYNC && pmap_is_cached(pm)) |
964 | PTE_SYNC(ptep); | | 964 | PTE_SYNC(ptep); |
965 | arm_dsb(); | | 965 | arm_dsb(); |
966 | } | | 966 | } |
967 | | | 967 | |
968 | #ifdef PMAP_INCLUDE_PTE_SYNC | | 968 | #ifdef PMAP_INCLUDE_PTE_SYNC |
969 | #define PTE_SYNC_CURRENT(pm, ptep) pmap_pte_sync_current(pm, ptep) | | 969 | #define PTE_SYNC_CURRENT(pm, ptep) pmap_pte_sync_current(pm, ptep) |
970 | #else | | 970 | #else |
971 | #define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ | | 971 | #define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ |
972 | #endif | | 972 | #endif |
973 | | | 973 | |
974 | /* | | 974 | /* |
975 | * main pv_entry manipulation functions: | | 975 | * main pv_entry manipulation functions: |
976 | * pmap_enter_pv: enter a mapping onto a vm_page list | | 976 | * pmap_enter_pv: enter a mapping onto a vm_page list |
977 | * pmap_remove_pv: remove a mapping from a vm_page list | | 977 | * pmap_remove_pv: remove a mapping from a vm_page list |
978 | * | | 978 | * |
979 | * NOTE: pmap_enter_pv expects to lock the pvh itself | | 979 | * NOTE: pmap_enter_pv expects to lock the pvh itself |
980 | * pmap_remove_pv expects the caller to lock the pvh before calling | | 980 | * pmap_remove_pv expects the caller to lock the pvh before calling |
981 | */ | | 981 | */ |
982 | | | 982 | |
983 | /* | | 983 | /* |
984 | * pmap_enter_pv: enter a mapping onto a vm_page list | | 984 | * pmap_enter_pv: enter a mapping onto a vm_page list |
985 | * | | 985 | * |
986 | * => caller should hold the proper lock on pmap_main_lock | | 986 | * => caller should hold the proper lock on pmap_main_lock |
987 | * => caller should have pmap locked | | 987 | * => caller should have pmap locked |
988 | * => we will gain the lock on the vm_page and allocate the new pv_entry | | 988 | * => we will gain the lock on the vm_page and allocate the new pv_entry |
989 | * => caller should adjust ptp's wire_count before calling | | 989 | * => caller should adjust ptp's wire_count before calling |
990 | * => caller should not adjust pmap's wire_count | | 990 | * => caller should not adjust pmap's wire_count |
991 | */ | | 991 | */ |
992 | static void | | 992 | static void |
993 | pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm, | | 993 | pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm, |
994 | vaddr_t va, u_int flags) | | 994 | vaddr_t va, u_int flags) |
995 | { | | 995 | { |
996 | struct pv_entry **pvp; | | 996 | struct pv_entry **pvp; |
997 | | | 997 | |
998 | NPDEBUG(PDB_PVDUMP, | | 998 | NPDEBUG(PDB_PVDUMP, |
999 | printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags)); | | 999 | printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags)); |
1000 | | | 1000 | |
1001 | pv->pv_pmap = pm; | | 1001 | pv->pv_pmap = pm; |
1002 | pv->pv_va = va; | | 1002 | pv->pv_va = va; |
1003 | pv->pv_flags = flags; | | 1003 | pv->pv_flags = flags; |
1004 | | | 1004 | |
1005 | pvp = &SLIST_FIRST(&md->pvh_list); | | 1005 | pvp = &SLIST_FIRST(&md->pvh_list); |
1006 | #ifdef PMAP_CACHE_VIPT | | 1006 | #ifdef PMAP_CACHE_VIPT |
1007 | /* | | 1007 | /* |
1008 | * Insert unmanaged entries, writeable first, at the head of | | 1008 | * Insert unmanaged entries, writeable first, at the head of |
1009 | * the pv list. | | 1009 | * the pv list. |
1010 | */ | | 1010 | */ |
1011 | if (__predict_true(!PV_IS_KENTRY_P(flags))) { | | 1011 | if (__predict_true(!PV_IS_KENTRY_P(flags))) { |
1012 | while (*pvp != NULL && PV_IS_KENTRY_P((*pvp)->pv_flags)) | | 1012 | while (*pvp != NULL && PV_IS_KENTRY_P((*pvp)->pv_flags)) |
1013 | pvp = &SLIST_NEXT(*pvp, pv_link); | | 1013 | pvp = &SLIST_NEXT(*pvp, pv_link); |
1014 | } | | 1014 | } |
1015 | if (!PV_IS_WRITE_P(flags)) { | | 1015 | if (!PV_IS_WRITE_P(flags)) { |
1016 | while (*pvp != NULL && PV_IS_WRITE_P((*pvp)->pv_flags)) | | 1016 | while (*pvp != NULL && PV_IS_WRITE_P((*pvp)->pv_flags)) |
1017 | pvp = &SLIST_NEXT(*pvp, pv_link); | | 1017 | pvp = &SLIST_NEXT(*pvp, pv_link); |
1018 | } | | 1018 | } |
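| /* | | | /* |
| * For illustration (a sketch of the ordering the two loops above | | | * For illustration (a sketch of the ordering the two loops above |
| * produce, not a separately enforced invariant): a new writeable | | | * produce, not a separately enforced invariant): a new writeable |
| * KENTRY mapping lands at the head of the list, while a new read-only | | | * KENTRY mapping lands at the head of the list, while a new read-only |
| * managed mapping is inserted after the leading KENTRY entries and any | | | * managed mapping is inserted after the leading KENTRY entries and any |
| * writeable entries that immediately follow them. | | | * writeable entries that immediately follow them. |
| */ | | | */ |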
1019 | #endif | | 1019 | #endif |
1020 | SLIST_NEXT(pv, pv_link) = *pvp; /* add to ... */ | | 1020 | SLIST_NEXT(pv, pv_link) = *pvp; /* add to ... */ |
1021 | *pvp = pv; /* ... locked list */ | | 1021 | *pvp = pv; /* ... locked list */ |
1022 | md->pvh_attrs |= flags & (PVF_REF | PVF_MOD); | | 1022 | md->pvh_attrs |= flags & (PVF_REF | PVF_MOD); |
1023 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 1023 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
1024 | if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE) | | 1024 | if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE) |
1025 | md->pvh_attrs |= PVF_KMOD; | | 1025 | md->pvh_attrs |= PVF_KMOD; |
1026 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) | | 1026 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) |
1027 | md->pvh_attrs |= PVF_DIRTY; | | 1027 | md->pvh_attrs |= PVF_DIRTY; |
1028 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1028 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1029 | #endif | | 1029 | #endif |
1030 | if (pm == pmap_kernel()) { | | 1030 | if (pm == pmap_kernel()) { |
1031 | PMAPCOUNT(kernel_mappings); | | 1031 | PMAPCOUNT(kernel_mappings); |
1032 | if (flags & PVF_WRITE) | | 1032 | if (flags & PVF_WRITE) |
1033 | md->krw_mappings++; | | 1033 | md->krw_mappings++; |
1034 | else | | 1034 | else |
1035 | md->kro_mappings++; | | 1035 | md->kro_mappings++; |
1036 | } else { | | 1036 | } else { |
1037 | if (flags & PVF_WRITE) | | 1037 | if (flags & PVF_WRITE) |
1038 | md->urw_mappings++; | | 1038 | md->urw_mappings++; |
1039 | else | | 1039 | else |
1040 | md->uro_mappings++; | | 1040 | md->uro_mappings++; |
1041 | } | | 1041 | } |
1042 | | | 1042 | |
1043 | #ifdef PMAP_CACHE_VIPT | | 1043 | #ifdef PMAP_CACHE_VIPT |
1044 | #ifndef ARM_MMU_EXTENDED | | 1044 | #ifndef ARM_MMU_EXTENDED |
1045 | /* | | 1045 | /* |
1046 | * Even though pmap_vac_me_harder will set PVF_WRITE for us, | | 1046 | * Even though pmap_vac_me_harder will set PVF_WRITE for us, |
1047 | * do it here as well to keep the mappings & PVF_WRITE consistent. | | 1047 | * do it here as well to keep the mappings & PVF_WRITE consistent. |
1048 | */ | | 1048 | */ |
1049 | if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) { | | 1049 | if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) { |
1050 | md->pvh_attrs |= PVF_WRITE; | | 1050 | md->pvh_attrs |= PVF_WRITE; |
1051 | } | | 1051 | } |
1052 | #endif | | 1052 | #endif |
1053 | /* | | 1053 | /* |
1054 | * If this is an exec mapping and it's the first exec mapping | | 1054 | * If this is an exec mapping and it's the first exec mapping |
1055 | * for this page, make sure to sync the I-cache. | | 1055 | * for this page, make sure to sync the I-cache. |
1056 | */ | | 1056 | */ |
1057 | if (PV_IS_EXEC_P(flags)) { | | 1057 | if (PV_IS_EXEC_P(flags)) { |
1058 | #ifndef ARM_MMU_EXTENDED | | 1058 | #ifndef ARM_MMU_EXTENDED |
1059 | if (!PV_IS_EXEC_P(md->pvh_attrs)) { | | 1059 | if (!PV_IS_EXEC_P(md->pvh_attrs)) { |
1060 | pmap_syncicache_page(md, pa); | | 1060 | pmap_syncicache_page(md, pa); |
1061 | PMAPCOUNT(exec_synced_map); | | 1061 | PMAPCOUNT(exec_synced_map); |
1062 | } | | 1062 | } |
1063 | #endif | | 1063 | #endif |
1064 | PMAPCOUNT(exec_mappings); | | 1064 | PMAPCOUNT(exec_mappings); |
1065 | } | | 1065 | } |
1066 | #endif | | 1066 | #endif |
1067 | | | 1067 | |
1068 | PMAPCOUNT(mappings); | | 1068 | PMAPCOUNT(mappings); |
1069 | | | 1069 | |
1070 | if (pv->pv_flags & PVF_WIRED) | | 1070 | if (pv->pv_flags & PVF_WIRED) |
1071 | ++pm->pm_stats.wired_count; | | 1071 | ++pm->pm_stats.wired_count; |
1072 | } | | 1072 | } |
1073 | | | 1073 | |
1074 | /* | | 1074 | /* |
1075 | * | | 1075 | * |
1076 | * pmap_find_pv: Find a pv entry | | 1076 | * pmap_find_pv: Find a pv entry |
1077 | * | | 1077 | * |
1078 | * => caller should hold lock on vm_page | | 1078 | * => caller should hold lock on vm_page |
1079 | */ | | 1079 | */ |
1080 | static inline struct pv_entry * | | 1080 | static inline struct pv_entry * |
1081 | pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va) | | 1081 | pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va) |
1082 | { | | 1082 | { |
1083 | struct pv_entry *pv; | | 1083 | struct pv_entry *pv; |
1084 | | | 1084 | |
1085 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 1085 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
1086 | if (pm == pv->pv_pmap && va == pv->pv_va) | | 1086 | if (pm == pv->pv_pmap && va == pv->pv_va) |
1087 | break; | | 1087 | break; |
1088 | } | | 1088 | } |
1089 | | | 1089 | |
1090 | return (pv); | | 1090 | return (pv); |
1091 | } | | 1091 | } |
1092 | | | 1092 | |
1093 | /* | | 1093 | /* |
1094 | * pmap_remove_pv: try to remove a mapping from a pv_list | | 1094 | * pmap_remove_pv: try to remove a mapping from a pv_list |
1095 | * | | 1095 | * |
1096 | * => caller should hold proper lock on pmap_main_lock | | 1096 | * => caller should hold proper lock on pmap_main_lock |
1097 | * => pmap should be locked | | 1097 | * => pmap should be locked |
1098 | * => caller should hold lock on vm_page [so that attrs can be adjusted] | | 1098 | * => caller should hold lock on vm_page [so that attrs can be adjusted] |
1099 | * => caller should adjust ptp's wire_count and free PTP if needed | | 1099 | * => caller should adjust ptp's wire_count and free PTP if needed |
1100 | * => caller should NOT adjust pmap's wire_count | | 1100 | * => caller should NOT adjust pmap's wire_count |
1101 | * => we return the removed pv | | 1101 | * => we return the removed pv |
1102 | */ | | 1102 | */ |
1103 | static struct pv_entry * | | 1103 | static struct pv_entry * |
1104 | pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 1104 | pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
1105 | { | | 1105 | { |
1106 | struct pv_entry *pv, **prevptr; | | 1106 | struct pv_entry *pv, **prevptr; |
1107 | | | 1107 | |
1108 | NPDEBUG(PDB_PVDUMP, | | 1108 | NPDEBUG(PDB_PVDUMP, |
1109 | printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va)); | | 1109 | printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va)); |
1110 | | | 1110 | |
1111 | prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */ | | 1111 | prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */ |
1112 | pv = *prevptr; | | 1112 | pv = *prevptr; |
1113 | | | 1113 | |
1114 | while (pv) { | | 1114 | while (pv) { |
1115 | if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */ | | 1115 | if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */ |
1116 | NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md " | | 1116 | NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md " |
1117 | "%p, flags 0x%x\n", pm, md, pv->pv_flags)); | | 1117 | "%p, flags 0x%x\n", pm, md, pv->pv_flags)); |
1118 | if (pv->pv_flags & PVF_WIRED) { | | 1118 | if (pv->pv_flags & PVF_WIRED) { |
1119 | --pm->pm_stats.wired_count; | | 1119 | --pm->pm_stats.wired_count; |
1120 | } | | 1120 | } |
1121 | *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */ | | 1121 | *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */ |
1122 | if (pm == pmap_kernel()) { | | 1122 | if (pm == pmap_kernel()) { |
1123 | PMAPCOUNT(kernel_unmappings); | | 1123 | PMAPCOUNT(kernel_unmappings); |
1124 | if (pv->pv_flags & PVF_WRITE) | | 1124 | if (pv->pv_flags & PVF_WRITE) |
1125 | md->krw_mappings--; | | 1125 | md->krw_mappings--; |
1126 | else | | 1126 | else |
1127 | md->kro_mappings--; | | 1127 | md->kro_mappings--; |
1128 | } else { | | 1128 | } else { |
1129 | if (pv->pv_flags & PVF_WRITE) | | 1129 | if (pv->pv_flags & PVF_WRITE) |
1130 | md->urw_mappings--; | | 1130 | md->urw_mappings--; |
1131 | else | | 1131 | else |
1132 | md->uro_mappings--; | | 1132 | md->uro_mappings--; |
1133 | } | | 1133 | } |
1134 | | | 1134 | |
1135 | PMAPCOUNT(unmappings); | | 1135 | PMAPCOUNT(unmappings); |
1136 | #ifdef PMAP_CACHE_VIPT | | 1136 | #ifdef PMAP_CACHE_VIPT |
1137 | if (!(pv->pv_flags & PVF_WRITE)) | | 1137 | if (!(pv->pv_flags & PVF_WRITE)) |
1138 | break; | | 1138 | break; |
1139 | /* | | 1139 | /* |
1140 | * If this page has had an exec mapping, then if | | 1140 | * If this page has had an exec mapping, then if |
1141 | * this was the last mapping, discard the contents, | | 1141 | * this was the last mapping, discard the contents, |
1142 | * otherwise sync the i-cache for this page. | | 1142 | * otherwise sync the i-cache for this page. |
1143 | */ | | 1143 | */ |
1144 | if (PV_IS_EXEC_P(md->pvh_attrs)) { | | 1144 | if (PV_IS_EXEC_P(md->pvh_attrs)) { |
1145 | #ifdef ARM_MMU_EXTENDED | | 1145 | #ifdef ARM_MMU_EXTENDED |
1146 | md->pvh_attrs &= ~PVF_EXEC; | | 1146 | md->pvh_attrs &= ~PVF_EXEC; |
1147 | PMAPCOUNT(exec_discarded_unmap); | | 1147 | PMAPCOUNT(exec_discarded_unmap); |
1148 | #else | | 1148 | #else |
1149 | if (SLIST_EMPTY(&md->pvh_list)) { | | 1149 | if (SLIST_EMPTY(&md->pvh_list)) { |
1150 | md->pvh_attrs &= ~PVF_EXEC; | | 1150 | md->pvh_attrs &= ~PVF_EXEC; |
1151 | PMAPCOUNT(exec_discarded_unmap); | | 1151 | PMAPCOUNT(exec_discarded_unmap); |
1152 | } else { | | 1152 | } else { |
1153 | pmap_syncicache_page(md, pa); | | 1153 | pmap_syncicache_page(md, pa); |
1154 | PMAPCOUNT(exec_synced_unmap); | | 1154 | PMAPCOUNT(exec_synced_unmap); |
1155 | } | | 1155 | } |
1156 | #endif /* ARM_MMU_EXTENDED */ | | 1156 | #endif /* ARM_MMU_EXTENDED */ |
1157 | } | | 1157 | } |
1158 | #endif /* PMAP_CACHE_VIPT */ | | 1158 | #endif /* PMAP_CACHE_VIPT */ |
1159 | break; | | 1159 | break; |
1160 | } | | 1160 | } |
1161 | prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */ | | 1161 | prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */ |
1162 | pv = *prevptr; /* advance */ | | 1162 | pv = *prevptr; /* advance */ |
1163 | } | | 1163 | } |
1164 | | | 1164 | |
1165 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 1165 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
1166 | /* | | 1166 | /* |
1167 | * If we no longer have a WRITEABLE KENTRY at the head of the list, | | 1167 | * If we no longer have a WRITEABLE KENTRY at the head of the list, |
1168 | * clear the KMOD attribute from the page. | | 1168 | * clear the KMOD attribute from the page. |
1169 | */ | | 1169 | */ |
1170 | if (SLIST_FIRST(&md->pvh_list) == NULL | | 1170 | if (SLIST_FIRST(&md->pvh_list) == NULL |
1171 | || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) | | 1171 | || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) |
1172 | md->pvh_attrs &= ~PVF_KMOD; | | 1172 | md->pvh_attrs &= ~PVF_KMOD; |
1173 | | | 1173 | |
1174 | /* | | 1174 | /* |
1175 | * If this was a writeable page and there are no more writeable | | 1175 | * If this was a writeable page and there are no more writeable |
1176 | * mappings (ignoring KMPAGE), clear the WRITE flag and write back | | 1176 | * mappings (ignoring KMPAGE), clear the WRITE flag and write back |
1177 | * the contents to memory. | | 1177 | * the contents to memory. |
1178 | */ | | 1178 | */ |
1179 | if (arm_cache_prefer_mask != 0) { | | 1179 | if (arm_cache_prefer_mask != 0) { |
1180 | if (md->krw_mappings + md->urw_mappings == 0) | | 1180 | if (md->krw_mappings + md->urw_mappings == 0) |
1181 | md->pvh_attrs &= ~PVF_WRITE; | | 1181 | md->pvh_attrs &= ~PVF_WRITE; |
1182 | PMAP_VALIDATE_MD_PAGE(md); | | 1182 | PMAP_VALIDATE_MD_PAGE(md); |
1183 | } | | 1183 | } |
1184 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1184 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1185 | #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ | | 1185 | #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ |
1186 | | | 1186 | |
1187 | return(pv); /* return removed pv */ | | 1187 | return(pv); /* return removed pv */ |
1188 | } | | 1188 | } |
1189 | | | 1189 | |
1190 | /* | | 1190 | /* |
1191 | * | | 1191 | * |
1192 | * pmap_modify_pv: Update pv flags | | 1192 | * pmap_modify_pv: Update pv flags |
1193 | * | | 1193 | * |
1194 | * => caller should hold lock on vm_page [so that attrs can be adjusted] | | 1194 | * => caller should hold lock on vm_page [so that attrs can be adjusted] |
1195 | * => caller should NOT adjust pmap's wire_count | | 1195 | * => caller should NOT adjust pmap's wire_count |
1196 | * => caller must call pmap_vac_me_harder() if writable status of a page | | 1196 | * => caller must call pmap_vac_me_harder() if writable status of a page |
1197 | * may have changed. | | 1197 | * may have changed. |
1198 | * => we return the old flags | | 1198 | * => we return the old flags |
1199 | * | | 1199 | * |
1200 | * Modify a physical-virtual mapping in the pv table | | 1200 | * Modify a physical-virtual mapping in the pv table |
1201 | */ | | 1201 | */ |
1202 | static u_int | | 1202 | static u_int |
1203 | pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va, | | 1203 | pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va, |
1204 | u_int clr_mask, u_int set_mask) | | 1204 | u_int clr_mask, u_int set_mask) |
1205 | { | | 1205 | { |
1206 | struct pv_entry *npv; | | 1206 | struct pv_entry *npv; |
1207 | u_int flags, oflags; | | 1207 | u_int flags, oflags; |
1208 | | | 1208 | |
1209 | KASSERT(!PV_IS_KENTRY_P(clr_mask)); | | 1209 | KASSERT(!PV_IS_KENTRY_P(clr_mask)); |
1210 | KASSERT(!PV_IS_KENTRY_P(set_mask)); | | 1210 | KASSERT(!PV_IS_KENTRY_P(set_mask)); |
1211 | | | 1211 | |
1212 | if ((npv = pmap_find_pv(md, pm, va)) == NULL) | | 1212 | if ((npv = pmap_find_pv(md, pm, va)) == NULL) |
1213 | return (0); | | 1213 | return (0); |
1214 | | | 1214 | |
1215 | NPDEBUG(PDB_PVDUMP, | | 1215 | NPDEBUG(PDB_PVDUMP, |
1216 | printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags)); | | 1216 | printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags)); |
1217 | | | 1217 | |
1218 | /* | | 1218 | /* |
| @@ -2675,2202 +2675,2207 @@ pmap_syncicache_page(struct vm_page_md * | | | @@ -2675,2202 +2675,2207 @@ pmap_syncicache_page(struct vm_page_md * |
2675 | | | 2675 | |
2676 | for (size_t i = 0, j = 0; i < way_size; | | 2676 | for (size_t i = 0, j = 0; i < way_size; |
2677 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { | | 2677 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { |
2678 | l2pte_reset(ptep + j); | | 2678 | l2pte_reset(ptep + j); |
2679 | PTE_SYNC(ptep + j); | | 2679 | PTE_SYNC(ptep + j); |
2680 | | | 2680 | |
2681 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); | | 2681 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); |
2682 | /* | | 2682 | /* |
2683 | * Set up a PTE to flush these cache lines. | | 2683 | * Set up a PTE to flush these cache lines. |
2684 | */ | | 2684 | */ |
2685 | l2pte_set(ptep + j, npte, 0); | | 2685 | l2pte_set(ptep + j, npte, 0); |
2686 | } | | 2686 | } |
2687 | PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE); | | 2687 | PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE); |
2688 | | | 2688 | |
2689 | /* | | 2689 | /* |
2690 | * Flush it. | | 2690 | * Flush it. |
2691 | */ | | 2691 | */ |
2692 | cpu_icache_sync_range(dstp, way_size); | | 2692 | cpu_icache_sync_range(dstp, way_size); |
2693 | | | 2693 | |
2694 | for (size_t i = 0, j = 0; i < way_size; | | 2694 | for (size_t i = 0, j = 0; i < way_size; |
2695 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { | | 2695 | i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) { |
2696 | /* | | 2696 | /* |
2697 | * Unmap the page(s). | | 2697 | * Unmap the page(s). |
2698 | */ | | 2698 | */ |
2699 | l2pte_reset(ptep + j); | | 2699 | l2pte_reset(ptep + j); |
2700 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); | | 2700 | pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC); |
2701 | } | | 2701 | } |
2702 | PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE); | | 2702 | PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE); |
2703 | | | 2703 | |
2704 | md->pvh_attrs |= PVF_EXEC; | | 2704 | md->pvh_attrs |= PVF_EXEC; |
2705 | PMAPCOUNT(exec_synced); | | 2705 | PMAPCOUNT(exec_synced); |
2706 | } | | 2706 | } |
2707 | | | 2707 | |
2708 | #ifndef ARM_MMU_EXTENDED | | 2708 | #ifndef ARM_MMU_EXTENDED |
2709 | void | | 2709 | void |
2710 | pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) | | 2710 | pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) |
2711 | { | | 2711 | { |
2712 | vsize_t va_offset, end_va; | | 2712 | vsize_t va_offset, end_va; |
2713 | bool wbinv_p; | | 2713 | bool wbinv_p; |
2714 | | | 2714 | |
2715 | if (arm_cache_prefer_mask == 0) | | 2715 | if (arm_cache_prefer_mask == 0) |
2716 | return; | | 2716 | return; |
2717 | | | 2717 | |
2718 | switch (flush) { | | 2718 | switch (flush) { |
2719 | case PMAP_FLUSH_PRIMARY: | | 2719 | case PMAP_FLUSH_PRIMARY: |
2720 | if (md->pvh_attrs & PVF_MULTCLR) { | | 2720 | if (md->pvh_attrs & PVF_MULTCLR) { |
2721 | va_offset = 0; | | 2721 | va_offset = 0; |
2722 | end_va = arm_cache_prefer_mask; | | 2722 | end_va = arm_cache_prefer_mask; |
2723 | md->pvh_attrs &= ~PVF_MULTCLR; | | 2723 | md->pvh_attrs &= ~PVF_MULTCLR; |
2724 | PMAPCOUNT(vac_flush_lots); | | 2724 | PMAPCOUNT(vac_flush_lots); |
2725 | } else { | | 2725 | } else { |
2726 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 2726 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
2727 | end_va = va_offset; | | 2727 | end_va = va_offset; |
2728 | PMAPCOUNT(vac_flush_one); | | 2728 | PMAPCOUNT(vac_flush_one); |
2729 | } | | 2729 | } |
2730 | /* | | 2730 | /* |
2731 | * Mark that the page is no longer dirty. | | 2731 | * Mark that the page is no longer dirty. |
2732 | */ | | 2732 | */ |
2733 | md->pvh_attrs &= ~PVF_DIRTY; | | 2733 | md->pvh_attrs &= ~PVF_DIRTY; |
2734 | wbinv_p = true; | | 2734 | wbinv_p = true; |
2735 | break; | | 2735 | break; |
2736 | case PMAP_FLUSH_SECONDARY: | | 2736 | case PMAP_FLUSH_SECONDARY: |
2737 | va_offset = 0; | | 2737 | va_offset = 0; |
2738 | end_va = arm_cache_prefer_mask; | | 2738 | end_va = arm_cache_prefer_mask; |
2739 | wbinv_p = true; | | 2739 | wbinv_p = true; |
2740 | md->pvh_attrs &= ~PVF_MULTCLR; | | 2740 | md->pvh_attrs &= ~PVF_MULTCLR; |
2741 | PMAPCOUNT(vac_flush_lots); | | 2741 | PMAPCOUNT(vac_flush_lots); |
2742 | break; | | 2742 | break; |
2743 | case PMAP_CLEAN_PRIMARY: | | 2743 | case PMAP_CLEAN_PRIMARY: |
2744 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 2744 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
2745 | end_va = va_offset; | | 2745 | end_va = va_offset; |
2746 | wbinv_p = false; | | 2746 | wbinv_p = false; |
2747 | /* | | 2747 | /* |
2748 | * Mark that the page is no longer dirty. | | 2748 | * Mark that the page is no longer dirty. |
2749 | */ | | 2749 | */ |
2750 | if ((md->pvh_attrs & PVF_DMOD) == 0) | | 2750 | if ((md->pvh_attrs & PVF_DMOD) == 0) |
2751 | md->pvh_attrs &= ~PVF_DIRTY; | | 2751 | md->pvh_attrs &= ~PVF_DIRTY; |
2752 | PMAPCOUNT(vac_clean_one); | | 2752 | PMAPCOUNT(vac_clean_one); |
2753 | break; | | 2753 | break; |
2754 | default: | | 2754 | default: |
2755 | return; | | 2755 | return; |
2756 | } | | 2756 | } |
2757 | | | 2757 | |
2758 | KASSERT(!(md->pvh_attrs & PVF_NC)); | | 2758 | KASSERT(!(md->pvh_attrs & PVF_NC)); |
2759 | | | 2759 | |
2760 | NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n", | | 2760 | NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n", |
2761 | md, md->pvh_attrs)); | | 2761 | md, md->pvh_attrs)); |
2762 | | | 2762 | |
2763 | const size_t scache_line_size = arm_scache.dcache_line_size; | | 2763 | const size_t scache_line_size = arm_scache.dcache_line_size; |
2764 | | | 2764 | |
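| /* | | | /* |
| * Illustrative sketch of the loop below (the cache geometry is an | | | * Illustrative sketch of the loop below (the cache geometry is an |
| * assumed example): with arm_cache_prefer_mask == 0x1000, e.g. a 32KB | | | * assumed example): with arm_cache_prefer_mask == 0x1000, e.g. a 32KB |
| * 4-way VIPT D-cache with an 8KB way, PMAP_FLUSH_SECONDARY visits the | | | * 4-way VIPT D-cache with an 8KB way, PMAP_FLUSH_SECONDARY visits the |
| * two page colors va_offset == 0x0000 and 0x1000, mapping the page at | | | * two page colors va_offset == 0x0000 and 0x1000, mapping the page at |
| * each color in turn and cleaning that cache alias. | | | * each color in turn and cleaning that cache alias. |
| */ | | | */ |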
2765 | for (; va_offset <= end_va; va_offset += PAGE_SIZE) { | | 2765 | for (; va_offset <= end_va; va_offset += PAGE_SIZE) { |
2766 | pt_entry_t * const ptep = cpu_cdst_pte(va_offset); | | 2766 | pt_entry_t * const ptep = cpu_cdst_pte(va_offset); |
2767 | const vaddr_t dstp = cpu_cdstp(va_offset); | | 2767 | const vaddr_t dstp = cpu_cdstp(va_offset); |
2768 | const pt_entry_t opte = *ptep; | | 2768 | const pt_entry_t opte = *ptep; |
2769 | | | 2769 | |
2770 | if (flush == PMAP_FLUSH_SECONDARY | | 2770 | if (flush == PMAP_FLUSH_SECONDARY |
2771 | && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) | | 2771 | && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) |
2772 | continue; | | 2772 | continue; |
2773 | | | 2773 | |
2774 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); | | 2774 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); |
2775 | /* | | 2775 | /* |
2776 | * Set up a PTE with the right coloring to flush | | 2776 | * Set up a PTE with the right coloring to flush |
2777 | * existing cache entries. | | 2777 | * existing cache entries. |
2778 | */ | | 2778 | */ |
2779 | const pt_entry_t npte = L2_S_PROTO | | 2779 | const pt_entry_t npte = L2_S_PROTO |
2780 | | pa | | 2780 | | pa |
2781 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) | | 2781 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
2782 | | pte_l2_s_cache_mode; | | 2782 | | pte_l2_s_cache_mode; |
2783 | l2pte_set(ptep, npte, opte); | | 2783 | l2pte_set(ptep, npte, opte); |
2784 | PTE_SYNC(ptep); | | 2784 | PTE_SYNC(ptep); |
2785 | | | 2785 | |
2786 | /* | | 2786 | /* |
2787 | * Flush it. Make sure to flush secondary cache too since | | 2787 | * Flush it. Make sure to flush secondary cache too since |
2788 | * bus_dma will ignore uncached pages. | | 2788 | * bus_dma will ignore uncached pages. |
2789 | */ | | 2789 | */ |
2790 | if (scache_line_size != 0) { | | 2790 | if (scache_line_size != 0) { |
2791 | cpu_dcache_wb_range(dstp, PAGE_SIZE); | | 2791 | cpu_dcache_wb_range(dstp, PAGE_SIZE); |
2792 | if (wbinv_p) { | | 2792 | if (wbinv_p) { |
2793 | cpu_sdcache_wbinv_range(dstp, pa, PAGE_SIZE); | | 2793 | cpu_sdcache_wbinv_range(dstp, pa, PAGE_SIZE); |
2794 | cpu_dcache_inv_range(dstp, PAGE_SIZE); | | 2794 | cpu_dcache_inv_range(dstp, PAGE_SIZE); |
2795 | } else { | | 2795 | } else { |
2796 | cpu_sdcache_wb_range(dstp, pa, PAGE_SIZE); | | 2796 | cpu_sdcache_wb_range(dstp, pa, PAGE_SIZE); |
2797 | } | | 2797 | } |
2798 | } else { | | 2798 | } else { |
2799 | if (wbinv_p) { | | 2799 | if (wbinv_p) { |
2800 | cpu_dcache_wbinv_range(dstp, PAGE_SIZE); | | 2800 | cpu_dcache_wbinv_range(dstp, PAGE_SIZE); |
2801 | } else { | | 2801 | } else { |
2802 | cpu_dcache_wb_range(dstp, PAGE_SIZE); | | 2802 | cpu_dcache_wb_range(dstp, PAGE_SIZE); |
2803 | } | | 2803 | } |
2804 | } | | 2804 | } |
2805 | | | 2805 | |
2806 | /* | | 2806 | /* |
2807 | * Restore the page table entry since we might have interrupted | | 2807 | * Restore the page table entry since we might have interrupted |
2808 | * pmap_zero_page or pmap_copy_page which was already using | | 2808 | * pmap_zero_page or pmap_copy_page which was already using |
2809 | * this pte. | | 2809 | * this pte. |
2810 | */ | | 2810 | */ |
2811 | if (opte) { | | 2811 | if (opte) { |
2812 | l2pte_set(ptep, opte, npte); | | 2812 | l2pte_set(ptep, opte, npte); |
2813 | } else { | | 2813 | } else { |
2814 | l2pte_reset(ptep); | | 2814 | l2pte_reset(ptep); |
2815 | } | | 2815 | } |
2816 | PTE_SYNC(ptep); | | 2816 | PTE_SYNC(ptep); |
2817 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); | | 2817 | pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); |
2818 | } | | 2818 | } |
2819 | } | | 2819 | } |
2820 | #endif /* ARM_MMU_EXTENDED */ | | 2820 | #endif /* ARM_MMU_EXTENDED */ |
2821 | #endif /* PMAP_CACHE_VIPT */ | | 2821 | #endif /* PMAP_CACHE_VIPT */ |
2822 | | | 2822 | |
2823 | /* | | 2823 | /* |
2824 | * Routine: pmap_page_remove | | 2824 | * Routine: pmap_page_remove |
2825 | * Function: | | 2825 | * Function: |
2826 | * Removes this physical page from | | 2826 | * Removes this physical page from |
2827 | * all physical maps in which it resides. | | 2827 | * all physical maps in which it resides. |
2828 | * Reflects back modify bits to the pager. | | 2828 | * Reflects back modify bits to the pager. |
2829 | */ | | 2829 | */ |
2830 | static void | | 2830 | static void |
2831 | pmap_page_remove(struct vm_page_md *md, paddr_t pa) | | 2831 | pmap_page_remove(struct vm_page_md *md, paddr_t pa) |
2832 | { | | 2832 | { |
2833 | struct l2_bucket *l2b; | | 2833 | struct l2_bucket *l2b; |
2834 | struct pv_entry *pv; | | 2834 | struct pv_entry *pv; |
2835 | pt_entry_t *ptep; | | 2835 | pt_entry_t *ptep; |
2836 | #ifndef ARM_MMU_EXTENDED | | 2836 | #ifndef ARM_MMU_EXTENDED |
2837 | bool flush = false; | | 2837 | bool flush = false; |
2838 | #endif | | 2838 | #endif |
2839 | u_int flags = 0; | | 2839 | u_int flags = 0; |
2840 | | | 2840 | |
2841 | NPDEBUG(PDB_FOLLOW, | | 2841 | NPDEBUG(PDB_FOLLOW, |
2842 | printf("pmap_page_remove: md %p (0x%08lx)\n", md, | | 2842 | printf("pmap_page_remove: md %p (0x%08lx)\n", md, |
2843 | pa)); | | 2843 | pa)); |
2844 | | | 2844 | |
2845 | struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list); | | 2845 | struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list); |
2846 | pmap_acquire_page_lock(md); | | 2846 | pmap_acquire_page_lock(md); |
2847 | if (*pvp == NULL) { | | 2847 | if (*pvp == NULL) { |
2848 | #ifdef PMAP_CACHE_VIPT | | 2848 | #ifdef PMAP_CACHE_VIPT |
2849 | /* | | 2849 | /* |
2850 | * We *know* the page contents are about to be replaced. | | 2850 | * We *know* the page contents are about to be replaced. |
2851 | * Discard the exec contents | | 2851 | * Discard the exec contents |
2852 | */ | | 2852 | */ |
2853 | if (PV_IS_EXEC_P(md->pvh_attrs)) | | 2853 | if (PV_IS_EXEC_P(md->pvh_attrs)) |
2854 | PMAPCOUNT(exec_discarded_page_protect); | | 2854 | PMAPCOUNT(exec_discarded_page_protect); |
2855 | md->pvh_attrs &= ~PVF_EXEC; | | 2855 | md->pvh_attrs &= ~PVF_EXEC; |
2856 | PMAP_VALIDATE_MD_PAGE(md); | | 2856 | PMAP_VALIDATE_MD_PAGE(md); |
2857 | #endif | | 2857 | #endif |
2858 | pmap_release_page_lock(md); | | 2858 | pmap_release_page_lock(md); |
2859 | return; | | 2859 | return; |
2860 | } | | 2860 | } |
2861 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 2861 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
2862 | KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); | | 2862 | KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); |
2863 | #endif | | 2863 | #endif |
2864 | | | 2864 | |
2865 | /* | | 2865 | /* |
2866 | * Clear alias counts | | 2866 | * Clear alias counts |
2867 | */ | | 2867 | */ |
2868 | #ifdef PMAP_CACHE_VIVT | | 2868 | #ifdef PMAP_CACHE_VIVT |
2869 | md->k_mappings = 0; | | 2869 | md->k_mappings = 0; |
2870 | #endif | | 2870 | #endif |
2871 | md->urw_mappings = md->uro_mappings = 0; | | 2871 | md->urw_mappings = md->uro_mappings = 0; |
2872 | | | 2872 | |
2873 | #ifdef PMAP_CACHE_VIVT | | 2873 | #ifdef PMAP_CACHE_VIVT |
2874 | pmap_clean_page(md, false); | | 2874 | pmap_clean_page(md, false); |
2875 | #endif | | 2875 | #endif |
2876 | | | 2876 | |
2877 | while ((pv = *pvp) != NULL) { | | 2877 | while ((pv = *pvp) != NULL) { |
2878 | pmap_t pm = pv->pv_pmap; | | 2878 | pmap_t pm = pv->pv_pmap; |
2879 | #ifndef ARM_MMU_EXTENDED | | 2879 | #ifndef ARM_MMU_EXTENDED |
2880 | if (flush == false && pmap_is_current(pm)) | | 2880 | if (flush == false && pmap_is_current(pm)) |
2881 | flush = true; | | 2881 | flush = true; |
2882 | #endif | | 2882 | #endif |
2883 | | | 2883 | |
2884 | if (pm == pmap_kernel()) { | | 2884 | if (pm == pmap_kernel()) { |
2885 | #ifdef PMAP_CACHE_VIPT | | 2885 | #ifdef PMAP_CACHE_VIPT |
2886 | /* | | 2886 | /* |
2886 | * If this was an unmanaged mapping, it must be preserved. | | 2886 | * If this was an unmanaged mapping, it must be preserved. |
2888 | * Move it back on the list and advance the end-of-list | | 2888 | * Move it back on the list and advance the end-of-list |
2889 | * pointer. | | 2889 | * pointer. |
2890 | */ | | 2890 | */ |
2891 | if (PV_IS_KENTRY_P(pv->pv_flags)) { | | 2891 | if (PV_IS_KENTRY_P(pv->pv_flags)) { |
2892 | *pvp = pv; | | 2892 | *pvp = pv; |
2893 | pvp = &SLIST_NEXT(pv, pv_link); | | 2893 | pvp = &SLIST_NEXT(pv, pv_link); |
2894 | continue; | | 2894 | continue; |
2895 | } | | 2895 | } |
2896 | if (pv->pv_flags & PVF_WRITE) | | 2896 | if (pv->pv_flags & PVF_WRITE) |
2897 | md->krw_mappings--; | | 2897 | md->krw_mappings--; |
2898 | else | | 2898 | else |
2899 | md->kro_mappings--; | | 2899 | md->kro_mappings--; |
2900 | #endif | | 2900 | #endif |
2901 | PMAPCOUNT(kernel_unmappings); | | 2901 | PMAPCOUNT(kernel_unmappings); |
2902 | } | | 2902 | } |
2903 | *pvp = SLIST_NEXT(pv, pv_link); /* remove from list */ | | 2903 | *pvp = SLIST_NEXT(pv, pv_link); /* remove from list */ |
2904 | PMAPCOUNT(unmappings); | | 2904 | PMAPCOUNT(unmappings); |
2905 | | | 2905 | |
2906 | pmap_release_page_lock(md); | | 2906 | pmap_release_page_lock(md); |
2907 | pmap_acquire_pmap_lock(pm); | | 2907 | pmap_acquire_pmap_lock(pm); |
2908 | | | 2908 | |
2909 | l2b = pmap_get_l2_bucket(pm, pv->pv_va); | | 2909 | l2b = pmap_get_l2_bucket(pm, pv->pv_va); |
2910 | KASSERTMSG(l2b != NULL, "%#lx", pv->pv_va); | | 2910 | KASSERTMSG(l2b != NULL, "%#lx", pv->pv_va); |
2911 | | | 2911 | |
2912 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 2912 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
2913 | | | 2913 | |
2914 | /* | | 2914 | /* |
2915 | * Update statistics | | 2915 | * Update statistics |
2916 | */ | | 2916 | */ |
2917 | --pm->pm_stats.resident_count; | | 2917 | --pm->pm_stats.resident_count; |
2918 | | | 2918 | |
2919 | /* Wired bit */ | | 2919 | /* Wired bit */ |
2920 | if (pv->pv_flags & PVF_WIRED) | | 2920 | if (pv->pv_flags & PVF_WIRED) |
2921 | --pm->pm_stats.wired_count; | | 2921 | --pm->pm_stats.wired_count; |
2922 | | | 2922 | |
2923 | flags |= pv->pv_flags; | | 2923 | flags |= pv->pv_flags; |
2924 | | | 2924 | |
2925 | /* | | 2925 | /* |
2926 | * Invalidate the PTEs. | | 2926 | * Invalidate the PTEs. |
2927 | */ | | 2927 | */ |
2928 | l2pte_reset(ptep); | | 2928 | l2pte_reset(ptep); |
2929 | PTE_SYNC_CURRENT(pm, ptep); | | 2929 | PTE_SYNC_CURRENT(pm, ptep); |
2930 | | | 2930 | |
2931 | #ifdef ARM_MMU_EXTENDED | | 2931 | #ifdef ARM_MMU_EXTENDED |
2932 | pmap_tlb_invalidate_addr(pm, pv->pv_va); | | 2932 | pmap_tlb_invalidate_addr(pm, pv->pv_va); |
2933 | #endif | | 2933 | #endif |
2934 | | | 2934 | |
2935 | pmap_free_l2_bucket(pm, l2b, PAGE_SIZE / L2_S_SIZE); | | 2935 | pmap_free_l2_bucket(pm, l2b, PAGE_SIZE / L2_S_SIZE); |
2936 | | | 2936 | |
2937 | pmap_release_pmap_lock(pm); | | 2937 | pmap_release_pmap_lock(pm); |
2938 | | | 2938 | |
2939 | pool_put(&pmap_pv_pool, pv); | | 2939 | pool_put(&pmap_pv_pool, pv); |
2940 | pmap_acquire_page_lock(md); | | 2940 | pmap_acquire_page_lock(md); |
2941 | #ifdef MULTIPROCESSOR | | 2941 | #ifdef MULTIPROCESSOR |
2942 | /* | | 2942 | /* |
2943 | * Restart of the beginning of the list. | | 2943 | * Restart of the beginning of the list. |
2944 | */ | | 2944 | */ |
2945 | pvp = &SLIST_FIRST(&md->pvh_list); | | 2945 | pvp = &SLIST_FIRST(&md->pvh_list); |
2946 | #endif | | 2946 | #endif |
2947 | } | | 2947 | } |
2948 | /* | | 2948 | /* |
2949 | * if we reach the end of the list and there are still mappings, they | | 2949 | * if we reach the end of the list and there are still mappings, they |
2950 | * might be able to be cached now. And they must be kernel mappings. | | 2950 | * might be able to be cached now. And they must be kernel mappings. |
2951 | */ | | 2951 | */ |
2952 | if (!SLIST_EMPTY(&md->pvh_list)) { | | 2952 | if (!SLIST_EMPTY(&md->pvh_list)) { |
2953 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); | | 2953 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); |
2954 | } | | 2954 | } |
2955 | | | 2955 | |
2956 | #ifdef PMAP_CACHE_VIPT | | 2956 | #ifdef PMAP_CACHE_VIPT |
2957 | /* | | 2957 | /* |
2958 | * Its EXEC cache is now gone. | | 2958 | * Its EXEC cache is now gone. |
2959 | */ | | 2959 | */ |
2960 | if (PV_IS_EXEC_P(md->pvh_attrs)) | | 2960 | if (PV_IS_EXEC_P(md->pvh_attrs)) |
2961 | PMAPCOUNT(exec_discarded_page_protect); | | 2961 | PMAPCOUNT(exec_discarded_page_protect); |
2962 | md->pvh_attrs &= ~PVF_EXEC; | | 2962 | md->pvh_attrs &= ~PVF_EXEC; |
2963 | KASSERT(md->urw_mappings == 0); | | 2963 | KASSERT(md->urw_mappings == 0); |
2964 | KASSERT(md->uro_mappings == 0); | | 2964 | KASSERT(md->uro_mappings == 0); |
2965 | #ifndef ARM_MMU_EXTENDED | | 2965 | #ifndef ARM_MMU_EXTENDED |
2966 | if (arm_cache_prefer_mask != 0) { | | 2966 | if (arm_cache_prefer_mask != 0) { |
2967 | if (md->krw_mappings == 0) | | 2967 | if (md->krw_mappings == 0) |
2968 | md->pvh_attrs &= ~PVF_WRITE; | | 2968 | md->pvh_attrs &= ~PVF_WRITE; |
2969 | PMAP_VALIDATE_MD_PAGE(md); | | 2969 | PMAP_VALIDATE_MD_PAGE(md); |
2970 | } | | 2970 | } |
2971 | #endif /* ARM_MMU_EXTENDED */ | | 2971 | #endif /* ARM_MMU_EXTENDED */ |
2972 | #endif /* PMAP_CACHE_VIPT */ | | 2972 | #endif /* PMAP_CACHE_VIPT */ |
2973 | pmap_release_page_lock(md); | | 2973 | pmap_release_page_lock(md); |
2974 | | | 2974 | |
2975 | #ifndef ARM_MMU_EXTENDED | | 2975 | #ifndef ARM_MMU_EXTENDED |
2976 | if (flush) { | | 2976 | if (flush) { |
2977 | /* | | 2977 | /* |
2978 | * Note: We can't use pmap_tlb_flush{I,D}() here since that | | 2978 | * Note: We can't use pmap_tlb_flush{I,D}() here since that |
2979 | * would need a subsequent call to pmap_update() to ensure | | 2979 | * would need a subsequent call to pmap_update() to ensure |
2980 | * curpm->pm_cstate.cs_all is reset. Our callers are not | | 2980 | * curpm->pm_cstate.cs_all is reset. Our callers are not |
2981 | * required to do that (see pmap(9)), so we can't modify | | 2981 | * required to do that (see pmap(9)), so we can't modify |
2982 | * the current pmap's state. | | 2982 | * the current pmap's state. |
2983 | */ | | 2983 | */ |
2984 | if (PV_BEEN_EXECD(flags)) | | 2984 | if (PV_BEEN_EXECD(flags)) |
2985 | cpu_tlb_flushID(); | | 2985 | cpu_tlb_flushID(); |
2986 | else | | 2986 | else |
2987 | cpu_tlb_flushD(); | | 2987 | cpu_tlb_flushD(); |
2988 | } | | 2988 | } |
2989 | cpu_cpwait(); | | 2989 | cpu_cpwait(); |
2990 | #endif /* ARM_MMU_EXTENDED */ | | 2990 | #endif /* ARM_MMU_EXTENDED */ |
2991 | } | | 2991 | } |
2992 | | | 2992 | |
2993 | /* | | 2993 | /* |
2994 | * pmap_t pmap_create(void) | | 2994 | * pmap_t pmap_create(void) |
2995 | * | | 2995 | * |
2996 | * Create a new pmap structure from scratch. | | 2996 | * Create a new pmap structure from scratch. |
2997 | */ | | 2997 | */ |
2998 | pmap_t | | 2998 | pmap_t |
2999 | pmap_create(void) | | 2999 | pmap_create(void) |
3000 | { | | 3000 | { |
3001 | pmap_t pm; | | 3001 | pmap_t pm; |
3002 | | | 3002 | |
3003 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); | | 3003 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); |
3004 | | | 3004 | |
3005 | mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE); | | 3005 | mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE); |
3006 | uvm_obj_init(&pm->pm_obj, NULL, false, 1); | | 3006 | uvm_obj_init(&pm->pm_obj, NULL, false, 1); |
3007 | uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock); | | 3007 | uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock); |
3008 | | | 3008 | |
3009 | pm->pm_stats.wired_count = 0; | | 3009 | pm->pm_stats.wired_count = 0; |
3010 | pm->pm_stats.resident_count = 1; | | 3010 | pm->pm_stats.resident_count = 1; |
3011 | #ifdef ARM_MMU_EXTENDED | | 3011 | #ifdef ARM_MMU_EXTENDED |
3012 | #ifdef MULTIPROCESSOR | | 3012 | #ifdef MULTIPROCESSOR |
3013 | kcpuset_create(&pm->pm_active, true); | | 3013 | kcpuset_create(&pm->pm_active, true); |
3014 | kcpuset_create(&pm->pm_onproc, true); | | 3014 | kcpuset_create(&pm->pm_onproc, true); |
3015 | #endif | | 3015 | #endif |
3016 | #else | | 3016 | #else |
3017 | pm->pm_cstate.cs_all = 0; | | 3017 | pm->pm_cstate.cs_all = 0; |
3018 | #endif | | 3018 | #endif |
3019 | pmap_alloc_l1(pm); | | 3019 | pmap_alloc_l1(pm); |
3020 | | | 3020 | |
3021 | /* | | 3021 | /* |
3022 | * Note: The pool cache ensures that the pm_l2[] array is already | | 3022 | * Note: The pool cache ensures that the pm_l2[] array is already |
3023 | * initialised to zero. | | 3023 | * initialised to zero. |
3024 | */ | | 3024 | */ |
3025 | | | 3025 | |
3026 | pmap_pinit(pm); | | 3026 | pmap_pinit(pm); |
3027 | | | 3027 | |
3028 | LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); | | 3028 | LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); |
3029 | | | 3029 | |
3030 | return (pm); | | 3030 | return (pm); |
3031 | } | | 3031 | } |
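/*
 * Illustrative sketch (editor's addition, not from pmap.c): the note
 * above about pm_l2[] already being zero relies on the pool_cache(9)
 * constructor contract -- the ctor runs once when a fresh object
 * enters the cache, and objects must be handed back to
 * pool_cache_put() in that constructed state, so recycled pmaps
 * return with pm_l2[] still zeroed.  example_pmap_ctor is a
 * hypothetical name; the registration comment abbreviates arguments.
 */
static int
example_pmap_ctor(void *arg, void *obj, int flags)
{
	struct pmap *pm = obj;

	/* The constructed state shared by new and recycled objects. */
	memset(pm->pm_l2, 0, sizeof(pm->pm_l2));
	return 0;
}
/*
 * Registered roughly as:
 *	pool_cache_init(sizeof(struct pmap), ..., IPL_NONE,
 *	    example_pmap_ctor, NULL, NULL);
 */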
3032 | | | 3032 | |
3033 | u_int | | 3033 | u_int |
3034 | arm32_mmap_flags(paddr_t pa) | | 3034 | arm32_mmap_flags(paddr_t pa) |
3035 | { | | 3035 | { |
3036 | /* | | 3036 | /* |
3037 | * The upper 8 bits of pmap_enter()'s flags are reserved for MD use, | | 3037 | * The upper 8 bits of pmap_enter()'s flags are reserved for MD use, |
3038 | * and we already use the upper bits of page numbers to pass flags | | 3038 | * and we already use the upper bits of page numbers to pass flags |
3039 | * around, so we might as well use the same bits in both places. | | 3039 | * around, so we might as well use the same bits in both places. |
3040 | */ | | 3040 | */ |
3041 | return (u_int)pa & PMAP_MD_MASK; | | 3041 | return (u_int)pa & PMAP_MD_MASK; |
3042 | } | | 3042 | } |
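/*
 * Illustrative sketch (editor's addition, not from pmap.c): the round
 * trip the comment above implies, assuming the cookie is a page
 * number with MD flags in its upper bits.  A device mmap routine can
 * pack a flag such as ARM32_MMAP_WRITECOMBINE into the cookie, and
 * arm32_mmap_flags() masks it back out for pmap_enter().
 * example_cookie_roundtrip and example_cookie are hypothetical names.
 */
static u_int
example_cookie_roundtrip(paddr_t pa)
{
	/* Pack: page frame number plus an MD flag in the upper bits. */
	paddr_t example_cookie = atop(pa) | ARM32_MMAP_WRITECOMBINE;

	/* Unpack: recover just the MD flag bits. */
	return arm32_mmap_flags(example_cookie);
}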
3043 | /* | | 3043 | /* |
3044 | * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, | | 3044 | * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, |
3045 | * u_int flags) | | 3045 | * u_int flags) |
3046 | * | | 3046 | * |
3047 | * Insert the given physical page (pa) at | | 3047 | * Insert the given physical page (pa) at |
3048 | * the specified virtual address (va) in the | | 3048 | * the specified virtual address (va) in the |
3049 | * target physical map with the requested protection. | | 3049 | * target physical map with the requested protection. |
3050 | * | | 3050 | * |
3051 | * NB: This is the only routine which MAY NOT lazy-evaluate | | 3051 | * NB: This is the only routine which MAY NOT lazy-evaluate |
3052 | * or lose information. That is, this routine must actually | | 3052 | * or lose information. That is, this routine must actually |
3053 | * insert this page into the given map NOW. | | 3053 | * insert this page into the given map NOW. |
3054 | */ | | 3054 | */ |
3055 | int | | 3055 | int |
3056 | pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 3056 | pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
3057 | { | | 3057 | { |
3058 | struct l2_bucket *l2b; | | 3058 | struct l2_bucket *l2b; |
3059 | struct vm_page *pg, *opg; | | 3059 | struct vm_page *pg, *opg; |
3060 | u_int nflags; | | 3060 | u_int nflags; |
3061 | u_int oflags; | | 3061 | u_int oflags; |
3062 | const bool kpm_p = (pm == pmap_kernel()); | | 3062 | const bool kpm_p = (pm == pmap_kernel()); |
3063 | #ifdef ARM_HAS_VBAR | | 3063 | #ifdef ARM_HAS_VBAR |
3064 | const bool vector_page_p = false; | | 3064 | const bool vector_page_p = false; |
3065 | #else | | 3065 | #else |
3066 | const bool vector_page_p = (va == vector_page); | | 3066 | const bool vector_page_p = (va == vector_page); |
3067 | #endif | | 3067 | #endif |
3068 | | | 3068 | |
3069 | UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); | | 3069 | UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); |
3070 | | | 3070 | |
3071 | UVMHIST_LOG(maphist, " (pm %p va %#x pa %#x prot %#x", | | 3071 | UVMHIST_LOG(maphist, " (pm %p va %#x pa %#x prot %#x", |
3072 | pm, va, pa, prot); | | 3072 | pm, va, pa, prot); |
3073 | UVMHIST_LOG(maphist, " flag %#x", flags, 0, 0, 0); | | 3073 | UVMHIST_LOG(maphist, " flag %#x", flags, 0, 0, 0); |
3074 | | | 3074 | |
3075 | KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); | | 3075 | KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); |
3076 | KDASSERT(((va | pa) & PGOFSET) == 0); | | 3076 | KDASSERT(((va | pa) & PGOFSET) == 0); |
3077 | | | 3077 | |
3078 | /* | | 3078 | /* |
3079 | * Get a pointer to the page. Later on in this function, we | | 3079 | * Get a pointer to the page. Later on in this function, we |
3080 | * test for a managed page by checking pg != NULL. | | 3080 | * test for a managed page by checking pg != NULL. |
3081 | */ | | 3081 | */ |
3082 | pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; | | 3082 | pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; |
3083 | | | 3083 | |
3084 | nflags = 0; | | 3084 | nflags = 0; |
3085 | if (prot & VM_PROT_WRITE) | | 3085 | if (prot & VM_PROT_WRITE) |
3086 | nflags |= PVF_WRITE; | | 3086 | nflags |= PVF_WRITE; |
3087 | if (prot & VM_PROT_EXECUTE) | | 3087 | if (prot & VM_PROT_EXECUTE) |
3088 | nflags |= PVF_EXEC; | | 3088 | nflags |= PVF_EXEC; |
3089 | if (flags & PMAP_WIRED) | | 3089 | if (flags & PMAP_WIRED) |
3090 | nflags |= PVF_WIRED; | | 3090 | nflags |= PVF_WIRED; |
3091 | | | 3091 | |
3092 | pmap_acquire_pmap_lock(pm); | | 3092 | pmap_acquire_pmap_lock(pm); |
3093 | | | 3093 | |
3094 | /* | | 3094 | /* |
3095 | * Fetch the L2 bucket which maps this page, allocating one if | | 3095 | * Fetch the L2 bucket which maps this page, allocating one if |
3096 | * necessary for user pmaps. | | 3096 | * necessary for user pmaps. |
3097 | */ | | 3097 | */ |
3098 | if (kpm_p) { | | 3098 | if (kpm_p) { |
3099 | l2b = pmap_get_l2_bucket(pm, va); | | 3099 | l2b = pmap_get_l2_bucket(pm, va); |
3100 | } else { | | 3100 | } else { |
3101 | l2b = pmap_alloc_l2_bucket(pm, va); | | 3101 | l2b = pmap_alloc_l2_bucket(pm, va); |
3102 | } | | 3102 | } |
3103 | if (l2b == NULL) { | | 3103 | if (l2b == NULL) { |
3104 | if (flags & PMAP_CANFAIL) { | | 3104 | if (flags & PMAP_CANFAIL) { |
3105 | pmap_release_pmap_lock(pm); | | 3105 | pmap_release_pmap_lock(pm); |
3106 | return (ENOMEM); | | 3106 | return (ENOMEM); |
3107 | } | | 3107 | } |
3108 | panic("pmap_enter: failed to allocate L2 bucket"); | | 3108 | panic("pmap_enter: failed to allocate L2 bucket"); |
3109 | } | | 3109 | } |
3110 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3110 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3111 | const pt_entry_t opte = *ptep; | | 3111 | const pt_entry_t opte = *ptep; |
3112 | pt_entry_t npte = pa; | | 3112 | pt_entry_t npte = pa; |
3113 | oflags = 0; | | 3113 | oflags = 0; |
3114 | | | 3114 | |
3115 | if (opte) { | | 3115 | if (opte) { |
3116 | /* | | 3116 | /* |
3117 | * There is already a mapping at this address. | | 3117 | * There is already a mapping at this address. |
3118 | * If the physical address is different, lookup the | | 3118 | * If the physical address is different, lookup the |
3119 | * vm_page. | | 3119 | * vm_page. |
3120 | */ | | 3120 | */ |
3121 | if (l2pte_pa(opte) != pa) | | 3121 | if (l2pte_pa(opte) != pa) |
3122 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3122 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3123 | else | | 3123 | else |
3124 | opg = pg; | | 3124 | opg = pg; |
3125 | } else | | 3125 | } else |
3126 | opg = NULL; | | 3126 | opg = NULL; |
3127 | | | 3127 | |
3128 | if (pg) { | | 3128 | if (pg) { |
3129 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3129 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3130 | | | 3130 | |
3131 | /* | | 3131 | /* |
3132 | * This is to be a managed mapping. | | 3132 | * This is to be a managed mapping. |
3133 | */ | | 3133 | */ |
3134 | pmap_acquire_page_lock(md); | | 3134 | pmap_acquire_page_lock(md); |
3135 | if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) { | | 3135 | if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) { |
3136 | /* | | 3136 | /* |
3137 | * - The access type indicates that we don't need | | 3137 | * - The access type indicates that we don't need |
3138 | * to do referenced emulation. | | 3138 | * to do referenced emulation. |
3139 | * OR | | 3139 | * OR |
3140 | * - The physical page has already been referenced | | 3140 | * - The physical page has already been referenced |
3141 | * so no need to re-do referenced emulation here. | | 3141 | * so no need to re-do referenced emulation here. |
3142 | */ | | 3142 | */ |
3143 | npte |= l2pte_set_readonly(L2_S_PROTO); | | 3143 | npte |= l2pte_set_readonly(L2_S_PROTO); |
3144 | | | 3144 | |
3145 | nflags |= PVF_REF; | | 3145 | nflags |= PVF_REF; |
3146 | | | 3146 | |
3147 | if ((prot & VM_PROT_WRITE) != 0 && | | 3147 | if ((prot & VM_PROT_WRITE) != 0 && |
3148 | ((flags & VM_PROT_WRITE) != 0 || | | 3148 | ((flags & VM_PROT_WRITE) != 0 || |
3149 | (md->pvh_attrs & PVF_MOD) != 0)) { | | 3149 | (md->pvh_attrs & PVF_MOD) != 0)) { |
3150 | /* | | 3150 | /* |
3151 | * This is a writable mapping, and the | | 3151 | * This is a writable mapping, and the |
3152 | * page's mod state indicates it has | | 3152 | * page's mod state indicates it has |
3153 | * already been modified. Make it | | 3153 | * already been modified. Make it |
3154 | * writable from the outset. | | 3154 | * writable from the outset. |
3155 | */ | | 3155 | */ |
3156 | npte = l2pte_set_writable(npte); | | 3156 | npte = l2pte_set_writable(npte); |
3157 | nflags |= PVF_MOD; | | 3157 | nflags |= PVF_MOD; |
3158 | } | | 3158 | } |
3159 | | | 3159 | |
3160 | #ifdef ARM_MMU_EXTENDED | | 3160 | #ifdef ARM_MMU_EXTENDED |
3161 | /* | | 3161 | /* |
3162 | * If the page has been cleaned, then the pvh_attrs | | 3162 | * If the page has been cleaned, then the pvh_attrs |
3163 | * will have PVF_EXEC set, so mark it execute so we | | 3163 | * will have PVF_EXEC set, so mark it execute so we |
3164 | * don't get an access fault when trying to execute | | 3164 | * don't get an access fault when trying to execute |
3165 | * from it. | | 3165 | * from it. |
3166 | */ | | 3166 | */ |
3167 | if (md->pvh_attrs & nflags & PVF_EXEC) { | | 3167 | if (md->pvh_attrs & nflags & PVF_EXEC) { |
3168 | npte &= ~L2_XS_XN; | | 3168 | npte &= ~L2_XS_XN; |
3169 | } | | 3169 | } |
3170 | #endif | | 3170 | #endif |
3171 | } else { | | 3171 | } else { |
3172 | /* | | 3172 | /* |
3173 | * Need to do page referenced emulation. | | 3173 | * Need to do page referenced emulation. |
3174 | */ | | 3174 | */ |
3175 | npte |= L2_TYPE_INV; | | 3175 | npte |= L2_TYPE_INV; |
3176 | } | | 3176 | } |
3177 | | | 3177 | |
3178 | if (flags & ARM32_MMAP_WRITECOMBINE) { | | 3178 | if (flags & ARM32_MMAP_WRITECOMBINE) { |
3179 | npte |= pte_l2_s_wc_mode; | | 3179 | npte |= pte_l2_s_wc_mode; |
3180 | } else | | 3180 | } else |
3181 | npte |= pte_l2_s_cache_mode; | | 3181 | npte |= pte_l2_s_cache_mode; |
3182 | | | 3182 | |
3183 | if (pg == opg) { | | 3183 | if (pg == opg) { |
3184 | /* | | 3184 | /* |
3185 | * We're changing the attrs of an existing mapping. | | 3185 | * We're changing the attrs of an existing mapping. |
3186 | */ | | 3186 | */ |
3187 | oflags = pmap_modify_pv(md, pa, pm, va, | | 3187 | oflags = pmap_modify_pv(md, pa, pm, va, |
3188 | PVF_WRITE | PVF_EXEC | PVF_WIRED | | | 3188 | PVF_WRITE | PVF_EXEC | PVF_WIRED | |
3189 | PVF_MOD | PVF_REF, nflags); | | 3189 | PVF_MOD | PVF_REF, nflags); |
3190 | | | 3190 | |
3191 | #ifdef PMAP_CACHE_VIVT | | 3191 | #ifdef PMAP_CACHE_VIVT |
3192 | /* | | 3192 | /* |
3193 | * We may need to flush the cache if we're | | 3193 | * We may need to flush the cache if we're |
3194 | * doing rw-ro... | | 3194 | * doing rw-ro... |
3195 | */ | | 3195 | */ |
3196 | if (pm->pm_cstate.cs_cache_d && | | 3196 | if (pm->pm_cstate.cs_cache_d && |
3197 | (oflags & PVF_NC) == 0 && | | 3197 | (oflags & PVF_NC) == 0 && |
3198 | l2pte_writable_p(opte) && | | 3198 | l2pte_writable_p(opte) && |
3199 | (prot & VM_PROT_WRITE) == 0) | | 3199 | (prot & VM_PROT_WRITE) == 0) |
3200 | cpu_dcache_wb_range(va, PAGE_SIZE); | | 3200 | cpu_dcache_wb_range(va, PAGE_SIZE); |
3201 | #endif | | 3201 | #endif |
3202 | } else { | | 3202 | } else { |
3203 | struct pv_entry *pv; | | 3203 | struct pv_entry *pv; |
3204 | /* | | 3204 | /* |
3205 | * New mapping, or changing the backing page | | 3205 | * New mapping, or changing the backing page |
3206 | * of an existing mapping. | | 3206 | * of an existing mapping. |
3207 | */ | | 3207 | */ |
3208 | if (opg) { | | 3208 | if (opg) { |
3209 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 3209 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
3210 | paddr_t opa = VM_PAGE_TO_PHYS(opg); | | 3210 | paddr_t opa = VM_PAGE_TO_PHYS(opg); |
3211 | | | 3211 | |
3212 | /* | | 3212 | /* |
3213 | * Replacing an existing mapping with a new one. | | 3213 | * Replacing an existing mapping with a new one. |
3214 | * It is part of our managed memory so we | | 3214 | * It is part of our managed memory so we |
3215 | * must remove it from the PV list | | 3215 | * must remove it from the PV list |
3216 | */ | | 3216 | */ |
3217 | pv = pmap_remove_pv(omd, opa, pm, va); | | 3217 | pv = pmap_remove_pv(omd, opa, pm, va); |
3218 | pmap_vac_me_harder(omd, opa, pm, 0); | | 3218 | pmap_vac_me_harder(omd, opa, pm, 0); |
3219 | oflags = pv->pv_flags; | | 3219 | oflags = pv->pv_flags; |
3220 | | | 3220 | |
3221 | #ifdef PMAP_CACHE_VIVT | | 3221 | #ifdef PMAP_CACHE_VIVT |
3222 | /* | | 3222 | /* |
3223 | * If the old mapping was valid (ref/mod | | 3223 | * If the old mapping was valid (ref/mod |
3224 | * emulation creates 'invalid' mappings | | 3224 | * emulation creates 'invalid' mappings |
3225 | * initially) then make sure to frob | | 3225 | * initially) then make sure to frob |
3226 | * the cache. | | 3226 | * the cache. |
3227 | */ | | 3227 | */ |
3228 | if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { | | 3228 | if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { |
3229 | pmap_cache_wbinv_page(pm, va, true, | | 3229 | pmap_cache_wbinv_page(pm, va, true, |
3230 | oflags); | | 3230 | oflags); |
3231 | } | | 3231 | } |
3232 | #endif | | 3232 | #endif |
3233 | } else { | | 3233 | } else { |
3234 | pmap_release_page_lock(md); | | 3234 | pmap_release_page_lock(md); |
3235 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); | | 3235 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); |
3236 | if (pv == NULL) { | | 3236 | if (pv == NULL) { |
3237 | pmap_release_pmap_lock(pm); | | 3237 | pmap_release_pmap_lock(pm); |
3238 | if ((flags & PMAP_CANFAIL) == 0) | | 3238 | if ((flags & PMAP_CANFAIL) == 0) |
3239 | panic("pmap_enter: " | | 3239 | panic("pmap_enter: " |
3240 | "no pv entries"); | | 3240 | "no pv entries"); |
3241 | | | 3241 | |
3242 | pmap_free_l2_bucket(pm, l2b, 0); | | 3242 | pmap_free_l2_bucket(pm, l2b, 0); |
3243 | UVMHIST_LOG(maphist, " <-- done (ENOMEM)", | | 3243 | UVMHIST_LOG(maphist, " <-- done (ENOMEM)", |
3244 | 0, 0, 0, 0); | | 3244 | 0, 0, 0, 0); |
3245 | return (ENOMEM); | | 3245 | return (ENOMEM); |
3246 | } | | 3246 | } |
3247 | pmap_acquire_page_lock(md); | | 3247 | pmap_acquire_page_lock(md); |
3248 | } | | 3248 | } |
3249 | | | 3249 | |
3250 | pmap_enter_pv(md, pa, pv, pm, va, nflags); | | 3250 | pmap_enter_pv(md, pa, pv, pm, va, nflags); |
3251 | } | | 3251 | } |
3252 | pmap_release_page_lock(md); | | 3252 | pmap_release_page_lock(md); |
3253 | } else { | | 3253 | } else { |
3254 | /* | | 3254 | /* |
3255 | * We're mapping an unmanaged page. | | 3255 | * We're mapping an unmanaged page. |
3256 | * These are always readable, and possibly writable, from | | 3256 | * These are always readable, and possibly writable, from |
3257 | * the get go as we don't need to track ref/mod status. | | 3257 | * the get go as we don't need to track ref/mod status. |
3258 | */ | | 3258 | */ |
3259 | npte |= l2pte_set_readonly(L2_S_PROTO); | | 3259 | npte |= l2pte_set_readonly(L2_S_PROTO); |
3260 | if (prot & VM_PROT_WRITE) | | 3260 | if (prot & VM_PROT_WRITE) |
3261 | npte = l2pte_set_writable(npte); | | 3261 | npte = l2pte_set_writable(npte); |
3262 | | | 3262 | |
3263 | /* | | 3263 | /* |
3264 | * Make sure the vector table is mapped cacheable | | 3264 | * Make sure the vector table is mapped cacheable |
3265 | */ | | 3265 | */ |
3266 | if ((vector_page_p && !kpm_p) | | 3266 | if ((vector_page_p && !kpm_p) |
3267 | || (flags & ARM32_MMAP_CACHEABLE)) { | | 3267 | || (flags & ARM32_MMAP_CACHEABLE)) { |
3268 | npte |= pte_l2_s_cache_mode; | | 3268 | npte |= pte_l2_s_cache_mode; |
3269 | #ifdef ARM_MMU_EXTENDED | | 3269 | #ifdef ARM_MMU_EXTENDED |
3270 | npte &= ~L2_XS_XN; /* and executable */ | | 3270 | npte &= ~L2_XS_XN; /* and executable */ |
3271 | #endif | | 3271 | #endif |
3272 | } else if (flags & ARM32_MMAP_WRITECOMBINE) { | | 3272 | } else if (flags & ARM32_MMAP_WRITECOMBINE) { |
3273 | npte |= pte_l2_s_wc_mode; | | 3273 | npte |= pte_l2_s_wc_mode; |
3274 | } | | 3274 | } |
3275 | if (opg) { | | 3275 | if (opg) { |
3276 | /* | | 3276 | /* |
3277 | * Looks like there's an existing 'managed' mapping | | 3277 | * Looks like there's an existing 'managed' mapping |
3278 | * at this address. | | 3278 | * at this address. |
3279 | */ | | 3279 | */ |
3280 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 3280 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
3281 | paddr_t opa = VM_PAGE_TO_PHYS(opg); | | 3281 | paddr_t opa = VM_PAGE_TO_PHYS(opg); |
3282 | | | 3282 | |
3283 | pmap_acquire_page_lock(omd); | | 3283 | pmap_acquire_page_lock(omd); |
3284 | struct pv_entry *pv = pmap_remove_pv(omd, opa, pm, va); | | 3284 | struct pv_entry *pv = pmap_remove_pv(omd, opa, pm, va); |
3285 | pmap_vac_me_harder(omd, opa, pm, 0); | | 3285 | pmap_vac_me_harder(omd, opa, pm, 0); |
3286 | oflags = pv->pv_flags; | | 3286 | oflags = pv->pv_flags; |
3287 | pmap_release_page_lock(omd); | | 3287 | pmap_release_page_lock(omd); |
3288 | | | 3288 | |
3289 | #ifdef PMAP_CACHE_VIVT | | 3289 | #ifdef PMAP_CACHE_VIVT |
3290 | if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { | | 3290 | if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { |
3291 | pmap_cache_wbinv_page(pm, va, true, oflags); | | 3291 | pmap_cache_wbinv_page(pm, va, true, oflags); |
3292 | } | | 3292 | } |
3293 | #endif | | 3293 | #endif |
3294 | pool_put(&pmap_pv_pool, pv); | | 3294 | pool_put(&pmap_pv_pool, pv); |
3295 | } | | 3295 | } |
3296 | } | | 3296 | } |
3297 | | | 3297 | |
3298 | /* | | 3298 | /* |
3299 | * Make sure userland mappings get the right permissions | | 3299 | * Make sure userland mappings get the right permissions |
3300 | */ | | 3300 | */ |
3301 | if (!vector_page_p && !kpm_p) { | | 3301 | if (!vector_page_p && !kpm_p) { |
3302 | npte |= L2_S_PROT_U; | | 3302 | npte |= L2_S_PROT_U; |
3303 | #ifdef ARM_MMU_EXTENDED | | 3303 | #ifdef ARM_MMU_EXTENDED |
3304 | npte |= L2_XS_nG; /* user pages are not global */ | | 3304 | npte |= L2_XS_nG; /* user pages are not global */ |
3305 | #endif | | 3305 | #endif |
3306 | } | | 3306 | } |
3307 | | | 3307 | |
3308 | /* | | 3308 | /* |
3309 | * Keep the stats up to date | | 3309 | * Keep the stats up to date |
3310 | */ | | 3310 | */ |
3311 | if (opte == 0) { | | 3311 | if (opte == 0) { |
3312 | l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE; | | 3312 | l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE; |
3313 | pm->pm_stats.resident_count++; | | 3313 | pm->pm_stats.resident_count++; |
3314 | } | | 3314 | } |
3315 | | | 3315 | |
3316 | UVMHIST_LOG(maphist, " opte %#x npte %#x", opte, npte, 0, 0); | | 3316 | UVMHIST_LOG(maphist, " opte %#x npte %#x", opte, npte, 0, 0); |
3317 | | | 3317 | |
3318 | #if defined(ARM_MMU_EXTENDED) | | 3318 | #if defined(ARM_MMU_EXTENDED) |
3319 | /* | | 3319 | /* |
3320 | * If exec protection was requested but the page hasn't been synced, | | 3320 | * If exec protection was requested but the page hasn't been synced, |
3321 | * sync it now and allow execution from it. | | 3321 | * sync it now and allow execution from it. |
3322 | */ | | 3322 | */ |
3323 | if ((nflags & PVF_EXEC) && (npte & L2_XS_XN)) { | | 3323 | if ((nflags & PVF_EXEC) && (npte & L2_XS_XN)) { |
3324 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3324 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3325 | npte &= ~L2_XS_XN; | | 3325 | npte &= ~L2_XS_XN; |
3326 | pmap_syncicache_page(md, pa); | | 3326 | pmap_syncicache_page(md, pa); |
3327 | PMAPCOUNT(exec_synced_map); | | 3327 | PMAPCOUNT(exec_synced_map); |
3328 | } | | 3328 | } |
3329 | #endif | | 3329 | #endif |
3330 | /* | | 3330 | /* |
3331 | * If this is just a wiring change, the two PTEs will be | | 3331 | * If this is just a wiring change, the two PTEs will be |
3332 | * identical, so there's no need to update the page table. | | 3332 | * identical, so there's no need to update the page table. |
3333 | */ | | 3333 | */ |
3334 | if (npte != opte) { | | 3334 | if (npte != opte) { |
3335 | l2pte_reset(ptep); | | 3335 | l2pte_reset(ptep); |
3336 | PTE_SYNC(ptep); | | 3336 | PTE_SYNC(ptep); |
3337 | if (l2pte_valid_p(opte)) { | | 3337 | if (l2pte_valid_p(opte)) { |
3338 | pmap_tlb_flush_SE(pm, va, oflags); | | 3338 | pmap_tlb_flush_SE(pm, va, oflags); |
3339 | } | | 3339 | } |
3340 | l2pte_set(ptep, npte, 0); | | 3340 | l2pte_set(ptep, npte, 0); |
3341 | PTE_SYNC(ptep); | | 3341 | PTE_SYNC(ptep); |
3342 | #ifndef ARM_MMU_EXTENDED | | 3342 | #ifndef ARM_MMU_EXTENDED |
3343 | bool is_cached = pmap_is_cached(pm); | | 3343 | bool is_cached = pmap_is_cached(pm); |
3344 | if (is_cached) { | | 3344 | if (is_cached) { |
3345 | /* | | 3345 | /* |
3346 | * We only need to frob the cache/tlb if this pmap | | 3346 | * We only need to frob the cache/tlb if this pmap |
3347 | * is current | | 3347 | * is current |
3348 | */ | | 3348 | */ |
3349 | if (!vector_page_p && l2pte_valid_p(npte)) { | | 3349 | if (!vector_page_p && l2pte_valid_p(npte)) { |
3350 | /* | | 3350 | /* |
3351 | * This mapping is likely to be accessed as | | 3351 | * This mapping is likely to be accessed as |
3352 | * soon as we return to userland. Fix up the | | 3352 | * soon as we return to userland. Fix up the |
3353 | * L1 entry to avoid taking another | | 3353 | * L1 entry to avoid taking another |
3354 | * page/domain fault. | | 3354 | * page/domain fault. |
3355 | */ | | 3355 | */ |
3356 | pd_entry_t *pdep = pmap_l1_kva(pm) | | 3356 | pd_entry_t *pdep = pmap_l1_kva(pm) |
3357 | + l1pte_index(va); | | 3357 | + l1pte_index(va); |
3358 | pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa | | 3358 | pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa |
3359 | | L1_C_DOM(pmap_domain(pm)); | | 3359 | | L1_C_DOM(pmap_domain(pm)); |
3360 | if (*pdep != pde) { | | 3360 | if (*pdep != pde) { |
3361 | l1pte_setone(pdep, pde); | | 3361 | l1pte_setone(pdep, pde); |
3362 | PTE_SYNC(pdep); | | 3362 | PTE_SYNC(pdep); |
3363 | } | | 3363 | } |
3364 | } | | 3364 | } |
3365 | } | | 3365 | } |
3366 | #endif /* !ARM_MMU_EXTENDED */ | | 3366 | #endif /* !ARM_MMU_EXTENDED */ |
3367 | | | 3367 | |
3368 | #ifndef ARM_MMU_EXTENDED | | 3368 | #ifndef ARM_MMU_EXTENDED |
3369 | UVMHIST_LOG(maphist, " is_cached %d cs 0x%08x\n", | | 3369 | UVMHIST_LOG(maphist, " is_cached %d cs 0x%08x\n", |
3370 | is_cached, pm->pm_cstate.cs_all, 0, 0); | | 3370 | is_cached, pm->pm_cstate.cs_all, 0, 0); |
3371 | | | 3371 | |
3372 | if (pg != NULL) { | | 3372 | if (pg != NULL) { |
3373 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3373 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3374 | | | 3374 | |
3375 | pmap_acquire_page_lock(md); | | 3375 | pmap_acquire_page_lock(md); |
3376 | pmap_vac_me_harder(md, pa, pm, va); | | 3376 | pmap_vac_me_harder(md, pa, pm, va); |
3377 | pmap_release_page_lock(md); | | 3377 | pmap_release_page_lock(md); |
3378 | } | | 3378 | } |
3379 | #endif | | 3379 | #endif |
3380 | } | | 3380 | } |
3381 | #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) | | 3381 | #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) |
3382 | if (pg) { | | 3382 | if (pg) { |
3383 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3383 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3384 | | | 3384 | |
3385 | pmap_acquire_page_lock(md); | | 3385 | pmap_acquire_page_lock(md); |
3386 | #ifndef ARM_MMU_EXTENDED | | 3386 | #ifndef ARM_MMU_EXTENDED |
3387 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 3387 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
3388 | #endif | | 3388 | #endif |
3389 | PMAP_VALIDATE_MD_PAGE(md); | | 3389 | PMAP_VALIDATE_MD_PAGE(md); |
3390 | pmap_release_page_lock(md); | | 3390 | pmap_release_page_lock(md); |
3391 | } | | 3391 | } |
3392 | #endif | | 3392 | #endif |
3393 | | | 3393 | |
3394 | pmap_release_pmap_lock(pm); | | 3394 | pmap_release_pmap_lock(pm); |
3395 | | | 3395 | |
3396 | return (0); | | 3396 | return (0); |
3397 | } | | 3397 | } |
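/*
 * Illustrative sketch (editor's addition, not from pmap.c): a minimal
 * pmap_enter() caller honouring the contract documented above.  The
 * flags carry the access type alongside PMAP_CANFAIL, a PMAP_CANFAIL
 * failure surfaces as ENOMEM rather than a panic, and per pmap(9) a
 * pmap_update() call is still needed before the mapping is guaranteed
 * visible.  example_map_one is a hypothetical name.
 */
static int
example_map_one(pmap_t pm, vaddr_t va, paddr_t pa)
{
	const vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
	int error;

	error = pmap_enter(pm, va, pa, prot, prot | PMAP_CANFAIL);
	if (error != 0)
		return error;	/* e.g. ENOMEM: no L2 bucket or pv entry */

	pmap_update(pm);	/* flush any deferred TLB/cache work */
	return 0;
}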
3398 | | | 3398 | |
3399 | /* | | 3399 | /* |
3400 | * pmap_remove() | | 3400 | * pmap_remove() |
3401 | * | | 3401 | * |
3402 | * pmap_remove is responsible for nuking a number of mappings for a range | | 3402 | * pmap_remove is responsible for nuking a number of mappings for a range |
3403 | * of virtual address space in the current pmap. To do this efficiently | | 3403 | * of virtual address space in the current pmap. To do this efficiently |
3404 | * is interesting, because in a number of cases a wide virtual address | | 3404 | * is interesting, because in a number of cases a wide virtual address |
3405 | * range may be supplied that contains few actual mappings. So, the | | 3405 | * range may be supplied that contains few actual mappings. So, the |
3406 | * optimisations are: | | 3406 | * optimisations are: |
3407 | * 1. Skip over hunks of address space for which no L1 or L2 entry exists. | | 3407 | * 1. Skip over hunks of address space for which no L1 or L2 entry exists. |
3408 | * 2. Build up a list of pages we've hit, up to a maximum, so we can | | 3408 | * 2. Build up a list of pages we've hit, up to a maximum, so we can |
3409 | * maybe do just a partial cache clean. This path of execution is | | 3409 | * maybe do just a partial cache clean. This path of execution is |
3410 | * complicated by the fact that the cache must be flushed _before_ | | 3410 | * complicated by the fact that the cache must be flushed _before_ |
3411 | * the PTE is nuked, being a VAC :-) | | 3411 | * the PTE is nuked, being a VAC :-) |
3412 | * 3. If we're called after UVM calls pmap_remove_all(), we can defer | | 3412 | * 3. If we're called after UVM calls pmap_remove_all(), we can defer |
3413 | * all invalidations until pmap_update(), since pmap_remove_all() has | | 3413 | * all invalidations until pmap_update(), since pmap_remove_all() has |
3414 | * already flushed the cache. | | 3414 | * already flushed the cache. |
3415 | * 4. Maybe later fast-case a single page, but I don't think this is | | 3415 | * 4. Maybe later fast-case a single page, but I don't think this is |
3416 | * going to make _that_ much difference overall. | | 3416 | * going to make _that_ much difference overall. |
3417 | */ | | 3417 | */ |
3418 | | | 3418 | |
3419 | #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 | | 3419 | #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 |
3420 | | | 3420 | |
3421 | void | | 3421 | void |
3422 | pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva) | | 3422 | pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva) |
3423 | { | | 3423 | { |
3424 | vaddr_t next_bucket; | | 3424 | vaddr_t next_bucket; |
3425 | u_int cleanlist_idx, total, cnt; | | 3425 | u_int cleanlist_idx, total, cnt; |
3426 | struct { | | 3426 | struct { |
3427 | vaddr_t va; | | 3427 | vaddr_t va; |
3428 | pt_entry_t *ptep; | | 3428 | pt_entry_t *ptep; |
3429 | } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; | | 3429 | } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; |
3430 | u_int mappings; | | 3430 | u_int mappings; |
3431 | | | 3431 | |
3432 | UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); | | 3432 | UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); |
3433 | UVMHIST_LOG(maphist, " (pm=%p, sva=%#x, eva=%#x)", pm, sva, eva, 0); | | 3433 | UVMHIST_LOG(maphist, " (pm=%p, sva=%#x, eva=%#x)", pm, sva, eva, 0); |
3434 | | | 3434 | |
3435 | /* | | 3435 | /* |
3436 | * we lock in the pmap => pv_head direction | | 3436 | * we lock in the pmap => pv_head direction |
3437 | */ | | 3437 | */ |
3438 | pmap_acquire_pmap_lock(pm); | | 3438 | pmap_acquire_pmap_lock(pm); |
3439 | | | 3439 | |
3440 | if (pm->pm_remove_all || !pmap_is_cached(pm)) { | | 3440 | if (pm->pm_remove_all || !pmap_is_cached(pm)) { |
3441 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; | | 3441 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; |
3442 | #ifndef ARM_MMU_EXTENDED | | 3442 | #ifndef ARM_MMU_EXTENDED |
3443 | if (pm->pm_cstate.cs_tlb == 0) | | 3443 | if (pm->pm_cstate.cs_tlb == 0) |
3444 | pm->pm_remove_all = true; | | 3444 | pm->pm_remove_all = true; |
3445 | #endif | | 3445 | #endif |
3446 | } else | | 3446 | } else |
3447 | cleanlist_idx = 0; | | 3447 | cleanlist_idx = 0; |
3448 | | | 3448 | |
3449 | total = 0; | | 3449 | total = 0; |
3450 | | | 3450 | |
3451 | while (sva < eva) { | | 3451 | while (sva < eva) { |
3452 | /* | | 3452 | /* |
3453 | * Do one L2 bucket's worth at a time. | | 3453 | * Do one L2 bucket's worth at a time. |
3454 | */ | | 3454 | */ |
3455 | next_bucket = L2_NEXT_BUCKET_VA(sva); | | 3455 | next_bucket = L2_NEXT_BUCKET_VA(sva); |
3456 | if (next_bucket > eva) | | 3456 | if (next_bucket > eva) |
3457 | next_bucket = eva; | | 3457 | next_bucket = eva; |
3458 | | | 3458 | |
3459 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, sva); | | 3459 | struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, sva); |
3460 | if (l2b == NULL) { | | 3460 | if (l2b == NULL) { |
3461 | sva = next_bucket; | | 3461 | sva = next_bucket; |
3462 | continue; | | 3462 | continue; |
3463 | } | | 3463 | } |
3464 | | | 3464 | |
3465 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 3465 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
3466 | | | 3466 | |
3467 | for (mappings = 0; | | 3467 | for (mappings = 0; |
3468 | sva < next_bucket; | | 3468 | sva < next_bucket; |
3469 | sva += PAGE_SIZE, ptep += PAGE_SIZE / L2_S_SIZE) { | | 3469 | sva += PAGE_SIZE, ptep += PAGE_SIZE / L2_S_SIZE) { |
3470 | pt_entry_t opte = *ptep; | | 3470 | pt_entry_t opte = *ptep; |
3471 | | | 3471 | |
3472 | if (opte == 0) { | | 3472 | if (opte == 0) { |
3473 | /* Nothing here, move along */ | | 3473 | /* Nothing here, move along */ |
3474 | continue; | | 3474 | continue; |
3475 | } | | 3475 | } |
3476 | | | 3476 | |
3477 | u_int flags = PVF_REF; | | 3477 | u_int flags = PVF_REF; |
3478 | paddr_t pa = l2pte_pa(opte); | | 3478 | paddr_t pa = l2pte_pa(opte); |
3479 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); | | 3479 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); |
3480 | | | 3480 | |
3481 | /* | | 3481 | /* |
3482 | * Update flags. In a number of circumstances, | | 3482 | * Update flags. In a number of circumstances, |
3483 | * we could cluster a lot of these and do a | | 3483 | * we could cluster a lot of these and do a |
3484 | * number of sequential pages in one go. | | 3484 | * number of sequential pages in one go. |
3485 | */ | | 3485 | */ |
3486 | if (pg != NULL) { | | 3486 | if (pg != NULL) { |
3487 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3487 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3488 | struct pv_entry *pv; | | 3488 | struct pv_entry *pv; |
3489 | | | 3489 | |
3490 | pmap_acquire_page_lock(md); | | 3490 | pmap_acquire_page_lock(md); |
3491 | pv = pmap_remove_pv(md, pa, pm, sva); | | 3491 | pv = pmap_remove_pv(md, pa, pm, sva); |
3492 | pmap_vac_me_harder(md, pa, pm, 0); | | 3492 | pmap_vac_me_harder(md, pa, pm, 0); |
3493 | pmap_release_page_lock(md); | | 3493 | pmap_release_page_lock(md); |
3494 | if (pv != NULL) { | | 3494 | if (pv != NULL) { |
3495 | if (pm->pm_remove_all == false) { | | 3495 | if (pm->pm_remove_all == false) { |
3496 | flags = pv->pv_flags; | | 3496 | flags = pv->pv_flags; |
3497 | } | | 3497 | } |
3498 | pool_put(&pmap_pv_pool, pv); | | 3498 | pool_put(&pmap_pv_pool, pv); |
3499 | } | | 3499 | } |
3500 | } | | 3500 | } |
3501 | mappings += PAGE_SIZE / L2_S_SIZE; | | 3501 | mappings += PAGE_SIZE / L2_S_SIZE; |
3502 | | | 3502 | |
3503 | if (!l2pte_valid_p(opte)) { | | 3503 | if (!l2pte_valid_p(opte)) { |
3504 | /* | | 3504 | /* |
3505 | * Ref/Mod emulation is still active for this | | 3505 | * Ref/Mod emulation is still active for this |
3506 | * mapping, therefore it has not yet been | | 3506 | * mapping, therefore it has not yet been |
3507 | * accessed. No need to frob the cache/tlb. | | 3507 | * accessed. No need to frob the cache/tlb. |
3508 | */ | | 3508 | */ |
3509 | l2pte_reset(ptep); | | 3509 | l2pte_reset(ptep); |
3510 | PTE_SYNC_CURRENT(pm, ptep); | | 3510 | PTE_SYNC_CURRENT(pm, ptep); |
3511 | continue; | | 3511 | continue; |
3512 | } | | 3512 | } |
3513 | | | 3513 | |
3514 | #ifdef ARM_MMU_EXTENDED | | 3514 | #ifdef ARM_MMU_EXTENDED |
3515 | if (pm == pmap_kernel()) { | | 3515 | if (pm == pmap_kernel()) { |
3516 | l2pte_reset(ptep); | | 3516 | l2pte_reset(ptep); |
3517 | PTE_SYNC(ptep); | | 3517 | PTE_SYNC(ptep); |
3518 | pmap_tlb_flush_SE(pm, sva, flags); | | 3518 | pmap_tlb_flush_SE(pm, sva, flags); |
3519 | continue; | | 3519 | continue; |
3520 | } | | 3520 | } |
3521 | #endif | | 3521 | #endif |
3522 | if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3522 | if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3523 | /* Add to the clean list. */ | | 3523 | /* Add to the clean list. */ |
3524 | cleanlist[cleanlist_idx].ptep = ptep; | | 3524 | cleanlist[cleanlist_idx].ptep = ptep; |
3525 | cleanlist[cleanlist_idx].va = | | 3525 | cleanlist[cleanlist_idx].va = |
3526 | sva | (flags & PVF_EXEC); | | 3526 | sva | (flags & PVF_EXEC); |
3527 | cleanlist_idx++; | | 3527 | cleanlist_idx++; |
3528 | } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3528 | } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3529 | /* Nuke everything if needed. */ | | 3529 | /* Nuke everything if needed. */ |
3530 | #ifdef PMAP_CACHE_VIVT | | 3530 | #ifdef PMAP_CACHE_VIVT |
3531 | pmap_cache_wbinv_all(pm, PVF_EXEC); | | 3531 | pmap_cache_wbinv_all(pm, PVF_EXEC); |
3532 | #endif | | 3532 | #endif |
3533 | /* | | 3533 | /* |
3534 | * Roll back the previous PTE list, | | 3534 | * Roll back the previous PTE list, |
3535 | * and zero out the current PTE. | | 3535 | * and zero out the current PTE. |
3536 | */ | | 3536 | */ |
3537 | for (cnt = 0; | | 3537 | for (cnt = 0; |
3538 | cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { | | 3538 | cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { |
3539 | l2pte_reset(cleanlist[cnt].ptep); | | 3539 | l2pte_reset(cleanlist[cnt].ptep); |
3540 | PTE_SYNC(cleanlist[cnt].ptep); | | 3540 | PTE_SYNC(cleanlist[cnt].ptep); |
3541 | } | | 3541 | } |
3542 | l2pte_reset(ptep); | | 3542 | l2pte_reset(ptep); |
3543 | PTE_SYNC(ptep); | | 3543 | PTE_SYNC(ptep); |
3544 | cleanlist_idx++; | | 3544 | cleanlist_idx++; |
3545 | pm->pm_remove_all = true; | | 3545 | pm->pm_remove_all = true; |
3546 | } else { | | 3546 | } else { |
3547 | l2pte_reset(ptep); | | 3547 | l2pte_reset(ptep); |
3548 | PTE_SYNC(ptep); | | 3548 | PTE_SYNC(ptep); |
3549 | if (pm->pm_remove_all == false) { | | 3549 | if (pm->pm_remove_all == false) { |
3550 | pmap_tlb_flush_SE(pm, sva, flags); | | 3550 | pmap_tlb_flush_SE(pm, sva, flags); |
3551 | } | | 3551 | } |
3552 | } | | 3552 | } |
3553 | } | | 3553 | } |
3554 | | | 3554 | |
3555 | /* | | 3555 | /* |
3556 | * Deal with any leftovers | | 3556 | * Deal with any leftovers |
3557 | */ | | 3557 | */ |
3558 | if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3558 | if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3559 | total += cleanlist_idx; | | 3559 | total += cleanlist_idx; |
3560 | for (cnt = 0; cnt < cleanlist_idx; cnt++) { | | 3560 | for (cnt = 0; cnt < cleanlist_idx; cnt++) { |
3561 | l2pte_reset(cleanlist[cnt].ptep); | | 3561 | l2pte_reset(cleanlist[cnt].ptep); |
3562 | PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); | | 3562 | PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); |
3563 | #ifdef ARM_MMU_EXTENDED | | 3563 | #ifdef ARM_MMU_EXTENDED |
3564 | vaddr_t clva = cleanlist[cnt].va; | | 3564 | vaddr_t clva = cleanlist[cnt].va; |
3565 | pmap_tlb_flush_SE(pm, clva, PVF_REF); | | 3565 | pmap_tlb_flush_SE(pm, clva, PVF_REF); |
3566 | #else | | 3566 | #else |
3567 | vaddr_t va = cleanlist[cnt].va; | | 3567 | vaddr_t va = cleanlist[cnt].va; |
3568 | if (pm->pm_cstate.cs_all != 0) { | | 3568 | if (pm->pm_cstate.cs_all != 0) { |
3569 | vaddr_t clva = va & ~PAGE_MASK; | | 3569 | vaddr_t clva = va & ~PAGE_MASK; |
3570 | u_int flags = va & PVF_EXEC; | | 3570 | u_int flags = va & PVF_EXEC; |
3571 | #ifdef PMAP_CACHE_VIVT | | 3571 | #ifdef PMAP_CACHE_VIVT |
3572 | pmap_cache_wbinv_page(pm, clva, true, | | 3572 | pmap_cache_wbinv_page(pm, clva, true, |
3573 | PVF_REF | PVF_WRITE | flags); | | 3573 | PVF_REF | PVF_WRITE | flags); |
3574 | #endif | | 3574 | #endif |
3575 | pmap_tlb_flush_SE(pm, clva, | | 3575 | pmap_tlb_flush_SE(pm, clva, |
3576 | PVF_REF | flags); | | 3576 | PVF_REF | flags); |
3577 | } | | 3577 | } |
3578 | #endif /* ARM_MMU_EXTENDED */ | | 3578 | #endif /* ARM_MMU_EXTENDED */ |
3579 | } | | 3579 | } |
3580 | | | 3580 | |
3581 | /* | | 3581 | /* |
3582 | * If it looks like we're removing a whole bunch | | 3582 | * If it looks like we're removing a whole bunch |
3583 | * of mappings, it's faster to just write-back | | 3583 | * of mappings, it's faster to just write-back |
3584 | * the whole cache now and defer TLB flushes until | | 3584 | * the whole cache now and defer TLB flushes until |
3585 | * pmap_update() is called. | | 3585 | * pmap_update() is called. |
3586 | */ | | 3586 | */ |
3587 | if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) | | 3587 | if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) |
3588 | cleanlist_idx = 0; | | 3588 | cleanlist_idx = 0; |
3589 | else { | | 3589 | else { |
3590 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; | | 3590 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; |
3591 | #ifdef PMAP_CACHE_VIVT | | 3591 | #ifdef PMAP_CACHE_VIVT |
3592 | pmap_cache_wbinv_all(pm, PVF_EXEC); | | 3592 | pmap_cache_wbinv_all(pm, PVF_EXEC); |
3593 | #endif | | 3593 | #endif |
3594 | pm->pm_remove_all = true; | | 3594 | pm->pm_remove_all = true; |
3595 | } | | 3595 | } |
3596 | } | | 3596 | } |
3597 | | | 3597 | |
3598 | | | 3598 | |
3599 | pmap_free_l2_bucket(pm, l2b, mappings); | | 3599 | pmap_free_l2_bucket(pm, l2b, mappings); |
3600 | pm->pm_stats.resident_count -= mappings / (PAGE_SIZE/L2_S_SIZE); | | 3600 | pm->pm_stats.resident_count -= mappings / (PAGE_SIZE/L2_S_SIZE); |
3601 | } | | 3601 | } |
3602 | | | 3602 | |
3603 | pmap_release_pmap_lock(pm); | | 3603 | pmap_release_pmap_lock(pm); |
3604 | } | | 3604 | } |
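/*
 * Illustrative sketch (editor's addition, not from pmap.c): the
 * clean-list strategy above, reduced to its core.  Per-page cache
 * maintenance is cheap for a handful of live PTEs, but once more than
 * PMAP_REMOVE_CLEAN_LIST_SIZE accumulate it is cheaper to write back
 * the whole cache once and defer TLB invalidation to pmap_update().
 * example_wbinv_page/example_wbinv_all are hypothetical stand-ins for
 * the real cache-maintenance primitives.
 */
static void
example_remove_flush(vaddr_t *live_vas, u_int nlive)
{
	u_int i;

	if (nlive <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
		/* Few mappings: clean and invalidate page by page. */
		for (i = 0; i < nlive; i++)
			example_wbinv_page(live_vas[i]);
	} else {
		/* Many mappings: one whole-cache writeback wins. */
		example_wbinv_all();
	}
}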
3605 | | | 3605 | |
3606 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3606 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3607 | static struct pv_entry * | | 3607 | static struct pv_entry * |
3608 | pmap_kremove_pg(struct vm_page *pg, vaddr_t va) | | 3608 | pmap_kremove_pg(struct vm_page *pg, vaddr_t va) |
3609 | { | | 3609 | { |
3610 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3610 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3611 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 3611 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
3612 | struct pv_entry *pv; | | 3612 | struct pv_entry *pv; |
3613 | | | 3613 | |
3614 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); | | 3614 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); |
3615 | KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); | | 3615 | KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); |
3616 | KASSERT(pmap_page_locked_p(md)); | | 3616 | KASSERT(pmap_page_locked_p(md)); |
3617 | | | 3617 | |
3618 | pv = pmap_remove_pv(md, pa, pmap_kernel(), va); | | 3618 | pv = pmap_remove_pv(md, pa, pmap_kernel(), va); |
3619 | KASSERTMSG(pv, "pg %p (pa %#lx) va %#lx", pg, pa, va); | | 3619 | KASSERTMSG(pv, "pg %p (pa %#lx) va %#lx", pg, pa, va); |
3620 | KASSERT(PV_IS_KENTRY_P(pv->pv_flags)); | | 3620 | KASSERT(PV_IS_KENTRY_P(pv->pv_flags)); |
3621 | | | 3621 | |
3622 | /* | | 3622 | /* |
3623 | * If we are removing a writable mapping to a cached exec page: if | | 3623 | * If we are removing a writable mapping to a cached exec page: if |
3624 | * it's the last mapping, clear its exec status; otherwise sync | | 3624 | * it's the last mapping, clear its exec status; otherwise sync |
3625 | * the page to the icache. | | 3625 | * the page to the icache. |
3626 | */ | | 3626 | */ |
3627 | if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC | | 3627 | if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC |
3628 | && (pv->pv_flags & PVF_WRITE) != 0) { | | 3628 | && (pv->pv_flags & PVF_WRITE) != 0) { |
3629 | if (SLIST_EMPTY(&md->pvh_list)) { | | 3629 | if (SLIST_EMPTY(&md->pvh_list)) { |
3630 | md->pvh_attrs &= ~PVF_EXEC; | | 3630 | md->pvh_attrs &= ~PVF_EXEC; |
3631 | PMAPCOUNT(exec_discarded_kremove); | | 3631 | PMAPCOUNT(exec_discarded_kremove); |
3632 | } else { | | 3632 | } else { |
3633 | pmap_syncicache_page(md, pa); | | 3633 | pmap_syncicache_page(md, pa); |
3634 | PMAPCOUNT(exec_synced_kremove); | | 3634 | PMAPCOUNT(exec_synced_kremove); |
3635 | } | | 3635 | } |
3636 | } | | 3636 | } |
3637 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); | | 3637 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); |
3638 | | | 3638 | |
3639 | return pv; | | 3639 | return pv; |
3640 | } | | 3640 | } |
3641 | #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ | | 3641 | #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ |
3642 | | | 3642 | |
3643 | /* | | 3643 | /* |
3644 | * pmap_kenter_pa: enter an unmanaged, wired kernel mapping | | 3644 | * pmap_kenter_pa: enter an unmanaged, wired kernel mapping |
3645 | * | | 3645 | * |
3646 | * We assume there is already sufficient KVM space available | | 3646 | * We assume there is already sufficient KVM space available |
3647 | * to do this, as we can't allocate L2 descriptor tables/metadata | | 3647 | * to do this, as we can't allocate L2 descriptor tables/metadata |
3648 | * from here. | | 3648 | * from here. |
3649 | */ | | 3649 | */ |
3650 | void | | 3650 | void |
3651 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 3651 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
3652 | { | | 3652 | { |
3653 | #ifdef PMAP_CACHE_VIVT | | 3653 | #ifdef PMAP_CACHE_VIVT |
3654 | struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL; | | 3654 | struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL; |
3655 | #endif | | 3655 | #endif |
3656 | #ifdef PMAP_CACHE_VIPT | | 3656 | #ifdef PMAP_CACHE_VIPT |
3657 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); | | 3657 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); |
3658 | struct vm_page *opg; | | 3658 | struct vm_page *opg; |
3659 | #ifndef ARM_MMU_EXTENDED | | 3659 | #ifndef ARM_MMU_EXTENDED |
3660 | struct pv_entry *pv = NULL; | | 3660 | struct pv_entry *pv = NULL; |
3661 | #endif | | 3661 | #endif |
3662 | #endif | | 3662 | #endif |
3663 | struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL; | | 3663 | struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL; |
3664 | | | 3664 | |
3665 | UVMHIST_FUNC(__func__); | | 3665 | UVMHIST_FUNC(__func__); |
3666 | | | 3666 | |
3667 | if (pmap_initialized) { | | 3667 | if (pmap_initialized) { |
3668 | UVMHIST_CALLED(maphist); | | 3668 | UVMHIST_CALLED(maphist); |
3669 | UVMHIST_LOG(maphist, " (va=%#x, pa=%#x, prot=%#x, flags=%#x", | | 3669 | UVMHIST_LOG(maphist, " (va=%#x, pa=%#x, prot=%#x, flags=%#x", |
3670 | va, pa, prot, flags); | | 3670 | va, pa, prot, flags); |
3671 | } | | 3671 | } |
3672 | | | 3672 | |
3673 | pmap_t kpm = pmap_kernel(); | | 3673 | pmap_t kpm = pmap_kernel(); |
| | | 3674 | pmap_acquire_pmap_lock(kpm); |
3674 | struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); | | 3675 | struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); |
3675 | const size_t l1slot __diagused = l1pte_index(va); | | 3676 | const size_t l1slot __diagused = l1pte_index(va); |
3676 | KASSERTMSG(l2b != NULL, | | 3677 | KASSERTMSG(l2b != NULL, |
3677 | "va %#lx pa %#lx prot %d maxkvaddr %#lx: l2 %p l2b %p kva %p", | | 3678 | "va %#lx pa %#lx prot %d maxkvaddr %#lx: l2 %p l2b %p kva %p", |
3678 | va, pa, prot, pmap_curmaxkvaddr, kpm->pm_l2[L2_IDX(l1slot)], | | 3679 | va, pa, prot, pmap_curmaxkvaddr, kpm->pm_l2[L2_IDX(l1slot)], |
3679 | kpm->pm_l2[L2_IDX(l1slot)] | | 3680 | kpm->pm_l2[L2_IDX(l1slot)] |
3680 | ? &kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)] | | 3681 | ? &kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)] |
3681 | : NULL, | | 3682 | : NULL, |
3682 | kpm->pm_l2[L2_IDX(l1slot)] | | 3683 | kpm->pm_l2[L2_IDX(l1slot)] |
3683 | ? kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)].l2b_kva | | 3684 | ? kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)].l2b_kva |
3684 | : NULL); | | 3685 | : NULL); |
3685 | KASSERT(l2b->l2b_kva != NULL); | | 3686 | KASSERT(l2b->l2b_kva != NULL); |
3686 | | | 3687 | |
3687 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3688 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3688 | const pt_entry_t opte = *ptep; | | 3689 | const pt_entry_t opte = *ptep; |
3689 | | | 3690 | |
3690 | if (opte == 0) { | | 3691 | if (opte == 0) { |
3691 | PMAPCOUNT(kenter_mappings); | | 3692 | PMAPCOUNT(kenter_mappings); |
3692 | l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE; | | 3693 | l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE; |
3693 | } else { | | 3694 | } else { |
3694 | PMAPCOUNT(kenter_remappings); | | 3695 | PMAPCOUNT(kenter_remappings); |
3695 | #ifdef PMAP_CACHE_VIPT | | 3696 | #ifdef PMAP_CACHE_VIPT |
3696 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3697 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3697 | #if !defined(ARM_MMU_EXTENDED) || defined(DIAGNOSTIC) | | 3698 | #if !defined(ARM_MMU_EXTENDED) || defined(DIAGNOSTIC) |
3698 | struct vm_page_md *omd __diagused = VM_PAGE_TO_MD(opg); | | 3699 | struct vm_page_md *omd __diagused = VM_PAGE_TO_MD(opg); |
3699 | #endif | | 3700 | #endif |
3700 | if (opg && arm_cache_prefer_mask != 0) { | | 3701 | if (opg && arm_cache_prefer_mask != 0) { |
3701 | KASSERT(opg != pg); | | 3702 | KASSERT(opg != pg); |
3702 | KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0); | | 3703 | KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0); |
3703 | KASSERT((flags & PMAP_KMPAGE) == 0); | | 3704 | KASSERT((flags & PMAP_KMPAGE) == 0); |
3704 | #ifndef ARM_MMU_EXTENDED | | 3705 | #ifndef ARM_MMU_EXTENDED |
3705 | pmap_acquire_page_lock(omd); | | 3706 | pmap_acquire_page_lock(omd); |
3706 | pv = pmap_kremove_pg(opg, va); | | 3707 | pv = pmap_kremove_pg(opg, va); |
3707 | pmap_release_page_lock(omd); | | 3708 | pmap_release_page_lock(omd); |
3708 | #endif | | 3709 | #endif |
3709 | } | | 3710 | } |
3710 | #endif | | 3711 | #endif |
3711 | if (l2pte_valid_p(opte)) { | | 3712 | if (l2pte_valid_p(opte)) { |
3712 | l2pte_reset(ptep); | | 3713 | l2pte_reset(ptep); |
3713 | PTE_SYNC(ptep); | | 3714 | PTE_SYNC(ptep); |
3714 | #ifdef PMAP_CACHE_VIVT | | 3715 | #ifdef PMAP_CACHE_VIVT |
3715 | cpu_dcache_wbinv_range(va, PAGE_SIZE); | | 3716 | cpu_dcache_wbinv_range(va, PAGE_SIZE); |
3716 | #endif | | 3717 | #endif |
3717 | cpu_tlb_flushD_SE(va); | | 3718 | cpu_tlb_flushD_SE(va); |
3718 | cpu_cpwait(); | | 3719 | cpu_cpwait(); |
3719 | } | | 3720 | } |
3720 | } | | 3721 | } |
| | | 3722 | pmap_release_pmap_lock(kpm); |
3721 | | | 3723 | |
3722 | pt_entry_t npte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | | 3724 | pt_entry_t npte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) |
3723 | | ((flags & PMAP_NOCACHE) | | 3725 | | ((flags & PMAP_NOCACHE) |
3724 | ? 0 | | 3726 | ? 0 |
3725 | : ((flags & PMAP_PTE) | | 3727 | : ((flags & PMAP_PTE) |
3726 | ? pte_l2_s_cache_mode_pt : pte_l2_s_cache_mode)); | | 3728 | ? pte_l2_s_cache_mode_pt : pte_l2_s_cache_mode)); |
3727 | #ifdef ARM_MMU_EXTENDED | | 3729 | #ifdef ARM_MMU_EXTENDED |
3728 | if (prot & VM_PROT_EXECUTE) | | 3730 | if (prot & VM_PROT_EXECUTE) |
3729 | npte &= ~L2_XS_XN; | | 3731 | npte &= ~L2_XS_XN; |
3730 | #endif | | 3732 | #endif |
3731 | l2pte_set(ptep, npte, 0); | | 3733 | l2pte_set(ptep, npte, 0); |
3732 | PTE_SYNC(ptep); | | 3734 | PTE_SYNC(ptep); |
3733 | | | 3735 | |
3734 | if (pg) { | | 3736 | if (pg) { |
3735 | if (flags & PMAP_KMPAGE) { | | 3737 | if (flags & PMAP_KMPAGE) { |
3736 | KASSERT(md->urw_mappings == 0); | | 3738 | KASSERT(md->urw_mappings == 0); |
3737 | KASSERT(md->uro_mappings == 0); | | 3739 | KASSERT(md->uro_mappings == 0); |
3738 | KASSERT(md->krw_mappings == 0); | | 3740 | KASSERT(md->krw_mappings == 0); |
3739 | KASSERT(md->kro_mappings == 0); | | 3741 | KASSERT(md->kro_mappings == 0); |
3740 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3742 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3741 | KASSERT(pv == NULL); | | 3743 | KASSERT(pv == NULL); |
3742 | KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0); | | 3744 | KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0); |
3743 | KASSERT((md->pvh_attrs & PVF_NC) == 0); | | 3745 | KASSERT((md->pvh_attrs & PVF_NC) == 0); |
3744 | /* if there is a color conflict, evict from cache. */ | | 3746 | /* if there is a color conflict, evict from cache. */ |
3745 | if (pmap_is_page_colored_p(md) | | 3747 | if (pmap_is_page_colored_p(md) |
3746 | && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) { | | 3748 | && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) { |
3747 | PMAPCOUNT(vac_color_change); | | 3749 | PMAPCOUNT(vac_color_change); |
3748 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); | | 3750 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); |
3749 | } else if (md->pvh_attrs & PVF_MULTCLR) { | | 3751 | } else if (md->pvh_attrs & PVF_MULTCLR) { |
3750 | /* | | 3752 | /* |
3751 | * If this page has multiple colors, expunge | | 3753 | * If this page has multiple colors, expunge |
3752 | * them. | | 3754 | * them. |
3753 | */ | | 3755 | */ |
3754 | PMAPCOUNT(vac_flush_lots2); | | 3756 | PMAPCOUNT(vac_flush_lots2); |
3755 | pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY); | | 3757 | pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY); |
3756 | } | | 3758 | } |
3757 | /* | | 3759 | /* |
3758 | * Since this is a KMPAGE, there can be no contention | | 3760 | * Since this is a KMPAGE, there can be no contention |
3759 | * for this page so don't lock it. | | 3761 | * for this page so don't lock it. |
3760 | */ | | 3762 | */ |
3761 | md->pvh_attrs &= PAGE_SIZE - 1; | | 3763 | md->pvh_attrs &= PAGE_SIZE - 1; |
3762 | md->pvh_attrs |= PVF_KMPAGE | PVF_COLORED | PVF_DIRTY | | 3764 | md->pvh_attrs |= PVF_KMPAGE | PVF_COLORED | PVF_DIRTY |
3763 | | (va & arm_cache_prefer_mask); | | 3765 | | (va & arm_cache_prefer_mask); |
3764 | #else /* !PMAP_CACHE_VIPT || ARM_MMU_EXTENDED */ | | 3766 | #else /* !PMAP_CACHE_VIPT || ARM_MMU_EXTENDED */ |
3765 | md->pvh_attrs |= PVF_KMPAGE; | | 3767 | md->pvh_attrs |= PVF_KMPAGE; |
3766 | #endif | | 3768 | #endif |
3767 | atomic_inc_32(&pmap_kmpages); | | 3769 | atomic_inc_32(&pmap_kmpages); |
3768 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3770 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3769 | } else if (arm_cache_prefer_mask != 0) { | | 3771 | } else if (arm_cache_prefer_mask != 0) { |
3770 | if (pv == NULL) { | | 3772 | if (pv == NULL) { |
3771 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); | | 3773 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); |
3772 | KASSERT(pv != NULL); | | 3774 | KASSERT(pv != NULL); |
3773 | } | | 3775 | } |
3774 | pmap_acquire_page_lock(md); | | 3776 | pmap_acquire_page_lock(md); |
3775 | pmap_enter_pv(md, pa, pv, pmap_kernel(), va, | | 3777 | pmap_enter_pv(md, pa, pv, pmap_kernel(), va, |
3776 | PVF_WIRED | PVF_KENTRY | | 3778 | PVF_WIRED | PVF_KENTRY |
3777 | | (prot & VM_PROT_WRITE ? PVF_WRITE : 0)); | | 3779 | | (prot & VM_PROT_WRITE ? PVF_WRITE : 0)); |
3778 | if ((prot & VM_PROT_WRITE) | | 3780 | if ((prot & VM_PROT_WRITE) |
3779 | && !(md->pvh_attrs & PVF_NC)) | | 3781 | && !(md->pvh_attrs & PVF_NC)) |
3780 | md->pvh_attrs |= PVF_DIRTY; | | 3782 | md->pvh_attrs |= PVF_DIRTY; |
3781 | KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 3783 | KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
3782 | pmap_vac_me_harder(md, pa, pmap_kernel(), va); | | 3784 | pmap_vac_me_harder(md, pa, pmap_kernel(), va); |
3783 | pmap_release_page_lock(md); | | 3785 | pmap_release_page_lock(md); |
3784 | #endif | | 3786 | #endif |
3785 | } | | 3787 | } |
3786 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3788 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3787 | } else { | | 3789 | } else { |
3788 | if (pv != NULL) | | 3790 | if (pv != NULL) |
3789 | pool_put(&pmap_pv_pool, pv); | | 3791 | pool_put(&pmap_pv_pool, pv); |
3790 | #endif | | 3792 | #endif |
3791 | } | | 3793 | } |
3792 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3794 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3793 | KASSERT(md == NULL || !pmap_page_locked_p(md)); | | 3795 | KASSERT(md == NULL || !pmap_page_locked_p(md)); |
3794 | #endif | | 3796 | #endif |
3795 | if (pmap_initialized) { | | 3797 | if (pmap_initialized) { |
3796 | UVMHIST_LOG(maphist, " <-- done (ptep %p: %#x -> %#x)", | | 3798 | UVMHIST_LOG(maphist, " <-- done (ptep %p: %#x -> %#x)", |
3797 | ptep, opte, npte, 0); | | 3799 | ptep, opte, npte, 0); |
3798 | } | | 3800 | } |
3799 | | | 3801 | |
3800 | } | | 3802 | } |
3801 | | | 3803 | |
3802 | void | | 3804 | void |
3803 | pmap_kremove(vaddr_t va, vsize_t len) | | 3805 | pmap_kremove(vaddr_t va, vsize_t len) |
3804 | { | | 3806 | { |
3805 | #ifdef UVMHIST | | 3807 | #ifdef UVMHIST |
3806 | u_int total_mappings = 0; | | 3808 | u_int total_mappings = 0; |
3807 | #endif | | 3809 | #endif |
3808 | | | 3810 | |
3809 | PMAPCOUNT(kenter_unmappings); | | 3811 | PMAPCOUNT(kenter_unmappings); |
3810 | | | 3812 | |
3811 | UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); | | 3813 | UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); |
3812 | | | 3814 | |
3813 | UVMHIST_LOG(maphist, " (va=%#x, len=%#x)", va, len, 0, 0); | | 3815 | UVMHIST_LOG(maphist, " (va=%#x, len=%#x)", va, len, 0, 0); |
3814 | | | 3816 | |
3815 | const vaddr_t eva = va + len; | | 3817 | const vaddr_t eva = va + len; |
3816 | | | 3818 | |
| | | 3819 | pmap_acquire_pmap_lock(pmap_kernel()); |
| | | 3820 | |
3817 | while (va < eva) { | | 3821 | while (va < eva) { |
3818 | vaddr_t next_bucket = L2_NEXT_BUCKET_VA(va); | | 3822 | vaddr_t next_bucket = L2_NEXT_BUCKET_VA(va); |
3819 | if (next_bucket > eva) | | 3823 | if (next_bucket > eva) |
3820 | next_bucket = eva; | | 3824 | next_bucket = eva; |
3821 | | | 3825 | |
3822 | pmap_t kpm = pmap_kernel(); | | 3826 | pmap_t kpm = pmap_kernel(); |
3823 | struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); | | 3827 | struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); |
3824 | KDASSERT(l2b != NULL); | | 3828 | KDASSERT(l2b != NULL); |
3825 | | | 3829 | |
3826 | pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3830 | pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)]; |
3827 | pt_entry_t *ptep = sptep; | | 3831 | pt_entry_t *ptep = sptep; |
3828 | u_int mappings = 0; | | 3832 | u_int mappings = 0; |
3829 | | | 3833 | |
3830 | while (va < next_bucket) { | | 3834 | while (va < next_bucket) { |
3831 | const pt_entry_t opte = *ptep; | | 3835 | const pt_entry_t opte = *ptep; |
3832 | struct vm_page *opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3836 | struct vm_page *opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3833 | if (opg != NULL) { | | 3837 | if (opg != NULL) { |
3834 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 3838 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
3835 | | | 3839 | |
3836 | if (omd->pvh_attrs & PVF_KMPAGE) { | | 3840 | if (omd->pvh_attrs & PVF_KMPAGE) { |
3837 | KASSERT(omd->urw_mappings == 0); | | 3841 | KASSERT(omd->urw_mappings == 0); |
3838 | KASSERT(omd->uro_mappings == 0); | | 3842 | KASSERT(omd->uro_mappings == 0); |
3839 | KASSERT(omd->krw_mappings == 0); | | 3843 | KASSERT(omd->krw_mappings == 0); |
3840 | KASSERT(omd->kro_mappings == 0); | | 3844 | KASSERT(omd->kro_mappings == 0); |
3841 | omd->pvh_attrs &= ~PVF_KMPAGE; | | 3845 | omd->pvh_attrs &= ~PVF_KMPAGE; |
3842 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3846 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3843 | if (arm_cache_prefer_mask != 0) { | | 3847 | if (arm_cache_prefer_mask != 0) { |
3844 | omd->pvh_attrs &= ~PVF_WRITE; | | 3848 | omd->pvh_attrs &= ~PVF_WRITE; |
3845 | } | | 3849 | } |
3846 | #endif | | 3850 | #endif |
3847 | atomic_dec_32(&pmap_kmpages); | | 3851 | atomic_dec_32(&pmap_kmpages); |
3848 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 3852 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
3849 | } else if (arm_cache_prefer_mask != 0) { | | 3853 | } else if (arm_cache_prefer_mask != 0) { |
3850 | pmap_acquire_page_lock(omd); | | 3854 | pmap_acquire_page_lock(omd); |
3851 | pool_put(&pmap_pv_pool, | | 3855 | pool_put(&pmap_pv_pool, |
3852 | pmap_kremove_pg(opg, va)); | | 3856 | pmap_kremove_pg(opg, va)); |
3853 | pmap_release_page_lock(omd); | | 3857 | pmap_release_page_lock(omd); |
3854 | #endif | | 3858 | #endif |
3855 | } | | 3859 | } |
3856 | } | | 3860 | } |
3857 | if (l2pte_valid_p(opte)) { | | 3861 | if (l2pte_valid_p(opte)) { |
3858 | l2pte_reset(ptep); | | 3862 | l2pte_reset(ptep); |
3859 | PTE_SYNC(ptep); | | 3863 | PTE_SYNC(ptep); |
3860 | #ifdef PMAP_CACHE_VIVT | | 3864 | #ifdef PMAP_CACHE_VIVT |
3861 | cpu_dcache_wbinv_range(va, PAGE_SIZE); | | 3865 | cpu_dcache_wbinv_range(va, PAGE_SIZE); |
3862 | #endif | | 3866 | #endif |
3863 | cpu_tlb_flushD_SE(va); | | 3867 | cpu_tlb_flushD_SE(va); |
3864 | | | 3868 | |
3865 | mappings += PAGE_SIZE / L2_S_SIZE; | | 3869 | mappings += PAGE_SIZE / L2_S_SIZE; |
3866 | } | | 3870 | } |
3867 | va += PAGE_SIZE; | | 3871 | va += PAGE_SIZE; |
3868 | ptep += PAGE_SIZE / L2_S_SIZE; | | 3872 | ptep += PAGE_SIZE / L2_S_SIZE; |
3869 | } | | 3873 | } |
3870 | KDASSERTMSG(mappings <= l2b->l2b_occupancy, "%u %u", | | 3874 | KDASSERTMSG(mappings <= l2b->l2b_occupancy, "%u %u", |
3871 | mappings, l2b->l2b_occupancy); | | 3875 | mappings, l2b->l2b_occupancy); |
3872 | l2b->l2b_occupancy -= mappings; | | 3876 | l2b->l2b_occupancy -= mappings; |
3873 | //PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); | | 3877 | //PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); |
3874 | #ifdef UVMHIST | | 3878 | #ifdef UVMHIST |
3875 | total_mappings += mappings; | | 3879 | total_mappings += mappings; |
3876 | #endif | | 3880 | #endif |
3877 | } | | 3881 | } |
| | | 3882 | pmap_release_pmap_lock(pmap_kernel()); |
3878 | cpu_cpwait(); | | 3883 | cpu_cpwait(); |
3879 | UVMHIST_LOG(maphist, " <--- done (%u mappings removed)", | | 3884 | UVMHIST_LOG(maphist, " <--- done (%u mappings removed)", |
3880 | total_mappings, 0, 0, 0); | | 3885 | total_mappings, 0, 0, 0); |
3881 | } | | 3886 | } |
3882 | | | 3887 | |
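/*
 * Editor's illustration, not part of pmap.c: a minimal sketch of the
 * pmap_kenter_pa()/pmap_kremove() pairing whose teardown half appears
 * above.  The caller and names are hypothetical; it assumes the usual
 * declarations from <uvm/uvm_extern.h>.
 */
static void
example_kenter_cycle(vaddr_t va, paddr_t pa)
{
	/* Enter a wired, writable kernel mapping at va. */
	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);

	/* ... use the mapping ... */

	/* Tear down one page and commit the pmap changes. */
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
}
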
3883 | bool | | 3888 | bool |
3884 | pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) | | 3889 | pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) |
3885 | { | | 3890 | { |
3886 | struct l2_dtable *l2; | | 3891 | struct l2_dtable *l2; |
3887 | pd_entry_t *pdep, pde; | | 3892 | pd_entry_t *pdep, pde; |
3888 | pt_entry_t *ptep, pte; | | 3893 | pt_entry_t *ptep, pte; |
3889 | paddr_t pa; | | 3894 | paddr_t pa; |
3890 | u_int l1slot; | | 3895 | u_int l1slot; |
3891 | | | 3896 | |
3892 | pmap_acquire_pmap_lock(pm); | | 3897 | pmap_acquire_pmap_lock(pm); |
3893 | | | 3898 | |
3894 | l1slot = l1pte_index(va); | | 3899 | l1slot = l1pte_index(va); |
3895 | pdep = pmap_l1_kva(pm) + l1slot; | | 3900 | pdep = pmap_l1_kva(pm) + l1slot; |
3896 | pde = *pdep; | | 3901 | pde = *pdep; |
3897 | | | 3902 | |
3898 | if (l1pte_section_p(pde)) { | | 3903 | if (l1pte_section_p(pde)) { |
3899 | /* | | 3904 | /* |
3900 | * Section mappings should only happen for pmap_kernel(). | | 3905 | * Section mappings should only happen for pmap_kernel(). |
3901 | */ | | 3906 | */ |
3902 | KDASSERT(pm == pmap_kernel()); | | 3907 | KDASSERT(pm == pmap_kernel()); |
3903 | pmap_release_pmap_lock(pm); | | 3908 | pmap_release_pmap_lock(pm); |
3904 | #if (ARM_MMU_V6 + ARM_MMU_V7) > 0 | | 3909 | #if (ARM_MMU_V6 + ARM_MMU_V7) > 0 |
3905 | if (l1pte_supersection_p(pde)) { | | 3910 | if (l1pte_supersection_p(pde)) { |
3906 | pa = (pde & L1_SS_FRAME) | (va & L1_SS_OFFSET); | | 3911 | pa = (pde & L1_SS_FRAME) | (va & L1_SS_OFFSET); |
3907 | } else | | 3912 | } else |
3908 | #endif | | 3913 | #endif |
3909 | pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET); | | 3914 | pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET); |
3910 | } else { | | 3915 | } else { |
3911 | /* | | 3916 | /* |
3912 | * Note that we can't rely on the validity of the L1 | | 3917 | * Note that we can't rely on the validity of the L1 |
3913 | * descriptor as an indication that a mapping exists. | | 3918 | * descriptor as an indication that a mapping exists. |
3914 | * We have to look it up in the L2 dtable. | | 3919 | * We have to look it up in the L2 dtable. |
3915 | */ | | 3920 | */ |
3916 | l2 = pm->pm_l2[L2_IDX(l1slot)]; | | 3921 | l2 = pm->pm_l2[L2_IDX(l1slot)]; |
3917 | | | 3922 | |
3918 | if (l2 == NULL || | | 3923 | if (l2 == NULL || |
3919 | (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) { | | 3924 | (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) { |
3920 | pmap_release_pmap_lock(pm); | | 3925 | pmap_release_pmap_lock(pm); |
3921 | return false; | | 3926 | return false; |
3922 | } | | 3927 | } |
3923 | | | 3928 | |
3924 | pte = ptep[l2pte_index(va)]; | | 3929 | pte = ptep[l2pte_index(va)]; |
3925 | pmap_release_pmap_lock(pm); | | 3930 | pmap_release_pmap_lock(pm); |
3926 | | | 3931 | |
3927 | if (pte == 0) | | 3932 | if (pte == 0) |
3928 | return false; | | 3933 | return false; |
3929 | | | 3934 | |
3930 | switch (pte & L2_TYPE_MASK) { | | 3935 | switch (pte & L2_TYPE_MASK) { |
3931 | case L2_TYPE_L: | | 3936 | case L2_TYPE_L: |
3932 | pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); | | 3937 | pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); |
3933 | break; | | 3938 | break; |
3934 | | | 3939 | |
3935 | default: | | 3940 | default: |
3936 | pa = (pte & ~PAGE_MASK) | (va & PAGE_MASK); | | 3941 | pa = (pte & ~PAGE_MASK) | (va & PAGE_MASK); |
3937 | break; | | 3942 | break; |
3938 | } | | 3943 | } |
3939 | } | | 3944 | } |
3940 | | | 3945 | |
3941 | if (pap != NULL) | | 3946 | if (pap != NULL) |
3942 | *pap = pa; | | 3947 | *pap = pa; |
3943 | | | 3948 | |
3944 | return true; | | 3949 | return true; |
3945 | } | | 3950 | } |
3946 | | | 3951 | |
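/*
 * Editor's illustration, not part of pmap.c (hypothetical caller):
 * pmap_extract() is the usual way to translate a VA to a PA by
 * walking the page tables, without touching the MMU or TLB.
 */
static bool
example_va_to_pa(vaddr_t va, paddr_t *pap)
{
	/* Returns false when no mapping exists at 'va'. */
	return pmap_extract(pmap_kernel(), va, pap);
}
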
3947 | void | | 3952 | void |
3948 | pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) | | 3953 | pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) |
3949 | { | | 3954 | { |
3950 | struct l2_bucket *l2b; | | 3955 | struct l2_bucket *l2b; |
3951 | vaddr_t next_bucket; | | 3956 | vaddr_t next_bucket; |
3952 | | | 3957 | |
3953 | NPDEBUG(PDB_PROTECT, | | 3958 | NPDEBUG(PDB_PROTECT, |
3954 | printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n", | | 3959 | printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n", |
3955 | pm, sva, eva, prot)); | | 3960 | pm, sva, eva, prot)); |
3956 | | | 3961 | |
3957 | if ((prot & VM_PROT_READ) == 0) { | | 3962 | if ((prot & VM_PROT_READ) == 0) { |
3958 | pmap_remove(pm, sva, eva); | | 3963 | pmap_remove(pm, sva, eva); |
3959 | return; | | 3964 | return; |
3960 | } | | 3965 | } |
3961 | | | 3966 | |
3962 | if (prot & VM_PROT_WRITE) { | | 3967 | if (prot & VM_PROT_WRITE) { |
3963 | /* | | 3968 | /* |
3964 | * If this is a read->write transition, just ignore it and let | | 3969 | * If this is a read->write transition, just ignore it and let |
3965 | * uvm_fault() take care of it later. | | 3970 | * uvm_fault() take care of it later. |
3966 | */ | | 3971 | */ |
3967 | return; | | 3972 | return; |
3968 | } | | 3973 | } |
3969 | | | 3974 | |
3970 | pmap_acquire_pmap_lock(pm); | | 3975 | pmap_acquire_pmap_lock(pm); |
3971 | | | 3976 | |
3972 | #ifndef ARM_MMU_EXTENDED | | 3977 | #ifndef ARM_MMU_EXTENDED |
3973 | const bool flush = eva - sva >= PAGE_SIZE * 4; | | 3978 | const bool flush = eva - sva >= PAGE_SIZE * 4; |
3974 | u_int flags = 0; | | 3979 | u_int flags = 0; |
3975 | #endif | | 3980 | #endif |
3976 | u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); | | 3981 | u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); |
3977 | | | 3982 | |
3978 | while (sva < eva) { | | 3983 | while (sva < eva) { |
3979 | next_bucket = L2_NEXT_BUCKET_VA(sva); | | 3984 | next_bucket = L2_NEXT_BUCKET_VA(sva); |
3980 | if (next_bucket > eva) | | 3985 | if (next_bucket > eva) |
3981 | next_bucket = eva; | | 3986 | next_bucket = eva; |
3982 | | | 3987 | |
3983 | l2b = pmap_get_l2_bucket(pm, sva); | | 3988 | l2b = pmap_get_l2_bucket(pm, sva); |
3984 | if (l2b == NULL) { | | 3989 | if (l2b == NULL) { |
3985 | sva = next_bucket; | | 3990 | sva = next_bucket; |
3986 | continue; | | 3991 | continue; |
3987 | } | | 3992 | } |
3988 | | | 3993 | |
3989 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 3994 | pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
3990 | | | 3995 | |
3991 | while (sva < next_bucket) { | | 3996 | while (sva < next_bucket) { |
3992 | const pt_entry_t opte = *ptep; | | 3997 | const pt_entry_t opte = *ptep; |
3993 | if (l2pte_valid_p(opte) && l2pte_writable_p(opte)) { | | 3998 | if (l2pte_valid_p(opte) && l2pte_writable_p(opte)) { |
3994 | struct vm_page *pg; | | 3999 | struct vm_page *pg; |
3995 | #ifndef ARM_MMU_EXTENDED | | 4000 | #ifndef ARM_MMU_EXTENDED |
3996 | u_int f; | | 4001 | u_int f; |
3997 | #endif | | 4002 | #endif |
3998 | | | 4003 | |
3999 | #ifdef PMAP_CACHE_VIVT | | 4004 | #ifdef PMAP_CACHE_VIVT |
4000 | /* | | 4005 | /* |
4001 | * OK, at this point, we know we're doing a | | 4006 | * OK, at this point, we know we're doing a |
4002 | * write-protect operation. If the pmap is | | 4007 | * write-protect operation. If the pmap is |
4003 | * active, write back the page. | | 4008 | * active, write back the page. |
4004 | */ | | 4009 | */ |
4005 | pmap_cache_wbinv_page(pm, sva, false, | | 4010 | pmap_cache_wbinv_page(pm, sva, false, |
4006 | PVF_REF | PVF_WRITE); | | 4011 | PVF_REF | PVF_WRITE); |
4007 | #endif | | 4012 | #endif |
4008 | | | 4013 | |
4009 | pg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 4014 | pg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
4010 | pt_entry_t npte = l2pte_set_readonly(opte); | | 4015 | pt_entry_t npte = l2pte_set_readonly(opte); |
4011 | l2pte_reset(ptep); | | 4016 | l2pte_reset(ptep); |
4012 | PTE_SYNC(ptep); | | 4017 | PTE_SYNC(ptep); |
4013 | #ifdef ARM_MMU_EXTENDED | | 4018 | #ifdef ARM_MMU_EXTENDED |
4014 | pmap_tlb_flush_SE(pm, sva, PVF_REF); | | 4019 | pmap_tlb_flush_SE(pm, sva, PVF_REF); |
4015 | #endif | | 4020 | #endif |
4016 | l2pte_set(ptep, npte, 0); | | 4021 | l2pte_set(ptep, npte, 0); |
4017 | PTE_SYNC(ptep); | | 4022 | PTE_SYNC(ptep); |
4018 | | | 4023 | |
4019 | if (pg != NULL) { | | 4024 | if (pg != NULL) { |
4020 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4025 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4021 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 4026 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
4022 | | | 4027 | |
4023 | pmap_acquire_page_lock(md); | | 4028 | pmap_acquire_page_lock(md); |
4024 | #ifndef ARM_MMU_EXTENDED | | 4029 | #ifndef ARM_MMU_EXTENDED |
4025 | f = | | 4030 | f = |
4026 | #endif | | 4031 | #endif |
4027 | pmap_modify_pv(md, pa, pm, sva, | | 4032 | pmap_modify_pv(md, pa, pm, sva, |
4028 | clr_mask, 0); | | 4033 | clr_mask, 0); |
4029 | pmap_vac_me_harder(md, pa, pm, sva); | | 4034 | pmap_vac_me_harder(md, pa, pm, sva); |
4030 | pmap_release_page_lock(md); | | 4035 | pmap_release_page_lock(md); |
4031 | #ifndef ARM_MMU_EXTENDED | | 4036 | #ifndef ARM_MMU_EXTENDED |
4032 | } else { | | 4037 | } else { |
4033 | f = PVF_REF | PVF_EXEC; | | 4038 | f = PVF_REF | PVF_EXEC; |
4034 | } | | 4039 | } |
4035 | | | 4040 | |
4036 | if (flush) { | | 4041 | if (flush) { |
4037 | flags |= f; | | 4042 | flags |= f; |
4038 | } else { | | 4043 | } else { |
4039 | pmap_tlb_flush_SE(pm, sva, f); | | 4044 | pmap_tlb_flush_SE(pm, sva, f); |
4040 | #endif | | 4045 | #endif |
4041 | } | | 4046 | } |
4042 | } | | 4047 | } |
4043 | | | 4048 | |
4044 | sva += PAGE_SIZE; | | 4049 | sva += PAGE_SIZE; |
4045 | ptep += PAGE_SIZE / L2_S_SIZE; | | 4050 | ptep += PAGE_SIZE / L2_S_SIZE; |
4046 | } | | 4051 | } |
4047 | } | | 4052 | } |
4048 | | | 4053 | |
4049 | #ifndef ARM_MMU_EXTENDED | | 4054 | #ifndef ARM_MMU_EXTENDED |
4050 | if (flush) { | | 4055 | if (flush) { |
4051 | if (PV_BEEN_EXECD(flags)) { | | 4056 | if (PV_BEEN_EXECD(flags)) { |
4052 | pmap_tlb_flushID(pm); | | 4057 | pmap_tlb_flushID(pm); |
4053 | } else if (PV_BEEN_REFD(flags)) { | | 4058 | } else if (PV_BEEN_REFD(flags)) { |
4054 | pmap_tlb_flushD(pm); | | 4059 | pmap_tlb_flushD(pm); |
4055 | } | | 4060 | } |
4056 | } | | 4061 | } |
4057 | #endif | | 4062 | #endif |
4058 | | | 4063 | |
4059 | pmap_release_pmap_lock(pm); | | 4064 | pmap_release_pmap_lock(pm); |
4060 | } | | 4065 | } |
4061 | | | 4066 | |
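/*
 * Editor's summary of the entry checks in pmap_protect() above:
 *   prot lacks VM_PROT_READ  -> the whole range is pmap_remove()d;
 *   prot has VM_PROT_WRITE   -> no-op, uvm_fault() upgrades later;
 *   otherwise                -> read-only downgrade: PTEs are made
 *     read-only, and clr_mask drops PVF_WRITE plus PVF_EXEC when
 *     execute permission is also being revoked.
 */
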
4062 | void | | 4067 | void |
4063 | pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) | | 4068 | pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) |
4064 | { | | 4069 | { |
4065 | struct l2_bucket *l2b; | | 4070 | struct l2_bucket *l2b; |
4066 | pt_entry_t *ptep; | | 4071 | pt_entry_t *ptep; |
4067 | vaddr_t next_bucket; | | 4072 | vaddr_t next_bucket; |
4068 | vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; | | 4073 | vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; |
4069 | | | 4074 | |
4070 | NPDEBUG(PDB_EXEC, | | 4075 | NPDEBUG(PDB_EXEC, |
4071 | printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n", | | 4076 | printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n", |
4072 | pm, sva, eva)); | | 4077 | pm, sva, eva)); |
4073 | | | 4078 | |
4074 | pmap_acquire_pmap_lock(pm); | | 4079 | pmap_acquire_pmap_lock(pm); |
4075 | | | 4080 | |
4076 | while (sva < eva) { | | 4081 | while (sva < eva) { |
4077 | next_bucket = L2_NEXT_BUCKET_VA(sva); | | 4082 | next_bucket = L2_NEXT_BUCKET_VA(sva); |
4078 | if (next_bucket > eva) | | 4083 | if (next_bucket > eva) |
4079 | next_bucket = eva; | | 4084 | next_bucket = eva; |
4080 | | | 4085 | |
4081 | l2b = pmap_get_l2_bucket(pm, sva); | | 4086 | l2b = pmap_get_l2_bucket(pm, sva); |
4082 | if (l2b == NULL) { | | 4087 | if (l2b == NULL) { |
4083 | sva = next_bucket; | | 4088 | sva = next_bucket; |
4084 | continue; | | 4089 | continue; |
4085 | } | | 4090 | } |
4086 | | | 4091 | |
4087 | for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 4092 | for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
4088 | sva < next_bucket; | | 4093 | sva < next_bucket; |
4089 | sva += page_size, | | 4094 | sva += page_size, |
4090 | ptep += PAGE_SIZE / L2_S_SIZE, | | 4095 | ptep += PAGE_SIZE / L2_S_SIZE, |
4091 | page_size = PAGE_SIZE) { | | 4096 | page_size = PAGE_SIZE) { |
4092 | if (l2pte_valid_p(*ptep)) { | | 4097 | if (l2pte_valid_p(*ptep)) { |
4093 | cpu_icache_sync_range(sva, | | 4098 | cpu_icache_sync_range(sva, |
4094 | min(page_size, eva - sva)); | | 4099 | min(page_size, eva - sva)); |
4095 | } | | 4100 | } |
4096 | } | | 4101 | } |
4097 | } | | 4102 | } |
4098 | | | 4103 | |
4099 | pmap_release_pmap_lock(pm); | | 4104 | pmap_release_pmap_lock(pm); |
4100 | } | | 4105 | } |
4101 | | | 4106 | |
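/*
 * Editor's note on the page_size initialisation in
 * pmap_icache_sync_range() above: the first iteration may start
 * mid-page, so only the remainder of that page is synced.  A worked
 * example with 4KB pages (values hypothetical): sva = 0x1234 gives
 * trunc_page(sva) = 0x1000, so page_size = 0x1000 + 0x1000 - 0x1234
 * = 0xdcc bytes up to the page boundary; every later iteration uses
 * the full PAGE_SIZE.
 */
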
4102 | void | | 4107 | void |
4103 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) | | 4108 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) |
4104 | { | | 4109 | { |
4105 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4110 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4106 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 4111 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
4107 | | | 4112 | |
4108 | NPDEBUG(PDB_PROTECT, | | 4113 | NPDEBUG(PDB_PROTECT, |
4109 | printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n", | | 4114 | printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n", |
4110 | md, pa, prot)); | | 4115 | md, pa, prot)); |
4111 | | | 4116 | |
4112 | switch (prot) { | | 4117 | switch (prot) { |
4113 | case VM_PROT_READ|VM_PROT_WRITE: | | 4118 | case VM_PROT_READ|VM_PROT_WRITE: |
4114 | #if defined(ARM_MMU_EXTENDED) | | 4119 | #if defined(ARM_MMU_EXTENDED) |
4115 | pmap_acquire_page_lock(md); | | 4120 | pmap_acquire_page_lock(md); |
4116 | pmap_clearbit(md, pa, PVF_EXEC); | | 4121 | pmap_clearbit(md, pa, PVF_EXEC); |
4117 | pmap_release_page_lock(md); | | 4122 | pmap_release_page_lock(md); |
4118 | break; | | 4123 | break; |
4119 | #endif | | 4124 | #endif |
4120 | case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: | | 4125 | case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: |
4121 | break; | | 4126 | break; |
4122 | | | 4127 | |
4123 | case VM_PROT_READ: | | 4128 | case VM_PROT_READ: |
4124 | #if defined(ARM_MMU_EXTENDED) | | 4129 | #if defined(ARM_MMU_EXTENDED) |
4125 | pmap_acquire_page_lock(md); | | 4130 | pmap_acquire_page_lock(md); |
4126 | pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC); | | 4131 | pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC); |
4127 | pmap_release_page_lock(md); | | 4132 | pmap_release_page_lock(md); |
4128 | break; | | 4133 | break; |
4129 | #endif | | 4134 | #endif |
4130 | case VM_PROT_READ|VM_PROT_EXECUTE: | | 4135 | case VM_PROT_READ|VM_PROT_EXECUTE: |
4131 | pmap_acquire_page_lock(md); | | 4136 | pmap_acquire_page_lock(md); |
4132 | pmap_clearbit(md, pa, PVF_WRITE); | | 4137 | pmap_clearbit(md, pa, PVF_WRITE); |
4133 | pmap_release_page_lock(md); | | 4138 | pmap_release_page_lock(md); |
4134 | break; | | 4139 | break; |
4135 | | | 4140 | |
4136 | default: | | 4141 | default: |
4137 | pmap_page_remove(md, pa); | | 4142 | pmap_page_remove(md, pa); |
4138 | break; | | 4143 | break; |
4139 | } | | 4144 | } |
4140 | } | | 4145 | } |
4141 | | | 4146 | |
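/*
 * Editor's illustration, not part of pmap.c (hypothetical caller):
 * the two common uses of pmap_page_protect() from the VM system.
 */
static void
example_page_protect(struct vm_page *pg)
{
	/* Write-protect every mapping of pg, e.g. before pageout. */
	pmap_page_protect(pg, VM_PROT_READ);

	/* Remove every mapping of pg (the default case above). */
	pmap_page_protect(pg, VM_PROT_NONE);
}
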
4142 | /* | | 4147 | /* |
4143 | * pmap_clear_modify: | | 4148 | * pmap_clear_modify: |
4144 | * | | 4149 | * |
4145 | * Clear the "modified" attribute for a page. | | 4150 | * Clear the "modified" attribute for a page. |
4146 | */ | | 4151 | */ |
4147 | bool | | 4152 | bool |
4148 | pmap_clear_modify(struct vm_page *pg) | | 4153 | pmap_clear_modify(struct vm_page *pg) |
4149 | { | | 4154 | { |
4150 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4155 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4151 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 4156 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
4152 | bool rv; | | 4157 | bool rv; |
4153 | | | 4158 | |
4154 | pmap_acquire_page_lock(md); | | 4159 | pmap_acquire_page_lock(md); |
4155 | | | 4160 | |
4156 | if (md->pvh_attrs & PVF_MOD) { | | 4161 | if (md->pvh_attrs & PVF_MOD) { |
4157 | rv = true; | | 4162 | rv = true; |
4158 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 4163 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
4159 | /* | | 4164 | /* |
4160 | * If we are going to clear the modified bit and there are | | 4165 | * If we are going to clear the modified bit and there are |
4161 | * no other modified bits set, flush the page to memory and | | 4166 | * no other modified bits set, flush the page to memory and |
4162 | * mark it clean. | | 4167 | * mark it clean. |
4163 | */ | | 4168 | */ |
4164 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD) | | 4169 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD) |
4165 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); | | 4170 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); |
4166 | #endif | | 4171 | #endif |
4167 | pmap_clearbit(md, pa, PVF_MOD); | | 4172 | pmap_clearbit(md, pa, PVF_MOD); |
4168 | } else { | | 4173 | } else { |
4169 | rv = false; | | 4174 | rv = false; |
4170 | } | | 4175 | } |
4171 | pmap_release_page_lock(md); | | 4176 | pmap_release_page_lock(md); |
4172 | | | 4177 | |
4173 | return rv; | | 4178 | return rv; |
4174 | } | | 4179 | } |
4175 | | | 4180 | |
4176 | /* | | 4181 | /* |
4177 | * pmap_clear_reference: | | 4182 | * pmap_clear_reference: |
4178 | * | | 4183 | * |
4179 | * Clear the "referenced" attribute for a page. | | 4184 | * Clear the "referenced" attribute for a page. |
4180 | */ | | 4185 | */ |
4181 | bool | | 4186 | bool |
4182 | pmap_clear_reference(struct vm_page *pg) | | 4187 | pmap_clear_reference(struct vm_page *pg) |
4183 | { | | 4188 | { |
4184 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4189 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4185 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 4190 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
4186 | bool rv; | | 4191 | bool rv; |
4187 | | | 4192 | |
4188 | pmap_acquire_page_lock(md); | | 4193 | pmap_acquire_page_lock(md); |
4189 | | | 4194 | |
4190 | if (md->pvh_attrs & PVF_REF) { | | 4195 | if (md->pvh_attrs & PVF_REF) { |
4191 | rv = true; | | 4196 | rv = true; |
4192 | pmap_clearbit(md, pa, PVF_REF); | | 4197 | pmap_clearbit(md, pa, PVF_REF); |
4193 | } else { | | 4198 | } else { |
4194 | rv = false; | | 4199 | rv = false; |
4195 | } | | 4200 | } |
4196 | pmap_release_page_lock(md); | | 4201 | pmap_release_page_lock(md); |
4197 | | | 4202 | |
4198 | return rv; | | 4203 | return rv; |
4199 | } | | 4204 | } |
4200 | | | 4205 | |
4201 | /* | | 4206 | /* |
4202 | * pmap_is_modified: | | 4207 | * pmap_is_modified: |
4203 | * | | 4208 | * |
4204 | * Test if a page has the "modified" attribute. | | 4209 | * Test if a page has the "modified" attribute. |
4205 | */ | | 4210 | */ |
4206 | /* See <arm/arm32/pmap.h> */ | | 4211 | /* See <arm/arm32/pmap.h> */ |
4207 | | | 4212 | |
4208 | /* | | 4213 | /* |
4209 | * pmap_is_referenced: | | 4214 | * pmap_is_referenced: |
4210 | * | | 4215 | * |
4211 | * Test if a page has the "referenced" attribute. | | 4216 | * Test if a page has the "referenced" attribute. |
4212 | */ | | 4217 | */ |
4213 | /* See <arm/arm32/pmap.h> */ | | 4218 | /* See <arm/arm32/pmap.h> */ |
4214 | | | 4219 | |
4215 | #if defined(ARM_MMU_EXTENDED) && 0 | | 4220 | #if defined(ARM_MMU_EXTENDED) && 0 |
4216 | int | | 4221 | int |
4217 | pmap_prefetchabt_fixup(void *v) | | 4222 | pmap_prefetchabt_fixup(void *v) |
4218 | { | | 4223 | { |
4219 | struct trapframe * const tf = v; | | 4224 | struct trapframe * const tf = v; |
4220 | vaddr_t va = trunc_page(tf->tf_pc); | | 4225 | vaddr_t va = trunc_page(tf->tf_pc); |
4221 | int rv = ABORT_FIXUP_FAILED; | | 4226 | int rv = ABORT_FIXUP_FAILED; |
4222 | | | 4227 | |
4223 | if (!TRAP_USERMODE(tf) && va < VM_MAXUSER_ADDRESS) | | 4228 | if (!TRAP_USERMODE(tf) && va < VM_MAXUSER_ADDRESS) |
4224 | return rv; | | 4229 | return rv; |
4225 | | | 4230 | |
4226 | kpreempt_disable(); | | 4231 | kpreempt_disable(); |
4227 | pmap_t pm = curcpu()->ci_pmap_cur; | | 4232 | pmap_t pm = curcpu()->ci_pmap_cur; |
4228 | const size_t l1slot = l1pte_index(va); | | 4233 | const size_t l1slot = l1pte_index(va); |
4229 | struct l2_dtable * const l2 = pm->pm_l2[L2_IDX(l1slot)]; | | 4234 | struct l2_dtable * const l2 = pm->pm_l2[L2_IDX(l1slot)]; |
4230 | if (l2 == NULL) | | 4235 | if (l2 == NULL) |
4231 | goto out; | | 4236 | goto out; |
4232 | | | 4237 | |
4233 | struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; | | 4238 | struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; |
4234 | if (l2b->l2b_kva == NULL) | | 4239 | if (l2b->l2b_kva == NULL) |
4235 | goto out; | | 4240 | goto out; |
4236 | | | 4241 | |
4237 | /* | | 4242 | /* |
4238 | * Check the PTE itself. | | 4243 | * Check the PTE itself. |
4239 | */ | | 4244 | */ |
4240 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 4245 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; |
4241 | const pt_entry_t opte = *ptep; | | 4246 | const pt_entry_t opte = *ptep; |
4242 | if ((opte & L2_S_PROT_U) == 0 || (opte & L2_XS_XN) == 0) | | 4247 | if ((opte & L2_S_PROT_U) == 0 || (opte & L2_XS_XN) == 0) |
4243 | goto out; | | 4248 | goto out; |
4244 | | | 4249 | |
4245 | paddr_t pa = l2pte_pa(opte); | | 4250 | paddr_t pa = l2pte_pa(opte); |
4246 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); | | 4251 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); |
4247 | KASSERT(pg != NULL); | | 4252 | KASSERT(pg != NULL); |
4248 | | | 4253 | |
4249 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 4254 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
4250 | | | 4255 | |
4251 | pmap_acquire_page_lock(md); | | 4256 | pmap_acquire_page_lock(md); |
4252 | struct pv_entry * const pv = pmap_find_pv(md, pm, va); | | 4257 | struct pv_entry * const pv = pmap_find_pv(md, pm, va); |
4253 | KASSERT(pv != NULL); | | 4258 | KASSERT(pv != NULL); |
4254 | | | 4259 | |
4255 | if (PV_IS_EXEC_P(pv->pv_flags)) { | | 4260 | if (PV_IS_EXEC_P(pv->pv_flags)) { |
4256 | l2pte_reset(ptep); | | 4261 | l2pte_reset(ptep); |
4257 | PTE_SYNC(ptep); | | 4262 | PTE_SYNC(ptep); |
4258 | pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); | | 4263 | pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); |
4259 | if (!PV_IS_EXEC_P(md->pvh_attrs)) { | | 4264 | if (!PV_IS_EXEC_P(md->pvh_attrs)) { |
4260 | pmap_syncicache_page(md, pa); | | 4265 | pmap_syncicache_page(md, pa); |
4261 | } | | 4266 | } |
4262 | rv = ABORT_FIXUP_RETURN; | | 4267 | rv = ABORT_FIXUP_RETURN; |
4263 | l2pte_set(ptep, opte & ~L2_XS_XN, 0); | | 4268 | l2pte_set(ptep, opte & ~L2_XS_XN, 0); |
4264 | PTE_SYNC(ptep); | | 4269 | PTE_SYNC(ptep); |
4265 | } | | 4270 | } |
4266 | pmap_release_page_lock(md); | | 4271 | pmap_release_page_lock(md); |
4267 | | | 4272 | |
4268 | out: | | 4273 | out: |
4269 | kpreempt_enable(); | | 4274 | kpreempt_enable(); |
4270 | return rv; | | 4275 | return rv; |
4271 | } | | 4276 | } |
4272 | #endif | | 4277 | #endif |
4273 | | | 4278 | |
4274 | int | | 4279 | int |
4275 | pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) | | 4280 | pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) |
4276 | { | | 4281 | { |
4277 | struct l2_dtable *l2; | | 4282 | struct l2_dtable *l2; |
4278 | struct l2_bucket *l2b; | | 4283 | struct l2_bucket *l2b; |
4279 | paddr_t pa; | | 4284 | paddr_t pa; |
4280 | const size_t l1slot = l1pte_index(va); | | 4285 | const size_t l1slot = l1pte_index(va); |
4281 | int rv = 0; | | 4286 | int rv = 0; |
4282 | | | 4287 | |
4283 | UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); | | 4288 | UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); |
4284 | | | 4289 | |
4285 | va = trunc_page(va); | | 4290 | va = trunc_page(va); |
4286 | | | 4291 | |
4287 | KASSERT(!user || (pm != pmap_kernel())); | | 4292 | KASSERT(!user || (pm != pmap_kernel())); |
4288 | | | 4293 | |
4289 | UVMHIST_LOG(maphist, " (pm=%#x, va=%#x, ftype=%#x, user=%d)", | | 4294 | UVMHIST_LOG(maphist, " (pm=%#x, va=%#x, ftype=%#x, user=%d)", |
4290 | pm, va, ftype, user); | | 4295 | pm, va, ftype, user); |
4291 | #ifdef ARM_MMU_EXTENDED | | 4296 | #ifdef ARM_MMU_EXTENDED |
4292 | UVMHIST_LOG(maphist, " ti=%#x pai=%#x asid=%#x", | | 4297 | UVMHIST_LOG(maphist, " ti=%#x pai=%#x asid=%#x", |
4293 | cpu_tlb_info(curcpu()), PMAP_PAI(pm, cpu_tlb_info(curcpu())), | | 4298 | cpu_tlb_info(curcpu()), PMAP_PAI(pm, cpu_tlb_info(curcpu())), |
4294 | PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0); | | 4299 | PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0); |
4295 | #endif | | 4300 | #endif |
4296 | | | 4301 | |
4297 | pmap_acquire_pmap_lock(pm); | | 4302 | pmap_acquire_pmap_lock(pm); |
4298 | | | 4303 | |
4299 | /* | | 4304 | /* |
4300 | * If there is no l2_dtable for this address, then the process | | 4305 | * If there is no l2_dtable for this address, then the process |
4301 | * has no business accessing it. | | 4306 | * has no business accessing it. |
4302 | * | | 4307 | * |
4303 | * Note: This will catch userland processes trying to access | | 4308 | * Note: This will catch userland processes trying to access |
4304 | * kernel addresses. | | 4309 | * kernel addresses. |
4305 | */ | | 4310 | */ |
4306 | l2 = pm->pm_l2[L2_IDX(l1slot)]; | | 4311 | l2 = pm->pm_l2[L2_IDX(l1slot)]; |
4307 | if (l2 == NULL) { | | 4312 | if (l2 == NULL) { |
4308 | UVMHIST_LOG(maphist, " no l2 for l1slot %#x", l1slot, 0, 0, 0); | | 4313 | UVMHIST_LOG(maphist, " no l2 for l1slot %#x", l1slot, 0, 0, 0); |
4309 | goto out; | | 4314 | goto out; |
4310 | } | | 4315 | } |
4311 | | | 4316 | |
4312 | /* | | 4317 | /* |
4313 | * Likewise if there is no L2 descriptor table | | 4318 | * Likewise if there is no L2 descriptor table |
4314 | */ | | 4319 | */ |
4315 | l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; | | 4320 | l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; |
4316 | if (l2b->l2b_kva == NULL) { | | 4321 | if (l2b->l2b_kva == NULL) { |
4317 | UVMHIST_LOG(maphist, " <-- done (no ptep for l1slot %#x)", l1slot, 0, 0, 0); | | 4322 | UVMHIST_LOG(maphist, " <-- done (no ptep for l1slot %#x)", l1slot, 0, 0, 0); |
4318 | goto out; | | 4323 | goto out; |
4319 | } | | 4324 | } |
4320 | | | 4325 | |
4321 | /* | | 4326 | /* |
4322 | * Check the PTE itself. | | 4327 | * Check the PTE itself. |
4323 | */ | | 4328 | */ |
4324 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 4329 | pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; |
4325 | pt_entry_t const opte = *ptep; | | 4330 | pt_entry_t const opte = *ptep; |
4326 | if (opte == 0 || (opte & L2_TYPE_MASK) == L2_TYPE_L) { | | 4331 | if (opte == 0 || (opte & L2_TYPE_MASK) == L2_TYPE_L) { |
4327 | UVMHIST_LOG(maphist, " <-- done (empty pde for l1slot %#x)", l1slot, 0, 0, 0); | | 4332 | UVMHIST_LOG(maphist, " <-- done (empty pde for l1slot %#x)", l1slot, 0, 0, 0); |
4328 | goto out; | | 4333 | goto out; |
4329 | } | | 4334 | } |
4330 | | | 4335 | |
4331 | #ifndef ARM_HAS_VBAR | | 4336 | #ifndef ARM_HAS_VBAR |
4332 | /* | | 4337 | /* |
4333 | * Catch a userland access to the vector page mapped at 0x0 | | 4338 | * Catch a userland access to the vector page mapped at 0x0 |
4334 | */ | | 4339 | */ |
4335 | if (user && (opte & L2_S_PROT_U) == 0) { | | 4340 | if (user && (opte & L2_S_PROT_U) == 0) { |
4336 | UVMHIST_LOG(maphist, " <-- done (vector_page)", 0, 0, 0, 0); | | 4341 | UVMHIST_LOG(maphist, " <-- done (vector_page)", 0, 0, 0, 0); |
4337 | goto out; | | 4342 | goto out; |
4338 | } | | 4343 | } |
4339 | #endif | | 4344 | #endif |
4340 | | | 4345 | |
4341 | pa = l2pte_pa(opte); | | 4346 | pa = l2pte_pa(opte); |
4342 | | | 4347 | |
4343 | if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(opte)) { | | 4348 | if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(opte)) { |
4344 | /* | | 4349 | /* |
4345 | * This looks like a good candidate for "page modified" | | 4350 | * This looks like a good candidate for "page modified" |
4346 | * emulation... | | 4351 | * emulation... |
4347 | */ | | 4352 | */ |
4348 | struct pv_entry *pv; | | 4353 | struct pv_entry *pv; |
4349 | struct vm_page *pg; | | 4354 | struct vm_page *pg; |
4350 | | | 4355 | |
4351 | /* Extract the physical address of the page */ | | 4356 | /* Extract the physical address of the page */ |
4352 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { | | 4357 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { |
4353 | UVMHIST_LOG(maphist, " <-- done (mod/ref unmanaged page)", 0, 0, 0, 0); | | 4358 | UVMHIST_LOG(maphist, " <-- done (mod/ref unmanaged page)", 0, 0, 0, 0); |
4354 | goto out; | | 4359 | goto out; |
4355 | } | | 4360 | } |
4356 | | | 4361 | |
4357 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4362 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4358 | | | 4363 | |
4359 | /* Get the current flags for this page. */ | | 4364 | /* Get the current flags for this page. */ |
4360 | pmap_acquire_page_lock(md); | | 4365 | pmap_acquire_page_lock(md); |
4361 | pv = pmap_find_pv(md, pm, va); | | 4366 | pv = pmap_find_pv(md, pm, va); |
4362 | if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) { | | 4367 | if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) { |
4363 | pmap_release_page_lock(md); | | 4368 | pmap_release_page_lock(md); |
4364 | UVMHIST_LOG(maphist, " <-- done (mod/ref emul: no PV)", 0, 0, 0, 0); | | 4369 | UVMHIST_LOG(maphist, " <-- done (mod/ref emul: no PV)", 0, 0, 0, 0); |
4365 | goto out; | | 4370 | goto out; |
4366 | } | | 4371 | } |
4367 | | | 4372 | |
4368 | /* | | 4373 | /* |
4369 | * Do the flags say this page is writable? If not, then it | | 4374 | * Do the flags say this page is writable? If not, then it |
4370 | * is a genuine write fault. If yes, then the write fault is | | 4375 | * is a genuine write fault. If yes, then the write fault is |
4371 | * our fault, as we did not reflect the write access in the | | 4376 | * our fault, as we did not reflect the write access in the |
4372 | * PTE. Now that we know a write has occurred, we can correct | | 4377 | * PTE. Now that we know a write has occurred, we can correct |
4373 | * this and also set the modified bit. | | 4378 | * this and also set the modified bit. |
4374 | */ | | 4379 | */ |
4375 | if ((pv->pv_flags & PVF_WRITE) == 0) { | | 4380 | if ((pv->pv_flags & PVF_WRITE) == 0) { |
4376 | pmap_release_page_lock(md); | | 4381 | pmap_release_page_lock(md); |
4377 | goto out; | | 4382 | goto out; |
4378 | } | | 4383 | } |
4379 | | | 4384 | |
4380 | md->pvh_attrs |= PVF_REF | PVF_MOD; | | 4385 | md->pvh_attrs |= PVF_REF | PVF_MOD; |
4381 | pv->pv_flags |= PVF_REF | PVF_MOD; | | 4386 | pv->pv_flags |= PVF_REF | PVF_MOD; |
4382 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) | | 4387 | #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) |
4383 | /* | | 4388 | /* |
4384 | * If there are cacheable mappings for this page, mark it dirty. | | 4389 | * If there are cacheable mappings for this page, mark it dirty. |
4385 | */ | | 4390 | */ |
4386 | if ((md->pvh_attrs & PVF_NC) == 0) | | 4391 | if ((md->pvh_attrs & PVF_NC) == 0) |
4387 | md->pvh_attrs |= PVF_DIRTY; | | 4392 | md->pvh_attrs |= PVF_DIRTY; |
4388 | #endif | | 4393 | #endif |
4389 | #ifdef ARM_MMU_EXTENDED | | 4394 | #ifdef ARM_MMU_EXTENDED |
4390 | if (md->pvh_attrs & PVF_EXEC) { | | 4395 | if (md->pvh_attrs & PVF_EXEC) { |
4391 | md->pvh_attrs &= ~PVF_EXEC; | | 4396 | md->pvh_attrs &= ~PVF_EXEC; |
4392 | PMAPCOUNT(exec_discarded_modfixup); | | 4397 | PMAPCOUNT(exec_discarded_modfixup); |
4393 | } | | 4398 | } |
4394 | #endif | | 4399 | #endif |
4395 | pmap_release_page_lock(md); | | 4400 | pmap_release_page_lock(md); |
4396 | | | 4401 | |
4397 | /* | | 4402 | /* |
4398 | * Re-enable write permissions for the page. No need to call | | 4403 | * Re-enable write permissions for the page. No need to call |
4399 | * pmap_vac_me_harder(), since this is just a | | 4404 | * pmap_vac_me_harder(), since this is just a |
4400 | * modified-emulation fault, and the PVF_WRITE bit isn't | | 4405 | * modified-emulation fault, and the PVF_WRITE bit isn't |
4401 | * changing. We've already set the cacheable bits based on | | 4406 | * changing. We've already set the cacheable bits based on |
4402 | * the assumption that we can write to this page. | | 4407 | * the assumption that we can write to this page. |
4403 | */ | | 4408 | */ |
4404 | const pt_entry_t npte = | | 4409 | const pt_entry_t npte = |
4405 | l2pte_set_writable((opte & ~L2_TYPE_MASK) | L2_S_PROTO) | | 4410 | l2pte_set_writable((opte & ~L2_TYPE_MASK) | L2_S_PROTO) |
4406 | #ifdef ARM_MMU_EXTENDED | | 4411 | #ifdef ARM_MMU_EXTENDED |
4407 | | (pm != pmap_kernel() ? L2_XS_nG : 0) | | 4412 | | (pm != pmap_kernel() ? L2_XS_nG : 0) |
4408 | #endif | | 4413 | #endif |
4409 | | 0; | | 4414 | | 0; |
4410 | l2pte_reset(ptep); | | 4415 | l2pte_reset(ptep); |
4411 | PTE_SYNC(ptep); | | 4416 | PTE_SYNC(ptep); |
4412 | pmap_tlb_flush_SE(pm, va, | | 4417 | pmap_tlb_flush_SE(pm, va, |
4413 | (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); | | 4418 | (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); |
4414 | l2pte_set(ptep, npte, 0); | | 4419 | l2pte_set(ptep, npte, 0); |
4415 | PTE_SYNC(ptep); | | 4420 | PTE_SYNC(ptep); |
4416 | PMAPCOUNT(fixup_mod); | | 4421 | PMAPCOUNT(fixup_mod); |
4417 | rv = 1; | | 4422 | rv = 1; |
4418 | UVMHIST_LOG(maphist, " <-- done (mod/ref emul: changed pte from %#x to %#x)", | | 4423 | UVMHIST_LOG(maphist, " <-- done (mod/ref emul: changed pte from %#x to %#x)", |
4419 | opte, npte, 0, 0); | | 4424 | opte, npte, 0, 0); |
4420 | } else if ((opte & L2_TYPE_MASK) == L2_TYPE_INV) { | | 4425 | } else if ((opte & L2_TYPE_MASK) == L2_TYPE_INV) { |
4421 | /* | | 4426 | /* |
4422 | * This looks like a good candidate for "page referenced" | | 4427 | * This looks like a good candidate for "page referenced" |
4423 | * emulation. | | 4428 | * emulation. |
4424 | */ | | 4429 | */ |
4425 | struct vm_page *pg; | | 4430 | struct vm_page *pg; |
4426 | | | 4431 | |
4427 | /* Extract the physical address of the page */ | | 4432 | /* Extract the physical address of the page */ |
4428 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { | | 4433 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { |
4429 | UVMHIST_LOG(maphist, " <-- done (ref emul: unmanaged page)", 0, 0, 0, 0); | | 4434 | UVMHIST_LOG(maphist, " <-- done (ref emul: unmanaged page)", 0, 0, 0, 0); |
4430 | goto out; | | 4435 | goto out; |
4431 | } | | 4436 | } |
4432 | | | 4437 | |
4433 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4438 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4434 | | | 4439 | |
4435 | /* Get the current flags for this page. */ | | 4440 | /* Get the current flags for this page. */ |
4436 | pmap_acquire_page_lock(md); | | 4441 | pmap_acquire_page_lock(md); |
4437 | struct pv_entry *pv = pmap_find_pv(md, pm, va); | | 4442 | struct pv_entry *pv = pmap_find_pv(md, pm, va); |
4438 | if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) { | | 4443 | if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) { |
4439 | pmap_release_page_lock(md); | | 4444 | pmap_release_page_lock(md); |
4440 | UVMHIST_LOG(maphist, " <-- done (ref emul no PV)", 0, 0, 0, 0); | | 4445 | UVMHIST_LOG(maphist, " <-- done (ref emul no PV)", 0, 0, 0, 0); |
4441 | goto out; | | 4446 | goto out; |
4442 | } | | 4447 | } |
4443 | | | 4448 | |
4444 | md->pvh_attrs |= PVF_REF; | | 4449 | md->pvh_attrs |= PVF_REF; |
4445 | pv->pv_flags |= PVF_REF; | | 4450 | pv->pv_flags |= PVF_REF; |
4446 | | | 4451 | |
4447 | pt_entry_t npte = | | 4452 | pt_entry_t npte = |
4448 | l2pte_set_readonly((opte & ~L2_TYPE_MASK) | L2_S_PROTO); | | 4453 | l2pte_set_readonly((opte & ~L2_TYPE_MASK) | L2_S_PROTO); |
4449 | #ifdef ARM_MMU_EXTENDED | | 4454 | #ifdef ARM_MMU_EXTENDED |
4450 | if (pm != pmap_kernel()) { | | 4455 | if (pm != pmap_kernel()) { |
4451 | npte |= L2_XS_nG; | | 4456 | npte |= L2_XS_nG; |
4452 | } | | 4457 | } |
4453 | /* | | 4458 | /* |
4454 | * If we got called from a prefetch abort, then ftype will have | | 4459 | * If we got called from a prefetch abort, then ftype will have |
4455 | * VM_PROT_EXECUTE set. Now see if we have no-execute set in | | 4460 | * VM_PROT_EXECUTE set. Now see if we have no-execute set in |
4456 | * the PTE. | | 4461 | * the PTE. |
4457 | */ | | 4462 | */ |
4458 | if (user && (ftype & VM_PROT_EXECUTE) && (npte & L2_XS_XN)) { | | 4463 | if (user && (ftype & VM_PROT_EXECUTE) && (npte & L2_XS_XN)) { |
4459 | /* | | 4464 | /* |
4460 | * Is this a mapping of an executable page? | | 4465 | * Is this a mapping of an executable page? |
4461 | */ | | 4466 | */ |
4462 | if ((pv->pv_flags & PVF_EXEC) == 0) { | | 4467 | if ((pv->pv_flags & PVF_EXEC) == 0) { |
4463 | pmap_release_page_lock(md); | | 4468 | pmap_release_page_lock(md); |
4464 | UVMHIST_LOG(maphist, " <-- done (ref emul: no exec)", | | 4469 | UVMHIST_LOG(maphist, " <-- done (ref emul: no exec)", |
4465 | 0, 0, 0, 0); | | 4470 | 0, 0, 0, 0); |
4466 | goto out; | | 4471 | goto out; |
4467 | } | | 4472 | } |
4468 | /* | | 4473 | /* |
4469 | * If we haven't synced the page, do so now. | | 4474 | * If we haven't synced the page, do so now. |
4470 | */ | | 4475 | */ |
4471 | if ((md->pvh_attrs & PVF_EXEC) == 0) { | | 4476 | if ((md->pvh_attrs & PVF_EXEC) == 0) { |
4472 | UVMHIST_LOG(maphist, " ref emul: syncicache page #%#x", | | 4477 | UVMHIST_LOG(maphist, " ref emul: syncicache page #%#x", |
4473 | pa, 0, 0, 0); | | 4478 | pa, 0, 0, 0); |
4474 | pmap_syncicache_page(md, pa); | | 4479 | pmap_syncicache_page(md, pa); |
4475 | PMAPCOUNT(fixup_exec); | | 4480 | PMAPCOUNT(fixup_exec); |
4476 | } | | 4481 | } |
4477 | npte &= ~L2_XS_XN; | | 4482 | npte &= ~L2_XS_XN; |
4478 | } | | 4483 | } |
4479 | #endif /* ARM_MMU_EXTENDED */ | | 4484 | #endif /* ARM_MMU_EXTENDED */ |
4480 | pmap_release_page_lock(md); | | 4485 | pmap_release_page_lock(md); |
4481 | l2pte_reset(ptep); | | 4486 | l2pte_reset(ptep); |
4482 | PTE_SYNC(ptep); | | 4487 | PTE_SYNC(ptep); |
4483 | pmap_tlb_flush_SE(pm, va, | | 4488 | pmap_tlb_flush_SE(pm, va, |
4484 | (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); | | 4489 | (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); |
4485 | l2pte_set(ptep, npte, 0); | | 4490 | l2pte_set(ptep, npte, 0); |
4486 | PTE_SYNC(ptep); | | 4491 | PTE_SYNC(ptep); |
4487 | PMAPCOUNT(fixup_ref); | | 4492 | PMAPCOUNT(fixup_ref); |
4488 | rv = 1; | | 4493 | rv = 1; |
4489 | UVMHIST_LOG(maphist, " <-- done (ref emul: changed pte from %#x to %#x)", | | 4494 | UVMHIST_LOG(maphist, " <-- done (ref emul: changed pte from %#x to %#x)", |
4490 | opte, npte, 0, 0); | | 4495 | opte, npte, 0, 0); |
4491 | #ifdef ARM_MMU_EXTENDED | | 4496 | #ifdef ARM_MMU_EXTENDED |
4492 | } else if (user && (ftype & VM_PROT_EXECUTE) && (opte & L2_XS_XN)) { | | 4497 | } else if (user && (ftype & VM_PROT_EXECUTE) && (opte & L2_XS_XN)) { |
4493 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); | | 4498 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); |
4494 | if (pg == NULL) { | | 4499 | if (pg == NULL) { |
4495 | UVMHIST_LOG(maphist, " <-- done (unmanaged page)", 0, 0, 0, 0); | | 4500 | UVMHIST_LOG(maphist, " <-- done (unmanaged page)", 0, 0, 0, 0); |
4496 | goto out; | | 4501 | goto out; |
4497 | } | | 4502 | } |
4498 | | | 4503 | |
4499 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 4504 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
4500 | | | 4505 | |
4501 | /* Get the current flags for this page. */ | | 4506 | /* Get the current flags for this page. */ |
4502 | pmap_acquire_page_lock(md); | | 4507 | pmap_acquire_page_lock(md); |
4503 | struct pv_entry * const pv = pmap_find_pv(md, pm, va); | | 4508 | struct pv_entry * const pv = pmap_find_pv(md, pm, va); |
4504 | if (pv == NULL || (pv->pv_flags & PVF_EXEC) == 0) { | | 4509 | if (pv == NULL || (pv->pv_flags & PVF_EXEC) == 0) { |
4505 | pmap_release_page_lock(md); | | 4510 | pmap_release_page_lock(md); |
4506 | UVMHIST_LOG(maphist, " <-- done (no PV or not EXEC)", 0, 0, 0, 0); | | 4511 | UVMHIST_LOG(maphist, " <-- done (no PV or not EXEC)", 0, 0, 0, 0); |
4507 | goto out; | | 4512 | goto out; |
4508 | } | | 4513 | } |
4509 | | | 4514 | |
4510 | /* | | 4515 | /* |
4511 | * If we haven't synced the page, do so now. | | 4516 | * If we haven't synced the page, do so now. |
4512 | */ | | 4517 | */ |
4513 | if ((md->pvh_attrs & PVF_EXEC) == 0) { | | 4518 | if ((md->pvh_attrs & PVF_EXEC) == 0) { |
4514 | UVMHIST_LOG(maphist, "syncicache page #%#x", | | 4519 | UVMHIST_LOG(maphist, "syncicache page #%#x", |
4515 | pa, 0, 0, 0); | | 4520 | pa, 0, 0, 0); |
4516 | pmap_syncicache_page(md, pa); | | 4521 | pmap_syncicache_page(md, pa); |
4517 | } | | 4522 | } |
4518 | pmap_release_page_lock(md); | | 4523 | pmap_release_page_lock(md); |
4519 | /* | | 4524 | /* |
4520 | * Turn off no-execute. | | 4525 | * Turn off no-execute. |
4521 | */ | | 4526 | */ |
4522 | KASSERT(opte & L2_XS_nG); | | 4527 | KASSERT(opte & L2_XS_nG); |
4523 | l2pte_reset(ptep); | | 4528 | l2pte_reset(ptep); |
4524 | PTE_SYNC(ptep); | | 4529 | PTE_SYNC(ptep); |
4525 | pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); | | 4530 | pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); |
4526 | l2pte_set(ptep, opte & ~L2_XS_XN, 0); | | 4531 | l2pte_set(ptep, opte & ~L2_XS_XN, 0); |
4527 | PTE_SYNC(ptep); | | 4532 | PTE_SYNC(ptep); |
4528 | rv = 1; | | 4533 | rv = 1; |
4529 | PMAPCOUNT(fixup_exec); | | 4534 | PMAPCOUNT(fixup_exec); |
4530 | UVMHIST_LOG(maphist, "exec: changed pte from %#x to %#x", | | 4535 | UVMHIST_LOG(maphist, "exec: changed pte from %#x to %#x", |
4531 | opte, opte & ~L2_XS_XN, 0, 0); | | 4536 | opte, opte & ~L2_XS_XN, 0, 0); |
4532 | #endif | | 4537 | #endif |
4533 | } | | 4538 | } |
4534 | | | 4539 | |
4535 | #ifndef ARM_MMU_EXTENDED | | 4540 | #ifndef ARM_MMU_EXTENDED |
4536 | /* | | 4541 | /* |
4537 | * We know there is a valid mapping here, so simply | | 4542 | * We know there is a valid mapping here, so simply |
4538 | * fix up the L1 if necessary. | | 4543 | * fix up the L1 if necessary. |
4539 | */ | | 4544 | */ |
4540 | pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot; | | 4545 | pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot; |
4541 | pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa | L1_C_DOM(pmap_domain(pm)); | | 4546 | pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa | L1_C_DOM(pmap_domain(pm)); |
4542 | if (*pdep != pde) { | | 4547 | if (*pdep != pde) { |
4543 | l1pte_setone(pdep, pde); | | 4548 | l1pte_setone(pdep, pde); |
4544 | PTE_SYNC(pdep); | | 4549 | PTE_SYNC(pdep); |
4545 | rv = 1; | | 4550 | rv = 1; |
4546 | PMAPCOUNT(fixup_pdes); | | 4551 | PMAPCOUNT(fixup_pdes); |
4547 | } | | 4552 | } |
4548 | #endif | | 4553 | #endif |
4549 | | | 4554 | |
4550 | #ifdef CPU_SA110 | | 4555 | #ifdef CPU_SA110 |
4551 | /* | | 4556 | /* |
4552 | * There are bugs in the rev K SA110. This is a check for one | | 4557 | * There are bugs in the rev K SA110. This is a check for one |
4553 | * of them. | | 4558 | * of them. |
4554 | */ | | 4559 | */ |
4555 | if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && | | 4560 | if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && |
4556 | curcpu()->ci_arm_cpurev < 3) { | | 4561 | curcpu()->ci_arm_cpurev < 3) { |
4557 | /* Always current pmap */ | | 4562 | /* Always current pmap */ |
4558 | if (l2pte_valid_p(opte)) { | | 4563 | if (l2pte_valid_p(opte)) { |
4559 | extern int kernel_debug; | | 4564 | extern int kernel_debug; |
4560 | if (kernel_debug & 1) { | | 4565 | if (kernel_debug & 1) { |
4561 | struct proc *p = curlwp->l_proc; | | 4566 | struct proc *p = curlwp->l_proc; |
4562 | printf("prefetch_abort: page is already " | | 4567 | printf("prefetch_abort: page is already " |
4563 | "mapped - pte=%p *pte=%08x\n", ptep, opte); | | 4568 | "mapped - pte=%p *pte=%08x\n", ptep, opte); |
4564 | printf("prefetch_abort: pc=%08lx proc=%p " | | 4569 | printf("prefetch_abort: pc=%08lx proc=%p " |
4565 | "process=%s\n", va, p, p->p_comm); | | 4570 | "process=%s\n", va, p, p->p_comm); |
4566 | printf("prefetch_abort: far=%08x fs=%x\n", | | 4571 | printf("prefetch_abort: far=%08x fs=%x\n", |
4567 | cpu_faultaddress(), cpu_faultstatus()); | | 4572 | cpu_faultaddress(), cpu_faultstatus()); |
4568 | } | | 4573 | } |
4569 | #ifdef DDB | | 4574 | #ifdef DDB |
4570 | if (kernel_debug & 2) | | 4575 | if (kernel_debug & 2) |
4571 | Debugger(); | | 4576 | Debugger(); |
4572 | #endif | | 4577 | #endif |
4573 | rv = 1; | | 4578 | rv = 1; |
4574 | } | | 4579 | } |
4575 | } | | 4580 | } |
4576 | #endif /* CPU_SA110 */ | | 4581 | #endif /* CPU_SA110 */ |
4577 | | | 4582 | |
4578 | #ifndef ARM_MMU_EXTENDED | | 4583 | #ifndef ARM_MMU_EXTENDED |
4579 | /* | | 4584 | /* |
4580 | * If 'rv == 0' at this point, it generally indicates that there is a | | 4585 | * If 'rv == 0' at this point, it generally indicates that there is a |
4581 | * stale TLB entry for the faulting address. That might be due to an | | 4586 | * stale TLB entry for the faulting address. That might be due to an |
4582 | * incorrect setting of pmap_needs_pte_sync, so set it and retry. | | 4587 | * incorrect setting of pmap_needs_pte_sync, so set it and retry. |
4583 | */ | | 4588 | */ |
4584 | if (rv == 0 | | 4589 | if (rv == 0 |
4585 | && pm->pm_l1->l1_domain_use_count == 1 | | 4590 | && pm->pm_l1->l1_domain_use_count == 1 |
4586 | && pmap_needs_pte_sync == 0) { | | 4591 | && pmap_needs_pte_sync == 0) { |
4587 | pmap_needs_pte_sync = 1; | | 4592 | pmap_needs_pte_sync = 1; |
4588 | PTE_SYNC(ptep); | | 4593 | PTE_SYNC(ptep); |
4589 | PMAPCOUNT(fixup_ptesync); | | 4594 | PMAPCOUNT(fixup_ptesync); |
4590 | rv = 1; | | 4595 | rv = 1; |
4591 | } | | 4596 | } |
4592 | #endif | | 4597 | #endif |
4593 | | | 4598 | |
4594 | #ifndef MULTIPROCESSOR | | 4599 | #ifndef MULTIPROCESSOR |
4595 | #if defined(DEBUG) || 1 | | 4600 | #if defined(DEBUG) || 1 |
4596 | /* | | 4601 | /* |
4597 | * If 'rv == 0' at this point, it generally indicates that there is a | | 4602 | * If 'rv == 0' at this point, it generally indicates that there is a |
4598 | * stale TLB entry for the faulting address. This happens when two or | | 4603 | * stale TLB entry for the faulting address. This happens when two or |
4599 | * more processes are sharing an L1. Since we don't flush the TLB on | | 4604 | * more processes are sharing an L1. Since we don't flush the TLB on |
4600 | * a context switch between such processes, we can take domain faults | | 4605 | * a context switch between such processes, we can take domain faults |
4601 | * for mappings which exist at the same VA in both processes. EVEN IF | | 4606 | * for mappings which exist at the same VA in both processes. EVEN IF |
4602 | * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for | | 4607 | * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for |
4603 | * example. | | 4608 | * example. |
4604 | * | | 4609 | * |
4605 | * This is extremely likely to happen if pmap_enter() updated the L1 | | 4610 | * This is extremely likely to happen if pmap_enter() updated the L1 |
4606 | * entry for a recently entered mapping. In this case, the TLB is | | 4611 | * entry for a recently entered mapping. In this case, the TLB is |
4607 | * flushed for the new mapping, but there may still be TLB entries for | | 4612 | * flushed for the new mapping, but there may still be TLB entries for |
4608 | * other mappings belonging to other processes in the 1MB range | | 4613 | * other mappings belonging to other processes in the 1MB range |
4609 | * covered by the L1 entry. | | 4614 | * covered by the L1 entry. |
4610 | * | | 4615 | * |
4611 | * Since 'rv == 0', we know that the L1 already contains the correct | | 4616 | * Since 'rv == 0', we know that the L1 already contains the correct |
4612 | * value, so the fault must be due to a stale TLB entry. | | 4617 | * value, so the fault must be due to a stale TLB entry. |
4613 | * | | 4618 | * |
4614 | * Since we always need to flush the TLB anyway in the case where we | | 4619 | * Since we always need to flush the TLB anyway in the case where we |
4615 | * fixed up the L1, or frobbed the L2 PTE, we effectively deal with | | 4620 | * fixed up the L1, or frobbed the L2 PTE, we effectively deal with |
4616 | * stale TLB entries dynamically. | | 4621 | * stale TLB entries dynamically. |
4617 | * | | 4622 | * |
4618 | * However, the above condition can ONLY happen if the current L1 is | | 4623 | * However, the above condition can ONLY happen if the current L1 is |
4619 | * being shared. If it happens when the L1 is unshared, it indicates | | 4624 | * being shared. If it happens when the L1 is unshared, it indicates |
4620 | * that other parts of the pmap are not doing their job WRT managing | | 4625 | * that other parts of the pmap are not doing their job WRT managing |
4621 | * the TLB. | | 4626 | * the TLB. |
4622 | */ | | 4627 | */ |
4623 | if (rv == 0 | | 4628 | if (rv == 0 |
4624 | #ifndef ARM_MMU_EXTENDED | | 4629 | #ifndef ARM_MMU_EXTENDED |
4625 | && pm->pm_l1->l1_domain_use_count == 1 | | 4630 | && pm->pm_l1->l1_domain_use_count == 1 |
4626 | #endif | | 4631 | #endif |
4627 | && true) { | | 4632 | && true) { |
4628 | #ifdef DEBUG | | 4633 | #ifdef DEBUG |
4629 | extern int last_fault_code; | | 4634 | extern int last_fault_code; |
4630 | #else | | 4635 | #else |
		int last_fault_code = ftype & VM_PROT_EXECUTE
		    ? armreg_ifsr_read()
		    : armreg_dfsr_read();
#endif
		printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
		    pm, va, ftype);
		printf("fixup: l2 %p, l2b %p, ptep %p, pte %#x\n",
		    l2, l2b, ptep, opte);

#ifndef ARM_MMU_EXTENDED
		printf("fixup: pdep %p, pde %#x, fsr %#x\n",
		    pdep, pde, last_fault_code);
#else
		printf("fixup: pdep %p, pde %#x, ttbcr %#x\n",
		    &pmap_l1_kva(pm)[l1slot], pmap_l1_kva(pm)[l1slot],
		    armreg_ttbcr_read());
		printf("fixup: fsr %#x cpm %p casid %#x contextidr %#x dacr %#x\n",
		    last_fault_code, curcpu()->ci_pmap_cur,
		    curcpu()->ci_pmap_asid_cur,
		    armreg_contextidr_read(), armreg_dacr_read());
#ifdef _ARM_ARCH_7
		if (ftype & VM_PROT_WRITE)
			armreg_ats1cuw_write(va);
		else
			armreg_ats1cur_write(va);
		arm_isb();
		printf("fixup: par %#x\n", armreg_par_read());
#endif
#endif
#ifdef DDB
		extern int kernel_debug;

		if (kernel_debug & 2) {
			pmap_release_pmap_lock(pm);
#ifdef UVMHIST
			KERNHIST_DUMP(maphist);
#endif
			cpu_Debugger();
			pmap_acquire_pmap_lock(pm);
		}
#endif
	}
#endif
#endif

#ifndef ARM_MMU_EXTENDED
	/* Flush the TLB in the shared L1 case - see comment above */
	pmap_tlb_flush_SE(pm, va,
	    (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF);
#endif

	rv = 1;

out:
	pmap_release_pmap_lock(pm);

	return (rv);
}
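
/*
 * Illustrative sketch, not part of this file: the machine-dependent
 * abort handlers are expected to try pmap_fault_fixup() first and fall
 * back to the full uvm_fault() path only when it returns 0.  The
 * function and variable names below are hypothetical.
 */
#if 0
static int
abort_fixup_sketch(struct vm_map *map, vaddr_t va, vm_prot_t ftype)
{

	/* Cheap fixup: ref/mod emulation or a stale TLB entry. */
	if (pmap_fault_fixup(map->pmap, va, ftype, map != kernel_map))
		return 0;	/* handled; retry the faulting instruction */

	/* Otherwise take the full fault path. */
	return uvm_fault(map, va, ftype);
}
#endif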

/*
 * Routine:	pmap_procwr
 *
 * Function:
 *	Synchronize caches corresponding to [va, va+len) in p.
 */
void
pmap_procwr(struct proc *p, vaddr_t va, int len)
{
	/* We only need to do anything if it is the current process. */
	if (p == curproc)
		cpu_icache_sync_range(va, len);
}
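
/*
 * Illustrative sketch, hypothetical caller: code that patches
 * instructions in another process (a ptrace-style debugger path, say)
 * must call pmap_procwr() after the write so the I-cache is synced
 * with the D-cache before the new instructions run.  copyout_proc()
 * is assumed to be available to the patcher; none of this is an
 * interface defined in this file.
 */
#if 0
static int
patch_text_sketch(struct proc *p, vaddr_t va, uint32_t insn)
{
	int error = copyout_proc(p, &insn, (void *)va, sizeof(insn));

	if (error == 0)
		pmap_procwr(p, va, sizeof(insn));
	return error;
}
#endif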

/*
 * Routine:	pmap_unwire
 * Function:	Clear the wired attribute for a map/virtual-address pair.
 *
 * In/out conditions:
 *	The mapping must already exist in the pmap.
 */
void
pmap_unwire(pmap_t pm, vaddr_t va)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	struct vm_page *pg;
	paddr_t pa;

	NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va));

	pmap_acquire_pmap_lock(pm);

	l2b = pmap_get_l2_bucket(pm, va);
	KDASSERT(l2b != NULL);

	ptep = &l2b->l2b_kva[l2pte_index(va)];
	pte = *ptep;

	/* Extract the physical address of the page */
	pa = l2pte_pa(pte);

	if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
		/* Update the wired bit in the pv entry for this page. */
		struct vm_page_md *md = VM_PAGE_TO_MD(pg);

		pmap_acquire_page_lock(md);
		(void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0);
		pmap_release_page_lock(md);
	}

	pmap_release_pmap_lock(pm);
}
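
/*
 * Illustrative sketch: pmap_unwire() is the counterpart of entering a
 * mapping with PMAP_WIRED set; the PVF_WIRED bit it clears is what the
 * pmap uses to track wired mappings.  A caller in the style of
 * uvm_fault_unwire() simply walks the range a page at a time:
 */
#if 0
static void
unwire_range_sketch(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	for (vaddr_t va = start; va < end; va += PAGE_SIZE)
		pmap_unwire(map->pmap, va);
}
#endif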

void
pmap_activate(struct lwp *l)
{
	struct cpu_info * const ci = curcpu();
	extern int block_userspace_access;
	pmap_t npm = l->l_proc->p_vmspace->vm_map.pmap;
#ifdef ARM_MMU_EXTENDED
	struct pmap_asid_info * const pai = PMAP_PAI(npm, cpu_tlb_info(ci));
#endif

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(l=%#x) pm=%#x", l, npm, 0, 0);

	/*
	 * If activating a non-current lwp, or if the current lwp's
	 * pmap is already active, just return.
	 */
	if (false
	    || l != curlwp
#ifdef ARM_MMU_EXTENDED
	    || (ci->ci_pmap_cur == npm &&
		(npm == pmap_kernel()
		 /* || PMAP_PAI_ASIDVALID_P(pai, cpu_tlb_info(ci)) */))
#else
	    || npm->pm_activated == true
#endif
	    || false) {
		UVMHIST_LOG(maphist, " <-- (same pmap)", curlwp, l, 0, 0);
		return;
	}

#ifndef ARM_MMU_EXTENDED
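	/*
	 * The DACR holds a 2-bit field per domain (16 domains in 32
	 * bits): 00 = no access, 01 = client (descriptor permission
	 * bits are checked), 11 = manager (permissions are ignored).
	 * The value built here grants client access to the kernel's
	 * domain and to this pmap's domain only; everything else
	 * faults.
	 */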
	const uint32_t ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2))
	    | (DOMAIN_CLIENT << (pmap_domain(npm) * 2));

	/*
	 * If TTB and DACR are unchanged, short-circuit all the
	 * TLB/cache management stuff.
	 */
	pmap_t opm = ci->ci_lastlwp
	    ? ci->ci_lastlwp->l_proc->p_vmspace->vm_map.pmap
	    : NULL;
	if (opm != NULL) {
		uint32_t odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2))
		    | (DOMAIN_CLIENT << (pmap_domain(opm) * 2));

		if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr)
			goto all_done;
	}
#endif /* !ARM_MMU_EXTENDED */

	PMAPCOUNT(activations);
	block_userspace_access = 1;

#ifndef ARM_MMU_EXTENDED
	/*
	 * If switching to a user vmspace which is different to the
	 * most recent one, and the most recent one is potentially
	 * live in the cache, we must write-back and invalidate the
	 * entire cache.
	 */
	pmap_t rpm = ci->ci_pmap_lastuser;
#endif

/*
 * XXXSCW: There's a corner case here which can leave turds in the cache as
 * reported in kern/41058. They're probably left over during tear-down and
 * switching away from an exiting process. Until the root cause is identified
 * and fixed, zap the cache when switching pmaps. This will result in a few
 * unnecessary cache flushes, but that's better than silently corrupting data.
 */
#ifndef ARM_MMU_EXTENDED
#if 0
	if (npm != pmap_kernel() && rpm && npm != rpm &&
	    rpm->pm_cstate.cs_cache) {
		rpm->pm_cstate.cs_cache = 0;
#ifdef PMAP_CACHE_VIVT
		cpu_idcache_wbinv_all();
#endif
	}
#else
	if (rpm) {
		rpm->pm_cstate.cs_cache = 0;
		if (npm == pmap_kernel())
			ci->ci_pmap_lastuser = NULL;
#ifdef PMAP_CACHE_VIVT
		cpu_idcache_wbinv_all();
#endif
	}
#endif

	/* No interrupts while we frob the TTB/DACR */
	uint32_t oldirqstate = disable_interrupts(IF32_bits);
#endif /* !ARM_MMU_EXTENDED */

#ifndef ARM_HAS_VBAR
	/*
	 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
	 * entry corresponding to 'vector_page' in the incoming L1 table
	 * before switching to it, otherwise subsequent interrupts/exceptions
	 * (including domain faults!) will jump into hyperspace.
	 */
	if (npm->pm_pl1vec != NULL) {
		cpu_tlb_flushID_SE((u_int)vector_page);
		cpu_cpwait();
		*npm->pm_pl1vec = npm->pm_l1vec;
		PTE_SYNC(npm->pm_pl1vec);
	}
#endif

#ifdef ARM_MMU_EXTENDED
	/*
	 * Assume that TTBR1 has only global mappings and TTBR0 only has
	 * non-global mappings. To prevent speculation from doing evil things
	 * we disable translation table walks using TTBR0 before setting the
	 * CONTEXTIDR (ASID) or new TTBR0 value. Once both are set, table
	 * walks are reenabled.
	 */
	UVMHIST_LOG(maphist, " acquiring asid", 0, 0, 0, 0);
	const uint32_t old_ttbcr = armreg_ttbcr_read();
	armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0);
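	/*
	 * With TTBCR.PD0 set, a TLB miss on a TTBR0-translated address
	 * yields a translation fault rather than a table walk, so no
	 * speculative walk can pair the old tables with the new ASID.
	 */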
	arm_isb();
	pmap_tlb_asid_acquire(npm, l);
	UVMHIST_LOG(maphist, " setting ttbr pa=%#x asid=%#x", npm->pm_l1_pa, pai->pai_asid, 0, 0);
	cpu_setttb(npm->pm_l1_pa, pai->pai_asid);
	/*
	 * Now we can reenable tablewalks since the CONTEXTIDR and TTBR0
	 * have been updated.
	 */
	arm_isb();
	if (npm != pmap_kernel()) {