| @@ -1,5114 +1,5123 @@ | | | @@ -1,5114 +1,5123 @@ |
1 | /* $NetBSD: pmap.c,v 1.211.2.16 2010/10/30 08:41:06 uebayasi Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.211.2.17 2010/10/31 03:43:02 uebayasi Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright 2003 Wasabi Systems, Inc. | | 4 | * Copyright 2003 Wasabi Systems, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Written by Steve C. Woodford for Wasabi Systems, Inc. | | 7 | * Written by Steve C. Woodford for Wasabi Systems, Inc. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the | | 15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. | | 16 | * documentation and/or other materials provided with the distribution. |
17 | * 3. All advertising materials mentioning features or use of this software | | 17 | * 3. All advertising materials mentioning features or use of this software |
18 | * must display the following acknowledgement: | | 18 | * must display the following acknowledgement: |
19 | * This product includes software developed for the NetBSD Project by | | 19 | * This product includes software developed for the NetBSD Project by |
20 | * Wasabi Systems, Inc. | | 20 | * Wasabi Systems, Inc. |
21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | | 21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
22 | * or promote products derived from this software without specific prior | | 22 | * or promote products derived from this software without specific prior |
23 | * written permission. | | 23 | * written permission. |
24 | * | | 24 | * |
25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | | 25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | | 28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
35 | * POSSIBILITY OF SUCH DAMAGE. | | 35 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ | | 36 | */ |
37 | | | 37 | |
38 | /* | | 38 | /* |
39 | * Copyright (c) 2002-2003 Wasabi Systems, Inc. | | 39 | * Copyright (c) 2002-2003 Wasabi Systems, Inc. |
40 | * Copyright (c) 2001 Richard Earnshaw | | 40 | * Copyright (c) 2001 Richard Earnshaw |
41 | * Copyright (c) 2001-2002 Christopher Gilbert | | 41 | * Copyright (c) 2001-2002 Christopher Gilbert |
42 | * All rights reserved. | | 42 | * All rights reserved. |
43 | * | | 43 | * |
44 | * 1. Redistributions of source code must retain the above copyright | | 44 | * 1. Redistributions of source code must retain the above copyright |
45 | * notice, this list of conditions and the following disclaimer. | | 45 | * notice, this list of conditions and the following disclaimer. |
46 | * 2. Redistributions in binary form must reproduce the above copyright | | 46 | * 2. Redistributions in binary form must reproduce the above copyright |
47 | * notice, this list of conditions and the following disclaimer in the | | 47 | * notice, this list of conditions and the following disclaimer in the |
48 | * documentation and/or other materials provided with the distribution. | | 48 | * documentation and/or other materials provided with the distribution. |
49 | * 3. The name of the company nor the name of the author may be used to | | 49 | * 3. The name of the company nor the name of the author may be used to |
50 | * endorse or promote products derived from this software without specific | | 50 | * endorse or promote products derived from this software without specific |
51 | * prior written permission. | | 51 | * prior written permission. |
52 | * | | 52 | * |
53 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED | | 53 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
54 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | | 54 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
55 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 55 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
56 | * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, | | 56 | * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, |
57 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | | 57 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
58 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | | 58 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
59 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 59 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
63 | * SUCH DAMAGE. | | 63 | * SUCH DAMAGE. |
64 | */ | | 64 | */ |
65 | | | 65 | |
66 | /*- | | 66 | /*- |
67 | * Copyright (c) 1999 The NetBSD Foundation, Inc. | | 67 | * Copyright (c) 1999 The NetBSD Foundation, Inc. |
68 | * All rights reserved. | | 68 | * All rights reserved. |
69 | * | | 69 | * |
70 | * This code is derived from software contributed to The NetBSD Foundation | | 70 | * This code is derived from software contributed to The NetBSD Foundation |
71 | * by Charles M. Hannum. | | 71 | * by Charles M. Hannum. |
72 | * | | 72 | * |
73 | * Redistribution and use in source and binary forms, with or without | | 73 | * Redistribution and use in source and binary forms, with or without |
74 | * modification, are permitted provided that the following conditions | | 74 | * modification, are permitted provided that the following conditions |
75 | * are met: | | 75 | * are met: |
76 | * 1. Redistributions of source code must retain the above copyright | | 76 | * 1. Redistributions of source code must retain the above copyright |
77 | * notice, this list of conditions and the following disclaimer. | | 77 | * notice, this list of conditions and the following disclaimer. |
78 | * 2. Redistributions in binary form must reproduce the above copyright | | 78 | * 2. Redistributions in binary form must reproduce the above copyright |
79 | * notice, this list of conditions and the following disclaimer in the | | 79 | * notice, this list of conditions and the following disclaimer in the |
80 | * documentation and/or other materials provided with the distribution. | | 80 | * documentation and/or other materials provided with the distribution. |
81 | * | | 81 | * |
82 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 82 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
83 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 83 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
84 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 84 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
85 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 85 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
86 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 86 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
87 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 87 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
88 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 88 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
89 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 89 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
90 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 90 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
91 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 91 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
92 | * POSSIBILITY OF SUCH DAMAGE. | | 92 | * POSSIBILITY OF SUCH DAMAGE. |
93 | */ | | 93 | */ |
94 | | | 94 | |
95 | /* | | 95 | /* |
96 | * Copyright (c) 1994-1998 Mark Brinicombe. | | 96 | * Copyright (c) 1994-1998 Mark Brinicombe. |
97 | * Copyright (c) 1994 Brini. | | 97 | * Copyright (c) 1994 Brini. |
98 | * All rights reserved. | | 98 | * All rights reserved. |
99 | * | | 99 | * |
100 | * This code is derived from software written for Brini by Mark Brinicombe | | 100 | * This code is derived from software written for Brini by Mark Brinicombe |
101 | * | | 101 | * |
102 | * Redistribution and use in source and binary forms, with or without | | 102 | * Redistribution and use in source and binary forms, with or without |
103 | * modification, are permitted provided that the following conditions | | 103 | * modification, are permitted provided that the following conditions |
104 | * are met: | | 104 | * are met: |
105 | * 1. Redistributions of source code must retain the above copyright | | 105 | * 1. Redistributions of source code must retain the above copyright |
106 | * notice, this list of conditions and the following disclaimer. | | 106 | * notice, this list of conditions and the following disclaimer. |
107 | * 2. Redistributions in binary form must reproduce the above copyright | | 107 | * 2. Redistributions in binary form must reproduce the above copyright |
108 | * notice, this list of conditions and the following disclaimer in the | | 108 | * notice, this list of conditions and the following disclaimer in the |
109 | * documentation and/or other materials provided with the distribution. | | 109 | * documentation and/or other materials provided with the distribution. |
110 | * 3. All advertising materials mentioning features or use of this software | | 110 | * 3. All advertising materials mentioning features or use of this software |
111 | * must display the following acknowledgement: | | 111 | * must display the following acknowledgement: |
112 | * This product includes software developed by Mark Brinicombe. | | 112 | * This product includes software developed by Mark Brinicombe. |
113 | * 4. The name of the author may not be used to endorse or promote products | | 113 | * 4. The name of the author may not be used to endorse or promote products |
114 | * derived from this software without specific prior written permission. | | 114 | * derived from this software without specific prior written permission. |
115 | * | | 115 | * |
116 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 116 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
117 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 117 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
118 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 118 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
119 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 119 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
120 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 120 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
121 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 121 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
122 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 122 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
123 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 123 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
124 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 124 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
125 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 125 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
125 | * | | 125 | * |
126 | * RiscBSD kernel project | | 126 | * RiscBSD kernel project |
127 | * | | 127 | * |
128 | * pmap.c | | 128 | * pmap.c |
129 | * | | 129 | * |
130 | * Machine dependent vm stuff | | 130 | * Machine dependent vm stuff |
131 | * | | 131 | * |
132 | * Created : 20/09/94 | | 132 | * Created : 20/09/94 |
133 | */ | | 133 | */ |
134 | | | 134 | |
135 | /* | | 135 | /* |
136 | * armv6 and VIPT cache support by 3am Software Foundry, | | 136 | * armv6 and VIPT cache support by 3am Software Foundry, |
137 | * Copyright (c) 2007 Microsoft | | 137 | * Copyright (c) 2007 Microsoft |
138 | */ | | 138 | */ |
139 | | | 139 | |
140 | /* | | 140 | /* |
141 | * Performance improvements, UVM changes, overhauls and part-rewrites | | 141 | * Performance improvements, UVM changes, overhauls and part-rewrites |
142 | * were contributed by Neil A. Carson <neil@causality.com>. | | 142 | * were contributed by Neil A. Carson <neil@causality.com>. |
143 | */ | | 143 | */ |
144 | | | 144 | |
145 | /* | | 145 | /* |
146 | * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables | | 146 | * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables |
147 | * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi | | 147 | * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi |
148 | * Systems, Inc. | | 148 | * Systems, Inc. |
149 | * | | 149 | * |
150 | * There are still a few things outstanding at this time: | | 150 | * There are still a few things outstanding at this time: |
151 | * | | 151 | * |
152 | * - There are some unresolved issues for MP systems: | | 152 | * - There are some unresolved issues for MP systems: |
153 | * | | 153 | * |
154 | * o The L1 metadata needs a lock, or more specifically, some places | | 154 | * o The L1 metadata needs a lock, or more specifically, some places |
155 | * need to acquire an exclusive lock when modifying L1 translation | | 155 | * need to acquire an exclusive lock when modifying L1 translation |
156 | * table entries. | | 156 | * table entries. |
157 | * | | 157 | * |
158 | * o When one cpu modifies an L1 entry, and that L1 table is also | | 158 | * o When one cpu modifies an L1 entry, and that L1 table is also |
159 | * being used by another cpu, then the latter will need to be told | | 159 | * being used by another cpu, then the latter will need to be told |
160 | * that a tlb invalidation may be necessary. (But only if the old | | 160 | * that a tlb invalidation may be necessary. (But only if the old |
161 | * domain number in the L1 entry being over-written is currently | | 161 | * domain number in the L1 entry being over-written is currently |
162 | * the active domain on that cpu). I guess there are lots more tlb | | 162 | * the active domain on that cpu). I guess there are lots more tlb |
163 | * shootdown issues too... | | 163 | * shootdown issues too... |
164 | * | | 164 | * |
165 | * o If the vector_page is at 0x00000000 instead of 0xffff0000, then | | 165 | * o If the vector_page is at 0x00000000 instead of 0xffff0000, then |
166 | * MP systems will lose big-time because of the MMU domain hack. | | 166 | * MP systems will lose big-time because of the MMU domain hack. |
167 | * The only way this can be solved (apart from moving the vector | | 167 | * The only way this can be solved (apart from moving the vector |
168 | * page to 0xffff0000) is to reserve the first 1MB of user address | | 168 | * page to 0xffff0000) is to reserve the first 1MB of user address |
169 | * space for kernel use only. This would require re-linking all | | 169 | * space for kernel use only. This would require re-linking all |
170 | * applications so that the text section starts above this 1MB | | 170 | * applications so that the text section starts above this 1MB |
171 | * boundary. | | 171 | * boundary. |
172 | * | | 172 | * |
173 | * o Tracking which VM space is resident in the cache/tlb has not yet | | 173 | * o Tracking which VM space is resident in the cache/tlb has not yet |
174 | * been implemented for MP systems. | | 174 | * been implemented for MP systems. |
175 | * | | 175 | * |
176 | * o Finally, there is a pathological condition where two cpus running | | 176 | * o Finally, there is a pathological condition where two cpus running |
177 | * two separate processes (not lwps) which happen to share an L1 | | 177 | * two separate processes (not lwps) which happen to share an L1 |
178 | * can get into a fight over one or more L1 entries. This will result | | 178 | * can get into a fight over one or more L1 entries. This will result |
179 | * in a significant slow-down if both processes are in tight loops. | | 179 | * in a significant slow-down if both processes are in tight loops. |
180 | */ | | 180 | */ |
181 | | | 181 | |
182 | /* | | 182 | /* |
183 | * Special compilation symbols | | 183 | * Special compilation symbols |
184 | * PMAP_DEBUG - Build in pmap_debug_level code | | 184 | * PMAP_DEBUG - Build in pmap_debug_level code |
185 | */ | | 185 | */ |
186 | | | 186 | |
187 | /* Include header files */ | | 187 | /* Include header files */ |
188 | | | 188 | |
189 | #include "opt_cpuoptions.h" | | 189 | #include "opt_cpuoptions.h" |
190 | #include "opt_pmap_debug.h" | | 190 | #include "opt_pmap_debug.h" |
191 | #include "opt_ddb.h" | | 191 | #include "opt_ddb.h" |
192 | #include "opt_lockdebug.h" | | 192 | #include "opt_lockdebug.h" |
193 | #include "opt_multiprocessor.h" | | 193 | #include "opt_multiprocessor.h" |
194 | #include "opt_xip.h" | | 194 | #include "opt_xip.h" |
195 | | | 195 | |
196 | #include <sys/param.h> | | 196 | #include <sys/param.h> |
197 | #include <sys/types.h> | | 197 | #include <sys/types.h> |
198 | #include <sys/kernel.h> | | 198 | #include <sys/kernel.h> |
199 | #include <sys/systm.h> | | 199 | #include <sys/systm.h> |
200 | #include <sys/proc.h> | | 200 | #include <sys/proc.h> |
201 | #include <sys/malloc.h> | | 201 | #include <sys/malloc.h> |
202 | #include <sys/pool.h> | | 202 | #include <sys/pool.h> |
203 | #include <sys/cdefs.h> | | 203 | #include <sys/cdefs.h> |
204 | #include <sys/cpu.h> | | 204 | #include <sys/cpu.h> |
205 | #include <sys/sysctl.h> | | 205 | #include <sys/sysctl.h> |
206 | | | 206 | |
207 | #include <uvm/uvm.h> | | 207 | #include <uvm/uvm.h> |
208 | | | 208 | |
209 | #include <machine/bus.h> | | 209 | #include <machine/bus.h> |
210 | #include <machine/pmap.h> | | 210 | #include <machine/pmap.h> |
211 | #include <machine/pcb.h> | | 211 | #include <machine/pcb.h> |
212 | #include <machine/param.h> | | 212 | #include <machine/param.h> |
213 | #include <arm/arm32/katelib.h> | | 213 | #include <arm/arm32/katelib.h> |
214 | | | 214 | |
215 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.211.2.16 2010/10/30 08:41:06 uebayasi Exp $"); | | 215 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.211.2.17 2010/10/31 03:43:02 uebayasi Exp $"); |
216 | | | 216 | |
217 | #ifdef PMAP_DEBUG | | 217 | #ifdef PMAP_DEBUG |
218 | | | 218 | |
219 | /* XXX need to get rid of all refs to this */ | | 219 | /* XXX need to get rid of all refs to this */ |
220 | int pmap_debug_level = 0; | | 220 | int pmap_debug_level = 0; |
221 | | | 221 | |
222 | /* | | 222 | /* |
223 | * for switching to potentially finer grained debugging | | 223 | * for switching to potentially finer grained debugging |
224 | */ | | 224 | */ |
225 | #define PDB_FOLLOW 0x0001 | | 225 | #define PDB_FOLLOW 0x0001 |
226 | #define PDB_INIT 0x0002 | | 226 | #define PDB_INIT 0x0002 |
227 | #define PDB_ENTER 0x0004 | | 227 | #define PDB_ENTER 0x0004 |
228 | #define PDB_REMOVE 0x0008 | | 228 | #define PDB_REMOVE 0x0008 |
229 | #define PDB_CREATE 0x0010 | | 229 | #define PDB_CREATE 0x0010 |
230 | #define PDB_PTPAGE 0x0020 | | 230 | #define PDB_PTPAGE 0x0020 |
231 | #define PDB_GROWKERN 0x0040 | | 231 | #define PDB_GROWKERN 0x0040 |
232 | #define PDB_BITS 0x0080 | | 232 | #define PDB_BITS 0x0080 |
233 | #define PDB_COLLECT 0x0100 | | 233 | #define PDB_COLLECT 0x0100 |
234 | #define PDB_PROTECT 0x0200 | | 234 | #define PDB_PROTECT 0x0200 |
235 | #define PDB_MAP_L1 0x0400 | | 235 | #define PDB_MAP_L1 0x0400 |
236 | #define PDB_BOOTSTRAP 0x1000 | | 236 | #define PDB_BOOTSTRAP 0x1000 |
237 | #define PDB_PARANOIA 0x2000 | | 237 | #define PDB_PARANOIA 0x2000 |
238 | #define PDB_WIRING 0x4000 | | 238 | #define PDB_WIRING 0x4000 |
239 | #define PDB_PVDUMP 0x8000 | | 239 | #define PDB_PVDUMP 0x8000 |
240 | #define PDB_VAC 0x10000 | | 240 | #define PDB_VAC 0x10000 |
241 | #define PDB_KENTER 0x20000 | | 241 | #define PDB_KENTER 0x20000 |
242 | #define PDB_KREMOVE 0x40000 | | 242 | #define PDB_KREMOVE 0x40000 |
243 | #define PDB_EXEC 0x80000 | | 243 | #define PDB_EXEC 0x80000 |
244 | | | 244 | |
245 | int debugmap = 1; | | 245 | int debugmap = 1; |
246 | int pmapdebug = 0; | | 246 | int pmapdebug = 0; |
247 | #define NPDEBUG(_lev_,_stat_) \ | | 247 | #define NPDEBUG(_lev_,_stat_) \ |
248 | if (pmapdebug & (_lev_)) \ | | 248 | if (pmapdebug & (_lev_)) \ |
249 | ((_stat_)) | | 249 | ((_stat_)) |
250 | | | 250 | |
251 | #else /* PMAP_DEBUG */ | | 251 | #else /* PMAP_DEBUG */ |
252 | #define NPDEBUG(_lev_,_stat_) /* Nothing */ | | 252 | #define NPDEBUG(_lev_,_stat_) /* Nothing */ |
253 | #endif /* PMAP_DEBUG */ | | 253 | #endif /* PMAP_DEBUG */ |
254 | | | 254 | |
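/*
 * Minimal usage sketch (npdebug_example is hypothetical, not part of
 * this file): the statement argument of NPDEBUG() above is evaluated
 * only when the matching bit is set in pmapdebug. NPDEBUG() itself
 * compiles away on non-debug kernels; the #ifdef here is only needed
 * because pmapdebug and the PDB_* bits exist solely under PMAP_DEBUG.
 */
#ifdef PMAP_DEBUG
static void
npdebug_example(vaddr_t va)
{
	pmapdebug |= PDB_ENTER;		/* e.g. poked from ddb */
	NPDEBUG(PDB_ENTER,
	    printf("npdebug_example: va=0x%lx\n", (unsigned long)va));
}
#endif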
255 | /* | | 255 | /* |
256 | * pmap_kernel() points here | | 256 | * pmap_kernel() points here |
257 | */ | | 257 | */ |
258 | static struct pmap kernel_pmap_store; | | 258 | static struct pmap kernel_pmap_store; |
259 | struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; | | 259 | struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; |
260 | | | 260 | |
261 | /* | | 261 | /* |
262 | * Which pmap is currently 'live' in the cache | | 262 | * Which pmap is currently 'live' in the cache |
263 | * | | 263 | * |
264 | * XXXSCW: Fix for SMP ... | | 264 | * XXXSCW: Fix for SMP ... |
265 | */ | | 265 | */ |
266 | static pmap_t pmap_recent_user; | | 266 | static pmap_t pmap_recent_user; |
267 | | | 267 | |
268 | /* | | 268 | /* |
269 | * Pointer to last active lwp, or NULL if it exited. | | 269 | * Pointer to last active lwp, or NULL if it exited. |
270 | */ | | 270 | */ |
271 | struct lwp *pmap_previous_active_lwp; | | 271 | struct lwp *pmap_previous_active_lwp; |
272 | | | 272 | |
273 | /* | | 273 | /* |
274 | * Pool and cache that pmap structures are allocated from. | | 274 | * Pool and cache that pmap structures are allocated from. |
275 | * We use a cache to avoid clearing the pm_l2[] array (1KB) | | 275 | * We use a cache to avoid clearing the pm_l2[] array (1KB) |
276 | * in pmap_create(). | | 276 | * in pmap_create(). |
277 | */ | | 277 | */ |
278 | static struct pool_cache pmap_cache; | | 278 | static struct pool_cache pmap_cache; |
279 | static LIST_HEAD(, pmap) pmap_pmaps; | | 279 | static LIST_HEAD(, pmap) pmap_pmaps; |
280 | | | 280 | |
281 | /* | | 281 | /* |
282 | * Pool of PV structures | | 282 | * Pool of PV structures |
283 | */ | | 283 | */ |
284 | static struct pool pmap_pv_pool; | | 284 | static struct pool pmap_pv_pool; |
285 | static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); | | 285 | static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); |
286 | static void pmap_bootstrap_pv_page_free(struct pool *, void *); | | 286 | static void pmap_bootstrap_pv_page_free(struct pool *, void *); |
287 | static struct pool_allocator pmap_bootstrap_pv_allocator = { | | 287 | static struct pool_allocator pmap_bootstrap_pv_allocator = { |
288 | pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free | | 288 | pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free |
289 | }; | | 289 | }; |
290 | | | 290 | |
291 | /* | | 291 | /* |
292 | * Pool and cache of l2_dtable structures. | | 292 | * Pool and cache of l2_dtable structures. |
293 | * We use a cache to avoid clearing the structures when they're | | 293 | * We use a cache to avoid clearing the structures when they're |
294 | * allocated. (196 bytes) | | 294 | * allocated. (196 bytes) |
295 | */ | | 295 | */ |
296 | static struct pool_cache pmap_l2dtable_cache; | | 296 | static struct pool_cache pmap_l2dtable_cache; |
297 | static vaddr_t pmap_kernel_l2dtable_kva; | | 297 | static vaddr_t pmap_kernel_l2dtable_kva; |
298 | | | 298 | |
299 | /* | | 299 | /* |
300 | * Pool and cache of L2 page descriptors. | | 300 | * Pool and cache of L2 page descriptors. |
301 | * We use a cache to avoid clearing the descriptor table | | 301 | * We use a cache to avoid clearing the descriptor table |
302 | * when they're allocated. (1KB) | | 302 | * when they're allocated. (1KB) |
303 | */ | | 303 | */ |
304 | static struct pool_cache pmap_l2ptp_cache; | | 304 | static struct pool_cache pmap_l2ptp_cache; |
305 | static vaddr_t pmap_kernel_l2ptp_kva; | | 305 | static vaddr_t pmap_kernel_l2ptp_kva; |
306 | static paddr_t pmap_kernel_l2ptp_phys; | | 306 | static paddr_t pmap_kernel_l2ptp_phys; |
307 | | | 307 | |
308 | #ifdef PMAPCOUNTERS | | 308 | #ifdef PMAPCOUNTERS |
309 | #define PMAP_EVCNT_INITIALIZER(name) \ | | 309 | #define PMAP_EVCNT_INITIALIZER(name) \ |
310 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) | | 310 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) |
311 | | | 311 | |
312 | #ifdef PMAP_CACHE_VIPT | | 312 | #ifdef PMAP_CACHE_VIPT |
313 | static struct evcnt pmap_ev_vac_clean_one = | | 313 | static struct evcnt pmap_ev_vac_clean_one = |
314 | PMAP_EVCNT_INITIALIZER("clean page (1 color)"); | | 314 | PMAP_EVCNT_INITIALIZER("clean page (1 color)"); |
315 | static struct evcnt pmap_ev_vac_flush_one = | | 315 | static struct evcnt pmap_ev_vac_flush_one = |
316 | PMAP_EVCNT_INITIALIZER("flush page (1 color)"); | | 316 | PMAP_EVCNT_INITIALIZER("flush page (1 color)"); |
317 | static struct evcnt pmap_ev_vac_flush_lots = | | 317 | static struct evcnt pmap_ev_vac_flush_lots = |
318 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors)"); | | 318 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors)"); |
319 | static struct evcnt pmap_ev_vac_flush_lots2 = | | 319 | static struct evcnt pmap_ev_vac_flush_lots2 = |
320 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)"); | | 320 | PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)"); |
321 | EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one); | | 321 | EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one); |
322 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one); | | 322 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one); |
323 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots); | | 323 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots); |
324 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2); | | 324 | EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2); |
325 | | | 325 | |
326 | static struct evcnt pmap_ev_vac_color_new = | | 326 | static struct evcnt pmap_ev_vac_color_new = |
327 | PMAP_EVCNT_INITIALIZER("new page color"); | | 327 | PMAP_EVCNT_INITIALIZER("new page color"); |
328 | static struct evcnt pmap_ev_vac_color_reuse = | | 328 | static struct evcnt pmap_ev_vac_color_reuse = |
329 | PMAP_EVCNT_INITIALIZER("ok first page color"); | | 329 | PMAP_EVCNT_INITIALIZER("ok first page color"); |
330 | static struct evcnt pmap_ev_vac_color_ok = | | 330 | static struct evcnt pmap_ev_vac_color_ok = |
331 | PMAP_EVCNT_INITIALIZER("ok page color"); | | 331 | PMAP_EVCNT_INITIALIZER("ok page color"); |
332 | static struct evcnt pmap_ev_vac_color_blind = | | 332 | static struct evcnt pmap_ev_vac_color_blind = |
333 | PMAP_EVCNT_INITIALIZER("blind page color"); | | 333 | PMAP_EVCNT_INITIALIZER("blind page color"); |
334 | static struct evcnt pmap_ev_vac_color_change = | | 334 | static struct evcnt pmap_ev_vac_color_change = |
335 | PMAP_EVCNT_INITIALIZER("change page color"); | | 335 | PMAP_EVCNT_INITIALIZER("change page color"); |
336 | static struct evcnt pmap_ev_vac_color_erase = | | 336 | static struct evcnt pmap_ev_vac_color_erase = |
337 | PMAP_EVCNT_INITIALIZER("erase page color"); | | 337 | PMAP_EVCNT_INITIALIZER("erase page color"); |
338 | static struct evcnt pmap_ev_vac_color_none = | | 338 | static struct evcnt pmap_ev_vac_color_none = |
339 | PMAP_EVCNT_INITIALIZER("no page color"); | | 339 | PMAP_EVCNT_INITIALIZER("no page color"); |
340 | static struct evcnt pmap_ev_vac_color_restore = | | 340 | static struct evcnt pmap_ev_vac_color_restore = |
341 | PMAP_EVCNT_INITIALIZER("restore page color"); | | 341 | PMAP_EVCNT_INITIALIZER("restore page color"); |
342 | | | 342 | |
343 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); | | 343 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); |
344 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); | | 344 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); |
345 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); | | 345 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); |
346 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); | | 346 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); |
347 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); | | 347 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); |
348 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); | | 348 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); |
349 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); | | 349 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); |
350 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); | | 350 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); |
351 | #endif | | 351 | #endif |
352 | | | 352 | |
353 | static struct evcnt pmap_ev_mappings = | | 353 | static struct evcnt pmap_ev_mappings = |
354 | PMAP_EVCNT_INITIALIZER("pages mapped"); | | 354 | PMAP_EVCNT_INITIALIZER("pages mapped"); |
355 | static struct evcnt pmap_ev_unmappings = | | 355 | static struct evcnt pmap_ev_unmappings = |
356 | PMAP_EVCNT_INITIALIZER("pages unmapped"); | | 356 | PMAP_EVCNT_INITIALIZER("pages unmapped"); |
357 | static struct evcnt pmap_ev_remappings = | | 357 | static struct evcnt pmap_ev_remappings = |
358 | PMAP_EVCNT_INITIALIZER("pages remapped"); | | 358 | PMAP_EVCNT_INITIALIZER("pages remapped"); |
359 | | | 359 | |
360 | EVCNT_ATTACH_STATIC(pmap_ev_mappings); | | 360 | EVCNT_ATTACH_STATIC(pmap_ev_mappings); |
361 | EVCNT_ATTACH_STATIC(pmap_ev_unmappings); | | 361 | EVCNT_ATTACH_STATIC(pmap_ev_unmappings); |
362 | EVCNT_ATTACH_STATIC(pmap_ev_remappings); | | 362 | EVCNT_ATTACH_STATIC(pmap_ev_remappings); |
363 | | | 363 | |
364 | static struct evcnt pmap_ev_kernel_mappings = | | 364 | static struct evcnt pmap_ev_kernel_mappings = |
365 | PMAP_EVCNT_INITIALIZER("kernel pages mapped"); | | 365 | PMAP_EVCNT_INITIALIZER("kernel pages mapped"); |
366 | static struct evcnt pmap_ev_kernel_unmappings = | | 366 | static struct evcnt pmap_ev_kernel_unmappings = |
367 | PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); | | 367 | PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); |
368 | static struct evcnt pmap_ev_kernel_remappings = | | 368 | static struct evcnt pmap_ev_kernel_remappings = |
369 | PMAP_EVCNT_INITIALIZER("kernel pages remapped"); | | 369 | PMAP_EVCNT_INITIALIZER("kernel pages remapped"); |
370 | | | 370 | |
371 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); | | 371 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); |
372 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); | | 372 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); |
373 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); | | 373 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); |
374 | | | 374 | |
375 | static struct evcnt pmap_ev_kenter_mappings = | | 375 | static struct evcnt pmap_ev_kenter_mappings = |
376 | PMAP_EVCNT_INITIALIZER("kenter pages mapped"); | | 376 | PMAP_EVCNT_INITIALIZER("kenter pages mapped"); |
377 | static struct evcnt pmap_ev_kenter_unmappings = | | 377 | static struct evcnt pmap_ev_kenter_unmappings = |
378 | PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); | | 378 | PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); |
379 | static struct evcnt pmap_ev_kenter_remappings = | | 379 | static struct evcnt pmap_ev_kenter_remappings = |
380 | PMAP_EVCNT_INITIALIZER("kenter pages remapped"); | | 380 | PMAP_EVCNT_INITIALIZER("kenter pages remapped"); |
381 | static struct evcnt pmap_ev_pt_mappings = | | 381 | static struct evcnt pmap_ev_pt_mappings = |
382 | PMAP_EVCNT_INITIALIZER("page table pages mapped"); | | 382 | PMAP_EVCNT_INITIALIZER("page table pages mapped"); |
383 | | | 383 | |
384 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); | | 384 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); |
385 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); | | 385 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); |
386 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); | | 386 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); |
387 | EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); | | 387 | EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); |
388 | | | 388 | |
389 | #ifdef PMAP_CACHE_VIPT | | 389 | #ifdef PMAP_CACHE_VIPT |
390 | static struct evcnt pmap_ev_exec_mappings = | | 390 | static struct evcnt pmap_ev_exec_mappings = |
391 | PMAP_EVCNT_INITIALIZER("exec pages mapped"); | | 391 | PMAP_EVCNT_INITIALIZER("exec pages mapped"); |
392 | static struct evcnt pmap_ev_exec_cached = | | 392 | static struct evcnt pmap_ev_exec_cached = |
393 | PMAP_EVCNT_INITIALIZER("exec pages cached"); | | 393 | PMAP_EVCNT_INITIALIZER("exec pages cached"); |
394 | | | 394 | |
395 | EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); | | 395 | EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); |
396 | EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); | | 396 | EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); |
397 | | | 397 | |
398 | static struct evcnt pmap_ev_exec_synced = | | 398 | static struct evcnt pmap_ev_exec_synced = |
399 | PMAP_EVCNT_INITIALIZER("exec pages synced"); | | 399 | PMAP_EVCNT_INITIALIZER("exec pages synced"); |
400 | static struct evcnt pmap_ev_exec_synced_map = | | 400 | static struct evcnt pmap_ev_exec_synced_map = |
401 | PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); | | 401 | PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); |
402 | static struct evcnt pmap_ev_exec_synced_unmap = | | 402 | static struct evcnt pmap_ev_exec_synced_unmap = |
403 | PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); | | 403 | PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); |
404 | static struct evcnt pmap_ev_exec_synced_remap = | | 404 | static struct evcnt pmap_ev_exec_synced_remap = |
405 | PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); | | 405 | PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); |
406 | static struct evcnt pmap_ev_exec_synced_clearbit = | | 406 | static struct evcnt pmap_ev_exec_synced_clearbit = |
407 | PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); | | 407 | PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); |
408 | static struct evcnt pmap_ev_exec_synced_kremove = | | 408 | static struct evcnt pmap_ev_exec_synced_kremove = |
409 | PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); | | 409 | PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); |
410 | | | 410 | |
411 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); | | 411 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); |
412 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); | | 412 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); |
413 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); | | 413 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); |
414 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); | | 414 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); |
415 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); | | 415 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); |
416 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); | | 416 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); |
417 | | | 417 | |
418 | static struct evcnt pmap_ev_exec_discarded_unmap = | | 418 | static struct evcnt pmap_ev_exec_discarded_unmap = |
419 | PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); | | 419 | PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); |
420 | static struct evcnt pmap_ev_exec_discarded_zero = | | 420 | static struct evcnt pmap_ev_exec_discarded_zero = |
421 | PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); | | 421 | PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); |
422 | static struct evcnt pmap_ev_exec_discarded_copy = | | 422 | static struct evcnt pmap_ev_exec_discarded_copy = |
423 | PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); | | 423 | PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); |
424 | static struct evcnt pmap_ev_exec_discarded_page_protect = | | 424 | static struct evcnt pmap_ev_exec_discarded_page_protect = |
425 | PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); | | 425 | PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); |
426 | static struct evcnt pmap_ev_exec_discarded_clearbit = | | 426 | static struct evcnt pmap_ev_exec_discarded_clearbit = |
427 | PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); | | 427 | PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); |
428 | static struct evcnt pmap_ev_exec_discarded_kremove = | | 428 | static struct evcnt pmap_ev_exec_discarded_kremove = |
429 | PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); | | 429 | PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); |
430 | | | 430 | |
431 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); | | 431 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); |
432 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); | | 432 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); |
433 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); | | 433 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); |
434 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); | | 434 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); |
435 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); | | 435 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); |
436 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); | | 436 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); |
437 | #endif /* PMAP_CACHE_VIPT */ | | 437 | #endif /* PMAP_CACHE_VIPT */ |
438 | | | 438 | |
439 | static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); | | 439 | static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); |
440 | static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); | | 440 | static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); |
441 | static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); | | 441 | static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); |
442 | | | 442 | |
443 | EVCNT_ATTACH_STATIC(pmap_ev_updates); | | 443 | EVCNT_ATTACH_STATIC(pmap_ev_updates); |
444 | EVCNT_ATTACH_STATIC(pmap_ev_collects); | | 444 | EVCNT_ATTACH_STATIC(pmap_ev_collects); |
445 | EVCNT_ATTACH_STATIC(pmap_ev_activations); | | 445 | EVCNT_ATTACH_STATIC(pmap_ev_activations); |
446 | | | 446 | |
447 | #define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) | | 447 | #define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) |
448 | #else | | 448 | #else |
449 | #define PMAPCOUNT(x) ((void)0) | | 449 | #define PMAPCOUNT(x) ((void)0) |
450 | #endif | | 450 | #endif |
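/*
 * Usage sketch (pmapcount_example is hypothetical): PMAPCOUNT(x)
 * pastes "pmap_ev_" onto its argument, so PMAPCOUNT(mappings) bumps
 * pmap_ev_mappings, and it compiles away to ((void)0) on kernels
 * built without PMAPCOUNTERS, so call sites need no #ifdef.
 */
static inline void
pmapcount_example(void)
{
	PMAPCOUNT(mappings);	/* one more "pages mapped" event */
}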
451 | | | 451 | |
452 | /* | | 452 | /* |
453 | * pmap copy/zero page, and mem(5) hook point | | 453 | * pmap copy/zero page, and mem(5) hook point |
454 | */ | | 454 | */ |
455 | static pt_entry_t *csrc_pte, *cdst_pte; | | 455 | static pt_entry_t *csrc_pte, *cdst_pte; |
456 | static vaddr_t csrcp, cdstp; | | 456 | static vaddr_t csrcp, cdstp; |
457 | vaddr_t memhook; /* used by mem.c */ | | 457 | vaddr_t memhook; /* used by mem.c */ |
458 | kmutex_t memlock; /* used by mem.c */ | | 458 | kmutex_t memlock; /* used by mem.c */ |
459 | void *zeropage; /* used by mem.c */ | | 459 | void *zeropage; /* used by mem.c */ |
460 | extern void *msgbufaddr; | | 460 | extern void *msgbufaddr; |
461 | int pmap_kmpages; | | 461 | int pmap_kmpages; |
462 | /* | | 462 | /* |
463 | * Flag to indicate if pmap_init() has done its thing | | 463 | * Flag to indicate if pmap_init() has done its thing |
464 | */ | | 464 | */ |
465 | bool pmap_initialized; | | 465 | bool pmap_initialized; |
466 | | | 466 | |
467 | /* | | 467 | /* |
468 | * Misc. locking data structures | | 468 | * Misc. locking data structures |
469 | */ | | 469 | */ |
470 | | | 470 | |
471 | #if 0 /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */ | | 471 | #if 0 /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */ |
472 | static struct lock pmap_main_lock; | | 472 | static struct lock pmap_main_lock; |
473 | | | 473 | |
474 | #define PMAP_MAP_TO_HEAD_LOCK() \ | | 474 | #define PMAP_MAP_TO_HEAD_LOCK() \ |
475 | (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL) | | 475 | (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL) |
476 | #define PMAP_MAP_TO_HEAD_UNLOCK() \ | | 476 | #define PMAP_MAP_TO_HEAD_UNLOCK() \ |
477 | (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL) | | 477 | (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL) |
478 | #define PMAP_HEAD_TO_MAP_LOCK() \ | | 478 | #define PMAP_HEAD_TO_MAP_LOCK() \ |
479 | (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL) | | 479 | (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL) |
480 | #define PMAP_HEAD_TO_MAP_UNLOCK() \ | | 480 | #define PMAP_HEAD_TO_MAP_UNLOCK() \ |
481 | spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0) | | 481 | spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0) |
482 | #else | | 482 | #else |
483 | #define PMAP_MAP_TO_HEAD_LOCK() /* null */ | | 483 | #define PMAP_MAP_TO_HEAD_LOCK() /* null */ |
484 | #define PMAP_MAP_TO_HEAD_UNLOCK() /* null */ | | 484 | #define PMAP_MAP_TO_HEAD_UNLOCK() /* null */ |
485 | #define PMAP_HEAD_TO_MAP_LOCK() /* null */ | | 485 | #define PMAP_HEAD_TO_MAP_LOCK() /* null */ |
486 | #define PMAP_HEAD_TO_MAP_UNLOCK() /* null */ | | 486 | #define PMAP_HEAD_TO_MAP_UNLOCK() /* null */ |
487 | #endif | | 487 | #endif |
488 | | | 488 | |
489 | #define pmap_acquire_pmap_lock(pm) \ | | 489 | #define pmap_acquire_pmap_lock(pm) \ |
490 | do { \ | | 490 | do { \ |
491 | if ((pm) != pmap_kernel()) \ | | 491 | if ((pm) != pmap_kernel()) \ |
492 | mutex_enter(&(pm)->pm_lock); \ | | 492 | mutex_enter(&(pm)->pm_lock); \ |
493 | } while (/*CONSTCOND*/0) | | 493 | } while (/*CONSTCOND*/0) |
494 | | | 494 | |
495 | #define pmap_release_pmap_lock(pm) \ | | 495 | #define pmap_release_pmap_lock(pm) \ |
496 | do { \ | | 496 | do { \ |
497 | if ((pm) != pmap_kernel()) \ | | 497 | if ((pm) != pmap_kernel()) \ |
498 | mutex_exit(&(pm)->pm_lock); \ | | 498 | mutex_exit(&(pm)->pm_lock); \ |
499 | } while (/*CONSTCOND*/0) | | 499 | } while (/*CONSTCOND*/0) |
500 | | | 500 | |
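/*
 * Intended usage pattern (pmap_lock_example is hypothetical): both
 * macros above deliberately skip pmap_kernel(), so the same bracketing
 * works for any pmap without special-casing the kernel pmap.
 */
static inline void
pmap_lock_example(pmap_t pm)
{
	pmap_acquire_pmap_lock(pm);
	/* ... modify pm's L1/L2 metadata here ... */
	pmap_release_pmap_lock(pm);
}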
501 | | | 501 | |
502 | /* | | 502 | /* |
503 | * Metadata for L1 translation tables. | | 503 | * Metadata for L1 translation tables. |
504 | */ | | 504 | */ |
505 | struct l1_ttable { | | 505 | struct l1_ttable { |
506 | /* Entry on the L1 Table list */ | | 506 | /* Entry on the L1 Table list */ |
507 | SLIST_ENTRY(l1_ttable) l1_link; | | 507 | SLIST_ENTRY(l1_ttable) l1_link; |
508 | | | 508 | |
509 | /* Entry on the L1 Least Recently Used list */ | | 509 | /* Entry on the L1 Least Recently Used list */ |
510 | TAILQ_ENTRY(l1_ttable) l1_lru; | | 510 | TAILQ_ENTRY(l1_ttable) l1_lru; |
511 | | | 511 | |
512 | /* Track how many domains are allocated from this L1 */ | | 512 | /* Track how many domains are allocated from this L1 */ |
513 | volatile u_int l1_domain_use_count; | | 513 | volatile u_int l1_domain_use_count; |
514 | | | 514 | |
515 | /* | | 515 | /* |
516 | * A free-list of domain numbers for this L1. | | 516 | * A free-list of domain numbers for this L1. |
517 | * We avoid using ffs() and a bitmap to track domains since ffs() | | 517 | * We avoid using ffs() and a bitmap to track domains since ffs() |
518 | * is slow on ARM. | | 518 | * is slow on ARM. |
519 | */ | | 519 | */ |
520 | u_int8_t l1_domain_first; | | 520 | u_int8_t l1_domain_first; |
521 | u_int8_t l1_domain_free[PMAP_DOMAINS]; | | 521 | u_int8_t l1_domain_free[PMAP_DOMAINS]; |
522 | | | 522 | |
523 | /* Physical address of this L1 page table */ | | 523 | /* Physical address of this L1 page table */ |
524 | paddr_t l1_physaddr; | | 524 | paddr_t l1_physaddr; |
525 | | | 525 | |
526 | /* KVA of this L1 page table */ | | 526 | /* KVA of this L1 page table */ |
527 | pd_entry_t *l1_kva; | | 527 | pd_entry_t *l1_kva; |
528 | }; | | 528 | }; |
529 | | | 529 | |
530 | /* | | 530 | /* |
531 | * Convert a virtual address into its L1 table index. That is, the | | 531 | * Convert a virtual address into its L1 table index. That is, the |
532 | * index used to locate the L2 descriptor table pointer in an L1 table. | | 532 | * index used to locate the L2 descriptor table pointer in an L1 table. |
533 | * This is basically used to index l1->l1_kva[]. | | 533 | * This is basically used to index l1->l1_kva[]. |
534 | * | | 534 | * |
535 | * Each L2 descriptor table represents 1MB of VA space. | | 535 | * Each L2 descriptor table represents 1MB of VA space. |
536 | */ | | 536 | */ |
537 | #define L1_IDX(va) (((vaddr_t)(va)) >> L1_S_SHIFT) | | 537 | #define L1_IDX(va) (((vaddr_t)(va)) >> L1_S_SHIFT) |
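/*
 * Worked example (assumes the usual arm32 L1_S_SHIFT of 20, matching
 * the 1MB sections described above): L1_IDX(0x00301234) ==
 * 0x00301234 >> 20 == 3, so l1->l1_kva[3] is the slot covering
 * VA 0x00300000 - 0x003fffff.
 */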
538 | | | 538 | |
539 | /* | | 539 | /* |
540 | * L1 Page Tables are tracked using a Least Recently Used list. | | 540 | * L1 Page Tables are tracked using a Least Recently Used list. |
541 | * - New L1s are allocated from the HEAD. | | 541 | * - New L1s are allocated from the HEAD. |
542 | * - Freed L1s are added to the TAIL. | | 542 | * - Freed L1s are added to the TAIL. |
543 | * - Recently accessed L1s (where an 'access' is some change to one of | | 543 | * - Recently accessed L1s (where an 'access' is some change to one of |
544 | * the userland pmaps which owns this L1) are moved to the TAIL. | | 544 | * the userland pmaps which owns this L1) are moved to the TAIL. |
545 | */ | | 545 | */ |
546 | static TAILQ_HEAD(, l1_ttable) l1_lru_list; | | 546 | static TAILQ_HEAD(, l1_ttable) l1_lru_list; |
547 | static struct simplelock l1_lru_lock; | | 547 | static struct simplelock l1_lru_lock; |
548 | | | 548 | |
549 | /* | | 549 | /* |
550 | * A list of all L1 tables | | 550 | * A list of all L1 tables |
551 | */ | | 551 | */ |
552 | static SLIST_HEAD(, l1_ttable) l1_list; | | 552 | static SLIST_HEAD(, l1_ttable) l1_list; |
553 | | | 553 | |
554 | /* | | 554 | /* |
555 | * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. | | 555 | * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. |
556 | * | | 556 | * |
557 | * This is normally 16MB worth of L2 page descriptors for any given pmap. | | 557 | * This is normally 16MB worth of L2 page descriptors for any given pmap. |
558 | * Reference counts are maintained for L2 descriptors so they can be | | 558 | * Reference counts are maintained for L2 descriptors so they can be |
559 | * freed when empty. | | 559 | * freed when empty. |
560 | */ | | 560 | */ |
561 | struct l2_dtable { | | 561 | struct l2_dtable { |
562 | /* The number of L2 page descriptors allocated to this l2_dtable */ | | 562 | /* The number of L2 page descriptors allocated to this l2_dtable */ |
563 | u_int l2_occupancy; | | 563 | u_int l2_occupancy; |
564 | | | 564 | |
565 | /* List of L2 page descriptors */ | | 565 | /* List of L2 page descriptors */ |
566 | struct l2_bucket { | | 566 | struct l2_bucket { |
567 | pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ | | 567 | pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ |
568 | paddr_t l2b_phys; /* Physical address of same */ | | 568 | paddr_t l2b_phys; /* Physical address of same */ |
569 | u_short l2b_l1idx; /* This L2 table's L1 index */ | | 569 | u_short l2b_l1idx; /* This L2 table's L1 index */ |
570 | u_short l2b_occupancy; /* How many active descriptors */ | | 570 | u_short l2b_occupancy; /* How many active descriptors */ |
571 | } l2_bucket[L2_BUCKET_SIZE]; | | 571 | } l2_bucket[L2_BUCKET_SIZE]; |
572 | }; | | 572 | }; |
573 | | | 573 | |
574 | /* | | 574 | /* |
575 | * Given an L1 table index, calculate the corresponding l2_dtable index | | 575 | * Given an L1 table index, calculate the corresponding l2_dtable index |
576 | * and bucket index within the l2_dtable. | | 576 | * and bucket index within the l2_dtable. |
577 | */ | | 577 | */ |
578 | #define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ | | 578 | #define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ |
579 | (L2_SIZE - 1)) | | 579 | (L2_SIZE - 1)) |
580 | #define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) | | 580 | #define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) |
581 | | | 581 | |
582 | /* | | 582 | /* |
583 | * Given a virtual address, this macro returns the | | 583 | * Given a virtual address, this macro returns the |
584 | * virtual address required to drop into the next L2 bucket. | | 584 | * virtual address required to drop into the next L2 bucket. |
585 | */ | | 585 | */ |
586 | #define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) | | 586 | #define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) |
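/*
 * Worked example (assumes the usual L2_BUCKET_LOG2 of 4, i.e. 16
 * buckets of 1MB each, matching the "normally 16MB" comment above):
 * for va 0x01234567, l1idx = L1_IDX(va) = 0x12, giving
 * L2_IDX(l1idx) = 1 and L2_BUCKET(l1idx) = 2, i.e. the page table
 * for this va lives in pm->pm_l2[1]->l2_bucket[2]; and
 * L2_NEXT_BUCKET(va) = 0x01300000, the first va of the next bucket.
 */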
587 | | | 587 | |
588 | /* | | 588 | /* |
589 | * L2 allocation. | | 589 | * L2 allocation. |
590 | */ | | 590 | */ |
591 | #define pmap_alloc_l2_dtable() \ | | 591 | #define pmap_alloc_l2_dtable() \ |
592 | pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT) | | 592 | pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT) |
593 | #define pmap_free_l2_dtable(l2) \ | | 593 | #define pmap_free_l2_dtable(l2) \ |
594 | pool_cache_put(&pmap_l2dtable_cache, (l2)) | | 594 | pool_cache_put(&pmap_l2dtable_cache, (l2)) |
595 | #define pmap_alloc_l2_ptp(pap) \ | | 595 | #define pmap_alloc_l2_ptp(pap) \ |
596 | ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\ | | 596 | ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\ |
597 | PR_NOWAIT, (pap))) | | 597 | PR_NOWAIT, (pap))) |
598 | | | 598 | |
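/*
 * Allocation sketch (l2_dtable_alloc_example is hypothetical, not part
 * of this file): the helpers above use PR_NOWAIT, so a NULL return just
 * means the pool is exhausted and the caller must back out gracefully.
 * Per the pool-cache comments earlier in the file, cached objects come
 * back already initialized, so no clearing is needed here.
 */
static inline struct l2_dtable *
l2_dtable_alloc_example(void)
{
	struct l2_dtable *l2 = pmap_alloc_l2_dtable();

	if (l2 == NULL)
		return NULL;	/* out of memory; caller must cope */
	return l2;
}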
599 | /* | | 599 | /* |
600 | * We try to map the page tables write-through, if possible. However, not | | 600 | * We try to map the page tables write-through, if possible. However, not |
601 | * all CPUs have a write-through cache mode, so on those we have to sync | | 601 | * all CPUs have a write-through cache mode, so on those we have to sync |
602 | * the cache when we frob page tables. | | 602 | * the cache when we frob page tables. |
603 | * | | 603 | * |
604 | * We try to evaluate this at compile time, if possible. However, it's | | 604 | * We try to evaluate this at compile time, if possible. However, it's |
605 | * not always possible to do that, hence this run-time var. | | 605 | * not always possible to do that, hence this run-time var. |
606 | */ | | 606 | */ |
607 | int pmap_needs_pte_sync; | | 607 | int pmap_needs_pte_sync; |
608 | | | 608 | |
609 | /* | | 609 | /* |
610 | * Real definition of pv_entry. | | 610 | * Real definition of pv_entry. |
611 | */ | | 611 | */ |
612 | struct pv_entry { | | 612 | struct pv_entry { |
613 | SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ | | 613 | SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ |
614 | pmap_t pv_pmap; /* pmap where mapping lies */ | | 614 | pmap_t pv_pmap; /* pmap where mapping lies */ |
615 | vaddr_t pv_va; /* virtual address for mapping */ | | 615 | vaddr_t pv_va; /* virtual address for mapping */ |
616 | u_int pv_flags; /* flags */ | | 616 | u_int pv_flags; /* flags */ |
617 | }; | | 617 | }; |
618 | | | 618 | |
619 | /* | | 619 | /* |
620 | * Macro to determine if a mapping might be resident in the | | 620 | * Macro to determine if a mapping might be resident in the |
621 | * instruction cache and/or TLB | | 621 | * instruction cache and/or TLB |
622 | */ | | 622 | */ |
623 | #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) | | 623 | #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) |
624 | #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) | | 624 | #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) |
625 | | | 625 | |
626 | /* | | 626 | /* |
627 | * Macro to determine if a mapping might be resident in the | | 627 | * Macro to determine if a mapping might be resident in the |
628 | * data cache and/or TLB | | 628 | * data cache and/or TLB |
629 | */ | | 629 | */ |
630 | #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) | | 630 | #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) |
631 | | | 631 | |
632 | /* | | 632 | /* |
633 | * Local prototypes | | 633 | * Local prototypes |
634 | */ | | 634 | */ |
635 | static int pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t); | | 635 | static int pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t); |
636 | static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *, | | 636 | static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *, |
637 | pt_entry_t **); | | 637 | pt_entry_t **); |
638 | static bool pmap_is_current(pmap_t); | | 638 | static bool pmap_is_current(pmap_t); |
639 | static bool pmap_is_cached(pmap_t); | | 639 | static bool pmap_is_cached(pmap_t); |
640 | static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *, | | 640 | static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *, |
641 | pmap_t, vaddr_t, u_int); | | 641 | pmap_t, vaddr_t, u_int); |
642 | static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t); | | 642 | static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t); |
643 | static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 643 | static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
644 | static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t, | | 644 | static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t, |
645 | u_int, u_int); | | 645 | u_int, u_int); |
646 | | | 646 | |
647 | static void pmap_pinit(pmap_t); | | 647 | static void pmap_pinit(pmap_t); |
648 | static int pmap_pmap_ctor(void *, void *, int); | | 648 | static int pmap_pmap_ctor(void *, void *, int); |
649 | | | 649 | |
650 | static void pmap_alloc_l1(pmap_t); | | 650 | static void pmap_alloc_l1(pmap_t); |
651 | static void pmap_free_l1(pmap_t); | | 651 | static void pmap_free_l1(pmap_t); |
652 | static void pmap_use_l1(pmap_t); | | 652 | static void pmap_use_l1(pmap_t); |
653 | | | 653 | |
654 | static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t); | | 654 | static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t); |
655 | static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t); | | 655 | static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t); |
656 | static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); | | 656 | static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); |
657 | static int pmap_l2ptp_ctor(void *, void *, int); | | 657 | static int pmap_l2ptp_ctor(void *, void *, int); |
658 | static int pmap_l2dtable_ctor(void *, void *, int); | | 658 | static int pmap_l2dtable_ctor(void *, void *, int); |
659 | | | 659 | |
660 | static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 660 | static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
661 | #ifdef PMAP_CACHE_VIVT | | 661 | #ifdef PMAP_CACHE_VIVT |
662 | static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 662 | static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
663 | static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); | | 663 | static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); |
664 | #endif | | 664 | #endif |
665 | | | 665 | |
666 | static void pmap_clearbit(struct vm_page *, u_int); | | 666 | static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int); |
667 | #ifdef PMAP_CACHE_VIVT | | 667 | #ifdef PMAP_CACHE_VIVT |
668 | static int pmap_clean_page(struct pv_entry *, bool); | | 668 | static int pmap_clean_page(struct pv_entry *, bool); |
669 | #endif | | 669 | #endif |
670 | #ifdef PMAP_CACHE_VIPT | | 670 | #ifdef PMAP_CACHE_VIPT |
671 | static void pmap_syncicache_page(struct vm_page_md *, paddr_t); | | 671 | static void pmap_syncicache_page(struct vm_page_md *, paddr_t); |
672 | enum pmap_flush_op { | | 672 | enum pmap_flush_op { |
673 | PMAP_FLUSH_PRIMARY, | | 673 | PMAP_FLUSH_PRIMARY, |
674 | PMAP_FLUSH_SECONDARY, | | 674 | PMAP_FLUSH_SECONDARY, |
675 | PMAP_CLEAN_PRIMARY | | 675 | PMAP_CLEAN_PRIMARY |
676 | }; | | 676 | }; |
677 | static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op); | | 677 | static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op); |
678 | #endif | | 678 | #endif |
679 | static void pmap_page_remove(struct vm_page *); | | 679 | static void pmap_page_remove(struct vm_page_md *, paddr_t); |
680 | | | 680 | |
681 | static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); | | 681 | static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); |
682 | static vaddr_t kernel_pt_lookup(paddr_t); | | 682 | static vaddr_t kernel_pt_lookup(paddr_t); |
683 | | | 683 | |
684 | | | 684 | |
685 | /* | | 685 | /* |
686 | * External function prototypes | | 686 | * External function prototypes |
687 | */ | | 687 | */ |
688 | extern void bzero_page(vaddr_t); | | 688 | extern void bzero_page(vaddr_t); |
689 | extern void bcopy_page(vaddr_t, vaddr_t); | | 689 | extern void bcopy_page(vaddr_t, vaddr_t); |
690 | | | 690 | |
691 | /* | | 691 | /* |
692 | * Misc variables | | 692 | * Misc variables |
693 | */ | | 693 | */ |
694 | vaddr_t virtual_avail; | | 694 | vaddr_t virtual_avail; |
695 | vaddr_t virtual_end; | | 695 | vaddr_t virtual_end; |
696 | vaddr_t pmap_curmaxkvaddr; | | 696 | vaddr_t pmap_curmaxkvaddr; |
697 | | | 697 | |
698 | paddr_t avail_start; | | 698 | paddr_t avail_start; |
699 | paddr_t avail_end; | | 699 | paddr_t avail_end; |
700 | | | 700 | |
701 | pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); | | 701 | pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); |
702 | pv_addr_t kernelpages; | | 702 | pv_addr_t kernelpages; |
703 | pv_addr_t kernel_l1pt; | | 703 | pv_addr_t kernel_l1pt; |
704 | pv_addr_t systempage; | | 704 | pv_addr_t systempage; |
705 | | | 705 | |
706 | /* Function to set the debug level of the pmap code */ | | 706 | /* Function to set the debug level of the pmap code */ |
707 | | | 707 | |
708 | #ifdef PMAP_DEBUG | | 708 | #ifdef PMAP_DEBUG |
709 | void | | 709 | void |
710 | pmap_debug(int level) | | 710 | pmap_debug(int level) |
711 | { | | 711 | { |
712 | pmap_debug_level = level; | | 712 | pmap_debug_level = level; |
713 | printf("pmap_debug: level=%d\n", pmap_debug_level); | | 713 | printf("pmap_debug: level=%d\n", pmap_debug_level); |
714 | } | | 714 | } |
715 | #endif /* PMAP_DEBUG */ | | 715 | #endif /* PMAP_DEBUG */ |
716 | | | 716 | |
717 | /* | | 717 | /* |
718 | * A bunch of routines to conditionally flush the caches/TLB depending | | 718 | * A bunch of routines to conditionally flush the caches/TLB depending |
719 | * on whether the specified pmap actually needs to be flushed at any | | 719 | * on whether the specified pmap actually needs to be flushed at any |
720 | * given time. | | 720 | * given time. |
721 | */ | | 721 | */ |
722 | static inline void | | 722 | static inline void |
723 | pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va) | | 723 | pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va) |
724 | { | | 724 | { |
725 | | | 725 | |
726 | if (pm->pm_cstate.cs_tlb_id) | | 726 | if (pm->pm_cstate.cs_tlb_id) |
727 | cpu_tlb_flushID_SE(va); | | 727 | cpu_tlb_flushID_SE(va); |
728 | } | | 728 | } |
729 | | | 729 | |
730 | static inline void | | 730 | static inline void |
731 | pmap_tlb_flushD_SE(pmap_t pm, vaddr_t va) | | 731 | pmap_tlb_flushD_SE(pmap_t pm, vaddr_t va) |
732 | { | | 732 | { |
733 | | | 733 | |
734 | if (pm->pm_cstate.cs_tlb_d) | | 734 | if (pm->pm_cstate.cs_tlb_d) |
735 | cpu_tlb_flushD_SE(va); | | 735 | cpu_tlb_flushD_SE(va); |
736 | } | | 736 | } |
737 | | | 737 | |
738 | static inline void | | 738 | static inline void |
739 | pmap_tlb_flushID(pmap_t pm) | | 739 | pmap_tlb_flushID(pmap_t pm) |
740 | { | | 740 | { |
741 | | | 741 | |
742 | if (pm->pm_cstate.cs_tlb_id) { | | 742 | if (pm->pm_cstate.cs_tlb_id) { |
743 | cpu_tlb_flushID(); | | 743 | cpu_tlb_flushID(); |
744 | pm->pm_cstate.cs_tlb = 0; | | 744 | pm->pm_cstate.cs_tlb = 0; |
745 | } | | 745 | } |
746 | } | | 746 | } |
747 | | | 747 | |
748 | static inline void | | 748 | static inline void |
749 | pmap_tlb_flushD(pmap_t pm) | | 749 | pmap_tlb_flushD(pmap_t pm) |
750 | { | | 750 | { |
751 | | | 751 | |
752 | if (pm->pm_cstate.cs_tlb_d) { | | 752 | if (pm->pm_cstate.cs_tlb_d) { |
753 | cpu_tlb_flushD(); | | 753 | cpu_tlb_flushD(); |
754 | pm->pm_cstate.cs_tlb_d = 0; | | 754 | pm->pm_cstate.cs_tlb_d = 0; |
755 | } | | 755 | } |
756 | } | | 756 | } |
757 | | | 757 | |
758 | #ifdef PMAP_CACHE_VIVT | | 758 | #ifdef PMAP_CACHE_VIVT |
759 | static inline void | | 759 | static inline void |
760 | pmap_idcache_wbinv_range(pmap_t pm, vaddr_t va, vsize_t len) | | 760 | pmap_idcache_wbinv_range(pmap_t pm, vaddr_t va, vsize_t len) |
761 | { | | 761 | { |
762 | if (pm->pm_cstate.cs_cache_id) { | | 762 | if (pm->pm_cstate.cs_cache_id) { |
763 | cpu_idcache_wbinv_range(va, len); | | 763 | cpu_idcache_wbinv_range(va, len); |
764 | } | | 764 | } |
765 | } | | 765 | } |
766 | | | 766 | |
767 | static inline void | | 767 | static inline void |
768 | pmap_dcache_wb_range(pmap_t pm, vaddr_t va, vsize_t len, | | 768 | pmap_dcache_wb_range(pmap_t pm, vaddr_t va, vsize_t len, |
769 | bool do_inv, bool rd_only) | | 769 | bool do_inv, bool rd_only) |
770 | { | | 770 | { |
771 | | | 771 | |
772 | if (pm->pm_cstate.cs_cache_d) { | | 772 | if (pm->pm_cstate.cs_cache_d) { |
773 | if (do_inv) { | | 773 | if (do_inv) { |
774 | if (rd_only) | | 774 | if (rd_only) |
775 | cpu_dcache_inv_range(va, len); | | 775 | cpu_dcache_inv_range(va, len); |
776 | else | | 776 | else |
777 | cpu_dcache_wbinv_range(va, len); | | 777 | cpu_dcache_wbinv_range(va, len); |
778 | } else | | 778 | } else |
779 | if (!rd_only) | | 779 | if (!rd_only) |
780 | cpu_dcache_wb_range(va, len); | | 780 | cpu_dcache_wb_range(va, len); |
781 | } | | 781 | } |
782 | } | | 782 | } |
783 | | | 783 | |
784 | static inline void | | 784 | static inline void |
785 | pmap_idcache_wbinv_all(pmap_t pm) | | 785 | pmap_idcache_wbinv_all(pmap_t pm) |
786 | { | | 786 | { |
787 | if (pm->pm_cstate.cs_cache_id) { | | 787 | if (pm->pm_cstate.cs_cache_id) { |
788 | cpu_idcache_wbinv_all(); | | 788 | cpu_idcache_wbinv_all(); |
789 | pm->pm_cstate.cs_cache = 0; | | 789 | pm->pm_cstate.cs_cache = 0; |
790 | } | | 790 | } |
791 | } | | 791 | } |
792 | | | 792 | |
793 | static inline void | | 793 | static inline void |
794 | pmap_dcache_wbinv_all(pmap_t pm) | | 794 | pmap_dcache_wbinv_all(pmap_t pm) |
795 | { | | 795 | { |
796 | if (pm->pm_cstate.cs_cache_d) { | | 796 | if (pm->pm_cstate.cs_cache_d) { |
797 | cpu_dcache_wbinv_all(); | | 797 | cpu_dcache_wbinv_all(); |
798 | pm->pm_cstate.cs_cache_d = 0; | | 798 | pm->pm_cstate.cs_cache_d = 0; |
799 | } | | 799 | } |
800 | } | | 800 | } |
801 | #endif /* PMAP_CACHE_VIVT */ | | 801 | #endif /* PMAP_CACHE_VIVT */ |
802 | | | 802 | |
803 | static inline bool | | 803 | static inline bool |
804 | pmap_is_current(pmap_t pm) | | 804 | pmap_is_current(pmap_t pm) |
805 | { | | 805 | { |
806 | | | 806 | |
807 | if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm) | | 807 | if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm) |
808 | return true; | | 808 | return true; |
809 | | | 809 | |
810 | return false; | | 810 | return false; |
811 | } | | 811 | } |
812 | | | 812 | |
813 | static inline bool | | 813 | static inline bool |
814 | pmap_is_cached(pmap_t pm) | | 814 | pmap_is_cached(pmap_t pm) |
815 | { | | 815 | { |
816 | | | 816 | |
817 | if (pm == pmap_kernel() || pmap_recent_user == NULL || | | 817 | if (pm == pmap_kernel() || pmap_recent_user == NULL || |
818 | pmap_recent_user == pm) | | 818 | pmap_recent_user == pm) |
819 | return (true); | | 819 | return (true); |
820 | | | 820 | |
821 | return false; | | 821 | return false; |
822 | } | | 822 | } |
823 | | | 823 | |
824 | /* | | 824 | /* |
825 | * PTE_SYNC_CURRENT: | | 825 | * PTE_SYNC_CURRENT: |
826 | * | | 826 | * |
827 | * Make sure the pte is written out to RAM. | | 827 | * Make sure the pte is written out to RAM. |
828 | * We need to do this in any of the following cases: | | 828 | * We need to do this in any of the following cases: |
829 | * - We're dealing with the kernel pmap | | 829 | * - We're dealing with the kernel pmap |
830 | * - There is no pmap active in the cache/tlb. | | 830 | * - There is no pmap active in the cache/tlb. |
831 | * - The specified pmap is 'active' in the cache/tlb. | | 831 | * - The specified pmap is 'active' in the cache/tlb. |
832 | */ | | 832 | */ |
833 | #ifdef PMAP_INCLUDE_PTE_SYNC | | 833 | #ifdef PMAP_INCLUDE_PTE_SYNC |
834 | #define PTE_SYNC_CURRENT(pm, ptep) \ | | 834 | #define PTE_SYNC_CURRENT(pm, ptep) \ |
835 | do { \ | | 835 | do { \ |
836 | if (PMAP_NEEDS_PTE_SYNC && \ | | 836 | if (PMAP_NEEDS_PTE_SYNC && \ |
837 | pmap_is_cached(pm)) \ | | 837 | pmap_is_cached(pm)) \ |
838 | PTE_SYNC(ptep); \ | | 838 | PTE_SYNC(ptep); \ |
839 | } while (/*CONSTCOND*/0) | | 839 | } while (/*CONSTCOND*/0) |
840 | #else | | 840 | #else |
841 | #define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ | | 841 | #define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ |
842 | #endif | | 842 | #endif |
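
A minimal caller sketch, assuming "opte", "ptep" and "va" are locals of a routine that downgrades a mapping; it shows the intended ordering: update the PTE, sync it only if the pmap is live in the cache/TLB, then shoot down the stale TLB entry:

	opte = *ptep;
	*ptep = opte & ~L2_S_PROT_W;	/* revoke write permission */
	PTE_SYNC_CURRENT(pm, ptep);	/* write back only when it matters */
	pmap_tlb_flushD_SE(pm, va);	/* then invalidate the stale entry */
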
843 | | | 843 | |
844 | /* | | 844 | /* |
845 | * main pv_entry manipulation functions: | | 845 | * main pv_entry manipulation functions: |
846 | * pmap_enter_pv: enter a mapping onto a vm_page list | | 846 | * pmap_enter_pv: enter a mapping onto a vm_page list |
847 | * pmap_remove_pv: remove a mapping from a vm_page list | | 847 | * pmap_remove_pv: remove a mapping from a vm_page list |
848 | * | | 848 | * |
849 | * NOTE: pmap_enter_pv expects to lock the pvh itself | | 849 | * NOTE: pmap_enter_pv expects to lock the pvh itself |
850 | * pmap_remove_pv expects the caller to lock the pvh before calling | | 850 | * pmap_remove_pv expects the caller to lock the pvh before calling |
851 | */ | | 851 | */ |
852 | | | 852 | |
853 | /* | | 853 | /* |
854 | * pmap_enter_pv: enter a mapping onto a vm_page list | | 854 | * pmap_enter_pv: enter a mapping onto a vm_page list |
855 | * | | 855 | * |
856 | * => caller should hold the proper lock on pmap_main_lock | | 856 | * => caller should hold the proper lock on pmap_main_lock |
857 | * => caller should have pmap locked | | 857 | * => caller should have pmap locked |
858 | * => we will gain the lock on the vm_page and allocate the new pv_entry | | 858 | * => we will gain the lock on the vm_page and allocate the new pv_entry |
859 | * => caller should adjust ptp's wire_count before calling | | 859 | * => caller should adjust ptp's wire_count before calling |
860 | * => caller should not adjust pmap's wire_count | | 860 | * => caller should not adjust pmap's wire_count |
861 | */ | | 861 | */ |
862 | static void | | 862 | static void |
863 | pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm, | | 863 | pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm, |
864 | vaddr_t va, u_int flags) | | 864 | vaddr_t va, u_int flags) |
865 | { | | 865 | { |
866 | struct pv_entry **pvp; | | 866 | struct pv_entry **pvp; |
867 | | | 867 | |
868 | NPDEBUG(PDB_PVDUMP, | | 868 | NPDEBUG(PDB_PVDUMP, |
869 | printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags)); | | 869 | printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags)); |
870 | | | 870 | |
871 | pv->pv_pmap = pm; | | 871 | pv->pv_pmap = pm; |
872 | pv->pv_va = va; | | 872 | pv->pv_va = va; |
873 | pv->pv_flags = flags; | | 873 | pv->pv_flags = flags; |
874 | | | 874 | |
875 | simple_lock(&md->pvh_slock); /* lock vm_page */ | | 875 | simple_lock(&md->pvh_slock); /* lock vm_page */ |
876 | pvp = &SLIST_FIRST(&md->pvh_list); | | 876 | pvp = &SLIST_FIRST(&md->pvh_list); |
877 | #ifdef PMAP_CACHE_VIPT | | 877 | #ifdef PMAP_CACHE_VIPT |
878 | /* | | 878 | /* |
879 | * Insert unmanaged entries, writeable first, at the head of | | 879 | * Insert unmanaged entries, writeable first, at the head of |
880 | * the pv list. | | 880 | * the pv list. |
881 | */ | | 881 | */ |
882 | if (__predict_true((flags & PVF_KENTRY) == 0)) { | | 882 | if (__predict_true((flags & PVF_KENTRY) == 0)) { |
883 | while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY) | | 883 | while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY) |
884 | pvp = &SLIST_NEXT(*pvp, pv_link); | | 884 | pvp = &SLIST_NEXT(*pvp, pv_link); |
885 | } else if ((flags & PVF_WRITE) == 0) { | | 885 | } else if ((flags & PVF_WRITE) == 0) { |
886 | while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE) | | 886 | while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE) |
887 | pvp = &SLIST_NEXT(*pvp, pv_link); | | 887 | pvp = &SLIST_NEXT(*pvp, pv_link); |
888 | } | | 888 | } |
889 | #endif | | 889 | #endif |
890 | SLIST_NEXT(pv, pv_link) = *pvp; /* add to ... */ | | 890 | SLIST_NEXT(pv, pv_link) = *pvp; /* add to ... */ |
891 | *pvp = pv; /* ... locked list */ | | 891 | *pvp = pv; /* ... locked list */ |
892 | md->pvh_attrs |= flags & (PVF_REF | PVF_MOD); | | 892 | md->pvh_attrs |= flags & (PVF_REF | PVF_MOD); |
893 | #ifdef PMAP_CACHE_VIPT | | 893 | #ifdef PMAP_CACHE_VIPT |
894 | if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE) | | 894 | if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE) |
895 | md->pvh_attrs |= PVF_KMOD; | | 895 | md->pvh_attrs |= PVF_KMOD; |
896 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) | | 896 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) |
897 | md->pvh_attrs |= PVF_DIRTY; | | 897 | md->pvh_attrs |= PVF_DIRTY; |
898 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 898 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
899 | #endif | | 899 | #endif |
900 | if (pm == pmap_kernel()) { | | 900 | if (pm == pmap_kernel()) { |
901 | PMAPCOUNT(kernel_mappings); | | 901 | PMAPCOUNT(kernel_mappings); |
902 | if (flags & PVF_WRITE) | | 902 | if (flags & PVF_WRITE) |
903 | md->krw_mappings++; | | 903 | md->krw_mappings++; |
904 | else | | 904 | else |
905 | md->kro_mappings++; | | 905 | md->kro_mappings++; |
906 | } else { | | 906 | } else { |
907 | if (flags & PVF_WRITE) | | 907 | if (flags & PVF_WRITE) |
908 | md->urw_mappings++; | | 908 | md->urw_mappings++; |
909 | else | | 909 | else |
910 | md->uro_mappings++; | | 910 | md->uro_mappings++; |
911 | } | | 911 | } |
912 | | | 912 | |
913 | #ifdef PMAP_CACHE_VIPT | | 913 | #ifdef PMAP_CACHE_VIPT |
914 | /* | | 914 | /* |
915 | * If this is an exec mapping and it's the first exec mapping | | 915 | * If this is an exec mapping and it's the first exec mapping |
916 | * for this page, make sure to sync the I-cache. | | 916 | * for this page, make sure to sync the I-cache. |
917 | */ | | 917 | */ |
918 | if (PV_IS_EXEC_P(flags)) { | | 918 | if (PV_IS_EXEC_P(flags)) { |
919 | if (!PV_IS_EXEC_P(md->pvh_attrs)) { | | 919 | if (!PV_IS_EXEC_P(md->pvh_attrs)) { |
920 | pmap_syncicache_page(md, pa); | | 920 | pmap_syncicache_page(md, pa); |
921 | PMAPCOUNT(exec_synced_map); | | 921 | PMAPCOUNT(exec_synced_map); |
922 | } | | 922 | } |
923 | PMAPCOUNT(exec_mappings); | | 923 | PMAPCOUNT(exec_mappings); |
924 | } | | 924 | } |
925 | #endif | | 925 | #endif |
926 | | | 926 | |
927 | PMAPCOUNT(mappings); | | 927 | PMAPCOUNT(mappings); |
928 | simple_unlock(&md->pvh_slock); /* unlock, done! */ | | 928 | simple_unlock(&md->pvh_slock); /* unlock, done! */ |
929 | | | 929 | |
930 | if (pv->pv_flags & PVF_WIRED) | | 930 | if (pv->pv_flags & PVF_WIRED) |
931 | ++pm->pm_stats.wired_count; | | 931 | ++pm->pm_stats.wired_count; |
932 | } | | 932 | } |
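
A sketch of the expected call sequence, assuming the pv entries come from this file's pv pool (pmap_pv_pool); pmap_enter_pv() takes the pvh lock itself, so the caller only allocates:

	pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
	if (pv == NULL)
		return (ENOMEM);	/* caller decides if failure is allowed */
	pmap_enter_pv(md, pa, pv, pm, va, nflags);
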
933 | | | 933 | |
934 | /* | | 934 | /* |
935 | * | | 935 | * |
936 | * pmap_find_pv: Find a pv entry | | 936 | * pmap_find_pv: Find a pv entry |
937 | * | | 937 | * |
938 | * => caller should hold lock on vm_page | | 938 | * => caller should hold lock on vm_page |
939 | */ | | 939 | */ |
940 | static inline struct pv_entry * | | 940 | static inline struct pv_entry * |
941 | pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va) | | 941 | pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va) |
942 | { | | 942 | { |
943 | struct pv_entry *pv; | | 943 | struct pv_entry *pv; |
944 | | | 944 | |
945 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 945 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
946 | if (pm == pv->pv_pmap && va == pv->pv_va) | | 946 | if (pm == pv->pv_pmap && va == pv->pv_va) |
947 | break; | | 947 | break; |
948 | } | | 948 | } |
949 | | | 949 | |
950 | return (pv); | | 950 | return (pv); |
951 | } | | 951 | } |
952 | | | 952 | |
953 | /* | | 953 | /* |
954 | * pmap_remove_pv: try to remove a mapping from a pv_list | | 954 | * pmap_remove_pv: try to remove a mapping from a pv_list |
955 | * | | 955 | * |
956 | * => caller should hold proper lock on pmap_main_lock | | 956 | * => caller should hold proper lock on pmap_main_lock |
957 | * => pmap should be locked | | 957 | * => pmap should be locked |
958 | * => caller should hold lock on vm_page [so that attrs can be adjusted] | | 958 | * => caller should hold lock on vm_page [so that attrs can be adjusted] |
959 | * => caller should adjust ptp's wire_count and free PTP if needed | | 959 | * => caller should adjust ptp's wire_count and free PTP if needed |
960 | * => caller should NOT adjust pmap's wire_count | | 960 | * => caller should NOT adjust pmap's wire_count |
961 | * => we return the removed pv | | 961 | * => we return the removed pv |
962 | */ | | 962 | */ |
963 | static struct pv_entry * | | 963 | static struct pv_entry * |
964 | pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 964 | pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
965 | { | | 965 | { |
966 | struct pv_entry *pv, **prevptr; | | 966 | struct pv_entry *pv, **prevptr; |
967 | | | 967 | |
968 | NPDEBUG(PDB_PVDUMP, | | 968 | NPDEBUG(PDB_PVDUMP, |
969 | printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va)); | | 969 | printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va)); |
970 | | | 970 | |
971 | prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */ | | 971 | prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */ |
972 | pv = *prevptr; | | 972 | pv = *prevptr; |
973 | | | 973 | |
974 | while (pv) { | | 974 | while (pv) { |
975 | if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */ | | 975 | if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */ |
976 | NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md " | | 976 | NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md " |
977 | "%p\n", pm, md)); | | 977 | "%p, flags 0x%x\n", pm, md, pv->pv_flags)); |
978 | if (pv->pv_flags & PVF_WIRED) { | | 978 | if (pv->pv_flags & PVF_WIRED) { |
979 | --pm->pm_stats.wired_count; | | 979 | --pm->pm_stats.wired_count; |
980 | } | | 980 | } |
981 | *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */ | | 981 | *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */ |
982 | if (pm == pmap_kernel()) { | | 982 | if (pm == pmap_kernel()) { |
983 | PMAPCOUNT(kernel_unmappings); | | 983 | PMAPCOUNT(kernel_unmappings); |
984 | if (pv->pv_flags & PVF_WRITE) | | 984 | if (pv->pv_flags & PVF_WRITE) |
985 | md->krw_mappings--; | | 985 | md->krw_mappings--; |
986 | else | | 986 | else |
987 | md->kro_mappings--; | | 987 | md->kro_mappings--; |
988 | } else { | | 988 | } else { |
989 | if (pv->pv_flags & PVF_WRITE) | | 989 | if (pv->pv_flags & PVF_WRITE) |
990 | md->urw_mappings--; | | 990 | md->urw_mappings--; |
991 | else | | 991 | else |
992 | md->uro_mappings--; | | 992 | md->uro_mappings--; |
993 | } | | 993 | } |
994 | | | 994 | |
995 | PMAPCOUNT(unmappings); | | 995 | PMAPCOUNT(unmappings); |
996 | #ifdef PMAP_CACHE_VIPT | | 996 | #ifdef PMAP_CACHE_VIPT |
997 | if (!(pv->pv_flags & PVF_WRITE)) | | 997 | if (!(pv->pv_flags & PVF_WRITE)) |
998 | break; | | 998 | break; |
999 | /* | | 999 | /* |
1000 | * If this page has had an exec mapping, then if | | 1000 | * If this page has had an exec mapping, then if |
1001 | * this was the last mapping, discard the contents, | | 1001 | * this was the last mapping, discard the contents, |
1002 | * otherwise sync the i-cache for this page. | | 1002 | * otherwise sync the i-cache for this page. |
1003 | */ | | 1003 | */ |
1004 | if (PV_IS_EXEC_P(md->pvh_attrs)) { | | 1004 | if (PV_IS_EXEC_P(md->pvh_attrs)) { |
1005 | if (SLIST_EMPTY(&md->pvh_list)) { | | 1005 | if (SLIST_EMPTY(&md->pvh_list)) { |
1006 | md->pvh_attrs &= ~PVF_EXEC; | | 1006 | md->pvh_attrs &= ~PVF_EXEC; |
1007 | PMAPCOUNT(exec_discarded_unmap); | | 1007 | PMAPCOUNT(exec_discarded_unmap); |
1008 | } else { | | 1008 | } else { |
1009 | pmap_syncicache_page(md, pa); | | 1009 | pmap_syncicache_page(md, pa); |
1010 | PMAPCOUNT(exec_synced_unmap); | | 1010 | PMAPCOUNT(exec_synced_unmap); |
1011 | } | | 1011 | } |
1012 | } | | 1012 | } |
1013 | #endif /* PMAP_CACHE_VIPT */ | | 1013 | #endif /* PMAP_CACHE_VIPT */ |
1014 | break; | | 1014 | break; |
1015 | } | | 1015 | } |
1016 | prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */ | | 1016 | prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */ |
1017 | pv = *prevptr; /* advance */ | | 1017 | pv = *prevptr; /* advance */ |
1018 | } | | 1018 | } |
1019 | | | 1019 | |
1020 | #ifdef PMAP_CACHE_VIPT | | 1020 | #ifdef PMAP_CACHE_VIPT |
1021 | /* | | 1021 | /* |
1022 | * If we no longer have a WRITEABLE KENTRY at the head of the list, | | 1022 | * If we no longer have a WRITEABLE KENTRY at the head of the list, |
1023 | * clear the KMOD attribute from the page. | | 1023 | * clear the KMOD attribute from the page. |
1024 | */ | | 1024 | */ |
1025 | if (SLIST_FIRST(&md->pvh_list) == NULL | | 1025 | if (SLIST_FIRST(&md->pvh_list) == NULL |
1026 | || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) | | 1026 | || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) |
1027 | md->pvh_attrs &= ~PVF_KMOD; | | 1027 | md->pvh_attrs &= ~PVF_KMOD; |
1028 | | | 1028 | |
1029 | /* | | 1029 | /* |
1030 | * If this was a writeable page and there are no more writeable | | 1030 | * If this was a writeable page and there are no more writeable |
1031 | * mappings (ignoring KMPAGE), clear the WRITE flag and write back | | 1031 | * mappings (ignoring KMPAGE), clear the WRITE flag and write back |
1032 | * the contents to memory. | | 1032 | * the contents to memory. |
1033 | */ | | 1033 | */ |
1034 | if (md->krw_mappings + md->urw_mappings == 0) | | 1034 | if (md->krw_mappings + md->urw_mappings == 0) |
1035 | md->pvh_attrs &= ~PVF_WRITE; | | 1035 | md->pvh_attrs &= ~PVF_WRITE; |
1036 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1036 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1037 | #endif /* PMAP_CACHE_VIPT */ | | 1037 | #endif /* PMAP_CACHE_VIPT */ |
1038 | | | 1038 | |
1039 | return(pv); /* return removed pv */ | | 1039 | return(pv); /* return removed pv */ |
1040 | } | | 1040 | } |
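
Per the locking contract above, a removal site looks roughly like the following sketch (pmap_pv_pool assumed as the entry allocator, as elsewhere in this file):

	simple_lock(&md->pvh_slock);		/* caller locks the pvh */
	pv = pmap_remove_pv(md, pa, pm, va);
	simple_unlock(&md->pvh_slock);
	if (pv != NULL)
		pool_put(&pmap_pv_pool, pv);	/* recycle the entry */
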
1041 | | | 1041 | |
1042 | /* | | 1042 | /* |
1043 | * | | 1043 | * |
1044 | * pmap_modify_pv: Update pv flags | | 1044 | * pmap_modify_pv: Update pv flags |
1045 | * | | 1045 | * |
1046 | * => caller should hold lock on vm_page [so that attrs can be adjusted] | | 1046 | * => caller should hold lock on vm_page [so that attrs can be adjusted] |
1047 | * => caller should NOT adjust pmap's wire_count | | 1047 | * => caller should NOT adjust pmap's wire_count |
1048 | * => caller must call pmap_vac_me_harder() if writable status of a page | | 1048 | * => caller must call pmap_vac_me_harder() if writable status of a page |
1049 | * may have changed. | | 1049 | * may have changed. |
1050 | * => we return the old flags | | 1050 | * => we return the old flags |
1051 | * | | 1051 | * |
1052 | * Modify a physical-virtual mapping in the pv table | | 1052 | * Modify a physical-virtual mapping in the pv table |
1053 | */ | | 1053 | */ |
1054 | static u_int | | 1054 | static u_int |
1055 | pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va, | | 1055 | pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va, |
1056 | u_int clr_mask, u_int set_mask) | | 1056 | u_int clr_mask, u_int set_mask) |
1057 | { | | 1057 | { |
1058 | struct pv_entry *npv; | | 1058 | struct pv_entry *npv; |
1059 | u_int flags, oflags; | | 1059 | u_int flags, oflags; |
1060 | | | 1060 | |
1061 | KASSERT((clr_mask & PVF_KENTRY) == 0); | | 1061 | KASSERT((clr_mask & PVF_KENTRY) == 0); |
1062 | KASSERT((set_mask & PVF_KENTRY) == 0); | | 1062 | KASSERT((set_mask & PVF_KENTRY) == 0); |
1063 | | | 1063 | |
1064 | if ((npv = pmap_find_pv(md, pm, va)) == NULL) | | 1064 | if ((npv = pmap_find_pv(md, pm, va)) == NULL) |
1065 | return (0); | | 1065 | return (0); |
1066 | | | 1066 | |
1067 | NPDEBUG(PDB_PVDUMP, | | 1067 | NPDEBUG(PDB_PVDUMP, |
1068 | printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags)); | | 1068 | printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags)); |
1069 | | | 1069 | |
1070 | /* | | 1070 | /* |
1071 | * There is at least one VA mapping this page. | | 1071 | * There is at least one VA mapping this page. |
1072 | */ | | 1072 | */ |
1073 | | | 1073 | |
1074 | if (clr_mask & (PVF_REF | PVF_MOD)) { | | 1074 | if (clr_mask & (PVF_REF | PVF_MOD)) { |
1075 | md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); | | 1075 | md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); |
1076 | #ifdef PMAP_CACHE_VIPT | | 1076 | #ifdef PMAP_CACHE_VIPT |
1077 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) | | 1077 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) |
1078 | md->pvh_attrs |= PVF_DIRTY; | | 1078 | md->pvh_attrs |= PVF_DIRTY; |
1079 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1079 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1080 | #endif | | 1080 | #endif |
1081 | } | | 1081 | } |
1082 | | | 1082 | |
1083 | oflags = npv->pv_flags; | | 1083 | oflags = npv->pv_flags; |
1084 | npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; | | 1084 | npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; |
1085 | | | 1085 | |
1086 | if ((flags ^ oflags) & PVF_WIRED) { | | 1086 | if ((flags ^ oflags) & PVF_WIRED) { |
1087 | if (flags & PVF_WIRED) | | 1087 | if (flags & PVF_WIRED) |
1088 | ++pm->pm_stats.wired_count; | | 1088 | ++pm->pm_stats.wired_count; |
1089 | else | | 1089 | else |
1090 | --pm->pm_stats.wired_count; | | 1090 | --pm->pm_stats.wired_count; |
1091 | } | | 1091 | } |
1092 | | | 1092 | |
1093 | if ((flags ^ oflags) & PVF_WRITE) { | | 1093 | if ((flags ^ oflags) & PVF_WRITE) { |
1094 | if (pm == pmap_kernel()) { | | 1094 | if (pm == pmap_kernel()) { |
1095 | if (flags & PVF_WRITE) { | | 1095 | if (flags & PVF_WRITE) { |
1096 | md->krw_mappings++; | | 1096 | md->krw_mappings++; |
1097 | md->kro_mappings--; | | 1097 | md->kro_mappings--; |
1098 | } else { | | 1098 | } else { |
1099 | md->kro_mappings++; | | 1099 | md->kro_mappings++; |
1100 | md->krw_mappings--; | | 1100 | md->krw_mappings--; |
1101 | } | | 1101 | } |
1102 | } else { | | 1102 | } else { |
1103 | if (flags & PVF_WRITE) { | | 1103 | if (flags & PVF_WRITE) { |
1104 | md->urw_mappings++; | | 1104 | md->urw_mappings++; |
1105 | md->uro_mappings--; | | 1105 | md->uro_mappings--; |
1106 | } else { | | 1106 | } else { |
1107 | md->uro_mappings++; | | 1107 | md->uro_mappings++; |
1108 | md->urw_mappings--; | | 1108 | md->urw_mappings--; |
1109 | } | | 1109 | } |
1110 | } | | 1110 | } |
1111 | } | | 1111 | } |
1112 | #ifdef PMAP_CACHE_VIPT | | 1112 | #ifdef PMAP_CACHE_VIPT |
1113 | if (md->urw_mappings + md->krw_mappings == 0) | | 1113 | if (md->urw_mappings + md->krw_mappings == 0) |
1114 | md->pvh_attrs &= ~PVF_WRITE; | | 1114 | md->pvh_attrs &= ~PVF_WRITE; |
1115 | /* | | 1115 | /* |
1116 | * We have two cases here: the first is from enter_pv (new exec | | 1116 | * We have two cases here: the first is from enter_pv (new exec |
1117 | * page), the second is a combined pmap_remove_pv/pmap_enter_pv. | | 1117 | * page), the second is a combined pmap_remove_pv/pmap_enter_pv. |
1118 | * Since in the latter, pmap_enter_pv won't do anything, we just have | | 1118 | * Since in the latter, pmap_enter_pv won't do anything, we just have |
1119 | * to do what pmap_remove_pv would do. | | 1119 | * to do what pmap_remove_pv would do. |
1120 | */ | | 1120 | */ |
1121 | if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs)) | | 1121 | if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs)) |
1122 | || (PV_IS_EXEC_P(md->pvh_attrs) | | 1122 | || (PV_IS_EXEC_P(md->pvh_attrs) |
1123 | || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) { | | 1123 | || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) { |
1124 | pmap_syncicache_page(md, pa); | | 1124 | pmap_syncicache_page(md, pa); |
1125 | PMAPCOUNT(exec_synced_remap); | | 1125 | PMAPCOUNT(exec_synced_remap); |
1126 | } | | 1126 | } |
1127 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1127 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1128 | #endif | | 1128 | #endif |
1129 | | | 1129 | |
1130 | PMAPCOUNT(remappings); | | 1130 | PMAPCOUNT(remappings); |
1131 | | | 1131 | |
1132 | return (oflags); | | 1132 | return (oflags); |
1133 | } | | 1133 | } |
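
For example (a sketch), an unwire path can be a single call, letting pmap_modify_pv() keep pm_stats.wired_count consistent as a side effect:

	(void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0);
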
1134 | | | 1134 | |
1135 | /* | | 1135 | /* |
1136 | * Allocate an L1 translation table for the specified pmap. | | 1136 | * Allocate an L1 translation table for the specified pmap. |
1137 | * This is called at pmap creation time. | | 1137 | * This is called at pmap creation time. |
1138 | */ | | 1138 | */ |
1139 | static void | | 1139 | static void |
1140 | pmap_alloc_l1(pmap_t pm) | | 1140 | pmap_alloc_l1(pmap_t pm) |
1141 | { | | 1141 | { |
1142 | struct l1_ttable *l1; | | 1142 | struct l1_ttable *l1; |
1143 | u_int8_t domain; | | 1143 | u_int8_t domain; |
1144 | | | 1144 | |
1145 | /* | | 1145 | /* |
1146 | * Remove the L1 at the head of the LRU list | | 1146 | * Remove the L1 at the head of the LRU list |
1147 | */ | | 1147 | */ |
1148 | simple_lock(&l1_lru_lock); | | 1148 | simple_lock(&l1_lru_lock); |
1149 | l1 = TAILQ_FIRST(&l1_lru_list); | | 1149 | l1 = TAILQ_FIRST(&l1_lru_list); |
1150 | KDASSERT(l1 != NULL); | | 1150 | KDASSERT(l1 != NULL); |
1151 | TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); | | 1151 | TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); |
1152 | | | 1152 | |
1153 | /* | | 1153 | /* |
1154 | * Pick the first available domain number, and update | | 1154 | * Pick the first available domain number, and update |
1155 | * the link to the next number. | | 1155 | * the link to the next number. |
1156 | */ | | 1156 | */ |
1157 | domain = l1->l1_domain_first; | | 1157 | domain = l1->l1_domain_first; |
1158 | l1->l1_domain_first = l1->l1_domain_free[domain]; | | 1158 | l1->l1_domain_first = l1->l1_domain_free[domain]; |
1159 | | | 1159 | |
1160 | /* | | 1160 | /* |
1161 | * If there are still free domain numbers in this L1, | | 1161 | * If there are still free domain numbers in this L1, |
1162 | * put it back on the TAIL of the LRU list. | | 1162 | * put it back on the TAIL of the LRU list. |
1163 | */ | | 1163 | */ |
1164 | if (++l1->l1_domain_use_count < PMAP_DOMAINS) | | 1164 | if (++l1->l1_domain_use_count < PMAP_DOMAINS) |
1165 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); | | 1165 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); |
1166 | | | 1166 | |
1167 | simple_unlock(&l1_lru_lock); | | 1167 | simple_unlock(&l1_lru_lock); |
1168 | | | 1168 | |
1169 | /* | | 1169 | /* |
1170 | * Fix up the relevant bits in the pmap structure | | 1170 | * Fix up the relevant bits in the pmap structure |
1171 | */ | | 1171 | */ |
1172 | pm->pm_l1 = l1; | | 1172 | pm->pm_l1 = l1; |
1173 | pm->pm_domain = domain; | | 1173 | pm->pm_domain = domain; |
1174 | } | | 1174 | } |
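
The domain bookkeeping above is a free list threaded through an array: l1_domain_free[d] names the next free domain after d, and l1_domain_first is the head. A standalone sketch of the same pattern (illustrative names only):

	u_int8_t free_next[PMAP_DOMAINS];	/* free_next[d] = next free domain */
	u_int8_t first_free;			/* head of the free list */

	u_int8_t
	domain_alloc(void)
	{
		u_int8_t d = first_free;

		first_free = free_next[d];	/* pop the head, as above */
		return (d);
	}

	void
	domain_free(u_int8_t d)
	{
		free_next[d] = first_free;	/* push, as pmap_free_l1 below */
		first_free = d;
	}
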
1175 | | | 1175 | |
1176 | /* | | 1176 | /* |
1177 | * Free an L1 translation table. | | 1177 | * Free an L1 translation table. |
1178 | * This is called at pmap destruction time. | | 1178 | * This is called at pmap destruction time. |
1179 | */ | | 1179 | */ |
1180 | static void | | 1180 | static void |
1181 | pmap_free_l1(pmap_t pm) | | 1181 | pmap_free_l1(pmap_t pm) |
1182 | { | | 1182 | { |
1183 | struct l1_ttable *l1 = pm->pm_l1; | | 1183 | struct l1_ttable *l1 = pm->pm_l1; |
1184 | | | 1184 | |
1185 | simple_lock(&l1_lru_lock); | | 1185 | simple_lock(&l1_lru_lock); |
1186 | | | 1186 | |
1187 | /* | | 1187 | /* |
1188 | * If this L1 is currently on the LRU list, remove it. | | 1188 | * If this L1 is currently on the LRU list, remove it. |
1189 | */ | | 1189 | */ |
1190 | if (l1->l1_domain_use_count < PMAP_DOMAINS) | | 1190 | if (l1->l1_domain_use_count < PMAP_DOMAINS) |
1191 | TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); | | 1191 | TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); |
1192 | | | 1192 | |
1193 | /* | | 1193 | /* |
1194 | * Free up the domain number which was allocated to the pmap | | 1194 | * Free up the domain number which was allocated to the pmap |
1195 | */ | | 1195 | */ |
1196 | l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first; | | 1196 | l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first; |
1197 | l1->l1_domain_first = pm->pm_domain; | | 1197 | l1->l1_domain_first = pm->pm_domain; |
1198 | l1->l1_domain_use_count--; | | 1198 | l1->l1_domain_use_count--; |
1199 | | | 1199 | |
1200 | /* | | 1200 | /* |
1201 | * The L1 now must have at least 1 free domain, so add | | 1201 | * The L1 now must have at least 1 free domain, so add |
1202 | * it back to the LRU list. If the use count is zero, | | 1202 | * it back to the LRU list. If the use count is zero, |
1203 | * put it at the head of the list, otherwise it goes | | 1203 | * put it at the head of the list, otherwise it goes |
1204 | * to the tail. | | 1204 | * to the tail. |
1205 | */ | | 1205 | */ |
1206 | if (l1->l1_domain_use_count == 0) | | 1206 | if (l1->l1_domain_use_count == 0) |
1207 | TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); | | 1207 | TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); |
1208 | else | | 1208 | else |
1209 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); | | 1209 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); |
1210 | | | 1210 | |
1211 | simple_unlock(&l1_lru_lock); | | 1211 | simple_unlock(&l1_lru_lock); |
1212 | } | | 1212 | } |
1213 | | | 1213 | |
1214 | static inline void | | 1214 | static inline void |
1215 | pmap_use_l1(pmap_t pm) | | 1215 | pmap_use_l1(pmap_t pm) |
1216 | { | | 1216 | { |
1217 | struct l1_ttable *l1; | | 1217 | struct l1_ttable *l1; |
1218 | | | 1218 | |
1219 | /* | | 1219 | /* |
1220 | * Do nothing if we're in interrupt context. | | 1220 | * Do nothing if we're in interrupt context. |
1221 | * Access to an L1 by the kernel pmap must not affect | | 1221 | * Access to an L1 by the kernel pmap must not affect |
1222 | * the LRU list. | | 1222 | * the LRU list. |
1223 | */ | | 1223 | */ |
1224 | if (cpu_intr_p() || pm == pmap_kernel()) | | 1224 | if (cpu_intr_p() || pm == pmap_kernel()) |
1225 | return; | | 1225 | return; |
1226 | | | 1226 | |
1227 | l1 = pm->pm_l1; | | 1227 | l1 = pm->pm_l1; |
1228 | | | 1228 | |
1229 | /* | | 1229 | /* |
1230 | * If the L1 is not currently on the LRU list, just return | | 1230 | * If the L1 is not currently on the LRU list, just return |
1231 | */ | | 1231 | */ |
1232 | if (l1->l1_domain_use_count == PMAP_DOMAINS) | | 1232 | if (l1->l1_domain_use_count == PMAP_DOMAINS) |
1233 | return; | | 1233 | return; |
1234 | | | 1234 | |
1235 | simple_lock(&l1_lru_lock); | | 1235 | simple_lock(&l1_lru_lock); |
1236 | | | 1236 | |
1237 | /* | | 1237 | /* |
1238 | * Check the use count again, now that we've acquired the lock | | 1238 | * Check the use count again, now that we've acquired the lock |
1239 | */ | | 1239 | */ |
1240 | if (l1->l1_domain_use_count == PMAP_DOMAINS) { | | 1240 | if (l1->l1_domain_use_count == PMAP_DOMAINS) { |
1241 | simple_unlock(&l1_lru_lock); | | 1241 | simple_unlock(&l1_lru_lock); |
1242 | return; | | 1242 | return; |
1243 | } | | 1243 | } |
1244 | | | 1244 | |
1245 | /* | | 1245 | /* |
1246 | * Move the L1 to the back of the LRU list | | 1246 | * Move the L1 to the back of the LRU list |
1247 | */ | | 1247 | */ |
1248 | TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); | | 1248 | TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); |
1249 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); | | 1249 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); |
1250 | | | 1250 | |
1251 | simple_unlock(&l1_lru_lock); | | 1251 | simple_unlock(&l1_lru_lock); |
1252 | } | | 1252 | } |
1253 | | | 1253 | |
1254 | /* | | 1254 | /* |
1255 | * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *) | | 1255 | * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *) |
1256 | * | | 1256 | * |
1257 | * Free an L2 descriptor table. | | 1257 | * Free an L2 descriptor table. |
1258 | */ | | 1258 | */ |
1259 | static inline void | | 1259 | static inline void |
1260 | #ifndef PMAP_INCLUDE_PTE_SYNC | | 1260 | #ifndef PMAP_INCLUDE_PTE_SYNC |
1261 | pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa) | | 1261 | pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa) |
1262 | #else | | 1262 | #else |
1263 | pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa) | | 1263 | pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa) |
1264 | #endif | | 1264 | #endif |
1265 | { | | 1265 | { |
1266 | #ifdef PMAP_INCLUDE_PTE_SYNC | | 1266 | #ifdef PMAP_INCLUDE_PTE_SYNC |
1267 | #ifdef PMAP_CACHE_VIVT | | 1267 | #ifdef PMAP_CACHE_VIVT |
1268 | /* | | 1268 | /* |
1269 | * Note: With a write-back cache, we may need to sync this | | 1269 | * Note: With a write-back cache, we may need to sync this |
1270 | * L2 table before re-using it. | | 1270 | * L2 table before re-using it. |
1271 | * This is because it may have belonged to a non-current | | 1271 | * This is because it may have belonged to a non-current |
1272 | * pmap, in which case the cache syncs would have been | | 1272 | * pmap, in which case the cache syncs would have been |
1273 | * skipped for the pages that were being unmapped. If the | | 1273 | * skipped for the pages that were being unmapped. If the |
1274 | * L2 table were then to be immediately re-allocated to | | 1274 | * L2 table were then to be immediately re-allocated to |
1275 | * the *current* pmap, it may well contain stale mappings | | 1275 | * the *current* pmap, it may well contain stale mappings |
1276 | * which have not yet been cleared by a cache write-back | | 1276 | * which have not yet been cleared by a cache write-back |
1277 | * and so would still be visible to the mmu. | | 1277 | * and so would still be visible to the mmu. |
1278 | */ | | 1278 | */ |
1279 | if (need_sync) | | 1279 | if (need_sync) |
1280 | PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); | | 1280 | PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); |
1281 | #endif /* PMAP_CACHE_VIVT */ | | 1281 | #endif /* PMAP_CACHE_VIVT */ |
1282 | #endif /* PMAP_INCLUDE_PTE_SYNC */ | | 1282 | #endif /* PMAP_INCLUDE_PTE_SYNC */ |
1283 | pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa); | | 1283 | pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa); |
1284 | } | | 1284 | } |
1285 | | | 1285 | |
1286 | /* | | 1286 | /* |
1287 | * Returns a pointer to the L2 bucket associated with the specified pmap | | 1287 | * Returns a pointer to the L2 bucket associated with the specified pmap |
1288 | * and VA, or NULL if no L2 bucket exists for the address. | | 1288 | * and VA, or NULL if no L2 bucket exists for the address. |
1289 | */ | | 1289 | */ |
1290 | static inline struct l2_bucket * | | 1290 | static inline struct l2_bucket * |
1291 | pmap_get_l2_bucket(pmap_t pm, vaddr_t va) | | 1291 | pmap_get_l2_bucket(pmap_t pm, vaddr_t va) |
1292 | { | | 1292 | { |
1293 | struct l2_dtable *l2; | | 1293 | struct l2_dtable *l2; |
1294 | struct l2_bucket *l2b; | | 1294 | struct l2_bucket *l2b; |
1295 | u_short l1idx; | | 1295 | u_short l1idx; |
1296 | | | 1296 | |
1297 | l1idx = L1_IDX(va); | | 1297 | l1idx = L1_IDX(va); |
1298 | | | 1298 | |
1299 | if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL || | | 1299 | if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL || |
1300 | (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL) | | 1300 | (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL) |
1301 | return (NULL); | | 1301 | return (NULL); |
1302 | | | 1302 | |
1303 | return (l2b); | | 1303 | return (l2b); |
1304 | } | | 1304 | } |
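
For orientation, the lookup decomposes the VA in three steps (a sketch; the shift/mask constants live in the arm32 headers, with each L1 slot covering a 1MB section):

	l1idx = L1_IDX(va);			  /* which 1MB L1 slot */
	l2    = pm->pm_l2[L2_IDX(l1idx)];	  /* which l2_dtable */
	l2b   = &l2->l2_bucket[L2_BUCKET(l1idx)]; /* bucket within it */
	ptep  = &l2b->l2b_kva[l2pte_index(va)];	  /* PTE within the table */
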
1305 | | | 1305 | |
1306 | /* | | 1306 | /* |
1307 | * Returns a pointer to the L2 bucket associated with the specified pmap | | 1307 | * Returns a pointer to the L2 bucket associated with the specified pmap |
1308 | * and VA. | | 1308 | * and VA. |
1309 | * | | 1309 | * |
1310 | * If no L2 bucket exists, perform the necessary allocations to put an L2 | | 1310 | * If no L2 bucket exists, perform the necessary allocations to put an L2 |
1311 | * bucket/page table in place. | | 1311 | * bucket/page table in place. |
1312 | * | | 1312 | * |
1313 | * Note that if a new L2 bucket/page was allocated, the caller *must* | | 1313 | * Note that if a new L2 bucket/page was allocated, the caller *must* |
1314 | * increment the bucket occupancy counter appropriately *before* | | 1314 | * increment the bucket occupancy counter appropriately *before* |
1315 | * releasing the pmap's lock to ensure no other thread or cpu deallocates | | 1315 | * releasing the pmap's lock to ensure no other thread or cpu deallocates |
1316 | * the bucket/page in the meantime. | | 1316 | * the bucket/page in the meantime. |
1317 | */ | | 1317 | */ |
1318 | static struct l2_bucket * | | 1318 | static struct l2_bucket * |
1319 | pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va) | | 1319 | pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va) |
1320 | { | | 1320 | { |
1321 | struct l2_dtable *l2; | | 1321 | struct l2_dtable *l2; |
1322 | struct l2_bucket *l2b; | | 1322 | struct l2_bucket *l2b; |
1323 | u_short l1idx; | | 1323 | u_short l1idx; |
1324 | | | 1324 | |
1325 | l1idx = L1_IDX(va); | | 1325 | l1idx = L1_IDX(va); |
1326 | | | 1326 | |
1327 | if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { | | 1327 | if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { |
1328 | /* | | 1328 | /* |
1329 | * No mapping at this address, as there is | | 1329 | * No mapping at this address, as there is |
1330 | * no entry in the L1 table. | | 1330 | * no entry in the L1 table. |
1331 | * Need to allocate a new l2_dtable. | | 1331 | * Need to allocate a new l2_dtable. |
1332 | */ | | 1332 | */ |
1333 | if ((l2 = pmap_alloc_l2_dtable()) == NULL) | | 1333 | if ((l2 = pmap_alloc_l2_dtable()) == NULL) |
1334 | return (NULL); | | 1334 | return (NULL); |
1335 | | | 1335 | |
1336 | /* | | 1336 | /* |
1337 | * Link it into the parent pmap | | 1337 | * Link it into the parent pmap |
1338 | */ | | 1338 | */ |
1339 | pm->pm_l2[L2_IDX(l1idx)] = l2; | | 1339 | pm->pm_l2[L2_IDX(l1idx)] = l2; |
1340 | } | | 1340 | } |
1341 | | | 1341 | |
1342 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; | | 1342 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; |
1343 | | | 1343 | |
1344 | /* | | 1344 | /* |
1345 | * Fetch pointer to the L2 page table associated with the address. | | 1345 | * Fetch pointer to the L2 page table associated with the address. |
1346 | */ | | 1346 | */ |
1347 | if (l2b->l2b_kva == NULL) { | | 1347 | if (l2b->l2b_kva == NULL) { |
1348 | pt_entry_t *ptep; | | 1348 | pt_entry_t *ptep; |
1349 | | | 1349 | |
1350 | /* | | 1350 | /* |
1351 | * No L2 page table has been allocated. Chances are, this | | 1351 | * No L2 page table has been allocated. Chances are, this |
1352 | * is because we just allocated the l2_dtable, above. | | 1352 | * is because we just allocated the l2_dtable, above. |
1353 | */ | | 1353 | */ |
1354 | if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_phys)) == NULL) { | | 1354 | if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_phys)) == NULL) { |
1355 | /* | | 1355 | /* |
1356 | * Oops, no more L2 page tables available at this | | 1356 | * Oops, no more L2 page tables available at this |
1357 | * time. We may need to deallocate the l2_dtable | | 1357 | * time. We may need to deallocate the l2_dtable |
1358 | * if we allocated a new one above. | | 1358 | * if we allocated a new one above. |
1359 | */ | | 1359 | */ |
1360 | if (l2->l2_occupancy == 0) { | | 1360 | if (l2->l2_occupancy == 0) { |
1361 | pm->pm_l2[L2_IDX(l1idx)] = NULL; | | 1361 | pm->pm_l2[L2_IDX(l1idx)] = NULL; |
1362 | pmap_free_l2_dtable(l2); | | 1362 | pmap_free_l2_dtable(l2); |
1363 | } | | 1363 | } |
1364 | return (NULL); | | 1364 | return (NULL); |
1365 | } | | 1365 | } |
1366 | | | 1366 | |
1367 | l2->l2_occupancy++; | | 1367 | l2->l2_occupancy++; |
1368 | l2b->l2b_kva = ptep; | | 1368 | l2b->l2b_kva = ptep; |
1369 | l2b->l2b_l1idx = l1idx; | | 1369 | l2b->l2b_l1idx = l1idx; |
1370 | } | | 1370 | } |
1371 | | | 1371 | |
1372 | return (l2b); | | 1372 | return (l2b); |
1373 | } | | 1373 | } |
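
The occupancy contract described in the comment above looks like this at a call site (a sketch; in practice the counter is bumped only when a previously invalid PTE slot is being filled):

	l2b = pmap_alloc_l2_bucket(pm, va);
	if (l2b == NULL)
		return (ENOMEM);	/* or sleep, per the caller's policy */
	l2b->l2b_occupancy++;		/* claim the slot *before* unlocking */
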
1374 | | | 1374 | |
1375 | /* | | 1375 | /* |
1376 | * One or more mappings in the specified L2 descriptor table have just been | | 1376 | * One or more mappings in the specified L2 descriptor table have just been |
1377 | * invalidated. | | 1377 | * invalidated. |
1378 | * | | 1378 | * |
1379 | * Garbage collect the metadata and descriptor table itself if necessary. | | 1379 | * Garbage collect the metadata and descriptor table itself if necessary. |
1380 | * | | 1380 | * |
1381 | * The pmap lock must be acquired when this is called (not necessary | | 1381 | * The pmap lock must be acquired when this is called (not necessary |
1382 | * for the kernel pmap). | | 1382 | * for the kernel pmap). |
1383 | */ | | 1383 | */ |
1384 | static void | | 1384 | static void |
1385 | pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) | | 1385 | pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) |
1386 | { | | 1386 | { |
1387 | struct l2_dtable *l2; | | 1387 | struct l2_dtable *l2; |
1388 | pd_entry_t *pl1pd, l1pd; | | 1388 | pd_entry_t *pl1pd, l1pd; |
1389 | pt_entry_t *ptep; | | 1389 | pt_entry_t *ptep; |
1390 | u_short l1idx; | | 1390 | u_short l1idx; |
1391 | | | 1391 | |
1392 | KDASSERT(count <= l2b->l2b_occupancy); | | 1392 | KDASSERT(count <= l2b->l2b_occupancy); |
1393 | | | 1393 | |
1394 | /* | | 1394 | /* |
1395 | * Update the bucket's reference count according to how many | | 1395 | * Update the bucket's reference count according to how many |
1396 | * PTEs the caller has just invalidated. | | 1396 | * PTEs the caller has just invalidated. |
1397 | */ | | 1397 | */ |
1398 | l2b->l2b_occupancy -= count; | | 1398 | l2b->l2b_occupancy -= count; |
1399 | | | 1399 | |
1400 | /* | | 1400 | /* |
1401 | * Note: | | 1401 | * Note: |
1402 | * | | 1402 | * |
1403 | * Level 2 page tables allocated to the kernel pmap are never freed | | 1403 | * Level 2 page tables allocated to the kernel pmap are never freed |
1404 | * as that would require checking all Level 1 page tables and | | 1404 | * as that would require checking all Level 1 page tables and |
1405 | * removing any references to the Level 2 page table. See also the | | 1405 | * removing any references to the Level 2 page table. See also the |
1406 | * comment elsewhere about never freeing bootstrap L2 descriptors. | | 1406 | * comment elsewhere about never freeing bootstrap L2 descriptors. |
1407 | * | | 1407 | * |
1408 | * We make do with just invalidating the mapping in the L2 table. | | 1408 | * We make do with just invalidating the mapping in the L2 table. |
1409 | * | | 1409 | * |
1410 | * This isn't really a big deal in practice and, in fact, leads | | 1410 | * This isn't really a big deal in practice and, in fact, leads |
1411 | * to a performance win over time as we don't need to continually | | 1411 | * to a performance win over time as we don't need to continually |
1412 | * alloc/free. | | 1412 | * alloc/free. |
1413 | */ | | 1413 | */ |
1414 | if (l2b->l2b_occupancy > 0 || pm == pmap_kernel()) | | 1414 | if (l2b->l2b_occupancy > 0 || pm == pmap_kernel()) |
1415 | return; | | 1415 | return; |
1416 | | | 1416 | |
1417 | /* | | 1417 | /* |
1418 | * There are no more valid mappings in this level 2 page table. | | 1418 | * There are no more valid mappings in this level 2 page table. |
1419 | * Go ahead and NULL-out the pointer in the bucket, then | | 1419 | * Go ahead and NULL-out the pointer in the bucket, then |
1420 | * free the page table. | | 1420 | * free the page table. |
1421 | */ | | 1421 | */ |
1422 | l1idx = l2b->l2b_l1idx; | | 1422 | l1idx = l2b->l2b_l1idx; |
1423 | ptep = l2b->l2b_kva; | | 1423 | ptep = l2b->l2b_kva; |
1424 | l2b->l2b_kva = NULL; | | 1424 | l2b->l2b_kva = NULL; |
1425 | | | 1425 | |
1426 | pl1pd = &pm->pm_l1->l1_kva[l1idx]; | | 1426 | pl1pd = &pm->pm_l1->l1_kva[l1idx]; |
1427 | | | 1427 | |
1428 | /* | | 1428 | /* |
1429 | * If the L1 slot matches the pmap's domain | | 1429 | * If the L1 slot matches the pmap's domain |
1430 | * number, then invalidate it. | | 1430 | * number, then invalidate it. |
1431 | */ | | 1431 | */ |
1432 | l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK); | | 1432 | l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK); |
1433 | if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) { | | 1433 | if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) { |
1434 | *pl1pd = 0; | | 1434 | *pl1pd = 0; |
1435 | PTE_SYNC(pl1pd); | | 1435 | PTE_SYNC(pl1pd); |
1436 | } | | 1436 | } |
1437 | | | 1437 | |
1438 | /* | | 1438 | /* |
1439 | * Release the L2 descriptor table back to the pool cache. | | 1439 | * Release the L2 descriptor table back to the pool cache. |
1440 | */ | | 1440 | */ |
1441 | #ifndef PMAP_INCLUDE_PTE_SYNC | | 1441 | #ifndef PMAP_INCLUDE_PTE_SYNC |
1442 | pmap_free_l2_ptp(ptep, l2b->l2b_phys); | | 1442 | pmap_free_l2_ptp(ptep, l2b->l2b_phys); |
1443 | #else | | 1443 | #else |
1444 | pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_phys); | | 1444 | pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_phys); |
1445 | #endif | | 1445 | #endif |
1446 | | | 1446 | |
1447 | /* | | 1447 | /* |
1448 | * Update the reference count in the associated l2_dtable | | 1448 | * Update the reference count in the associated l2_dtable |
1449 | */ | | 1449 | */ |
1450 | l2 = pm->pm_l2[L2_IDX(l1idx)]; | | 1450 | l2 = pm->pm_l2[L2_IDX(l1idx)]; |
1451 | if (--l2->l2_occupancy > 0) | | 1451 | if (--l2->l2_occupancy > 0) |
1452 | return; | | 1452 | return; |
1453 | | | 1453 | |
1454 | /* | | 1454 | /* |
1455 | * There are no more valid mappings in any of the Level 1 | | 1455 | * There are no more valid mappings in any of the Level 1 |
1456 | * slots managed by this l2_dtable. Go ahead and NULL-out | | 1456 | * slots managed by this l2_dtable. Go ahead and NULL-out |
1457 | * the pointer in the parent pmap and free the l2_dtable. | | 1457 | * the pointer in the parent pmap and free the l2_dtable. |
1458 | */ | | 1458 | */ |
1459 | pm->pm_l2[L2_IDX(l1idx)] = NULL; | | 1459 | pm->pm_l2[L2_IDX(l1idx)] = NULL; |
1460 | pmap_free_l2_dtable(l2); | | 1460 | pmap_free_l2_dtable(l2); |
1461 | } | | 1461 | } |
1462 | | | 1462 | |
1463 | /* | | 1463 | /* |
1464 | * Pool cache constructors for L2 descriptor tables, metadata and pmap | | 1464 | * Pool cache constructors for L2 descriptor tables, metadata and pmap |
1465 | * structures. | | 1465 | * structures. |
1466 | */ | | 1466 | */ |
1467 | static int | | 1467 | static int |
1468 | pmap_l2ptp_ctor(void *arg, void *v, int flags) | | 1468 | pmap_l2ptp_ctor(void *arg, void *v, int flags) |
1469 | { | | 1469 | { |
1470 | #ifndef PMAP_INCLUDE_PTE_SYNC | | 1470 | #ifndef PMAP_INCLUDE_PTE_SYNC |
1471 | struct l2_bucket *l2b; | | 1471 | struct l2_bucket *l2b; |
1472 | pt_entry_t *ptep, pte; | | 1472 | pt_entry_t *ptep, pte; |
1473 | vaddr_t va = (vaddr_t)v & ~PGOFSET; | | 1473 | vaddr_t va = (vaddr_t)v & ~PGOFSET; |
1474 | | | 1474 | |
1475 | /* | | 1475 | /* |
1476 | * The mappings for these page tables were initially made using | | 1476 | * The mappings for these page tables were initially made using |
1477 | * pmap_kenter_pa() by the pool subsystem. Therefore, the cache- | | 1477 | * pmap_kenter_pa() by the pool subsystem. Therefore, the cache- |
1478 | * mode will not be right for page table mappings. To avoid | | 1478 | * mode will not be right for page table mappings. To avoid |
1479 | * polluting the pmap_kenter_pa() code with a special case for | | 1479 | * polluting the pmap_kenter_pa() code with a special case for |
1480 | * page tables, we simply fix up the cache-mode here if it's not | | 1480 | * page tables, we simply fix up the cache-mode here if it's not |
1481 | * correct. | | 1481 | * correct. |
1482 | */ | | 1482 | */ |
1483 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); | | 1483 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); |
1484 | KDASSERT(l2b != NULL); | | 1484 | KDASSERT(l2b != NULL); |
1485 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 1485 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
1486 | pte = *ptep; | | 1486 | pte = *ptep; |
1487 | | | 1487 | |
1488 | if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { | | 1488 | if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { |
1489 | /* | | 1489 | /* |
1490 | * Page tables must have the cache-mode set to Write-Thru. | | 1490 | * Page tables must have the cache-mode set to Write-Thru. |
1491 | */ | | 1491 | */ |
1492 | *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; | | 1492 | *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; |
1493 | PTE_SYNC(ptep); | | 1493 | PTE_SYNC(ptep); |
1494 | cpu_tlb_flushD_SE(va); | | 1494 | cpu_tlb_flushD_SE(va); |
1495 | cpu_cpwait(); | | 1495 | cpu_cpwait(); |
1496 | } | | 1496 | } |
1497 | #endif | | 1497 | #endif |
1498 | | | 1498 | |
1499 | memset(v, 0, L2_TABLE_SIZE_REAL); | | 1499 | memset(v, 0, L2_TABLE_SIZE_REAL); |
1500 | PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); | | 1500 | PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); |
1501 | return (0); | | 1501 | return (0); |
1502 | } | | 1502 | } |
1503 | | | 1503 | |
1504 | static int | | 1504 | static int |
1505 | pmap_l2dtable_ctor(void *arg, void *v, int flags) | | 1505 | pmap_l2dtable_ctor(void *arg, void *v, int flags) |
1506 | { | | 1506 | { |
1507 | | | 1507 | |
1508 | memset(v, 0, sizeof(struct l2_dtable)); | | 1508 | memset(v, 0, sizeof(struct l2_dtable)); |
1509 | return (0); | | 1509 | return (0); |
1510 | } | | 1510 | } |
1511 | | | 1511 | |
1512 | static int | | 1512 | static int |
1513 | pmap_pmap_ctor(void *arg, void *v, int flags) | | 1513 | pmap_pmap_ctor(void *arg, void *v, int flags) |
1514 | { | | 1514 | { |
1515 | | | 1515 | |
1516 | memset(v, 0, sizeof(struct pmap)); | | 1516 | memset(v, 0, sizeof(struct pmap)); |
1517 | return (0); | | 1517 | return (0); |
1518 | } | | 1518 | } |
1519 | | | 1519 | |
1520 | static void | | 1520 | static void |
1521 | pmap_pinit(pmap_t pm) | | 1521 | pmap_pinit(pmap_t pm) |
1522 | { | | 1522 | { |
1523 | struct l2_bucket *l2b; | | 1523 | struct l2_bucket *l2b; |
1524 | | | 1524 | |
1525 | if (vector_page < KERNEL_BASE) { | | 1525 | if (vector_page < KERNEL_BASE) { |
1526 | /* | | 1526 | /* |
1527 | * Map the vector page. | | 1527 | * Map the vector page. |
1528 | */ | | 1528 | */ |
1529 | pmap_enter(pm, vector_page, systempage.pv_pa, | | 1529 | pmap_enter(pm, vector_page, systempage.pv_pa, |
1530 | VM_PROT_READ, VM_PROT_READ | PMAP_WIRED); | | 1530 | VM_PROT_READ, VM_PROT_READ | PMAP_WIRED); |
1531 | pmap_update(pm); | | 1531 | pmap_update(pm); |
1532 | | | 1532 | |
1533 | pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; | | 1533 | pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; |
1534 | l2b = pmap_get_l2_bucket(pm, vector_page); | | 1534 | l2b = pmap_get_l2_bucket(pm, vector_page); |
1535 | KDASSERT(l2b != NULL); | | 1535 | KDASSERT(l2b != NULL); |
1536 | pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO | | | 1536 | pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO | |
1537 | L1_C_DOM(pm->pm_domain); | | 1537 | L1_C_DOM(pm->pm_domain); |
1538 | } else | | 1538 | } else |
1539 | pm->pm_pl1vec = NULL; | | 1539 | pm->pm_pl1vec = NULL; |
1540 | } | | 1540 | } |
1541 | | | 1541 | |
1542 | #ifdef PMAP_CACHE_VIVT | | 1542 | #ifdef PMAP_CACHE_VIVT |
1543 | /* | | 1543 | /* |
1544 | * Since we have a virtually indexed cache, we may need to inhibit caching if | | 1544 | * Since we have a virtually indexed cache, we may need to inhibit caching if |
1545 | * there is more than one mapping and at least one of them is writable. | | 1545 | * there is more than one mapping and at least one of them is writable. |
1546 | * Since we purge the cache on every context switch, we only need to check for | | 1546 | * Since we purge the cache on every context switch, we only need to check for |
1547 | * other mappings within the same pmap, or kernel_pmap. | | 1547 | * other mappings within the same pmap, or kernel_pmap. |
1548 | * This function is also called when a page is unmapped, to possibly re-enable | | 1548 | * This function is also called when a page is unmapped, to possibly re-enable |
1549 | * caching on any remaining mappings. | | 1549 | * caching on any remaining mappings. |
1550 | * | | 1550 | * |
1551 | * The code implements the following logic, where: | | 1551 | * The code implements the following logic, where: |
1552 | * | | 1552 | * |
1553 | * KW = # of kernel read/write pages | | 1553 | * KW = # of kernel read/write pages |
1554 | * KR = # of kernel read only pages | | 1554 | * KR = # of kernel read only pages |
1555 | * UW = # of user read/write pages | | 1555 | * UW = # of user read/write pages |
1556 | * UR = # of user read only pages | | 1556 | * UR = # of user read only pages |
1557 | * | | 1557 | * |
1558 | * KC = kernel mapping is cacheable | | 1558 | * KC = kernel mapping is cacheable |
1559 | * UC = user mapping is cacheable | | 1559 | * UC = user mapping is cacheable |
1560 | * | | 1560 | * |
1561 | * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0 | | 1561 | * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0 |
1562 | * +--------------------------------------------- | | 1562 | * +--------------------------------------------- |
1563 | * UW=0,UR=0 | --- KC=1 KC=1 KC=0 | | 1563 | * UW=0,UR=0 | --- KC=1 KC=1 KC=0 |
1564 | * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0 | | 1564 | * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0 |
1565 | * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 | | 1565 | * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 |
1566 | * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 | | 1566 | * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 |
1567 | */ | | 1567 | */ |
1568 | | | 1568 | |
1569 | static const int pmap_vac_flags[4][4] = { | | 1569 | static const int pmap_vac_flags[4][4] = { |
1570 | {-1, 0, 0, PVF_KNC}, | | 1570 | {-1, 0, 0, PVF_KNC}, |
1571 | {0, 0, PVF_NC, PVF_NC}, | | 1571 | {0, 0, PVF_NC, PVF_NC}, |
1572 | {0, PVF_NC, PVF_NC, PVF_NC}, | | 1572 | {0, PVF_NC, PVF_NC, PVF_NC}, |
1573 | {PVF_UNC, PVF_NC, PVF_NC, PVF_NC} | | 1573 | {PVF_UNC, PVF_NC, PVF_NC, PVF_NC} |
1574 | }; | | 1574 | }; |
1575 | | | 1575 | |
1576 | static inline int | | 1576 | static inline int |
1577 | pmap_get_vac_flags(const struct vm_page_md *md) | | 1577 | pmap_get_vac_flags(const struct vm_page_md *md) |
1578 | { | | 1578 | { |
1579 | int kidx, uidx; | | 1579 | int kidx, uidx; |
1580 | | | 1580 | |
1581 | kidx = 0; | | 1581 | kidx = 0; |
1582 | if (md->kro_mappings || md->krw_mappings > 1) | | 1582 | if (md->kro_mappings || md->krw_mappings > 1) |
1583 | kidx |= 1; | | 1583 | kidx |= 1; |
1584 | if (md->krw_mappings) | | 1584 | if (md->krw_mappings) |
1585 | kidx |= 2; | | 1585 | kidx |= 2; |
1586 | | | 1586 | |
1587 | uidx = 0; | | 1587 | uidx = 0; |
1588 | if (md->uro_mappings || md->urw_mappings > 1) | | 1588 | if (md->uro_mappings || md->urw_mappings > 1) |
1589 | uidx |= 1; | | 1589 | uidx |= 1; |
1590 | if (md->urw_mappings) | | 1590 | if (md->urw_mappings) |
1591 | uidx |= 2; | | 1591 | uidx |= 2; |
1592 | | | 1592 | |
1593 | return (pmap_vac_flags[uidx][kidx]); | | 1593 | return (pmap_vac_flags[uidx][kidx]); |
1594 | } | | 1594 | } |
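To make the VAC table encoding above concrete, here is a minimal standalone sketch of the same index computation. It is an illustrative userland model, not kernel code: the PVF_* values and the vac_index() helper are hypothetical stand-ins invented for this demo.

#include <stdio.h>

#define PVF_NC	0x01	/* hypothetical flag values, demo only */
#define PVF_KNC	0x02
#define PVF_UNC	0x04

static const int vac_flags[4][4] = {
	{-1, 0, 0, PVF_KNC},
	{0, 0, PVF_NC, PVF_NC},
	{0, PVF_NC, PVF_NC, PVF_NC},
	{PVF_UNC, PVF_NC, PVF_NC, PVF_NC}
};

/* Encode (ro, rw) mapping counts the way pmap_get_vac_flags() does. */
static int
vac_index(unsigned ro, unsigned rw)
{
	int idx = 0;

	if (ro || rw > 1)	/* any r/o mapping, or more than one r/w */
		idx |= 1;
	if (rw)			/* at least one r/w mapping */
		idx |= 2;
	return idx;
}

int
main(void)
{
	/* One kernel r/w mapping plus one user r/o mapping. */
	int kidx = vac_index(0, 1);	/* -> 2: the KW=1,KR=0 column */
	int uidx = vac_index(1, 0);	/* -> 1: the UW=0,UR>0 row */

	/* Prints PVF_NC: both mappings must be made non-cacheable. */
	printf("flags = %#x\n", vac_flags[uidx][kidx]);
	return 0;
}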
1595 | | | 1595 | |
1596 | static inline void | | 1596 | static inline void |
1597 | pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 1597 | pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
1598 | { | | 1598 | { |
1599 | int nattr; | | 1599 | int nattr; |
1600 | | | 1600 | |
1601 | nattr = pmap_get_vac_flags(md); | | 1601 | nattr = pmap_get_vac_flags(md); |
1602 | | | 1602 | |
1603 | if (nattr < 0) { | | 1603 | if (nattr < 0) { |
1604 | md->pvh_attrs &= ~PVF_NC; | | 1604 | md->pvh_attrs &= ~PVF_NC; |
1605 | return; | | 1605 | return; |
1606 | } | | 1606 | } |
1607 | | | 1607 | |
1608 | if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0) | | 1608 | if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0) |
1609 | return; | | 1609 | return; |
1610 | | | 1610 | |
1611 | if (pm == pmap_kernel()) | | 1611 | if (pm == pmap_kernel()) |
1612 | pmap_vac_me_kpmap(md, pa, pm, va); | | 1612 | pmap_vac_me_kpmap(md, pa, pm, va); |
1613 | else | | 1613 | else |
1614 | pmap_vac_me_user(md, pa, pm, va); | | 1614 | pmap_vac_me_user(md, pa, pm, va); |
1615 | | | 1615 | |
1616 | md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr; | | 1616 | md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr; |
1617 | } | | 1617 | } |
1618 | | | 1618 | |
1619 | static void | | 1619 | static void |
1620 | pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 1620 | pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
1621 | { | | 1621 | { |
1622 | u_int u_cacheable, u_entries; | | 1622 | u_int u_cacheable, u_entries; |
1623 | struct pv_entry *pv; | | 1623 | struct pv_entry *pv; |
1624 | pmap_t last_pmap = pm; | | 1624 | pmap_t last_pmap = pm; |
1625 | | | 1625 | |
1626 | /* | | 1626 | /* |
1627 | * Pass one, see if there are both kernel and user pmaps for | | 1627 | * Pass one, see if there are both kernel and user pmaps for |
1628 | * this page. Calculate whether there are user-writable or | | 1628 | * this page. Calculate whether there are user-writable or |
1629 | * kernel-writable pages. | | 1629 | * kernel-writable pages. |
1630 | */ | | 1630 | */ |
1631 | u_cacheable = 0; | | 1631 | u_cacheable = 0; |
1632 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 1632 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
1633 | if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0) | | 1633 | if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0) |
1634 | u_cacheable++; | | 1634 | u_cacheable++; |
1635 | } | | 1635 | } |
1636 | | | 1636 | |
1637 | u_entries = md->urw_mappings + md->uro_mappings; | | 1637 | u_entries = md->urw_mappings + md->uro_mappings; |
1638 | | | 1638 | |
1639 | /* | | 1639 | /* |
1640 | * We know we have just been updating a kernel entry, so if | | 1640 | * We know we have just been updating a kernel entry, so if |
1641 | * all user pages are already cacheable, then there is nothing | | 1641 | * all user pages are already cacheable, then there is nothing |
1642 | * further to do. | | 1642 | * further to do. |
1643 | */ | | 1643 | */ |
1644 | if (md->k_mappings == 0 && u_cacheable == u_entries) | | 1644 | if (md->k_mappings == 0 && u_cacheable == u_entries) |
1645 | return; | | 1645 | return; |
1646 | | | 1646 | |
1647 | if (u_entries) { | | 1647 | if (u_entries) { |
1648 | /* | | 1648 | /* |
1649 | * Scan over the list again; for each entry that | | 1649 | * Scan over the list again; for each entry that |
1650 | * might not be set correctly, call pmap_vac_me_user | | 1650 | * might not be set correctly, call pmap_vac_me_user |
1651 | * to recalculate the settings. | | 1651 | * to recalculate the settings. |
1652 | */ | | 1652 | */ |
1653 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 1653 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
1654 | /* | | 1654 | /* |
1655 | * We know kernel mappings will get set | | 1655 | * We know kernel mappings will get set |
1656 | * correctly in other calls. We also know | | 1656 | * correctly in other calls. We also know |
1657 | * that if the pmap is the same as last_pmap | | 1657 | * that if the pmap is the same as last_pmap |
1658 | * then we've just handled this entry. | | 1658 | * then we've just handled this entry. |
1659 | */ | | 1659 | */ |
1660 | if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap) | | 1660 | if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap) |
1661 | continue; | | 1661 | continue; |
1662 | | | 1662 | |
1663 | /* | | 1663 | /* |
1664 | * If there are kernel entries and this page | | 1664 | * If there are kernel entries and this page |
1665 | * is writable but non-cacheable, then we can | | 1665 | * is writable but non-cacheable, then we can |
1666 | * skip this entry also. | | 1666 | * skip this entry also. |
1667 | */ | | 1667 | */ |
1668 | if (md->k_mappings && | | 1668 | if (md->k_mappings && |
1669 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == | | 1669 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == |
1670 | (PVF_NC | PVF_WRITE)) | | 1670 | (PVF_NC | PVF_WRITE)) |
1671 | continue; | | 1671 | continue; |
1672 | | | 1672 | |
1673 | /* | | 1673 | /* |
1674 | * Similarly if there are no kernel-writable | | 1674 | * Similarly if there are no kernel-writable |
1675 | * entries and the page is already | | 1675 | * entries and the page is already |
1676 | * read-only/cacheable. | | 1676 | * read-only/cacheable. |
1677 | */ | | 1677 | */ |
1678 | if (md->krw_mappings == 0 && | | 1678 | if (md->krw_mappings == 0 && |
1679 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) | | 1679 | (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) |
1680 | continue; | | 1680 | continue; |
1681 | | | 1681 | |
1682 | /* | | 1682 | /* |
1683 | * For some of the remaining cases, we know | | 1683 | * For some of the remaining cases, we know |
1684 | * that we must recalculate, but for others we | | 1684 | * that we must recalculate, but for others we |
1685 | * can't tell if they are correct or not, so | | 1685 | * can't tell if they are correct or not, so |
1686 | * we recalculate anyway. | | 1686 | * we recalculate anyway. |
1687 | */ | | 1687 | */ |
1688 | pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0); | | 1688 | pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0); |
1689 | } | | 1689 | } |
1690 | | | 1690 | |
1691 | if (md->k_mappings == 0) | | 1691 | if (md->k_mappings == 0) |
1692 | return; | | 1692 | return; |
1693 | } | | 1693 | } |
1694 | | | 1694 | |
1695 | pmap_vac_me_user(md, pa, pm, va); | | 1695 | pmap_vac_me_user(md, pa, pm, va); |
1696 | } | | 1696 | } |
1697 | | | 1697 | |
1698 | static void | | 1698 | static void |
1699 | pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 1699 | pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
1700 | { | | 1700 | { |
1701 | pmap_t kpmap = pmap_kernel(); | | 1701 | pmap_t kpmap = pmap_kernel(); |
1702 | struct pv_entry *pv, *npv = NULL; | | 1702 | struct pv_entry *pv, *npv = NULL; |
1703 | struct l2_bucket *l2b; | | 1703 | struct l2_bucket *l2b; |
1704 | pt_entry_t *ptep, pte; | | 1704 | pt_entry_t *ptep, pte; |
1705 | u_int entries = 0; | | 1705 | u_int entries = 0; |
1706 | u_int writable = 0; | | 1706 | u_int writable = 0; |
1707 | u_int cacheable_entries = 0; | | 1707 | u_int cacheable_entries = 0; |
1708 | u_int kern_cacheable = 0; | | 1708 | u_int kern_cacheable = 0; |
1709 | u_int other_writable = 0; | | 1709 | u_int other_writable = 0; |
1710 | | | 1710 | |
1711 | /* | | 1711 | /* |
1712 | * Count mappings and writable mappings in this pmap. | | 1712 | * Count mappings and writable mappings in this pmap. |
1713 | * Include kernel mappings as part of our own. | | 1713 | * Include kernel mappings as part of our own. |
1714 | * Keep a pointer to the first one. | | 1714 | * Keep a pointer to the first one. |
1715 | */ | | 1715 | */ |
1716 | npv = NULL; | | 1716 | npv = NULL; |
1717 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 1717 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
1718 | /* Count mappings in the same pmap */ | | 1718 | /* Count mappings in the same pmap */ |
1719 | if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { | | 1719 | if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { |
1720 | if (entries++ == 0) | | 1720 | if (entries++ == 0) |
1721 | npv = pv; | | 1721 | npv = pv; |
1722 | | | 1722 | |
1723 | /* Cacheable mappings */ | | 1723 | /* Cacheable mappings */ |
1724 | if ((pv->pv_flags & PVF_NC) == 0) { | | 1724 | if ((pv->pv_flags & PVF_NC) == 0) { |
1725 | cacheable_entries++; | | 1725 | cacheable_entries++; |
1726 | if (kpmap == pv->pv_pmap) | | 1726 | if (kpmap == pv->pv_pmap) |
1727 | kern_cacheable++; | | 1727 | kern_cacheable++; |
1728 | } | | 1728 | } |
1729 | | | 1729 | |
1730 | /* Writable mappings */ | | 1730 | /* Writable mappings */ |
1731 | if (pv->pv_flags & PVF_WRITE) | | 1731 | if (pv->pv_flags & PVF_WRITE) |
1732 | ++writable; | | 1732 | ++writable; |
1733 | } else | | 1733 | } else |
1734 | if (pv->pv_flags & PVF_WRITE) | | 1734 | if (pv->pv_flags & PVF_WRITE) |
1735 | other_writable = 1; | | 1735 | other_writable = 1; |
1736 | } | | 1736 | } |
1737 | | | 1737 | |
1738 | /* | | 1738 | /* |
1739 | * Enable or disable caching as necessary. | | 1739 | * Enable or disable caching as necessary. |
1740 | * Note: the first entry might be part of the kernel pmap, | | 1740 | * Note: the first entry might be part of the kernel pmap, |
1741 | * so we can't assume this is indicative of the state of the | | 1741 | * so we can't assume this is indicative of the state of the |
1742 | * other (maybe non-kpmap) entries. | | 1742 | * other (maybe non-kpmap) entries. |
1743 | */ | | 1743 | */ |
1744 | if ((entries > 1 && writable) || | | 1744 | if ((entries > 1 && writable) || |
1745 | (entries > 0 && pm == kpmap && other_writable)) { | | 1745 | (entries > 0 && pm == kpmap && other_writable)) { |
1746 | if (cacheable_entries == 0) | | 1746 | if (cacheable_entries == 0) |
1747 | return; | | 1747 | return; |
1748 | | | 1748 | |
1749 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { | | 1749 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { |
1750 | if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || | | 1750 | if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || |
1751 | (pv->pv_flags & PVF_NC)) | | 1751 | (pv->pv_flags & PVF_NC)) |
1752 | continue; | | 1752 | continue; |
1753 | | | 1753 | |
1754 | pv->pv_flags |= PVF_NC; | | 1754 | pv->pv_flags |= PVF_NC; |
1755 | | | 1755 | |
1756 | l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); | | 1756 | l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); |
1757 | KDASSERT(l2b != NULL); | | 1757 | KDASSERT(l2b != NULL); |
1758 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 1758 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
1759 | pte = *ptep & ~L2_S_CACHE_MASK; | | 1759 | pte = *ptep & ~L2_S_CACHE_MASK; |
1760 | | | 1760 | |
1761 | if ((va != pv->pv_va || pm != pv->pv_pmap) && | | 1761 | if ((va != pv->pv_va || pm != pv->pv_pmap) && |
1762 | l2pte_valid(pte)) { | | 1762 | l2pte_valid(pte)) { |
1763 | if (PV_BEEN_EXECD(pv->pv_flags)) { | | 1763 | if (PV_BEEN_EXECD(pv->pv_flags)) { |
1764 | #ifdef PMAP_CACHE_VIVT | | 1764 | #ifdef PMAP_CACHE_VIVT |
1765 | pmap_idcache_wbinv_range(pv->pv_pmap, | | 1765 | pmap_idcache_wbinv_range(pv->pv_pmap, |
1766 | pv->pv_va, PAGE_SIZE); | | 1766 | pv->pv_va, PAGE_SIZE); |
1767 | #endif | | 1767 | #endif |
1768 | pmap_tlb_flushID_SE(pv->pv_pmap, | | 1768 | pmap_tlb_flushID_SE(pv->pv_pmap, |
1769 | pv->pv_va); | | 1769 | pv->pv_va); |
1770 | } else | | 1770 | } else |
1771 | if (PV_BEEN_REFD(pv->pv_flags)) { | | 1771 | if (PV_BEEN_REFD(pv->pv_flags)) { |
1772 | #ifdef PMAP_CACHE_VIVT | | 1772 | #ifdef PMAP_CACHE_VIVT |
1773 | pmap_dcache_wb_range(pv->pv_pmap, | | 1773 | pmap_dcache_wb_range(pv->pv_pmap, |
1774 | pv->pv_va, PAGE_SIZE, true, | | 1774 | pv->pv_va, PAGE_SIZE, true, |
1775 | (pv->pv_flags & PVF_WRITE) == 0); | | 1775 | (pv->pv_flags & PVF_WRITE) == 0); |
1776 | #endif | | 1776 | #endif |
1777 | pmap_tlb_flushD_SE(pv->pv_pmap, | | 1777 | pmap_tlb_flushD_SE(pv->pv_pmap, |
1778 | pv->pv_va); | | 1778 | pv->pv_va); |
1779 | } | | 1779 | } |
1780 | } | | 1780 | } |
1781 | | | 1781 | |
1782 | *ptep = pte; | | 1782 | *ptep = pte; |
1783 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); | | 1783 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); |
1784 | } | | 1784 | } |
1785 | cpu_cpwait(); | | 1785 | cpu_cpwait(); |
1786 | } else | | 1786 | } else |
1787 | if (entries > cacheable_entries) { | | 1787 | if (entries > cacheable_entries) { |
1788 | /* | | 1788 | /* |
1789 | * Turn caching back on for some pages. If it is a kernel | | 1789 | * Turn caching back on for some pages. If it is a kernel |
1790 | * page, only do so if there are no other writable pages. | | 1790 | * page, only do so if there are no other writable pages. |
1791 | */ | | 1791 | */ |
1792 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { | | 1792 | for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { |
1793 | if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && | | 1793 | if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && |
1794 | (kpmap != pv->pv_pmap || other_writable))) | | 1794 | (kpmap != pv->pv_pmap || other_writable))) |
1795 | continue; | | 1795 | continue; |
1796 | | | 1796 | |
1797 | pv->pv_flags &= ~PVF_NC; | | 1797 | pv->pv_flags &= ~PVF_NC; |
1798 | | | 1798 | |
1799 | l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); | | 1799 | l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); |
1800 | KDASSERT(l2b != NULL); | | 1800 | KDASSERT(l2b != NULL); |
1801 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 1801 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
1802 | pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; | | 1802 | pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; |
1803 | | | 1803 | |
1804 | if (l2pte_valid(pte)) { | | 1804 | if (l2pte_valid(pte)) { |
1805 | if (PV_BEEN_EXECD(pv->pv_flags)) { | | 1805 | if (PV_BEEN_EXECD(pv->pv_flags)) { |
1806 | pmap_tlb_flushID_SE(pv->pv_pmap, | | 1806 | pmap_tlb_flushID_SE(pv->pv_pmap, |
1807 | pv->pv_va); | | 1807 | pv->pv_va); |
1808 | } else | | 1808 | } else |
1809 | if (PV_BEEN_REFD(pv->pv_flags)) { | | 1809 | if (PV_BEEN_REFD(pv->pv_flags)) { |
1810 | pmap_tlb_flushD_SE(pv->pv_pmap, | | 1810 | pmap_tlb_flushD_SE(pv->pv_pmap, |
1811 | pv->pv_va); | | 1811 | pv->pv_va); |
1812 | } | | 1812 | } |
1813 | } | | 1813 | } |
1814 | | | 1814 | |
1815 | *ptep = pte; | | 1815 | *ptep = pte; |
1816 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); | | 1816 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); |
1817 | } | | 1817 | } |
1818 | } | | 1818 | } |
1819 | } | | 1819 | } |
1820 | #endif | | 1820 | #endif |
1821 | | | 1821 | |
1822 | #ifdef PMAP_CACHE_VIPT | | 1822 | #ifdef PMAP_CACHE_VIPT |
1823 | static void | | 1823 | static void |
1824 | pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) | | 1824 | pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) |
1825 | { | | 1825 | { |
1826 | struct pv_entry *pv; | | 1826 | struct pv_entry *pv; |
1827 | vaddr_t tst_mask; | | 1827 | vaddr_t tst_mask; |
1828 | bool bad_alias; | | 1828 | bool bad_alias; |
1829 | struct l2_bucket *l2b; | | 1829 | struct l2_bucket *l2b; |
1830 | pt_entry_t *ptep, pte, opte; | | 1830 | pt_entry_t *ptep, pte, opte; |
1831 | const u_int | | 1831 | const u_int |
1832 | rw_mappings = md->urw_mappings + md->krw_mappings, | | 1832 | rw_mappings = md->urw_mappings + md->krw_mappings, |
1833 | ro_mappings = md->uro_mappings + md->kro_mappings; | | 1833 | ro_mappings = md->uro_mappings + md->kro_mappings; |
1834 | | | 1834 | |
1835 | /* do we need to do anything? */ | | 1835 | /* do we need to do anything? */ |
1836 | if (arm_cache_prefer_mask == 0) | | 1836 | if (arm_cache_prefer_mask == 0) |
1837 | return; | | 1837 | return; |
1838 | | | 1838 | |
1839 | NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: md=%p, pmap=%p va=%08lx\n", | | 1839 | NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: md=%p, pmap=%p va=%08lx\n", |
1840 | md, pm, va)); | | 1840 | md, pm, va)); |
1841 | | | 1841 | |
1842 | KASSERT(!va || pm); | | 1842 | KASSERT(!va || pm); |
1843 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1843 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1844 | | | 1844 | |
1845 | /* Already a conflict? */ | | 1845 | /* Already a conflict? */ |
1846 | if (__predict_false(md->pvh_attrs & PVF_NC)) { | | 1846 | if (__predict_false(md->pvh_attrs & PVF_NC)) { |
1847 | /* just an add, things are already non-cached */ | | 1847 | /* just an add, things are already non-cached */ |
1848 | KASSERT(!(md->pvh_attrs & PVF_DIRTY)); | | 1848 | KASSERT(!(md->pvh_attrs & PVF_DIRTY)); |
1849 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 1849 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
1850 | bad_alias = false; | | 1850 | bad_alias = false; |
1851 | if (va) { | | 1851 | if (va) { |
1852 | PMAPCOUNT(vac_color_none); | | 1852 | PMAPCOUNT(vac_color_none); |
1853 | bad_alias = true; | | 1853 | bad_alias = true; |
1854 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 1854 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
1855 | goto fixup; | | 1855 | goto fixup; |
1856 | } | | 1856 | } |
1857 | pv = SLIST_FIRST(&md->pvh_list); | | 1857 | pv = SLIST_FIRST(&md->pvh_list); |
1858 | /* the list can't be empty, otherwise the page would be cacheable */ | | 1858 | /* the list can't be empty, otherwise the page would be cacheable */ |
1859 | if (md->pvh_attrs & PVF_KMPAGE) { | | 1859 | if (md->pvh_attrs & PVF_KMPAGE) { |
1860 | tst_mask = md->pvh_attrs; | | 1860 | tst_mask = md->pvh_attrs; |
1861 | } else { | | 1861 | } else { |
1862 | KASSERT(pv); | | 1862 | KASSERT(pv); |
1863 | tst_mask = pv->pv_va; | | 1863 | tst_mask = pv->pv_va; |
1864 | pv = SLIST_NEXT(pv, pv_link); | | 1864 | pv = SLIST_NEXT(pv, pv_link); |
1865 | } | | 1865 | } |
1866 | /* | | 1866 | /* |
1867 | * Only check for a bad alias if we have writable mappings. | | 1867 | * Only check for a bad alias if we have writable mappings. |
1868 | */ | | 1868 | */ |
1869 | tst_mask &= arm_cache_prefer_mask; | | 1869 | tst_mask &= arm_cache_prefer_mask; |
1870 | if (rw_mappings > 0 && arm_cache_prefer_mask) { | | 1870 | if (rw_mappings > 0 && arm_cache_prefer_mask) { |
1871 | for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) { | | 1871 | for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) { |
1872 | /* if there's a bad alias, stop checking. */ | | 1872 | /* if there's a bad alias, stop checking. */ |
1873 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) | | 1873 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) |
1874 | bad_alias = true; | | 1874 | bad_alias = true; |
1875 | } | | 1875 | } |
1876 | md->pvh_attrs |= PVF_WRITE; | | 1876 | md->pvh_attrs |= PVF_WRITE; |
1877 | if (!bad_alias) | | 1877 | if (!bad_alias) |
1878 | md->pvh_attrs |= PVF_DIRTY; | | 1878 | md->pvh_attrs |= PVF_DIRTY; |
1879 | } else { | | 1879 | } else { |
1880 | /* | | 1880 | /* |
1881 | * We have only read-only mappings. Let's see if there | | 1881 | * We have only read-only mappings. Let's see if there |
1882 | * are multiple colors in use or if we mapped a KMPAGE. | | 1882 | * are multiple colors in use or if we mapped a KMPAGE. |
1883 | * If the latter, we have a bad alias. If the former, | | 1883 | * If the latter, we have a bad alias. If the former, |
1884 | * we need to remember that. | | 1884 | * we need to remember that. |
1885 | */ | | 1885 | */ |
1886 | for (; pv; pv = SLIST_NEXT(pv, pv_link)) { | | 1886 | for (; pv; pv = SLIST_NEXT(pv, pv_link)) { |
1887 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) { | | 1887 | if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) { |
1888 | if (md->pvh_attrs & PVF_KMPAGE) | | 1888 | if (md->pvh_attrs & PVF_KMPAGE) |
1889 | bad_alias = true; | | 1889 | bad_alias = true; |
1890 | break; | | 1890 | break; |
1891 | } | | 1891 | } |
1892 | } | | 1892 | } |
1893 | md->pvh_attrs &= ~PVF_WRITE; | | 1893 | md->pvh_attrs &= ~PVF_WRITE; |
1894 | /* | | 1894 | /* |
1895 | * No KMPAGE and we exited early, so we must have | | 1895 | * No KMPAGE and we exited early, so we must have |
1896 | * multiple color mappings. | | 1896 | * multiple color mappings. |
1897 | */ | | 1897 | */ |
1898 | if (!bad_alias && pv != NULL) | | 1898 | if (!bad_alias && pv != NULL) |
1899 | md->pvh_attrs |= PVF_MULTCLR; | | 1899 | md->pvh_attrs |= PVF_MULTCLR; |
1900 | } | | 1900 | } |
1901 | | | 1901 | |
1902 | /* If no conflicting colors, set everything back to cached */ | | 1902 | /* If no conflicting colors, set everything back to cached */ |
1903 | if (!bad_alias) { | | 1903 | if (!bad_alias) { |
1904 | #ifdef DEBUG | | 1904 | #ifdef DEBUG |
1905 | if ((md->pvh_attrs & PVF_WRITE) | | 1905 | if ((md->pvh_attrs & PVF_WRITE) |
1906 | || ro_mappings < 2) { | | 1906 | || ro_mappings < 2) { |
1907 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) | | 1907 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) |
1908 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); | | 1908 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); |
1909 | } | | 1909 | } |
1910 | #endif | | 1910 | #endif |
1911 | md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC; | | 1911 | md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC; |
1912 | md->pvh_attrs |= tst_mask | PVF_COLORED; | | 1912 | md->pvh_attrs |= tst_mask | PVF_COLORED; |
1913 | /* | | 1913 | /* |
1914 | * Restore DIRTY bit if page is modified | | 1914 | * Restore DIRTY bit if page is modified |
1915 | */ | | 1915 | */ |
1916 | if (md->pvh_attrs & PVF_DMOD) | | 1916 | if (md->pvh_attrs & PVF_DMOD) |
1917 | md->pvh_attrs |= PVF_DIRTY; | | 1917 | md->pvh_attrs |= PVF_DIRTY; |
1918 | PMAPCOUNT(vac_color_restore); | | 1918 | PMAPCOUNT(vac_color_restore); |
1919 | } else { | | 1919 | } else { |
1920 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); | | 1920 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); |
1921 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); | | 1921 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); |
1922 | } | | 1922 | } |
1923 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1923 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1924 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 1924 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
1925 | } else if (!va) { | | 1925 | } else if (!va) { |
1926 | KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); | | 1926 | KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); |
1927 | KASSERT(!(md->pvh_attrs & PVF_WRITE) | | 1927 | KASSERT(!(md->pvh_attrs & PVF_WRITE) |
1928 | || (md->pvh_attrs & PVF_DIRTY)); | | 1928 | || (md->pvh_attrs & PVF_DIRTY)); |
1929 | if (rw_mappings == 0) { | | 1929 | if (rw_mappings == 0) { |
1930 | md->pvh_attrs &= ~PVF_WRITE; | | 1930 | md->pvh_attrs &= ~PVF_WRITE; |
1931 | if (ro_mappings == 1 | | 1931 | if (ro_mappings == 1 |
1932 | && (md->pvh_attrs & PVF_MULTCLR)) { | | 1932 | && (md->pvh_attrs & PVF_MULTCLR)) { |
1933 | /* | | 1933 | /* |
1934 | * If this is the last readonly mapping | | 1934 | * If this is the last readonly mapping |
1935 | * but it doesn't match the current color | | 1935 | * but it doesn't match the current color |
1936 | * for the page, change the current color | | 1936 | * for the page, change the current color |
1937 | * to match this last readonly mapping. | | 1937 | * to match this last readonly mapping. |
1938 | */ | | 1938 | */ |
1939 | pv = SLIST_FIRST(&md->pvh_list); | | 1939 | pv = SLIST_FIRST(&md->pvh_list); |
1940 | tst_mask = (md->pvh_attrs ^ pv->pv_va) | | 1940 | tst_mask = (md->pvh_attrs ^ pv->pv_va) |
1941 | & arm_cache_prefer_mask; | | 1941 | & arm_cache_prefer_mask; |
1942 | if (tst_mask) { | | 1942 | if (tst_mask) { |
1943 | md->pvh_attrs ^= tst_mask; | | 1943 | md->pvh_attrs ^= tst_mask; |
1944 | PMAPCOUNT(vac_color_change); | | 1944 | PMAPCOUNT(vac_color_change); |
1945 | } | | 1945 | } |
1946 | } | | 1946 | } |
1947 | } | | 1947 | } |
1948 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1948 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1949 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 1949 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
1950 | return; | | 1950 | return; |
1951 | } else if (!pmap_is_page_colored_p(md)) { | | 1951 | } else if (!pmap_is_page_colored_p(md)) { |
1952 | /* not colored so we just use its color */ | | 1952 | /* not colored so we just use its color */ |
1953 | KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY)); | | 1953 | KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY)); |
1954 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 1954 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
1955 | PMAPCOUNT(vac_color_new); | | 1955 | PMAPCOUNT(vac_color_new); |
1956 | md->pvh_attrs &= PAGE_SIZE - 1; | | 1956 | md->pvh_attrs &= PAGE_SIZE - 1; |
1957 | md->pvh_attrs |= PVF_COLORED | | 1957 | md->pvh_attrs |= PVF_COLORED |
1958 | | (va & arm_cache_prefer_mask) | | 1958 | | (va & arm_cache_prefer_mask) |
1959 | | (rw_mappings > 0 ? PVF_WRITE : 0); | | 1959 | | (rw_mappings > 0 ? PVF_WRITE : 0); |
1960 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1960 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1961 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 1961 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
1962 | return; | | 1962 | return; |
1963 | } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) { | | 1963 | } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) { |
1964 | bad_alias = false; | | 1964 | bad_alias = false; |
1965 | if (rw_mappings > 0) { | | 1965 | if (rw_mappings > 0) { |
1966 | /* | | 1966 | /* |
1967 | * We now have writeable mappings and if we have | | 1967 | * We now have writeable mappings and if we have |
1968 | * readonly mappings in more than one color, we have | | 1968 | * readonly mappings in more than one color, we have |
1969 | * an aliasing problem. Regardless, mark the page as | | 1969 | * an aliasing problem. Regardless, mark the page as |
1970 | * writeable. | | 1970 | * writeable. |
1971 | */ | | 1971 | */ |
1972 | if (md->pvh_attrs & PVF_MULTCLR) { | | 1972 | if (md->pvh_attrs & PVF_MULTCLR) { |
1973 | if (ro_mappings < 2) { | | 1973 | if (ro_mappings < 2) { |
1974 | /* | | 1974 | /* |
1975 | * If we have fewer than two | | 1975 | * If we have fewer than two |
1976 | * read-only mappings, just flush the | | 1976 | * read-only mappings, just flush the |
1977 | * non-primary colors from the cache. | | 1977 | * non-primary colors from the cache. |
1978 | */ | | 1978 | */ |
1979 | pmap_flush_page(md, pa, | | 1979 | pmap_flush_page(md, pa, |
1980 | PMAP_FLUSH_SECONDARY); | | 1980 | PMAP_FLUSH_SECONDARY); |
1981 | } else { | | 1981 | } else { |
1982 | bad_alias = true; | | 1982 | bad_alias = true; |
1983 | } | | 1983 | } |
1984 | } | | 1984 | } |
1985 | md->pvh_attrs |= PVF_WRITE; | | 1985 | md->pvh_attrs |= PVF_WRITE; |
1986 | } | | 1986 | } |
1987 | /* If no conflicting colors, set everything back to cached */ | | 1987 | /* If no conflicting colors, set everything back to cached */ |
1988 | if (!bad_alias) { | | 1988 | if (!bad_alias) { |
1989 | #ifdef DEBUG | | 1989 | #ifdef DEBUG |
1990 | if (rw_mappings > 0 | | 1990 | if (rw_mappings > 0 |
1991 | || (md->pvh_attrs & PMAP_KMPAGE)) { | | 1991 | || (md->pvh_attrs & PMAP_KMPAGE)) { |
1992 | tst_mask = md->pvh_attrs & arm_cache_prefer_mask; | | 1992 | tst_mask = md->pvh_attrs & arm_cache_prefer_mask; |
1993 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) | | 1993 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) |
1994 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); | | 1994 | KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); |
1995 | } | | 1995 | } |
1996 | #endif | | 1996 | #endif |
1997 | if (SLIST_EMPTY(&md->pvh_list)) | | 1997 | if (SLIST_EMPTY(&md->pvh_list)) |
1998 | PMAPCOUNT(vac_color_reuse); | | 1998 | PMAPCOUNT(vac_color_reuse); |
1999 | else | | 1999 | else |
2000 | PMAPCOUNT(vac_color_ok); | | 2000 | PMAPCOUNT(vac_color_ok); |
2001 | | | 2001 | |
2002 | /* matching color, just return */ | | 2002 | /* matching color, just return */ |
2003 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2003 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2004 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2004 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2005 | return; | | 2005 | return; |
2006 | } | | 2006 | } |
2007 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); | | 2007 | KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); |
2008 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); | | 2008 | KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); |
2009 | | | 2009 | |
2010 | /* color conflict. evict from cache. */ | | 2010 | /* color conflict. evict from cache. */ |
2011 | | | 2011 | |
2012 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); | | 2012 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); |
2013 | md->pvh_attrs &= ~PVF_COLORED; | | 2013 | md->pvh_attrs &= ~PVF_COLORED; |
2014 | md->pvh_attrs |= PVF_NC; | | 2014 | md->pvh_attrs |= PVF_NC; |
2015 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2015 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2016 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 2016 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
2017 | PMAPCOUNT(vac_color_erase); | | 2017 | PMAPCOUNT(vac_color_erase); |
2018 | } else if (rw_mappings == 0 | | 2018 | } else if (rw_mappings == 0 |
2019 | && (md->pvh_attrs & PVF_KMPAGE) == 0) { | | 2019 | && (md->pvh_attrs & PVF_KMPAGE) == 0) { |
2020 | KASSERT((md->pvh_attrs & PVF_WRITE) == 0); | | 2020 | KASSERT((md->pvh_attrs & PVF_WRITE) == 0); |
2021 | | | 2021 | |
2022 | /* | | 2022 | /* |
2023 | * If the page has dirty cache lines, clean it. | | 2023 | * If the page has dirty cache lines, clean it. |
2024 | */ | | 2024 | */ |
2025 | if (md->pvh_attrs & PVF_DIRTY) | | 2025 | if (md->pvh_attrs & PVF_DIRTY) |
2026 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); | | 2026 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); |
2027 | | | 2027 | |
2028 | /* | | 2028 | /* |
2029 | * If this is the first remapping (we know that there are no | | 2029 | * If this is the first remapping (we know that there are no |
2030 | * writeable mappings), then this is a simple color change. | | 2030 | * writeable mappings), then this is a simple color change. |
2031 | * Otherwise this is a secondary r/o mapping, which means | | 2031 | * Otherwise this is a secondary r/o mapping, which means |
2032 | * we don't have to do anything. | | 2032 | * we don't have to do anything. |
2033 | */ | | 2033 | */ |
2034 | if (ro_mappings == 1) { | | 2034 | if (ro_mappings == 1) { |
2035 | KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0); | | 2035 | KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0); |
2036 | md->pvh_attrs &= PAGE_SIZE - 1; | | 2036 | md->pvh_attrs &= PAGE_SIZE - 1; |
2037 | md->pvh_attrs |= (va & arm_cache_prefer_mask); | | 2037 | md->pvh_attrs |= (va & arm_cache_prefer_mask); |
2038 | PMAPCOUNT(vac_color_change); | | 2038 | PMAPCOUNT(vac_color_change); |
2039 | } else { | | 2039 | } else { |
2040 | PMAPCOUNT(vac_color_blind); | | 2040 | PMAPCOUNT(vac_color_blind); |
2041 | } | | 2041 | } |
2042 | md->pvh_attrs |= PVF_MULTCLR; | | 2042 | md->pvh_attrs |= PVF_MULTCLR; |
2043 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2043 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2044 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2044 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2045 | return; | | 2045 | return; |
2046 | } else { | | 2046 | } else { |
2047 | if (rw_mappings > 0) | | 2047 | if (rw_mappings > 0) |
2048 | md->pvh_attrs |= PVF_WRITE; | | 2048 | md->pvh_attrs |= PVF_WRITE; |
2049 | | | 2049 | |
2050 | /* color conflict. evict from cache. */ | | 2050 | /* color conflict. evict from cache. */ |
2051 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); | | 2051 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); |
2052 | | | 2052 | |
2053 | /* the list can't be empty because this was an enter/modify */ | | 2053 | /* the list can't be empty because this was an enter/modify */ |
2054 | pv = SLIST_FIRST(&md->pvh_list); | | 2054 | pv = SLIST_FIRST(&md->pvh_list); |
2055 | if ((md->pvh_attrs & PVF_KMPAGE) == 0) { | | 2055 | if ((md->pvh_attrs & PVF_KMPAGE) == 0) { |
2056 | KASSERT(pv); | | 2056 | KASSERT(pv); |
2057 | /* | | 2057 | /* |
2058 | * If there's only one mapped page, change color to the | | 2058 | * If there's only one mapped page, change color to the |
2059 | * page's new color and return. Restore the DIRTY bit | | 2059 | * page's new color and return. Restore the DIRTY bit |
2060 | * that was erased by pmap_flush_page. | | 2060 | * that was erased by pmap_flush_page. |
2061 | */ | | 2061 | */ |
2062 | if (SLIST_NEXT(pv, pv_link) == NULL) { | | 2062 | if (SLIST_NEXT(pv, pv_link) == NULL) { |
2063 | md->pvh_attrs &= PAGE_SIZE - 1; | | 2063 | md->pvh_attrs &= PAGE_SIZE - 1; |
2064 | md->pvh_attrs |= (va & arm_cache_prefer_mask); | | 2064 | md->pvh_attrs |= (va & arm_cache_prefer_mask); |
2065 | if (md->pvh_attrs & PVF_DMOD) | | 2065 | if (md->pvh_attrs & PVF_DMOD) |
2066 | md->pvh_attrs |= PVF_DIRTY; | | 2066 | md->pvh_attrs |= PVF_DIRTY; |
2067 | PMAPCOUNT(vac_color_change); | | 2067 | PMAPCOUNT(vac_color_change); |
2068 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2068 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2069 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2069 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2070 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); | | 2070 | KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); |
2071 | return; | | 2071 | return; |
2072 | } | | 2072 | } |
2073 | } | | 2073 | } |
2074 | bad_alias = true; | | 2074 | bad_alias = true; |
2075 | md->pvh_attrs &= ~PVF_COLORED; | | 2075 | md->pvh_attrs &= ~PVF_COLORED; |
2076 | md->pvh_attrs |= PVF_NC; | | 2076 | md->pvh_attrs |= PVF_NC; |
2077 | PMAPCOUNT(vac_color_erase); | | 2077 | PMAPCOUNT(vac_color_erase); |
2078 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2078 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2079 | } | | 2079 | } |
2080 | | | 2080 | |
2081 | fixup: | | 2081 | fixup: |
2082 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2082 | KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2083 | | | 2083 | |
2084 | /* | | 2084 | /* |
2085 | * Turn caching on/off for all pages. | | 2085 | * Turn caching on/off for all pages. |
2086 | */ | | 2086 | */ |
2087 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 2087 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
2088 | l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); | | 2088 | l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); |
2089 | KDASSERT(l2b != NULL); | | 2089 | KDASSERT(l2b != NULL); |
2090 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 2090 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
2091 | opte = *ptep; | | 2091 | opte = *ptep; |
2092 | pte = opte & ~L2_S_CACHE_MASK; | | 2092 | pte = opte & ~L2_S_CACHE_MASK; |
2093 | if (bad_alias) { | | 2093 | if (bad_alias) { |
2094 | pv->pv_flags |= PVF_NC; | | 2094 | pv->pv_flags |= PVF_NC; |
2095 | } else { | | 2095 | } else { |
2096 | pv->pv_flags &= ~PVF_NC; | | 2096 | pv->pv_flags &= ~PVF_NC; |
2097 | pte |= pte_l2_s_cache_mode; | | 2097 | pte |= pte_l2_s_cache_mode; |
2098 | } | | 2098 | } |
2099 | | | 2099 | |
2100 | if (opte == pte) /* only update if there's a change */ | | 2100 | if (opte == pte) /* only update if there's a change */ |
2101 | continue; | | 2101 | continue; |
2102 | | | 2102 | |
2103 | if (l2pte_valid(pte)) { | | 2103 | if (l2pte_valid(pte)) { |
2104 | if (PV_BEEN_EXECD(pv->pv_flags)) { | | 2104 | if (PV_BEEN_EXECD(pv->pv_flags)) { |
2105 | pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); | | 2105 | pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); |
2106 | } else if (PV_BEEN_REFD(pv->pv_flags)) { | | 2106 | } else if (PV_BEEN_REFD(pv->pv_flags)) { |
2107 | pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va); | | 2107 | pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va); |
2108 | } | | 2108 | } |
2109 | } | | 2109 | } |
2110 | | | 2110 | |
2111 | *ptep = pte; | | 2111 | *ptep = pte; |
2112 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); | | 2112 | PTE_SYNC_CURRENT(pv->pv_pmap, ptep); |
2113 | } | | 2113 | } |
2114 | } | | 2114 | } |
2115 | #endif /* PMAP_CACHE_VIPT */ | | 2115 | #endif /* PMAP_CACHE_VIPT */ |
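All of the VIPT coloring tests above reduce to comparing virtual addresses under arm_cache_prefer_mask. Below is a minimal standalone sketch of that comparison; the 0x3000 mask is a purely illustrative assumption (e.g. a 16KB cache way), not a value taken from this file.

#include <stdbool.h>
#include <stdio.h>

/*
 * Two VAs can coexist cached in a VIPT cache only if they agree in the
 * bits selected by the prefer mask, i.e. they have the same "color".
 */
static bool
same_color(unsigned long va1, unsigned long va2, unsigned long mask)
{
	return ((va1 ^ va2) & mask) == 0;
}

int
main(void)
{
	const unsigned long mask = 0x3000;	/* hypothetical prefer mask */

	/* 0x201000 and 0x405000 both have color 0x1000: no alias. */
	printf("%d\n", same_color(0x201000, 0x405000, mask));	/* 1 */
	/* 0x201000 (color 0x1000) vs 0x402000 (color 0x2000): bad alias. */
	printf("%d\n", same_color(0x201000, 0x402000, mask));	/* 0 */
	return 0;
}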
2116 | | | 2116 | |
2117 | | | 2117 | |
2118 | /* | | 2118 | /* |
2119 | * Modify pte bits for all ptes corresponding to the given physical address. | | 2119 | * Modify pte bits for all ptes corresponding to the given physical address. |
2120 | * We use `maskbits' rather than `clearbits' because we're always passing | | 2120 | * We use `maskbits' rather than `clearbits' because we're always passing |
2121 | * constants and the latter would require an extra inversion at run-time. | | 2121 | * constants and the latter would require an extra inversion at run-time. |
2122 | */ | | 2122 | */ |
2123 | static void | | 2123 | static void |
2124 | pmap_clearbit(struct vm_page *pg, u_int maskbits) | | 2124 | pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits) |
2125 | { | | 2125 | { |
2126 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | | |
2127 | struct l2_bucket *l2b; | | 2126 | struct l2_bucket *l2b; |
2128 | struct pv_entry *pv; | | 2127 | struct pv_entry *pv; |
2129 | pt_entry_t *ptep, npte, opte; | | 2128 | pt_entry_t *ptep, npte, opte; |
2130 | pmap_t pm; | | 2129 | pmap_t pm; |
2131 | vaddr_t va; | | 2130 | vaddr_t va; |
2132 | u_int oflags; | | 2131 | u_int oflags; |
2133 | #ifdef PMAP_CACHE_VIPT | | 2132 | #ifdef PMAP_CACHE_VIPT |
2134 | const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs); | | 2133 | const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs); |
2135 | bool need_syncicache = false; | | 2134 | bool need_syncicache = false; |
2136 | bool did_syncicache = false; | | 2135 | bool did_syncicache = false; |
2137 | bool need_vac_me_harder = false; | | 2136 | bool need_vac_me_harder = false; |
2138 | #endif | | 2137 | #endif |
2139 | | | 2138 | |
2140 | NPDEBUG(PDB_BITS, | | 2139 | NPDEBUG(PDB_BITS, |
2141 | printf("pmap_clearbit: pg %p (0x%08lx) mask 0x%x\n", | | 2140 | printf("pmap_clearbit: md %p mask 0x%x\n", |
2142 | pg, VM_PAGE_TO_PHYS(pg), maskbits)); | | 2141 | md, maskbits)); |
2143 | | | 2142 | |
2144 | PMAP_HEAD_TO_MAP_LOCK(); | | 2143 | PMAP_HEAD_TO_MAP_LOCK(); |
2145 | simple_lock(&md->pvh_slock); | | 2144 | simple_lock(&md->pvh_slock); |
2146 | | | 2145 | |
2147 | #ifdef PMAP_CACHE_VIPT | | 2146 | #ifdef PMAP_CACHE_VIPT |
2148 | /* | | 2147 | /* |
2149 | * If we might want to sync the I-cache and we've modified it, | | 2148 | * If we might want to sync the I-cache and we've modified it, |
2150 | * then we know we definitely need to sync or discard it. | | 2149 | * then we know we definitely need to sync or discard it. |
2151 | */ | | 2150 | */ |
2152 | if (want_syncicache) | | 2151 | if (want_syncicache) |
2153 | need_syncicache = md->pvh_attrs & PVF_MOD; | | 2152 | need_syncicache = md->pvh_attrs & PVF_MOD; |
2154 | #endif | | 2153 | #endif |
2155 | /* | | 2154 | /* |
2156 | * Clear saved attributes (modify, reference) | | 2155 | * Clear saved attributes (modify, reference) |
2157 | */ | | 2156 | */ |
2158 | md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); | | 2157 | md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); |
2159 | | | 2158 | |
2160 | if (SLIST_EMPTY(&md->pvh_list)) { | | 2159 | if (SLIST_EMPTY(&md->pvh_list)) { |
2161 | #ifdef PMAP_CACHE_VIPT | | 2160 | #ifdef PMAP_CACHE_VIPT |
2162 | if (need_syncicache) { | | 2161 | if (need_syncicache) { |
2163 | /* | | 2162 | /* |
2164 | * No one has it mapped, so just discard it. The next | | 2163 | * No one has it mapped, so just discard it. The next |
2165 | * exec remapping will cause it to be synced. | | 2164 | * exec remapping will cause it to be synced. |
2166 | */ | | 2165 | */ |
2167 | md->pvh_attrs &= ~PVF_EXEC; | | 2166 | md->pvh_attrs &= ~PVF_EXEC; |
2168 | PMAPCOUNT(exec_discarded_clearbit); | | 2167 | PMAPCOUNT(exec_discarded_clearbit); |
2169 | } | | 2168 | } |
2170 | #endif | | 2169 | #endif |
2171 | simple_unlock(&md->pvh_slock); | | 2170 | simple_unlock(&md->pvh_slock); |
2172 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 2171 | PMAP_HEAD_TO_MAP_UNLOCK(); |
2173 | return; | | 2172 | return; |
2174 | } | | 2173 | } |
2175 | | | 2174 | |
2176 | /* | | 2175 | /* |
2177 | * Loop over all current mappings, setting/clearing as appropriate | | 2176 | * Loop over all current mappings, setting/clearing as appropriate |
2178 | */ | | 2177 | */ |
2179 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { | | 2178 | SLIST_FOREACH(pv, &md->pvh_list, pv_link) { |
2180 | va = pv->pv_va; | | 2179 | va = pv->pv_va; |
2181 | pm = pv->pv_pmap; | | 2180 | pm = pv->pv_pmap; |
2182 | oflags = pv->pv_flags; | | 2181 | oflags = pv->pv_flags; |
2183 | /* | | 2182 | /* |
2184 | * Kernel entries are unmanaged and as such not to be changed. | | 2183 | * Kernel entries are unmanaged and as such not to be changed. |
2185 | */ | | 2184 | */ |
2186 | if (oflags & PVF_KENTRY) | | 2185 | if (oflags & PVF_KENTRY) |
2187 | continue; | | 2186 | continue; |
2188 | pv->pv_flags &= ~maskbits; | | 2187 | pv->pv_flags &= ~maskbits; |
2189 | | | 2188 | |
2190 | pmap_acquire_pmap_lock(pm); | | 2189 | pmap_acquire_pmap_lock(pm); |
2191 | | | 2190 | |
2192 | l2b = pmap_get_l2_bucket(pm, va); | | 2191 | l2b = pmap_get_l2_bucket(pm, va); |
2193 | KDASSERT(l2b != NULL); | | 2192 | KDASSERT(l2b != NULL); |
2194 | | | 2193 | |
2195 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 2194 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
2196 | npte = opte = *ptep; | | 2195 | npte = opte = *ptep; |
2197 | | | 2196 | |
2198 | NPDEBUG(PDB_BITS, | | 2197 | NPDEBUG(PDB_BITS, |
2199 | printf( | | 2198 | printf( |
2200 | "pmap_clearbit: pv %p, pm %p, va 0x%08lx, flag 0x%x\n", | | 2199 | "pmap_clearbit: pv %p, pm %p, va 0x%08lx, flag 0x%x\n", |
2201 | pv, pv->pv_pmap, pv->pv_va, oflags)); | | 2200 | pv, pv->pv_pmap, pv->pv_va, oflags)); |
2202 | | | 2201 | |
2203 | if (maskbits & (PVF_WRITE|PVF_MOD)) { | | 2202 | if (maskbits & (PVF_WRITE|PVF_MOD)) { |
2204 | #ifdef PMAP_CACHE_VIVT | | 2203 | #ifdef PMAP_CACHE_VIVT |
2205 | if ((pv->pv_flags & PVF_NC)) { | | 2204 | if ((pv->pv_flags & PVF_NC)) { |
2206 | /* | | 2205 | /* |
2207 | * Entry is not cacheable: | | 2206 | * Entry is not cacheable: |
2208 | * | | 2207 | * |
2209 | * Don't turn caching on again if this is a | | 2208 | * Don't turn caching on again if this is a |
2210 | * modified emulation. This would be | | 2209 | * modified emulation. This would be |
2211 | * inconsistent with the settings created by | | 2210 | * inconsistent with the settings created by |
2212 | * pmap_vac_me_harder(). Otherwise, it's safe | | 2211 | * pmap_vac_me_harder(). Otherwise, it's safe |
2213 | * to re-enable caching. | | 2212 | * to re-enable caching. |
2214 | * | | 2213 | * |
2215 | * There's no need to call pmap_vac_me_harder() | | 2214 | * There's no need to call pmap_vac_me_harder() |
2216 | * here: all pages are losing their write | | 2215 | * here: all pages are losing their write |
2217 | * permission. | | 2216 | * permission. |
2218 | */ | | 2217 | */ |
2219 | if (maskbits & PVF_WRITE) { | | 2218 | if (maskbits & PVF_WRITE) { |
2220 | npte |= pte_l2_s_cache_mode; | | 2219 | npte |= pte_l2_s_cache_mode; |
2221 | pv->pv_flags &= ~PVF_NC; | | 2220 | pv->pv_flags &= ~PVF_NC; |
2222 | } | | 2221 | } |
2223 | } else | | 2222 | } else |
2224 | if (l2pte_writable_p(opte)) { | | 2223 | if (l2pte_writable_p(opte)) { |
2225 | /* | | 2224 | /* |
2226 | * Entry is writable/cacheable: check if the | | 2225 | * Entry is writable/cacheable: check if the |
2227 | * pmap is current; if it is, flush the entry, | | 2226 | * pmap is current; if it is, flush the entry, |
2228 | * otherwise it won't be in the cache | | 2227 | * otherwise it won't be in the cache |
2229 | */ | | 2228 | */ |
2230 | if (PV_BEEN_EXECD(oflags)) | | 2229 | if (PV_BEEN_EXECD(oflags)) |
2231 | pmap_idcache_wbinv_range(pm, pv->pv_va, | | 2230 | pmap_idcache_wbinv_range(pm, pv->pv_va, |
2232 | PAGE_SIZE); | | 2231 | PAGE_SIZE); |
2233 | else | | 2232 | else |
2234 | if (PV_BEEN_REFD(oflags)) | | 2233 | if (PV_BEEN_REFD(oflags)) |
2235 | pmap_dcache_wb_range(pm, pv->pv_va, | | 2234 | pmap_dcache_wb_range(pm, pv->pv_va, |
2236 | PAGE_SIZE, | | 2235 | PAGE_SIZE, |
2237 | (maskbits & PVF_REF) != 0, false); | | 2236 | (maskbits & PVF_REF) != 0, false); |
2238 | } | | 2237 | } |
2239 | #endif | | 2238 | #endif |
2240 | | | 2239 | |
2241 | /* make the pte read only */ | | 2240 | /* make the pte read only */ |
2242 | npte = l2pte_set_readonly(npte); | | 2241 | npte = l2pte_set_readonly(npte); |
2243 | | | 2242 | |
2244 | if (maskbits & oflags & PVF_WRITE) { | | 2243 | if (maskbits & oflags & PVF_WRITE) { |
2245 | /* | | 2244 | /* |
2246 | * Keep alias accounting up to date | | 2245 | * Keep alias accounting up to date |
2247 | */ | | 2246 | */ |
2248 | if (pv->pv_pmap == pmap_kernel()) { | | 2247 | if (pv->pv_pmap == pmap_kernel()) { |
2249 | md->krw_mappings--; | | 2248 | md->krw_mappings--; |
2250 | md->kro_mappings++; | | 2249 | md->kro_mappings++; |
2251 | } else { | | 2250 | } else { |
2252 | md->urw_mappings--; | | 2251 | md->urw_mappings--; |
2253 | md->uro_mappings++; | | 2252 | md->uro_mappings++; |
2254 | } | | 2253 | } |
2255 | #ifdef PMAP_CACHE_VIPT | | 2254 | #ifdef PMAP_CACHE_VIPT |
2256 | if (md->urw_mappings + md->krw_mappings == 0) | | 2255 | if (md->urw_mappings + md->krw_mappings == 0) |
2257 | md->pvh_attrs &= ~PVF_WRITE; | | 2256 | md->pvh_attrs &= ~PVF_WRITE; |
2258 | if (want_syncicache) | | 2257 | if (want_syncicache) |
2259 | need_syncicache = true; | | 2258 | need_syncicache = true; |
2260 | need_vac_me_harder = true; | | 2259 | need_vac_me_harder = true; |
2261 | #endif | | 2260 | #endif |
2262 | } | | 2261 | } |
2263 | } | | 2262 | } |
2264 | | | 2263 | |
2265 | if (maskbits & PVF_REF) { | | 2264 | if (maskbits & PVF_REF) { |
2266 | if ((pv->pv_flags & PVF_NC) == 0 && | | 2265 | if ((pv->pv_flags & PVF_NC) == 0 && |
2267 | (maskbits & (PVF_WRITE|PVF_MOD)) == 0 && | | 2266 | (maskbits & (PVF_WRITE|PVF_MOD)) == 0 && |
2268 | l2pte_valid(npte)) { | | 2267 | l2pte_valid(npte)) { |
2269 | #ifdef PMAP_CACHE_VIVT | | 2268 | #ifdef PMAP_CACHE_VIVT |
2270 | /* | | 2269 | /* |
2271 | * Check npte here; we may have already | | 2270 | * Check npte here; we may have already |
2272 | * done the wbinv above, and the validity | | 2271 | * done the wbinv above, and the validity |
2273 | * of the PTE is the same for opte and | | 2272 | * of the PTE is the same for opte and |
2274 | * npte. | | 2273 | * npte. |
2275 | */ | | 2274 | */ |
2276 | /* XXXJRT need idcache_inv_range */ | | 2275 | /* XXXJRT need idcache_inv_range */ |
2277 | if (PV_BEEN_EXECD(oflags)) | | 2276 | if (PV_BEEN_EXECD(oflags)) |
2278 | pmap_idcache_wbinv_range(pm, | | 2277 | pmap_idcache_wbinv_range(pm, |
2279 | pv->pv_va, PAGE_SIZE); | | 2278 | pv->pv_va, PAGE_SIZE); |
2280 | else | | 2279 | else |
2281 | if (PV_BEEN_REFD(oflags)) | | 2280 | if (PV_BEEN_REFD(oflags)) |
2282 | pmap_dcache_wb_range(pm, | | 2281 | pmap_dcache_wb_range(pm, |
2283 | pv->pv_va, PAGE_SIZE, | | 2282 | pv->pv_va, PAGE_SIZE, |
2284 | true, true); | | 2283 | true, true); |
2285 | #endif | | 2284 | #endif |
2286 | } | | 2285 | } |
2287 | | | 2286 | |
2288 | /* | | 2287 | /* |
2289 | * Make the PTE invalid so that we will take a | | 2288 | * Make the PTE invalid so that we will take a |
2290 | * page fault the next time the mapping is | | 2289 | * page fault the next time the mapping is |
2291 | * referenced. | | 2290 | * referenced. |
2292 | */ | | 2291 | */ |
2293 | npte &= ~L2_TYPE_MASK; | | 2292 | npte &= ~L2_TYPE_MASK; |
2294 | npte |= L2_TYPE_INV; | | 2293 | npte |= L2_TYPE_INV; |
2295 | } | | 2294 | } |
2296 | | | 2295 | |
2297 | if (npte != opte) { | | 2296 | if (npte != opte) { |
2298 | *ptep = npte; | | 2297 | *ptep = npte; |
2299 | PTE_SYNC(ptep); | | 2298 | PTE_SYNC(ptep); |
2300 | /* Flush the TLB entry if a current pmap. */ | | 2299 | /* Flush the TLB entry if a current pmap. */ |
2301 | if (PV_BEEN_EXECD(oflags)) | | 2300 | if (PV_BEEN_EXECD(oflags)) |
2302 | pmap_tlb_flushID_SE(pm, pv->pv_va); | | 2301 | pmap_tlb_flushID_SE(pm, pv->pv_va); |
2303 | else | | 2302 | else |
2304 | if (PV_BEEN_REFD(oflags)) | | 2303 | if (PV_BEEN_REFD(oflags)) |
2305 | pmap_tlb_flushD_SE(pm, pv->pv_va); | | 2304 | pmap_tlb_flushD_SE(pm, pv->pv_va); |
2306 | } | | 2305 | } |
2307 | | | 2306 | |
2308 | pmap_release_pmap_lock(pm); | | 2307 | pmap_release_pmap_lock(pm); |
2309 | | | 2308 | |
2310 | NPDEBUG(PDB_BITS, | | 2309 | NPDEBUG(PDB_BITS, |
2311 | printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n", | | 2310 | printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n", |
2312 | pm, va, opte, npte)); | | 2311 | pm, va, opte, npte)); |
2313 | } | | 2312 | } |
2314 | | | 2313 | |
2315 | #ifdef PMAP_CACHE_VIPT | | 2314 | #ifdef PMAP_CACHE_VIPT |
2316 | /* | | 2315 | /* |
2317 | * If we need to sync the I-cache and we haven't done it yet, do it. | | 2316 | * If we need to sync the I-cache and we haven't done it yet, do it. |
2318 | */ | | 2317 | */ |
2319 | if (need_syncicache && !did_syncicache) { | | 2318 | if (need_syncicache && !did_syncicache) { |
2320 | pmap_syncicache_page(md, VM_PAGE_TO_PHYS(pg)); | | 2319 | pmap_syncicache_page(md, pa); |
2321 | PMAPCOUNT(exec_synced_clearbit); | | 2320 | PMAPCOUNT(exec_synced_clearbit); |
2322 | } | | 2321 | } |
2323 | /* | | 2322 | /* |
2324 | * If we are changing this to read-only, we need to call vac_me_harder | | 2323 | * If we are changing this to read-only, we need to call vac_me_harder |
2325 | * so we can change all the read-only pages to cacheable. We treat | | 2324 | * so we can change all the read-only pages to cacheable. We treat |
2326 | * this as a page deletion. | | 2325 | * this as a page deletion. |
2327 | */ | | 2326 | */ |
2328 | if (need_vac_me_harder) { | | 2327 | if (need_vac_me_harder) { |
2329 | if (md->pvh_attrs & PVF_NC) | | 2328 | if (md->pvh_attrs & PVF_NC) |
2330 | pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), NULL, 0); | | 2329 | pmap_vac_me_harder(md, pa, NULL, 0); |
2331 | } | | 2330 | } |
2332 | #endif | | 2331 | #endif |
2333 | | | 2332 | |
2334 | simple_unlock(&md->pvh_slock); | | 2333 | simple_unlock(&md->pvh_slock); |
2335 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 2334 | PMAP_HEAD_TO_MAP_UNLOCK(); |
2336 | } | | 2335 | } |
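As a side note on the `maskbits' convention documented in the header comment of pmap_clearbit() above: because callers pass constant masks, the complement folds at compile time, which is the run-time inversion the comment says `clearbits' would cost. A trivial standalone sketch of the pattern, with hypothetical flag values invented for the demo:

#include <stdio.h>

#define PVF_MOD		0x01	/* hypothetical flag values, demo only */
#define PVF_REF		0x02
#define PVF_WRITE	0x04

/* maskbits style: the caller names the bits to clear; ~ folds statically. */
static unsigned
clearbit(unsigned flags, unsigned maskbits)
{
	return flags & ~maskbits;
}

int
main(void)
{
	unsigned flags = PVF_MOD | PVF_REF | PVF_WRITE;

	/* Clearing "modified" and "write" leaves only the reference bit. */
	printf("%#x\n", clearbit(flags, PVF_MOD | PVF_WRITE));	/* 0x2 */
	return 0;
}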
2337 | | | 2336 | |
2338 | /* | | 2337 | /* |
2339 | * pmap_clean_page() | | 2338 | * pmap_clean_page() |
2340 | * | | 2339 | * |
2341 | * This is a local function used to work out the best strategy to clean | | 2340 | * This is a local function used to work out the best strategy to clean |
2342 | * a single page referenced by its entry in the PV table. It's used by | | 2341 | * a single page referenced by its entry in the PV table. It's used by |
2343 | * pmap_copy_page, pmap_zero_page and maybe some others later on. | | 2342 | * pmap_copy_page, pmap_zero_page and maybe some others later on. |
2344 | * | | 2343 | * |
2345 | * Its policy is effectively: | | 2344 | * Its policy is effectively: |
2346 | * o If there are no mappings, we don't bother doing anything with the cache. | | 2345 | * o If there are no mappings, we don't bother doing anything with the cache. |
2347 | * o If there is one mapping, we clean just that page. | | 2346 | * o If there is one mapping, we clean just that page. |
2348 | * o If there are multiple mappings, we clean the entire cache. | | 2347 | * o If there are multiple mappings, we clean the entire cache. |
2349 | * | | 2348 | * |
2350 | * So that some functions can be further optimised, it returns 0 if it didn't | | 2349 | * So that some functions can be further optimised, it returns 0 if it didn't |
2351 | * clean the entire cache, or 1 if it did. | | 2350 | * clean the entire cache, or 1 if it did. |
2352 | * | | 2351 | * |
2353 | * XXX One bug in this routine is that if the pv_entry has a single page | | 2352 | * XXX One bug in this routine is that if the pv_entry has a single page |
2354 | * mapped at 0x00000000, a whole cache clean will be performed rather than | | 2353 | * mapped at 0x00000000, a whole cache clean will be performed rather than |
2355 | * just that one page. This should not occur in everyday use, and when it | | 2354 | * just that one page. This should not occur in everyday use, and when it |
2356 | * does, the only cost is a less efficient clean for that page. | | 2355 | * does, the only cost is a less efficient clean for that page. |
2357 | */ | | 2356 | */ |
2358 | #ifdef PMAP_CACHE_VIVT | | 2357 | #ifdef PMAP_CACHE_VIVT |
2359 | static int | | 2358 | static int |
2360 | pmap_clean_page(struct pv_entry *pv, bool is_src) | | 2359 | pmap_clean_page(struct pv_entry *pv, bool is_src) |
2361 | { | | 2360 | { |
2362 | pmap_t pm_to_clean = NULL; | | 2361 | pmap_t pm_to_clean = NULL; |
2363 | struct pv_entry *npv; | | 2362 | struct pv_entry *npv; |
2364 | u_int cache_needs_cleaning = 0; | | 2363 | u_int cache_needs_cleaning = 0; |
2365 | u_int flags = 0; | | 2364 | u_int flags = 0; |
2366 | vaddr_t page_to_clean = 0; | | 2365 | vaddr_t page_to_clean = 0; |
2367 | | | 2366 | |
2368 | if (pv == NULL) { | | 2367 | if (pv == NULL) { |
2369 | /* nothing mapped in so nothing to flush */ | | 2368 | /* nothing mapped in so nothing to flush */ |
2370 | return (0); | | 2369 | return (0); |
2371 | } | | 2370 | } |
2372 | | | 2371 | |
2373 | /* | | 2372 | /* |
2374 | * Since we flush the cache each time we change to a different | | 2373 | * Since we flush the cache each time we change to a different |
2375 | * user vmspace, we only need to flush the page if it is in the | | 2374 | * user vmspace, we only need to flush the page if it is in the |
2376 | * current pmap. | | 2375 | * current pmap. |
2377 | */ | | 2376 | */ |
2378 | | | 2377 | |
2379 | for (npv = pv; npv; npv = SLIST_NEXT(npv, pv_link)) { | | 2378 | for (npv = pv; npv; npv = SLIST_NEXT(npv, pv_link)) { |
2380 | if (pmap_is_current(npv->pv_pmap)) { | | 2379 | if (pmap_is_current(npv->pv_pmap)) { |
2381 | flags |= npv->pv_flags; | | 2380 | flags |= npv->pv_flags; |
2382 | /* | | 2381 | /* |
2383 | * The page is mapped non-cacheable in | | 2382 | * The page is mapped non-cacheable in |
2384 | * this map. No need to flush the cache. | | 2383 | * this map. No need to flush the cache. |
2385 | */ | | 2384 | */ |
2386 | if (npv->pv_flags & PVF_NC) { | | 2385 | if (npv->pv_flags & PVF_NC) { |
2387 | #ifdef DIAGNOSTIC | | 2386 | #ifdef DIAGNOSTIC |
2388 | if (cache_needs_cleaning) | | 2387 | if (cache_needs_cleaning) |
2389 | panic("pmap_clean_page: " | | 2388 | panic("pmap_clean_page: " |
2390 | "cache inconsistency"); | | 2389 | "cache inconsistency"); |
2391 | #endif | | 2390 | #endif |
2392 | break; | | 2391 | break; |
2393 | } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) | | 2392 | } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) |
2394 | continue; | | 2393 | continue; |
2395 | if (cache_needs_cleaning) { | | 2394 | if (cache_needs_cleaning) { |
2396 | page_to_clean = 0; | | 2395 | page_to_clean = 0; |
2397 | break; | | 2396 | break; |
2398 | } else { | | 2397 | } else { |
2399 | page_to_clean = npv->pv_va; | | 2398 | page_to_clean = npv->pv_va; |
2400 | pm_to_clean = npv->pv_pmap; | | 2399 | pm_to_clean = npv->pv_pmap; |
2401 | } | | 2400 | } |
2402 | cache_needs_cleaning = 1; | | 2401 | cache_needs_cleaning = 1; |
2403 | } | | 2402 | } |
2404 | } | | 2403 | } |
2405 | | | 2404 | |
2406 | if (page_to_clean) { | | 2405 | if (page_to_clean) { |
2407 | if (PV_BEEN_EXECD(flags)) | | 2406 | if (PV_BEEN_EXECD(flags)) |
2408 | pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, | | 2407 | pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, |
2409 | PAGE_SIZE); | | 2408 | PAGE_SIZE); |
2410 | else | | 2409 | else |
2411 | pmap_dcache_wb_range(pm_to_clean, page_to_clean, | | 2410 | pmap_dcache_wb_range(pm_to_clean, page_to_clean, |
2412 | PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); | | 2411 | PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); |
2413 | } else if (cache_needs_cleaning) { | | 2412 | } else if (cache_needs_cleaning) { |
2414 | pmap_t const pm = curproc->p_vmspace->vm_map.pmap; | | 2413 | pmap_t const pm = curproc->p_vmspace->vm_map.pmap; |
2415 | | | 2414 | |
2416 | if (PV_BEEN_EXECD(flags)) | | 2415 | if (PV_BEEN_EXECD(flags)) |
2417 | pmap_idcache_wbinv_all(pm); | | 2416 | pmap_idcache_wbinv_all(pm); |
2418 | else | | 2417 | else |
2419 | pmap_dcache_wbinv_all(pm); | | 2418 | pmap_dcache_wbinv_all(pm); |
2420 | return (1); | | 2419 | return (1); |
2421 | } | | 2420 | } |
2422 | return (0); | | 2421 | return (0); |
2423 | } | | 2422 | } |
2424 | #endif | | 2423 | #endif |
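
The zero/one/many policy documented above is easy to lose amid the PV-list walk, so here is a minimal sketch of just that decision, detached from the pmap internals. clean_one_page() and clean_whole_cache() are hypothetical stand-ins for pmap_dcache_wb_range()/pmap_idcache_wbinv_all(), not functions from this file.

/*
 * Sketch only: pmap_clean_page()'s cleaning policy in isolation.
 * Returns 1 if the whole cache was cleaned, 0 otherwise, matching
 * the contract described in the comment above.
 */
static int
clean_policy_sketch(u_int nmappings, vaddr_t va)
{

	if (nmappings == 0)
		return (0);		/* nothing mapped, nothing to flush */
	if (nmappings == 1) {
		clean_one_page(va);	/* clean just that one mapping */
		return (0);
	}
	clean_whole_cache();		/* multiple mappings: clean it all */
	return (1);
}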
2425 | | | 2424 | |
2426 | #ifdef PMAP_CACHE_VIPT | | 2425 | #ifdef PMAP_CACHE_VIPT |
2427 | /* | | 2426 | /* |
2428 | * Sync a page with the I-cache. Since this is a VIPT cache, we must pick the | | 2427 | * Sync a page with the I-cache. Since this is a VIPT cache, we must pick the |
2429 | * right cache alias to make sure we flush the right stuff. | | 2428 | * right cache alias to make sure we flush the right stuff. |
2430 | */ | | 2429 | */ |
2431 | void | | 2430 | void |
2432 | pmap_syncicache_page(struct vm_page_md *md, paddr_t pa) | | 2431 | pmap_syncicache_page(struct vm_page_md *md, paddr_t pa) |
2433 | { | | 2432 | { |
2434 | const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 2433 | const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
2435 | pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; | | 2434 | pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; |
2436 | | | 2435 | |
2437 | NPDEBUG(PDB_EXEC, printf("pmap_syncicache_page: md=%p (attrs=%#x)\n", | | 2436 | NPDEBUG(PDB_EXEC, printf("pmap_syncicache_page: md=%p (attrs=%#x)\n", |
2438 | md, md->pvh_attrs)); | | 2437 | md, md->pvh_attrs)); |
2439 | /* | | 2438 | /* |
2440 | * No need to clean the page if it's non-cached. | | 2439 | * No need to clean the page if it's non-cached. |
2441 | */ | | 2440 | */ |
2442 | if (md->pvh_attrs & PVF_NC) | | 2441 | if (md->pvh_attrs & PVF_NC) |
2443 | return; | | 2442 | return; |
2444 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED); | | 2443 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED); |
2445 | | | 2444 | |
2446 | pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); | | 2445 | pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); |
2447 | /* | | 2446 | /* |
2448 | * Set up a PTE with the right coloring to flush existing cache lines. | | 2447 | * Set up a PTE with the right coloring to flush existing cache lines. |
2449 | */ | | 2448 | */ |
2450 | *ptep = L2_S_PROTO | | | 2449 | *ptep = L2_S_PROTO | |
2451 | pa | | 2450 | pa |
2452 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) | | 2451 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
2453 | | pte_l2_s_cache_mode; | | 2452 | | pte_l2_s_cache_mode; |
2454 | PTE_SYNC(ptep); | | 2453 | PTE_SYNC(ptep); |
2455 | | | 2454 | |
2456 | /* | | 2455 | /* |
2457 | * Flush it. | | 2456 | * Flush it. |
2458 | */ | | 2457 | */ |
2459 | cpu_icache_sync_range(cdstp + va_offset, PAGE_SIZE); | | 2458 | cpu_icache_sync_range(cdstp + va_offset, PAGE_SIZE); |
2460 | /* | | 2459 | /* |
2461 | * Unmap the page. | | 2460 | * Unmap the page. |
2462 | */ | | 2461 | */ |
2463 | *ptep = 0; | | 2462 | *ptep = 0; |
2464 | PTE_SYNC(ptep); | | 2463 | PTE_SYNC(ptep); |
2465 | pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); | | 2464 | pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); |
2466 | | | 2465 | |
2467 | md->pvh_attrs |= PVF_EXEC; | | 2466 | md->pvh_attrs |= PVF_EXEC; |
2468 | PMAPCOUNT(exec_synced); | | 2467 | PMAPCOUNT(exec_synced); |
2469 | } | | 2468 | } |
2470 | | | 2469 | |
2471 | void | | 2470 | void |
2472 | pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) | | 2471 | pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) |
2473 | { | | 2472 | { |
2474 | vsize_t va_offset, end_va; | | 2473 | vsize_t va_offset, end_va; |
2475 | void (*cf)(vaddr_t, vsize_t); | | 2474 | void (*cf)(vaddr_t, vsize_t); |
2476 | | | 2475 | |
2477 | if (arm_cache_prefer_mask == 0) | | 2476 | if (arm_cache_prefer_mask == 0) |
2478 | return; | | 2477 | return; |
2479 | | | 2478 | |
2480 | switch (flush) { | | 2479 | switch (flush) { |
2481 | case PMAP_FLUSH_PRIMARY: | | 2480 | case PMAP_FLUSH_PRIMARY: |
2482 | if (md->pvh_attrs & PVF_MULTCLR) { | | 2481 | if (md->pvh_attrs & PVF_MULTCLR) { |
2483 | va_offset = 0; | | 2482 | va_offset = 0; |
2484 | end_va = arm_cache_prefer_mask; | | 2483 | end_va = arm_cache_prefer_mask; |
2485 | md->pvh_attrs &= ~PVF_MULTCLR; | | 2484 | md->pvh_attrs &= ~PVF_MULTCLR; |
2486 | PMAPCOUNT(vac_flush_lots); | | 2485 | PMAPCOUNT(vac_flush_lots); |
2487 | } else { | | 2486 | } else { |
2488 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 2487 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
2489 | end_va = va_offset; | | 2488 | end_va = va_offset; |
2490 | PMAPCOUNT(vac_flush_one); | | 2489 | PMAPCOUNT(vac_flush_one); |
2491 | } | | 2490 | } |
2492 | /* | | 2491 | /* |
2493 | * Mark that the page is no longer dirty. | | 2492 | * Mark that the page is no longer dirty. |
2494 | */ | | 2493 | */ |
2495 | md->pvh_attrs &= ~PVF_DIRTY; | | 2494 | md->pvh_attrs &= ~PVF_DIRTY; |
2496 | cf = cpufuncs.cf_idcache_wbinv_range; | | 2495 | cf = cpufuncs.cf_idcache_wbinv_range; |
2497 | break; | | 2496 | break; |
2498 | case PMAP_FLUSH_SECONDARY: | | 2497 | case PMAP_FLUSH_SECONDARY: |
2499 | va_offset = 0; | | 2498 | va_offset = 0; |
2500 | end_va = arm_cache_prefer_mask; | | 2499 | end_va = arm_cache_prefer_mask; |
2501 | cf = cpufuncs.cf_idcache_wbinv_range; | | 2500 | cf = cpufuncs.cf_idcache_wbinv_range; |
2502 | md->pvh_attrs &= ~PVF_MULTCLR; | | 2501 | md->pvh_attrs &= ~PVF_MULTCLR; |
2503 | PMAPCOUNT(vac_flush_lots); | | 2502 | PMAPCOUNT(vac_flush_lots); |
2504 | break; | | 2503 | break; |
2505 | case PMAP_CLEAN_PRIMARY: | | 2504 | case PMAP_CLEAN_PRIMARY: |
2506 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 2505 | va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
2507 | end_va = va_offset; | | 2506 | end_va = va_offset; |
2508 | cf = cpufuncs.cf_dcache_wb_range; | | 2507 | cf = cpufuncs.cf_dcache_wb_range; |
2509 | /* | | 2508 | /* |
2510 | * Mark that the page is no longer dirty. | | 2509 | * Mark that the page is no longer dirty. |
2511 | */ | | 2510 | */ |
2512 | if ((md->pvh_attrs & PVF_DMOD) == 0) | | 2511 | if ((md->pvh_attrs & PVF_DMOD) == 0) |
2513 | md->pvh_attrs &= ~PVF_DIRTY; | | 2512 | md->pvh_attrs &= ~PVF_DIRTY; |
2514 | PMAPCOUNT(vac_clean_one); | | 2513 | PMAPCOUNT(vac_clean_one); |
2515 | break; | | 2514 | break; |
2516 | default: | | 2515 | default: |
2517 | return; | | 2516 | return; |
2518 | } | | 2517 | } |
2519 | | | 2518 | |
2520 | KASSERT(!(md->pvh_attrs & PVF_NC)); | | 2519 | KASSERT(!(md->pvh_attrs & PVF_NC)); |
2521 | | | 2520 | |
2522 | NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n", | | 2521 | NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n", |
2523 | md, md->pvh_attrs)); | | 2522 | md, md->pvh_attrs)); |
2524 | | | 2523 | |
2525 | for (; va_offset <= end_va; va_offset += PAGE_SIZE) { | | 2524 | for (; va_offset <= end_va; va_offset += PAGE_SIZE) { |
2526 | const size_t pte_offset = va_offset >> PGSHIFT; | | 2525 | const size_t pte_offset = va_offset >> PGSHIFT; |
2527 | pt_entry_t * const ptep = &cdst_pte[pte_offset]; | | 2526 | pt_entry_t * const ptep = &cdst_pte[pte_offset]; |
2528 | const pt_entry_t oldpte = *ptep; | | 2527 | const pt_entry_t oldpte = *ptep; |
2529 | | | 2528 | |
2530 | if (flush == PMAP_FLUSH_SECONDARY | | 2529 | if (flush == PMAP_FLUSH_SECONDARY |
2531 | && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) | | 2530 | && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) |
2532 | continue; | | 2531 | continue; |
2533 | | | 2532 | |
2534 | pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); | | 2533 | pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); |
2535 | /* | | 2534 | /* |
2536 | * Set up a PTE with the right coloring to flush | | 2535 | * Set up a PTE with the right coloring to flush |
2537 | * existing cache entries. | | 2536 | * existing cache entries. |
2538 | */ | | 2537 | */ |
2539 | *ptep = L2_S_PROTO | | 2538 | *ptep = L2_S_PROTO |
2540 | | pa | | 2539 | | pa |
2541 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) | | 2540 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
2542 | | pte_l2_s_cache_mode; | | 2541 | | pte_l2_s_cache_mode; |
2543 | PTE_SYNC(ptep); | | 2542 | PTE_SYNC(ptep); |
2544 | | | 2543 | |
2545 | /* | | 2544 | /* |
2546 | * Flush it. | | 2545 | * Flush it. |
2547 | */ | | 2546 | */ |
2548 | (*cf)(cdstp + va_offset, PAGE_SIZE); | | 2547 | (*cf)(cdstp + va_offset, PAGE_SIZE); |
2549 | | | 2548 | |
2550 | /* | | 2549 | /* |
2551 | * Restore the page table entry since we might have interrupted | | 2550 | * Restore the page table entry since we might have interrupted |
2552 | * pmap_zero_page or pmap_copy_page which was already using | | 2551 | * pmap_zero_page or pmap_copy_page which was already using |
2553 | * this pte. | | 2552 | * this pte. |
2554 | */ | | 2553 | */ |
2555 | *ptep = oldpte; | | 2554 | *ptep = oldpte; |
2556 | PTE_SYNC(ptep); | | 2555 | PTE_SYNC(ptep); |
2557 | pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); | | 2556 | pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); |
2558 | } | | 2557 | } |
2559 | } | | 2558 | } |
2560 | #endif /* PMAP_CACHE_VIPT */ | | 2559 | #endif /* PMAP_CACHE_VIPT */ |
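
Both pmap_syncicache_page() and pmap_flush_page() above revolve around cache "colors": on a VIPT cache, two virtual mappings of the same physical page share cache lines only if the VA bits covered by arm_cache_prefer_mask agree. A minimal sketch of that test follows; arm_cache_prefer_mask is the real variable used throughout this file, while the helper itself is hypothetical.

/* Sketch: do two VAs land on the same VIPT cache alias (color)? */
static inline bool
same_color_sketch(vaddr_t va1, vaddr_t va2)
{

	return (((va1 ^ va2) & arm_cache_prefer_mask) == 0);
}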
2561 | | | 2560 | |
2562 | /* | | 2561 | /* |
2563 | * Routine: pmap_page_remove | | 2562 | * Routine: pmap_page_remove |
2564 | * Function: | | 2563 | * Function: |
2565 | * Removes this physical page from | | 2564 | * Removes this physical page from |
2566 | * all physical maps in which it resides. | | 2565 | * all physical maps in which it resides. |
2567 | * Reflects back modify bits to the pager. | | 2566 | * Reflects back modify bits to the pager. |
2568 | */ | | 2567 | */ |
2569 | static void | | 2568 | static void |
2570 | pmap_page_remove(struct vm_page *pg) | | 2569 | pmap_page_remove(struct vm_page_md *md, paddr_t pa) |
2571 | { | | 2570 | { |
2572 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | | |
2573 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | | |
2574 | struct l2_bucket *l2b; | | 2571 | struct l2_bucket *l2b; |
2575 | struct pv_entry *pv, *npv, **pvp; | | 2572 | struct pv_entry *pv, *npv, **pvp; |
2576 | pmap_t pm; | | 2573 | pmap_t pm; |
2577 | pt_entry_t *ptep; | | 2574 | pt_entry_t *ptep; |
2578 | bool flush; | | 2575 | bool flush; |
2579 | u_int flags; | | 2576 | u_int flags; |
2580 | | | 2577 | |
2581 | NPDEBUG(PDB_FOLLOW, | | 2578 | NPDEBUG(PDB_FOLLOW, |
2582 | printf("pmap_page_remove: pg %p (0x%08lx)\n", pg, | | 2579 | printf("pmap_page_remove: md %p (0x%08lx)\n", md, |
2583 | pa)); | | 2580 | pa)); |
2584 | | | 2581 | |
2585 | PMAP_HEAD_TO_MAP_LOCK(); | | 2582 | PMAP_HEAD_TO_MAP_LOCK(); |
2586 | simple_lock(&md->pvh_slock); | | 2583 | simple_lock(&md->pvh_slock); |
2587 | | | 2584 | |
2588 | pv = SLIST_FIRST(&md->pvh_list); | | 2585 | pv = SLIST_FIRST(&md->pvh_list); |
2589 | if (pv == NULL) { | | 2586 | if (pv == NULL) { |
2590 | #ifdef PMAP_CACHE_VIPT | | 2587 | #ifdef PMAP_CACHE_VIPT |
2591 | /* | | 2588 | /* |
2592 | * We *know* the page contents are about to be replaced. | | 2589 | * We *know* the page contents are about to be replaced. |
2593 | * Discard the exec contents. | | 2590 | * Discard the exec contents. |
2594 | */ | | 2591 | */ |
2595 | if (PV_IS_EXEC_P(md->pvh_attrs)) | | 2592 | if (PV_IS_EXEC_P(md->pvh_attrs)) |
2596 | PMAPCOUNT(exec_discarded_page_protect); | | 2593 | PMAPCOUNT(exec_discarded_page_protect); |
2597 | md->pvh_attrs &= ~PVF_EXEC; | | 2594 | md->pvh_attrs &= ~PVF_EXEC; |
2598 | KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2595 | KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2599 | #endif | | 2596 | #endif |
2600 | simple_unlock(&md->pvh_slock); | | 2597 | simple_unlock(&md->pvh_slock); |
2601 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 2598 | PMAP_HEAD_TO_MAP_UNLOCK(); |
2602 | return; | | 2599 | return; |
2603 | } | | 2600 | } |
2604 | #ifdef PMAP_CACHE_VIPT | | 2601 | #ifdef PMAP_CACHE_VIPT |
2605 | KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); | | 2602 | KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); |
2606 | #endif | | 2603 | #endif |
2607 | | | 2604 | |
2608 | /* | | 2605 | /* |
2609 | * Clear alias counts | | 2606 | * Clear alias counts |
2610 | */ | | 2607 | */ |
2611 | #ifdef PMAP_CACHE_VIVT | | 2608 | #ifdef PMAP_CACHE_VIVT |
2612 | md->k_mappings = 0; | | 2609 | md->k_mappings = 0; |
2613 | #endif | | 2610 | #endif |
2614 | md->urw_mappings = md->uro_mappings = 0; | | 2611 | md->urw_mappings = md->uro_mappings = 0; |
2615 | | | 2612 | |
2616 | flush = false; | | 2613 | flush = false; |
2617 | flags = 0; | | 2614 | flags = 0; |
2618 | | | 2615 | |
2619 | #ifdef PMAP_CACHE_VIVT | | 2616 | #ifdef PMAP_CACHE_VIVT |
2620 | pmap_clean_page(pv, false); | | 2617 | pmap_clean_page(pv, false); |
2621 | #endif | | 2618 | #endif |
2622 | | | 2619 | |
2623 | pvp = &SLIST_FIRST(&md->pvh_list); | | 2620 | pvp = &SLIST_FIRST(&md->pvh_list); |
2624 | while (pv) { | | 2621 | while (pv) { |
2625 | pm = pv->pv_pmap; | | 2622 | pm = pv->pv_pmap; |
2626 | npv = SLIST_NEXT(pv, pv_link); | | 2623 | npv = SLIST_NEXT(pv, pv_link); |
2627 | if (flush == false && pmap_is_current(pm)) | | 2624 | if (flush == false && pmap_is_current(pm)) |
2628 | flush = true; | | 2625 | flush = true; |
2629 | | | 2626 | |
2630 | if (pm == pmap_kernel()) { | | 2627 | if (pm == pmap_kernel()) { |
2631 | #ifdef PMAP_CACHE_VIPT | | 2628 | #ifdef PMAP_CACHE_VIPT |
2632 | /* | | 2629 | /* |
2633 | * If this was an unmanaged mapping, it must be preserved. | | 2630 | * If this was an unmanaged mapping, it must be preserved. |
2634 | * Move it back on the list and advance the end-of-list | | 2631 | * Move it back on the list and advance the end-of-list |
2635 | * pointer. | | 2632 | * pointer. |
2636 | */ | | 2633 | */ |
2637 | if (pv->pv_flags & PVF_KENTRY) { | | 2634 | if (pv->pv_flags & PVF_KENTRY) { |
2638 | *pvp = pv; | | 2635 | *pvp = pv; |
2639 | pvp = &SLIST_NEXT(pv, pv_link); | | 2636 | pvp = &SLIST_NEXT(pv, pv_link); |
2640 | pv = npv; | | 2637 | pv = npv; |
2641 | continue; | | 2638 | continue; |
2642 | } | | 2639 | } |
2643 | if (pv->pv_flags & PVF_WRITE) | | 2640 | if (pv->pv_flags & PVF_WRITE) |
2644 | md->krw_mappings--; | | 2641 | md->krw_mappings--; |
2645 | else | | 2642 | else |
2646 | md->kro_mappings--; | | 2643 | md->kro_mappings--; |
2647 | #endif | | 2644 | #endif |
2648 | PMAPCOUNT(kernel_unmappings); | | 2645 | PMAPCOUNT(kernel_unmappings); |
2649 | } | | 2646 | } |
2650 | PMAPCOUNT(unmappings); | | 2647 | PMAPCOUNT(unmappings); |
2651 | | | 2648 | |
2652 | pmap_acquire_pmap_lock(pm); | | 2649 | pmap_acquire_pmap_lock(pm); |
2653 | | | 2650 | |
2654 | l2b = pmap_get_l2_bucket(pm, pv->pv_va); | | 2651 | l2b = pmap_get_l2_bucket(pm, pv->pv_va); |
2655 | KDASSERT(l2b != NULL); | | 2652 | KDASSERT(l2b != NULL); |
2656 | | | 2653 | |
2657 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; | | 2654 | ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; |
2658 | | | 2655 | |
2659 | /* | | 2656 | /* |
2660 | * Update statistics | | 2657 | * Update statistics |
2661 | */ | | 2658 | */ |
2662 | --pm->pm_stats.resident_count; | | 2659 | --pm->pm_stats.resident_count; |
2663 | | | 2660 | |
2664 | /* Wired bit */ | | 2661 | /* Wired bit */ |
2665 | if (pv->pv_flags & PVF_WIRED) | | 2662 | if (pv->pv_flags & PVF_WIRED) |
2666 | --pm->pm_stats.wired_count; | | 2663 | --pm->pm_stats.wired_count; |
2667 | | | 2664 | |
2668 | flags |= pv->pv_flags; | | 2665 | flags |= pv->pv_flags; |
2669 | | | 2666 | |
2670 | /* | | 2667 | /* |
2671 | * Invalidate the PTEs. | | 2668 | * Invalidate the PTEs. |
2672 | */ | | 2669 | */ |
2673 | *ptep = 0; | | 2670 | *ptep = 0; |
2674 | PTE_SYNC_CURRENT(pm, ptep); | | 2671 | PTE_SYNC_CURRENT(pm, ptep); |
2675 | pmap_free_l2_bucket(pm, l2b, 1); | | 2672 | pmap_free_l2_bucket(pm, l2b, 1); |
2676 | | | 2673 | |
2677 | pool_put(&pmap_pv_pool, pv); | | 2674 | pool_put(&pmap_pv_pool, pv); |
2678 | pv = npv; | | 2675 | pv = npv; |
2679 | /* | | 2676 | /* |
2680 | * If we reach the end of the list and there are still | | 2677 | * If we reach the end of the list and there are still |
2681 | * mappings, they may now be cacheable. | | 2678 | * mappings, they may now be cacheable. |
2682 | */ | | 2679 | */ |
2683 | if (pv == NULL) { | | 2680 | if (pv == NULL) { |
2684 | *pvp = NULL; | | 2681 | *pvp = NULL; |
2685 | if (!SLIST_EMPTY(&md->pvh_list)) | | 2682 | if (!SLIST_EMPTY(&md->pvh_list)) |
2686 | pmap_vac_me_harder(md, pa, pm, 0); | | 2683 | pmap_vac_me_harder(md, pa, pm, 0); |
2687 | } | | 2684 | } |
2688 | pmap_release_pmap_lock(pm); | | 2685 | pmap_release_pmap_lock(pm); |
2689 | } | | 2686 | } |
2690 | #ifdef PMAP_CACHE_VIPT | | 2687 | #ifdef PMAP_CACHE_VIPT |
2691 | /* | | 2688 | /* |
2692 | * Its EXEC cache is now gone. | | 2689 | * Its EXEC cache is now gone. |
2693 | */ | | 2690 | */ |
2694 | if (PV_IS_EXEC_P(md->pvh_attrs)) | | 2691 | if (PV_IS_EXEC_P(md->pvh_attrs)) |
2695 | PMAPCOUNT(exec_discarded_page_protect); | | 2692 | PMAPCOUNT(exec_discarded_page_protect); |
2696 | md->pvh_attrs &= ~PVF_EXEC; | | 2693 | md->pvh_attrs &= ~PVF_EXEC; |
2697 | KASSERT(md->urw_mappings == 0); | | 2694 | KASSERT(md->urw_mappings == 0); |
2698 | KASSERT(md->uro_mappings == 0); | | 2695 | KASSERT(md->uro_mappings == 0); |
2699 | if (md->krw_mappings == 0) | | 2696 | if (md->krw_mappings == 0) |
2700 | md->pvh_attrs &= ~PVF_WRITE; | | 2697 | md->pvh_attrs &= ~PVF_WRITE; |
2701 | KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); | | 2698 | KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); |
2702 | #endif | | 2699 | #endif |
2703 | simple_unlock(&md->pvh_slock); | | 2700 | simple_unlock(&md->pvh_slock); |
2704 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 2701 | PMAP_HEAD_TO_MAP_UNLOCK(); |
2705 | | | 2702 | |
2706 | if (flush) { | | 2703 | if (flush) { |
2707 | /* | | 2704 | /* |
2708 | * Note: We can't use pmap_tlb_flush{I,D}() here since that | | 2705 | * Note: We can't use pmap_tlb_flush{I,D}() here since that |
2709 | * would need a subsequent call to pmap_update() to ensure | | 2706 | * would need a subsequent call to pmap_update() to ensure |
2710 | * curpm->pm_cstate.cs_all is reset. Our callers are not | | 2707 | * curpm->pm_cstate.cs_all is reset. Our callers are not |
2711 | * required to do that (see pmap(9)), so we can't modify | | 2708 | * required to do that (see pmap(9)), so we can't modify |
2712 | * the current pmap's state. | | 2709 | * the current pmap's state. |
2713 | */ | | 2710 | */ |
2714 | if (PV_BEEN_EXECD(flags)) | | 2711 | if (PV_BEEN_EXECD(flags)) |
2715 | cpu_tlb_flushID(); | | 2712 | cpu_tlb_flushID(); |
2716 | else | | 2713 | else |
2717 | cpu_tlb_flushD(); | | 2714 | cpu_tlb_flushD(); |
2718 | } | | 2715 | } |
2719 | cpu_cpwait(); | | 2716 | cpu_cpwait(); |
2720 | } | | 2717 | } |
2721 | | | 2718 | |
2722 | /* | | 2719 | /* |
2723 | * pmap_t pmap_create(void) | | 2720 | * pmap_t pmap_create(void) |
2724 | * | | 2721 | * |
2725 | * Create a new pmap structure from scratch. | | 2722 | * Create a new pmap structure from scratch. |
2726 | */ | | 2723 | */ |
2727 | pmap_t | | 2724 | pmap_t |
2728 | pmap_create(void) | | 2725 | pmap_create(void) |
2729 | { | | 2726 | { |
2730 | pmap_t pm; | | 2727 | pmap_t pm; |
2731 | | | 2728 | |
2732 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); | | 2729 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); |
2733 | | | 2730 | |
2734 | UVM_OBJ_INIT(&pm->pm_obj, NULL, 1); | | 2731 | UVM_OBJ_INIT(&pm->pm_obj, NULL, 1); |
2735 | pm->pm_stats.wired_count = 0; | | 2732 | pm->pm_stats.wired_count = 0; |
2736 | pm->pm_stats.resident_count = 1; | | 2733 | pm->pm_stats.resident_count = 1; |
2737 | pm->pm_cstate.cs_all = 0; | | 2734 | pm->pm_cstate.cs_all = 0; |
2738 | pmap_alloc_l1(pm); | | 2735 | pmap_alloc_l1(pm); |
2739 | | | 2736 | |
2740 | /* | | 2737 | /* |
2741 | * Note: The pool cache ensures that the pm_l2[] array is already | | 2738 | * Note: The pool cache ensures that the pm_l2[] array is already |
2742 | * initialised to zero. | | 2739 | * initialised to zero. |
2743 | */ | | 2740 | */ |
2744 | | | 2741 | |
2745 | pmap_pinit(pm); | | 2742 | pmap_pinit(pm); |
2746 | | | 2743 | |
2747 | LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); | | 2744 | LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); |
2748 | | | 2745 | |
2749 | return (pm); | | 2746 | return (pm); |
2750 | } | | 2747 | } |
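
For orientation, a hedged sketch of the pmap(9) life cycle that pmap_create() and the entry points below serve, with error handling elided and the virtual/physical addresses left as parameters; this illustrates the documented interface, it is not code from this file.

/* Sketch of the pmap(9) usage contract around pmap_create()/pmap_enter(). */
static void
pmap_lifecycle_sketch(vaddr_t va, paddr_t pa)
{
	pmap_t pm = pmap_create();

	if (pmap_enter(pm, va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | PMAP_CANFAIL) == ENOMEM) {
		/* PMAP_CANFAIL turned the allocation panic into ENOMEM */
	}
	pmap_update(pm);		/* make deferred TLB work visible */
	pmap_remove(pm, va, va + PAGE_SIZE);
	pmap_update(pm);
	pmap_destroy(pm);
}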
2751 | | | 2748 | |
2752 | /* | | 2749 | /* |
2753 | * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, | | 2750 | * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, |
2754 | * u_int flags) | | 2751 | * u_int flags) |
2755 | * | | 2752 | * |
2756 | * Insert the given physical page (p) at | | 2753 | * Insert the given physical page (p) at |
2757 | * the specified virtual address (v) in the | | 2754 | * the specified virtual address (v) in the |
2758 | * target physical map with the protection requested. | | 2755 | * target physical map with the protection requested. |
2759 | * | | 2756 | * |
2760 | * NB: This is the only routine which MAY NOT lazy-evaluate | | 2757 | * NB: This is the only routine which MAY NOT lazy-evaluate |
2761 | * or lose information. That is, this routine must actually | | 2758 | * or lose information. That is, this routine must actually |
2762 | * insert this page into the given map NOW. | | 2759 | * insert this page into the given map NOW. |
2763 | */ | | 2760 | */ |
2764 | int | | 2761 | int |
2765 | pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 2762 | pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
2766 | { | | 2763 | { |
2767 | struct l2_bucket *l2b; | | 2764 | struct l2_bucket *l2b; |
2768 | struct vm_page *pg, *opg; | | 2765 | struct vm_page *pg, *opg; |
2769 | struct pv_entry *pv; | | 2766 | struct pv_entry *pv; |
2770 | pt_entry_t *ptep, npte, opte; | | 2767 | pt_entry_t *ptep, npte, opte; |
2771 | u_int nflags; | | 2768 | u_int nflags; |
2772 | u_int oflags; | | 2769 | u_int oflags; |
2773 | | | 2770 | |
2774 | NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags)); | | 2771 | NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags)); |
2775 | | | 2772 | |
2776 | KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); | | 2773 | KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); |
2777 | KDASSERT(((va | pa) & PGOFSET) == 0); | | 2774 | KDASSERT(((va | pa) & PGOFSET) == 0); |
2778 | | | 2775 | |
2779 | /* | | 2776 | /* |
2780 | * Get a pointer to the page. Later on in this function, we | | 2777 | * Get a pointer to the page. Later on in this function, we |
2781 | * test for a managed page by checking pg != NULL. | | 2778 | * test for a managed page by checking pg != NULL. |
2782 | */ | | 2779 | */ |
2783 | pg = (pmap_initialized && ((flags & PMAP_UNMANAGED) == 0)) ? | | 2780 | pg = (pmap_initialized && ((flags & PMAP_UNMANAGED) == 0)) ? |
2784 | PHYS_TO_VM_PAGE(pa) : NULL; | | 2781 | PHYS_TO_VM_PAGE(pa) : NULL; |
2785 | | | 2782 | |
2786 | nflags = 0; | | 2783 | nflags = 0; |
2787 | if (prot & VM_PROT_WRITE) | | 2784 | if (prot & VM_PROT_WRITE) |
2788 | nflags |= PVF_WRITE; | | 2785 | nflags |= PVF_WRITE; |
2789 | if (prot & VM_PROT_EXECUTE) | | 2786 | if (prot & VM_PROT_EXECUTE) |
2790 | nflags |= PVF_EXEC; | | 2787 | nflags |= PVF_EXEC; |
2791 | if (flags & PMAP_WIRED) | | 2788 | if (flags & PMAP_WIRED) |
2792 | nflags |= PVF_WIRED; | | 2789 | nflags |= PVF_WIRED; |
2793 | | | 2790 | |
2794 | PMAP_MAP_TO_HEAD_LOCK(); | | 2791 | PMAP_MAP_TO_HEAD_LOCK(); |
2795 | pmap_acquire_pmap_lock(pm); | | 2792 | pmap_acquire_pmap_lock(pm); |
2796 | | | 2793 | |
2797 | /* | | 2794 | /* |
2798 | * Fetch the L2 bucket which maps this page, allocating one if | | 2795 | * Fetch the L2 bucket which maps this page, allocating one if |
2799 | * necessary for user pmaps. | | 2796 | * necessary for user pmaps. |
2800 | */ | | 2797 | */ |
2801 | if (pm == pmap_kernel()) | | 2798 | if (pm == pmap_kernel()) |
2802 | l2b = pmap_get_l2_bucket(pm, va); | | 2799 | l2b = pmap_get_l2_bucket(pm, va); |
2803 | else | | 2800 | else |
2804 | l2b = pmap_alloc_l2_bucket(pm, va); | | 2801 | l2b = pmap_alloc_l2_bucket(pm, va); |
2805 | if (l2b == NULL) { | | 2802 | if (l2b == NULL) { |
2806 | if (flags & PMAP_CANFAIL) { | | 2803 | if (flags & PMAP_CANFAIL) { |
2807 | pmap_release_pmap_lock(pm); | | 2804 | pmap_release_pmap_lock(pm); |
2808 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 2805 | PMAP_MAP_TO_HEAD_UNLOCK(); |
2809 | return (ENOMEM); | | 2806 | return (ENOMEM); |
2810 | } | | 2807 | } |
2811 | panic("pmap_enter: failed to allocate L2 bucket"); | | 2808 | panic("pmap_enter: failed to allocate L2 bucket"); |
2812 | } | | 2809 | } |
2813 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 2810 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
2814 | opte = *ptep; | | 2811 | opte = *ptep; |
2815 | npte = pa; | | 2812 | npte = pa; |
2816 | oflags = 0; | | 2813 | oflags = 0; |
2817 | | | 2814 | |
2818 | if (opte) { | | 2815 | if (opte) { |
2819 | /* | | 2816 | /* |
2820 | * There is already a mapping at this address. | | 2817 | * There is already a mapping at this address. |
2821 | * If the physical address is different, lookup the | | 2818 | * If the physical address is different, lookup the |
2822 | * vm_page. | | 2819 | * vm_page. |
2823 | */ | | 2820 | */ |
2824 | if (l2pte_pa(opte) != pa) | | 2821 | if (l2pte_pa(opte) != pa) |
2825 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 2822 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
2826 | else | | 2823 | else |
2827 | opg = pg; | | 2824 | opg = pg; |
2828 | } else | | 2825 | } else |
2829 | opg = NULL; | | 2826 | opg = NULL; |
2830 | | | 2827 | |
2831 | if (pg) { | | 2828 | if (pg) { |
2832 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 2829 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
2833 | | | 2830 | |
2834 | /* | | 2831 | /* |
2835 | * This is to be a managed mapping. | | 2832 | * This is to be a managed mapping. |
2836 | */ | | 2833 | */ |
2837 | if ((flags & VM_PROT_ALL) || | | 2834 | if ((flags & VM_PROT_ALL) || |
2838 | (md->pvh_attrs & PVF_REF)) { | | 2835 | (md->pvh_attrs & PVF_REF)) { |
2839 | /* | | 2836 | /* |
2840 | * - The access type indicates that we don't need | | 2837 | * - The access type indicates that we don't need |
2841 | * to do referenced emulation. | | 2838 | * to do referenced emulation. |
2842 | * OR | | 2839 | * OR |
2843 | * - The physical page has already been referenced | | 2840 | * - The physical page has already been referenced |
2844 | * so no need to re-do referenced emulation here. | | 2841 | * so no need to re-do referenced emulation here. |
2845 | */ | | 2842 | */ |
2846 | npte |= l2pte_set_readonly(L2_S_PROTO); | | 2843 | npte |= l2pte_set_readonly(L2_S_PROTO); |
2847 | | | 2844 | |
2848 | nflags |= PVF_REF; | | 2845 | nflags |= PVF_REF; |
2849 | | | 2846 | |
2850 | if ((prot & VM_PROT_WRITE) != 0 && | | 2847 | if ((prot & VM_PROT_WRITE) != 0 && |
2851 | ((flags & VM_PROT_WRITE) != 0 || | | 2848 | ((flags & VM_PROT_WRITE) != 0 || |
2852 | (md->pvh_attrs & PVF_MOD) != 0)) { | | 2849 | (md->pvh_attrs & PVF_MOD) != 0)) { |
2853 | /* | | 2850 | /* |
2854 | * This is a writable mapping, and the | | 2851 | * This is a writable mapping, and the |
2855 | * page's mod state indicates it has | | 2852 | * page's mod state indicates it has |
2856 | * already been modified. Make it | | 2853 | * already been modified. Make it |
2857 | * writable from the outset. | | 2854 | * writable from the outset. |
2858 | */ | | 2855 | */ |
2859 | npte = l2pte_set_writable(npte); | | 2856 | npte = l2pte_set_writable(npte); |
2860 | nflags |= PVF_MOD; | | 2857 | nflags |= PVF_MOD; |
2861 | } | | 2858 | } |
2862 | } else { | | 2859 | } else { |
2863 | /* | | 2860 | /* |
2864 | * Need to do page referenced emulation. | | 2861 | * Need to do page referenced emulation. |
2865 | */ | | 2862 | */ |
2866 | npte |= L2_TYPE_INV; | | 2863 | npte |= L2_TYPE_INV; |
2867 | } | | 2864 | } |
2868 | | | 2865 | |
2869 | npte |= pte_l2_s_cache_mode; | | 2866 | npte |= pte_l2_s_cache_mode; |
2870 | | | 2867 | |
2871 | if (pg == opg) { | | 2868 | if (pg == opg) { |
2872 | /* | | 2869 | /* |
2873 | * We're changing the attrs of an existing mapping. | | 2870 | * We're changing the attrs of an existing mapping. |
2874 | */ | | 2871 | */ |
2875 | simple_lock(&md->pvh_slock); | | 2872 | simple_lock(&md->pvh_slock); |
2876 | oflags = pmap_modify_pv(md, pa, pm, va, | | 2873 | oflags = pmap_modify_pv(md, pa, pm, va, |
2877 | PVF_WRITE | PVF_EXEC | PVF_WIRED | | | 2874 | PVF_WRITE | PVF_EXEC | PVF_WIRED | |
2878 | PVF_MOD | PVF_REF, nflags); | | 2875 | PVF_MOD | PVF_REF, nflags); |
2879 | simple_unlock(&md->pvh_slock); | | 2876 | simple_unlock(&md->pvh_slock); |
2880 | | | 2877 | |
2881 | #ifdef PMAP_CACHE_VIVT | | 2878 | #ifdef PMAP_CACHE_VIVT |
2882 | /* | | 2879 | /* |
2883 | * We may need to flush the cache if we're | | 2880 | * We may need to flush the cache if we're |
2884 | * doing rw-ro... | | 2881 | * doing rw-ro... |
2885 | */ | | 2882 | */ |
2886 | if (pm->pm_cstate.cs_cache_d && | | 2883 | if (pm->pm_cstate.cs_cache_d && |
2887 | (oflags & PVF_NC) == 0 && | | 2884 | (oflags & PVF_NC) == 0 && |
2888 | l2pte_writable_p(opte) && | | 2885 | l2pte_writable_p(opte) && |
2889 | (prot & VM_PROT_WRITE) == 0) | | 2886 | (prot & VM_PROT_WRITE) == 0) |
2890 | cpu_dcache_wb_range(va, PAGE_SIZE); | | 2887 | cpu_dcache_wb_range(va, PAGE_SIZE); |
2891 | #endif | | 2888 | #endif |
2892 | } else { | | 2889 | } else { |
2893 | /* | | 2890 | /* |
2894 | * New mapping, or changing the backing page | | 2891 | * New mapping, or changing the backing page |
2895 | * of an existing mapping. | | 2892 | * of an existing mapping. |
2896 | */ | | 2893 | */ |
2897 | if (opg) { | | 2894 | if (opg) { |
2898 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 2895 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
2899 | paddr_t opa; | | 2896 | paddr_t opa = VM_PAGE_TO_PHYS(opg); |
2900 | | | | |
2901 | opa = VM_PAGE_TO_PHYS(opg); | | | |
2902 | | | 2897 | |
2903 | /* | | 2898 | /* |
2904 | * Replacing an existing mapping with a new one. | | 2899 | * Replacing an existing mapping with a new one. |
2905 | * It is part of our managed memory so we | | 2900 | * It is part of our managed memory so we |
2906 | * must remove it from the PV list | | 2901 | * must remove it from the PV list |
2907 | */ | | 2902 | */ |
2908 | simple_lock(&omd->pvh_slock); | | 2903 | simple_lock(&omd->pvh_slock); |
2909 | pv = pmap_remove_pv(omd, opa, pm, va); | | 2904 | pv = pmap_remove_pv(omd, opa, pm, va); |
2910 | pmap_vac_me_harder(omd, opa, pm, 0); | | 2905 | pmap_vac_me_harder(omd, opa, pm, 0); |
2911 | simple_unlock(&omd->pvh_slock); | | 2906 | simple_unlock(&omd->pvh_slock); |
2912 | oflags = pv->pv_flags; | | 2907 | oflags = pv->pv_flags; |
2913 | | | 2908 | |
2914 | #ifdef PMAP_CACHE_VIVT | | 2909 | #ifdef PMAP_CACHE_VIVT |
2915 | /* | | 2910 | /* |
2916 | * If the old mapping was valid (ref/mod | | 2911 | * If the old mapping was valid (ref/mod |
2917 | * emulation creates 'invalid' mappings | | 2912 | * emulation creates 'invalid' mappings |
2918 | * initially) then make sure to frob | | 2913 | * initially) then make sure to frob |
2919 | * the cache. | | 2914 | * the cache. |
2920 | */ | | 2915 | */ |
2921 | if ((oflags & PVF_NC) == 0 && | | 2916 | if ((oflags & PVF_NC) == 0 && |
2922 | l2pte_valid(opte)) { | | 2917 | l2pte_valid(opte)) { |
2923 | if (PV_BEEN_EXECD(oflags)) { | | 2918 | if (PV_BEEN_EXECD(oflags)) { |
2924 | pmap_idcache_wbinv_range(pm, va, | | 2919 | pmap_idcache_wbinv_range(pm, va, |
2925 | PAGE_SIZE); | | 2920 | PAGE_SIZE); |
2926 | } else | | 2921 | } else |
2927 | if (PV_BEEN_REFD(oflags)) { | | 2922 | if (PV_BEEN_REFD(oflags)) { |
2928 | pmap_dcache_wb_range(pm, va, | | 2923 | pmap_dcache_wb_range(pm, va, |
2929 | PAGE_SIZE, true, | | 2924 | PAGE_SIZE, true, |
2930 | (oflags & PVF_WRITE) == 0); | | 2925 | (oflags & PVF_WRITE) == 0); |
2931 | } | | 2926 | } |
2932 | } | | 2927 | } |
2933 | #endif | | 2928 | #endif |
2934 | } else | | 2929 | } else |
2935 | if ((pv = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){ | | 2930 | if ((pv = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){ |
2936 | if ((flags & PMAP_CANFAIL) == 0) | | 2931 | if ((flags & PMAP_CANFAIL) == 0) |
2937 | panic("pmap_enter: no pv entries"); | | 2932 | panic("pmap_enter: no pv entries"); |
2938 | | | 2933 | |
2939 | if (pm != pmap_kernel()) | | 2934 | if (pm != pmap_kernel()) |
2940 | pmap_free_l2_bucket(pm, l2b, 0); | | 2935 | pmap_free_l2_bucket(pm, l2b, 0); |
2941 | pmap_release_pmap_lock(pm); | | 2936 | pmap_release_pmap_lock(pm); |
2942 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 2937 | PMAP_MAP_TO_HEAD_UNLOCK(); |
2943 | NPDEBUG(PDB_ENTER, | | 2938 | NPDEBUG(PDB_ENTER, |
2944 | printf("pmap_enter: ENOMEM\n")); | | 2939 | printf("pmap_enter: ENOMEM\n")); |
2945 | return (ENOMEM); | | 2940 | return (ENOMEM); |
2946 | } | | 2941 | } |
2947 | | | 2942 | |
2948 | pmap_enter_pv(md, VM_PAGE_TO_PHYS(pg), pv, pm, va, nflags); | | 2943 | pmap_enter_pv(md, pa, pv, pm, va, nflags); |
2949 | } | | 2944 | } |
2950 | } else { | | 2945 | } else { |
2951 | /* | | 2946 | /* |
2952 | * We're mapping an unmanaged page. | | 2947 | * We're mapping an unmanaged page. |
2953 | * These are always readable, and possibly writable, from | | 2948 | * These are always readable, and possibly writable, from |
2954 | * the get-go, as we don't need to track ref/mod status. | | 2949 | * the get-go, as we don't need to track ref/mod status. |
2955 | */ | | 2950 | */ |
2956 | npte |= l2pte_set_readonly(L2_S_PROTO); | | 2951 | npte |= l2pte_set_readonly(L2_S_PROTO); |
2957 | if (prot & VM_PROT_WRITE) | | 2952 | if (prot & VM_PROT_WRITE) |
2958 | npte = l2pte_set_writable(npte); | | 2953 | npte = l2pte_set_writable(npte); |
2959 | | | 2954 | |
2960 | /* | | 2955 | /* |
2961 | * Make sure the vector table is mapped cacheable | | 2956 | * Make sure the vector table is mapped cacheable |
2962 | */ | | 2957 | */ |
2963 | if (pm != pmap_kernel() && va == vector_page) | | 2958 | if (pm != pmap_kernel() && va == vector_page) |
2964 | npte |= pte_l2_s_cache_mode; | | 2959 | npte |= pte_l2_s_cache_mode; |
2965 | | | 2960 | |
2966 | if (opg) { | | 2961 | if (opg) { |
2967 | /* | | 2962 | /* |
2968 | * Looks like there's an existing 'managed' mapping | | 2963 | * Looks like there's an existing 'managed' mapping |
2969 | * at this address. | | 2964 | * at this address. |
2970 | */ | | 2965 | */ |
2971 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 2966 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
| | | 2967 | paddr_t opa = VM_PAGE_TO_PHYS(opg); |
| | | 2968 | |
2972 | simple_lock(&omd->pvh_slock); | | 2969 | simple_lock(&omd->pvh_slock); |
2973 | pv = pmap_remove_pv(omd, VM_PAGE_TO_PHYS(opg), pm, va); | | 2970 | pv = pmap_remove_pv(omd, opa, pm, va); |
2974 | pmap_vac_me_harder(omd, VM_PAGE_TO_PHYS(opg), pm, 0); | | 2971 | pmap_vac_me_harder(omd, opa, pm, 0); |
2975 | simple_unlock(&omd->pvh_slock); | | 2972 | simple_unlock(&omd->pvh_slock); |
2976 | oflags = pv->pv_flags; | | 2973 | oflags = pv->pv_flags; |
2977 | | | 2974 | |
2978 | #ifdef PMAP_CACHE_VIVT | | 2975 | #ifdef PMAP_CACHE_VIVT |
2979 | if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { | | 2976 | if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { |
2980 | if (PV_BEEN_EXECD(oflags)) | | 2977 | if (PV_BEEN_EXECD(oflags)) |
2981 | pmap_idcache_wbinv_range(pm, va, | | 2978 | pmap_idcache_wbinv_range(pm, va, |
2982 | PAGE_SIZE); | | 2979 | PAGE_SIZE); |
2983 | else | | 2980 | else |
2984 | if (PV_BEEN_REFD(oflags)) | | 2981 | if (PV_BEEN_REFD(oflags)) |
2985 | pmap_dcache_wb_range(pm, va, PAGE_SIZE, | | 2982 | pmap_dcache_wb_range(pm, va, PAGE_SIZE, |
2986 | true, (oflags & PVF_WRITE) == 0); | | 2983 | true, (oflags & PVF_WRITE) == 0); |
2987 | } | | 2984 | } |
2988 | #endif | | 2985 | #endif |
2989 | pool_put(&pmap_pv_pool, pv); | | 2986 | pool_put(&pmap_pv_pool, pv); |
2990 | } | | 2987 | } |
2991 | } | | 2988 | } |
2992 | | | 2989 | |
2993 | /* | | 2990 | /* |
2994 | * Make sure userland mappings get the right permissions | | 2991 | * Make sure userland mappings get the right permissions |
2995 | */ | | 2992 | */ |
2996 | if (pm != pmap_kernel() && va != vector_page) | | 2993 | if (pm != pmap_kernel() && va != vector_page) |
2997 | npte |= L2_S_PROT_U; | | 2994 | npte |= L2_S_PROT_U; |
2998 | | | 2995 | |
2999 | /* | | 2996 | /* |
3000 | * Keep the stats up to date | | 2997 | * Keep the stats up to date |
3001 | */ | | 2998 | */ |
3002 | if (opte == 0) { | | 2999 | if (opte == 0) { |
3003 | l2b->l2b_occupancy++; | | 3000 | l2b->l2b_occupancy++; |
3004 | pm->pm_stats.resident_count++; | | 3001 | pm->pm_stats.resident_count++; |
3005 | } | | 3002 | } |
3006 | | | 3003 | |
3007 | NPDEBUG(PDB_ENTER, | | 3004 | NPDEBUG(PDB_ENTER, |
3008 | printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte)); | | 3005 | printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte)); |
3009 | | | 3006 | |
3010 | /* | | 3007 | /* |
3011 | * If this is just a wiring change, the two PTEs will be | | 3008 | * If this is just a wiring change, the two PTEs will be |
3012 | * identical, so there's no need to update the page table. | | 3009 | * identical, so there's no need to update the page table. |
3013 | */ | | 3010 | */ |
3014 | if (npte != opte) { | | 3011 | if (npte != opte) { |
3015 | bool is_cached = pmap_is_cached(pm); | | 3012 | bool is_cached = pmap_is_cached(pm); |
3016 | | | 3013 | |
3017 | *ptep = npte; | | 3014 | *ptep = npte; |
3018 | if (is_cached) { | | 3015 | if (is_cached) { |
3019 | /* | | 3016 | /* |
3020 | * We only need to frob the cache/tlb if this pmap | | 3017 | * We only need to frob the cache/tlb if this pmap |
3021 | * is current | | 3018 | * is current |
3022 | */ | | 3019 | */ |
3023 | PTE_SYNC(ptep); | | 3020 | PTE_SYNC(ptep); |
3024 | if (va != vector_page && l2pte_valid(npte)) { | | 3021 | if (va != vector_page && l2pte_valid(npte)) { |
3025 | /* | | 3022 | /* |
3026 | * This mapping is likely to be accessed as | | 3023 | * This mapping is likely to be accessed as |
3027 | * soon as we return to userland. Fix up the | | 3024 | * soon as we return to userland. Fix up the |
3028 | * L1 entry to avoid taking another | | 3025 | * L1 entry to avoid taking another |
3029 | * page/domain fault. | | 3026 | * page/domain fault. |
3030 | */ | | 3027 | */ |
3031 | pd_entry_t *pl1pd, l1pd; | | 3028 | pd_entry_t *pl1pd, l1pd; |
3032 | | | 3029 | |
3033 | pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)]; | | 3030 | pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)]; |
3034 | l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | | | 3031 | l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | |
3035 | L1_C_PROTO; | | 3032 | L1_C_PROTO; |
3036 | if (*pl1pd != l1pd) { | | 3033 | if (*pl1pd != l1pd) { |
3037 | *pl1pd = l1pd; | | 3034 | *pl1pd = l1pd; |
3038 | PTE_SYNC(pl1pd); | | 3035 | PTE_SYNC(pl1pd); |
3039 | } | | 3036 | } |
3040 | } | | 3037 | } |
3041 | } | | 3038 | } |
3042 | | | 3039 | |
3043 | if (PV_BEEN_EXECD(oflags)) | | 3040 | if (PV_BEEN_EXECD(oflags)) |
3044 | pmap_tlb_flushID_SE(pm, va); | | 3041 | pmap_tlb_flushID_SE(pm, va); |
3045 | else | | 3042 | else |
3046 | if (PV_BEEN_REFD(oflags)) | | 3043 | if (PV_BEEN_REFD(oflags)) |
3047 | pmap_tlb_flushD_SE(pm, va); | | 3044 | pmap_tlb_flushD_SE(pm, va); |
3048 | | | 3045 | |
3049 | NPDEBUG(PDB_ENTER, | | 3046 | NPDEBUG(PDB_ENTER, |
3050 | printf("pmap_enter: is_cached %d cs 0x%08x\n", | | 3047 | printf("pmap_enter: is_cached %d cs 0x%08x\n", |
3051 | is_cached, pm->pm_cstate.cs_all)); | | 3048 | is_cached, pm->pm_cstate.cs_all)); |
3052 | | | 3049 | |
3053 | if (pg != NULL) { | | 3050 | if (pg != NULL) { |
3054 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3051 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
| | | 3052 | |
3055 | simple_lock(&md->pvh_slock); | | 3053 | simple_lock(&md->pvh_slock); |
3056 | pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pm, va); | | 3054 | pmap_vac_me_harder(md, pa, pm, va); |
3057 | simple_unlock(&md->pvh_slock); | | 3055 | simple_unlock(&md->pvh_slock); |
3058 | } | | 3056 | } |
3059 | } | | 3057 | } |
3060 | #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) | | 3058 | #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) |
3061 | if (pg) { | | 3059 | if (pg) { |
3062 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3060 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
| | | 3061 | |
3063 | simple_lock(&md->pvh_slock); | | 3062 | simple_lock(&md->pvh_slock); |
3064 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 3063 | KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
3065 | KASSERT(((md->pvh_attrs & PVF_WRITE) == 0) == (md->urw_mappings + md->krw_mappings == 0)); | | 3064 | KASSERT(((md->pvh_attrs & PVF_WRITE) == 0) == (md->urw_mappings + md->krw_mappings == 0)); |
3066 | simple_unlock(&md->pvh_slock); | | 3065 | simple_unlock(&md->pvh_slock); |
3067 | } | | 3066 | } |
3068 | #endif | | 3067 | #endif |
3069 | | | 3068 | |
3070 | pmap_release_pmap_lock(pm); | | 3069 | pmap_release_pmap_lock(pm); |
3071 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 3070 | PMAP_MAP_TO_HEAD_UNLOCK(); |
3072 | | | 3071 | |
3073 | return (0); | | 3072 | return (0); |
3074 | } | | 3073 | } |
3075 | | | 3074 | |
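The referenced/modified emulation that pmap_enter() arranges above is worth seeing in isolation: the PTE goes in invalid (or read-only), the first access faults, and the fault handler upgrades the PTE while latching PVF_REF/PVF_MOD in the page's attributes. Below is a simplified sketch under assumed control flow; the real logic lives in pmap_fault_fixup() and is not reproduced here.

/*
 * Sketch of ref/mod emulation on the first fault; assumptions noted inline.
 */
static void
refmod_fault_sketch(struct vm_page_md *md, pt_entry_t *ptep, bool write)
{

	if (write) {
		/* first write: the page is now both referenced and modified */
		md->pvh_attrs |= PVF_REF | PVF_MOD;
		*ptep = l2pte_set_writable(*ptep);
	} else {
		/* first read: the page is now referenced */
		md->pvh_attrs |= PVF_REF;
		/* (clear L2_TYPE_INV so the entry becomes a valid RO PTE) */
	}
	PTE_SYNC(ptep);
	/* the real code then flushes the TLB entry for the faulting VA */
}
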
3076 | /* | | 3075 | /* |
3077 | * pmap_remove() | | 3076 | * pmap_remove() |
3078 | * | | 3077 | * |
3079 | * pmap_remove is responsible for nuking a number of mappings for a range | | 3078 | * pmap_remove is responsible for nuking a number of mappings for a range |
3080 | * of virtual address space in the current pmap. To do this efficiently | | 3079 | * of virtual address space in the current pmap. To do this efficiently |
3081 | * is interesting, because in a number of cases a wide virtual address | | 3080 | * is interesting, because in a number of cases a wide virtual address |
3082 | * range may be supplied that contains few actual mappings. So, the | | 3081 | * range may be supplied that contains few actual mappings. So, the |
3083 | * optimisations are: | | 3082 | * optimisations are: |
3084 | * 1. Skip over hunks of address space for which no L1 or L2 entry exists. | | 3083 | * 1. Skip over hunks of address space for which no L1 or L2 entry exists. |
3085 | * 2. Build up a list of pages we've hit, up to a maximum, so we can | | 3084 | * 2. Build up a list of pages we've hit, up to a maximum, so we can |
3086 | * maybe do just a partial cache clean (sketched below). This path of execution is | | 3085 | * maybe do just a partial cache clean (sketched below). This path of execution is |
3087 | * complicated by the fact that the cache must be flushed _before_ | | 3086 | * complicated by the fact that the cache must be flushed _before_ |
3088 | * the PTE is nuked, being a VAC :-) | | 3087 | * the PTE is nuked, being a VAC :-) |
3089 | * 3. If we're called after UVM calls pmap_remove_all(), we can defer | | 3088 | * 3. If we're called after UVM calls pmap_remove_all(), we can defer |
3090 | * all invalidations until pmap_update(), since pmap_remove_all() has | | 3089 | * all invalidations until pmap_update(), since pmap_remove_all() has |
3091 | * already flushed the cache. | | 3090 | * already flushed the cache. |
3092 | * 4. Maybe later fast-case a single page, but I don't think this is | | 3091 | * 4. Maybe later fast-case a single page, but I don't think this is |
3093 | * going to make _that_ much difference overall. | | 3092 | * going to make _that_ much difference overall. |
3094 | */ | | 3093 | */ |
3095 | | | 3094 | |
3096 | #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 | | 3095 | #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 |
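
Optimisation 2 in the comment above boils down to thresholded batching: do precise per-page cache/TLB work for up to PMAP_REMOVE_CLEAN_LIST_SIZE entries, then give up and pay for one global flush instead. Detached from the L2-bucket walk, the shape is roughly as follows; clean_and_flush_one() and clean_whole_cache() are hypothetical helpers.

/* Sketch: per-page work up to the batch limit, then one global flush. */
static void
batched_remove_sketch(const vaddr_t *pages, u_int n)
{
	u_int i;

	if (n <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
		for (i = 0; i < n; i++)
			clean_and_flush_one(pages[i]);
	} else {
		clean_whole_cache();
		/* TLB invalidations can now wait until pmap_update() */
	}
}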
3097 | | | 3096 | |
3098 | void | | 3097 | void |
3099 | pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva) | | 3098 | pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva) |
3100 | { | | 3099 | { |
3101 | struct l2_bucket *l2b; | | 3100 | struct l2_bucket *l2b; |
3102 | vaddr_t next_bucket; | | 3101 | vaddr_t next_bucket; |
3103 | pt_entry_t *ptep; | | 3102 | pt_entry_t *ptep; |
3104 | u_int cleanlist_idx, total, cnt; | | 3103 | u_int cleanlist_idx, total, cnt; |
3105 | struct { | | 3104 | struct { |
3106 | vaddr_t va; | | 3105 | vaddr_t va; |
3107 | pt_entry_t *ptep; | | 3106 | pt_entry_t *ptep; |
3108 | } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; | | 3107 | } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; |
3109 | u_int mappings, is_exec, is_refd; | | 3108 | u_int mappings, is_exec, is_refd; |
3110 | | | 3109 | |
3111 | NPDEBUG(PDB_REMOVE, printf("pmap_do_remove: pmap=%p sva=%08lx " | | 3110 | NPDEBUG(PDB_REMOVE, printf("pmap_do_remove: pmap=%p sva=%08lx " |
3112 | "eva=%08lx\n", pm, sva, eva)); | | 3111 | "eva=%08lx\n", pm, sva, eva)); |
3113 | | | 3112 | |
3114 | /* | | 3113 | /* |
3115 | * we lock in the pmap => pv_head direction | | 3114 | * we lock in the pmap => pv_head direction |
3116 | */ | | 3115 | */ |
3117 | PMAP_MAP_TO_HEAD_LOCK(); | | 3116 | PMAP_MAP_TO_HEAD_LOCK(); |
3118 | pmap_acquire_pmap_lock(pm); | | 3117 | pmap_acquire_pmap_lock(pm); |
3119 | | | 3118 | |
3120 | if (pm->pm_remove_all || !pmap_is_cached(pm)) { | | 3119 | if (pm->pm_remove_all || !pmap_is_cached(pm)) { |
3121 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; | | 3120 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; |
3122 | if (pm->pm_cstate.cs_tlb == 0) | | 3121 | if (pm->pm_cstate.cs_tlb == 0) |
3123 | pm->pm_remove_all = true; | | 3122 | pm->pm_remove_all = true; |
3124 | } else | | 3123 | } else |
3125 | cleanlist_idx = 0; | | 3124 | cleanlist_idx = 0; |
3126 | | | 3125 | |
3127 | total = 0; | | 3126 | total = 0; |
3128 | | | 3127 | |
3129 | while (sva < eva) { | | 3128 | while (sva < eva) { |
3130 | /* | | 3129 | /* |
3131 | * Do one L2 bucket's worth at a time. | | 3130 | * Do one L2 bucket's worth at a time. |
3132 | */ | | 3131 | */ |
3133 | next_bucket = L2_NEXT_BUCKET(sva); | | 3132 | next_bucket = L2_NEXT_BUCKET(sva); |
3134 | if (next_bucket > eva) | | 3133 | if (next_bucket > eva) |
3135 | next_bucket = eva; | | 3134 | next_bucket = eva; |
3136 | | | 3135 | |
3137 | l2b = pmap_get_l2_bucket(pm, sva); | | 3136 | l2b = pmap_get_l2_bucket(pm, sva); |
3138 | if (l2b == NULL) { | | 3137 | if (l2b == NULL) { |
3139 | sva = next_bucket; | | 3138 | sva = next_bucket; |
3140 | continue; | | 3139 | continue; |
3141 | } | | 3140 | } |
3142 | | | 3141 | |
3143 | ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 3142 | ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
3144 | | | 3143 | |
3145 | for (mappings = 0; sva < next_bucket; sva += PAGE_SIZE, ptep++){ | | 3144 | for (mappings = 0; sva < next_bucket; sva += PAGE_SIZE, ptep++){ |
3146 | struct vm_page *pg; | | 3145 | struct vm_page *pg; |
3147 | pt_entry_t pte; | | 3146 | pt_entry_t pte; |
3148 | paddr_t pa; | | 3147 | paddr_t pa; |
3149 | | | 3148 | |
3150 | pte = *ptep; | | 3149 | pte = *ptep; |
3151 | | | 3150 | |
3152 | if (pte == 0) { | | 3151 | if (pte == 0) { |
3153 | /* Nothing here, move along */ | | 3152 | /* Nothing here, move along */ |
3154 | continue; | | 3153 | continue; |
3155 | } | | 3154 | } |
3156 | | | 3155 | |
3157 | pa = l2pte_pa(pte); | | 3156 | pa = l2pte_pa(pte); |
3158 | is_exec = 0; | | 3157 | is_exec = 0; |
3159 | is_refd = 1; | | 3158 | is_refd = 1; |
3160 | | | 3159 | |
3161 | /* | | 3160 | /* |
3162 | * Update flags. In a number of circumstances, | | 3161 | * Update flags. In a number of circumstances, |
3163 | * we could cluster a lot of these and do a | | 3162 | * we could cluster a lot of these and do a |
3164 | * number of sequential pages in one go. | | 3163 | * number of sequential pages in one go. |
3165 | */ | | 3164 | */ |
3166 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { | | 3165 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { |
3167 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3166 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3168 | struct pv_entry *pv; | | 3167 | struct pv_entry *pv; |
| | | 3168 | |
3169 | simple_lock(&md->pvh_slock); | | 3169 | simple_lock(&md->pvh_slock); |
3170 | pv = pmap_remove_pv(md, VM_PAGE_TO_PHYS(pg), pm, sva); | | 3170 | pv = pmap_remove_pv(md, pa, pm, sva); |
3171 | pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pm, 0); | | 3171 | pmap_vac_me_harder(md, pa, pm, 0); |
3172 | simple_unlock(&md->pvh_slock); | | 3172 | simple_unlock(&md->pvh_slock); |
3173 | if (pv != NULL) { | | 3173 | if (pv != NULL) { |
3174 | if (pm->pm_remove_all == false) { | | 3174 | if (pm->pm_remove_all == false) { |
3175 | is_exec = | | 3175 | is_exec = |
3176 | PV_BEEN_EXECD(pv->pv_flags); | | 3176 | PV_BEEN_EXECD(pv->pv_flags); |
3177 | is_refd = | | 3177 | is_refd = |
3178 | PV_BEEN_REFD(pv->pv_flags); | | 3178 | PV_BEEN_REFD(pv->pv_flags); |
3179 | } | | 3179 | } |
3180 | pool_put(&pmap_pv_pool, pv); | | 3180 | pool_put(&pmap_pv_pool, pv); |
3181 | } | | 3181 | } |
3182 | } | | 3182 | } |
3183 | mappings++; | | 3183 | mappings++; |
3184 | | | 3184 | |
3185 | if (!l2pte_valid(pte)) { | | 3185 | if (!l2pte_valid(pte)) { |
3186 | /* | | 3186 | /* |
3187 | * Ref/Mod emulation is still active for this | | 3187 | * Ref/Mod emulation is still active for this |
3188 | * mapping, therefore it has not yet been | | 3188 | * mapping, therefore it has not yet been |
3189 | * accessed. No need to frob the cache/tlb. | | 3189 | * accessed. No need to frob the cache/tlb. |
3190 | */ | | 3190 | */ |
3191 | *ptep = 0; | | 3191 | *ptep = 0; |
3192 | PTE_SYNC_CURRENT(pm, ptep); | | 3192 | PTE_SYNC_CURRENT(pm, ptep); |
3193 | continue; | | 3193 | continue; |
3194 | } | | 3194 | } |
3195 | | | 3195 | |
3196 | if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3196 | if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3197 | /* Add to the clean list. */ | | 3197 | /* Add to the clean list. */ |
3198 | cleanlist[cleanlist_idx].ptep = ptep; | | 3198 | cleanlist[cleanlist_idx].ptep = ptep; |
3199 | cleanlist[cleanlist_idx].va = | | 3199 | cleanlist[cleanlist_idx].va = |
3200 | sva | (is_exec & 1); | | 3200 | sva | (is_exec & 1); |
3201 | cleanlist_idx++; | | 3201 | cleanlist_idx++; |
3202 | } else | | 3202 | } else |
3203 | if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3203 | if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3204 | /* Nuke everything if needed. */ | | 3204 | /* Nuke everything if needed. */ |
3205 | #ifdef PMAP_CACHE_VIVT | | 3205 | #ifdef PMAP_CACHE_VIVT |
3206 | pmap_idcache_wbinv_all(pm); | | 3206 | pmap_idcache_wbinv_all(pm); |
3207 | #endif | | 3207 | #endif |
3208 | pmap_tlb_flushID(pm); | | 3208 | pmap_tlb_flushID(pm); |
3209 | | | 3209 | |
3210 | /* | | 3210 | /* |
3211 | * Roll back the previous PTE list, | | 3211 | * Roll back the previous PTE list, |
3212 | * and zero out the current PTE. | | 3212 | * and zero out the current PTE. |
3213 | */ | | 3213 | */ |
3214 | for (cnt = 0; | | 3214 | for (cnt = 0; |
3215 | cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { | | 3215 | cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { |
3216 | *cleanlist[cnt].ptep = 0; | | 3216 | *cleanlist[cnt].ptep = 0; |
3217 | PTE_SYNC(cleanlist[cnt].ptep); | | 3217 | PTE_SYNC(cleanlist[cnt].ptep); |
3218 | } | | 3218 | } |
3219 | *ptep = 0; | | 3219 | *ptep = 0; |
3220 | PTE_SYNC(ptep); | | 3220 | PTE_SYNC(ptep); |
3221 | cleanlist_idx++; | | 3221 | cleanlist_idx++; |
3222 | pm->pm_remove_all = true; | | 3222 | pm->pm_remove_all = true; |
3223 | } else { | | 3223 | } else { |
3224 | *ptep = 0; | | 3224 | *ptep = 0; |
3225 | PTE_SYNC(ptep); | | 3225 | PTE_SYNC(ptep); |
3226 | if (pm->pm_remove_all == false) { | | 3226 | if (pm->pm_remove_all == false) { |
3227 | if (is_exec) | | 3227 | if (is_exec) |
3228 | pmap_tlb_flushID_SE(pm, sva); | | 3228 | pmap_tlb_flushID_SE(pm, sva); |
3229 | else | | 3229 | else |
3230 | if (is_refd) | | 3230 | if (is_refd) |
3231 | pmap_tlb_flushD_SE(pm, sva); | | 3231 | pmap_tlb_flushD_SE(pm, sva); |
3232 | } | | 3232 | } |
3233 | } | | 3233 | } |
3234 | } | | 3234 | } |
3235 | | | 3235 | |
3236 | /* | | 3236 | /* |
3237 | * Deal with any left overs | | 3237 | * Deal with any left overs |
3238 | */ | | 3238 | */ |
3239 | if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3239 | if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3240 | total += cleanlist_idx; | | 3240 | total += cleanlist_idx; |
3241 | for (cnt = 0; cnt < cleanlist_idx; cnt++) { | | 3241 | for (cnt = 0; cnt < cleanlist_idx; cnt++) { |
3242 | if (pm->pm_cstate.cs_all != 0) { | | 3242 | if (pm->pm_cstate.cs_all != 0) { |
3243 | vaddr_t clva = cleanlist[cnt].va & ~1; | | 3243 | vaddr_t clva = cleanlist[cnt].va & ~1; |
3244 | if (cleanlist[cnt].va & 1) { | | 3244 | if (cleanlist[cnt].va & 1) { |
3245 | #ifdef PMAP_CACHE_VIVT | | 3245 | #ifdef PMAP_CACHE_VIVT |
3246 | pmap_idcache_wbinv_range(pm, | | 3246 | pmap_idcache_wbinv_range(pm, |
3247 | clva, PAGE_SIZE); | | 3247 | clva, PAGE_SIZE); |
3248 | #endif | | 3248 | #endif |
3249 | pmap_tlb_flushID_SE(pm, clva); | | 3249 | pmap_tlb_flushID_SE(pm, clva); |
3250 | } else { | | 3250 | } else { |
3251 | #ifdef PMAP_CACHE_VIVT | | 3251 | #ifdef PMAP_CACHE_VIVT |
3252 | pmap_dcache_wb_range(pm, | | 3252 | pmap_dcache_wb_range(pm, |
3253 | clva, PAGE_SIZE, true, | | 3253 | clva, PAGE_SIZE, true, |
3254 | false); | | 3254 | false); |
3255 | #endif | | 3255 | #endif |
3256 | pmap_tlb_flushD_SE(pm, clva); | | 3256 | pmap_tlb_flushD_SE(pm, clva); |
3257 | } | | 3257 | } |
3258 | } | | 3258 | } |
3259 | *cleanlist[cnt].ptep = 0; | | 3259 | *cleanlist[cnt].ptep = 0; |
3260 | PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); | | 3260 | PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); |
3261 | } | | 3261 | } |
3262 | | | 3262 | |
3263 | /* | | 3263 | /* |
3264 | * If it looks like we're removing a whole bunch | | 3264 | * If it looks like we're removing a whole bunch |
3265 | * of mappings, it's faster to just write-back | | 3265 | * of mappings, it's faster to just write-back |
3266 | * the whole cache now and defer TLB flushes until | | 3266 | * the whole cache now and defer TLB flushes until |
3267 | * pmap_update() is called. | | 3267 | * pmap_update() is called. |
3268 | */ | | 3268 | */ |
3269 | if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) | | 3269 | if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) |
3270 | cleanlist_idx = 0; | | 3270 | cleanlist_idx = 0; |
3271 | else { | | 3271 | else { |
3272 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; | | 3272 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; |
3273 | #ifdef PMAP_CACHE_VIVT | | 3273 | #ifdef PMAP_CACHE_VIVT |
3274 | pmap_idcache_wbinv_all(pm); | | 3274 | pmap_idcache_wbinv_all(pm); |
3275 | #endif | | 3275 | #endif |
3276 | pm->pm_remove_all = true; | | 3276 | pm->pm_remove_all = true; |
3277 | } | | 3277 | } |
3278 | } | | 3278 | } |
3279 | | | 3279 | |
3280 | pmap_free_l2_bucket(pm, l2b, mappings); | | 3280 | pmap_free_l2_bucket(pm, l2b, mappings); |
3281 | pm->pm_stats.resident_count -= mappings; | | 3281 | pm->pm_stats.resident_count -= mappings; |
3282 | } | | 3282 | } |
3283 | | | 3283 | |
3284 | pmap_release_pmap_lock(pm); | | 3284 | pmap_release_pmap_lock(pm); |
3285 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 3285 | PMAP_MAP_TO_HEAD_UNLOCK(); |
3286 | } | | 3286 | } |
3287 | | | 3287 | |
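/*
 * Illustrative sketch only (not compiled): the batching strategy used
 * by pmap_remove() above, distilled.  Per-page invalidations are
 * queued on a small "clean list"; once the list overflows, one global
 * flush is cheaper than many per-page ones, and pm_remove_all tells
 * pmap_update() that a full TLB flush is still pending.  The helper
 * name and struct below are hypothetical.
 */
#if 0
struct clean_ent {
        pt_entry_t *ptep;
        vaddr_t va;
};

static void
remove_flush_batch(pmap_t pm, struct clean_ent *list, u_int n)
{
        u_int i;

        if (n <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
                /* Few mappings: invalidate and flush each page. */
                for (i = 0; i < n; i++) {
                        *list[i].ptep = 0;
                        PTE_SYNC(list[i].ptep);
                        pmap_tlb_flushID_SE(pm, list[i].va);
                }
        } else {
                /* Many mappings: one global flush wins. */
                pmap_tlb_flushID(pm);
                pm->pm_remove_all = true;
        }
}
#endif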
3288 | #ifdef PMAP_CACHE_VIPT | | 3288 | #ifdef PMAP_CACHE_VIPT |
3289 | static struct pv_entry * | | 3289 | static struct pv_entry * |
3290 | pmap_kremove_pg(struct vm_page *pg, vaddr_t va) | | 3290 | pmap_kremove_pg(struct vm_page *pg, vaddr_t va) |
3291 | { | | 3291 | { |
3292 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3292 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
| | | 3293 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
3293 | struct pv_entry *pv; | | 3294 | struct pv_entry *pv; |
3294 | | | 3295 | |
3295 | simple_lock(&md->pvh_slock); | | 3296 | simple_lock(&md->pvh_slock); |
3296 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); | | 3297 | KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); |
3297 | KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); | | 3298 | KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); |
3298 | | | 3299 | |
3299 | pv = pmap_remove_pv(md, VM_PAGE_TO_PHYS(pg), pmap_kernel(), va); | | 3300 | pv = pmap_remove_pv(md, pa, pmap_kernel(), va); |
3300 | KASSERT(pv); | | 3301 | KASSERT(pv); |
3301 | KASSERT(pv->pv_flags & PVF_KENTRY); | | 3302 | KASSERT(pv->pv_flags & PVF_KENTRY); |
3302 | | | 3303 | |
3303 | /* | | 3304 | /* |
3304 | * If we are removing a writeable mapping to a cached exec page, | | 3305 | * If we are removing a writeable mapping to a cached exec page, |
3306 | * and it's the last mapping, clear its exec-ness; otherwise sync | | 3307 | * and it's the last mapping, clear its exec-ness; otherwise sync |
3306 | * the page to the icache. | | 3307 | * the page to the icache. |
3307 | */ | | 3308 | */ |
3308 | if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC | | 3309 | if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC |
3309 | && (pv->pv_flags & PVF_WRITE) != 0) { | | 3310 | && (pv->pv_flags & PVF_WRITE) != 0) { |
3310 | if (SLIST_EMPTY(&md->pvh_list)) { | | 3311 | if (SLIST_EMPTY(&md->pvh_list)) { |
3311 | md->pvh_attrs &= ~PVF_EXEC; | | 3312 | md->pvh_attrs &= ~PVF_EXEC; |
3312 | PMAPCOUNT(exec_discarded_kremove); | | 3313 | PMAPCOUNT(exec_discarded_kremove); |
3313 | } else { | | 3314 | } else { |
3314 | pmap_syncicache_page(md, VM_PAGE_TO_PHYS(pg)); | | 3315 | pmap_syncicache_page(md, pa); |
3315 | PMAPCOUNT(exec_synced_kremove); | | 3316 | PMAPCOUNT(exec_synced_kremove); |
3316 | } | | 3317 | } |
3317 | } | | 3318 | } |
3318 | pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pmap_kernel(), 0); | | 3319 | pmap_vac_me_harder(md, pa, pmap_kernel(), 0); |
3319 | simple_unlock(&md->pvh_slock); | | 3320 | simple_unlock(&md->pvh_slock); |
3320 | | | 3321 | |
3321 | return pv; | | 3322 | return pv; |
3322 | } | | 3323 | } |
3323 | #endif /* PMAP_CACHE_VIPT */ | | 3324 | #endif /* PMAP_CACHE_VIPT */ |
3324 | | | 3325 | |
3325 | /* | | 3326 | /* |
3326 | * pmap_kenter_pa: enter an unmanaged, wired kernel mapping | | 3327 | * pmap_kenter_pa: enter an unmanaged, wired kernel mapping |
3327 | * | | 3328 | * |
3328 | * We assume there is already sufficient KVM space available | | 3329 | * We assume there is already sufficient KVM space available |
3329 | * to do this, as we can't allocate L2 descriptor tables/metadata | | 3330 | * to do this, as we can't allocate L2 descriptor tables/metadata |
3330 | * from here. | | 3331 | * from here. |
3331 | */ | | 3332 | */ |
3332 | void | | 3333 | void |
3333 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 3334 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
3334 | { | | 3335 | { |
3335 | struct l2_bucket *l2b; | | 3336 | struct l2_bucket *l2b; |
3336 | pt_entry_t *ptep, opte; | | 3337 | pt_entry_t *ptep, opte; |
3337 | #ifdef PMAP_CACHE_VIVT | | 3338 | #ifdef PMAP_CACHE_VIVT |
3338 | struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL; | | 3339 | struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL; |
3339 | #endif | | 3340 | #endif |
3340 | #ifdef PMAP_CACHE_VIPT | | 3341 | #ifdef PMAP_CACHE_VIPT |
3341 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); | | 3342 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); |
3342 | struct vm_page *opg; | | 3343 | struct vm_page *opg; |
3343 | struct pv_entry *pv = NULL; | | 3344 | struct pv_entry *pv = NULL; |
3344 | #endif | | 3345 | #endif |
3345 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3346 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3346 | | | 3347 | |
3347 | NPDEBUG(PDB_KENTER, | | 3348 | NPDEBUG(PDB_KENTER, |
3348 | printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x pg %p md %p\n", | | 3349 | printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n", |
3349 | va, pa, prot, pg, md)); | | 3350 | va, pa, prot)); |
3350 | | | 3351 | |
3351 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); | | 3352 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); |
3352 | KDASSERT(l2b != NULL); | | 3353 | KDASSERT(l2b != NULL); |
3353 | | | 3354 | |
3354 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3355 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3355 | opte = *ptep; | | 3356 | opte = *ptep; |
3356 | | | 3357 | |
3357 | if (opte == 0) { | | 3358 | if (opte == 0) { |
3358 | PMAPCOUNT(kenter_mappings); | | 3359 | PMAPCOUNT(kenter_mappings); |
3359 | l2b->l2b_occupancy++; | | 3360 | l2b->l2b_occupancy++; |
3360 | } else { | | 3361 | } else { |
3361 | PMAPCOUNT(kenter_remappings); | | 3362 | PMAPCOUNT(kenter_remappings); |
3362 | #ifdef PMAP_CACHE_VIPT | | 3363 | #ifdef PMAP_CACHE_VIPT |
3363 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3364 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3364 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 3365 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
3365 | if (opg) { | | 3366 | if (opg) { |
3366 | KASSERT(opg != pg); | | 3367 | KASSERT(opg != pg); |
3367 | KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0); | | 3368 | KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0); |
3368 | KASSERT((flags & PMAP_KMPAGE) == 0); | | 3369 | KASSERT((flags & PMAP_KMPAGE) == 0); |
3369 | simple_lock(&omd->pvh_slock); | | 3370 | simple_lock(&omd->pvh_slock); |
3370 | pv = pmap_kremove_pg(opg, va); | | 3371 | pv = pmap_kremove_pg(opg, va); |
3371 | simple_unlock(&omd->pvh_slock); | | 3372 | simple_unlock(&omd->pvh_slock); |
3372 | } | | 3373 | } |
3373 | #endif | | 3374 | #endif |
3374 | if (l2pte_valid(opte)) { | | 3375 | if (l2pte_valid(opte)) { |
3375 | #ifdef PMAP_CACHE_VIVT | | 3376 | #ifdef PMAP_CACHE_VIVT |
3376 | cpu_dcache_wbinv_range(va, PAGE_SIZE); | | 3377 | cpu_dcache_wbinv_range(va, PAGE_SIZE); |
3377 | #endif | | 3378 | #endif |
3378 | cpu_tlb_flushD_SE(va); | | 3379 | cpu_tlb_flushD_SE(va); |
3379 | cpu_cpwait(); | | 3380 | cpu_cpwait(); |
3380 | } | | 3381 | } |
3381 | } | | 3382 | } |
3382 | | | 3383 | |
3383 | *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | | | 3384 | *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | |
3384 | pte_l2_s_cache_mode; | | 3385 | pte_l2_s_cache_mode; |
3385 | PTE_SYNC(ptep); | | 3386 | PTE_SYNC(ptep); |
3386 | | | 3387 | |
3387 | if (pg) { | | 3388 | if (pg) { |
3388 | if (flags & PMAP_KMPAGE) { | | 3389 | if (flags & PMAP_KMPAGE) { |
3389 | simple_lock(&md->pvh_slock); | | 3390 | simple_lock(&md->pvh_slock); |
3390 | KASSERT(md->urw_mappings == 0); | | 3391 | KASSERT(md->urw_mappings == 0); |
3391 | KASSERT(md->uro_mappings == 0); | | 3392 | KASSERT(md->uro_mappings == 0); |
3392 | KASSERT(md->krw_mappings == 0); | | 3393 | KASSERT(md->krw_mappings == 0); |
3393 | KASSERT(md->kro_mappings == 0); | | 3394 | KASSERT(md->kro_mappings == 0); |
3394 | #ifdef PMAP_CACHE_VIPT | | 3395 | #ifdef PMAP_CACHE_VIPT |
3395 | KASSERT(pv == NULL); | | 3396 | KASSERT(pv == NULL); |
3396 | KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0); | | 3397 | KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0); |
3397 | KASSERT((md->pvh_attrs & PVF_NC) == 0); | | 3398 | KASSERT((md->pvh_attrs & PVF_NC) == 0); |
3398 | /* if there is a color conflict, evict from cache. */ | | 3399 | /* if there is a color conflict, evict from cache. */ |
3399 | if (pmap_is_page_colored_p(md) | | 3400 | if (pmap_is_page_colored_p(md) |
3400 | && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) { | | 3401 | && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) { |
3401 | PMAPCOUNT(vac_color_change); | | 3402 | PMAPCOUNT(vac_color_change); |
3402 | pmap_flush_page(md, VM_PAGE_TO_PHYS(pg), PMAP_FLUSH_PRIMARY); | | 3403 | pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); |
3403 | } else if (md->pvh_attrs & PVF_MULTCLR) { | | 3404 | } else if (md->pvh_attrs & PVF_MULTCLR) { |
3404 | /* | | 3405 | /* |
3405 | * If this page has multiple colors, expunge | | 3406 | * If this page has multiple colors, expunge |
3406 | * them. | | 3407 | * them. |
3407 | */ | | 3408 | */ |
3408 | PMAPCOUNT(vac_flush_lots2); | | 3409 | PMAPCOUNT(vac_flush_lots2); |
3409 | pmap_flush_page(md, VM_PAGE_TO_PHYS(pg), PMAP_FLUSH_SECONDARY); | | 3410 | pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY); |
3410 | } | | 3411 | } |
3411 | md->pvh_attrs &= PAGE_SIZE - 1; | | 3412 | md->pvh_attrs &= PAGE_SIZE - 1; |
3412 | md->pvh_attrs |= PVF_KMPAGE | | 3413 | md->pvh_attrs |= PVF_KMPAGE |
3413 | | PVF_COLORED | PVF_DIRTY | | 3414 | | PVF_COLORED | PVF_DIRTY |
3414 | | (va & arm_cache_prefer_mask); | | 3415 | | (va & arm_cache_prefer_mask); |
3415 | #endif | | 3416 | #endif |
3416 | #ifdef PMAP_CACHE_VIVT | | 3417 | #ifdef PMAP_CACHE_VIVT |
3417 | md->pvh_attrs |= PVF_KMPAGE; | | 3418 | md->pvh_attrs |= PVF_KMPAGE; |
3418 | #endif | | 3419 | #endif |
3419 | pmap_kmpages++; | | 3420 | pmap_kmpages++; |
3420 | simple_unlock(&md->pvh_slock); | | 3421 | simple_unlock(&md->pvh_slock); |
3421 | #ifdef PMAP_CACHE_VIPT | | 3422 | #ifdef PMAP_CACHE_VIPT |
3422 | } else { | | 3423 | } else { |
3423 | if (pv == NULL) { | | 3424 | if (pv == NULL) { |
3424 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); | | 3425 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); |
3425 | KASSERT(pv != NULL); | | 3426 | KASSERT(pv != NULL); |
3426 | } | | 3427 | } |
3427 | pmap_enter_pv(md, VM_PAGE_TO_PHYS(pg), pv, pmap_kernel(), va, | | 3428 | pmap_enter_pv(md, pa, pv, pmap_kernel(), va, |
3428 | PVF_WIRED | PVF_KENTRY | | 3429 | PVF_WIRED | PVF_KENTRY |
3429 | | (prot & VM_PROT_WRITE ? PVF_WRITE : 0)); | | 3430 | | (prot & VM_PROT_WRITE ? PVF_WRITE : 0)); |
3430 | if ((prot & VM_PROT_WRITE) | | 3431 | if ((prot & VM_PROT_WRITE) |
3431 | && !(md->pvh_attrs & PVF_NC)) | | 3432 | && !(md->pvh_attrs & PVF_NC)) |
3432 | md->pvh_attrs |= PVF_DIRTY; | | 3433 | md->pvh_attrs |= PVF_DIRTY; |
3433 | KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 3434 | KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); |
3434 | simple_lock(&md->pvh_slock); | | 3435 | simple_lock(&md->pvh_slock); |
3435 | pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pmap_kernel(), va); | | 3436 | pmap_vac_me_harder(md, pa, pmap_kernel(), va); |
3436 | simple_unlock(&md->pvh_slock); | | 3437 | simple_unlock(&md->pvh_slock); |
3437 | #endif | | 3438 | #endif |
3438 | } | | 3439 | } |
3439 | #ifdef PMAP_CACHE_VIPT | | 3440 | #ifdef PMAP_CACHE_VIPT |
3440 | } else { | | 3441 | } else { |
3441 | if (pv != NULL) | | 3442 | if (pv != NULL) |
3442 | pool_put(&pmap_pv_pool, pv); | | 3443 | pool_put(&pmap_pv_pool, pv); |
3443 | #endif | | 3444 | #endif |
3444 | } | | 3445 | } |
3445 | } | | 3446 | } |
3446 | | | 3447 | |
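/*
 * Illustrative sketch only: the usual pairing of pmap_kenter_pa()
 * with pmap_kremove() for a temporary wired kernel mapping.  The KVA
 * is assumed to have been reserved beforehand, since pmap_kenter_pa()
 * cannot allocate L2 tables itself (see the comment above).
 */
#if 0
        vaddr_t kva;    /* previously reserved kernel VA */
        paddr_t pa;     /* physical page to map */

        pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
        /* ... access the page through kva ... */
        pmap_kremove(kva, PAGE_SIZE);
        pmap_update(pmap_kernel());     /* flush any deferred TLB work */
#endif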
3447 | void | | 3448 | void |
3448 | pmap_kremove(vaddr_t va, vsize_t len) | | 3449 | pmap_kremove(vaddr_t va, vsize_t len) |
3449 | { | | 3450 | { |
3450 | struct l2_bucket *l2b; | | 3451 | struct l2_bucket *l2b; |
3451 | pt_entry_t *ptep, *sptep, opte; | | 3452 | pt_entry_t *ptep, *sptep, opte; |
3452 | vaddr_t next_bucket, eva; | | 3453 | vaddr_t next_bucket, eva; |
3453 | u_int mappings; | | 3454 | u_int mappings; |
3454 | struct vm_page *opg; | | 3455 | struct vm_page *opg; |
3455 | | | 3456 | |
3456 | PMAPCOUNT(kenter_unmappings); | | 3457 | PMAPCOUNT(kenter_unmappings); |
3457 | | | 3458 | |
3458 | NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n", | | 3459 | NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n", |
3459 | va, len)); | | 3460 | va, len)); |
3460 | | | 3461 | |
3461 | eva = va + len; | | 3462 | eva = va + len; |
3462 | | | 3463 | |
3463 | while (va < eva) { | | 3464 | while (va < eva) { |
3464 | next_bucket = L2_NEXT_BUCKET(va); | | 3465 | next_bucket = L2_NEXT_BUCKET(va); |
3465 | if (next_bucket > eva) | | 3466 | if (next_bucket > eva) |
3466 | next_bucket = eva; | | 3467 | next_bucket = eva; |
3467 | | | 3468 | |
3468 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); | | 3469 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); |
3469 | KDASSERT(l2b != NULL); | | 3470 | KDASSERT(l2b != NULL); |
3470 | | | 3471 | |
3471 | sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3472 | sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3472 | mappings = 0; | | 3473 | mappings = 0; |
3473 | | | 3474 | |
3474 | while (va < next_bucket) { | | 3475 | while (va < next_bucket) { |
3475 | opte = *ptep; | | 3476 | opte = *ptep; |
3476 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3477 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3477 | if (opg) { | | 3478 | if (opg) { |
3478 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); | | 3479 | struct vm_page_md *omd = VM_PAGE_TO_MD(opg); |
| | | 3480 | |
3479 | if (omd->pvh_attrs & PVF_KMPAGE) { | | 3481 | if (omd->pvh_attrs & PVF_KMPAGE) { |
3480 | simple_lock(&omd->pvh_slock); | | 3482 | simple_lock(&omd->pvh_slock); |
3481 | KASSERT(omd->urw_mappings == 0); | | 3483 | KASSERT(omd->urw_mappings == 0); |
3482 | KASSERT(omd->uro_mappings == 0); | | 3484 | KASSERT(omd->uro_mappings == 0); |
3483 | KASSERT(omd->krw_mappings == 0); | | 3485 | KASSERT(omd->krw_mappings == 0); |
3484 | KASSERT(omd->kro_mappings == 0); | | 3486 | KASSERT(omd->kro_mappings == 0); |
3485 | omd->pvh_attrs &= ~PVF_KMPAGE; | | 3487 | omd->pvh_attrs &= ~PVF_KMPAGE; |
3486 | #ifdef PMAP_CACHE_VIPT | | 3488 | #ifdef PMAP_CACHE_VIPT |
3487 | omd->pvh_attrs &= ~PVF_WRITE; | | 3489 | omd->pvh_attrs &= ~PVF_WRITE; |
3488 | #endif | | 3490 | #endif |
3489 | pmap_kmpages--; | | 3491 | pmap_kmpages--; |
3490 | simple_unlock(&omd->pvh_slock); | | 3492 | simple_unlock(&omd->pvh_slock); |
3491 | #ifdef PMAP_CACHE_VIPT | | 3493 | #ifdef PMAP_CACHE_VIPT |
3492 | } else { | | 3494 | } else { |
3493 | pool_put(&pmap_pv_pool, | | 3495 | pool_put(&pmap_pv_pool, |
3494 | pmap_kremove_pg(opg, va)); | | 3496 | pmap_kremove_pg(opg, va)); |
3495 | #endif | | 3497 | #endif |
3496 | } | | 3498 | } |
3497 | } | | 3499 | } |
3498 | if (l2pte_valid(opte)) { | | 3500 | if (l2pte_valid(opte)) { |
3499 | #ifdef PMAP_CACHE_VIVT | | 3501 | #ifdef PMAP_CACHE_VIVT |
3500 | cpu_dcache_wbinv_range(va, PAGE_SIZE); | | 3502 | cpu_dcache_wbinv_range(va, PAGE_SIZE); |
3501 | #endif | | 3503 | #endif |
3502 | cpu_tlb_flushD_SE(va); | | 3504 | cpu_tlb_flushD_SE(va); |
3503 | } | | 3505 | } |
3504 | if (opte) { | | 3506 | if (opte) { |
3505 | *ptep = 0; | | 3507 | *ptep = 0; |
3506 | mappings++; | | 3508 | mappings++; |
3507 | } | | 3509 | } |
3508 | va += PAGE_SIZE; | | 3510 | va += PAGE_SIZE; |
3509 | ptep++; | | 3511 | ptep++; |
3510 | } | | 3512 | } |
3511 | KDASSERT(mappings <= l2b->l2b_occupancy); | | 3513 | KDASSERT(mappings <= l2b->l2b_occupancy); |
3512 | l2b->l2b_occupancy -= mappings; | | 3514 | l2b->l2b_occupancy -= mappings; |
3513 | PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); | | 3515 | PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); |
3514 | } | | 3516 | } |
3515 | cpu_cpwait(); | | 3517 | cpu_cpwait(); |
3516 | } | | 3518 | } |
3517 | | | 3519 | |
3518 | paddr_t | | 3520 | paddr_t |
3519 | pmap_mmap(vaddr_t addr, off_t off) | | 3521 | pmap_mmap(vaddr_t addr, off_t off) |
3520 | { | | 3522 | { |
3521 | | | 3523 | |
3522 | return arm_btop(vtophys(addr + off)); | | 3524 | return arm_btop(vtophys(addr + off)); |
3523 | } | | 3525 | } |
3524 | | | 3526 | |
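/*
 * Illustrative sketch only: pmap_mmap() above returns a page frame
 * number (arm_btop of the physical address), not a byte address, so a
 * hypothetical device d_mmap routine would return its result directly.
 * FOO_BUFSIZE and foo_buffer are made-up names.
 */
#if 0
static paddr_t
foo_mmap(dev_t dev, off_t off, int prot)
{

        if (off < 0 || off >= FOO_BUFSIZE)
                return (paddr_t)-1;     /* reject out-of-range offsets */
        return pmap_mmap((vaddr_t)foo_buffer, off);
}
#endif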
3525 | bool | | 3527 | bool |
3526 | pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) | | 3528 | pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) |
3527 | { | | 3529 | { |
3528 | struct l2_dtable *l2; | | 3530 | struct l2_dtable *l2; |
3529 | pd_entry_t *pl1pd, l1pd; | | 3531 | pd_entry_t *pl1pd, l1pd; |
3530 | pt_entry_t *ptep, pte; | | 3532 | pt_entry_t *ptep, pte; |
3531 | paddr_t pa; | | 3533 | paddr_t pa; |
3532 | u_int l1idx; | | 3534 | u_int l1idx; |
3533 | | | 3535 | |
3534 | pmap_acquire_pmap_lock(pm); | | 3536 | pmap_acquire_pmap_lock(pm); |
3535 | | | 3537 | |
3536 | l1idx = L1_IDX(va); | | 3538 | l1idx = L1_IDX(va); |
3537 | pl1pd = &pm->pm_l1->l1_kva[l1idx]; | | 3539 | pl1pd = &pm->pm_l1->l1_kva[l1idx]; |
3538 | l1pd = *pl1pd; | | 3540 | l1pd = *pl1pd; |
3539 | | | 3541 | |
3540 | if (l1pte_section_p(l1pd)) { | | 3542 | if (l1pte_section_p(l1pd)) { |
3541 | /* | | 3543 | /* |
3542 | * Section mappings should only happen for pmap_kernel() | | 3544 | * Section mappings should only happen for pmap_kernel() |
3543 | */ | | 3545 | */ |
3544 | KDASSERT(pm == pmap_kernel()); | | 3546 | KDASSERT(pm == pmap_kernel()); |
3545 | pmap_release_pmap_lock(pm); | | 3547 | pmap_release_pmap_lock(pm); |
3546 | pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); | | 3548 | pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); |
3547 | } else { | | 3549 | } else { |
3548 | /* | | 3550 | /* |
3549 | * Note that we can't rely on the validity of the L1 | | 3551 | * Note that we can't rely on the validity of the L1 |
3550 | * descriptor as an indication that a mapping exists. | | 3552 | * descriptor as an indication that a mapping exists. |
3551 | * We have to look it up in the L2 dtable. | | 3553 | * We have to look it up in the L2 dtable. |
3552 | */ | | 3554 | */ |
3553 | l2 = pm->pm_l2[L2_IDX(l1idx)]; | | 3555 | l2 = pm->pm_l2[L2_IDX(l1idx)]; |
3554 | | | 3556 | |
3555 | if (l2 == NULL || | | 3557 | if (l2 == NULL || |
3556 | (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { | | 3558 | (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { |
3557 | pmap_release_pmap_lock(pm); | | 3559 | pmap_release_pmap_lock(pm); |
3558 | return false; | | 3560 | return false; |
3559 | } | | 3561 | } |
3560 | | | 3562 | |
3561 | ptep = &ptep[l2pte_index(va)]; | | 3563 | ptep = &ptep[l2pte_index(va)]; |
3562 | pte = *ptep; | | 3564 | pte = *ptep; |
3563 | pmap_release_pmap_lock(pm); | | 3565 | pmap_release_pmap_lock(pm); |
3564 | | | 3566 | |
3565 | if (pte == 0) | | 3567 | if (pte == 0) |
3566 | return false; | | 3568 | return false; |
3567 | | | 3569 | |
3568 | switch (pte & L2_TYPE_MASK) { | | 3570 | switch (pte & L2_TYPE_MASK) { |
3569 | case L2_TYPE_L: | | 3571 | case L2_TYPE_L: |
3570 | pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); | | 3572 | pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); |
3571 | break; | | 3573 | break; |
3572 | | | 3574 | |
3573 | default: | | 3575 | default: |
3574 | pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); | | 3576 | pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); |
3575 | break; | | 3577 | break; |
3576 | } | | 3578 | } |
3577 | } | | 3579 | } |
3578 | | | 3580 | |
3579 | if (pap != NULL) | | 3581 | if (pap != NULL) |
3580 | *pap = pa; | | 3582 | *pap = pa; |
3581 | | | 3583 | |
3582 | return true; | | 3584 | return true; |
3583 | } | | 3585 | } |
3584 | | | 3586 | |
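/*
 * Illustrative sketch only: translating a VA to a PA with
 * pmap_extract().  A false return means no valid mapping exists at
 * that address.
 */
#if 0
        paddr_t pa;

        if (pmap_extract(pmap_kernel(), va, &pa))
                printf("va 0x%08lx -> pa 0x%08lx\n", va, pa);
        else
                printf("va 0x%08lx is not mapped\n", va);
#endif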
3585 | void | | 3587 | void |
3586 | pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) | | 3588 | pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) |
3587 | { | | 3589 | { |
3588 | struct l2_bucket *l2b; | | 3590 | struct l2_bucket *l2b; |
3589 | pt_entry_t *ptep, pte; | | 3591 | pt_entry_t *ptep, pte; |
3590 | vaddr_t next_bucket; | | 3592 | vaddr_t next_bucket; |
3591 | u_int flags; | | 3593 | u_int flags; |
3592 | u_int clr_mask; | | 3594 | u_int clr_mask; |
3593 | int flush; | | 3595 | int flush; |
3594 | | | 3596 | |
3595 | NPDEBUG(PDB_PROTECT, | | 3597 | NPDEBUG(PDB_PROTECT, |
3596 | printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n", | | 3598 | printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n", |
3597 | pm, sva, eva, prot)); | | 3599 | pm, sva, eva, prot)); |
3598 | | | 3600 | |
3599 | if ((prot & VM_PROT_READ) == 0) { | | 3601 | if ((prot & VM_PROT_READ) == 0) { |
3600 | pmap_remove(pm, sva, eva); | | 3602 | pmap_remove(pm, sva, eva); |
3601 | return; | | 3603 | return; |
3602 | } | | 3604 | } |
3603 | | | 3605 | |
3604 | if (prot & VM_PROT_WRITE) { | | 3606 | if (prot & VM_PROT_WRITE) { |
3605 | /* | | 3607 | /* |
3606 | * If this is a read->write transition, just ignore it and let | | 3608 | * If this is a read->write transition, just ignore it and let |
3607 | * uvm_fault() take care of it later. | | 3609 | * uvm_fault() take care of it later. |
3608 | */ | | 3610 | */ |
3609 | return; | | 3611 | return; |
3610 | } | | 3612 | } |
3611 | | | 3613 | |
3612 | PMAP_MAP_TO_HEAD_LOCK(); | | 3614 | PMAP_MAP_TO_HEAD_LOCK(); |
3613 | pmap_acquire_pmap_lock(pm); | | 3615 | pmap_acquire_pmap_lock(pm); |
3614 | | | 3616 | |
3615 | flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; | | 3617 | flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; |
3616 | flags = 0; | | 3618 | flags = 0; |
3617 | clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); | | 3619 | clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); |
3618 | | | 3620 | |
3619 | while (sva < eva) { | | 3621 | while (sva < eva) { |
3620 | next_bucket = L2_NEXT_BUCKET(sva); | | 3622 | next_bucket = L2_NEXT_BUCKET(sva); |
3621 | if (next_bucket > eva) | | 3623 | if (next_bucket > eva) |
3622 | next_bucket = eva; | | 3624 | next_bucket = eva; |
3623 | | | 3625 | |
3624 | l2b = pmap_get_l2_bucket(pm, sva); | | 3626 | l2b = pmap_get_l2_bucket(pm, sva); |
3625 | if (l2b == NULL) { | | 3627 | if (l2b == NULL) { |
3626 | sva = next_bucket; | | 3628 | sva = next_bucket; |
3627 | continue; | | 3629 | continue; |
3628 | } | | 3630 | } |
3629 | | | 3631 | |
3630 | ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 3632 | ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
3631 | | | 3633 | |
3632 | while (sva < next_bucket) { | | 3634 | while (sva < next_bucket) { |
3633 | pte = *ptep; | | 3635 | pte = *ptep; |
3634 | if (l2pte_valid(pte) != 0 && l2pte_writable_p(pte)) { | | 3636 | if (l2pte_valid(pte) != 0 && l2pte_writable_p(pte)) { |
3635 | struct vm_page *pg; | | 3637 | struct vm_page *pg; |
3636 | u_int f; | | 3638 | u_int f; |
3637 | | | 3639 | |
3638 | #ifdef PMAP_CACHE_VIVT | | 3640 | #ifdef PMAP_CACHE_VIVT |
3639 | /* | | 3641 | /* |
3640 | * OK, at this point, we know we're doing | | 3642 | * OK, at this point, we know we're doing |
3641 | * write-protect operation. If the pmap is | | 3643 | * write-protect operation. If the pmap is |
3642 | * active, write-back the page. | | 3644 | * active, write-back the page. |
3643 | */ | | 3645 | */ |
3644 | pmap_dcache_wb_range(pm, sva, PAGE_SIZE, | | 3646 | pmap_dcache_wb_range(pm, sva, PAGE_SIZE, |
3645 | false, false); | | 3647 | false, false); |
3646 | #endif | | 3648 | #endif |
3647 | | | 3649 | |
3648 | pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); | | 3650 | pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); |
3649 | pte = l2pte_set_readonly(pte); | | 3651 | pte = l2pte_set_readonly(pte); |
3650 | *ptep = pte; | | 3652 | *ptep = pte; |
3651 | PTE_SYNC(ptep); | | 3653 | PTE_SYNC(ptep); |
3652 | | | 3654 | |
3653 | if (pg != NULL) { | | 3655 | if (pg != NULL) { |
3654 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3656 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
| | | 3657 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
| | | 3658 | |
3655 | simple_lock(&md->pvh_slock); | | 3659 | simple_lock(&md->pvh_slock); |
3656 | f = pmap_modify_pv(md, VM_PAGE_TO_PHYS(pg), pm, sva, | | 3660 | f = pmap_modify_pv(md, pa, pm, sva, |
3657 | clr_mask, 0); | | 3661 | clr_mask, 0); |
3658 | pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pm, sva); | | 3662 | pmap_vac_me_harder(md, pa, pm, sva); |
3659 | simple_unlock(&md->pvh_slock); | | 3663 | simple_unlock(&md->pvh_slock); |
3660 | } else | | 3664 | } else |
3661 | f = PVF_REF | PVF_EXEC; | | 3665 | f = PVF_REF | PVF_EXEC; |
3662 | | | 3666 | |
3663 | if (flush >= 0) { | | 3667 | if (flush >= 0) { |
3664 | flush++; | | 3668 | flush++; |
3665 | flags |= f; | | 3669 | flags |= f; |
3666 | } else | | 3670 | } else |
3667 | if (PV_BEEN_EXECD(f)) | | 3671 | if (PV_BEEN_EXECD(f)) |
3668 | pmap_tlb_flushID_SE(pm, sva); | | 3672 | pmap_tlb_flushID_SE(pm, sva); |
3669 | else | | 3673 | else |
3670 | if (PV_BEEN_REFD(f)) | | 3674 | if (PV_BEEN_REFD(f)) |
3671 | pmap_tlb_flushD_SE(pm, sva); | | 3675 | pmap_tlb_flushD_SE(pm, sva); |
3672 | } | | 3676 | } |
3673 | | | 3677 | |
3674 | sva += PAGE_SIZE; | | 3678 | sva += PAGE_SIZE; |
3675 | ptep++; | | 3679 | ptep++; |
3676 | } | | 3680 | } |
3677 | } | | 3681 | } |
3678 | | | 3682 | |
3679 | pmap_release_pmap_lock(pm); | | 3683 | pmap_release_pmap_lock(pm); |
3680 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 3684 | PMAP_MAP_TO_HEAD_UNLOCK(); |
3681 | | | 3685 | |
3682 | if (flush) { | | 3686 | if (flush) { |
3683 | if (PV_BEEN_EXECD(flags)) | | 3687 | if (PV_BEEN_EXECD(flags)) |
3684 | pmap_tlb_flushID(pm); | | 3688 | pmap_tlb_flushID(pm); |
3685 | else | | 3689 | else |
3686 | if (PV_BEEN_REFD(flags)) | | 3690 | if (PV_BEEN_REFD(flags)) |
3687 | pmap_tlb_flushD(pm); | | 3691 | pmap_tlb_flushD(pm); |
3688 | } | | 3692 | } |
3689 | } | | 3693 | } |
3690 | | | 3694 | |
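/*
 * Illustrative sketch only: the flush heuristic above, distilled.
 * flush starts at 0 for ranges of four or more pages, so per-page TLB
 * flushes are skipped, page flags are accumulated, and one whole-TLB
 * flush is issued after the loop.  For smaller ranges flush is -1,
 * each modified page is flushed individually, and the final flush
 * reduces to a no-op because flags stays 0.
 */
#if 0
static bool
protect_defers_flush(vaddr_t sva, vaddr_t eva)
{

        /* Four pages or more: one whole-TLB flush beats per-page ones. */
        return (eva - sva) >= (PAGE_SIZE * 4);
}
#endif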
3691 | void | | 3695 | void |
3692 | pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) | | 3696 | pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) |
3693 | { | | 3697 | { |
3694 | struct l2_bucket *l2b; | | 3698 | struct l2_bucket *l2b; |
3695 | pt_entry_t *ptep; | | 3699 | pt_entry_t *ptep; |
3696 | vaddr_t next_bucket; | | 3700 | vaddr_t next_bucket; |
3697 | vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; | | 3701 | vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; |
3698 | | | 3702 | |
3699 | NPDEBUG(PDB_EXEC, | | 3703 | NPDEBUG(PDB_EXEC, |
3700 | printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n", | | 3704 | printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n", |
3701 | pm, sva, eva)); | | 3705 | pm, sva, eva)); |
3702 | | | 3706 | |
3703 | PMAP_MAP_TO_HEAD_LOCK(); | | 3707 | PMAP_MAP_TO_HEAD_LOCK(); |
3704 | pmap_acquire_pmap_lock(pm); | | 3708 | pmap_acquire_pmap_lock(pm); |
3705 | | | 3709 | |
3706 | while (sva < eva) { | | 3710 | while (sva < eva) { |
3707 | next_bucket = L2_NEXT_BUCKET(sva); | | 3711 | next_bucket = L2_NEXT_BUCKET(sva); |
3708 | if (next_bucket > eva) | | 3712 | if (next_bucket > eva) |
3709 | next_bucket = eva; | | 3713 | next_bucket = eva; |
3710 | | | 3714 | |
3711 | l2b = pmap_get_l2_bucket(pm, sva); | | 3715 | l2b = pmap_get_l2_bucket(pm, sva); |
3712 | if (l2b == NULL) { | | 3716 | if (l2b == NULL) { |
3713 | sva = next_bucket; | | 3717 | sva = next_bucket; |
3714 | continue; | | 3718 | continue; |
3715 | } | | 3719 | } |
3716 | | | 3720 | |
3717 | for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 3721 | for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
3718 | sva < next_bucket; | | 3722 | sva < next_bucket; |
3719 | sva += page_size, ptep++, page_size = PAGE_SIZE) { | | 3723 | sva += page_size, ptep++, page_size = PAGE_SIZE) { |
3720 | if (l2pte_valid(*ptep)) { | | 3724 | if (l2pte_valid(*ptep)) { |
3721 | cpu_icache_sync_range(sva, | | 3725 | cpu_icache_sync_range(sva, |
3722 | min(page_size, eva - sva)); | | 3726 | min(page_size, eva - sva)); |
3723 | } | | 3727 | } |
3724 | } | | 3728 | } |
3725 | } | | 3729 | } |
3726 | | | 3730 | |
3727 | pmap_release_pmap_lock(pm); | | 3731 | pmap_release_pmap_lock(pm); |
3728 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 3732 | PMAP_MAP_TO_HEAD_UNLOCK(); |
3729 | } | | 3733 | } |
3730 | | | 3734 | |
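/*
 * Worked example of the first-iteration arithmetic above: with 4KB
 * pages and an unaligned sva of 0x1234, trunc_page(sva) is 0x1000, so
 * the initial page_size is 0x1000 + 0x1000 - 0x1234 = 0xdcc bytes,
 * exactly the remainder of the first page.  After the first loop
 * iteration page_size is reset to a full PAGE_SIZE, and the min()
 * guards against overshooting eva on the final page.
 */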
3731 | void | | 3735 | void |
3732 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) | | 3736 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) |
3733 | { | | 3737 | { |
| | | 3738 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
| | | 3739 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
3734 | | | 3740 | |
3735 | NPDEBUG(PDB_PROTECT, | | 3741 | NPDEBUG(PDB_PROTECT, |
3736 | printf("pmap_page_protect: pg %p (0x%08lx), prot 0x%x\n", | | 3742 | printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n", |
3737 | pg, VM_PAGE_TO_PHYS(pg), prot)); | | 3743 | md, pa, prot)); |
3738 | | | 3744 | |
3739 | switch (prot) { | | 3745 | switch (prot) {
3740 | case VM_PROT_READ|VM_PROT_WRITE: | | 3746 | case VM_PROT_READ|VM_PROT_WRITE: |
3741 | #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) | | 3747 | #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) |
3742 | pmap_clearbit(pg, PVF_EXEC); | | 3748 | pmap_clearbit(md, pa, PVF_EXEC); |
3743 | break; | | 3749 | break; |
3744 | #endif | | 3750 | #endif |
3745 | case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: | | 3751 | case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: |
3746 | break; | | 3752 | break; |
3747 | | | 3753 | |
3748 | case VM_PROT_READ: | | 3754 | case VM_PROT_READ: |
3749 | #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) | | 3755 | #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) |
3750 | pmap_clearbit(pg, PVF_WRITE|PVF_EXEC); | | 3756 | pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC); |
3751 | break; | | 3757 | break; |
3752 | #endif | | 3758 | #endif |
3753 | case VM_PROT_READ|VM_PROT_EXECUTE: | | 3759 | case VM_PROT_READ|VM_PROT_EXECUTE: |
3754 | pmap_clearbit(pg, PVF_WRITE); | | 3760 | pmap_clearbit(md, pa, PVF_WRITE); |
3755 | break; | | 3761 | break; |
3756 | | | 3762 | |
3757 | default: | | 3763 | default: |
3758 | pmap_page_remove(pg); | | 3764 | pmap_page_remove(md, pa); |
3759 | break; | | 3765 | break; |
3760 | } | | 3766 | } |
3761 | } | | 3767 | } |
3762 | | | 3768 | |
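/*
 * Illustrative sketch only: how the VM system typically drives
 * pmap_page_protect().  Downgrading to read-only revokes write
 * permission from every mapping of the page; VM_PROT_NONE falls into
 * the default case above and removes all mappings, e.g. before the
 * page is freed or paged out.
 */
#if 0
        pmap_page_protect(pg, VM_PROT_READ);    /* write-protect everywhere */
        pmap_page_protect(pg, VM_PROT_NONE);    /* remove every mapping */
#endif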
3763 | /* | | 3769 | /* |
3764 | * pmap_clear_modify: | | 3770 | * pmap_clear_modify: |
3765 | * | | 3771 | * |
3766 | * Clear the "modified" attribute for a page. | | 3772 | * Clear the "modified" attribute for a page. |
3767 | */ | | 3773 | */ |
3768 | bool | | 3774 | bool |
3769 | pmap_clear_modify(struct vm_page *pg) | | 3775 | pmap_clear_modify(struct vm_page *pg) |
3770 | { | | 3776 | { |
3771 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3777 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
| | | 3778 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
3772 | bool rv; | | 3779 | bool rv; |
3773 | | | 3780 | |
3774 | if (md->pvh_attrs & PVF_MOD) { | | 3781 | if (md->pvh_attrs & PVF_MOD) { |
3775 | rv = true; | | 3782 | rv = true; |
3776 | #ifdef PMAP_CACHE_VIPT | | 3783 | #ifdef PMAP_CACHE_VIPT |
3777 | /* | | 3784 | /* |
3778 | * If we are going to clear the modified bit and there are | | 3785 | * If we are going to clear the modified bit and there are |
3779 | * no other modified bits set, flush the page to memory and | | 3786 | * no other modified bits set, flush the page to memory and |
3780 | * mark it clean. | | 3787 | * mark it clean. |
3781 | */ | | 3788 | */ |
3782 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD) | | 3789 | if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD) |
3783 | pmap_flush_page(md, VM_PAGE_TO_PHYS(pg), PMAP_CLEAN_PRIMARY); | | 3790 | pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); |
3784 | #endif | | 3791 | #endif |
3785 | pmap_clearbit(pg, PVF_MOD); | | 3792 | pmap_clearbit(md, pa, PVF_MOD); |
3786 | } else | | 3793 | } else |
3787 | rv = false; | | 3794 | rv = false; |
3788 | | | 3795 | |
3789 | return (rv); | | 3796 | return (rv); |
3790 | } | | 3797 | } |
3791 | | | 3798 | |
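/*
 * Illustrative sketch only: a pagedaemon-style use of the attribute
 * helpers.  The return value reports whether the attribute was set
 * before it was cleared, so a cleaning pass can tell whether the page
 * still needs to be written back.
 */
#if 0
        if (pmap_clear_modify(pg)) {
                /* The page was dirty: queue it for write-back. */
        }
#endif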
3792 | /* | | 3799 | /* |
3793 | * pmap_clear_reference: | | 3800 | * pmap_clear_reference: |
3794 | * | | 3801 | * |
3795 | * Clear the "referenced" attribute for a page. | | 3802 | * Clear the "referenced" attribute for a page. |
3796 | */ | | 3803 | */ |
3797 | bool | | 3804 | bool |
3798 | pmap_clear_reference(struct vm_page *pg) | | 3805 | pmap_clear_reference(struct vm_page *pg) |
3799 | { | | 3806 | { |
3800 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3807 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
| | | 3808 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
3801 | bool rv; | | 3809 | bool rv; |
3802 | | | 3810 | |
3803 | if (md->pvh_attrs & PVF_REF) { | | 3811 | if (md->pvh_attrs & PVF_REF) { |
3804 | rv = true; | | 3812 | rv = true; |
3805 | pmap_clearbit(pg, PVF_REF); | | 3813 | pmap_clearbit(md, pa, PVF_REF); |
3806 | } else | | 3814 | } else |
3807 | rv = false; | | 3815 | rv = false; |
3808 | | | 3816 | |
3809 | return (rv); | | 3817 | return (rv); |
3810 | } | | 3818 | } |
3811 | | | 3819 | |
3812 | /* | | 3820 | /* |
3813 | * pmap_is_modified: | | 3821 | * pmap_is_modified: |
3814 | * | | 3822 | * |
3815 | * Test if a page has the "modified" attribute. | | 3823 | * Test if a page has the "modified" attribute. |
3816 | */ | | 3824 | */ |
3817 | /* See <arm/arm32/pmap.h> */ | | 3825 | /* See <arm/arm32/pmap.h> */ |
3818 | | | 3826 | |
3819 | /* | | 3827 | /* |
3820 | * pmap_is_referenced: | | 3828 | * pmap_is_referenced: |
3821 | * | | 3829 | * |
3822 | * Test if a page has the "referenced" attribute. | | 3830 | * Test if a page has the "referenced" attribute. |
3823 | */ | | 3831 | */ |
3824 | /* See <arm/arm32/pmap.h> */ | | 3832 | /* See <arm/arm32/pmap.h> */ |
3825 | | | 3833 | |
3826 | int | | 3834 | int |
3827 | pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) | | 3835 | pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) |
3828 | { | | 3836 | { |
3829 | struct l2_dtable *l2; | | 3837 | struct l2_dtable *l2; |
3830 | struct l2_bucket *l2b; | | 3838 | struct l2_bucket *l2b; |
3831 | pd_entry_t *pl1pd, l1pd; | | 3839 | pd_entry_t *pl1pd, l1pd; |
3832 | pt_entry_t *ptep, pte; | | 3840 | pt_entry_t *ptep, pte; |
3833 | paddr_t pa; | | 3841 | paddr_t pa; |
3834 | u_int l1idx; | | 3842 | u_int l1idx; |
3835 | int rv = 0; | | 3843 | int rv = 0; |
3836 | | | 3844 | |
3837 | PMAP_MAP_TO_HEAD_LOCK(); | | 3845 | PMAP_MAP_TO_HEAD_LOCK(); |
3838 | pmap_acquire_pmap_lock(pm); | | 3846 | pmap_acquire_pmap_lock(pm); |
3839 | | | 3847 | |
3840 | l1idx = L1_IDX(va); | | 3848 | l1idx = L1_IDX(va); |
3841 | | | 3849 | |
3842 | /* | | 3850 | /* |
3843 | * If there is no l2_dtable for this address, then the process | | 3851 | * If there is no l2_dtable for this address, then the process |
3844 | * has no business accessing it. | | 3852 | * has no business accessing it. |
3845 | * | | 3853 | * |
3846 | * Note: This will catch userland processes trying to access | | 3854 | * Note: This will catch userland processes trying to access |
3847 | * kernel addresses. | | 3855 | * kernel addresses. |
3848 | */ | | 3856 | */ |
3849 | l2 = pm->pm_l2[L2_IDX(l1idx)]; | | 3857 | l2 = pm->pm_l2[L2_IDX(l1idx)]; |
3850 | if (l2 == NULL) | | 3858 | if (l2 == NULL) |
3851 | goto out; | | 3859 | goto out; |
3852 | | | 3860 | |
3853 | /* | | 3861 | /* |
3854 | * Likewise if there is no L2 descriptor table | | 3862 | * Likewise if there is no L2 descriptor table |
3855 | */ | | 3863 | */ |
3856 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; | | 3864 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; |
3857 | if (l2b->l2b_kva == NULL) | | 3865 | if (l2b->l2b_kva == NULL) |
3858 | goto out; | | 3866 | goto out; |
3859 | | | 3867 | |
3860 | /* | | 3868 | /* |
3861 | * Check the PTE itself. | | 3869 | * Check the PTE itself. |
3862 | */ | | 3870 | */ |
3863 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3871 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3864 | pte = *ptep; | | 3872 | pte = *ptep; |
3865 | if (pte == 0) | | 3873 | if (pte == 0) |
3866 | goto out; | | 3874 | goto out; |
3867 | | | 3875 | |
3868 | /* | | 3876 | /* |
3869 | * Catch a userland access to the vector page mapped at 0x0 | | 3877 | * Catch a userland access to the vector page mapped at 0x0 |
3870 | */ | | 3878 | */ |
3871 | if (user && (pte & L2_S_PROT_U) == 0) | | 3879 | if (user && (pte & L2_S_PROT_U) == 0) |
3872 | goto out; | | 3880 | goto out; |
3873 | | | 3881 | |
3874 | pa = l2pte_pa(pte); | | 3882 | pa = l2pte_pa(pte); |
3875 | | | 3883 | |
3876 | if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(pte)) { | | 3884 | if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(pte)) { |
3877 | /* | | 3885 | /* |
3878 | * This looks like a good candidate for "page modified" | | 3886 | * This looks like a good candidate for "page modified" |
3879 | * emulation... | | 3887 | * emulation... |
3880 | */ | | 3888 | */ |
3881 | struct pv_entry *pv; | | 3889 | struct pv_entry *pv; |
3882 | struct vm_page *pg; | | 3890 | struct vm_page *pg; |
3883 | | | 3891 | |
3884 | /* Extract the physical address of the page */ | | 3892 | /* Extract the physical address of the page */ |
3885 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) | | 3893 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) |
3886 | goto out; | | 3894 | goto out; |
3887 | | | 3895 | |
3888 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3896 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3889 | | | 3897 | |
3890 | /* Get the current flags for this page. */ | | 3898 | /* Get the current flags for this page. */ |
3891 | simple_lock(&md->pvh_slock); | | 3899 | simple_lock(&md->pvh_slock); |
3892 | | | 3900 | |
3893 | pv = pmap_find_pv(md, pm, va); | | 3901 | pv = pmap_find_pv(md, pm, va); |
3894 | if (pv == NULL) { | | 3902 | if (pv == NULL) { |
3895 | simple_unlock(&md->pvh_slock); | | 3903 | simple_unlock(&md->pvh_slock); |
3896 | goto out; | | 3904 | goto out; |
3897 | } | | 3905 | } |
3898 | | | 3906 | |
3899 | /* | | 3907 | /* |
3900 | * Do the flags say this page is writable? If not then it | | 3908 | * Do the flags say this page is writable? If not then it |
3901 | * is a genuine write fault. If yes then the write fault is | | 3909 | * is a genuine write fault. If yes then the write fault is |
3902 | * our fault as we did not reflect the write access in the | | 3910 | * our fault as we did not reflect the write access in the |
3903 | * PTE. Now that we know a write has occurred, we can correct | | 3911 | * PTE. Now that we know a write has occurred, we can correct |
3904 | * this and also set the modified bit. | | 3912 | * this and also set the modified bit. |
3905 | */ | | 3913 | */ |
3906 | if ((pv->pv_flags & PVF_WRITE) == 0) { | | 3914 | if ((pv->pv_flags & PVF_WRITE) == 0) { |
3907 | simple_unlock(&md->pvh_slock); | | 3915 | simple_unlock(&md->pvh_slock); |
3908 | goto out; | | 3916 | goto out; |
3909 | } | | 3917 | } |
3910 | | | 3918 | |
3911 | NPDEBUG(PDB_FOLLOW, | | 3919 | NPDEBUG(PDB_FOLLOW, |
3912 | printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n", | | 3920 | printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n", |
3913 | pm, va, VM_PAGE_TO_PHYS(pg))); | | 3921 | pm, va, pa)); |
3914 | | | 3922 | |
3915 | md->pvh_attrs |= PVF_REF | PVF_MOD; | | 3923 | md->pvh_attrs |= PVF_REF | PVF_MOD; |
3916 | pv->pv_flags |= PVF_REF | PVF_MOD; | | 3924 | pv->pv_flags |= PVF_REF | PVF_MOD; |
3917 | #ifdef PMAP_CACHE_VIPT | | 3925 | #ifdef PMAP_CACHE_VIPT |
3918 | /* | | 3926 | /* |
3919 | * If there are cacheable mappings for this page, mark it dirty. | | 3927 | * If there are cacheable mappings for this page, mark it dirty. |
3920 | */ | | 3928 | */ |
3921 | if ((md->pvh_attrs & PVF_NC) == 0) | | 3929 | if ((md->pvh_attrs & PVF_NC) == 0) |
3922 | md->pvh_attrs |= PVF_DIRTY; | | 3930 | md->pvh_attrs |= PVF_DIRTY; |
3923 | #endif | | 3931 | #endif |
3924 | simple_unlock(&md->pvh_slock); | | 3932 | simple_unlock(&md->pvh_slock); |
3925 | | | 3933 | |
3926 | /* | | 3934 | /* |
3927 | * Re-enable write permissions for the page. No need to call | | 3935 | * Re-enable write permissions for the page. No need to call |
3928 | * pmap_vac_me_harder(), since this is just a | | 3936 | * pmap_vac_me_harder(), since this is just a |
3929 | * modified-emulation fault, and the PVF_WRITE bit isn't | | 3937 | * modified-emulation fault, and the PVF_WRITE bit isn't |
3930 | * changing. We've already set the cacheable bits based on | | 3938 | * changing. We've already set the cacheable bits based on |
3931 | * the assumption that we can write to this page. | | 3939 | * the assumption that we can write to this page. |
3932 | */ | | 3940 | */ |
3933 | *ptep = l2pte_set_writable((pte & ~L2_TYPE_MASK) | L2_S_PROTO); | | 3941 | *ptep = l2pte_set_writable((pte & ~L2_TYPE_MASK) | L2_S_PROTO); |
3934 | PTE_SYNC(ptep); | | 3942 | PTE_SYNC(ptep); |
3935 | rv = 1; | | 3943 | rv = 1; |
3936 | } else | | 3944 | } else |
3937 | if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { | | 3945 | if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { |
3938 | /* | | 3946 | /* |
3939 | * This looks like a good candidate for "page referenced" | | 3947 | * This looks like a good candidate for "page referenced" |
3940 | * emulation. | | 3948 | * emulation. |
3941 | */ | | 3949 | */ |
3942 | struct pv_entry *pv; | | 3950 | struct pv_entry *pv; |
3943 | struct vm_page *pg; | | 3951 | struct vm_page *pg; |
3944 | | | 3952 | |
3945 | /* Extract the physical address of the page */ | | 3953 | /* Extract the physical address of the page */ |
3946 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) | | 3954 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) |
3947 | goto out; | | 3955 | goto out; |
3948 | | | 3956 | |
3949 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 3957 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
3950 | | | 3958 | |
3951 | /* Get the current flags for this page. */ | | 3959 | /* Get the current flags for this page. */ |
3952 | simple_lock(&md->pvh_slock); | | 3960 | simple_lock(&md->pvh_slock); |
3953 | | | 3961 | |
3954 | pv = pmap_find_pv(md, pm, va); | | 3962 | pv = pmap_find_pv(md, pm, va); |
3955 | if (pv == NULL) { | | 3963 | if (pv == NULL) { |
3956 | simple_unlock(&md->pvh_slock); | | 3964 | simple_unlock(&md->pvh_slock); |
3957 | goto out; | | 3965 | goto out; |
3958 | } | | 3966 | } |
3959 | | | 3967 | |
3960 | md->pvh_attrs |= PVF_REF; | | 3968 | md->pvh_attrs |= PVF_REF; |
3961 | pv->pv_flags |= PVF_REF; | | 3969 | pv->pv_flags |= PVF_REF; |
3962 | simple_unlock(&md->pvh_slock); | | 3970 | simple_unlock(&md->pvh_slock); |
3963 | | | 3971 | |
3964 | NPDEBUG(PDB_FOLLOW, | | 3972 | NPDEBUG(PDB_FOLLOW, |
3965 | printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n", | | 3973 | printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n", |
3966 | pm, va, VM_PAGE_TO_PHYS(pg))); | | 3974 | pm, va, pa)); |
3967 | | | 3975 | |
3968 | *ptep = l2pte_set_readonly((pte & ~L2_TYPE_MASK) | L2_S_PROTO); | | 3976 | *ptep = l2pte_set_readonly((pte & ~L2_TYPE_MASK) | L2_S_PROTO); |
3969 | PTE_SYNC(ptep); | | 3977 | PTE_SYNC(ptep); |
3970 | rv = 1; | | 3978 | rv = 1; |
3971 | } | | 3979 | } |
3972 | | | 3980 | |
3973 | /* | | 3981 | /* |
3974 | * We know there is a valid mapping here, so simply | | 3982 | * We know there is a valid mapping here, so simply |
3975 | * fix up the L1 if necessary. | | 3983 | * fix up the L1 if necessary. |
3976 | */ | | 3984 | */ |
3977 | pl1pd = &pm->pm_l1->l1_kva[l1idx]; | | 3985 | pl1pd = &pm->pm_l1->l1_kva[l1idx]; |
3978 | l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; | | 3986 | l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; |
3979 | if (*pl1pd != l1pd) { | | 3987 | if (*pl1pd != l1pd) { |
3980 | *pl1pd = l1pd; | | 3988 | *pl1pd = l1pd; |
3981 | PTE_SYNC(pl1pd); | | 3989 | PTE_SYNC(pl1pd); |
3982 | rv = 1; | | 3990 | rv = 1; |
3983 | } | | 3991 | } |
3984 | | | 3992 | |
3985 | #ifdef CPU_SA110 | | 3993 | #ifdef CPU_SA110 |
3986 | /* | | 3994 | /* |
3987 | * There are bugs in the rev K SA110. This is a check for one | | 3995 | * There are bugs in the rev K SA110. This is a check for one |
3988 | * of them. | | 3996 | * of them. |
3989 | */ | | 3997 | */ |
3990 | if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && | | 3998 | if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && |
3991 | curcpu()->ci_arm_cpurev < 3) { | | 3999 | curcpu()->ci_arm_cpurev < 3) { |
3992 | /* Always current pmap */ | | 4000 | /* Always current pmap */ |
3993 | if (l2pte_valid(pte)) { | | 4001 | if (l2pte_valid(pte)) { |
3994 | extern int kernel_debug; | | 4002 | extern int kernel_debug; |
3995 | if (kernel_debug & 1) { | | 4003 | if (kernel_debug & 1) { |
3996 | struct proc *p = curlwp->l_proc; | | 4004 | struct proc *p = curlwp->l_proc; |
3997 | printf("prefetch_abort: page is already " | | 4005 | printf("prefetch_abort: page is already " |
3998 | "mapped - pte=%p *pte=%08x\n", ptep, pte); | | 4006 | "mapped - pte=%p *pte=%08x\n", ptep, pte); |
3999 | printf("prefetch_abort: pc=%08lx proc=%p " | | 4007 | printf("prefetch_abort: pc=%08lx proc=%p " |
4000 | "process=%s\n", va, p, p->p_comm); | | 4008 | "process=%s\n", va, p, p->p_comm); |
4001 | printf("prefetch_abort: far=%08x fs=%x\n", | | 4009 | printf("prefetch_abort: far=%08x fs=%x\n", |
4002 | cpu_faultaddress(), cpu_faultstatus()); | | 4010 | cpu_faultaddress(), cpu_faultstatus()); |
4003 | } | | 4011 | } |
4004 | #ifdef DDB | | 4012 | #ifdef DDB |
4005 | if (kernel_debug & 2) | | 4013 | if (kernel_debug & 2) |
4006 | Debugger(); | | 4014 | Debugger(); |
4007 | #endif | | 4015 | #endif |
4008 | rv = 1; | | 4016 | rv = 1; |
4009 | } | | 4017 | } |
4010 | } | | 4018 | } |
4011 | #endif /* CPU_SA110 */ | | 4019 | #endif /* CPU_SA110 */ |
4012 | | | 4020 | |
4013 | #ifdef DEBUG | | 4021 | #ifdef DEBUG |
4014 | /* | | 4022 | /* |
4015 | * If 'rv == 0' at this point, it generally indicates that there is a | | 4023 | * If 'rv == 0' at this point, it generally indicates that there is a |
4016 | * stale TLB entry for the faulting address. This happens when two or | | 4024 | * stale TLB entry for the faulting address. This happens when two or |
4017 | * more processes are sharing an L1. Since we don't flush the TLB on | | 4025 | * more processes are sharing an L1. Since we don't flush the TLB on |
4018 | * a context switch between such processes, we can take domain faults | | 4026 | * a context switch between such processes, we can take domain faults |
4019 | * for mappings which exist at the same VA in both processes. EVEN IF | | 4027 | * for mappings which exist at the same VA in both processes. EVEN IF |
4020 | * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for | | 4028 | * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for |
4021 | * example. | | 4029 | * example. |
4022 | * | | 4030 | * |
4023 | * This is extremely likely to happen if pmap_enter() updated the L1 | | 4031 | * This is extremely likely to happen if pmap_enter() updated the L1 |
4024 | * entry for a recently entered mapping. In this case, the TLB is | | 4032 | * entry for a recently entered mapping. In this case, the TLB is |
4025 | * flushed for the new mapping, but there may still be TLB entries for | | 4033 | * flushed for the new mapping, but there may still be TLB entries for |
4026 | * other mappings belonging to other processes in the 1MB range | | 4034 | * other mappings belonging to other processes in the 1MB range |
4027 | * covered by the L1 entry. | | 4035 | * covered by the L1 entry. |
4028 | * | | 4036 | * |
4029 | * Since 'rv == 0', we know that the L1 already contains the correct | | 4037 | * Since 'rv == 0', we know that the L1 already contains the correct |
4030 | * value, so the fault must be due to a stale TLB entry. | | 4038 | * value, so the fault must be due to a stale TLB entry. |
4031 | * | | 4039 | * |
4032 | * Since we always need to flush the TLB anyway in the case where we | | 4040 | * Since we always need to flush the TLB anyway in the case where we |
4033 | * fixed up the L1, or frobbed the L2 PTE, we effectively deal with | | 4041 | * fixed up the L1, or frobbed the L2 PTE, we effectively deal with |
4034 | * stale TLB entries dynamically. | | 4042 | * stale TLB entries dynamically. |
4035 | * | | 4043 | * |
4036 | * However, the above condition can ONLY happen if the current L1 is | | 4044 | * However, the above condition can ONLY happen if the current L1 is |
4037 | * being shared. If it happens when the L1 is unshared, it indicates | | 4045 | * being shared. If it happens when the L1 is unshared, it indicates |
4038 | * that other parts of the pmap are not doing their job WRT managing | | 4046 | * that other parts of the pmap are not doing their job WRT managing |
4039 | * the TLB. | | 4047 | * the TLB. |
4040 | */ | | 4048 | */ |
4041 | if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { | | 4049 | if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { |
4042 | extern int last_fault_code; | | 4050 | extern int last_fault_code; |
4043 | printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", | | 4051 | printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", |
4044 | pm, va, ftype); | | 4052 | pm, va, ftype); |
4045 | printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", | | 4053 | printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", |
4046 | l2, l2b, ptep, pl1pd); | | 4054 | l2, l2b, ptep, pl1pd); |
4047 | printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", | | 4055 | printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", |
4048 | pte, l1pd, last_fault_code); | | 4056 | pte, l1pd, last_fault_code); |
4049 | #ifdef DDB | | 4057 | #ifdef DDB |
4050 | Debugger(); | | 4058 | Debugger(); |
4051 | #endif | | 4059 | #endif |
4052 | } | | 4060 | } |
4053 | #endif | | 4061 | #endif |
4054 | | | 4062 | |
4055 | cpu_tlb_flushID_SE(va); | | 4063 | cpu_tlb_flushID_SE(va); |
4056 | cpu_cpwait(); | | 4064 | cpu_cpwait(); |
4057 | | | 4065 | |
4058 | rv = 1; | | 4066 | rv = 1; |
4059 | | | 4067 | |
4060 | out: | | 4068 | out: |
4061 | pmap_release_pmap_lock(pm); | | 4069 | pmap_release_pmap_lock(pm); |
4062 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 4070 | PMAP_MAP_TO_HEAD_UNLOCK(); |
4063 | | | 4071 | |
4064 | return (rv); | | 4072 | return (rv); |
4065 | } | | 4073 | } |
4066 | | | 4074 | |
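/*
 * Summary sketch of the ref/mod emulation that pmap_fault_fixup()
 * implements (ARM has no hardware referenced/modified bits):
 *
 *   mapping entered  -> PTE invalid type -> first access faults:
 *                                           PVF_REF set, PTE made valid
 *   clean, writable  -> PTE read-only    -> first write faults:
 *                                           PVF_MOD set, PTE made writable
 *   later accesses   -> no fault
 *
 * A return of 1 tells the trap handler the fault was serviced here;
 * 0 means it must be passed on to uvm_fault().
 */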
4067 | /* | | 4075 | /* |
4068 | * Routine: pmap_procwr | | 4076 | * Routine: pmap_procwr |
4069 | * | | 4077 | * |
4070 | * Function: | | 4078 | * Function: |
4071 | * Synchronize caches corresponding to [addr, addr+len) in p. | | 4079 | * Synchronize caches corresponding to [addr, addr+len) in p. |
4072 | * | | 4080 | * |
4073 | */ | | 4081 | */ |
4074 | void | | 4082 | void |
4075 | pmap_procwr(struct proc *p, vaddr_t va, int len) | | 4083 | pmap_procwr(struct proc *p, vaddr_t va, int len) |
4076 | { | | 4084 | { |
4077 | /* We only need to do anything if it is the current process. */ | | 4085 | /* We only need to do anything if it is the current process. */ |
4078 | if (p == curproc) | | 4086 | if (p == curproc) |
4079 | cpu_icache_sync_range(va, len); | | 4087 | cpu_icache_sync_range(va, len); |
4080 | } | | 4088 | } |
4081 | | | 4089 | |
4082 | /* | | 4090 | /* |
4083 | * Routine: pmap_unwire | | 4091 | * Routine: pmap_unwire |
4084 | * Function: Clear the wired attribute for a map/virtual-address pair. | | 4092 | * Function: Clear the wired attribute for a map/virtual-address pair. |
4085 | * | | 4093 | * |
4086 | * In/out conditions: | | 4094 | * In/out conditions: |
4087 | * The mapping must already exist in the pmap. | | 4095 | * The mapping must already exist in the pmap. |
4088 | */ | | 4096 | */ |
4089 | void | | 4097 | void |
4090 | pmap_unwire(pmap_t pm, vaddr_t va) | | 4098 | pmap_unwire(pmap_t pm, vaddr_t va) |
4091 | { | | 4099 | { |
4092 | struct l2_bucket *l2b; | | 4100 | struct l2_bucket *l2b; |
4093 | pt_entry_t *ptep, pte; | | 4101 | pt_entry_t *ptep, pte; |
4094 | struct vm_page *pg; | | 4102 | struct vm_page *pg; |
4095 | paddr_t pa; | | 4103 | paddr_t pa; |
4096 | | | 4104 | |
4097 | NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va)); | | 4105 | NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va)); |
4098 | | | 4106 | |
4099 | PMAP_MAP_TO_HEAD_LOCK(); | | 4107 | PMAP_MAP_TO_HEAD_LOCK(); |
4100 | pmap_acquire_pmap_lock(pm); | | 4108 | pmap_acquire_pmap_lock(pm); |
4101 | | | 4109 | |
4102 | l2b = pmap_get_l2_bucket(pm, va); | | 4110 | l2b = pmap_get_l2_bucket(pm, va); |
4103 | KDASSERT(l2b != NULL); | | 4111 | KDASSERT(l2b != NULL); |
4104 | | | 4112 | |
4105 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 4113 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
4106 | pte = *ptep; | | 4114 | pte = *ptep; |
4107 | | | 4115 | |
4108 | /* Extract the physical address of the page */ | | 4116 | /* Extract the physical address of the page */ |
4109 | pa = l2pte_pa(pte); | | 4117 | pa = l2pte_pa(pte); |
4110 | | | 4118 | |
4111 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { | | 4119 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { |
4112 | /* Update the wired bit in the pv entry for this page. */ | | 4120 | /* Update the wired bit in the pv entry for this page. */ |
4113 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4121 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
| | | 4122 | |
4114 | simple_lock(&md->pvh_slock); | | 4123 | simple_lock(&md->pvh_slock); |
4115 | (void) pmap_modify_pv(md, VM_PAGE_TO_PHYS(pg), pm, va, PVF_WIRED, 0); | | 4124 | (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0); |
4116 | simple_unlock(&md->pvh_slock); | | 4125 | simple_unlock(&md->pvh_slock); |
4117 | } | | 4126 | } |
4118 | | | 4127 | |
4119 | pmap_release_pmap_lock(pm); | | 4128 | pmap_release_pmap_lock(pm); |
4120 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 4129 | PMAP_MAP_TO_HEAD_UNLOCK(); |
4121 | } | | 4130 | } |
4122 | | | 4131 | |
4123 | void | | 4132 | void |
4124 | pmap_activate(struct lwp *l) | | 4133 | pmap_activate(struct lwp *l) |
4125 | { | | 4134 | { |
4126 | extern int block_userspace_access; | | 4135 | extern int block_userspace_access; |
4127 | pmap_t opm, npm, rpm; | | 4136 | pmap_t opm, npm, rpm; |
4128 | uint32_t odacr, ndacr; | | 4137 | uint32_t odacr, ndacr; |
4129 | int oldirqstate; | | 4138 | int oldirqstate; |
4130 | | | 4139 | |
4131 | /* | | 4140 | /* |
4132 | * If activating a non-current lwp or the current lwp is | | 4141 | * If activating a non-current lwp or the current lwp is |
4133 | * already active, just return. | | 4142 | * already active, just return. |
4134 | */ | | 4143 | */ |
4135 | if (l != curlwp || | | 4144 | if (l != curlwp || |
4136 | l->l_proc->p_vmspace->vm_map.pmap->pm_activated == true) | | 4145 | l->l_proc->p_vmspace->vm_map.pmap->pm_activated == true) |
4137 | return; | | 4146 | return; |
4138 | | | 4147 | |
4139 | npm = l->l_proc->p_vmspace->vm_map.pmap; | | 4148 | npm = l->l_proc->p_vmspace->vm_map.pmap; |
4140 | ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | | | 4149 | ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | |
4141 | (DOMAIN_CLIENT << (npm->pm_domain * 2)); | | 4150 | (DOMAIN_CLIENT << (npm->pm_domain * 2)); |
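| /* | | | /* |
|  * Illustrative sketch of the value built above (the numbers are | | |  * Illustrative sketch of the value built above (the numbers are |
|  * assumptions for the example only): the DACR holds a 2-bit field | | |  * assumptions for the example only): the DACR holds a 2-bit field |
|  * per domain, so with DOMAIN_CLIENT == 1, PMAP_DOMAIN_KERNEL == 0 | | |  * per domain, so with DOMAIN_CLIENT == 1, PMAP_DOMAIN_KERNEL == 0 |
|  * and a pmap owning domain 5, ndacr is (1 << 0) | (1 << 10) == | | |  * and a pmap owning domain 5, ndacr is (1 << 0) | (1 << 10) == |
|  * 0x00000401: client access for the kernel's domain and for this | | |  * 0x00000401: client access for the kernel's domain and for this |
|  * pmap's domain, and no access for every other domain. | | |  * pmap's domain, and no access for every other domain. |
|  */ | | |  */ |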
4142 | | | 4151 | |
4143 | /* | | 4152 | /* |
4144 | * If TTB and DACR are unchanged, short-circuit all the | | 4153 | * If TTB and DACR are unchanged, short-circuit all the |
4145 | * TLB/cache management stuff. | | 4154 | * TLB/cache management stuff. |
4146 | */ | | 4155 | */ |
4147 | if (pmap_previous_active_lwp != NULL) { | | 4156 | if (pmap_previous_active_lwp != NULL) { |
4148 | opm = pmap_previous_active_lwp->l_proc->p_vmspace->vm_map.pmap; | | 4157 | opm = pmap_previous_active_lwp->l_proc->p_vmspace->vm_map.pmap; |
4149 | odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | | | 4158 | odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | |
4150 | (DOMAIN_CLIENT << (opm->pm_domain * 2)); | | 4159 | (DOMAIN_CLIENT << (opm->pm_domain * 2)); |
4151 | | | 4160 | |
4152 | if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr) | | 4161 | if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr) |
4153 | goto all_done; | | 4162 | goto all_done; |
4154 | } else | | 4163 | } else |
4155 | opm = NULL; | | 4164 | opm = NULL; |
4156 | | | 4165 | |
4157 | PMAPCOUNT(activations); | | 4166 | PMAPCOUNT(activations); |
4158 | block_userspace_access = 1; | | 4167 | block_userspace_access = 1; |
4159 | | | 4168 | |
4160 | /* | | 4169 | /* |
4161 | * If switching to a user vmspace which is different to the | | 4170 | * If switching to a user vmspace which is different to the |
4162 | * most recent one, and the most recent one is potentially | | 4171 | * most recent one, and the most recent one is potentially |
4163 | * live in the cache, we must write-back and invalidate the | | 4172 | * live in the cache, we must write-back and invalidate the |
4164 | * entire cache. | | 4173 | * entire cache. |
4165 | */ | | 4174 | */ |
4166 | rpm = pmap_recent_user; | | 4175 | rpm = pmap_recent_user; |
4167 | | | 4176 | |
4168 | /* | | 4177 | /* |
4169 | * XXXSCW: There's a corner case here which can leave turds in the cache as | | 4178 | * XXXSCW: There's a corner case here which can leave turds in the cache as |
4170 | * reported in kern/41058. They're probably left over during tear-down and | | 4179 | * reported in kern/41058. They're probably left over during tear-down and |
4171 | * switching away from an exiting process. Until the root cause is identified | | 4180 | * switching away from an exiting process. Until the root cause is identified |
4172 | * and fixed, zap the cache when switching pmaps. This will result in a few | | 4181 | * and fixed, zap the cache when switching pmaps. This will result in a few |
4173 | * unnecessary cache flushes, but that's better than silently corrupting data. | | 4182 | * unnecessary cache flushes, but that's better than silently corrupting data. |
4174 | */ | | 4183 | */ |
4175 | #if 0 | | 4184 | #if 0 |
4176 | if (npm != pmap_kernel() && rpm && npm != rpm && | | 4185 | if (npm != pmap_kernel() && rpm && npm != rpm && |
4177 | rpm->pm_cstate.cs_cache) { | | 4186 | rpm->pm_cstate.cs_cache) { |
4178 | rpm->pm_cstate.cs_cache = 0; | | 4187 | rpm->pm_cstate.cs_cache = 0; |
4179 | #ifdef PMAP_CACHE_VIVT | | 4188 | #ifdef PMAP_CACHE_VIVT |
4180 | cpu_idcache_wbinv_all(); | | 4189 | cpu_idcache_wbinv_all(); |
4181 | #endif | | 4190 | #endif |
4182 | } | | 4191 | } |
4183 | #else | | 4192 | #else |
4184 | if (rpm) { | | 4193 | if (rpm) { |
4185 | rpm->pm_cstate.cs_cache = 0; | | 4194 | rpm->pm_cstate.cs_cache = 0; |
4186 | if (npm == pmap_kernel()) | | 4195 | if (npm == pmap_kernel()) |
4187 | pmap_recent_user = NULL; | | 4196 | pmap_recent_user = NULL; |
4188 | #ifdef PMAP_CACHE_VIVT | | 4197 | #ifdef PMAP_CACHE_VIVT |
4189 | cpu_idcache_wbinv_all(); | | 4198 | cpu_idcache_wbinv_all(); |
4190 | #endif | | 4199 | #endif |
4191 | } | | 4200 | } |
4192 | #endif | | 4201 | #endif |
4193 | | | 4202 | |
4194 | /* No interrupts while we frob the TTB/DACR */ | | 4203 | /* No interrupts while we frob the TTB/DACR */ |
4195 | oldirqstate = disable_interrupts(IF32_bits); | | 4204 | oldirqstate = disable_interrupts(IF32_bits); |
4196 | | | 4205 | |
4197 | /* | | 4206 | /* |
4198 | * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1 | | 4207 | * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1 |
4199 | * entry corresponding to 'vector_page' in the incoming L1 table | | 4208 | * entry corresponding to 'vector_page' in the incoming L1 table |
4200 | * before switching to it otherwise subsequent interrupts/exceptions | | 4209 | * before switching to it otherwise subsequent interrupts/exceptions |
4201 | * (including domain faults!) will jump into hyperspace. | | 4210 | * (including domain faults!) will jump into hyperspace. |
4202 | */ | | 4211 | */ |
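| /* | | | /* |
|  * Sketch of how the fixup below works (assuming ARM_VECTORS_LOW, | | |  * Sketch of how the fixup below works (assuming ARM_VECTORS_LOW, |
|  * i.e. vector_page at VA 0 inside the user address range): | | |  * i.e. vector_page at VA 0 inside the user address range): |
|  * pm_pl1vec points at this pmap's L1 slot covering the vector | | |  * pm_pl1vec points at this pmap's L1 slot covering the vector |
|  * page, and pm_l1vec is the pre-computed L1 entry referencing the | | |  * page, and pm_l1vec is the pre-computed L1 entry referencing the |
|  * kernel's L2 table for that page, so the store patches the | | |  * kernel's L2 table for that page, so the store patches the |
|  * incoming table before the hardware ever walks it. | | |  * incoming table before the hardware ever walks it. |
|  */ | | |  */ |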
4203 | if (npm->pm_pl1vec != NULL) { | | 4212 | if (npm->pm_pl1vec != NULL) { |
4204 | cpu_tlb_flushID_SE((u_int)vector_page); | | 4213 | cpu_tlb_flushID_SE((u_int)vector_page); |
4205 | cpu_cpwait(); | | 4214 | cpu_cpwait(); |
4206 | *npm->pm_pl1vec = npm->pm_l1vec; | | 4215 | *npm->pm_pl1vec = npm->pm_l1vec; |
4207 | PTE_SYNC(npm->pm_pl1vec); | | 4216 | PTE_SYNC(npm->pm_pl1vec); |
4208 | } | | 4217 | } |
4209 | | | 4218 | |
4210 | cpu_domains(ndacr); | | 4219 | cpu_domains(ndacr); |
4211 | | | 4220 | |
4212 | if (npm == pmap_kernel() || npm == rpm) { | | 4221 | if (npm == pmap_kernel() || npm == rpm) { |
4213 | /* | | 4222 | /* |
4214 | * Switching to a kernel thread, or back to the | | 4223 | * Switching to a kernel thread, or back to the |
4215 | * same user vmspace as before... Simply update | | 4224 | * same user vmspace as before... Simply update |
4216 | * the TTB (no TLB flush required) | | 4225 | * the TTB (no TLB flush required) |
4217 | */ | | 4226 | */ |
4218 | __asm volatile("mcr p15, 0, %0, c2, c0, 0" :: | | 4227 | __asm volatile("mcr p15, 0, %0, c2, c0, 0" :: |
4219 | "r"(npm->pm_l1->l1_physaddr)); | | 4228 | "r"(npm->pm_l1->l1_physaddr)); |
4220 | cpu_cpwait(); | | 4229 | cpu_cpwait(); |
4221 | } else { | | 4230 | } else { |
4222 | /* | | 4231 | /* |
4223 | * Otherwise, update TTB and flush TLB | | 4232 | * Otherwise, update TTB and flush TLB |
4224 | */ | | 4233 | */ |
4225 | cpu_context_switch(npm->pm_l1->l1_physaddr); | | 4234 | cpu_context_switch(npm->pm_l1->l1_physaddr); |
4226 | if (rpm != NULL) | | 4235 | if (rpm != NULL) |
4227 | rpm->pm_cstate.cs_tlb = 0; | | 4236 | rpm->pm_cstate.cs_tlb = 0; |
4228 | } | | 4237 | } |
4229 | | | 4238 | |
4230 | restore_interrupts(oldirqstate); | | 4239 | restore_interrupts(oldirqstate); |
4231 | | | 4240 | |
4232 | block_userspace_access = 0; | | 4241 | block_userspace_access = 0; |
4233 | | | 4242 | |
4234 | all_done: | | 4243 | all_done: |
4235 | /* | | 4244 | /* |
4236 | * The new pmap is resident. Make sure it's marked | | 4245 | * The new pmap is resident. Make sure it's marked |
4237 | * as resident in the cache/TLB. | | 4246 | * as resident in the cache/TLB. |
4238 | */ | | 4247 | */ |
4239 | npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; | | 4248 | npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; |
4240 | if (npm != pmap_kernel()) | | 4249 | if (npm != pmap_kernel()) |
4241 | pmap_recent_user = npm; | | 4250 | pmap_recent_user = npm; |
4242 | | | 4251 | |
4243 | /* The old pmap is no longer active */ | | 4252 | /* The old pmap is no longer active */ |
4244 | if (opm != NULL) | | 4253 | if (opm != NULL) |
4245 | opm->pm_activated = false; | | 4254 | opm->pm_activated = false; |
4246 | | | 4255 | |
4247 | /* But the new one is */ | | 4256 | /* But the new one is */ |
4248 | npm->pm_activated = true; | | 4257 | npm->pm_activated = true; |
4249 | } | | 4258 | } |
4250 | | | 4259 | |
4251 | void | | 4260 | void |
4252 | pmap_deactivate(struct lwp *l) | | 4261 | pmap_deactivate(struct lwp *l) |
4253 | { | | 4262 | { |
4254 | | | 4263 | |
4255 | /* | | 4264 | /* |
4256 | * If the process is exiting, make sure pmap_activate() does | | 4265 | * If the process is exiting, make sure pmap_activate() does |
4257 | * a full MMU context-switch and cache flush, which we might | | 4266 | * a full MMU context-switch and cache flush, which we might |
4258 | * otherwise skip. See PR port-arm/38950. | | 4267 | * otherwise skip. See PR port-arm/38950. |
4259 | */ | | 4268 | */ |
4260 | if (l->l_proc->p_sflag & PS_WEXIT) | | 4269 | if (l->l_proc->p_sflag & PS_WEXIT) |
4261 | pmap_previous_active_lwp = NULL; | | 4270 | pmap_previous_active_lwp = NULL; |
4262 | | | 4271 | |
4263 | l->l_proc->p_vmspace->vm_map.pmap->pm_activated = false; | | 4272 | l->l_proc->p_vmspace->vm_map.pmap->pm_activated = false; |
4264 | } | | 4273 | } |
4265 | | | 4274 | |
4266 | void | | 4275 | void |
4267 | pmap_update(pmap_t pm) | | 4276 | pmap_update(pmap_t pm) |
4268 | { | | 4277 | { |
4269 | | | 4278 | |
4270 | if (pm->pm_remove_all) { | | 4279 | if (pm->pm_remove_all) { |
4271 | /* | | 4280 | /* |
4272 | * Finish up the pmap_remove_all() optimisation by flushing | | 4281 | * Finish up the pmap_remove_all() optimisation by flushing |
4273 | * the TLB. | | 4282 | * the TLB. |
4274 | */ | | 4283 | */ |
4275 | pmap_tlb_flushID(pm); | | 4284 | pmap_tlb_flushID(pm); |
4276 | pm->pm_remove_all = false; | | 4285 | pm->pm_remove_all = false; |
4277 | } | | 4286 | } |
4278 | | | 4287 | |
4279 | if (pmap_is_current(pm)) { | | 4288 | if (pmap_is_current(pm)) { |
4280 | /* | | 4289 | /* |
4281 | * If we're dealing with a current userland pmap, move its L1 | | 4290 | * If we're dealing with a current userland pmap, move its L1 |
4282 | * to the end of the LRU. | | 4291 | * to the end of the LRU. |
4283 | */ | | 4292 | */ |
4284 | if (pm != pmap_kernel()) | | 4293 | if (pm != pmap_kernel()) |
4285 | pmap_use_l1(pm); | | 4294 | pmap_use_l1(pm); |
4286 | | | 4295 | |
4287 | /* | | 4296 | /* |
4288 | * We can assume we're done with frobbing the cache/tlb for | | 4297 | * We can assume we're done with frobbing the cache/tlb for |
4289 | * now. Make sure any future pmap ops don't skip cache/tlb | | 4298 | * now. Make sure any future pmap ops don't skip cache/tlb |
4290 | * flushes. | | 4299 | * flushes. |
4291 | */ | | 4300 | */ |
4292 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; | | 4301 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; |
4293 | } | | 4302 | } |
4294 | | | 4303 | |
4295 | PMAPCOUNT(updates); | | 4304 | PMAPCOUNT(updates); |
4296 | | | 4305 | |
4297 | /* | | 4306 | /* |
4298 | * make sure TLB/cache operations have completed. | | 4307 | * make sure TLB/cache operations have completed. |
4299 | */ | | 4308 | */ |
4300 | cpu_cpwait(); | | 4309 | cpu_cpwait(); |
4301 | } | | 4310 | } |
4302 | | | 4311 | |
4303 | void | | 4312 | void |
4304 | pmap_remove_all(pmap_t pm) | | 4313 | pmap_remove_all(pmap_t pm) |
4305 | { | | 4314 | { |
4306 | | | 4315 | |
4307 | /* | | 4316 | /* |
4308 | * The vmspace described by this pmap is about to be torn down. | | 4317 | * The vmspace described by this pmap is about to be torn down. |
4309 | * Until pmap_update() is called, UVM will only make calls | | 4318 | * Until pmap_update() is called, UVM will only make calls |
4310 | * to pmap_remove(). We can make life much simpler by flushing | | 4319 | * to pmap_remove(). We can make life much simpler by flushing |
4311 | * the cache now, and deferring TLB invalidation to pmap_update(). | | 4320 | * the cache now, and deferring TLB invalidation to pmap_update(). |
4312 | */ | | 4321 | */ |
4313 | #ifdef PMAP_CACHE_VIVT | | 4322 | #ifdef PMAP_CACHE_VIVT |
4314 | pmap_idcache_wbinv_all(pm); | | 4323 | pmap_idcache_wbinv_all(pm); |
4315 | #endif | | 4324 | #endif |
4316 | pm->pm_remove_all = true; | | 4325 | pm->pm_remove_all = true; |
4317 | } | | 4326 | } |
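| /* | | | /* |
|  * Sketch of the calling sequence this optimisation expects during | | |  * Sketch of the calling sequence this optimisation expects during |
|  * vmspace teardown (a sketch of the protocol described above, not | | |  * vmspace teardown (a sketch of the protocol described above, not |
|  * code taken from UVM): | | |  * code taken from UVM): |
|  * | | |  * |
|  *      pmap_remove_all(pm);            cache cleaned here (VIVT) | | |  *      pmap_remove_all(pm);            cache cleaned here (VIVT) |
|  *      pmap_remove(pm, sva, eva);      repeated per mapped region | | |  *      pmap_remove(pm, sva, eva);      repeated per mapped region |
|  *      pmap_update(pm);                deferred TLB flush done here | | |  *      pmap_update(pm);                deferred TLB flush done here |
|  */ | | |  */ |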
4318 | | | 4327 | |
4319 | /* | | 4328 | /* |
4320 | * Retire the given physical map from service. | | 4329 | * Retire the given physical map from service. |
4321 | * Should only be called if the map contains no valid mappings. | | 4330 | * Should only be called if the map contains no valid mappings. |
4322 | */ | | 4331 | */ |
4323 | void | | 4332 | void |
4324 | pmap_destroy(pmap_t pm) | | 4333 | pmap_destroy(pmap_t pm) |
4325 | { | | 4334 | { |
4326 | u_int count; | | 4335 | u_int count; |
4327 | | | 4336 | |
4328 | if (pm == NULL) | | 4337 | if (pm == NULL) |
4329 | return; | | 4338 | return; |
4330 | | | 4339 | |
4331 | if (pm->pm_remove_all) { | | 4340 | if (pm->pm_remove_all) { |
4332 | pmap_tlb_flushID(pm); | | 4341 | pmap_tlb_flushID(pm); |
4333 | pm->pm_remove_all = false; | | 4342 | pm->pm_remove_all = false; |
4334 | } | | 4343 | } |
4335 | | | 4344 | |
4336 | /* | | 4345 | /* |
4337 | * Drop reference count | | 4346 | * Drop reference count |
4338 | */ | | 4347 | */ |
4339 | mutex_enter(&pm->pm_lock); | | 4348 | mutex_enter(&pm->pm_lock); |
4340 | count = --pm->pm_obj.uo_refs; | | 4349 | count = --pm->pm_obj.uo_refs; |
4341 | mutex_exit(&pm->pm_lock); | | 4350 | mutex_exit(&pm->pm_lock); |
4342 | if (count > 0) { | | 4351 | if (count > 0) { |
4343 | if (pmap_is_current(pm)) { | | 4352 | if (pmap_is_current(pm)) { |
4344 | if (pm != pmap_kernel()) | | 4353 | if (pm != pmap_kernel()) |
4345 | pmap_use_l1(pm); | | 4354 | pmap_use_l1(pm); |
4346 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; | | 4355 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; |
4347 | } | | 4356 | } |
4348 | return; | | 4357 | return; |
4349 | } | | 4358 | } |
4350 | | | 4359 | |
4351 | /* | | 4360 | /* |
4352 | * reference count is zero, free pmap resources and then free pmap. | | 4361 | * reference count is zero, free pmap resources and then free pmap. |
4353 | */ | | 4362 | */ |
4354 | | | 4363 | |
4355 | if (vector_page < KERNEL_BASE) { | | 4364 | if (vector_page < KERNEL_BASE) { |
4356 | KDASSERT(!pmap_is_current(pm)); | | 4365 | KDASSERT(!pmap_is_current(pm)); |
4357 | | | 4366 | |
4358 | /* Remove the vector page mapping */ | | 4367 | /* Remove the vector page mapping */ |
4359 | pmap_remove(pm, vector_page, vector_page + PAGE_SIZE); | | 4368 | pmap_remove(pm, vector_page, vector_page + PAGE_SIZE); |
4360 | pmap_update(pm); | | 4369 | pmap_update(pm); |
4361 | } | | 4370 | } |
4362 | | | 4371 | |
4363 | LIST_REMOVE(pm, pm_list); | | 4372 | LIST_REMOVE(pm, pm_list); |
4364 | | | 4373 | |
4365 | pmap_free_l1(pm); | | 4374 | pmap_free_l1(pm); |
4366 | | | 4375 | |
4367 | if (pmap_recent_user == pm) | | 4376 | if (pmap_recent_user == pm) |
4368 | pmap_recent_user = NULL; | | 4377 | pmap_recent_user = NULL; |
4369 | | | 4378 | |
4370 | UVM_OBJ_DESTROY(&pm->pm_obj); | | 4379 | UVM_OBJ_DESTROY(&pm->pm_obj); |
4371 | | | 4380 | |
4372 | /* return the pmap to the pool */ | | 4381 | /* return the pmap to the pool */ |
4373 | pool_cache_put(&pmap_cache, pm); | | 4382 | pool_cache_put(&pmap_cache, pm); |
4374 | } | | 4383 | } |
4375 | | | 4384 | |
4376 | | | 4385 | |
4377 | /* | | 4386 | /* |
4378 | * void pmap_reference(pmap_t pm) | | 4387 | * void pmap_reference(pmap_t pm) |
4379 | * | | 4388 | * |
4380 | * Add a reference to the specified pmap. | | 4389 | * Add a reference to the specified pmap. |
4381 | */ | | 4390 | */ |
4382 | void | | 4391 | void |
4383 | pmap_reference(pmap_t pm) | | 4392 | pmap_reference(pmap_t pm) |
4384 | { | | 4393 | { |
4385 | | | 4394 | |
4386 | if (pm == NULL) | | 4395 | if (pm == NULL) |
4387 | return; | | 4396 | return; |
4388 | | | 4397 | |
4389 | pmap_use_l1(pm); | | 4398 | pmap_use_l1(pm); |
4390 | | | 4399 | |
4391 | mutex_enter(&pm->pm_lock); | | 4400 | mutex_enter(&pm->pm_lock); |
4392 | pm->pm_obj.uo_refs++; | | 4401 | pm->pm_obj.uo_refs++; |
4393 | mutex_exit(&pm->pm_lock); | | 4402 | mutex_exit(&pm->pm_lock); |
4394 | } | | 4403 | } |
4395 | | | 4404 | |
4396 | #if (ARM_MMU_V6 + ARM_MMU_V7) > 0 | | 4405 | #if (ARM_MMU_V6 + ARM_MMU_V7) > 0 |
4397 | | | 4406 | |
4398 | static struct evcnt pmap_prefer_nochange_ev = | | 4407 | static struct evcnt pmap_prefer_nochange_ev = |
4399 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange"); | | 4408 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange"); |
4400 | static struct evcnt pmap_prefer_change_ev = | | 4409 | static struct evcnt pmap_prefer_change_ev = |
4401 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change"); | | 4410 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change"); |
4402 | | | 4411 | |
4403 | EVCNT_ATTACH_STATIC(pmap_prefer_change_ev); | | 4412 | EVCNT_ATTACH_STATIC(pmap_prefer_change_ev); |
4404 | EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev); | | 4413 | EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev); |
4405 | | | 4414 | |
4406 | void | | 4415 | void |
4407 | pmap_prefer(vaddr_t hint, vaddr_t *vap, int td) | | 4416 | pmap_prefer(vaddr_t hint, vaddr_t *vap, int td) |
4408 | { | | 4417 | { |
4409 | vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1); | | 4418 | vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1); |
4410 | vaddr_t va = *vap; | | 4419 | vaddr_t va = *vap; |
4411 | vaddr_t diff = (hint - va) & mask; | | 4420 | vaddr_t diff = (hint - va) & mask; |
4412 | if (diff == 0) { | | 4421 | if (diff == 0) { |
4413 | pmap_prefer_nochange_ev.ev_count++; | | 4422 | pmap_prefer_nochange_ev.ev_count++; |
4414 | } else { | | 4423 | } else { |
4415 | pmap_prefer_change_ev.ev_count++; | | 4424 | pmap_prefer_change_ev.ev_count++; |
4416 | if (__predict_false(td)) | | 4425 | if (__predict_false(td)) |
4417 | va -= mask + 1; | | 4426 | va -= mask + 1; |
4418 | *vap = va + diff; | | 4427 | *vap = va + diff; |
4419 | } | | 4428 | } |
4420 | } | | 4429 | } |
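| /* | | | /* |
|  * Worked example for pmap_prefer() (cache geometry assumed purely | | |  * Worked example for pmap_prefer() (cache geometry assumed purely |
|  * for illustration): with arm_cache_prefer_mask == 0x3000 (four | | |  * for illustration): with arm_cache_prefer_mask == 0x3000 (four |
|  * page colors) and 4KiB pages, mask == 0x3fff.  Given hint == | | |  * page colors) and 4KiB pages, mask == 0x3fff.  Given hint == |
|  * 0x6000, *vap == 0x9000 and td == 0, diff == (0x6000 - 0x9000) & | | |  * 0x6000, *vap == 0x9000 and td == 0, diff == (0x6000 - 0x9000) & |
|  * 0x3fff == 0x1000, so *vap becomes 0xa000, which now matches the | | |  * 0x3fff == 0x1000, so *vap becomes 0xa000, which now matches the |
|  * hint's color: 0xa000 & 0x3000 == 0x6000 & 0x3000 == 0x2000. | | |  * hint's color: 0xa000 & 0x3000 == 0x6000 & 0x3000 == 0x2000. |
|  */ | | |  */ |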
4421 | #endif /* ARM_MMU_V6 | ARM_MMU_V7 */ | | 4430 | #endif /* ARM_MMU_V6 | ARM_MMU_V7 */ |
4422 | | | 4431 | |
4423 | /* | | 4432 | /* |
4424 | * pmap_zero_page() | | 4433 | * pmap_zero_page() |
4425 | * | | 4434 | * |
4426 | * Zero a given physical page by mapping it at a page hook point. | | 4435 | * Zero a given physical page by mapping it at a page hook point. |
4427 | * In doing the zero page op, the page we zero is mapped cacheable since, | | 4436 | * In doing the zero page op, the page we zero is mapped cacheable since, |
4428 | * on StrongARM, accesses to non-cached pages are non-burst, which makes | | 4437 | * on StrongARM, accesses to non-cached pages are non-burst, which makes |
4429 | * writing _any_ bulk data very slow. | | 4438 | * writing _any_ bulk data very slow. |
4430 | */ | | 4439 | */ |
4431 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 | | 4440 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 |
4432 | void | | 4441 | void |
4433 | pmap_zero_page_generic(paddr_t phys) | | 4442 | pmap_zero_page_generic(paddr_t phys) |
4434 | { | | 4443 | { |
4435 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) | | 4444 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) |
4436 | struct vm_page *pg = PHYS_TO_VM_PAGE(phys); | | 4445 | struct vm_page *pg = PHYS_TO_VM_PAGE(phys); |
4437 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4446 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4438 | #endif | | 4447 | #endif |
4439 | #ifdef PMAP_CACHE_VIPT | | 4448 | #ifdef PMAP_CACHE_VIPT |
4440 | /* Choose the last page color it had, if any */ | | 4449 | /* Choose the last page color it had, if any */ |
4441 | const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 4450 | const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
4442 | #else | | 4451 | #else |
4443 | const vsize_t va_offset = 0; | | 4452 | const vsize_t va_offset = 0; |
4444 | #endif | | 4453 | #endif |
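| /* | | | /* |
|  * Illustrative note (the color count is an assumption): under a | | |  * Illustrative note (the color count is an assumption): under a |
|  * VIPT cache with four page colors, cdst_pte fronts four PTE | | |  * VIPT cache with four page colors, cdst_pte fronts four PTE |
|  * slots, one per color, and va_offset >> PGSHIFT selects the slot | | |  * slots, one per color, and va_offset >> PGSHIFT selects the slot |
|  * whose hook VA (cdstp + va_offset) shares the page's previous | | |  * whose hook VA (cdstp + va_offset) shares the page's previous |
|  * color, so the zeroing stores land in the cache lines the page | | |  * color, so the zeroing stores land in the cache lines the page |
|  * last used. | | |  * last used. |
|  */ | | |  */ |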
4445 | pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; | | 4454 | pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; |
4446 | | | 4455 | |
4447 | #ifdef DEBUG | | 4456 | #ifdef DEBUG |
4448 | if (!SLIST_EMPTY(&md->pvh_list)) | | 4457 | if (!SLIST_EMPTY(&md->pvh_list)) |
4449 | panic("pmap_zero_page: page has mappings"); | | 4458 | panic("pmap_zero_page: page has mappings"); |
4450 | #endif | | 4459 | #endif |
4451 | | | 4460 | |
4452 | KDASSERT((phys & PGOFSET) == 0); | | 4461 | KDASSERT((phys & PGOFSET) == 0); |
4453 | | | 4462 | |
4454 | /* | | 4463 | /* |
4455 | * Hook in the page, zero it, and purge the cache for that | | 4464 | * Hook in the page, zero it, and purge the cache for that |
4456 | * zeroed page. Invalidate the TLB as needed. | | 4465 | * zeroed page. Invalidate the TLB as needed. |
4457 | */ | | 4466 | */ |
4458 | *ptep = L2_S_PROTO | phys | | | 4467 | *ptep = L2_S_PROTO | phys | |
4459 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; | | 4468 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; |
4460 | PTE_SYNC(ptep); | | 4469 | PTE_SYNC(ptep); |
4461 | cpu_tlb_flushD_SE(cdstp + va_offset); | | 4470 | cpu_tlb_flushD_SE(cdstp + va_offset); |
4462 | cpu_cpwait(); | | 4471 | cpu_cpwait(); |
4463 | bzero_page(cdstp + va_offset); | | 4472 | bzero_page(cdstp + va_offset); |
4464 | /* | | 4473 | /* |
4465 | * Unmap the page. | | 4474 | * Unmap the page. |
4466 | */ | | 4475 | */ |
4467 | *ptep = 0; | | 4476 | *ptep = 0; |
4468 | PTE_SYNC(ptep); | | 4477 | PTE_SYNC(ptep); |
4469 | cpu_tlb_flushD_SE(cdstp + va_offset); | | 4478 | cpu_tlb_flushD_SE(cdstp + va_offset); |
4470 | #ifdef PMAP_CACHE_VIVT | | 4479 | #ifdef PMAP_CACHE_VIVT |
4471 | cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE); | | 4480 | cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE); |
4472 | #endif | | 4481 | #endif |
4473 | #ifdef PMAP_CACHE_VIPT | | 4482 | #ifdef PMAP_CACHE_VIPT |
4474 | /* | | 4483 | /* |
4475 | * This page is now cache resident so it now has a page color. | | 4484 | * This page is now cache resident so it now has a page color. |
4476 | * Any contents have been obliterated so clear the EXEC flag. | | 4485 | * Any contents have been obliterated so clear the EXEC flag. |
4477 | */ | | 4486 | */ |
4478 | if (!pmap_is_page_colored_p(md)) { | | 4487 | if (!pmap_is_page_colored_p(md)) { |
4479 | PMAPCOUNT(vac_color_new); | | 4488 | PMAPCOUNT(vac_color_new); |
4480 | md->pvh_attrs |= PVF_COLORED; | | 4489 | md->pvh_attrs |= PVF_COLORED; |
4481 | } | | 4490 | } |
4482 | if (PV_IS_EXEC_P(md->pvh_attrs)) { | | 4491 | if (PV_IS_EXEC_P(md->pvh_attrs)) { |
4483 | md->pvh_attrs &= ~PVF_EXEC; | | 4492 | md->pvh_attrs &= ~PVF_EXEC; |
4484 | PMAPCOUNT(exec_discarded_zero); | | 4493 | PMAPCOUNT(exec_discarded_zero); |
4485 | } | | 4494 | } |
4486 | md->pvh_attrs |= PVF_DIRTY; | | 4495 | md->pvh_attrs |= PVF_DIRTY; |
4487 | #endif | | 4496 | #endif |
4488 | } | | 4497 | } |
4489 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ | | 4498 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ |
4490 | | | 4499 | |
4491 | #if ARM_MMU_XSCALE == 1 | | 4500 | #if ARM_MMU_XSCALE == 1 |
4492 | void | | 4501 | void |
4493 | pmap_zero_page_xscale(paddr_t phys) | | 4502 | pmap_zero_page_xscale(paddr_t phys) |
4494 | { | | 4503 | { |
4495 | #ifdef DEBUG | | 4504 | #ifdef DEBUG |
4496 | struct vm_page *pg = PHYS_TO_VM_PAGE(phys); | | 4505 | struct vm_page *pg = PHYS_TO_VM_PAGE(phys); |
4497 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4506 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4498 | | | 4507 | |
4499 | if (!SLIST_EMPTY(&md->pvh_list)) | | 4508 | if (!SLIST_EMPTY(&md->pvh_list)) |
4500 | panic("pmap_zero_page: page has mappings"); | | 4509 | panic("pmap_zero_page: page has mappings"); |
4501 | #endif | | 4510 | #endif |
4502 | | | 4511 | |
4503 | KDASSERT((phys & PGOFSET) == 0); | | 4512 | KDASSERT((phys & PGOFSET) == 0); |
4504 | | | 4513 | |
4505 | /* | | 4514 | /* |
4506 | * Hook in the page, zero it, and purge the cache for that | | 4515 | * Hook in the page, zero it, and purge the cache for that |
4507 | * zeroed page. Invalidate the TLB as needed. | | 4516 | * zeroed page. Invalidate the TLB as needed. |
4508 | */ | | 4517 | */ |
4509 | *cdst_pte = L2_S_PROTO | phys | | | 4518 | *cdst_pte = L2_S_PROTO | phys | |
4510 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | | | 4519 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | |
4511 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ | | 4520 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ |
4512 | PTE_SYNC(cdst_pte); | | 4521 | PTE_SYNC(cdst_pte); |
4513 | cpu_tlb_flushD_SE(cdstp); | | 4522 | cpu_tlb_flushD_SE(cdstp); |
4514 | cpu_cpwait(); | | 4523 | cpu_cpwait(); |
4515 | bzero_page(cdstp); | | 4524 | bzero_page(cdstp); |
4516 | xscale_cache_clean_minidata(); | | 4525 | xscale_cache_clean_minidata(); |
4517 | } | | 4526 | } |
4518 | #endif /* ARM_MMU_XSCALE == 1 */ | | 4527 | #endif /* ARM_MMU_XSCALE == 1 */ |
4519 | | | 4528 | |
4520 | /* pmap_pageidlezero() | | 4529 | /* pmap_pageidlezero() |
4521 | * | | 4530 | * |
4522 | * The same as above, except that we assume that the page is not | | 4531 | * The same as above, except that we assume that the page is not |
4523 | * mapped. This means we never have to flush the cache first. Called | | 4532 | * mapped. This means we never have to flush the cache first. Called |
4524 | * from the idle loop. | | 4533 | * from the idle loop. |
4525 | */ | | 4534 | */ |
4526 | bool | | 4535 | bool |
4527 | pmap_pageidlezero(paddr_t phys) | | 4536 | pmap_pageidlezero(paddr_t phys) |
4528 | { | | 4537 | { |
4529 | unsigned int i; | | 4538 | unsigned int i; |
4530 | int *ptr; | | 4539 | int *ptr; |
4531 | bool rv = true; | | 4540 | bool rv = true; |
4532 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) | | 4541 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) |
4533 | struct vm_page * const pg = PHYS_TO_VM_PAGE(phys); | | 4542 | struct vm_page * const pg = PHYS_TO_VM_PAGE(phys); |
4534 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4543 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4535 | #endif | | 4544 | #endif |
4536 | #ifdef PMAP_CACHE_VIPT | | 4545 | #ifdef PMAP_CACHE_VIPT |
4537 | /* Choose the last page color it had, if any */ | | 4546 | /* Choose the last page color it had, if any */ |
4538 | const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; | | 4547 | const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; |
4539 | #else | | 4548 | #else |
4540 | const vsize_t va_offset = 0; | | 4549 | const vsize_t va_offset = 0; |
4541 | #endif | | 4550 | #endif |
4542 | pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; | | 4551 | pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; |
4543 | | | 4552 | |
4544 | | | 4553 | |
4545 | #ifdef DEBUG | | 4554 | #ifdef DEBUG |
4546 | if (!SLIST_EMPTY(&md->pvh_list)) | | 4555 | if (!SLIST_EMPTY(&md->pvh_list)) |
4547 | panic("pmap_pageidlezero: page has mappings"); | | 4556 | panic("pmap_pageidlezero: page has mappings"); |
4548 | #endif | | 4557 | #endif |
4549 | | | 4558 | |
4550 | KDASSERT((phys & PGOFSET) == 0); | | 4559 | KDASSERT((phys & PGOFSET) == 0); |
4551 | | | 4560 | |
4552 | /* | | 4561 | /* |
4553 | * Hook in the page, zero it, and purge the cache for that | | 4562 | * Hook in the page, zero it, and purge the cache for that |
4554 | * zeroed page. Invalidate the TLB as needed. | | 4563 | * zeroed page. Invalidate the TLB as needed. |
4555 | */ | | 4564 | */ |
4556 | *ptep = L2_S_PROTO | phys | | | 4565 | *ptep = L2_S_PROTO | phys | |
4557 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; | | 4566 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; |
4558 | PTE_SYNC(ptep); | | 4567 | PTE_SYNC(ptep); |
4559 | cpu_tlb_flushD_SE(cdstp + va_offset); | | 4568 | cpu_tlb_flushD_SE(cdstp + va_offset); |
4560 | cpu_cpwait(); | | 4569 | cpu_cpwait(); |
4561 | | | 4570 | |
4562 | for (i = 0, ptr = (int *)(cdstp + va_offset); | | 4571 | for (i = 0, ptr = (int *)(cdstp + va_offset); |
4563 | i < (PAGE_SIZE / sizeof(int)); i++) { | | 4572 | i < (PAGE_SIZE / sizeof(int)); i++) { |
4564 | if (sched_curcpu_runnable_p() != 0) { | | 4573 | if (sched_curcpu_runnable_p() != 0) { |
4565 | /* | | 4574 | /* |
4566 | * A process has become ready. Abort now, | | 4575 | * A process has become ready. Abort now, |
4567 | * so we don't keep it waiting while we | | 4576 | * so we don't keep it waiting while we |
4568 | * do slow memory access to finish this | | 4577 | * do slow memory access to finish this |
4569 | * page. | | 4578 | * page. |
4570 | */ | | 4579 | */ |
4571 | rv = false; | | 4580 | rv = false; |
4572 | break; | | 4581 | break; |
4573 | } | | 4582 | } |
4574 | *ptr++ = 0; | | 4583 | *ptr++ = 0; |
4575 | } | | 4584 | } |
4576 | | | 4585 | |
4577 | #ifdef PMAP_CACHE_VIVT | | 4586 | #ifdef PMAP_CACHE_VIVT |
4578 | if (rv) | | 4587 | if (rv) |
4579 | /* | | 4588 | /* |
4580 | * if we aborted we'll rezero this page again later so don't | | 4589 | * if we aborted we'll rezero this page again later so don't |
4581 | * purge it unless we finished it | | 4590 | * purge it unless we finished it |
4582 | */ | | 4591 | */ |
4583 | cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); | | 4592 | cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); |
4584 | #elif defined(PMAP_CACHE_VIPT) | | 4593 | #elif defined(PMAP_CACHE_VIPT) |
4585 | /* | | 4594 | /* |
4586 | * This page is now cache resident so it now has a page color. | | 4595 | * This page is now cache resident so it now has a page color. |
4587 | * Any contents have been obliterated so clear the EXEC flag. | | 4596 | * Any contents have been obliterated so clear the EXEC flag. |
4588 | */ | | 4597 | */ |
4589 | if (!pmap_is_page_colored_p(md)) { | | 4598 | if (!pmap_is_page_colored_p(md)) { |
4590 | PMAPCOUNT(vac_color_new); | | 4599 | PMAPCOUNT(vac_color_new); |
4591 | md->pvh_attrs |= PVF_COLORED; | | 4600 | md->pvh_attrs |= PVF_COLORED; |
4592 | } | | 4601 | } |
4593 | if (PV_IS_EXEC_P(md->pvh_attrs)) { | | 4602 | if (PV_IS_EXEC_P(md->pvh_attrs)) { |
4594 | md->pvh_attrs &= ~PVF_EXEC; | | 4603 | md->pvh_attrs &= ~PVF_EXEC; |
4595 | PMAPCOUNT(exec_discarded_zero); | | 4604 | PMAPCOUNT(exec_discarded_zero); |
4596 | } | | 4605 | } |
4597 | #endif | | 4606 | #endif |
4598 | /* | | 4607 | /* |
4599 | * Unmap the page. | | 4608 | * Unmap the page. |
4600 | */ | | 4609 | */ |
4601 | *ptep = 0; | | 4610 | *ptep = 0; |
4602 | PTE_SYNC(ptep); | | 4611 | PTE_SYNC(ptep); |
4603 | cpu_tlb_flushD_SE(cdstp + va_offset); | | 4612 | cpu_tlb_flushD_SE(cdstp + va_offset); |
4604 | | | 4613 | |
4605 | return (rv); | | 4614 | return (rv); |
4606 | } | | 4615 | } |
4607 | | | 4616 | |
4608 | /* | | 4617 | /* |
4609 | * pmap_copy_page() | | 4618 | * pmap_copy_page() |
4610 | * | | 4619 | * |
4611 | * Copy one physical page into another, by mapping the pages into | | 4620 | * Copy one physical page into another, by mapping the pages into |
4612 | * hook points. The same comment regarding cacheability as in | | 4621 | * hook points. The same comment regarding cacheability as in |
4613 | * pmap_zero_page also applies here. | | 4622 | * pmap_zero_page also applies here. |
4614 | */ | | 4623 | */ |
4615 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 | | 4624 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 |
4616 | void | | 4625 | void |
4617 | pmap_copy_page_generic(paddr_t src, paddr_t dst) | | 4626 | pmap_copy_page_generic(paddr_t src, paddr_t dst) |
4618 | { | | 4627 | { |
4619 | struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src); | | 4628 | struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src); |
4620 | struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); | | 4629 | struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); |
4621 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) | | 4630 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) |
4622 | struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst); | | 4631 | struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst); |
4623 | struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg); | | 4632 | struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg); |
4624 | #endif | | 4633 | #endif |
4625 | #ifdef PMAP_CACHE_VIPT | | 4634 | #ifdef PMAP_CACHE_VIPT |
4626 | const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask; | | 4635 | const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask; |
4627 | const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask; | | 4636 | const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask; |
4628 | #else | | 4637 | #else |
4629 | const vsize_t src_va_offset = 0; | | 4638 | const vsize_t src_va_offset = 0; |
4630 | const vsize_t dst_va_offset = 0; | | 4639 | const vsize_t dst_va_offset = 0; |
4631 | #endif | | 4640 | #endif |
4632 | pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT]; | | 4641 | pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT]; |
4633 | pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT]; | | 4642 | pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT]; |
4634 | | | 4643 | |
4635 | #ifdef DEBUG | | 4644 | #ifdef DEBUG |
4636 | if (!SLIST_EMPTY(&dst_md->pvh_list)) | | 4645 | if (!SLIST_EMPTY(&dst_md->pvh_list)) |
4637 | panic("pmap_copy_page: dst page has mappings"); | | 4646 | panic("pmap_copy_page: dst page has mappings"); |
4638 | #endif | | 4647 | #endif |
4639 | | | 4648 | |
4640 | #ifdef PMAP_CACHE_VIPT | | 4649 | #ifdef PMAP_CACHE_VIPT |
4641 | KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC)); | | 4650 | KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC)); |
4642 | #endif | | 4651 | #endif |
4643 | KDASSERT((src & PGOFSET) == 0); | | 4652 | KDASSERT((src & PGOFSET) == 0); |
4644 | KDASSERT((dst & PGOFSET) == 0); | | 4653 | KDASSERT((dst & PGOFSET) == 0); |
4645 | | | 4654 | |
4646 | /* | | 4655 | /* |
4647 | * Clean the source page. Hold the source page's lock for | | 4656 | * Clean the source page. Hold the source page's lock for |
4648 | * the duration of the copy so that no other mappings can | | 4657 | * the duration of the copy so that no other mappings can |
4649 | * be created while we have a potentially aliased mapping. | | 4658 | * be created while we have a potentially aliased mapping. |
4650 | */ | | 4659 | */ |
4651 | simple_lock(&src_md->pvh_slock); | | 4660 | simple_lock(&src_md->pvh_slock); |
4652 | #ifdef PMAP_CACHE_VIVT | | 4661 | #ifdef PMAP_CACHE_VIVT |
4653 | (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true); | | 4662 | (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true); |
4654 | #endif | | 4663 | #endif |
4655 | | | 4664 | |
4656 | /* | | 4665 | /* |
4657 | * Map the pages into the page hook points, copy them, and purge | | 4666 | * Map the pages into the page hook points, copy them, and purge |
4658 | * the cache for the appropriate page. Invalidate the TLB | | 4667 | * the cache for the appropriate page. Invalidate the TLB |
4659 | * as required. | | 4668 | * as required. |
4660 | */ | | 4669 | */ |
4661 | *src_ptep = L2_S_PROTO | | 4670 | *src_ptep = L2_S_PROTO |
4662 | | src | | 4671 | | src |
4663 | #ifdef PMAP_CACHE_VIPT | | 4672 | #ifdef PMAP_CACHE_VIPT |
4664 | | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode) | | 4673 | | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode) |
4665 | #endif | | 4674 | #endif |
4666 | #ifdef PMAP_CACHE_VIVT | | 4675 | #ifdef PMAP_CACHE_VIVT |
4667 | | pte_l2_s_cache_mode | | 4676 | | pte_l2_s_cache_mode |
4668 | #endif | | 4677 | #endif |
4669 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ); | | 4678 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ); |
4670 | *dst_ptep = L2_S_PROTO | dst | | | 4679 | *dst_ptep = L2_S_PROTO | dst | |
4671 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; | | 4680 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; |
4672 | PTE_SYNC(src_ptep); | | 4681 | PTE_SYNC(src_ptep); |
4673 | PTE_SYNC(dst_ptep); | | 4682 | PTE_SYNC(dst_ptep); |
4674 | cpu_tlb_flushD_SE(csrcp + src_va_offset); | | 4683 | cpu_tlb_flushD_SE(csrcp + src_va_offset); |
4675 | cpu_tlb_flushD_SE(cdstp + dst_va_offset); | | 4684 | cpu_tlb_flushD_SE(cdstp + dst_va_offset); |
4676 | cpu_cpwait(); | | 4685 | cpu_cpwait(); |
4677 | bcopy_page(csrcp + src_va_offset, cdstp + dst_va_offset); | | 4686 | bcopy_page(csrcp + src_va_offset, cdstp + dst_va_offset); |
4678 | #ifdef PMAP_CACHE_VIVT | | 4687 | #ifdef PMAP_CACHE_VIVT |
4679 | cpu_dcache_inv_range(csrcp + src_va_offset, PAGE_SIZE); | | 4688 | cpu_dcache_inv_range(csrcp + src_va_offset, PAGE_SIZE); |
4680 | #endif | | 4689 | #endif |
4681 | simple_unlock(&src_md->pvh_slock); /* cache is safe again */ | | 4690 | simple_unlock(&src_md->pvh_slock); /* cache is safe again */ |
4682 | #ifdef PMAP_CACHE_VIVT | | 4691 | #ifdef PMAP_CACHE_VIVT |
4683 | cpu_dcache_wbinv_range(cdstp + dst_va_offset, PAGE_SIZE); | | 4692 | cpu_dcache_wbinv_range(cdstp + dst_va_offset, PAGE_SIZE); |
4684 | #endif | | 4693 | #endif |
4685 | /* | | 4694 | /* |
4686 | * Unmap the pages. | | 4695 | * Unmap the pages. |
4687 | */ | | 4696 | */ |
4688 | *src_ptep = 0; | | 4697 | *src_ptep = 0; |
4689 | *dst_ptep = 0; | | 4698 | *dst_ptep = 0; |
4690 | PTE_SYNC(src_ptep); | | 4699 | PTE_SYNC(src_ptep); |
4691 | PTE_SYNC(dst_ptep); | | 4700 | PTE_SYNC(dst_ptep); |
4692 | cpu_tlb_flushD_SE(csrcp + src_va_offset); | | 4701 | cpu_tlb_flushD_SE(csrcp + src_va_offset); |
4693 | cpu_tlb_flushD_SE(cdstp + dst_va_offset); | | 4702 | cpu_tlb_flushD_SE(cdstp + dst_va_offset); |
4694 | #ifdef PMAP_CACHE_VIPT | | 4703 | #ifdef PMAP_CACHE_VIPT |
4695 | /* | | 4704 | /* |
4696 | * Now that the destination page is in the cache, mark it as colored. | | 4705 | * Now that the destination page is in the cache, mark it as colored. |
4697 | * If this was an exec page, discard it. | | 4706 | * If this was an exec page, discard it. |
4698 | */ | | 4707 | */ |
4699 | if (!pmap_is_page_colored_p(dst_md)) { | | 4708 | if (!pmap_is_page_colored_p(dst_md)) { |
4700 | PMAPCOUNT(vac_color_new); | | 4709 | PMAPCOUNT(vac_color_new); |
4701 | dst_md->pvh_attrs |= PVF_COLORED; | | 4710 | dst_md->pvh_attrs |= PVF_COLORED; |
4702 | } | | 4711 | } |
4703 | if (PV_IS_EXEC_P(dst_md->pvh_attrs)) { | | 4712 | if (PV_IS_EXEC_P(dst_md->pvh_attrs)) { |
4704 | dst_md->pvh_attrs &= ~PVF_EXEC; | | 4713 | dst_md->pvh_attrs &= ~PVF_EXEC; |
4705 | PMAPCOUNT(exec_discarded_copy); | | 4714 | PMAPCOUNT(exec_discarded_copy); |
4706 | } | | 4715 | } |
4707 | dst_md->pvh_attrs |= PVF_DIRTY; | | 4716 | dst_md->pvh_attrs |= PVF_DIRTY; |
4708 | #endif | | 4717 | #endif |
4709 | } | | 4718 | } |
4710 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ | | 4719 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ |
4711 | | | 4720 | |
4712 | #if ARM_MMU_XSCALE == 1 | | 4721 | #if ARM_MMU_XSCALE == 1 |
4713 | void | | 4722 | void |
4714 | pmap_copy_page_xscale(paddr_t src, paddr_t dst) | | 4723 | pmap_copy_page_xscale(paddr_t src, paddr_t dst) |
4715 | { | | 4724 | { |
4716 | struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); | | 4725 | struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); |
| struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); | | | struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); |
4717 | #ifdef DEBUG | | 4726 | #ifdef DEBUG |
4718 | struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); | | 4727 | struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); |
| struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg); | | | struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg); |
4719 | | | 4728 | |
4720 | if (!SLIST_EMPTY(&dst_md->pvh_list)) | | 4729 | if (!SLIST_EMPTY(&dst_md->pvh_list)) |
4721 | panic("pmap_copy_page: dst page has mappings"); | | 4730 | panic("pmap_copy_page: dst page has mappings"); |
4722 | #endif | | 4731 | #endif |
4723 | | | 4732 | |
4724 | KDASSERT((src & PGOFSET) == 0); | | 4733 | KDASSERT((src & PGOFSET) == 0); |
4725 | KDASSERT((dst & PGOFSET) == 0); | | 4734 | KDASSERT((dst & PGOFSET) == 0); |
4726 | | | 4735 | |
4727 | /* | | 4736 | /* |
4728 | * Clean the source page. Hold the source page's lock for | | 4737 | * Clean the source page. Hold the source page's lock for |
4729 | * the duration of the copy so that no other mappings can | | 4738 | * the duration of the copy so that no other mappings can |
4730 | * be created while we have a potentially aliased mapping. | | 4739 | * be created while we have a potentially aliased mapping. |
4731 | */ | | 4740 | */ |
4732 | simple_lock(&src_md->pvh_slock); | | 4741 | simple_lock(&src_md->pvh_slock); |
4733 | #ifdef PMAP_CACHE_VIVT | | 4742 | #ifdef PMAP_CACHE_VIVT |
4734 | (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true); | | 4743 | (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true); |
4735 | #endif | | 4744 | #endif |
4736 | | | 4745 | |
4737 | /* | | 4746 | /* |
4738 | * Map the pages into the page hook points, copy them, and purge | | 4747 | * Map the pages into the page hook points, copy them, and purge |
4739 | * the cache for the appropriate page. Invalidate the TLB | | 4748 | * the cache for the appropriate page. Invalidate the TLB |
4740 | * as required. | | 4749 | * as required. |
4741 | */ | | 4750 | */ |
4742 | *csrc_pte = L2_S_PROTO | src | | | 4751 | *csrc_pte = L2_S_PROTO | src | |
4743 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | | | 4752 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | |
4744 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ | | 4753 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ |
4745 | PTE_SYNC(csrc_pte); | | 4754 | PTE_SYNC(csrc_pte); |
4746 | *cdst_pte = L2_S_PROTO | dst | | | 4755 | *cdst_pte = L2_S_PROTO | dst | |
4747 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | | | 4756 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | |
4748 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ | | 4757 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ |
4749 | PTE_SYNC(cdst_pte); | | 4758 | PTE_SYNC(cdst_pte); |
4750 | cpu_tlb_flushD_SE(csrcp); | | 4759 | cpu_tlb_flushD_SE(csrcp); |
4751 | cpu_tlb_flushD_SE(cdstp); | | 4760 | cpu_tlb_flushD_SE(cdstp); |
4752 | cpu_cpwait(); | | 4761 | cpu_cpwait(); |
4753 | bcopy_page(csrcp, cdstp); | | 4762 | bcopy_page(csrcp, cdstp); |
4754 | simple_unlock(&src_md->pvh_slock); /* cache is safe again */ | | 4763 | simple_unlock(&src_md->pvh_slock); /* cache is safe again */ |
4755 | xscale_cache_clean_minidata(); | | 4764 | xscale_cache_clean_minidata(); |
4756 | } | | 4765 | } |
4757 | #endif /* ARM_MMU_XSCALE == 1 */ | | 4766 | #endif /* ARM_MMU_XSCALE == 1 */ |
4758 | | | 4767 | |
4759 | /* | | 4768 | /* |
4760 | * void pmap_virtual_space(vaddr_t *start, vaddr_t *end) | | 4769 | * void pmap_virtual_space(vaddr_t *start, vaddr_t *end) |
4761 | * | | 4770 | * |
4762 | * Return the start and end addresses of the kernel's virtual space. | | 4771 | * Return the start and end addresses of the kernel's virtual space. |
4763 | * These values are set up in pmap_bootstrap and are updated as pages | | 4772 | * These values are set up in pmap_bootstrap and are updated as pages |
4764 | * are allocated. | | 4773 | * are allocated. |
4765 | */ | | 4774 | */ |
4766 | void | | 4775 | void |
4767 | pmap_virtual_space(vaddr_t *start, vaddr_t *end) | | 4776 | pmap_virtual_space(vaddr_t *start, vaddr_t *end) |
4768 | { | | 4777 | { |
4769 | *start = virtual_avail; | | 4778 | *start = virtual_avail; |
4770 | *end = virtual_end; | | 4779 | *end = virtual_end; |
4771 | } | | 4780 | } |
4772 | | | 4781 | |
4773 | /* | | 4782 | /* |
4774 | * Helper function for pmap_grow_l2_bucket() | | 4783 | * Helper function for pmap_grow_l2_bucket() |
4775 | */ | | 4784 | */ |
4776 | static inline int | | 4785 | static inline int |
4777 | pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap) | | 4786 | pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap) |
4778 | { | | 4787 | { |
4779 | struct l2_bucket *l2b; | | 4788 | struct l2_bucket *l2b; |
4780 | pt_entry_t *ptep; | | 4789 | pt_entry_t *ptep; |
4781 | paddr_t pa; | | 4790 | paddr_t pa; |
4782 | | | 4791 | |
4783 | if (uvm.page_init_done == false) { | | 4792 | if (uvm.page_init_done == false) { |
4784 | #ifdef PMAP_STEAL_MEMORY | | 4793 | #ifdef PMAP_STEAL_MEMORY |
4785 | pv_addr_t pv; | | 4794 | pv_addr_t pv; |
4786 | pmap_boot_pagealloc(PAGE_SIZE, | | 4795 | pmap_boot_pagealloc(PAGE_SIZE, |
4787 | #ifdef PMAP_CACHE_VIPT | | 4796 | #ifdef PMAP_CACHE_VIPT |
4788 | arm_cache_prefer_mask, | | 4797 | arm_cache_prefer_mask, |
4789 | va & arm_cache_prefer_mask, | | 4798 | va & arm_cache_prefer_mask, |
4790 | #else | | 4799 | #else |
4791 | 0, 0, | | 4800 | 0, 0, |
4792 | #endif | | 4801 | #endif |
4793 | &pv); | | 4802 | &pv); |
4794 | pa = pv.pv_pa; | | 4803 | pa = pv.pv_pa; |
4795 | #else | | 4804 | #else |
4796 | if (uvm_page_physget(&pa) == false) | | 4805 | if (uvm_page_physget(&pa) == false) |
4797 | return (1); | | 4806 | return (1); |
4798 | #endif /* PMAP_STEAL_MEMORY */ | | 4807 | #endif /* PMAP_STEAL_MEMORY */ |
4799 | } else { | | 4808 | } else { |
4800 | struct vm_page *pg; | | 4809 | struct vm_page *pg; |
4801 | pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); | | 4810 | pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); |
4802 | if (pg == NULL) | | 4811 | if (pg == NULL) |
4803 | return (1); | | 4812 | return (1); |
4804 | pa = VM_PAGE_TO_PHYS(pg); | | 4813 | pa = VM_PAGE_TO_PHYS(pg); |
4805 | #ifdef PMAP_CACHE_VIPT | | 4814 | #ifdef PMAP_CACHE_VIPT |
4806 | #ifdef DIAGNOSTIC | | 4815 | #ifdef DIAGNOSTIC |
4807 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); | | 4816 | struct vm_page_md *md = VM_PAGE_TO_MD(pg); |
4808 | #endif | | 4817 | #endif |
4809 | /* | | 4818 | /* |
4810 | * This new page must not have any mappings. Enter it via | | 4819 | * This new page must not have any mappings. Enter it via |
4811 | * pmap_kenter_pa and let that routine do the hard work. | | 4820 | * pmap_kenter_pa and let that routine do the hard work. |
4812 | */ | | 4821 | */ |
4813 | KASSERT(SLIST_EMPTY(&md->pvh_list)); | | 4822 | KASSERT(SLIST_EMPTY(&md->pvh_list)); |
4814 | pmap_kenter_pa(va, pa, | | 4823 | pmap_kenter_pa(va, pa, |
4815 | VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE); | | 4824 | VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE); |
4816 | #endif | | 4825 | #endif |
4817 | } | | 4826 | } |
4818 | | | 4827 | |
4819 | if (pap) | | 4828 | if (pap) |
4820 | *pap = pa; | | 4829 | *pap = pa; |
4821 | | | 4830 | |
4822 | PMAPCOUNT(pt_mappings); | | 4831 | PMAPCOUNT(pt_mappings); |
4823 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); | | 4832 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); |
4824 | KDASSERT(l2b != NULL); | | 4833 | KDASSERT(l2b != NULL); |
4825 | | | 4834 | |
4826 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 4835 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
4827 | *ptep = L2_S_PROTO | pa | cache_mode | | | 4836 | *ptep = L2_S_PROTO | pa | cache_mode | |
4828 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); | | 4837 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); |
4829 | PTE_SYNC(ptep); | | 4838 | PTE_SYNC(ptep); |
4830 | memset((void *)va, 0, PAGE_SIZE); | | 4839 | memset((void *)va, 0, PAGE_SIZE); |
4831 | return (0); | | 4840 | return (0); |
4832 | } | | 4841 | } |
4833 | | | 4842 | |
4834 | /* | | 4843 | /* |
4835 | * This is the same as pmap_alloc_l2_bucket(), except that it is only | | 4844 | * This is the same as pmap_alloc_l2_bucket(), except that it is only |
4836 | * used by pmap_growkernel(). | | 4845 | * used by pmap_growkernel(). |
4837 | */ | | 4846 | */ |
4838 | static inline struct l2_bucket * | | 4847 | static inline struct l2_bucket * |
4839 | pmap_grow_l2_bucket(pmap_t pm, vaddr_t va) | | 4848 | pmap_grow_l2_bucket(pmap_t pm, vaddr_t va) |
4840 | { | | 4849 | { |
4841 | struct l2_dtable *l2; | | 4850 | struct l2_dtable *l2; |
4842 | struct l2_bucket *l2b; | | 4851 | struct l2_bucket *l2b; |
4843 | u_short l1idx; | | 4852 | u_short l1idx; |
4844 | vaddr_t nva; | | 4853 | vaddr_t nva; |
4845 | | | 4854 | |
4846 | l1idx = L1_IDX(va); | | 4855 | l1idx = L1_IDX(va); |
4847 | | | 4856 | |
4848 | if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { | | 4857 | if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { |
4849 | /* | | 4858 | /* |
4850 | * No mapping at this address, as there is | | 4859 | * No mapping at this address, as there is |
4851 | * no entry in the L1 table. | | 4860 | * no entry in the L1 table. |
4852 | * Need to allocate a new l2_dtable. | | 4861 | * Need to allocate a new l2_dtable. |
4853 | */ | | 4862 | */ |
4854 | nva = pmap_kernel_l2dtable_kva; | | 4863 | nva = pmap_kernel_l2dtable_kva; |
4855 | if ((nva & PGOFSET) == 0) { | | 4864 | if ((nva & PGOFSET) == 0) { |
4856 | /* | | 4865 | /* |
4857 | * Need to allocate a backing page | | 4866 | * Need to allocate a backing page |
4858 | */ | | 4867 | */ |
4859 | if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) | | 4868 | if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) |
4860 | return (NULL); | | 4869 | return (NULL); |
4861 | } | | 4870 | } |
4862 | | | 4871 | |
4863 | l2 = (struct l2_dtable *)nva; | | 4872 | l2 = (struct l2_dtable *)nva; |
4864 | nva += sizeof(struct l2_dtable); | | 4873 | nva += sizeof(struct l2_dtable); |
4865 | | | 4874 | |
4866 | if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) { | | 4875 | if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) { |
4867 | /* | | 4876 | /* |
4868 | * The new l2_dtable straddles a page boundary. | | 4877 | * The new l2_dtable straddles a page boundary. |
4869 | * Map in another page to cover it. | | 4878 | * Map in another page to cover it. |
4870 | */ | | 4879 | */ |
4871 | if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) | | 4880 | if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) |
4872 | return (NULL); | | 4881 | return (NULL); |
4873 | } | | 4882 | } |
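| /* | | | /* |
|  * Example of the straddle test above (offsets illustrative): if | | |  * Example of the straddle test above (offsets illustrative): if |
|  * the old cursor sat at page offset 0xf80 and adding sizeof(struct | | |  * the old cursor sat at page offset 0xf80 and adding sizeof(struct |
|  * l2_dtable) moved it to offset 0x080, the new offset is smaller | | |  * l2_dtable) moved it to offset 0x080, the new offset is smaller |
|  * than the old one, i.e. the structure crossed into the next page, | | |  * than the old one, i.e. the structure crossed into the next page, |
|  * so a second backing page must be mapped. | | |  * so a second backing page must be mapped. |
|  */ | | |  */ |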
4874 | | | 4883 | |
4875 | pmap_kernel_l2dtable_kva = nva; | | 4884 | pmap_kernel_l2dtable_kva = nva; |
4876 | | | 4885 | |
4877 | /* | | 4886 | /* |
4878 | * Link it into the parent pmap | | 4887 | * Link it into the parent pmap |
4879 | */ | | 4888 | */ |
4880 | pm->pm_l2[L2_IDX(l1idx)] = l2; | | 4889 | pm->pm_l2[L2_IDX(l1idx)] = l2; |
4881 | } | | 4890 | } |
4882 | | | 4891 | |
4883 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; | | 4892 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; |
4884 | | | 4893 | |
4885 | /* | | 4894 | /* |
4886 | * Fetch pointer to the L2 page table associated with the address. | | 4895 | * Fetch pointer to the L2 page table associated with the address. |
4887 | */ | | 4896 | */ |
4888 | if (l2b->l2b_kva == NULL) { | | 4897 | if (l2b->l2b_kva == NULL) { |
4889 | pt_entry_t *ptep; | | 4898 | pt_entry_t *ptep; |
4890 | | | 4899 | |
4891 | /* | | 4900 | /* |
4892 | * No L2 page table has been allocated. Chances are, this | | 4901 | * No L2 page table has been allocated. Chances are, this |
4893 | * is because we just allocated the l2_dtable, above. | | 4902 | * is because we just allocated the l2_dtable, above. |
4894 | */ | | 4903 | */ |
4895 | nva = pmap_kernel_l2ptp_kva; | | 4904 | nva = pmap_kernel_l2ptp_kva; |
4896 | ptep = (pt_entry_t *)nva; | | 4905 | ptep = (pt_entry_t *)nva; |
4897 | if ((nva & PGOFSET) == 0) { | | 4906 | if ((nva & PGOFSET) == 0) { |
4898 | /* | | 4907 | /* |
4899 | * Need to allocate a backing page | | 4908 | * Need to allocate a backing page |
4900 | */ | | 4909 | */ |
4901 | if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, | | 4910 | if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, |
4902 | &pmap_kernel_l2ptp_phys)) | | 4911 | &pmap_kernel_l2ptp_phys)) |
4903 | return (NULL); | | 4912 | return (NULL); |
4904 | PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); | | 4913 | PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); |
4905 | } | | 4914 | } |
4906 | | | 4915 | |
4907 | l2->l2_occupancy++; | | 4916 | l2->l2_occupancy++; |
4908 | l2b->l2b_kva = ptep; | | 4917 | l2b->l2b_kva = ptep; |
4909 | l2b->l2b_l1idx = l1idx; | | 4918 | l2b->l2b_l1idx = l1idx; |
4910 | l2b->l2b_phys = pmap_kernel_l2ptp_phys; | | 4919 | l2b->l2b_phys = pmap_kernel_l2ptp_phys; |
4911 | | | 4920 | |
4912 | pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; | | 4921 | pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; |
4913 | pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; | | 4922 | pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; |
4914 | } | | 4923 | } |
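| /* | | | /* |
|  * Sub-allocation sketch (assuming L2_TABLE_SIZE_REAL == 1KiB, the | | |  * Sub-allocation sketch (assuming L2_TABLE_SIZE_REAL == 1KiB, the |
|  * usual ARM coarse-table size): each bucket consumes one slice of | | |  * usual ARM coarse-table size): each bucket consumes one slice of |
|  * the backing page, so a single 4KiB page from pmap_grow_map() | | |  * the backing page, so a single 4KiB page from pmap_grow_map() |
|  * yields four L2 tables before the (nva & PGOFSET) == 0 test above | | |  * yields four L2 tables before the (nva & PGOFSET) == 0 test above |
|  * demands a fresh page. | | |  * demands a fresh page. |
|  */ | | |  */ |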
4915 | | | 4924 | |
4916 | return (l2b); | | 4925 | return (l2b); |
4917 | } | | 4926 | } |
4918 | | | 4927 | |
4919 | vaddr_t | | 4928 | vaddr_t |
4920 | pmap_growkernel(vaddr_t maxkvaddr) | | 4929 | pmap_growkernel(vaddr_t maxkvaddr) |
4921 | { | | 4930 | { |
4922 | pmap_t kpm = pmap_kernel(); | | 4931 | pmap_t kpm = pmap_kernel(); |
4923 | struct l1_ttable *l1; | | 4932 | struct l1_ttable *l1; |
4924 | struct l2_bucket *l2b; | | 4933 | struct l2_bucket *l2b; |
4925 | pd_entry_t *pl1pd; | | 4934 | pd_entry_t *pl1pd; |
4926 | int s; | | 4935 | int s; |
4927 | | | 4936 | |
4928 | if (maxkvaddr <= pmap_curmaxkvaddr) | | 4937 | if (maxkvaddr <= pmap_curmaxkvaddr) |
4929 | goto out; /* we are OK */ | | 4938 | goto out; /* we are OK */ |
4930 | | | 4939 | |
4931 | NPDEBUG(PDB_GROWKERN, | | 4940 | NPDEBUG(PDB_GROWKERN, |
4932 | printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n", | | 4941 | printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n", |
4933 | pmap_curmaxkvaddr, maxkvaddr)); | | 4942 | pmap_curmaxkvaddr, maxkvaddr)); |
4934 | | | 4943 | |
4935 | KDASSERT(maxkvaddr <= virtual_end); | | 4944 | KDASSERT(maxkvaddr <= virtual_end); |
4936 | | | 4945 | |
4937 | /* | | 4946 | /* |
4938 | * whoops! we need to add kernel PTPs | | 4947 | * whoops! we need to add kernel PTPs |
4939 | */ | | 4948 | */ |
4940 | | | 4949 | |
4941 | s = splhigh(); /* to be safe */ | | 4950 | s = splhigh(); /* to be safe */ |
4942 | mutex_enter(&kpm->pm_lock); | | 4951 | mutex_enter(&kpm->pm_lock); |
4943 | | | 4952 | |
4944 | /* Map 1MB at a time */ | | 4953 | /* Map 1MB at a time */ |
4945 | for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) { | | 4954 | for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) { |
4946 | | | 4955 | |
4947 | l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); | | 4956 | l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); |
4948 | KDASSERT(l2b != NULL); | | 4957 | KDASSERT(l2b != NULL); |
4949 | | | 4958 | |
4950 | /* Distribute new L1 entry to all other L1s */ | | 4959 | /* Distribute new L1 entry to all other L1s */ |
4951 | SLIST_FOREACH(l1, &l1_list, l1_link) { | | 4960 | SLIST_FOREACH(l1, &l1_list, l1_link) { |
4952 | pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)]; | | 4961 | pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)]; |
4953 | *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | | | 4962 | *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | |
4954 | L1_C_PROTO; | | 4963 | L1_C_PROTO; |
4955 | PTE_SYNC(pl1pd); | | 4964 | PTE_SYNC(pl1pd); |
4956 | } | | 4965 | } |
4957 | } | | 4966 | } |
4958 | | | 4967 | |
4959 | /* | | 4968 | /* |
4960 | * Flush out the cache; this is expensive, but pmap_growkernel() | | 4969 | * Flush out the cache; this is expensive, but pmap_growkernel() |
4961 | * runs rarely enough that it does not matter. | | 4970 | * runs rarely enough that it does not matter. |
4962 | */ | | 4971 | */ |
4963 | cpu_dcache_wbinv_all(); | | 4972 | cpu_dcache_wbinv_all(); |
4964 | cpu_tlb_flushD(); | | 4973 | cpu_tlb_flushD(); |
4965 | cpu_cpwait(); | | 4974 | cpu_cpwait(); |
4966 | | | 4975 | |
4967 | mutex_exit(&kpm->pm_lock); | | 4976 | mutex_exit(&kpm->pm_lock); |
4968 | splx(s); | | 4977 | splx(s); |
4969 | | | 4978 | |
4970 | out: | | 4979 | out: |
4971 | return (pmap_curmaxkvaddr); | | 4980 | return (pmap_curmaxkvaddr); |
4972 | } | | 4981 | } |
4973 | | | 4982 | |
4974 | /************************ Utility routines ****************************/ | | 4983 | /************************ Utility routines ****************************/ |
4975 | | | 4984 | |
4976 | /* | | 4985 | /* |
4977 | * vector_page_setprot: | | 4986 | * vector_page_setprot: |
4978 | * | | 4987 | * |
4979 | * Manipulate the protection of the vector page. | | 4988 | * Manipulate the protection of the vector page. |
4980 | */ | | 4989 | */ |
4981 | void | | 4990 | void |
4982 | vector_page_setprot(int prot) | | 4991 | vector_page_setprot(int prot) |
4983 | { | | 4992 | { |
4984 | struct l2_bucket *l2b; | | 4993 | struct l2_bucket *l2b; |
4985 | pt_entry_t *ptep; | | 4994 | pt_entry_t *ptep; |
4986 | | | 4995 | |
4987 | l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); | | 4996 | l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); |
4988 | KDASSERT(l2b != NULL); | | 4997 | KDASSERT(l2b != NULL); |
4989 | | | 4998 | |
4990 | ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; | | 4999 | ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; |
4991 | | | 5000 | |
4992 | *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); | | 5001 | *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); |
	PTE_SYNC(ptep);
	cpu_tlb_flushD_SE(vector_page);
	cpu_cpwait();
}
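
#if 0
/*
 * Illustrative sketch (not part of the original source): a typical
 * consumer makes the vector page writable, installs the exception
 * vectors, then drops back to read-only. "example_install_vectors"
 * is a hypothetical name; the real work is done by the MD vector
 * initialisation code.
 */
static void
example_install_vectors(void)
{
	vector_page_setprot(VM_PROT_READ | VM_PROT_WRITE);
	/* ... copy the exception vectors into place ... */
	vector_page_setprot(VM_PROT_READ);
}
#endif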

/*
 * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
 * Returns true if the mapping exists, else false.
 *
 * NOTE: This function is only used by a couple of arm-specific modules.
 * It is not safe to take any pmap locks here, since we could be right
 * in the middle of debugging the pmap anyway...
 *
 * It is possible for this routine to return false even though a valid
 * mapping does exist. This is because we don't lock, so the metadata
 * state may be inconsistent.
 *
 * NOTE: We can return a NULL *ptp in the case where the L1 pde is
 * a "section" mapping.
 */
bool
pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp)
{
	struct l2_dtable *l2;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep;
	u_short l1idx;

	if (pm->pm_l1 == NULL)
		return false;

	l1idx = L1_IDX(va);
	*pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
	l1pd = *pl1pd;

	if (l1pte_section_p(l1pd)) {
		*ptp = NULL;
		return true;
	}

	if (pm->pm_l2 == NULL)
		return false;

	l2 = pm->pm_l2[L2_IDX(l1idx)];

	if (l2 == NULL ||
	    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
		return false;
	}

	*ptp = &ptep[l2pte_index(va)];
	return true;
}
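
#if 0
/*
 * Illustrative sketch (not part of the original source) of the calling
 * convention documented above: callers must handle both the section
 * mapping case (*ptp comes back NULL) and the lock-free false-negative
 * case. "example_lookup" is a hypothetical name.
 */
static bool
example_lookup(pmap_t pm, vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (!pmap_get_pde_pte(pm, va, &pdep, &ptep))
		return false;	/* unmapped, or metadata caught mid-update */

	if (ptep == NULL) {
		/* 1MB section mapping: the PA lives in the L1 entry */
		return true;
	}

	/* Page mapping: *ptep is the L2 descriptor covering 'va'. */
	return l2pte_valid(*ptep);
}
#endif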

bool
pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp)
{
	u_short l1idx;

	if (pm->pm_l1 == NULL)
		return false;

	l1idx = L1_IDX(va);
	*pdp = &pm->pm_l1->l1_kva[l1idx];

	return true;
}

/************************ Bootstrapping routines ****************************/

static void
pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
{
	int i;

	l1->l1_kva = l1pt;
	l1->l1_domain_use_count = 0;
	l1->l1_domain_first = 0;

	for (i = 0; i < PMAP_DOMAINS; i++)
		l1->l1_domain_free[i] = i + 1;

	/*
	 * Copy the kernel's L1 entries to each new L1.
	 */
	if (pmap_initialized)
		memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);

	if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt,
	    &l1->l1_physaddr) == false)
		panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);

	SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
}
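
#if 0
/*
 * Illustrative sketch (not part of the original source): the
 * l1_domain_free[] initialisation in pmap_init_l1() threads an implicit
 * free list of ARM hardware domains through the array. l1_domain_first
 * is the head; each entry holds the index of the next free domain.
 * Allocating and releasing a domain then amount to list pops and
 * pushes, roughly as below ("example_" names are hypothetical; see the
 * real allocation code elsewhere in this file).
 */
static u_int
example_alloc_domain(struct l1_ttable *l1)
{
	u_int domain = l1->l1_domain_first;

	l1->l1_domain_first = l1->l1_domain_free[domain];
	l1->l1_domain_use_count++;

	return domain;
}

static void
example_free_domain(struct l1_ttable *l1, u_int domain)
{
	l1->l1_domain_free[domain] = l1->l1_domain_first;
	l1->l1_domain_first = domain;
	l1->l1_domain_use_count--;
}
#endif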

/*
 * pmap_bootstrap() is called from the board-specific initarm() routine
 * once the kernel L1/L2 descriptor tables have been set up.
 *
 * This is a somewhat convoluted process since pmap bootstrap is, effectively,
 * spread over a number of disparate files/functions.
 *
 * We depend on the following, set up by the board code:
 * - kernel_l1pt
 *	The base of the kernel's L1 translation table.
 * - vstart
 *	1MB-aligned start of managed kernel virtual memory.
 * - vend
 *	1MB-aligned end of managed kernel virtual memory.
 *
 * We use kernel_l1pt to build the metadata (struct l1_ttable and
 * struct l2_dtable) necessary to track kernel mappings.
 */
#define PMAP_STATIC_L2_SIZE 16
void
pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
{
	static struct l1_ttable static_l1;
	static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
	struct l1_ttable *l1 = &static_l1;
	struct l2_dtable *l2;