| @@ -1,1214 +1,1214 @@ | | | @@ -1,1214 +1,1214 @@ |
1 | /* $NetBSD: pmap.c,v 1.187 2008/09/28 21:27:11 skrll Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.187.4.1 2009/12/03 09:26:59 sborrill Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright 2003 Wasabi Systems, Inc. | | 4 | * Copyright 2003 Wasabi Systems, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Written by Steve C. Woodford for Wasabi Systems, Inc. | | 7 | * Written by Steve C. Woodford for Wasabi Systems, Inc. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the | | 15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. | | 16 | * documentation and/or other materials provided with the distribution. |
17 | * 3. All advertising materials mentioning features or use of this software | | 17 | * 3. All advertising materials mentioning features or use of this software |
18 | * must display the following acknowledgement: | | 18 | * must display the following acknowledgement: |
19 | * This product includes software developed for the NetBSD Project by | | 19 | * This product includes software developed for the NetBSD Project by |
20 | * Wasabi Systems, Inc. | | 20 | * Wasabi Systems, Inc. |
21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | | 21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
22 | * or promote products derived from this software without specific prior | | 22 | * or promote products derived from this software without specific prior |
23 | * written permission. | | 23 | * written permission. |
24 | * | | 24 | * |
25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | | 25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | | 28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
35 | * POSSIBILITY OF SUCH DAMAGE. | | 35 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ | | 36 | */ |
37 | | | 37 | |
38 | /* | | 38 | /* |
39 | * Copyright (c) 2002-2003 Wasabi Systems, Inc. | | 39 | * Copyright (c) 2002-2003 Wasabi Systems, Inc. |
40 | * Copyright (c) 2001 Richard Earnshaw | | 40 | * Copyright (c) 2001 Richard Earnshaw |
41 | * Copyright (c) 2001-2002 Christopher Gilbert | | 41 | * Copyright (c) 2001-2002 Christopher Gilbert |
42 | * All rights reserved. | | 42 | * All rights reserved. |
43 | * | | 43 | * |
44 | * 1. Redistributions of source code must retain the above copyright | | 44 | * 1. Redistributions of source code must retain the above copyright |
45 | * notice, this list of conditions and the following disclaimer. | | 45 | * notice, this list of conditions and the following disclaimer. |
46 | * 2. Redistributions in binary form must reproduce the above copyright | | 46 | * 2. Redistributions in binary form must reproduce the above copyright |
47 | * notice, this list of conditions and the following disclaimer in the | | 47 | * notice, this list of conditions and the following disclaimer in the |
48 | * documentation and/or other materials provided with the distribution. | | 48 | * documentation and/or other materials provided with the distribution. |
49 | * 3. The name of the company nor the name of the author may be used to | | 49 | * 3. The name of the company nor the name of the author may be used to |
50 | * endorse or promote products derived from this software without specific | | 50 | * endorse or promote products derived from this software without specific |
51 | * prior written permission. | | 51 | * prior written permission. |
52 | * | | 52 | * |
53 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED | | 53 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED |
54 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | | 54 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
55 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 55 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
56 | * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, | | 56 | * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, |
57 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | | 57 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
58 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | | 58 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
59 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 59 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
63 | * SUCH DAMAGE. | | 63 | * SUCH DAMAGE. |
64 | */ | | 64 | */ |
65 | | | 65 | |
66 | /*- | | 66 | /*- |
67 | * Copyright (c) 1999 The NetBSD Foundation, Inc. | | 67 | * Copyright (c) 1999 The NetBSD Foundation, Inc. |
68 | * All rights reserved. | | 68 | * All rights reserved. |
69 | * | | 69 | * |
70 | * This code is derived from software contributed to The NetBSD Foundation | | 70 | * This code is derived from software contributed to The NetBSD Foundation |
71 | * by Charles M. Hannum. | | 71 | * by Charles M. Hannum. |
72 | * | | 72 | * |
73 | * Redistribution and use in source and binary forms, with or without | | 73 | * Redistribution and use in source and binary forms, with or without |
74 | * modification, are permitted provided that the following conditions | | 74 | * modification, are permitted provided that the following conditions |
75 | * are met: | | 75 | * are met: |
76 | * 1. Redistributions of source code must retain the above copyright | | 76 | * 1. Redistributions of source code must retain the above copyright |
77 | * notice, this list of conditions and the following disclaimer. | | 77 | * notice, this list of conditions and the following disclaimer. |
78 | * 2. Redistributions in binary form must reproduce the above copyright | | 78 | * 2. Redistributions in binary form must reproduce the above copyright |
79 | * notice, this list of conditions and the following disclaimer in the | | 79 | * notice, this list of conditions and the following disclaimer in the |
80 | * documentation and/or other materials provided with the distribution. | | 80 | * documentation and/or other materials provided with the distribution. |
81 | * | | 81 | * |
82 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 82 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
83 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 83 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
84 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 84 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
85 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 85 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
86 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 86 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
87 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 87 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
88 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 88 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
89 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 89 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
90 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 90 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
91 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 91 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
92 | * POSSIBILITY OF SUCH DAMAGE. | | 92 | * POSSIBILITY OF SUCH DAMAGE. |
93 | */ | | 93 | */ |
94 | | | 94 | |
95 | /* | | 95 | /* |
96 | * Copyright (c) 1994-1998 Mark Brinicombe. | | 96 | * Copyright (c) 1994-1998 Mark Brinicombe. |
97 | * Copyright (c) 1994 Brini. | | 97 | * Copyright (c) 1994 Brini. |
98 | * All rights reserved. | | 98 | * All rights reserved. |
99 | * | | 99 | * |
100 | * This code is derived from software written for Brini by Mark Brinicombe | | 100 | * This code is derived from software written for Brini by Mark Brinicombe |
101 | * | | 101 | * |
102 | * Redistribution and use in source and binary forms, with or without | | 102 | * Redistribution and use in source and binary forms, with or without |
103 | * modification, are permitted provided that the following conditions | | 103 | * modification, are permitted provided that the following conditions |
104 | * are met: | | 104 | * are met: |
105 | * 1. Redistributions of source code must retain the above copyright | | 105 | * 1. Redistributions of source code must retain the above copyright |
106 | * notice, this list of conditions and the following disclaimer. | | 106 | * notice, this list of conditions and the following disclaimer. |
107 | * 2. Redistributions in binary form must reproduce the above copyright | | 107 | * 2. Redistributions in binary form must reproduce the above copyright |
108 | * notice, this list of conditions and the following disclaimer in the | | 108 | * notice, this list of conditions and the following disclaimer in the |
109 | * documentation and/or other materials provided with the distribution. | | 109 | * documentation and/or other materials provided with the distribution. |
110 | * 3. All advertising materials mentioning features or use of this software | | 110 | * 3. All advertising materials mentioning features or use of this software |
111 | * must display the following acknowledgement: | | 111 | * must display the following acknowledgement: |
112 | * This product includes software developed by Mark Brinicombe. | | 112 | * This product includes software developed by Mark Brinicombe. |
113 | * 4. The name of the author may not be used to endorse or promote products | | 113 | * 4. The name of the author may not be used to endorse or promote products |
114 | * derived from this software without specific prior written permission. | | 114 | * derived from this software without specific prior written permission. |
115 | * | | 115 | * |
116 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 116 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
117 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 117 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
118 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 118 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
119 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 119 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
120 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 120 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
121 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 121 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
122 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 122 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
123 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 123 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
124 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 124 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
125 | * | | 125 | * |
126 | * RiscBSD kernel project | | 126 | * RiscBSD kernel project |
127 | * | | 127 | * |
128 | * pmap.c | | 128 | * pmap.c |
129 | * | | 129 | * |
130 | * Machine dependent vm stuff | | 130 | * Machine dependent vm stuff |
131 | * | | 131 | * |
132 | * Created : 20/09/94 | | 132 | * Created : 20/09/94 |
133 | */ | | 133 | */ |
134 | | | 134 | |
135 | /* | | 135 | /* |
136 | * armv6 and VIPT cache support by 3am Software Foundry, | | 136 | * armv6 and VIPT cache support by 3am Software Foundry, |
137 | * Copyright (c) 2007 Microsoft | | 137 | * Copyright (c) 2007 Microsoft |
138 | */ | | 138 | */ |
139 | | | 139 | |
140 | /* | | 140 | /* |
141 | * Performance improvements, UVM changes, overhauls and part-rewrites | | 141 | * Performance improvements, UVM changes, overhauls and part-rewrites |
142 | * were contributed by Neil A. Carson <neil@causality.com>. | | 142 | * were contributed by Neil A. Carson <neil@causality.com>. |
143 | */ | | 143 | */ |
144 | | | 144 | |
145 | /* | | 145 | /* |
146 | * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables | | 146 | * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables |
147 | * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi | | 147 | * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi |
148 | * Systems, Inc. | | 148 | * Systems, Inc. |
149 | * | | 149 | * |
150 | * There are still a few things outstanding at this time: | | 150 | * There are still a few things outstanding at this time: |
151 | * | | 151 | * |
152 | * - There are some unresolved issues for MP systems: | | 152 | * - There are some unresolved issues for MP systems: |
153 | * | | 153 | * |
154 | * o The L1 metadata needs a lock, or more specifically, some places | | 154 | * o The L1 metadata needs a lock, or more specifically, some places |
155 | * need to acquire an exclusive lock when modifying L1 translation | | 155 | * need to acquire an exclusive lock when modifying L1 translation |
156 | * table entries. | | 156 | * table entries. |
157 | * | | 157 | * |
158 | * o When one cpu modifies an L1 entry, and that L1 table is also | | 158 | * o When one cpu modifies an L1 entry, and that L1 table is also |
159 | * being used by another cpu, then the latter will need to be told | | 159 | * being used by another cpu, then the latter will need to be told |
160 | * that a tlb invalidation may be necessary. (But only if the old | | 160 | * that a tlb invalidation may be necessary. (But only if the old |
161 | * domain number in the L1 entry being over-written is currently | | 161 | * domain number in the L1 entry being over-written is currently |
162 | * the active domain on that cpu). I guess there are lots more tlb | | 162 | * the active domain on that cpu). I guess there are lots more tlb |
163 | * shootdown issues too... | | 163 | * shootdown issues too... |
164 | * | | 164 | * |
165 | * o If the vector_page is at 0x00000000 instead of 0xffff0000, then | | 165 | * o If the vector_page is at 0x00000000 instead of 0xffff0000, then |
166 | * MP systems will lose big-time because of the MMU domain hack. | | 166 | * MP systems will lose big-time because of the MMU domain hack. |
167 | * The only way this can be solved (apart from moving the vector | | 167 | * The only way this can be solved (apart from moving the vector |
168 | * page to 0xffff0000) is to reserve the first 1MB of user address | | 168 | * page to 0xffff0000) is to reserve the first 1MB of user address |
169 | * space for kernel use only. This would require re-linking all | | 169 | * space for kernel use only. This would require re-linking all |
170 | * applications so that the text section starts above this 1MB | | 170 | * applications so that the text section starts above this 1MB |
171 | * boundary. | | 171 | * boundary. |
172 | * | | 172 | * |
173 | * o Tracking which VM space is resident in the cache/tlb has not yet | | 173 | * o Tracking which VM space is resident in the cache/tlb has not yet |
174 | * been implemented for MP systems. | | 174 | * been implemented for MP systems. |
175 | * | | 175 | * |
176 | * o Finally, there is a pathological condition where two cpus running | | 176 | * o Finally, there is a pathological condition where two cpus running |
177 | * two separate processes (not lwps) which happen to share an L1 | | 177 | * two separate processes (not lwps) which happen to share an L1 |
178 | * can get into a fight over one or more L1 entries. This will result | | 178 | * can get into a fight over one or more L1 entries. This will result |
179 | * in a significant slow-down if both processes are in tight loops. | | 179 | * in a significant slow-down if both processes are in tight loops. |
180 | */ | | 180 | */ |
181 | | | 181 | |
182 | /* | | 182 | /* |
183 | * Special compilation symbols | | 183 | * Special compilation symbols |
184 | * PMAP_DEBUG - Build in pmap_debug_level code | | 184 | * PMAP_DEBUG - Build in pmap_debug_level code |
185 | */ | | 185 | */ |
186 | | | 186 | |
187 | /* Include header files */ | | 187 | /* Include header files */ |
188 | | | 188 | |
189 | #include "opt_cpuoptions.h" | | 189 | #include "opt_cpuoptions.h" |
190 | #include "opt_pmap_debug.h" | | 190 | #include "opt_pmap_debug.h" |
191 | #include "opt_ddb.h" | | 191 | #include "opt_ddb.h" |
192 | #include "opt_lockdebug.h" | | 192 | #include "opt_lockdebug.h" |
193 | #include "opt_multiprocessor.h" | | 193 | #include "opt_multiprocessor.h" |
194 | | | 194 | |
195 | #include <sys/param.h> | | 195 | #include <sys/param.h> |
196 | #include <sys/types.h> | | 196 | #include <sys/types.h> |
197 | #include <sys/kernel.h> | | 197 | #include <sys/kernel.h> |
198 | #include <sys/systm.h> | | 198 | #include <sys/systm.h> |
199 | #include <sys/proc.h> | | 199 | #include <sys/proc.h> |
200 | #include <sys/malloc.h> | | 200 | #include <sys/malloc.h> |
201 | #include <sys/user.h> | | 201 | #include <sys/user.h> |
202 | #include <sys/pool.h> | | 202 | #include <sys/pool.h> |
203 | #include <sys/cdefs.h> | | 203 | #include <sys/cdefs.h> |
204 | #include <sys/cpu.h> | | 204 | #include <sys/cpu.h> |
205 | #include <sys/sysctl.h> | | 205 | #include <sys/sysctl.h> |
206 | | | 206 | |
207 | #include <uvm/uvm.h> | | 207 | #include <uvm/uvm.h> |
208 | | | 208 | |
209 | #include <machine/bus.h> | | 209 | #include <machine/bus.h> |
210 | #include <machine/pmap.h> | | 210 | #include <machine/pmap.h> |
211 | #include <machine/pcb.h> | | 211 | #include <machine/pcb.h> |
212 | #include <machine/param.h> | | 212 | #include <machine/param.h> |
213 | #include <arm/arm32/katelib.h> | | 213 | #include <arm/arm32/katelib.h> |
214 | | | 214 | |
215 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.187 2008/09/28 21:27:11 skrll Exp $"); | | 215 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.187.4.1 2009/12/03 09:26:59 sborrill Exp $"); |
216 | | | 216 | |
217 | #ifdef PMAP_DEBUG | | 217 | #ifdef PMAP_DEBUG |
218 | | | 218 | |
219 | /* XXX need to get rid of all refs to this */ | | 219 | /* XXX need to get rid of all refs to this */ |
220 | int pmap_debug_level = 0; | | 220 | int pmap_debug_level = 0; |
221 | | | 221 | |
222 | /* | | 222 | /* |
223 | * for switching to potentially finer grained debugging | | 223 | * for switching to potentially finer grained debugging |
224 | */ | | 224 | */ |
225 | #define PDB_FOLLOW 0x0001 | | 225 | #define PDB_FOLLOW 0x0001 |
226 | #define PDB_INIT 0x0002 | | 226 | #define PDB_INIT 0x0002 |
227 | #define PDB_ENTER 0x0004 | | 227 | #define PDB_ENTER 0x0004 |
228 | #define PDB_REMOVE 0x0008 | | 228 | #define PDB_REMOVE 0x0008 |
229 | #define PDB_CREATE 0x0010 | | 229 | #define PDB_CREATE 0x0010 |
230 | #define PDB_PTPAGE 0x0020 | | 230 | #define PDB_PTPAGE 0x0020 |
231 | #define PDB_GROWKERN 0x0040 | | 231 | #define PDB_GROWKERN 0x0040 |
232 | #define PDB_BITS 0x0080 | | 232 | #define PDB_BITS 0x0080 |
233 | #define PDB_COLLECT 0x0100 | | 233 | #define PDB_COLLECT 0x0100 |
234 | #define PDB_PROTECT 0x0200 | | 234 | #define PDB_PROTECT 0x0200 |
235 | #define PDB_MAP_L1 0x0400 | | 235 | #define PDB_MAP_L1 0x0400 |
236 | #define PDB_BOOTSTRAP 0x1000 | | 236 | #define PDB_BOOTSTRAP 0x1000 |
237 | #define PDB_PARANOIA 0x2000 | | 237 | #define PDB_PARANOIA 0x2000 |
238 | #define PDB_WIRING 0x4000 | | 238 | #define PDB_WIRING 0x4000 |
239 | #define PDB_PVDUMP 0x8000 | | 239 | #define PDB_PVDUMP 0x8000 |
240 | #define PDB_VAC 0x10000 | | 240 | #define PDB_VAC 0x10000 |
241 | #define PDB_KENTER 0x20000 | | 241 | #define PDB_KENTER 0x20000 |
242 | #define PDB_KREMOVE 0x40000 | | 242 | #define PDB_KREMOVE 0x40000 |
243 | #define PDB_EXEC 0x80000 | | 243 | #define PDB_EXEC 0x80000 |
244 | | | 244 | |
245 | int debugmap = 1; | | 245 | int debugmap = 1; |
246 | int pmapdebug = 0; | | 246 | int pmapdebug = 0; |
247 | #define NPDEBUG(_lev_,_stat_) \ | | 247 | #define NPDEBUG(_lev_,_stat_) \ |
248 | if (pmapdebug & (_lev_)) \ | | 248 | if (pmapdebug & (_lev_)) \ |
249 | ((_stat_)) | | 249 | ((_stat_)) |
250 | | | 250 | |
251 | #else /* PMAP_DEBUG */ | | 251 | #else /* PMAP_DEBUG */ |
252 | #define NPDEBUG(_lev_,_stat_) /* Nothing */ | | 252 | #define NPDEBUG(_lev_,_stat_) /* Nothing */ |
253 | #endif /* PMAP_DEBUG */ | | 253 | #endif /* PMAP_DEBUG */ |
254 | | | 254 | |
255 | /* | | 255 | /* |
256 | * pmap_kernel() points here | | 256 | * pmap_kernel() points here |
257 | */ | | 257 | */ |
258 | struct pmap kernel_pmap_store; | | 258 | struct pmap kernel_pmap_store; |
259 | | | 259 | |
260 | /* | | 260 | /* |
261 | * Which pmap is currently 'live' in the cache | | 261 | * Which pmap is currently 'live' in the cache |
262 | * | | 262 | * |
263 | * XXXSCW: Fix for SMP ... | | 263 | * XXXSCW: Fix for SMP ... |
264 | */ | | 264 | */ |
265 | static pmap_t pmap_recent_user; | | 265 | static pmap_t pmap_recent_user; |
266 | | | 266 | |
267 | /* | | 267 | /* |
268 | * Pointer to last active lwp, or NULL if it exited. | | 268 | * Pointer to last active lwp, or NULL if it exited. |
269 | */ | | 269 | */ |
270 | struct lwp *pmap_previous_active_lwp; | | 270 | struct lwp *pmap_previous_active_lwp; |
271 | | | 271 | |
272 | /* | | 272 | /* |
273 | * Pool and cache that pmap structures are allocated from. | | 273 | * Pool and cache that pmap structures are allocated from. |
274 | * We use a cache to avoid clearing the pm_l2[] array (1KB) | | 274 | * We use a cache to avoid clearing the pm_l2[] array (1KB) |
275 | * in pmap_create(). | | 275 | * in pmap_create(). |
276 | */ | | 276 | */ |
277 | static struct pool_cache pmap_cache; | | 277 | static struct pool_cache pmap_cache; |
278 | static LIST_HEAD(, pmap) pmap_pmaps; | | 278 | static LIST_HEAD(, pmap) pmap_pmaps; |
279 | | | 279 | |
280 | /* | | 280 | /* |
281 | * Pool of PV structures | | 281 | * Pool of PV structures |
282 | */ | | 282 | */ |
283 | static struct pool pmap_pv_pool; | | 283 | static struct pool pmap_pv_pool; |
284 | static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); | | 284 | static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); |
285 | static void pmap_bootstrap_pv_page_free(struct pool *, void *); | | 285 | static void pmap_bootstrap_pv_page_free(struct pool *, void *); |
286 | static struct pool_allocator pmap_bootstrap_pv_allocator = { | | 286 | static struct pool_allocator pmap_bootstrap_pv_allocator = { |
287 | pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free | | 287 | pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free |
288 | }; | | 288 | }; |
289 | | | 289 | |
290 | /* | | 290 | /* |
291 | * Pool and cache of l2_dtable structures. | | 291 | * Pool and cache of l2_dtable structures. |
292 | * We use a cache to avoid clearing the structures when they're | | 292 | * We use a cache to avoid clearing the structures when they're |
293 | * allocated. (196 bytes) | | 293 | * allocated. (196 bytes) |
294 | */ | | 294 | */ |
295 | static struct pool_cache pmap_l2dtable_cache; | | 295 | static struct pool_cache pmap_l2dtable_cache; |
296 | static vaddr_t pmap_kernel_l2dtable_kva; | | 296 | static vaddr_t pmap_kernel_l2dtable_kva; |
297 | | | 297 | |
298 | /* | | 298 | /* |
299 | * Pool and cache of L2 page descriptors. | | 299 | * Pool and cache of L2 page descriptors. |
300 | * We use a cache to avoid clearing the descriptor table | | 300 | * We use a cache to avoid clearing the descriptor table |
301 | * when they're allocated. (1KB) | | 301 | * when they're allocated. (1KB) |
302 | */ | | 302 | */ |
303 | static struct pool_cache pmap_l2ptp_cache; | | 303 | static struct pool_cache pmap_l2ptp_cache; |
304 | static vaddr_t pmap_kernel_l2ptp_kva; | | 304 | static vaddr_t pmap_kernel_l2ptp_kva; |
305 | static paddr_t pmap_kernel_l2ptp_phys; | | 305 | static paddr_t pmap_kernel_l2ptp_phys; |
306 | | | 306 | |
307 | #ifdef PMAPCOUNTERS | | 307 | #ifdef PMAPCOUNTERS |
308 | #define PMAP_EVCNT_INITIALIZER(name) \ | | 308 | #define PMAP_EVCNT_INITIALIZER(name) \ |
309 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) | | 309 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) |
310 | | | 310 | |
311 | #ifdef PMAP_CACHE_VIPT | | 311 | #ifdef PMAP_CACHE_VIPT |
312 | static struct evcnt pmap_ev_vac_color_new = | | 312 | static struct evcnt pmap_ev_vac_color_new = |
313 | PMAP_EVCNT_INITIALIZER("new page color"); | | 313 | PMAP_EVCNT_INITIALIZER("new page color"); |
314 | static struct evcnt pmap_ev_vac_color_reuse = | | 314 | static struct evcnt pmap_ev_vac_color_reuse = |
315 | PMAP_EVCNT_INITIALIZER("ok first page color"); | | 315 | PMAP_EVCNT_INITIALIZER("ok first page color"); |
316 | static struct evcnt pmap_ev_vac_color_ok = | | 316 | static struct evcnt pmap_ev_vac_color_ok = |
317 | PMAP_EVCNT_INITIALIZER("ok page color"); | | 317 | PMAP_EVCNT_INITIALIZER("ok page color"); |
318 | static struct evcnt pmap_ev_vac_color_blind = | | 318 | static struct evcnt pmap_ev_vac_color_blind = |
319 | PMAP_EVCNT_INITIALIZER("blind page color"); | | 319 | PMAP_EVCNT_INITIALIZER("blind page color"); |
320 | static struct evcnt pmap_ev_vac_color_change = | | 320 | static struct evcnt pmap_ev_vac_color_change = |
321 | PMAP_EVCNT_INITIALIZER("change page color"); | | 321 | PMAP_EVCNT_INITIALIZER("change page color"); |
322 | static struct evcnt pmap_ev_vac_color_erase = | | 322 | static struct evcnt pmap_ev_vac_color_erase = |
323 | PMAP_EVCNT_INITIALIZER("erase page color"); | | 323 | PMAP_EVCNT_INITIALIZER("erase page color"); |
324 | static struct evcnt pmap_ev_vac_color_none = | | 324 | static struct evcnt pmap_ev_vac_color_none = |
325 | PMAP_EVCNT_INITIALIZER("no page color"); | | 325 | PMAP_EVCNT_INITIALIZER("no page color"); |
326 | static struct evcnt pmap_ev_vac_color_restore = | | 326 | static struct evcnt pmap_ev_vac_color_restore = |
327 | PMAP_EVCNT_INITIALIZER("restore page color"); | | 327 | PMAP_EVCNT_INITIALIZER("restore page color"); |
328 | | | 328 | |
329 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); | | 329 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); |
330 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); | | 330 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); |
331 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); | | 331 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); |
332 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); | | 332 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); |
333 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); | | 333 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); |
334 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); | | 334 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); |
335 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); | | 335 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); |
336 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); | | 336 | EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); |
337 | #endif | | 337 | #endif |
338 | | | 338 | |
339 | static struct evcnt pmap_ev_mappings = | | 339 | static struct evcnt pmap_ev_mappings = |
340 | PMAP_EVCNT_INITIALIZER("pages mapped"); | | 340 | PMAP_EVCNT_INITIALIZER("pages mapped"); |
341 | static struct evcnt pmap_ev_unmappings = | | 341 | static struct evcnt pmap_ev_unmappings = |
342 | PMAP_EVCNT_INITIALIZER("pages unmapped"); | | 342 | PMAP_EVCNT_INITIALIZER("pages unmapped"); |
343 | static struct evcnt pmap_ev_remappings = | | 343 | static struct evcnt pmap_ev_remappings = |
344 | PMAP_EVCNT_INITIALIZER("pages remapped"); | | 344 | PMAP_EVCNT_INITIALIZER("pages remapped"); |
345 | | | 345 | |
346 | EVCNT_ATTACH_STATIC(pmap_ev_mappings); | | 346 | EVCNT_ATTACH_STATIC(pmap_ev_mappings); |
347 | EVCNT_ATTACH_STATIC(pmap_ev_unmappings); | | 347 | EVCNT_ATTACH_STATIC(pmap_ev_unmappings); |
348 | EVCNT_ATTACH_STATIC(pmap_ev_remappings); | | 348 | EVCNT_ATTACH_STATIC(pmap_ev_remappings); |
349 | | | 349 | |
350 | static struct evcnt pmap_ev_kernel_mappings = | | 350 | static struct evcnt pmap_ev_kernel_mappings = |
351 | PMAP_EVCNT_INITIALIZER("kernel pages mapped"); | | 351 | PMAP_EVCNT_INITIALIZER("kernel pages mapped"); |
352 | static struct evcnt pmap_ev_kernel_unmappings = | | 352 | static struct evcnt pmap_ev_kernel_unmappings = |
353 | PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); | | 353 | PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); |
354 | static struct evcnt pmap_ev_kernel_remappings = | | 354 | static struct evcnt pmap_ev_kernel_remappings = |
355 | PMAP_EVCNT_INITIALIZER("kernel pages remapped"); | | 355 | PMAP_EVCNT_INITIALIZER("kernel pages remapped"); |
356 | | | 356 | |
357 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); | | 357 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); |
358 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); | | 358 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); |
359 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); | | 359 | EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); |
360 | | | 360 | |
361 | static struct evcnt pmap_ev_kenter_mappings = | | 361 | static struct evcnt pmap_ev_kenter_mappings = |
362 | PMAP_EVCNT_INITIALIZER("kenter pages mapped"); | | 362 | PMAP_EVCNT_INITIALIZER("kenter pages mapped"); |
363 | static struct evcnt pmap_ev_kenter_unmappings = | | 363 | static struct evcnt pmap_ev_kenter_unmappings = |
364 | PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); | | 364 | PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); |
365 | static struct evcnt pmap_ev_kenter_remappings = | | 365 | static struct evcnt pmap_ev_kenter_remappings = |
366 | PMAP_EVCNT_INITIALIZER("kenter pages remapped"); | | 366 | PMAP_EVCNT_INITIALIZER("kenter pages remapped"); |
367 | static struct evcnt pmap_ev_pt_mappings = | | 367 | static struct evcnt pmap_ev_pt_mappings = |
368 | PMAP_EVCNT_INITIALIZER("page table pages mapped"); | | 368 | PMAP_EVCNT_INITIALIZER("page table pages mapped"); |
369 | | | 369 | |
370 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); | | 370 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); |
371 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); | | 371 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); |
372 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); | | 372 | EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); |
373 | EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); | | 373 | EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); |
374 | | | 374 | |
375 | #ifdef PMAP_CACHE_VIPT | | 375 | #ifdef PMAP_CACHE_VIPT |
376 | static struct evcnt pmap_ev_exec_mappings = | | 376 | static struct evcnt pmap_ev_exec_mappings = |
377 | PMAP_EVCNT_INITIALIZER("exec pages mapped"); | | 377 | PMAP_EVCNT_INITIALIZER("exec pages mapped"); |
378 | static struct evcnt pmap_ev_exec_cached = | | 378 | static struct evcnt pmap_ev_exec_cached = |
379 | PMAP_EVCNT_INITIALIZER("exec pages cached"); | | 379 | PMAP_EVCNT_INITIALIZER("exec pages cached"); |
380 | | | 380 | |
381 | EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); | | 381 | EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); |
382 | EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); | | 382 | EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); |
383 | | | 383 | |
384 | static struct evcnt pmap_ev_exec_synced = | | 384 | static struct evcnt pmap_ev_exec_synced = |
385 | PMAP_EVCNT_INITIALIZER("exec pages synced"); | | 385 | PMAP_EVCNT_INITIALIZER("exec pages synced"); |
386 | static struct evcnt pmap_ev_exec_synced_map = | | 386 | static struct evcnt pmap_ev_exec_synced_map = |
387 | PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); | | 387 | PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); |
388 | static struct evcnt pmap_ev_exec_synced_unmap = | | 388 | static struct evcnt pmap_ev_exec_synced_unmap = |
389 | PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); | | 389 | PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); |
390 | static struct evcnt pmap_ev_exec_synced_remap = | | 390 | static struct evcnt pmap_ev_exec_synced_remap = |
391 | PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); | | 391 | PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); |
392 | static struct evcnt pmap_ev_exec_synced_clearbit = | | 392 | static struct evcnt pmap_ev_exec_synced_clearbit = |
393 | PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); | | 393 | PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); |
394 | static struct evcnt pmap_ev_exec_synced_kremove = | | 394 | static struct evcnt pmap_ev_exec_synced_kremove = |
395 | PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); | | 395 | PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); |
396 | | | 396 | |
397 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); | | 397 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); |
398 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); | | 398 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); |
399 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); | | 399 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); |
400 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); | | 400 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); |
401 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); | | 401 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); |
402 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); | | 402 | EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); |
403 | | | 403 | |
404 | static struct evcnt pmap_ev_exec_discarded_unmap = | | 404 | static struct evcnt pmap_ev_exec_discarded_unmap = |
405 | PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); | | 405 | PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); |
406 | static struct evcnt pmap_ev_exec_discarded_zero = | | 406 | static struct evcnt pmap_ev_exec_discarded_zero = |
407 | PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); | | 407 | PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); |
408 | static struct evcnt pmap_ev_exec_discarded_copy = | | 408 | static struct evcnt pmap_ev_exec_discarded_copy = |
409 | PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); | | 409 | PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); |
410 | static struct evcnt pmap_ev_exec_discarded_page_protect = | | 410 | static struct evcnt pmap_ev_exec_discarded_page_protect = |
411 | PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); | | 411 | PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); |
412 | static struct evcnt pmap_ev_exec_discarded_clearbit = | | 412 | static struct evcnt pmap_ev_exec_discarded_clearbit = |
413 | PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); | | 413 | PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); |
414 | static struct evcnt pmap_ev_exec_discarded_kremove = | | 414 | static struct evcnt pmap_ev_exec_discarded_kremove = |
415 | PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); | | 415 | PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); |
416 | | | 416 | |
417 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); | | 417 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); |
418 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); | | 418 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); |
419 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); | | 419 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); |
420 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); | | 420 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); |
421 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); | | 421 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); |
422 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); | | 422 | EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); |
423 | #endif /* PMAP_CACHE_VIPT */ | | 423 | #endif /* PMAP_CACHE_VIPT */ |
424 | | | 424 | |
425 | static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); | | 425 | static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); |
426 | static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); | | 426 | static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); |
427 | static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); | | 427 | static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); |
428 | | | 428 | |
429 | EVCNT_ATTACH_STATIC(pmap_ev_updates); | | 429 | EVCNT_ATTACH_STATIC(pmap_ev_updates); |
430 | EVCNT_ATTACH_STATIC(pmap_ev_collects); | | 430 | EVCNT_ATTACH_STATIC(pmap_ev_collects); |
431 | EVCNT_ATTACH_STATIC(pmap_ev_activations); | | 431 | EVCNT_ATTACH_STATIC(pmap_ev_activations); |
432 | | | 432 | |
433 | #define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) | | 433 | #define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) |
434 | #else | | 434 | #else |
435 | #define PMAPCOUNT(x) ((void)0) | | 435 | #define PMAPCOUNT(x) ((void)0) |
436 | #endif | | 436 | #endif |
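PMAPCOUNT() pastes its argument onto the pmap_ev_ prefix, so a call site names an event counter by its suffix alone; with PMAPCOUNTERS undefined it expands to ((void)0) and costs nothing. A minimal usage sketch, using counter names declared above:

	PMAPCOUNT(mappings);	/* increments pmap_ev_mappings.ev_count */
	PMAPCOUNT(remappings);	/* increments pmap_ev_remappings.ev_count */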
437 | | | 437 | |
438 | /* | | 438 | /* |
439 | * pmap copy/zero page, and mem(5) hook point | | 439 | * pmap copy/zero page, and mem(5) hook point |
440 | */ | | 440 | */ |
441 | static pt_entry_t *csrc_pte, *cdst_pte; | | 441 | static pt_entry_t *csrc_pte, *cdst_pte; |
442 | static vaddr_t csrcp, cdstp; | | 442 | static vaddr_t csrcp, cdstp; |
443 | vaddr_t memhook; /* used by mem.c */ | | 443 | vaddr_t memhook; /* used by mem.c */ |
444 | extern void *msgbufaddr; | | 444 | extern void *msgbufaddr; |
445 | int pmap_kmpages; | | 445 | int pmap_kmpages; |
446 | /* | | 446 | /* |
447 | * Flag to indicate if pmap_init() has done its thing | | 447 | * Flag to indicate if pmap_init() has done its thing |
448 | */ | | 448 | */ |
449 | bool pmap_initialized; | | 449 | bool pmap_initialized; |
450 | | | 450 | |
451 | /* | | 451 | /* |
452 | * Misc. locking data structures | | 452 | * Misc. locking data structures |
453 | */ | | 453 | */ |
454 | | | 454 | |
455 | #if 0 /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */ | | 455 | #if 0 /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */ |
456 | static struct lock pmap_main_lock; | | 456 | static struct lock pmap_main_lock; |
457 | | | 457 | |
458 | #define PMAP_MAP_TO_HEAD_LOCK() \ | | 458 | #define PMAP_MAP_TO_HEAD_LOCK() \ |
459 | (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL) | | 459 | (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL) |
460 | #define PMAP_MAP_TO_HEAD_UNLOCK() \ | | 460 | #define PMAP_MAP_TO_HEAD_UNLOCK() \ |
461 | (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL) | | 461 | (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL) |
462 | #define PMAP_HEAD_TO_MAP_LOCK() \ | | 462 | #define PMAP_HEAD_TO_MAP_LOCK() \ |
463 | (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL) | | 463 | (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL) |
464 | #define PMAP_HEAD_TO_MAP_UNLOCK() \ | | 464 | #define PMAP_HEAD_TO_MAP_UNLOCK() \ |
465 | spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0) | | 465 | spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0) |
466 | #else | | 466 | #else |
467 | #define PMAP_MAP_TO_HEAD_LOCK() /* null */ | | 467 | #define PMAP_MAP_TO_HEAD_LOCK() /* null */ |
468 | #define PMAP_MAP_TO_HEAD_UNLOCK() /* null */ | | 468 | #define PMAP_MAP_TO_HEAD_UNLOCK() /* null */ |
469 | #define PMAP_HEAD_TO_MAP_LOCK() /* null */ | | 469 | #define PMAP_HEAD_TO_MAP_LOCK() /* null */ |
470 | #define PMAP_HEAD_TO_MAP_UNLOCK() /* null */ | | 470 | #define PMAP_HEAD_TO_MAP_UNLOCK() /* null */ |
471 | #endif | | 471 | #endif |
472 | | | 472 | |
473 | #define pmap_acquire_pmap_lock(pm) \ | | 473 | #define pmap_acquire_pmap_lock(pm) \ |
474 | do { \ | | 474 | do { \ |
475 | if ((pm) != pmap_kernel()) \ | | 475 | if ((pm) != pmap_kernel()) \ |
476 | mutex_enter(&(pm)->pm_lock); \ | | 476 | mutex_enter(&(pm)->pm_lock); \ |
477 | } while (/*CONSTCOND*/0) | | 477 | } while (/*CONSTCOND*/0) |
478 | | | 478 | |
479 | #define pmap_release_pmap_lock(pm) \ | | 479 | #define pmap_release_pmap_lock(pm) \ |
480 | do { \ | | 480 | do { \ |
481 | if ((pm) != pmap_kernel()) \ | | 481 | if ((pm) != pmap_kernel()) \ |
482 | mutex_exit(&(pm)->pm_lock); \ | | 482 | mutex_exit(&(pm)->pm_lock); \ |
483 | } while (/*CONSTCOND*/0) | | 483 | } while (/*CONSTCOND*/0) |
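Both lock helpers are deliberate no-ops for the kernel pmap; only user pmaps take pm_lock. A hedged sketch of the intended bracketing (the body between the calls is illustrative only):

	pmap_acquire_pmap_lock(pm);
	/* ... inspect or modify this pmap's L1/L2 state ... */
	pmap_release_pmap_lock(pm);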
484 | | | 484 | |
485 | | | 485 | |
486 | /* | | 486 | /* |
487 | * Metadata for L1 translation tables. | | 487 | * Metadata for L1 translation tables. |
488 | */ | | 488 | */ |
489 | struct l1_ttable { | | 489 | struct l1_ttable { |
490 | /* Entry on the L1 Table list */ | | 490 | /* Entry on the L1 Table list */ |
491 | SLIST_ENTRY(l1_ttable) l1_link; | | 491 | SLIST_ENTRY(l1_ttable) l1_link; |
492 | | | 492 | |
493 | /* Entry on the L1 Least Recently Used list */ | | 493 | /* Entry on the L1 Least Recently Used list */ |
494 | TAILQ_ENTRY(l1_ttable) l1_lru; | | 494 | TAILQ_ENTRY(l1_ttable) l1_lru; |
495 | | | 495 | |
496 | /* Track how many domains are allocated from this L1 */ | | 496 | /* Track how many domains are allocated from this L1 */ |
497 | volatile u_int l1_domain_use_count; | | 497 | volatile u_int l1_domain_use_count; |
498 | | | 498 | |
499 | /* | | 499 | /* |
500 | * A free-list of domain numbers for this L1. | | 500 | * A free-list of domain numbers for this L1. |
501 | * We avoid using ffs() and a bitmap to track domains since ffs() | | 501 | * We avoid using ffs() and a bitmap to track domains since ffs() |
502 | * is slow on ARM. | | 502 | * is slow on ARM. |
503 | */ | | 503 | */ |
504 | u_int8_t l1_domain_first; | | 504 | u_int8_t l1_domain_first; |
505 | u_int8_t l1_domain_free[PMAP_DOMAINS]; | | 505 | u_int8_t l1_domain_free[PMAP_DOMAINS]; |
506 | | | 506 | |
507 | /* Physical address of this L1 page table */ | | 507 | /* Physical address of this L1 page table */ |
508 | paddr_t l1_physaddr; | | 508 | paddr_t l1_physaddr; |
509 | | | 509 | |
510 | /* KVA of this L1 page table */ | | 510 | /* KVA of this L1 page table */ |
511 | pd_entry_t *l1_kva; | | 511 | pd_entry_t *l1_kva; |
512 | }; | | 512 | }; |
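As the comment inside the struct says, l1_domain_free[] avoids ffs() by threading the free domain numbers into a singly linked list rooted at l1_domain_first, each array slot holding the next free index. A hedged sketch of the resulting O(1) pop/push discipline (names match the struct above; the surrounding code is illustrative, not quoted from this file):

	/* pop: claim a free domain number from this L1 */
	u_int domain = l1->l1_domain_first;
	l1->l1_domain_first = l1->l1_domain_free[domain];
	l1->l1_domain_use_count++;

	/* push: return a domain number to this L1 */
	l1->l1_domain_free[domain] = l1->l1_domain_first;
	l1->l1_domain_first = domain;
	l1->l1_domain_use_count--;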
513 | | | 513 | |
514 | /* | | 514 | /* |
515 | * Convert a virtual address into its L1 table index. That is, the | | 515 | * Convert a virtual address into its L1 table index. That is, the |
516 | * index used to locate the L2 descriptor table pointer in an L1 table. | | 516 | * index used to locate the L2 descriptor table pointer in an L1 table. |
517 | * This is basically used to index l1->l1_kva[]. | | 517 | * This is basically used to index l1->l1_kva[]. |
518 | * | | 518 | * |
519 | * Each L2 descriptor table represents 1MB of VA space. | | 519 | * Each L2 descriptor table represents 1MB of VA space. |
520 | */ | | 520 | */ |
521 | #define L1_IDX(va) (((vaddr_t)(va)) >> L1_S_SHIFT) | | 521 | #define L1_IDX(va) (((vaddr_t)(va)) >> L1_S_SHIFT) |
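Since each L1 slot covers one 1MB section, L1_IDX() is simply the megabyte number of the address. A worked example, assuming the usual arm32 value L1_S_SHIFT == 20:

	L1_IDX(0x00000000) == 0x000	/* first 1MB of the address space */
	L1_IDX(0x00300000) == 0x003	/* fourth 1MB section */
	L1_IDX(0xc0100000) == 0xc01	/* a typical kernel virtual address */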
522 | | | 522 | |
523 | /* | | 523 | /* |
524 | * L1 Page Tables are tracked using a Least Recently Used list. | | 524 | * L1 Page Tables are tracked using a Least Recently Used list. |
525 | * - New L1s are allocated from the HEAD. | | 525 | * - New L1s are allocated from the HEAD. |
526 | * - Freed L1s are added to the TAIL. | | 526 | * - Freed L1s are added to the TAIL. |
527 | * - Recently accessed L1s (where an 'access' is some change to one of | | 527 | * - Recently accessed L1s (where an 'access' is some change to one of |
528 | * the userland pmaps which owns this L1) are moved to the TAIL. | | 528 | * the userland pmaps which owns this L1) are moved to the TAIL. |
529 | */ | | 529 | */ |
530 | static TAILQ_HEAD(, l1_ttable) l1_lru_list; | | 530 | static TAILQ_HEAD(, l1_ttable) l1_lru_list; |
531 | static struct simplelock l1_lru_lock; | | 531 | static struct simplelock l1_lru_lock; |
532 | | | 532 | |
533 | /* | | 533 | /* |
534 | * A list of all L1 tables | | 534 | * A list of all L1 tables |
535 | */ | | 535 | */ |
536 | static SLIST_HEAD(, l1_ttable) l1_list; | | 536 | static SLIST_HEAD(, l1_ttable) l1_list; |
537 | | | 537 | |
538 | /* | | 538 | /* |
539 | * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. | | 539 | * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. |
540 | * | | 540 | * |
541 | * This is normally 16MB worth of L2 page descriptors for any given pmap. | | 541 | * This is normally 16MB worth of L2 page descriptors for any given pmap. |
542 | * Reference counts are maintained for L2 descriptors so they can be | | 542 | * Reference counts are maintained for L2 descriptors so they can be |
543 | * freed when empty. | | 543 | * freed when empty. |
544 | */ | | 544 | */ |
545 | struct l2_dtable { | | 545 | struct l2_dtable { |
546 | /* The number of L2 page descriptors allocated to this l2_dtable */ | | 546 | /* The number of L2 page descriptors allocated to this l2_dtable */ |
547 | u_int l2_occupancy; | | 547 | u_int l2_occupancy; |
548 | | | 548 | |
549 | /* List of L2 page descriptors */ | | 549 | /* List of L2 page descriptors */ |
550 | struct l2_bucket { | | 550 | struct l2_bucket { |
551 | pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ | | 551 | pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ |
552 | paddr_t l2b_phys; /* Physical address of same */ | | 552 | paddr_t l2b_phys; /* Physical address of same */ |
553 | u_short l2b_l1idx; /* This L2 table's L1 index */ | | 553 | u_short l2b_l1idx; /* This L2 table's L1 index */ |
554 | u_short l2b_occupancy; /* How many active descriptors */ | | 554 | u_short l2b_occupancy; /* How many active descriptors */ |
555 | } l2_bucket[L2_BUCKET_SIZE]; | | 555 | } l2_bucket[L2_BUCKET_SIZE]; |
556 | }; | | 556 | }; |
557 | | | 557 | |
558 | /* | | 558 | /* |
559 | * Given an L1 table index, calculate the corresponding l2_dtable index | | 559 | * Given an L1 table index, calculate the corresponding l2_dtable index |
560 | * and bucket index within the l2_dtable. | | 560 | * and bucket index within the l2_dtable. |
561 | */ | | 561 | */ |
562 | #define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ | | 562 | #define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ |
563 | (L2_SIZE - 1)) | | 563 | (L2_SIZE - 1)) |
564 | #define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) | | 564 | #define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) |
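Together the two macros split an L1 index into an outer l2_dtable slot and an inner bucket, which is the two-level lookup the rest of the pmap performs. A hedged sketch of that walk (pm_l2[] is the per-pmap l2_dtable array mentioned earlier; NULL checks omitted for brevity):

	u_short l1idx = L1_IDX(va);
	struct l2_dtable *l2 = pm->pm_l2[L2_IDX(l1idx)];
	struct l2_bucket *l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
	/* l2b->l2b_kva now points at the L2 descriptor table covering va */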
565 | | | 565 | |
566 | /* | | 566 | /* |
567 | * Given a virtual address, this macro returns the | | 567 | * Given a virtual address, this macro returns the |
568 | * virtual address required to drop into the next L2 bucket. | | 568 | * virtual address required to drop into the next L2 bucket. |
569 | */ | | 569 | */ |
570 | #define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) | | 570 | #define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) |
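L2_NEXT_BUCKET() rounds the address down to its 1MB frame and steps past it, so range operations can advance one L2 bucket at a time. A hedged loop sketch:

	while (sva < eva) {
		vaddr_t next_bucket = L2_NEXT_BUCKET(sva);
		if (next_bucket > eva)
			next_bucket = eva;
		/* ... process the PTEs covering [sva, next_bucket) ... */
		sva = next_bucket;
	}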
571 | | | 571 | |
572 | /* | | 572 | /* |
573 | * L2 allocation. | | 573 | * L2 allocation. |
574 | */ | | 574 | */ |
575 | #define pmap_alloc_l2_dtable() \ | | 575 | #define pmap_alloc_l2_dtable() \ |
576 | pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT) | | 576 | pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT) |
577 | #define pmap_free_l2_dtable(l2) \ | | 577 | #define pmap_free_l2_dtable(l2) \ |
578 | pool_cache_put(&pmap_l2dtable_cache, (l2)) | | 578 | pool_cache_put(&pmap_l2dtable_cache, (l2)) |
579 | #define pmap_alloc_l2_ptp(pap) \ | | 579 | #define pmap_alloc_l2_ptp(pap) \ |
580 | ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\ | | 580 | ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\ |
581 | PR_NOWAIT, (pap))) | | 581 | PR_NOWAIT, (pap))) |
582 | | | 582 | |
583 | /* | | 583 | /* |
584 | * We try to map the page tables write-through, if possible. However, not | | 584 | * We try to map the page tables write-through, if possible. However, not |
585 | * all CPUs have a write-through cache mode, so on those we have to sync | | 585 | * all CPUs have a write-through cache mode, so on those we have to sync |
586 | * the cache when we frob page tables. | | 586 | * the cache when we frob page tables. |
587 | * | | 587 | * |
588 | * We try to evaluate this at compile time, if possible. However, it's | | 588 | * We try to evaluate this at compile time, if possible. However, it's |
589 | * not always possible to do that, hence this run-time var. | | 589 | * not always possible to do that, hence this run-time var. |
590 | */ | | 590 | */ |
591 | int pmap_needs_pte_sync; | | 591 | int pmap_needs_pte_sync; |
592 | | | 592 | |
593 | /* | | 593 | /* |
594 | * Real definition of pv_entry. | | 594 | * Real definition of pv_entry. |
595 | */ | | 595 | */ |
596 | struct pv_entry { | | 596 | struct pv_entry { |
597 | SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ | | 597 | SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ |
598 | pmap_t pv_pmap; /* pmap where mapping lies */ | | 598 | pmap_t pv_pmap; /* pmap where mapping lies */ |
599 | vaddr_t pv_va; /* virtual address for mapping */ | | 599 | vaddr_t pv_va; /* virtual address for mapping */ |
600 | u_int pv_flags; /* flags */ | | 600 | u_int pv_flags; /* flags */ |
601 | }; | | 601 | }; |
602 | | | 602 | |
603 | /* | | 603 | /* |
604 | * Macro to determine if a mapping might be resident in the | | 604 | * Macro to determine if a mapping might be resident in the |
605 | * instruction cache and/or TLB | | 605 | * instruction cache and/or TLB |
606 | */ | | 606 | */ |
607 | #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) | | 607 | #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) |
608 | #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) | | 608 | #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) |
609 | | | 609 | |
610 | /* | | 610 | /* |
611 | * Macro to determine if a mapping might be resident in the | | 611 | * Macro to determine if a mapping might be resident in the |
612 | * data cache and/or TLB | | 612 | * data cache and/or TLB |
613 | */ | | 613 | */ |
614 | #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) | | 614 | #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) |
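These predicates let a caller pick the cheapest sufficient TLB flush: instruction+data when the mapping may have been executed from, data-only when it was merely referenced. A hedged sketch of the dispatch, using the flush helpers defined below:

	if (PV_BEEN_EXECD(oflags))
		pmap_tlb_flushID_SE(pm, va);
	else if (PV_BEEN_REFD(oflags))
		pmap_tlb_flushD_SE(pm, va);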
615 | | | 615 | |
616 | /* | | 616 | /* |
617 | * Local prototypes | | 617 | * Local prototypes |
618 | */ | | 618 | */ |
619 | static int pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t); | | 619 | static int pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t); |
620 | static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *, | | 620 | static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *, |
621 | pt_entry_t **); | | 621 | pt_entry_t **); |
622 | static bool pmap_is_current(pmap_t); | | 622 | static bool pmap_is_current(pmap_t); |
623 | static bool pmap_is_cached(pmap_t); | | 623 | static bool pmap_is_cached(pmap_t); |
624 | static void pmap_enter_pv(struct vm_page *, struct pv_entry *, | | 624 | static void pmap_enter_pv(struct vm_page *, struct pv_entry *, |
625 | pmap_t, vaddr_t, u_int); | | 625 | pmap_t, vaddr_t, u_int); |
626 | static struct pv_entry *pmap_find_pv(struct vm_page *, pmap_t, vaddr_t); | | 626 | static struct pv_entry *pmap_find_pv(struct vm_page *, pmap_t, vaddr_t); |
627 | static struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vaddr_t, int); | | 627 | static struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vaddr_t, int); |
628 | static u_int pmap_modify_pv(struct vm_page *, pmap_t, vaddr_t, | | 628 | static u_int pmap_modify_pv(struct vm_page *, pmap_t, vaddr_t, |
629 | u_int, u_int); | | 629 | u_int, u_int); |
630 | | | 630 | |
631 | static void pmap_pinit(pmap_t); | | 631 | static void pmap_pinit(pmap_t); |
632 | static int pmap_pmap_ctor(void *, void *, int); | | 632 | static int pmap_pmap_ctor(void *, void *, int); |
633 | | | 633 | |
634 | static void pmap_alloc_l1(pmap_t); | | 634 | static void pmap_alloc_l1(pmap_t); |
635 | static void pmap_free_l1(pmap_t); | | 635 | static void pmap_free_l1(pmap_t); |
636 | static void pmap_use_l1(pmap_t); | | 636 | static void pmap_use_l1(pmap_t); |
637 | | | 637 | |
638 | static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t); | | 638 | static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t); |
639 | static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t); | | 639 | static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t); |
640 | static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); | | 640 | static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); |
641 | static int pmap_l2ptp_ctor(void *, void *, int); | | 641 | static int pmap_l2ptp_ctor(void *, void *, int); |
642 | static int pmap_l2dtable_ctor(void *, void *, int); | | 642 | static int pmap_l2dtable_ctor(void *, void *, int); |
643 | | | 643 | |
644 | static void pmap_vac_me_harder(struct vm_page *, pmap_t, vaddr_t); | | 644 | static void pmap_vac_me_harder(struct vm_page *, pmap_t, vaddr_t); |
645 | #ifdef PMAP_CACHE_VIVT | | 645 | #ifdef PMAP_CACHE_VIVT |
646 | static void pmap_vac_me_kpmap(struct vm_page *, pmap_t, vaddr_t); | | 646 | static void pmap_vac_me_kpmap(struct vm_page *, pmap_t, vaddr_t); |
647 | static void pmap_vac_me_user(struct vm_page *, pmap_t, vaddr_t); | | 647 | static void pmap_vac_me_user(struct vm_page *, pmap_t, vaddr_t); |
648 | #endif | | 648 | #endif |
649 | | | 649 | |
650 | static void pmap_clearbit(struct vm_page *, u_int); | | 650 | static void pmap_clearbit(struct vm_page *, u_int); |
651 | #ifdef PMAP_CACHE_VIVT | | 651 | #ifdef PMAP_CACHE_VIVT |
652 | static int pmap_clean_page(struct pv_entry *, bool); | | 652 | static int pmap_clean_page(struct pv_entry *, bool); |
653 | #endif | | 653 | #endif |
654 | #ifdef PMAP_CACHE_VIPT | | 654 | #ifdef PMAP_CACHE_VIPT |
655 | static void pmap_syncicache_page(struct vm_page *); | | 655 | static void pmap_syncicache_page(struct vm_page *); |
656 | static void pmap_flush_page(struct vm_page *, bool); | | 656 | static void pmap_flush_page(struct vm_page *, bool); |
657 | #endif | | 657 | #endif |
658 | static void pmap_page_remove(struct vm_page *); | | 658 | static void pmap_page_remove(struct vm_page *); |
659 | | | 659 | |
660 | static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); | | 660 | static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); |
661 | static vaddr_t kernel_pt_lookup(paddr_t); | | 661 | static vaddr_t kernel_pt_lookup(paddr_t); |
662 | | | 662 | |
663 | | | 663 | |
664 | /* | | 664 | /* |
665 | * External function prototypes | | 665 | * External function prototypes |
666 | */ | | 666 | */ |
667 | extern void bzero_page(vaddr_t); | | 667 | extern void bzero_page(vaddr_t); |
668 | extern void bcopy_page(vaddr_t, vaddr_t); | | 668 | extern void bcopy_page(vaddr_t, vaddr_t); |
669 | | | 669 | |
670 | /* | | 670 | /* |
671 | * Misc variables | | 671 | * Misc variables |
672 | */ | | 672 | */ |
673 | vaddr_t virtual_avail; | | 673 | vaddr_t virtual_avail; |
674 | vaddr_t virtual_end; | | 674 | vaddr_t virtual_end; |
675 | vaddr_t pmap_curmaxkvaddr; | | 675 | vaddr_t pmap_curmaxkvaddr; |
676 | | | 676 | |
677 | vaddr_t avail_start; | | 677 | vaddr_t avail_start; |
678 | vaddr_t avail_end; | | 678 | vaddr_t avail_end; |
679 | | | 679 | |
680 | pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); | | 680 | pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); |
681 | pv_addr_t kernelpages; | | 681 | pv_addr_t kernelpages; |
682 | pv_addr_t kernel_l1pt; | | 682 | pv_addr_t kernel_l1pt; |
683 | pv_addr_t systempage; | | 683 | pv_addr_t systempage; |
684 | | | 684 | |
685 | /* Function to set the debug level of the pmap code */ | | 685 | /* Function to set the debug level of the pmap code */ |
686 | | | 686 | |
687 | #ifdef PMAP_DEBUG | | 687 | #ifdef PMAP_DEBUG |
688 | void | | 688 | void |
689 | pmap_debug(int level) | | 689 | pmap_debug(int level) |
690 | { | | 690 | { |
691 | pmap_debug_level = level; | | 691 | pmap_debug_level = level; |
692 | printf("pmap_debug: level=%d\n", pmap_debug_level); | | 692 | printf("pmap_debug: level=%d\n", pmap_debug_level); |
693 | } | | 693 | } |
694 | #endif /* PMAP_DEBUG */ | | 694 | #endif /* PMAP_DEBUG */ |
695 | | | 695 | |
696 | /* | | 696 | /* |
697 | * A bunch of routines to conditionally flush the caches/TLB depending | | 697 | * A bunch of routines to conditionally flush the caches/TLB depending |
698 | * on whether the specified pmap actually needs to be flushed at any | | 698 | * on whether the specified pmap actually needs to be flushed at any |
699 | * given time. | | 699 | * given time. |
700 | */ | | 700 | */ |
701 | static inline void | | 701 | static inline void |
702 | pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va) | | 702 | pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va) |
703 | { | | 703 | { |
704 | | | 704 | |
705 | if (pm->pm_cstate.cs_tlb_id) | | 705 | if (pm->pm_cstate.cs_tlb_id) |
706 | cpu_tlb_flushID_SE(va); | | 706 | cpu_tlb_flushID_SE(va); |
707 | } | | 707 | } |
708 | | | 708 | |
709 | static inline void | | 709 | static inline void |
710 | pmap_tlb_flushD_SE(pmap_t pm, vaddr_t va) | | 710 | pmap_tlb_flushD_SE(pmap_t pm, vaddr_t va) |
711 | { | | 711 | { |
712 | | | 712 | |
713 | if (pm->pm_cstate.cs_tlb_d) | | 713 | if (pm->pm_cstate.cs_tlb_d) |
714 | cpu_tlb_flushD_SE(va); | | 714 | cpu_tlb_flushD_SE(va); |
715 | } | | 715 | } |
716 | | | 716 | |
717 | static inline void | | 717 | static inline void |
718 | pmap_tlb_flushID(pmap_t pm) | | 718 | pmap_tlb_flushID(pmap_t pm) |
719 | { | | 719 | { |
720 | | | 720 | |
721 | if (pm->pm_cstate.cs_tlb_id) { | | 721 | if (pm->pm_cstate.cs_tlb_id) { |
722 | cpu_tlb_flushID(); | | 722 | cpu_tlb_flushID(); |
723 | pm->pm_cstate.cs_tlb = 0; | | 723 | pm->pm_cstate.cs_tlb = 0; |
724 | } | | 724 | } |
725 | } | | 725 | } |
726 | | | 726 | |
727 | static inline void | | 727 | static inline void |
728 | pmap_tlb_flushD(pmap_t pm) | | 728 | pmap_tlb_flushD(pmap_t pm) |
729 | { | | 729 | { |
730 | | | 730 | |
731 | if (pm->pm_cstate.cs_tlb_d) { | | 731 | if (pm->pm_cstate.cs_tlb_d) { |
732 | cpu_tlb_flushD(); | | 732 | cpu_tlb_flushD(); |
733 | pm->pm_cstate.cs_tlb_d = 0; | | 733 | pm->pm_cstate.cs_tlb_d = 0; |
734 | } | | 734 | } |
735 | } | | 735 | } |
736 | | | 736 | |
737 | #ifdef PMAP_CACHE_VIVT | | 737 | #ifdef PMAP_CACHE_VIVT |
738 | static inline void | | 738 | static inline void |
739 | pmap_idcache_wbinv_range(pmap_t pm, vaddr_t va, vsize_t len) | | 739 | pmap_idcache_wbinv_range(pmap_t pm, vaddr_t va, vsize_t len) |
740 | { | | 740 | { |
741 | if (pm->pm_cstate.cs_cache_id) { | | 741 | if (pm->pm_cstate.cs_cache_id) { |
742 | cpu_idcache_wbinv_range(va, len); | | 742 | cpu_idcache_wbinv_range(va, len); |
743 | } | | 743 | } |
744 | } | | 744 | } |
745 | | | 745 | |
746 | static inline void | | 746 | static inline void |
747 | pmap_dcache_wb_range(pmap_t pm, vaddr_t va, vsize_t len, | | 747 | pmap_dcache_wb_range(pmap_t pm, vaddr_t va, vsize_t len, |
748 | bool do_inv, bool rd_only) | | 748 | bool do_inv, bool rd_only) |
749 | { | | 749 | { |
750 | | | 750 | |
751 | if (pm->pm_cstate.cs_cache_d) { | | 751 | if (pm->pm_cstate.cs_cache_d) { |
752 | if (do_inv) { | | 752 | if (do_inv) { |
753 | if (rd_only) | | 753 | if (rd_only) |
754 | cpu_dcache_inv_range(va, len); | | 754 | cpu_dcache_inv_range(va, len); |
755 | else | | 755 | else |
756 | cpu_dcache_wbinv_range(va, len); | | 756 | cpu_dcache_wbinv_range(va, len); |
757 | } else | | 757 | } else |
758 | if (!rd_only) | | 758 | if (!rd_only) |
759 | cpu_dcache_wb_range(va, len); | | 759 | cpu_dcache_wb_range(va, len); |
760 | } | | 760 | } |
761 | } | | 761 | } |
762 | | | 762 | |
763 | static inline void | | 763 | static inline void |
764 | pmap_idcache_wbinv_all(pmap_t pm) | | 764 | pmap_idcache_wbinv_all(pmap_t pm) |
765 | { | | 765 | { |
766 | if (pm->pm_cstate.cs_cache_id) { | | 766 | if (pm->pm_cstate.cs_cache_id) { |
767 | cpu_idcache_wbinv_all(); | | 767 | cpu_idcache_wbinv_all(); |
768 | pm->pm_cstate.cs_cache = 0; | | 768 | pm->pm_cstate.cs_cache = 0; |
769 | } | | 769 | } |
770 | } | | 770 | } |
771 | | | 771 | |
772 | static inline void | | 772 | static inline void |
773 | pmap_dcache_wbinv_all(pmap_t pm) | | 773 | pmap_dcache_wbinv_all(pmap_t pm) |
774 | { | | 774 | { |
775 | if (pm->pm_cstate.cs_cache_d) { | | 775 | if (pm->pm_cstate.cs_cache_d) { |
776 | cpu_dcache_wbinv_all(); | | 776 | cpu_dcache_wbinv_all(); |
777 | pm->pm_cstate.cs_cache_d = 0; | | 777 | pm->pm_cstate.cs_cache_d = 0; |
778 | } | | 778 | } |
779 | } | | 779 | } |
780 | #endif /* PMAP_CACHE_VIVT */ | | 780 | #endif /* PMAP_CACHE_VIVT */ |
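
The cs_cache_* and cs_tlb_* flags tested by the helpers above all live in one per-pmap word, pm_cstate, which is why a helper can retire several related flags in a single store (as pmap_tlb_flushID() does with cs_tlb, and pmap_idcache_wbinv_all() with cs_cache). As a reading aid, here is a simplified sketch of that state word, modelled on the pmap_cache_state union in the arm32 pmap.h; treat the exact field layout as illustrative, not authoritative:

/*
 * Sketch of the per-pmap cache/TLB state word (simplified; cf.
 * union pmap_cache_state in <arm/arm32/pmap.h>).  Byte-sized flags
 * are overlaid on 16- and 32-bit views, so "pm->pm_cstate.cs_all = 0"
 * clears every flag with one store.
 */
union pmap_cache_state_sketch {
	struct {
		union {
			u_int8_t csu_cache_b[2];	/* [0] I+D cache, [1] D cache */
			u_int16_t csu_cache;		/* both cache flags at once */
		} cs_cache_u;
		union {
			u_int8_t csu_tlb_b[2];		/* [0] I+D TLB, [1] D TLB */
			u_int16_t csu_tlb;		/* both TLB flags at once */
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;				/* the entire state */
};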
781 | | | 781 | |
782 | static inline bool | | 782 | static inline bool |
783 | pmap_is_current(pmap_t pm) | | 783 | pmap_is_current(pmap_t pm) |
784 | { | | 784 | { |
785 | | | 785 | |
786 | if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm) | | 786 | if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm) |
787 | return true; | | 787 | return true; |
788 | | | 788 | |
789 | return false; | | 789 | return false; |
790 | } | | 790 | } |
791 | | | 791 | |
792 | static inline bool | | 792 | static inline bool |
793 | pmap_is_cached(pmap_t pm) | | 793 | pmap_is_cached(pmap_t pm) |
794 | { | | 794 | { |
795 | | | 795 | |
796 | if (pm == pmap_kernel() || pmap_recent_user == NULL || | | 796 | if (pm == pmap_kernel() || pmap_recent_user == NULL || |
797 | pmap_recent_user == pm) | | 797 | pmap_recent_user == pm) |
798 | return (true); | | 798 | return (true); |
799 | | | 799 | |
800 | return false; | | 800 | return false; |
801 | } | | 801 | } |
802 | | | 802 | |
803 | /* | | 803 | /* |
804 | * PTE_SYNC_CURRENT: | | 804 | * PTE_SYNC_CURRENT: |
805 | * | | 805 | * |
806 | * Make sure the pte is written out to RAM. | | 806 | * Make sure the pte is written out to RAM. |
807 | * We need to do this for one of the following cases: | | 807 | * We need to do this for one of the following cases: |
808 | * - We're dealing with the kernel pmap | | 808 | * - We're dealing with the kernel pmap |
809 | * - There is no pmap active in the cache/tlb. | | 809 | * - There is no pmap active in the cache/tlb. |
810 | * - The specified pmap is 'active' in the cache/tlb. | | 810 | * - The specified pmap is 'active' in the cache/tlb. |
811 | */ | | 811 | */ |
812 | #ifdef PMAP_INCLUDE_PTE_SYNC | | 812 | #ifdef PMAP_INCLUDE_PTE_SYNC |
813 | #define PTE_SYNC_CURRENT(pm, ptep) \ | | 813 | #define PTE_SYNC_CURRENT(pm, ptep) \ |
814 | do { \ | | 814 | do { \ |
815 | if (PMAP_NEEDS_PTE_SYNC && \ | | 815 | if (PMAP_NEEDS_PTE_SYNC && \ |
816 | pmap_is_cached(pm)) \ | | 816 | pmap_is_cached(pm)) \ |
817 | PTE_SYNC(ptep); \ | | 817 | PTE_SYNC(ptep); \ |
818 | } while (/*CONSTCOND*/0) | | 818 | } while (/*CONSTCOND*/0) |
819 | #else | | 819 | #else |
820 | #define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ | | 820 | #define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ |
821 | #endif | | 821 | #endif |
822 | | | 822 | |
823 | /* | | 823 | /* |
824 | * main pv_entry manipulation functions: | | 824 | * main pv_entry manipulation functions: |
825 | * pmap_enter_pv: enter a mapping onto a vm_page list | | 825 | * pmap_enter_pv: enter a mapping onto a vm_page list |
826 | * pmap_remove_pv: remove a mapping from a vm_page list | | 826 | * pmap_remove_pv: remove a mapping from a vm_page list |
827 | * | | 827 | * |
828 | * NOTE: pmap_enter_pv expects to lock the pvh itself | | 828 | * NOTE: pmap_enter_pv expects to lock the pvh itself |
829 | * pmap_remove_pv expects the caller to lock the pvh before calling | | 829 | * pmap_remove_pv expects the caller to lock the pvh before calling |
830 | */ | | 830 | */ |
831 | | | 831 | |
832 | /* | | 832 | /* |
833 | * pmap_enter_pv: enter a mapping onto a vm_page list | | 833 | * pmap_enter_pv: enter a mapping onto a vm_page list |
834 | * | | 834 | * |
835 | * => caller should hold the proper lock on pmap_main_lock | | 835 | * => caller should hold the proper lock on pmap_main_lock |
836 | * => caller should have pmap locked | | 836 | * => caller should have pmap locked |
837 | * => we will gain the lock on the vm_page and allocate the new pv_entry | | 837 | * => we will gain the lock on the vm_page and allocate the new pv_entry |
838 | * => caller should adjust ptp's wire_count before calling | | 838 | * => caller should adjust ptp's wire_count before calling |
839 | * => caller should not adjust pmap's wire_count | | 839 | * => caller should not adjust pmap's wire_count |
840 | */ | | 840 | */ |
841 | static void | | 841 | static void |
842 | pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, | | 842 | pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, |
843 | vaddr_t va, u_int flags) | | 843 | vaddr_t va, u_int flags) |
844 | { | | 844 | { |
845 | struct pv_entry **pvp; | | 845 | struct pv_entry **pvp; |
846 | | | 846 | |
847 | NPDEBUG(PDB_PVDUMP, | | 847 | NPDEBUG(PDB_PVDUMP, |
848 | printf("pmap_enter_pv: pm %p, pg %p, flags 0x%x\n", pm, pg, flags)); | | 848 | printf("pmap_enter_pv: pm %p, pg %p, flags 0x%x\n", pm, pg, flags)); |
849 | | | 849 | |
850 | pve->pv_pmap = pm; | | 850 | pve->pv_pmap = pm; |
851 | pve->pv_va = va; | | 851 | pve->pv_va = va; |
852 | pve->pv_flags = flags; | | 852 | pve->pv_flags = flags; |
853 | | | 853 | |
854 | simple_lock(&pg->mdpage.pvh_slock); /* lock vm_page */ | | 854 | simple_lock(&pg->mdpage.pvh_slock); /* lock vm_page */ |
855 | pvp = &SLIST_FIRST(&pg->mdpage.pvh_list); | | 855 | pvp = &SLIST_FIRST(&pg->mdpage.pvh_list); |
856 | #ifdef PMAP_CACHE_VIPT | | 856 | #ifdef PMAP_CACHE_VIPT |
857 | /* | | 857 | /* |
858 | * Insert unmanaged entries, writeable first, at the head of | | 858 | * Insert unmanaged entries, writeable first, at the head of |
859 | * the pv list. | | 859 | * the pv list. |
860 | */ | | 860 | */ |
861 | if (__predict_true((flags & PVF_KENTRY) == 0)) { | | 861 | if (__predict_true((flags & PVF_KENTRY) == 0)) { |
862 | while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY) | | 862 | while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY) |
863 | pvp = &SLIST_NEXT(*pvp, pv_link); | | 863 | pvp = &SLIST_NEXT(*pvp, pv_link); |
864 | } else if ((flags & PVF_WRITE) == 0) { | | 864 | } else if ((flags & PVF_WRITE) == 0) { |
865 | while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE) | | 865 | while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE) |
866 | pvp = &SLIST_NEXT(*pvp, pv_link); | | 866 | pvp = &SLIST_NEXT(*pvp, pv_link); |
867 | } | | 867 | } |
868 | #endif | | 868 | #endif |
869 | SLIST_NEXT(pve, pv_link) = *pvp; /* add to ... */ | | 869 | SLIST_NEXT(pve, pv_link) = *pvp; /* add to ... */ |
870 | *pvp = pve; /* ... locked list */ | | 870 | *pvp = pve; /* ... locked list */ |
871 | pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD); | | 871 | pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD); |
872 | #ifdef PMAP_CACHE_VIPT | | 872 | #ifdef PMAP_CACHE_VIPT |
873 | if ((pve->pv_flags & PVF_KWRITE) == PVF_KWRITE) | | 873 | if ((pve->pv_flags & PVF_KWRITE) == PVF_KWRITE) |
874 | pg->mdpage.pvh_attrs |= PVF_KMOD; | | 874 | pg->mdpage.pvh_attrs |= PVF_KMOD; |
875 | if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) | | 875 | if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) |
876 | pg->mdpage.pvh_attrs |= PVF_DIRTY; | | 876 | pg->mdpage.pvh_attrs |= PVF_DIRTY; |
877 | KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 877 | KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); |
878 | #endif | | 878 | #endif |
879 | if (pm == pmap_kernel()) { | | 879 | if (pm == pmap_kernel()) { |
880 | PMAPCOUNT(kernel_mappings); | | 880 | PMAPCOUNT(kernel_mappings); |
881 | if (flags & PVF_WRITE) | | 881 | if (flags & PVF_WRITE) |
882 | pg->mdpage.krw_mappings++; | | 882 | pg->mdpage.krw_mappings++; |
883 | else | | 883 | else |
884 | pg->mdpage.kro_mappings++; | | 884 | pg->mdpage.kro_mappings++; |
885 | } else | | 885 | } else |
886 | if (flags & PVF_WRITE) | | 886 | if (flags & PVF_WRITE) |
887 | pg->mdpage.urw_mappings++; | | 887 | pg->mdpage.urw_mappings++; |
888 | else | | 888 | else |
889 | pg->mdpage.uro_mappings++; | | 889 | pg->mdpage.uro_mappings++; |
890 | | | 890 | |
891 | #ifdef PMAP_CACHE_VIPT | | 891 | #ifdef PMAP_CACHE_VIPT |
892 | /* | | 892 | /* |
893 | * If this is an exec mapping and it's the first exec mapping | | 893 | * If this is an exec mapping and it's the first exec mapping |
894 | * for this page, make sure to sync the I-cache. | | 894 | * for this page, make sure to sync the I-cache. |
895 | */ | | 895 | */ |
896 | if (PV_IS_EXEC_P(flags)) { | | 896 | if (PV_IS_EXEC_P(flags)) { |
897 | if (!PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) { | | 897 | if (!PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) { |
898 | pmap_syncicache_page(pg); | | 898 | pmap_syncicache_page(pg); |
899 | PMAPCOUNT(exec_synced_map); | | 899 | PMAPCOUNT(exec_synced_map); |
900 | } | | 900 | } |
901 | PMAPCOUNT(exec_mappings); | | 901 | PMAPCOUNT(exec_mappings); |
902 | } | | 902 | } |
903 | #endif | | 903 | #endif |
904 | | | 904 | |
905 | PMAPCOUNT(mappings); | | 905 | PMAPCOUNT(mappings); |
906 | simple_unlock(&pg->mdpage.pvh_slock); /* unlock, done! */ | | 906 | simple_unlock(&pg->mdpage.pvh_slock); /* unlock, done! */ |
907 | | | 907 | |
908 | if (pve->pv_flags & PVF_WIRED) | | 908 | if (pve->pv_flags & PVF_WIRED) |
909 | ++pm->pm_stats.wired_count; | | 909 | ++pm->pm_stats.wired_count; |
910 | } | | 910 | } |
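
For PMAP_CACHE_VIPT, the insertion logic above maintains a strict ordering on the pv list: writeable KENTRY mappings first, then read-only KENTRY mappings, then managed mappings. A minimal sketch of a debug walk that would verify this invariant (pv_list_check_order is a hypothetical helper, not part of this file):

#ifdef notyet	/* illustration only */
static void
pv_list_check_order(struct vm_page *pg)
{
	struct pv_entry *pv;
	int rank, prev = 0;

	/* rank 0: KENTRY+WRITE, 1: KENTRY read-only, 2: managed */
	SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
		if (pv->pv_flags & PVF_KENTRY)
			rank = (pv->pv_flags & PVF_WRITE) ? 0 : 1;
		else
			rank = 2;
		KASSERT(rank >= prev);	/* the rank may only increase */
		prev = rank;
	}
}
#endif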
911 | | | 911 | |
912 | /* | | 912 | /* |
913 | * | | 913 | * |
914 | * pmap_find_pv: Find a pv entry | | 914 | * pmap_find_pv: Find a pv entry |
915 | * | | 915 | * |
916 | * => caller should hold lock on vm_page | | 916 | * => caller should hold lock on vm_page |
917 | */ | | 917 | */ |
918 | static inline struct pv_entry * | | 918 | static inline struct pv_entry * |
919 | pmap_find_pv(struct vm_page *pg, pmap_t pm, vaddr_t va) | | 919 | pmap_find_pv(struct vm_page *pg, pmap_t pm, vaddr_t va) |
920 | { | | 920 | { |
921 | struct pv_entry *pv; | | 921 | struct pv_entry *pv; |
922 | | | 922 | |
923 | SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) { | | 923 | SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) { |
924 | if (pm == pv->pv_pmap && va == pv->pv_va) | | 924 | if (pm == pv->pv_pmap && va == pv->pv_va) |
925 | break; | | 925 | break; |
926 | } | | 926 | } |
927 | | | 927 | |
928 | return (pv); | | 928 | return (pv); |
929 | } | | 929 | } |
930 | | | 930 | |
931 | /* | | 931 | /* |
932 | * pmap_remove_pv: try to remove a mapping from a pv_list | | 932 | * pmap_remove_pv: try to remove a mapping from a pv_list |
933 | * | | 933 | * |
934 | * => caller should hold proper lock on pmap_main_lock | | 934 | * => caller should hold proper lock on pmap_main_lock |
935 | * => pmap should be locked | | 935 | * => pmap should be locked |
936 | * => caller should hold lock on vm_page [so that attrs can be adjusted] | | 936 | * => caller should hold lock on vm_page [so that attrs can be adjusted] |
937 | * => caller should adjust ptp's wire_count and free PTP if needed | | 937 | * => caller should adjust ptp's wire_count and free PTP if needed |
938 | * => caller should NOT adjust pmap's wire_count | | 938 | * => caller should NOT adjust pmap's wire_count |
939 | * => we return the removed pve | | 939 | * => we return the removed pve |
940 | */ | | 940 | */ |
941 | static struct pv_entry * | | 941 | static struct pv_entry * |
942 | pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired) | | 942 | pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired) |
943 | { | | 943 | { |
944 | struct pv_entry *pve, **prevptr; | | 944 | struct pv_entry *pve, **prevptr; |
945 | | | 945 | |
946 | NPDEBUG(PDB_PVDUMP, | | 946 | NPDEBUG(PDB_PVDUMP, |
947 | printf("pmap_remove_pv: pm %p, pg %p, va 0x%08lx\n", pm, pg, va)); | | 947 | printf("pmap_remove_pv: pm %p, pg %p, va 0x%08lx\n", pm, pg, va)); |
948 | | | 948 | |
949 | prevptr = &SLIST_FIRST(&pg->mdpage.pvh_list); /* prev pv_entry ptr */ | | 949 | prevptr = &SLIST_FIRST(&pg->mdpage.pvh_list); /* prev pv_entry ptr */ |
950 | pve = *prevptr; | | 950 | pve = *prevptr; |
951 | | | 951 | |
952 | while (pve) { | | 952 | while (pve) { |
953 | if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? */ | | 953 | if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? */ |
954 | NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, pg " | | 954 | NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, pg " |
955 | "%p, flags 0x%x\n", pm, pg, pve->pv_flags)); | | 955 | "%p, flags 0x%x\n", pm, pg, pve->pv_flags)); |
956 | if (pve->pv_flags & PVF_WIRED) { | | 956 | if (pve->pv_flags & PVF_WIRED) { |
957 | if (skip_wired) | | 957 | if (skip_wired) |
958 | return (NULL); | | 958 | return (NULL); |
959 | --pm->pm_stats.wired_count; | | 959 | --pm->pm_stats.wired_count; |
960 | } | | 960 | } |
961 | *prevptr = SLIST_NEXT(pve, pv_link); /* remove it! */ | | 961 | *prevptr = SLIST_NEXT(pve, pv_link); /* remove it! */ |
962 | if (pm == pmap_kernel()) { | | 962 | if (pm == pmap_kernel()) { |
963 | PMAPCOUNT(kernel_unmappings); | | 963 | PMAPCOUNT(kernel_unmappings); |
964 | if (pve->pv_flags & PVF_WRITE) | | 964 | if (pve->pv_flags & PVF_WRITE) |
965 | pg->mdpage.krw_mappings--; | | 965 | pg->mdpage.krw_mappings--; |
966 | else | | 966 | else |
967 | pg->mdpage.kro_mappings--; | | 967 | pg->mdpage.kro_mappings--; |
968 | } else | | 968 | } else |
969 | if (pve->pv_flags & PVF_WRITE) | | 969 | if (pve->pv_flags & PVF_WRITE) |
970 | pg->mdpage.urw_mappings--; | | 970 | pg->mdpage.urw_mappings--; |
971 | else | | 971 | else |
972 | pg->mdpage.uro_mappings--; | | 972 | pg->mdpage.uro_mappings--; |
973 | | | 973 | |
974 | PMAPCOUNT(unmappings); | | 974 | PMAPCOUNT(unmappings); |
975 | #ifdef PMAP_CACHE_VIPT | | 975 | #ifdef PMAP_CACHE_VIPT |
976 | if (!(pve->pv_flags & PVF_WRITE)) | | 976 | if (!(pve->pv_flags & PVF_WRITE)) |
977 | break; | | 977 | break; |
978 | /* | | 978 | /* |
979 | * If this page has had an exec mapping, then if | | 979 | * If this page has had an exec mapping, then if |
980 | * this was the last mapping, discard the contents, | | 980 | * this was the last mapping, discard the contents, |
981 | * otherwise sync the i-cache for this page. | | 981 | * otherwise sync the i-cache for this page. |
982 | */ | | 982 | */ |
983 | if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) { | | 983 | if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) { |
984 | if (SLIST_EMPTY(&pg->mdpage.pvh_list)) { | | 984 | if (SLIST_EMPTY(&pg->mdpage.pvh_list)) { |
985 | pg->mdpage.pvh_attrs &= ~PVF_EXEC; | | 985 | pg->mdpage.pvh_attrs &= ~PVF_EXEC; |
986 | PMAPCOUNT(exec_discarded_unmap); | | 986 | PMAPCOUNT(exec_discarded_unmap); |
987 | } else { | | 987 | } else { |
988 | pmap_syncicache_page(pg); | | 988 | pmap_syncicache_page(pg); |
989 | PMAPCOUNT(exec_synced_unmap); | | 989 | PMAPCOUNT(exec_synced_unmap); |
990 | } | | 990 | } |
991 | } | | 991 | } |
992 | #endif /* PMAP_CACHE_VIPT */ | | 992 | #endif /* PMAP_CACHE_VIPT */ |
993 | break; | | 993 | break; |
994 | } | | 994 | } |
995 | prevptr = &SLIST_NEXT(pve, pv_link); /* previous pointer */ | | 995 | prevptr = &SLIST_NEXT(pve, pv_link); /* previous pointer */ |
996 | pve = *prevptr; /* advance */ | | 996 | pve = *prevptr; /* advance */ |
997 | } | | 997 | } |
998 | | | 998 | |
999 | #ifdef PMAP_CACHE_VIPT | | 999 | #ifdef PMAP_CACHE_VIPT |
1000 | /* | | 1000 | /* |
1001 | * If we no longer have a WRITEABLE KENTRY at the head of the list, | | 1001 | * If we no longer have a WRITEABLE KENTRY at the head of the list, |
1002 | * clear the KMOD attribute from the page. | | 1002 | * clear the KMOD attribute from the page. |
1003 | */ | | 1003 | */ |
1004 | if (SLIST_FIRST(&pg->mdpage.pvh_list) == NULL | | 1004 | if (SLIST_FIRST(&pg->mdpage.pvh_list) == NULL |
1005 | || (SLIST_FIRST(&pg->mdpage.pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) | | 1005 | || (SLIST_FIRST(&pg->mdpage.pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) |
1006 | pg->mdpage.pvh_attrs &= ~PVF_KMOD; | | 1006 | pg->mdpage.pvh_attrs &= ~PVF_KMOD; |
1007 | | | 1007 | |
1008 | /* | | 1008 | /* |
1009 | * If this was a writeable page and there are no more writeable | | 1009 | * If this was a writeable page and there are no more writeable |
1010 | * mappings (ignoring KMPAGE), clear the WRITE flag and write back | | 1010 | * mappings (ignoring KMPAGE), clear the WRITE flag and write back |
1011 | * the contents to memory. | | 1011 | * the contents to memory. |
1012 | */ | | 1012 | */ |
1013 | if (pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0) | | 1013 | if (pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0) |
1014 | pg->mdpage.pvh_attrs &= ~PVF_WRITE; | | 1014 | pg->mdpage.pvh_attrs &= ~PVF_WRITE; |
1015 | KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1015 | KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1016 | #endif /* PMAP_CACHE_VIPT */ | | 1016 | #endif /* PMAP_CACHE_VIPT */ |
1017 | | | 1017 | |
1018 | return(pve); /* return removed pve */ | | 1018 | return(pve); /* return removed pve */ |
1019 | } | | 1019 | } |
1020 | | | 1020 | |
1021 | /* | | 1021 | /* |
1022 | * | | 1022 | * |
1023 | * pmap_modify_pv: Update pv flags | | 1023 | * pmap_modify_pv: Update pv flags |
1024 | * | | 1024 | * |
1025 | * => caller should hold lock on vm_page [so that attrs can be adjusted] | | 1025 | * => caller should hold lock on vm_page [so that attrs can be adjusted] |
1026 | * => caller should NOT adjust pmap's wire_count | | 1026 | * => caller should NOT adjust pmap's wire_count |
1027 | * => caller must call pmap_vac_me_harder() if writable status of a page | | 1027 | * => caller must call pmap_vac_me_harder() if writable status of a page |
1028 | * may have changed. | | 1028 | * may have changed. |
1029 | * => we return the old flags | | 1029 | * => we return the old flags |
1030 | * | | 1030 | * |
1031 | * Modify a physical-virtual mapping in the pv table | | 1031 | * Modify a physical-virtual mapping in the pv table |
1032 | */ | | 1032 | */ |
1033 | static u_int | | 1033 | static u_int |
1034 | pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, | | 1034 | pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, |
1035 | u_int clr_mask, u_int set_mask) | | 1035 | u_int clr_mask, u_int set_mask) |
1036 | { | | 1036 | { |
1037 | struct pv_entry *npv; | | 1037 | struct pv_entry *npv; |
1038 | u_int flags, oflags; | | 1038 | u_int flags, oflags; |
1039 | | | 1039 | |
1040 | KASSERT((clr_mask & PVF_KENTRY) == 0); | | 1040 | KASSERT((clr_mask & PVF_KENTRY) == 0); |
1041 | KASSERT((set_mask & PVF_KENTRY) == 0); | | 1041 | KASSERT((set_mask & PVF_KENTRY) == 0); |
1042 | | | 1042 | |
1043 | if ((npv = pmap_find_pv(pg, pm, va)) == NULL) | | 1043 | if ((npv = pmap_find_pv(pg, pm, va)) == NULL) |
1044 | return (0); | | 1044 | return (0); |
1045 | | | 1045 | |
1046 | NPDEBUG(PDB_PVDUMP, | | 1046 | NPDEBUG(PDB_PVDUMP, |
1047 | printf("pmap_modify_pv: pm %p, pg %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, pg, clr_mask, set_mask, npv->pv_flags)); | | 1047 | printf("pmap_modify_pv: pm %p, pg %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, pg, clr_mask, set_mask, npv->pv_flags)); |
1048 | | | 1048 | |
1049 | /* | | 1049 | /* |
1050 | * There is at least one VA mapping this page. | | 1050 | * There is at least one VA mapping this page. |
1051 | */ | | 1051 | */ |
1052 | | | 1052 | |
1053 | if (clr_mask & (PVF_REF | PVF_MOD)) { | | 1053 | if (clr_mask & (PVF_REF | PVF_MOD)) { |
1054 | pg->mdpage.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); | | 1054 | pg->mdpage.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); |
1055 | #ifdef PMAP_CACHE_VIPT | | 1055 | #ifdef PMAP_CACHE_VIPT |
1056 | if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) | | 1056 | if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) |
1057 | pg->mdpage.pvh_attrs |= PVF_DIRTY; | | 1057 | pg->mdpage.pvh_attrs |= PVF_DIRTY; |
1058 | KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1058 | KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1059 | #endif | | 1059 | #endif |
1060 | } | | 1060 | } |
1061 | | | 1061 | |
1062 | oflags = npv->pv_flags; | | 1062 | oflags = npv->pv_flags; |
1063 | npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; | | 1063 | npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; |
1064 | | | 1064 | |
1065 | if ((flags ^ oflags) & PVF_WIRED) { | | 1065 | if ((flags ^ oflags) & PVF_WIRED) { |
1066 | if (flags & PVF_WIRED) | | 1066 | if (flags & PVF_WIRED) |
1067 | ++pm->pm_stats.wired_count; | | 1067 | ++pm->pm_stats.wired_count; |
1068 | else | | 1068 | else |
1069 | --pm->pm_stats.wired_count; | | 1069 | --pm->pm_stats.wired_count; |
1070 | } | | 1070 | } |
1071 | | | 1071 | |
1072 | if ((flags ^ oflags) & PVF_WRITE) { | | 1072 | if ((flags ^ oflags) & PVF_WRITE) { |
1073 | if (pm == pmap_kernel()) { | | 1073 | if (pm == pmap_kernel()) { |
1074 | if (flags & PVF_WRITE) { | | 1074 | if (flags & PVF_WRITE) { |
1075 | pg->mdpage.krw_mappings++; | | 1075 | pg->mdpage.krw_mappings++; |
1076 | pg->mdpage.kro_mappings--; | | 1076 | pg->mdpage.kro_mappings--; |
1077 | } else { | | 1077 | } else { |
1078 | pg->mdpage.kro_mappings++; | | 1078 | pg->mdpage.kro_mappings++; |
1079 | pg->mdpage.krw_mappings--; | | 1079 | pg->mdpage.krw_mappings--; |
1080 | } | | 1080 | } |
1081 | } else | | 1081 | } else |
1082 | if (flags & PVF_WRITE) { | | 1082 | if (flags & PVF_WRITE) { |
1083 | pg->mdpage.urw_mappings++; | | 1083 | pg->mdpage.urw_mappings++; |
1084 | pg->mdpage.uro_mappings--; | | 1084 | pg->mdpage.uro_mappings--; |
1085 | } else { | | 1085 | } else { |
1086 | pg->mdpage.uro_mappings++; | | 1086 | pg->mdpage.uro_mappings++; |
1087 | pg->mdpage.urw_mappings--; | | 1087 | pg->mdpage.urw_mappings--; |
1088 | } | | 1088 | } |
1089 | } | | 1089 | } |
1090 | #ifdef PMAP_CACHE_VIPT | | 1090 | #ifdef PMAP_CACHE_VIPT |
1091 | if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) | | 1091 | if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) |
1092 | pg->mdpage.pvh_attrs &= ~PVF_WRITE; | | 1092 | pg->mdpage.pvh_attrs &= ~PVF_WRITE; |
1093 | /* | | 1093 | /* |
1094 | * We have two cases here: the first is from enter_pv (new exec | | 1094 | * We have two cases here: the first is from enter_pv (new exec |
1095 | * page), the second is a combined pmap_remove_pv/pmap_enter_pv. | | 1095 | * page), the second is a combined pmap_remove_pv/pmap_enter_pv. |
1096 | * Since in the latter, pmap_enter_pv won't do anything, we just have | | 1096 | * Since in the latter, pmap_enter_pv won't do anything, we just have |
1097 | * to do what pmap_remove_pv would do. | | 1097 | * to do what pmap_remove_pv would do. |
1098 | */ | | 1098 | */ |
1099 | if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) | | 1099 | if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) |
1100 | || (PV_IS_EXEC_P(pg->mdpage.pvh_attrs) | | 1100 | || (PV_IS_EXEC_P(pg->mdpage.pvh_attrs) |
1101 | || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) { | | 1101 | || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) { |
1102 | pmap_syncicache_page(pg); | | 1102 | pmap_syncicache_page(pg); |
1103 | PMAPCOUNT(exec_synced_remap); | | 1103 | PMAPCOUNT(exec_synced_remap); |
1104 | } | | 1104 | } |
1105 | KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 1105 | KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); |
1106 | #endif | | 1106 | #endif |
1107 | | | 1107 | |
1108 | PMAPCOUNT(remappings); | | 1108 | PMAPCOUNT(remappings); |
1109 | | | 1109 | |
1110 | return (oflags); | | 1110 | return (oflags); |
1111 | } | | 1111 | } |
1112 | | | 1112 | |
1113 | /* | | 1113 | /* |
1114 | * Allocate an L1 translation table for the specified pmap. | | 1114 | * Allocate an L1 translation table for the specified pmap. |
1115 | * This is called at pmap creation time. | | 1115 | * This is called at pmap creation time. |
1116 | */ | | 1116 | */ |
1117 | static void | | 1117 | static void |
1118 | pmap_alloc_l1(pmap_t pm) | | 1118 | pmap_alloc_l1(pmap_t pm) |
1119 | { | | 1119 | { |
1120 | struct l1_ttable *l1; | | 1120 | struct l1_ttable *l1; |
1121 | u_int8_t domain; | | 1121 | u_int8_t domain; |
1122 | | | 1122 | |
1123 | /* | | 1123 | /* |
1124 | * Remove the L1 at the head of the LRU list | | 1124 | * Remove the L1 at the head of the LRU list |
1125 | */ | | 1125 | */ |
1126 | simple_lock(&l1_lru_lock); | | 1126 | simple_lock(&l1_lru_lock); |
1127 | l1 = TAILQ_FIRST(&l1_lru_list); | | 1127 | l1 = TAILQ_FIRST(&l1_lru_list); |
1128 | KDASSERT(l1 != NULL); | | 1128 | KDASSERT(l1 != NULL); |
1129 | TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); | | 1129 | TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); |
1130 | | | 1130 | |
1131 | /* | | 1131 | /* |
1132 | * Pick the first available domain number, and update | | 1132 | * Pick the first available domain number, and update |
1133 | * the link to the next number. | | 1133 | * the link to the next number. |
1134 | */ | | 1134 | */ |
1135 | domain = l1->l1_domain_first; | | 1135 | domain = l1->l1_domain_first; |
1136 | l1->l1_domain_first = l1->l1_domain_free[domain]; | | 1136 | l1->l1_domain_first = l1->l1_domain_free[domain]; |
1137 | | | 1137 | |
1138 | /* | | 1138 | /* |
1139 | * If there are still free domain numbers in this L1, | | 1139 | * If there are still free domain numbers in this L1, |
1140 | * put it back on the TAIL of the LRU list. | | 1140 | * put it back on the TAIL of the LRU list. |
1141 | */ | | 1141 | */ |
1142 | if (++l1->l1_domain_use_count < PMAP_DOMAINS) | | 1142 | if (++l1->l1_domain_use_count < PMAP_DOMAINS) |
1143 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); | | 1143 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); |
1144 | | | 1144 | |
1145 | simple_unlock(&l1_lru_lock); | | 1145 | simple_unlock(&l1_lru_lock); |
1146 | | | 1146 | |
1147 | /* | | 1147 | /* |
1148 | * Fix up the relevant bits in the pmap structure | | 1148 | * Fix up the relevant bits in the pmap structure |
1149 | */ | | 1149 | */ |
1150 | pm->pm_l1 = l1; | | 1150 | pm->pm_l1 = l1; |
1151 | pm->pm_domain = domain; | | 1151 | pm->pm_domain = domain; |
1152 | } | | 1152 | } |
1153 | | | 1153 | |
1154 | /* | | 1154 | /* |
1155 | * Free an L1 translation table. | | 1155 | * Free an L1 translation table. |
1156 | * This is called at pmap destruction time. | | 1156 | * This is called at pmap destruction time. |
1157 | */ | | 1157 | */ |
1158 | static void | | 1158 | static void |
1159 | pmap_free_l1(pmap_t pm) | | 1159 | pmap_free_l1(pmap_t pm) |
1160 | { | | 1160 | { |
1161 | struct l1_ttable *l1 = pm->pm_l1; | | 1161 | struct l1_ttable *l1 = pm->pm_l1; |
1162 | | | 1162 | |
1163 | simple_lock(&l1_lru_lock); | | 1163 | simple_lock(&l1_lru_lock); |
1164 | | | 1164 | |
1165 | /* | | 1165 | /* |
1166 | * If this L1 is currently on the LRU list, remove it. | | 1166 | * If this L1 is currently on the LRU list, remove it. |
1167 | */ | | 1167 | */ |
1168 | if (l1->l1_domain_use_count < PMAP_DOMAINS) | | 1168 | if (l1->l1_domain_use_count < PMAP_DOMAINS) |
1169 | TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); | | 1169 | TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); |
1170 | | | 1170 | |
1171 | /* | | 1171 | /* |
1172 | * Free up the domain number which was allocated to the pmap | | 1172 | * Free up the domain number which was allocated to the pmap |
1173 | */ | | 1173 | */ |
1174 | l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first; | | 1174 | l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first; |
1175 | l1->l1_domain_first = pm->pm_domain; | | 1175 | l1->l1_domain_first = pm->pm_domain; |
1176 | l1->l1_domain_use_count--; | | 1176 | l1->l1_domain_use_count--; |
1177 | | | 1177 | |
1178 | /* | | 1178 | /* |
1179 | * The L1 now must have at least 1 free domain, so add | | 1179 | * The L1 now must have at least 1 free domain, so add |
1180 | * it back to the LRU list. If the use count is zero, | | 1180 | * it back to the LRU list. If the use count is zero, |
1181 | * put it at the head of the list, otherwise it goes | | 1181 | * put it at the head of the list, otherwise it goes |
1182 | * to the tail. | | 1182 | * to the tail. |
1183 | */ | | 1183 | */ |
1184 | if (l1->l1_domain_use_count == 0) | | 1184 | if (l1->l1_domain_use_count == 0) |
1185 | TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); | | 1185 | TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); |
1186 | else | | 1186 | else |
1187 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); | | 1187 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); |
1188 | | | 1188 | |
1189 | simple_unlock(&l1_lru_lock); | | 1189 | simple_unlock(&l1_lru_lock); |
1190 | } | | 1190 | } |
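
Taken together, pmap_alloc_l1() and pmap_free_l1() manage the per-L1 domain numbers with an array-embedded free list: l1_domain_free[d] holds the number of the next free domain after d, and l1_domain_first is the head. A minimal standalone sketch of the same technique, assuming 15 allocatable domains as a stand-in for PMAP_DOMAINS (all names here are local to the example):

#ifdef notyet	/* illustration only */
#define NDOM	15

static u_int8_t dom_free[NDOM];	/* dom_free[d] = free domain after d */
static u_int8_t dom_first;	/* head of the free list */

static void
dom_init(void)
{
	u_int8_t d;

	for (d = 0; d < NDOM; d++)
		dom_free[d] = d + 1;	/* chain 0 -> 1 -> ... -> NDOM */
	dom_first = 0;
}

static u_int8_t
dom_alloc(void)		/* cf. the body of pmap_alloc_l1() */
{
	u_int8_t d = dom_first;

	dom_first = dom_free[d];	/* unlink the head, O(1) */
	return (d);
}

static void
dom_release(u_int8_t d)	/* cf. the body of pmap_free_l1() */
{
	dom_free[d] = dom_first;	/* push d back on the head, O(1) */
	dom_first = d;
}
#endif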
1191 | | | 1191 | |
1192 | static inline void | | 1192 | static inline void |
1193 | pmap_use_l1(pmap_t pm) | | 1193 | pmap_use_l1(pmap_t pm) |
1194 | { | | 1194 | { |
1195 | struct l1_ttable *l1; | | 1195 | struct l1_ttable *l1; |
1196 | | | 1196 | |
1197 | /* | | 1197 | /* |
1198 | * Do nothing if we're in interrupt context. | | 1198 | * Do nothing if we're in interrupt context. |
1199 | * Access to an L1 by the kernel pmap must not affect | | 1199 | * Access to an L1 by the kernel pmap must not affect |
1200 | * the LRU list. | | 1200 | * the LRU list. |
1201 | */ | | 1201 | */ |
1202 | if (cpu_intr_p() || pm == pmap_kernel()) | | 1202 | if (cpu_intr_p() || pm == pmap_kernel()) |
1203 | return; | | 1203 | return; |
1204 | | | 1204 | |
1205 | l1 = pm->pm_l1; | | 1205 | l1 = pm->pm_l1; |
1206 | | | 1206 | |
1207 | /* | | 1207 | /* |
1208 | * If the L1 is not currently on the LRU list, just return | | 1208 | * If the L1 is not currently on the LRU list, just return |
1209 | */ | | 1209 | */ |
1210 | if (l1->l1_domain_use_count == PMAP_DOMAINS) | | 1210 | if (l1->l1_domain_use_count == PMAP_DOMAINS) |
1211 | return; | | 1211 | return; |
1212 | | | 1212 | |
1213 | simple_lock(&l1_lru_lock); | | 1213 | simple_lock(&l1_lru_lock); |
1214 | | | 1214 | |
| @@ -2649,2436 +2649,2454 @@ pmap_page_remove(struct vm_page *pg) | | | @@ -2649,2436 +2649,2454 @@ pmap_page_remove(struct vm_page *pg) |
2649 | cpu_tlb_flushD(); | | 2649 | cpu_tlb_flushD(); |
2650 | } | | 2650 | } |
2651 | cpu_cpwait(); | | 2651 | cpu_cpwait(); |
2652 | } | | 2652 | } |
2653 | | | 2653 | |
2654 | /* | | 2654 | /* |
2655 | * pmap_t pmap_create(void) | | 2655 | * pmap_t pmap_create(void) |
2656 | * | | 2656 | * |
2657 | * Create a new pmap structure from scratch. | | 2657 | * Create a new pmap structure from scratch. |
2658 | */ | | 2658 | */ |
2659 | pmap_t | | 2659 | pmap_t |
2660 | pmap_create(void) | | 2660 | pmap_create(void) |
2661 | { | | 2661 | { |
2662 | pmap_t pm; | | 2662 | pmap_t pm; |
2663 | | | 2663 | |
2664 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); | | 2664 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); |
2665 | | | 2665 | |
2666 | UVM_OBJ_INIT(&pm->pm_obj, NULL, 1); | | 2666 | UVM_OBJ_INIT(&pm->pm_obj, NULL, 1); |
2667 | pm->pm_stats.wired_count = 0; | | 2667 | pm->pm_stats.wired_count = 0; |
2668 | pm->pm_stats.resident_count = 1; | | 2668 | pm->pm_stats.resident_count = 1; |
2669 | pm->pm_cstate.cs_all = 0; | | 2669 | pm->pm_cstate.cs_all = 0; |
2670 | pmap_alloc_l1(pm); | | 2670 | pmap_alloc_l1(pm); |
2671 | | | 2671 | |
2672 | /* | | 2672 | /* |
2673 | * Note: The pool cache ensures that the pm_l2[] array is already | | 2673 | * Note: The pool cache ensures that the pm_l2[] array is already |
2674 | * initialised to zero. | | 2674 | * initialised to zero. |
2675 | */ | | 2675 | */ |
2676 | | | 2676 | |
2677 | pmap_pinit(pm); | | 2677 | pmap_pinit(pm); |
2678 | | | 2678 | |
2679 | LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); | | 2679 | LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); |
2680 | | | 2680 | |
2681 | return (pm); | | 2681 | return (pm); |
2682 | } | | 2682 | } |
2683 | | | 2683 | |
2684 | /* | | 2684 | /* |
2685 | * void pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, | | 2685 | * void pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, |
2686 | * int flags) | | 2686 | * int flags) |
2687 | * | | 2687 | * |
2688 | * Insert the given physical page (p) at | | 2688 | * Insert the given physical page (p) at |
2689 | * the specified virtual address (v) in the | | 2689 | * the specified virtual address (v) in the |
2690 | * target physical map with the protection requested. | | 2690 | * target physical map with the protection requested. |
2691 | * | | 2691 | * |
2692 | * NB: This is the only routine which MAY NOT lazy-evaluate | | 2692 | * NB: This is the only routine which MAY NOT lazy-evaluate |
2693 | * or lose information. That is, this routine must actually | | 2693 | * or lose information. That is, this routine must actually |
2694 | * insert this page into the given map NOW. | | 2694 | * insert this page into the given map NOW. |
2695 | */ | | 2695 | */ |
2696 | int | | 2696 | int |
2697 | pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) | | 2697 | pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) |
2698 | { | | 2698 | { |
2699 | struct l2_bucket *l2b; | | 2699 | struct l2_bucket *l2b; |
2700 | struct vm_page *pg, *opg; | | 2700 | struct vm_page *pg, *opg; |
2701 | struct pv_entry *pve; | | 2701 | struct pv_entry *pve; |
2702 | pt_entry_t *ptep, npte, opte; | | 2702 | pt_entry_t *ptep, npte, opte; |
2703 | u_int nflags; | | 2703 | u_int nflags; |
2704 | u_int oflags; | | 2704 | u_int oflags; |
2705 | | | 2705 | |
2706 | NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags)); | | 2706 | NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags)); |
2707 | | | 2707 | |
2708 | KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); | | 2708 | KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); |
2709 | KDASSERT(((va | pa) & PGOFSET) == 0); | | 2709 | KDASSERT(((va | pa) & PGOFSET) == 0); |
2710 | | | 2710 | |
2711 | /* | | 2711 | /* |
2712 | * Get a pointer to the page. Later on in this function, we | | 2712 | * Get a pointer to the page. Later on in this function, we |
2713 | * test for a managed page by checking pg != NULL. | | 2713 | * test for a managed page by checking pg != NULL. |
2714 | */ | | 2714 | */ |
2715 | pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; | | 2715 | pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; |
2716 | | | 2716 | |
2717 | nflags = 0; | | 2717 | nflags = 0; |
2718 | if (prot & VM_PROT_WRITE) | | 2718 | if (prot & VM_PROT_WRITE) |
2719 | nflags |= PVF_WRITE; | | 2719 | nflags |= PVF_WRITE; |
2720 | if (prot & VM_PROT_EXECUTE) | | 2720 | if (prot & VM_PROT_EXECUTE) |
2721 | nflags |= PVF_EXEC; | | 2721 | nflags |= PVF_EXEC; |
2722 | if (flags & PMAP_WIRED) | | 2722 | if (flags & PMAP_WIRED) |
2723 | nflags |= PVF_WIRED; | | 2723 | nflags |= PVF_WIRED; |
2724 | | | 2724 | |
2725 | PMAP_MAP_TO_HEAD_LOCK(); | | 2725 | PMAP_MAP_TO_HEAD_LOCK(); |
2726 | pmap_acquire_pmap_lock(pm); | | 2726 | pmap_acquire_pmap_lock(pm); |
2727 | | | 2727 | |
2728 | /* | | 2728 | /* |
2729 | * Fetch the L2 bucket which maps this page, allocating one if | | 2729 | * Fetch the L2 bucket which maps this page, allocating one if |
2730 | * necessary for user pmaps. | | 2730 | * necessary for user pmaps. |
2731 | */ | | 2731 | */ |
2732 | if (pm == pmap_kernel()) | | 2732 | if (pm == pmap_kernel()) |
2733 | l2b = pmap_get_l2_bucket(pm, va); | | 2733 | l2b = pmap_get_l2_bucket(pm, va); |
2734 | else | | 2734 | else |
2735 | l2b = pmap_alloc_l2_bucket(pm, va); | | 2735 | l2b = pmap_alloc_l2_bucket(pm, va); |
2736 | if (l2b == NULL) { | | 2736 | if (l2b == NULL) { |
2737 | if (flags & PMAP_CANFAIL) { | | 2737 | if (flags & PMAP_CANFAIL) { |
2738 | pmap_release_pmap_lock(pm); | | 2738 | pmap_release_pmap_lock(pm); |
2739 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 2739 | PMAP_MAP_TO_HEAD_UNLOCK(); |
2740 | return (ENOMEM); | | 2740 | return (ENOMEM); |
2741 | } | | 2741 | } |
2742 | panic("pmap_enter: failed to allocate L2 bucket"); | | 2742 | panic("pmap_enter: failed to allocate L2 bucket"); |
2743 | } | | 2743 | } |
2744 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 2744 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
2745 | opte = *ptep; | | 2745 | opte = *ptep; |
2746 | npte = pa; | | 2746 | npte = pa; |
2747 | oflags = 0; | | 2747 | oflags = 0; |
2748 | | | 2748 | |
2749 | if (opte) { | | 2749 | if (opte) { |
2750 | /* | | 2750 | /* |
2751 | * There is already a mapping at this address. | | 2751 | * There is already a mapping at this address. |
2752 | * If the physical address is different, look up the | | 2752 | * If the physical address is different, look up the |
2753 | * vm_page. | | 2753 | * vm_page. |
2754 | */ | | 2754 | */ |
2755 | if (l2pte_pa(opte) != pa) | | 2755 | if (l2pte_pa(opte) != pa) |
2756 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 2756 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
2757 | else | | 2757 | else |
2758 | opg = pg; | | 2758 | opg = pg; |
2759 | } else | | 2759 | } else |
2760 | opg = NULL; | | 2760 | opg = NULL; |
2761 | | | 2761 | |
2762 | if (pg) { | | 2762 | if (pg) { |
2763 | /* | | 2763 | /* |
2764 | * This is to be a managed mapping. | | 2764 | * This is to be a managed mapping. |
2765 | */ | | 2765 | */ |
2766 | if ((flags & VM_PROT_ALL) || | | 2766 | if ((flags & VM_PROT_ALL) || |
2767 | (pg->mdpage.pvh_attrs & PVF_REF)) { | | 2767 | (pg->mdpage.pvh_attrs & PVF_REF)) { |
2768 | /* | | 2768 | /* |
2769 | * - The access type indicates that we don't need | | 2769 | * - The access type indicates that we don't need |
2770 | * to do referenced emulation. | | 2770 | * to do referenced emulation. |
2771 | * OR | | 2771 | * OR |
2772 | * - The physical page has already been referenced | | 2772 | * - The physical page has already been referenced |
2773 | * so no need to re-do referenced emulation here. | | 2773 | * so no need to re-do referenced emulation here. |
2774 | */ | | 2774 | */ |
2775 | npte |= L2_S_PROTO; | | 2775 | npte |= L2_S_PROTO; |
2776 | | | 2776 | |
2777 | nflags |= PVF_REF; | | 2777 | nflags |= PVF_REF; |
2778 | | | 2778 | |
2779 | if ((prot & VM_PROT_WRITE) != 0 && | | 2779 | if ((prot & VM_PROT_WRITE) != 0 && |
2780 | ((flags & VM_PROT_WRITE) != 0 || | | 2780 | ((flags & VM_PROT_WRITE) != 0 || |
2781 | (pg->mdpage.pvh_attrs & PVF_MOD) != 0)) { | | 2781 | (pg->mdpage.pvh_attrs & PVF_MOD) != 0)) { |
2782 | /* | | 2782 | /* |
2783 | * This is a writable mapping, and the | | 2783 | * This is a writable mapping, and the |
2784 | * page's mod state indicates it has | | 2784 | * page's mod state indicates it has |
2785 | * already been modified. Make it | | 2785 | * already been modified. Make it |
2786 | * writable from the outset. | | 2786 | * writable from the outset. |
2787 | */ | | 2787 | */ |
2788 | npte |= L2_S_PROT_W; | | 2788 | npte |= L2_S_PROT_W; |
2789 | nflags |= PVF_MOD; | | 2789 | nflags |= PVF_MOD; |
2790 | } | | 2790 | } |
2791 | } else { | | 2791 | } else { |
2792 | /* | | 2792 | /* |
2793 | * Need to do page referenced emulation. | | 2793 | * Need to do page referenced emulation. |
2794 | */ | | 2794 | */ |
2795 | npte |= L2_TYPE_INV; | | 2795 | npte |= L2_TYPE_INV; |
2796 | } | | 2796 | } |
2797 | | | 2797 | |
2798 | npte |= pte_l2_s_cache_mode; | | 2798 | npte |= pte_l2_s_cache_mode; |
2799 | | | 2799 | |
2800 | if (pg == opg) { | | 2800 | if (pg == opg) { |
2801 | /* | | 2801 | /* |
2802 | * We're changing the attrs of an existing mapping. | | 2802 | * We're changing the attrs of an existing mapping. |
2803 | */ | | 2803 | */ |
2804 | simple_lock(&pg->mdpage.pvh_slock); | | 2804 | simple_lock(&pg->mdpage.pvh_slock); |
2805 | oflags = pmap_modify_pv(pg, pm, va, | | 2805 | oflags = pmap_modify_pv(pg, pm, va, |
2806 | PVF_WRITE | PVF_EXEC | PVF_WIRED | | | 2806 | PVF_WRITE | PVF_EXEC | PVF_WIRED | |
2807 | PVF_MOD | PVF_REF, nflags); | | 2807 | PVF_MOD | PVF_REF, nflags); |
2808 | simple_unlock(&pg->mdpage.pvh_slock); | | 2808 | simple_unlock(&pg->mdpage.pvh_slock); |
2809 | | | 2809 | |
2810 | #ifdef PMAP_CACHE_VIVT | | 2810 | #ifdef PMAP_CACHE_VIVT |
2811 | /* | | 2811 | /* |
2812 | * We may need to flush the cache if we're | | 2812 | * We may need to flush the cache if we're |
2813 | * doing rw-ro... | | 2813 | * doing rw-ro... |
2814 | */ | | 2814 | */ |
2815 | if (pm->pm_cstate.cs_cache_d && | | 2815 | if (pm->pm_cstate.cs_cache_d && |
2816 | (oflags & PVF_NC) == 0 && | | 2816 | (oflags & PVF_NC) == 0 && |
2817 | (opte & L2_S_PROT_W) != 0 && | | 2817 | (opte & L2_S_PROT_W) != 0 && |
2818 | (prot & VM_PROT_WRITE) == 0) | | 2818 | (prot & VM_PROT_WRITE) == 0) |
2819 | cpu_dcache_wb_range(va, PAGE_SIZE); | | 2819 | cpu_dcache_wb_range(va, PAGE_SIZE); |
2820 | #endif | | 2820 | #endif |
2821 | } else { | | 2821 | } else { |
2822 | /* | | 2822 | /* |
2823 | * New mapping, or changing the backing page | | 2823 | * New mapping, or changing the backing page |
2824 | * of an existing mapping. | | 2824 | * of an existing mapping. |
2825 | */ | | 2825 | */ |
2826 | if (opg) { | | 2826 | if (opg) { |
2827 | /* | | 2827 | /* |
2828 | * Replacing an existing mapping with a new one. | | 2828 | * Replacing an existing mapping with a new one. |
2829 | * It is part of our managed memory so we | | 2829 | * It is part of our managed memory so we |
2830 | * must remove it from the PV list | | 2830 | * must remove it from the PV list |
2831 | */ | | 2831 | */ |
2832 | simple_lock(&opg->mdpage.pvh_slock); | | 2832 | simple_lock(&opg->mdpage.pvh_slock); |
2833 | pve = pmap_remove_pv(opg, pm, va, 0); | | 2833 | pve = pmap_remove_pv(opg, pm, va, 0); |
2834 | pmap_vac_me_harder(opg, pm, 0); | | 2834 | pmap_vac_me_harder(opg, pm, 0); |
2835 | simple_unlock(&opg->mdpage.pvh_slock); | | 2835 | simple_unlock(&opg->mdpage.pvh_slock); |
2836 | oflags = pve->pv_flags; | | 2836 | oflags = pve->pv_flags; |
2837 | | | 2837 | |
2838 | #ifdef PMAP_CACHE_VIVT | | 2838 | #ifdef PMAP_CACHE_VIVT |
2839 | /* | | 2839 | /* |
2840 | * If the old mapping was valid (ref/mod | | 2840 | * If the old mapping was valid (ref/mod |
2841 | * emulation creates 'invalid' mappings | | 2841 | * emulation creates 'invalid' mappings |
2842 | * initially) then make sure to frob | | 2842 | * initially) then make sure to frob |
2843 | * the cache. | | 2843 | * the cache. |
2844 | */ | | 2844 | */ |
2845 | if ((oflags & PVF_NC) == 0 && | | 2845 | if ((oflags & PVF_NC) == 0 && |
2846 | l2pte_valid(opte)) { | | 2846 | l2pte_valid(opte)) { |
2847 | if (PV_BEEN_EXECD(oflags)) { | | 2847 | if (PV_BEEN_EXECD(oflags)) { |
2848 | pmap_idcache_wbinv_range(pm, va, | | 2848 | pmap_idcache_wbinv_range(pm, va, |
2849 | PAGE_SIZE); | | 2849 | PAGE_SIZE); |
2850 | } else | | 2850 | } else |
2851 | if (PV_BEEN_REFD(oflags)) { | | 2851 | if (PV_BEEN_REFD(oflags)) { |
2852 | pmap_dcache_wb_range(pm, va, | | 2852 | pmap_dcache_wb_range(pm, va, |
2853 | PAGE_SIZE, true, | | 2853 | PAGE_SIZE, true, |
2854 | (oflags & PVF_WRITE) == 0); | | 2854 | (oflags & PVF_WRITE) == 0); |
2855 | } | | 2855 | } |
2856 | } | | 2856 | } |
2857 | #endif | | 2857 | #endif |
2858 | } else | | 2858 | } else |
2859 | if ((pve = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){ | | 2859 | if ((pve = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){ |
2860 | if ((flags & PMAP_CANFAIL) == 0) | | 2860 | if ((flags & PMAP_CANFAIL) == 0) |
2861 | panic("pmap_enter: no pv entries"); | | 2861 | panic("pmap_enter: no pv entries"); |
2862 | | | 2862 | |
2863 | if (pm != pmap_kernel()) | | 2863 | if (pm != pmap_kernel()) |
2864 | pmap_free_l2_bucket(pm, l2b, 0); | | 2864 | pmap_free_l2_bucket(pm, l2b, 0); |
2865 | pmap_release_pmap_lock(pm); | | 2865 | pmap_release_pmap_lock(pm); |
2866 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 2866 | PMAP_MAP_TO_HEAD_UNLOCK(); |
2867 | NPDEBUG(PDB_ENTER, | | 2867 | NPDEBUG(PDB_ENTER, |
2868 | printf("pmap_enter: ENOMEM\n")); | | 2868 | printf("pmap_enter: ENOMEM\n")); |
2869 | return (ENOMEM); | | 2869 | return (ENOMEM); |
2870 | } | | 2870 | } |
2871 | | | 2871 | |
2872 | pmap_enter_pv(pg, pve, pm, va, nflags); | | 2872 | pmap_enter_pv(pg, pve, pm, va, nflags); |
2873 | } | | 2873 | } |
2874 | } else { | | 2874 | } else { |
2875 | /* | | 2875 | /* |
2876 | * We're mapping an unmanaged page. | | 2876 | * We're mapping an unmanaged page. |
2877 | * These are always readable, and possibly writable, from | | 2877 | * These are always readable, and possibly writable, from |
2878 | * the get go as we don't need to track ref/mod status. | | 2878 | * the get go as we don't need to track ref/mod status. |
2879 | */ | | 2879 | */ |
2880 | npte |= L2_S_PROTO; | | 2880 | npte |= L2_S_PROTO; |
2881 | if (prot & VM_PROT_WRITE) | | 2881 | if (prot & VM_PROT_WRITE) |
2882 | npte |= L2_S_PROT_W; | | 2882 | npte |= L2_S_PROT_W; |
2883 | | | 2883 | |
2884 | /* | | 2884 | /* |
2885 | * Make sure the vector table is mapped cacheable | | 2885 | * Make sure the vector table is mapped cacheable |
2886 | */ | | 2886 | */ |
2887 | if (pm != pmap_kernel() && va == vector_page) | | 2887 | if (pm != pmap_kernel() && va == vector_page) |
2888 | npte |= pte_l2_s_cache_mode; | | 2888 | npte |= pte_l2_s_cache_mode; |
2889 | | | 2889 | |
2890 | if (opg) { | | 2890 | if (opg) { |
2891 | /* | | 2891 | /* |
2892 | * Looks like there's an existing 'managed' mapping | | 2892 | * Looks like there's an existing 'managed' mapping |
2893 | * at this address. | | 2893 | * at this address. |
2894 | */ | | 2894 | */ |
2895 | simple_lock(&opg->mdpage.pvh_slock); | | 2895 | simple_lock(&opg->mdpage.pvh_slock); |
2896 | pve = pmap_remove_pv(opg, pm, va, 0); | | 2896 | pve = pmap_remove_pv(opg, pm, va, 0); |
2897 | pmap_vac_me_harder(opg, pm, 0); | | 2897 | pmap_vac_me_harder(opg, pm, 0); |
2898 | simple_unlock(&opg->mdpage.pvh_slock); | | 2898 | simple_unlock(&opg->mdpage.pvh_slock); |
2899 | oflags = pve->pv_flags; | | 2899 | oflags = pve->pv_flags; |
2900 | | | 2900 | |
2901 | #ifdef PMAP_CACHE_VIVT | | 2901 | #ifdef PMAP_CACHE_VIVT |
2902 | if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { | | 2902 | if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { |
2903 | if (PV_BEEN_EXECD(oflags)) | | 2903 | if (PV_BEEN_EXECD(oflags)) |
2904 | pmap_idcache_wbinv_range(pm, va, | | 2904 | pmap_idcache_wbinv_range(pm, va, |
2905 | PAGE_SIZE); | | 2905 | PAGE_SIZE); |
2906 | else | | 2906 | else |
2907 | if (PV_BEEN_REFD(oflags)) | | 2907 | if (PV_BEEN_REFD(oflags)) |
2908 | pmap_dcache_wb_range(pm, va, PAGE_SIZE, | | 2908 | pmap_dcache_wb_range(pm, va, PAGE_SIZE, |
2909 | true, (oflags & PVF_WRITE) == 0); | | 2909 | true, (oflags & PVF_WRITE) == 0); |
2910 | } | | 2910 | } |
2911 | #endif | | 2911 | #endif |
2912 | pool_put(&pmap_pv_pool, pve); | | 2912 | pool_put(&pmap_pv_pool, pve); |
2913 | } | | 2913 | } |
2914 | } | | 2914 | } |
2915 | | | 2915 | |
2916 | /* | | 2916 | /* |
2917 | * Make sure userland mappings get the right permissions | | 2917 | * Make sure userland mappings get the right permissions |
2918 | */ | | 2918 | */ |
2919 | if (pm != pmap_kernel() && va != vector_page) | | 2919 | if (pm != pmap_kernel() && va != vector_page) |
2920 | npte |= L2_S_PROT_U; | | 2920 | npte |= L2_S_PROT_U; |
2921 | | | 2921 | |
2922 | /* | | 2922 | /* |
2923 | * Keep the stats up to date | | 2923 | * Keep the stats up to date |
2924 | */ | | 2924 | */ |
2925 | if (opte == 0) { | | 2925 | if (opte == 0) { |
2926 | l2b->l2b_occupancy++; | | 2926 | l2b->l2b_occupancy++; |
2927 | pm->pm_stats.resident_count++; | | 2927 | pm->pm_stats.resident_count++; |
2928 | } | | 2928 | } |
2929 | | | 2929 | |
2930 | NPDEBUG(PDB_ENTER, | | 2930 | NPDEBUG(PDB_ENTER, |
2931 | printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte)); | | 2931 | printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte)); |
2932 | | | 2932 | |
2933 | /* | | 2933 | /* |
2934 | * If this is just a wiring change, the two PTEs will be | | 2934 | * If this is just a wiring change, the two PTEs will be |
2935 | * identical, so there's no need to update the page table. | | 2935 | * identical, so there's no need to update the page table. |
2936 | */ | | 2936 | */ |
2937 | if (npte != opte) { | | 2937 | if (npte != opte) { |
2938 | bool is_cached = pmap_is_cached(pm); | | 2938 | bool is_cached = pmap_is_cached(pm); |
2939 | | | 2939 | |
2940 | *ptep = npte; | | 2940 | *ptep = npte; |
2941 | if (is_cached) { | | 2941 | if (is_cached) { |
2942 | /* | | 2942 | /* |
2943 | * We only need to frob the cache/tlb if this pmap | | 2943 | * We only need to frob the cache/tlb if this pmap |
2944 | * is current | | 2944 | * is current |
2945 | */ | | 2945 | */ |
2946 | PTE_SYNC(ptep); | | 2946 | PTE_SYNC(ptep); |
2947 | if (va != vector_page && l2pte_valid(npte)) { | | 2947 | if (va != vector_page && l2pte_valid(npte)) { |
2948 | /* | | 2948 | /* |
2949 | * This mapping is likely to be accessed as | | 2949 | * This mapping is likely to be accessed as |
2950 | * soon as we return to userland. Fix up the | | 2950 | * soon as we return to userland. Fix up the |
2951 | * L1 entry to avoid taking another | | 2951 | * L1 entry to avoid taking another |
2952 | * page/domain fault. | | 2952 | * page/domain fault. |
2953 | */ | | 2953 | */ |
2954 | pd_entry_t *pl1pd, l1pd; | | 2954 | pd_entry_t *pl1pd, l1pd; |
2955 | | | 2955 | |
2956 | pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)]; | | 2956 | pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)]; |
2957 | l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | | | 2957 | l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | |
2958 | L1_C_PROTO; | | 2958 | L1_C_PROTO; |
2959 | if (*pl1pd != l1pd) { | | 2959 | if (*pl1pd != l1pd) { |
2960 | *pl1pd = l1pd; | | 2960 | *pl1pd = l1pd; |
2961 | PTE_SYNC(pl1pd); | | 2961 | PTE_SYNC(pl1pd); |
2962 | } | | 2962 | } |
2963 | } | | 2963 | } |
2964 | } | | 2964 | } |
2965 | | | 2965 | |
2966 | if (PV_BEEN_EXECD(oflags)) | | 2966 | if (PV_BEEN_EXECD(oflags)) |
2967 | pmap_tlb_flushID_SE(pm, va); | | 2967 | pmap_tlb_flushID_SE(pm, va); |
2968 | else | | 2968 | else |
2969 | if (PV_BEEN_REFD(oflags)) | | 2969 | if (PV_BEEN_REFD(oflags)) |
2970 | pmap_tlb_flushD_SE(pm, va); | | 2970 | pmap_tlb_flushD_SE(pm, va); |
2971 | | | 2971 | |
2972 | NPDEBUG(PDB_ENTER, | | 2972 | NPDEBUG(PDB_ENTER, |
2973 | printf("pmap_enter: is_cached %d cs 0x%08x\n", | | 2973 | printf("pmap_enter: is_cached %d cs 0x%08x\n", |
2974 | is_cached, pm->pm_cstate.cs_all)); | | 2974 | is_cached, pm->pm_cstate.cs_all)); |
2975 | | | 2975 | |
2976 | if (pg != NULL) { | | 2976 | if (pg != NULL) { |
2977 | simple_lock(&pg->mdpage.pvh_slock); | | 2977 | simple_lock(&pg->mdpage.pvh_slock); |
2978 | pmap_vac_me_harder(pg, pm, va); | | 2978 | pmap_vac_me_harder(pg, pm, va); |
2979 | simple_unlock(&pg->mdpage.pvh_slock); | | 2979 | simple_unlock(&pg->mdpage.pvh_slock); |
2980 | } | | 2980 | } |
2981 | } | | 2981 | } |
2982 | #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) | | 2982 | #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) |
2983 | simple_lock(&pg->mdpage.pvh_slock); | | 2983 | simple_lock(&pg->mdpage.pvh_slock); |
2984 | KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 2984 | KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); |
2985 | KASSERT(((pg->mdpage.pvh_attrs & PVF_WRITE) == 0) == (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)); | | 2985 | KASSERT(((pg->mdpage.pvh_attrs & PVF_WRITE) == 0) == (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)); |
2986 | simple_unlock(&pg->mdpage.pvh_slock); | | 2986 | simple_unlock(&pg->mdpage.pvh_slock); |
2987 | #endif | | 2987 | #endif |
2988 | | | 2988 | |
2989 | pmap_release_pmap_lock(pm); | | 2989 | pmap_release_pmap_lock(pm); |
2990 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 2990 | PMAP_MAP_TO_HEAD_UNLOCK(); |
2991 | | | 2991 | |
2992 | return (0); | | 2992 | return (0); |
2993 | } | | 2993 | } |
2994 | | | 2994 | |
2995 | /* | | 2995 | /* |
2996 | * pmap_remove() | | 2996 | * pmap_remove() |
2997 | * | | 2997 | * |
2998 | * pmap_remove is responsible for nuking a number of mappings for a range | | 2998 | * pmap_remove is responsible for nuking a number of mappings for a range |
2999 | * of virtual address space in the current pmap. To do this efficiently | | 2999 | * of virtual address space in the current pmap. To do this efficiently |
3000 | * is interesting, because in a number of cases a wide virtual address | | 3000 | * is interesting, because in a number of cases a wide virtual address |
3001 | * range may be supplied that contains few actual mappings. So, the | | 3001 | * range may be supplied that contains few actual mappings. So, the |
3002 | * optimisations are: | | 3002 | * optimisations are: |
3003 | * 1. Skip over hunks of address space for which no L1 or L2 entry exists. | | 3003 | * 1. Skip over hunks of address space for which no L1 or L2 entry exists. |
3004 | * 2. Build up a list of pages we've hit, up to a maximum, so we can | | 3004 | * 2. Build up a list of pages we've hit, up to a maximum, so we can |
3005 | * maybe do just a partial cache clean. This path of execution is | | 3005 | * maybe do just a partial cache clean. This path of execution is |
3006 | * complicated by the fact that the cache must be flushed _before_ | | 3006 | * complicated by the fact that the cache must be flushed _before_ |
3007 | * the PTE is nuked, being a VAC :-) | | 3007 | * the PTE is nuked, being a VAC :-) |
3008 | * 3. If we're called after UVM calls pmap_remove_all(), we can defer | | 3008 | * 3. If we're called after UVM calls pmap_remove_all(), we can defer |
3009 | * all invalidations until pmap_update(), since pmap_remove_all() has | | 3009 | * all invalidations until pmap_update(), since pmap_remove_all() has |
3010 | * already flushed the cache. | | 3010 | * already flushed the cache. |
3011 | * 4. Maybe later fast-case a single page, but I don't think this is | | 3011 | * 4. Maybe later fast-case a single page, but I don't think this is |
3012 | * going to make _that_ much difference overall. | | 3012 | * going to make _that_ much difference overall. |
3013 | */ | | 3013 | */ |
3014 | | | 3014 | |
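| | | | /*
| | | |  * Size of the clean list below: up to this many PTEs are cleaned
| | | |  * and flushed one page at a time; once the list overflows, it is
| | | |  * cheaper to flush the whole cache/TLB and defer the rest.
| | | |  */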
3015 | #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 | | 3015 | #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 |
3016 | | | 3016 | |
3017 | void | | 3017 | void |
3018 | pmap_do_remove(pmap_t pm, vaddr_t sva, vaddr_t eva, int skip_wired) | | 3018 | pmap_do_remove(pmap_t pm, vaddr_t sva, vaddr_t eva, int skip_wired) |
3019 | { | | 3019 | { |
3020 | struct l2_bucket *l2b; | | 3020 | struct l2_bucket *l2b; |
3021 | vaddr_t next_bucket; | | 3021 | vaddr_t next_bucket; |
3022 | pt_entry_t *ptep; | | 3022 | pt_entry_t *ptep; |
3023 | u_int cleanlist_idx, total, cnt; | | 3023 | u_int cleanlist_idx, total, cnt; |
3024 | struct { | | 3024 | struct { |
3025 | vaddr_t va; | | 3025 | vaddr_t va; |
3026 | pt_entry_t *ptep; | | 3026 | pt_entry_t *ptep; |
3027 | } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; | | 3027 | } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; |
3028 | u_int mappings, is_exec, is_refd; | | 3028 | u_int mappings, is_exec, is_refd; |
3029 | | | 3029 | |
3030 | NPDEBUG(PDB_REMOVE, printf("pmap_do_remove: pmap=%p sva=%08lx " | | 3030 | NPDEBUG(PDB_REMOVE, printf("pmap_do_remove: pmap=%p sva=%08lx " |
3031 | "eva=%08lx\n", pm, sva, eva)); | | 3031 | "eva=%08lx\n", pm, sva, eva)); |
3032 | | | 3032 | |
3033 | /* | | 3033 | /* |
3034 | * we lock in the pmap => pv_head direction | | 3034 | * we lock in the pmap => pv_head direction |
3035 | */ | | 3035 | */ |
3036 | PMAP_MAP_TO_HEAD_LOCK(); | | 3036 | PMAP_MAP_TO_HEAD_LOCK(); |
3037 | pmap_acquire_pmap_lock(pm); | | 3037 | pmap_acquire_pmap_lock(pm); |
3038 | | | 3038 | |
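| | | | /*
| | | |  * If a full flush has already been done (pm_remove_all), or this
| | | |  * pmap's mappings are not live in the cache, start the index past
| | | |  * the clean list's limit so per-page cleaning is skipped. If no
| | | |  * TLB state is live either, all flushes can be deferred.
| | | |  */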
3039 | if (pm->pm_remove_all || !pmap_is_cached(pm)) { | | 3039 | if (pm->pm_remove_all || !pmap_is_cached(pm)) { |
3040 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; | | 3040 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; |
3041 | if (pm->pm_cstate.cs_tlb == 0) | | 3041 | if (pm->pm_cstate.cs_tlb == 0) |
3042 | pm->pm_remove_all = true; | | 3042 | pm->pm_remove_all = true; |
3043 | } else | | 3043 | } else |
3044 | cleanlist_idx = 0; | | 3044 | cleanlist_idx = 0; |
3045 | | | 3045 | |
3046 | total = 0; | | 3046 | total = 0; |
3047 | | | 3047 | |
3048 | while (sva < eva) { | | 3048 | while (sva < eva) { |
3049 | /* | | 3049 | /* |
3050 | * Do one L2 bucket's worth at a time. | | 3050 | * Do one L2 bucket's worth at a time. |
3051 | */ | | 3051 | */ |
3052 | next_bucket = L2_NEXT_BUCKET(sva); | | 3052 | next_bucket = L2_NEXT_BUCKET(sva); |
3053 | if (next_bucket > eva) | | 3053 | if (next_bucket > eva) |
3054 | next_bucket = eva; | | 3054 | next_bucket = eva; |
3055 | | | 3055 | |
3056 | l2b = pmap_get_l2_bucket(pm, sva); | | 3056 | l2b = pmap_get_l2_bucket(pm, sva); |
3057 | if (l2b == NULL) { | | 3057 | if (l2b == NULL) { |
3058 | sva = next_bucket; | | 3058 | sva = next_bucket; |
3059 | continue; | | 3059 | continue; |
3060 | } | | 3060 | } |
3061 | | | 3061 | |
3062 | ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 3062 | ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
3063 | | | 3063 | |
3064 | for (mappings = 0; sva < next_bucket; sva += PAGE_SIZE, ptep++){ | | 3064 | for (mappings = 0; sva < next_bucket; sva += PAGE_SIZE, ptep++){ |
3065 | struct vm_page *pg; | | 3065 | struct vm_page *pg; |
3066 | pt_entry_t pte; | | 3066 | pt_entry_t pte; |
3067 | paddr_t pa; | | 3067 | paddr_t pa; |
3068 | | | 3068 | |
3069 | pte = *ptep; | | 3069 | pte = *ptep; |
3070 | | | 3070 | |
3071 | if (pte == 0) { | | 3071 | if (pte == 0) { |
3072 | /* Nothing here, move along */ | | 3072 | /* Nothing here, move along */ |
3073 | continue; | | 3073 | continue; |
3074 | } | | 3074 | } |
3075 | | | 3075 | |
3076 | pa = l2pte_pa(pte); | | 3076 | pa = l2pte_pa(pte); |
3077 | is_exec = 0; | | 3077 | is_exec = 0; |
3078 | is_refd = 1; | | 3078 | is_refd = 1; |
3079 | | | 3079 | |
3080 | /* | | 3080 | /* |
3081 | * Update flags. In many cases we could | | 3081 | * Update flags. In many cases we could
3082 | * cluster these updates and handle several | | 3082 | * cluster these updates and handle several
3083 | * sequential pages in one go. | | 3083 | * sequential pages in one go.
3084 | */ | | 3084 | */ |
3085 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { | | 3085 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { |
3086 | struct pv_entry *pve; | | 3086 | struct pv_entry *pve; |
3087 | simple_lock(&pg->mdpage.pvh_slock); | | 3087 | simple_lock(&pg->mdpage.pvh_slock); |
3088 | pve = pmap_remove_pv(pg, pm, sva, skip_wired); | | 3088 | pve = pmap_remove_pv(pg, pm, sva, skip_wired); |
3089 | pmap_vac_me_harder(pg, pm, 0); | | 3089 | pmap_vac_me_harder(pg, pm, 0); |
3090 | simple_unlock(&pg->mdpage.pvh_slock); | | 3090 | simple_unlock(&pg->mdpage.pvh_slock); |
3091 | if (pve != NULL) { | | 3091 | if (pve != NULL) { |
3092 | if (pm->pm_remove_all == false) { | | 3092 | if (pm->pm_remove_all == false) { |
3093 | is_exec = | | 3093 | is_exec = |
3094 | PV_BEEN_EXECD(pve->pv_flags); | | 3094 | PV_BEEN_EXECD(pve->pv_flags); |
3095 | is_refd = | | 3095 | is_refd = |
3096 | PV_BEEN_REFD(pve->pv_flags); | | 3096 | PV_BEEN_REFD(pve->pv_flags); |
3097 | } | | 3097 | } |
3098 | pool_put(&pmap_pv_pool, pve); | | 3098 | pool_put(&pmap_pv_pool, pve); |
3099 | } else | | 3099 | } else |
3100 | if (skip_wired) { | | 3100 | if (skip_wired) { |
3101 | /* The mapping is wired. Skip it */ | | 3101 | /* The mapping is wired. Skip it */ |
3102 | continue; | | 3102 | continue; |
3103 | } | | 3103 | } |
3104 | } else | | 3104 | } else |
3105 | if (skip_wired) { | | 3105 | if (skip_wired) { |
3106 | /* Unmanaged pages are always wired. */ | | 3106 | /* Unmanaged pages are always wired. */ |
3107 | continue; | | 3107 | continue; |
3108 | } | | 3108 | } |
3109 | | | 3109 | |
3110 | mappings++; | | 3110 | mappings++; |
3111 | | | 3111 | |
3112 | if (!l2pte_valid(pte)) { | | 3112 | if (!l2pte_valid(pte)) { |
3113 | /* | | 3113 | /* |
3114 | * Ref/Mod emulation is still active for this | | 3114 | * Ref/Mod emulation is still active for this |
3115 | * mapping, therefore it has not yet been | | 3115 | * mapping, therefore it has not yet been
3116 | * accessed. No need to frob the cache/tlb. | | 3116 | * accessed. No need to frob the cache/tlb. |
3117 | */ | | 3117 | */ |
3118 | *ptep = 0; | | 3118 | *ptep = 0; |
3119 | PTE_SYNC_CURRENT(pm, ptep); | | 3119 | PTE_SYNC_CURRENT(pm, ptep); |
3120 | continue; | | 3120 | continue; |
3121 | } | | 3121 | } |
3122 | | | 3122 | |
3123 | if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3123 | if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3124 | /* Add to the clean list. */ | | 3124 | /* Add to the clean list. */ |
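| | | | /*
| | | |  * Bit 0 of the saved VA records is_exec; VAs are
| | | |  * page-aligned, so the bit is otherwise unused.
| | | |  */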
3125 | cleanlist[cleanlist_idx].ptep = ptep; | | 3125 | cleanlist[cleanlist_idx].ptep = ptep; |
3126 | cleanlist[cleanlist_idx].va = | | 3126 | cleanlist[cleanlist_idx].va = |
3127 | sva | (is_exec & 1); | | 3127 | sva | (is_exec & 1); |
3128 | cleanlist_idx++; | | 3128 | cleanlist_idx++; |
3129 | } else | | 3129 | } else |
3130 | if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3130 | if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3131 | /* Nuke everything if needed. */ | | 3131 | /* Nuke everything if needed. */ |
3132 | #ifdef PMAP_CACHE_VIVT | | 3132 | #ifdef PMAP_CACHE_VIVT |
3133 | pmap_idcache_wbinv_all(pm); | | 3133 | pmap_idcache_wbinv_all(pm); |
3134 | #endif | | 3134 | #endif |
3135 | pmap_tlb_flushID(pm); | | 3135 | pmap_tlb_flushID(pm); |
3136 | | | 3136 | |
3137 | /* | | 3137 | /* |
3138 | * Roll back the previous PTE list, | | 3138 | * Roll back the previous PTE list, |
3139 | * and zero out the current PTE. | | 3139 | * and zero out the current PTE. |
3140 | */ | | 3140 | */ |
3141 | for (cnt = 0; | | 3141 | for (cnt = 0; |
3142 | cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { | | 3142 | cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { |
3143 | *cleanlist[cnt].ptep = 0; | | 3143 | *cleanlist[cnt].ptep = 0; |
3144 | PTE_SYNC(cleanlist[cnt].ptep); | | 3144 | PTE_SYNC(cleanlist[cnt].ptep); |
3145 | } | | 3145 | } |
3146 | *ptep = 0; | | 3146 | *ptep = 0; |
3147 | PTE_SYNC(ptep); | | 3147 | PTE_SYNC(ptep); |
3148 | cleanlist_idx++; | | 3148 | cleanlist_idx++; |
3149 | pm->pm_remove_all = true; | | 3149 | pm->pm_remove_all = true; |
3150 | } else { | | 3150 | } else { |
3151 | *ptep = 0; | | 3151 | *ptep = 0; |
3152 | PTE_SYNC(ptep); | | 3152 | PTE_SYNC(ptep); |
3153 | if (pm->pm_remove_all == false) { | | 3153 | if (pm->pm_remove_all == false) { |
3154 | if (is_exec) | | 3154 | if (is_exec) |
3155 | pmap_tlb_flushID_SE(pm, sva); | | 3155 | pmap_tlb_flushID_SE(pm, sva); |
3156 | else | | 3156 | else |
3157 | if (is_refd) | | 3157 | if (is_refd) |
3158 | pmap_tlb_flushD_SE(pm, sva); | | 3158 | pmap_tlb_flushD_SE(pm, sva); |
3159 | } | | 3159 | } |
3160 | } | | 3160 | } |
3161 | } | | 3161 | } |
3162 | | | 3162 | |
3163 | /* | | 3163 | /* |
3164 | * Deal with any leftovers | | 3164 | * Deal with any leftovers
3165 | */ | | 3165 | */ |
3166 | if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { | | 3166 | if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { |
3167 | total += cleanlist_idx; | | 3167 | total += cleanlist_idx; |
3168 | for (cnt = 0; cnt < cleanlist_idx; cnt++) { | | 3168 | for (cnt = 0; cnt < cleanlist_idx; cnt++) { |
3169 | if (pm->pm_cstate.cs_all != 0) { | | 3169 | if (pm->pm_cstate.cs_all != 0) { |
3170 | vaddr_t clva = cleanlist[cnt].va & ~1; | | 3170 | vaddr_t clva = cleanlist[cnt].va & ~1; |
3171 | if (cleanlist[cnt].va & 1) { | | 3171 | if (cleanlist[cnt].va & 1) { |
3172 | #ifdef PMAP_CACHE_VIVT | | 3172 | #ifdef PMAP_CACHE_VIVT |
3173 | pmap_idcache_wbinv_range(pm, | | 3173 | pmap_idcache_wbinv_range(pm, |
3174 | clva, PAGE_SIZE); | | 3174 | clva, PAGE_SIZE); |
3175 | #endif | | 3175 | #endif |
3176 | pmap_tlb_flushID_SE(pm, clva); | | 3176 | pmap_tlb_flushID_SE(pm, clva); |
3177 | } else { | | 3177 | } else { |
3178 | #ifdef PMAP_CACHE_VIVT | | 3178 | #ifdef PMAP_CACHE_VIVT |
3179 | pmap_dcache_wb_range(pm, | | 3179 | pmap_dcache_wb_range(pm, |
3180 | clva, PAGE_SIZE, true, | | 3180 | clva, PAGE_SIZE, true, |
3181 | false); | | 3181 | false); |
3182 | #endif | | 3182 | #endif |
3183 | pmap_tlb_flushD_SE(pm, clva); | | 3183 | pmap_tlb_flushD_SE(pm, clva); |
3184 | } | | 3184 | } |
3185 | } | | 3185 | } |
3186 | *cleanlist[cnt].ptep = 0; | | 3186 | *cleanlist[cnt].ptep = 0; |
3187 | PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); | | 3187 | PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); |
3188 | } | | 3188 | } |
3189 | | | 3189 | |
3190 | /* | | 3190 | /* |
3191 | * If it looks like we're removing a whole bunch | | 3191 | * If it looks like we're removing a whole bunch |
3192 | * of mappings, it's faster to just write-back | | 3192 | * of mappings, it's faster to just write-back |
3193 | * the whole cache now and defer TLB flushes until | | 3193 | * the whole cache now and defer TLB flushes until |
3194 | * pmap_update() is called. | | 3194 | * pmap_update() is called. |
3195 | */ | | 3195 | */ |
3196 | if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) | | 3196 | if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) |
3197 | cleanlist_idx = 0; | | 3197 | cleanlist_idx = 0; |
3198 | else { | | 3198 | else { |
3199 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; | | 3199 | cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; |
3200 | #ifdef PMAP_CACHE_VIVT | | 3200 | #ifdef PMAP_CACHE_VIVT |
3201 | pmap_idcache_wbinv_all(pm); | | 3201 | pmap_idcache_wbinv_all(pm); |
3202 | #endif | | 3202 | #endif |
3203 | pm->pm_remove_all = true; | | 3203 | pm->pm_remove_all = true; |
3204 | } | | 3204 | } |
3205 | } | | 3205 | } |
3206 | | | 3206 | |
3207 | pmap_free_l2_bucket(pm, l2b, mappings); | | 3207 | pmap_free_l2_bucket(pm, l2b, mappings); |
3208 | pm->pm_stats.resident_count -= mappings; | | 3208 | pm->pm_stats.resident_count -= mappings; |
3209 | } | | 3209 | } |
3210 | | | 3210 | |
3211 | pmap_release_pmap_lock(pm); | | 3211 | pmap_release_pmap_lock(pm); |
3212 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 3212 | PMAP_MAP_TO_HEAD_UNLOCK(); |
3213 | } | | 3213 | } |
3214 | | | 3214 | |
3215 | #ifdef PMAP_CACHE_VIPT | | 3215 | #ifdef PMAP_CACHE_VIPT |
3216 | static struct pv_entry * | | 3216 | static struct pv_entry * |
3217 | pmap_kremove_pg(struct vm_page *pg, vaddr_t va) | | 3217 | pmap_kremove_pg(struct vm_page *pg, vaddr_t va) |
3218 | { | | 3218 | { |
3219 | struct pv_entry *pv; | | 3219 | struct pv_entry *pv; |
3220 | | | 3220 | |
3221 | simple_lock(&pg->mdpage.pvh_slock); | | 3221 | simple_lock(&pg->mdpage.pvh_slock); |
3222 | KASSERT(pg->mdpage.pvh_attrs & (PVF_COLORED|PVF_NC)); | | 3222 | KASSERT(pg->mdpage.pvh_attrs & (PVF_COLORED|PVF_NC)); |
3223 | KASSERT((pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0); | | 3223 | KASSERT((pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0); |
3224 | | | 3224 | |
3225 | pv = pmap_remove_pv(pg, pmap_kernel(), va, false); | | 3225 | pv = pmap_remove_pv(pg, pmap_kernel(), va, false); |
3226 | KASSERT(pv); | | 3226 | KASSERT(pv); |
3227 | KASSERT(pv->pv_flags & PVF_KENTRY); | | 3227 | KASSERT(pv->pv_flags & PVF_KENTRY); |
3228 | | | 3228 | |
3229 | /* | | 3229 | /* |
3230 | * If we are removing a writeable mapping to a cached exec page, | | 3230 | * If we are removing a writeable mapping to a cached exec page,
3231 | * and it's the last mapping, clear its exec status; otherwise sync | | 3231 | * and it's the last mapping, clear its exec status; otherwise sync
3232 | * the page to the icache. | | 3232 | * the page to the icache.
3233 | */ | | 3233 | */ |
3234 | if ((pg->mdpage.pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC | | 3234 | if ((pg->mdpage.pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC |
3235 | && (pv->pv_flags & PVF_WRITE) != 0) { | | 3235 | && (pv->pv_flags & PVF_WRITE) != 0) { |
3236 | if (SLIST_EMPTY(&pg->mdpage.pvh_list)) { | | 3236 | if (SLIST_EMPTY(&pg->mdpage.pvh_list)) { |
3237 | pg->mdpage.pvh_attrs &= ~PVF_EXEC; | | 3237 | pg->mdpage.pvh_attrs &= ~PVF_EXEC; |
3238 | PMAPCOUNT(exec_discarded_kremove); | | 3238 | PMAPCOUNT(exec_discarded_kremove); |
3239 | } else { | | 3239 | } else { |
3240 | pmap_syncicache_page(pg); | | 3240 | pmap_syncicache_page(pg); |
3241 | PMAPCOUNT(exec_synced_kremove); | | 3241 | PMAPCOUNT(exec_synced_kremove); |
3242 | } | | 3242 | } |
3243 | } | | 3243 | } |
3244 | pmap_vac_me_harder(pg, pmap_kernel(), 0); | | 3244 | pmap_vac_me_harder(pg, pmap_kernel(), 0); |
3245 | simple_unlock(&pg->mdpage.pvh_slock); | | 3245 | simple_unlock(&pg->mdpage.pvh_slock); |
3246 | | | 3246 | |
3247 | return pv; | | 3247 | return pv; |
3248 | } | | 3248 | } |
3249 | #endif /* PMAP_CACHE_VIPT */ | | 3249 | #endif /* PMAP_CACHE_VIPT */ |
3250 | | | 3250 | |
3251 | /* | | 3251 | /* |
3252 | * pmap_kenter_pa: enter an unmanaged, wired kernel mapping | | 3252 | * pmap_kenter_pa: enter an unmanaged, wired kernel mapping |
3253 | * | | 3253 | * |
3254 | * We assume there is already sufficient KVM space available | | 3254 | * We assume there is already sufficient KVM space available |
3255 | * to do this, as we can't allocate L2 descriptor tables/metadata | | 3255 | * to do this, as we can't allocate L2 descriptor tables/metadata |
3256 | * from here. | | 3256 | * from here. |
3257 | */ | | 3257 | */ |
3258 | void | | 3258 | void |
3259 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) | | 3259 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) |
3260 | { | | 3260 | { |
3261 | struct l2_bucket *l2b; | | 3261 | struct l2_bucket *l2b; |
3262 | pt_entry_t *ptep, opte; | | 3262 | pt_entry_t *ptep, opte; |
3263 | #ifdef PMAP_CACHE_VIVT | | 3263 | #ifdef PMAP_CACHE_VIVT |
3264 | struct vm_page *pg = (prot & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL; | | 3264 | struct vm_page *pg = (prot & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL; |
3265 | #endif | | 3265 | #endif |
3266 | #ifdef PMAP_CACHE_VIPT | | 3266 | #ifdef PMAP_CACHE_VIPT |
3267 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); | | 3267 | struct vm_page *pg = PHYS_TO_VM_PAGE(pa); |
3268 | struct vm_page *opg; | | 3268 | struct vm_page *opg; |
3269 | struct pv_entry *pv = NULL; | | 3269 | struct pv_entry *pv = NULL; |
3270 | #endif | | 3270 | #endif |
3271 | | | 3271 | |
3272 | NPDEBUG(PDB_KENTER, | | 3272 | NPDEBUG(PDB_KENTER, |
3273 | printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n", | | 3273 | printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n", |
3274 | va, pa, prot)); | | 3274 | va, pa, prot)); |
3275 | | | 3275 | |
3276 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); | | 3276 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); |
3277 | KDASSERT(l2b != NULL); | | 3277 | KDASSERT(l2b != NULL); |
3278 | | | 3278 | |
3279 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3279 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3280 | opte = *ptep; | | 3280 | opte = *ptep; |
3281 | | | 3281 | |
3282 | if (opte == 0) { | | 3282 | if (opte == 0) { |
3283 | PMAPCOUNT(kenter_mappings); | | 3283 | PMAPCOUNT(kenter_mappings); |
3284 | l2b->l2b_occupancy++; | | 3284 | l2b->l2b_occupancy++; |
3285 | } else { | | 3285 | } else { |
3286 | PMAPCOUNT(kenter_remappings); | | 3286 | PMAPCOUNT(kenter_remappings); |
3287 | #ifdef PMAP_CACHE_VIPT | | 3287 | #ifdef PMAP_CACHE_VIPT |
3288 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3288 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3289 | if (opg) { | | 3289 | if (opg) { |
3290 | KASSERT(opg != pg); | | 3290 | KASSERT(opg != pg); |
3291 | KASSERT((opg->mdpage.pvh_attrs & PVF_KMPAGE) == 0); | | 3291 | KASSERT((opg->mdpage.pvh_attrs & PVF_KMPAGE) == 0); |
3292 | KASSERT((prot & PMAP_KMPAGE) == 0); | | 3292 | KASSERT((prot & PMAP_KMPAGE) == 0); |
3293 | simple_lock(&opg->mdpage.pvh_slock); | | 3293 | simple_lock(&opg->mdpage.pvh_slock); |
3294 | pv = pmap_kremove_pg(opg, va); | | 3294 | pv = pmap_kremove_pg(opg, va); |
3295 | simple_unlock(&opg->mdpage.pvh_slock); | | 3295 | simple_unlock(&opg->mdpage.pvh_slock); |
3296 | } | | 3296 | } |
3297 | #endif | | 3297 | #endif |
3298 | if (l2pte_valid(opte)) { | | 3298 | if (l2pte_valid(opte)) { |
3299 | #ifdef PMAP_CACHE_VIVT | | 3299 | #ifdef PMAP_CACHE_VIVT |
3300 | cpu_dcache_wbinv_range(va, PAGE_SIZE); | | 3300 | cpu_dcache_wbinv_range(va, PAGE_SIZE); |
3301 | #endif | | 3301 | #endif |
3302 | cpu_tlb_flushD_SE(va); | | 3302 | cpu_tlb_flushD_SE(va); |
3303 | cpu_cpwait(); | | 3303 | cpu_cpwait(); |
3304 | } | | 3304 | } |
3305 | } | | 3305 | } |
3306 | | | 3306 | |
3307 | *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | | | 3307 | *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | |
3308 | pte_l2_s_cache_mode; | | 3308 | pte_l2_s_cache_mode; |
3309 | PTE_SYNC(ptep); | | 3309 | PTE_SYNC(ptep); |
3310 | | | 3310 | |
3311 | if (pg) { | | 3311 | if (pg) { |
3312 | if (prot & PMAP_KMPAGE) { | | 3312 | if (prot & PMAP_KMPAGE) { |
3313 | simple_lock(&pg->mdpage.pvh_slock); | | 3313 | simple_lock(&pg->mdpage.pvh_slock); |
3314 | KASSERT(pg->mdpage.urw_mappings == 0); | | 3314 | KASSERT(pg->mdpage.urw_mappings == 0); |
3315 | KASSERT(pg->mdpage.uro_mappings == 0); | | 3315 | KASSERT(pg->mdpage.uro_mappings == 0); |
3316 | KASSERT(pg->mdpage.krw_mappings == 0); | | 3316 | KASSERT(pg->mdpage.krw_mappings == 0); |
3317 | KASSERT(pg->mdpage.kro_mappings == 0); | | 3317 | KASSERT(pg->mdpage.kro_mappings == 0); |
3318 | #ifdef PMAP_CACHE_VIPT | | 3318 | #ifdef PMAP_CACHE_VIPT |
3319 | KASSERT(pv == NULL); | | 3319 | KASSERT(pv == NULL); |
3320 | KASSERT((va & PVF_COLORED) == 0); | | 3320 | KASSERT((va & PVF_COLORED) == 0); |
3321 | KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0); | | 3321 | KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0); |
3322 | /* if there is a color conflict, evict from cache. */ | | 3322 | /* if there is a color conflict, evict from cache. */ |
3323 | if (pmap_is_page_colored_p(pg) | | 3323 | if (pmap_is_page_colored_p(pg) |
3324 | && ((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask)) { | | 3324 | && ((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask)) { |
3325 | PMAPCOUNT(vac_color_change); | | 3325 | PMAPCOUNT(vac_color_change); |
3326 | pmap_flush_page(pg, true); | | 3326 | pmap_flush_page(pg, true); |
3327 | } | | 3327 | } |
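| | | | /*
| | | |  * Keep the low flag bits but clear the old color (stored in
| | | |  * the VA bits of pvh_attrs) before recording the new one.
| | | |  */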
3328 | pg->mdpage.pvh_attrs &= PAGE_SIZE - 1; | | 3328 | pg->mdpage.pvh_attrs &= PAGE_SIZE - 1; |
3329 | pg->mdpage.pvh_attrs |= PVF_KMPAGE | | 3329 | pg->mdpage.pvh_attrs |= PVF_KMPAGE |
3330 | | PVF_COLORED | PVF_DIRTY | | 3330 | | PVF_COLORED | PVF_DIRTY |
3331 | | (va & arm_cache_prefer_mask); | | 3331 | | (va & arm_cache_prefer_mask); |
3332 | #endif | | 3332 | #endif |
3333 | #ifdef PMAP_CACHE_VIVT | | 3333 | #ifdef PMAP_CACHE_VIVT |
3334 | pg->mdpage.pvh_attrs |= PVF_KMPAGE; | | 3334 | pg->mdpage.pvh_attrs |= PVF_KMPAGE; |
3335 | #endif | | 3335 | #endif |
3336 | pmap_kmpages++; | | 3336 | pmap_kmpages++; |
3337 | simple_unlock(&pg->mdpage.pvh_slock); | | 3337 | simple_unlock(&pg->mdpage.pvh_slock); |
3338 | #ifdef PMAP_CACHE_VIPT | | 3338 | #ifdef PMAP_CACHE_VIPT |
3339 | } else { | | 3339 | } else { |
3340 | if (pv == NULL) { | | 3340 | if (pv == NULL) { |
3341 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); | | 3341 | pv = pool_get(&pmap_pv_pool, PR_NOWAIT); |
3342 | KASSERT(pv != NULL); | | 3342 | KASSERT(pv != NULL); |
3343 | } | | 3343 | } |
3344 | pmap_enter_pv(pg, pv, pmap_kernel(), va, | | 3344 | pmap_enter_pv(pg, pv, pmap_kernel(), va, |
3345 | PVF_WIRED | PVF_KENTRY | | 3345 | PVF_WIRED | PVF_KENTRY |
3346 | | (prot & VM_PROT_WRITE ? PVF_WRITE : 0)); | | 3346 | | (prot & VM_PROT_WRITE ? PVF_WRITE : 0)); |
3347 | if ((prot & VM_PROT_WRITE) | | 3347 | if ((prot & VM_PROT_WRITE) |
3348 | && !(pg->mdpage.pvh_attrs & PVF_NC)) | | 3348 | && !(pg->mdpage.pvh_attrs & PVF_NC)) |
3349 | pg->mdpage.pvh_attrs |= PVF_DIRTY; | | 3349 | pg->mdpage.pvh_attrs |= PVF_DIRTY; |
3350 | KASSERT((prot & VM_PROT_WRITE) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); | | 3350 | KASSERT((prot & VM_PROT_WRITE) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC))); |
3351 | simple_lock(&pg->mdpage.pvh_slock); | | 3351 | simple_lock(&pg->mdpage.pvh_slock); |
3352 | pmap_vac_me_harder(pg, pmap_kernel(), va); | | 3352 | pmap_vac_me_harder(pg, pmap_kernel(), va); |
3353 | simple_unlock(&pg->mdpage.pvh_slock); | | 3353 | simple_unlock(&pg->mdpage.pvh_slock); |
3354 | #endif | | 3354 | #endif |
3355 | } | | 3355 | } |
3356 | #ifdef PMAP_CACHE_VIPT | | 3356 | #ifdef PMAP_CACHE_VIPT |
3357 | } else { | | 3357 | } else { |
3358 | if (pv != NULL) | | 3358 | if (pv != NULL) |
3359 | pool_put(&pmap_pv_pool, pv); | | 3359 | pool_put(&pmap_pv_pool, pv); |
3360 | #endif | | 3360 | #endif |
3361 | } | | 3361 | } |
3362 | } | | 3362 | } |
3363 | | | 3363 | |
3364 | void | | 3364 | void |
3365 | pmap_kremove(vaddr_t va, vsize_t len) | | 3365 | pmap_kremove(vaddr_t va, vsize_t len) |
3366 | { | | 3366 | { |
3367 | struct l2_bucket *l2b; | | 3367 | struct l2_bucket *l2b; |
3368 | pt_entry_t *ptep, *sptep, opte; | | 3368 | pt_entry_t *ptep, *sptep, opte; |
3369 | vaddr_t next_bucket, eva; | | 3369 | vaddr_t next_bucket, eva; |
3370 | u_int mappings; | | 3370 | u_int mappings; |
3371 | struct vm_page *opg; | | 3371 | struct vm_page *opg; |
3372 | | | 3372 | |
3373 | PMAPCOUNT(kenter_unmappings); | | 3373 | PMAPCOUNT(kenter_unmappings); |
3374 | | | 3374 | |
3375 | NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n", | | 3375 | NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n", |
3376 | va, len)); | | 3376 | va, len)); |
3377 | | | 3377 | |
3378 | eva = va + len; | | 3378 | eva = va + len; |
3379 | | | 3379 | |
3380 | while (va < eva) { | | 3380 | while (va < eva) { |
3381 | next_bucket = L2_NEXT_BUCKET(va); | | 3381 | next_bucket = L2_NEXT_BUCKET(va); |
3382 | if (next_bucket > eva) | | 3382 | if (next_bucket > eva) |
3383 | next_bucket = eva; | | 3383 | next_bucket = eva; |
3384 | | | 3384 | |
3385 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); | | 3385 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); |
3386 | KDASSERT(l2b != NULL); | | 3386 | KDASSERT(l2b != NULL); |
3387 | | | 3387 | |
3388 | sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3388 | sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3389 | mappings = 0; | | 3389 | mappings = 0; |
3390 | | | 3390 | |
3391 | while (va < next_bucket) { | | 3391 | while (va < next_bucket) { |
3392 | opte = *ptep; | | 3392 | opte = *ptep; |
3393 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); | | 3393 | opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); |
3394 | if (opg) { | | 3394 | if (opg) { |
3395 | if (opg->mdpage.pvh_attrs & PVF_KMPAGE) { | | 3395 | if (opg->mdpage.pvh_attrs & PVF_KMPAGE) { |
3396 | simple_lock(&opg->mdpage.pvh_slock); | | 3396 | simple_lock(&opg->mdpage.pvh_slock); |
3397 | KASSERT(opg->mdpage.urw_mappings == 0); | | 3397 | KASSERT(opg->mdpage.urw_mappings == 0); |
3398 | KASSERT(opg->mdpage.uro_mappings == 0); | | 3398 | KASSERT(opg->mdpage.uro_mappings == 0); |
3399 | KASSERT(opg->mdpage.krw_mappings == 0); | | 3399 | KASSERT(opg->mdpage.krw_mappings == 0); |
3400 | KASSERT(opg->mdpage.kro_mappings == 0); | | 3400 | KASSERT(opg->mdpage.kro_mappings == 0); |
3401 | opg->mdpage.pvh_attrs &= ~PVF_KMPAGE; | | 3401 | opg->mdpage.pvh_attrs &= ~PVF_KMPAGE; |
3402 | #ifdef PMAP_CACHE_VIPT | | 3402 | #ifdef PMAP_CACHE_VIPT |
3403 | opg->mdpage.pvh_attrs &= ~PVF_WRITE; | | 3403 | opg->mdpage.pvh_attrs &= ~PVF_WRITE; |
3404 | #endif | | 3404 | #endif |
3405 | pmap_kmpages--; | | 3405 | pmap_kmpages--; |
3406 | simple_unlock(&opg->mdpage.pvh_slock); | | 3406 | simple_unlock(&opg->mdpage.pvh_slock); |
3407 | #ifdef PMAP_CACHE_VIPT | | 3407 | #ifdef PMAP_CACHE_VIPT |
3408 | } else { | | 3408 | } else { |
3409 | pool_put(&pmap_pv_pool, | | 3409 | pool_put(&pmap_pv_pool, |
3410 | pmap_kremove_pg(opg, va)); | | 3410 | pmap_kremove_pg(opg, va)); |
3411 | #endif | | 3411 | #endif |
3412 | } | | 3412 | } |
3413 | } | | 3413 | } |
3414 | if (l2pte_valid(opte)) { | | 3414 | if (l2pte_valid(opte)) { |
3415 | #ifdef PMAP_CACHE_VIVT | | 3415 | #ifdef PMAP_CACHE_VIVT |
3416 | cpu_dcache_wbinv_range(va, PAGE_SIZE); | | 3416 | cpu_dcache_wbinv_range(va, PAGE_SIZE); |
3417 | #endif | | 3417 | #endif |
3418 | cpu_tlb_flushD_SE(va); | | 3418 | cpu_tlb_flushD_SE(va); |
3419 | } | | 3419 | } |
3420 | if (opte) { | | 3420 | if (opte) { |
3421 | *ptep = 0; | | 3421 | *ptep = 0; |
3422 | mappings++; | | 3422 | mappings++; |
3423 | } | | 3423 | } |
3424 | va += PAGE_SIZE; | | 3424 | va += PAGE_SIZE; |
3425 | ptep++; | | 3425 | ptep++; |
3426 | } | | 3426 | } |
3427 | KDASSERT(mappings <= l2b->l2b_occupancy); | | 3427 | KDASSERT(mappings <= l2b->l2b_occupancy); |
3428 | l2b->l2b_occupancy -= mappings; | | 3428 | l2b->l2b_occupancy -= mappings; |
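| | | | /* Sync the whole run of modified PTEs in one operation. */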
3429 | PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); | | 3429 | PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); |
3430 | } | | 3430 | } |
3431 | cpu_cpwait(); | | 3431 | cpu_cpwait(); |
3432 | } | | 3432 | } |
3433 | | | 3433 | |
3434 | bool | | 3434 | bool |
3435 | pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) | | 3435 | pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) |
3436 | { | | 3436 | { |
3437 | struct l2_dtable *l2; | | 3437 | struct l2_dtable *l2; |
3438 | pd_entry_t *pl1pd, l1pd; | | 3438 | pd_entry_t *pl1pd, l1pd; |
3439 | pt_entry_t *ptep, pte; | | 3439 | pt_entry_t *ptep, pte; |
3440 | paddr_t pa; | | 3440 | paddr_t pa; |
3441 | u_int l1idx; | | 3441 | u_int l1idx; |
3442 | | | 3442 | |
3443 | pmap_acquire_pmap_lock(pm); | | 3443 | pmap_acquire_pmap_lock(pm); |
3444 | | | 3444 | |
3445 | l1idx = L1_IDX(va); | | 3445 | l1idx = L1_IDX(va); |
3446 | pl1pd = &pm->pm_l1->l1_kva[l1idx]; | | 3446 | pl1pd = &pm->pm_l1->l1_kva[l1idx]; |
3447 | l1pd = *pl1pd; | | 3447 | l1pd = *pl1pd; |
3448 | | | 3448 | |
3449 | if (l1pte_section_p(l1pd)) { | | 3449 | if (l1pte_section_p(l1pd)) { |
3450 | /* | | 3450 | /* |
3451 | * Section mappings should only happen for pmap_kernel() | | 3451 | * Section mappings should only happen for pmap_kernel()
3452 | */ | | 3452 | */ |
3453 | KDASSERT(pm == pmap_kernel()); | | 3453 | KDASSERT(pm == pmap_kernel()); |
3454 | pmap_release_pmap_lock(pm); | | 3454 | pmap_release_pmap_lock(pm); |
3455 | pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); | | 3455 | pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); |
3456 | } else { | | 3456 | } else { |
3457 | /* | | 3457 | /* |
3458 | * Note that we can't rely on the validity of the L1 | | 3458 | * Note that we can't rely on the validity of the L1 |
3459 | * descriptor as an indication that a mapping exists. | | 3459 | * descriptor as an indication that a mapping exists. |
3460 | * We have to look it up in the L2 dtable. | | 3460 | * We have to look it up in the L2 dtable. |
3461 | */ | | 3461 | */ |
3462 | l2 = pm->pm_l2[L2_IDX(l1idx)]; | | 3462 | l2 = pm->pm_l2[L2_IDX(l1idx)]; |
3463 | | | 3463 | |
3464 | if (l2 == NULL || | | 3464 | if (l2 == NULL || |
3465 | (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { | | 3465 | (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { |
3466 | pmap_release_pmap_lock(pm); | | 3466 | pmap_release_pmap_lock(pm); |
3467 | return false; | | 3467 | return false; |
3468 | } | | 3468 | } |
3469 | | | 3469 | |
3470 | ptep = &ptep[l2pte_index(va)]; | | 3470 | ptep = &ptep[l2pte_index(va)]; |
3471 | pte = *ptep; | | 3471 | pte = *ptep; |
3472 | pmap_release_pmap_lock(pm); | | 3472 | pmap_release_pmap_lock(pm); |
3473 | | | 3473 | |
3474 | if (pte == 0) | | 3474 | if (pte == 0) |
3475 | return false; | | 3475 | return false; |
3476 | | | 3476 | |
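| | | | /*
| | | |  * Large (64KB) pages use a wider frame mask than the
| | | |  * default small (4KB) page case.
| | | |  */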
3477 | switch (pte & L2_TYPE_MASK) { | | 3477 | switch (pte & L2_TYPE_MASK) { |
3478 | case L2_TYPE_L: | | 3478 | case L2_TYPE_L: |
3479 | pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); | | 3479 | pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); |
3480 | break; | | 3480 | break; |
3481 | | | 3481 | |
3482 | default: | | 3482 | default: |
3483 | pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); | | 3483 | pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); |
3484 | break; | | 3484 | break; |
3485 | } | | 3485 | } |
3486 | } | | 3486 | } |
3487 | | | 3487 | |
3488 | if (pap != NULL) | | 3488 | if (pap != NULL) |
3489 | *pap = pa; | | 3489 | *pap = pa; |
3490 | | | 3490 | |
3491 | return true; | | 3491 | return true; |
3492 | } | | 3492 | } |
3493 | | | 3493 | |
3494 | void | | 3494 | void |
3495 | pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) | | 3495 | pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) |
3496 | { | | 3496 | { |
3497 | struct l2_bucket *l2b; | | 3497 | struct l2_bucket *l2b; |
3498 | pt_entry_t *ptep, pte; | | 3498 | pt_entry_t *ptep, pte; |
3499 | vaddr_t next_bucket; | | 3499 | vaddr_t next_bucket; |
3500 | u_int flags; | | 3500 | u_int flags; |
3501 | u_int clr_mask; | | 3501 | u_int clr_mask; |
3502 | int flush; | | 3502 | int flush; |
3503 | | | 3503 | |
3504 | NPDEBUG(PDB_PROTECT, | | 3504 | NPDEBUG(PDB_PROTECT, |
3505 | printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n", | | 3505 | printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n", |
3506 | pm, sva, eva, prot)); | | 3506 | pm, sva, eva, prot)); |
3507 | | | 3507 | |
3508 | if ((prot & VM_PROT_READ) == 0) { | | 3508 | if ((prot & VM_PROT_READ) == 0) { |
3509 | pmap_remove(pm, sva, eva); | | 3509 | pmap_remove(pm, sva, eva); |
3510 | return; | | 3510 | return; |
3511 | } | | 3511 | } |
3512 | | | 3512 | |
3513 | if (prot & VM_PROT_WRITE) { | | 3513 | if (prot & VM_PROT_WRITE) { |
3514 | /* | | 3514 | /* |
3515 | * If this is a read->write transition, just ignore it and let | | 3515 | * If this is a read->write transition, just ignore it and let |
3516 | * uvm_fault() take care of it later. | | 3516 | * uvm_fault() take care of it later. |
3517 | */ | | 3517 | */ |
3518 | return; | | 3518 | return; |
3519 | } | | 3519 | } |
3520 | | | 3520 | |
3521 | PMAP_MAP_TO_HEAD_LOCK(); | | 3521 | PMAP_MAP_TO_HEAD_LOCK(); |
3522 | pmap_acquire_pmap_lock(pm); | | 3522 | pmap_acquire_pmap_lock(pm); |
3523 | | | 3523 | |
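| | | | /*
| | | |  * For ranges of four or more pages, count the pages we modify
| | | |  * (flush >= 0) and do a single TLB flush at the end; smaller
| | | |  * ranges (flush == -1) are flushed one page at a time.
| | | |  */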
3524 | flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; | | 3524 | flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; |
3525 | flags = 0; | | 3525 | flags = 0; |
3526 | clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); | | 3526 | clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); |
3527 | | | 3527 | |
3528 | while (sva < eva) { | | 3528 | while (sva < eva) { |
3529 | next_bucket = L2_NEXT_BUCKET(sva); | | 3529 | next_bucket = L2_NEXT_BUCKET(sva); |
3530 | if (next_bucket > eva) | | 3530 | if (next_bucket > eva) |
3531 | next_bucket = eva; | | 3531 | next_bucket = eva; |
3532 | | | 3532 | |
3533 | l2b = pmap_get_l2_bucket(pm, sva); | | 3533 | l2b = pmap_get_l2_bucket(pm, sva); |
3534 | if (l2b == NULL) { | | 3534 | if (l2b == NULL) { |
3535 | sva = next_bucket; | | 3535 | sva = next_bucket; |
3536 | continue; | | 3536 | continue; |
3537 | } | | 3537 | } |
3538 | | | 3538 | |
3539 | ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 3539 | ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
3540 | | | 3540 | |
3541 | while (sva < next_bucket) { | | 3541 | while (sva < next_bucket) { |
3542 | pte = *ptep; | | 3542 | pte = *ptep; |
3543 | if (l2pte_valid(pte) != 0 && (pte & L2_S_PROT_W) != 0) { | | 3543 | if (l2pte_valid(pte) != 0 && (pte & L2_S_PROT_W) != 0) { |
3544 | struct vm_page *pg; | | 3544 | struct vm_page *pg; |
3545 | u_int f; | | 3545 | u_int f; |
3546 | | | 3546 | |
3547 | #ifdef PMAP_CACHE_VIVT | | 3547 | #ifdef PMAP_CACHE_VIVT |
3548 | /* | | 3548 | /* |
3549 | * OK, at this point, we know we're doing | | 3549 | * OK, at this point, we know we're doing |
3550 | * a write-protect operation. If the pmap is | | 3550 | * a write-protect operation. If the pmap is
3551 | * active, write-back the page. | | 3551 | * active, write-back the page. |
3552 | */ | | 3552 | */ |
3553 | pmap_dcache_wb_range(pm, sva, PAGE_SIZE, | | 3553 | pmap_dcache_wb_range(pm, sva, PAGE_SIZE, |
3554 | false, false); | | 3554 | false, false); |
3555 | #endif | | 3555 | #endif |
3556 | | | 3556 | |
3557 | pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); | | 3557 | pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); |
3558 | pte &= ~L2_S_PROT_W; | | 3558 | pte &= ~L2_S_PROT_W; |
3559 | *ptep = pte; | | 3559 | *ptep = pte; |
3560 | PTE_SYNC(ptep); | | 3560 | PTE_SYNC(ptep); |
3561 | | | 3561 | |
3562 | if (pg != NULL) { | | 3562 | if (pg != NULL) { |
3563 | simple_lock(&pg->mdpage.pvh_slock); | | 3563 | simple_lock(&pg->mdpage.pvh_slock); |
3564 | f = pmap_modify_pv(pg, pm, sva, | | 3564 | f = pmap_modify_pv(pg, pm, sva, |
3565 | clr_mask, 0); | | 3565 | clr_mask, 0); |
3566 | pmap_vac_me_harder(pg, pm, sva); | | 3566 | pmap_vac_me_harder(pg, pm, sva); |
3567 | simple_unlock(&pg->mdpage.pvh_slock); | | 3567 | simple_unlock(&pg->mdpage.pvh_slock); |
3568 | } else | | 3568 | } else |
3569 | f = PVF_REF | PVF_EXEC; | | 3569 | f = PVF_REF | PVF_EXEC; |
3570 | | | 3570 | |
3571 | if (flush >= 0) { | | 3571 | if (flush >= 0) { |
3572 | flush++; | | 3572 | flush++; |
3573 | flags |= f; | | 3573 | flags |= f; |
3574 | } else | | 3574 | } else |
3575 | if (PV_BEEN_EXECD(f)) | | 3575 | if (PV_BEEN_EXECD(f)) |
3576 | pmap_tlb_flushID_SE(pm, sva); | | 3576 | pmap_tlb_flushID_SE(pm, sva); |
3577 | else | | 3577 | else |
3578 | if (PV_BEEN_REFD(f)) | | 3578 | if (PV_BEEN_REFD(f)) |
3579 | pmap_tlb_flushD_SE(pm, sva); | | 3579 | pmap_tlb_flushD_SE(pm, sva); |
3580 | } | | 3580 | } |
3581 | | | 3581 | |
3582 | sva += PAGE_SIZE; | | 3582 | sva += PAGE_SIZE; |
3583 | ptep++; | | 3583 | ptep++; |
3584 | } | | 3584 | } |
3585 | } | | 3585 | } |
3586 | | | 3586 | |
3587 | pmap_release_pmap_lock(pm); | | 3587 | pmap_release_pmap_lock(pm); |
3588 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 3588 | PMAP_MAP_TO_HEAD_UNLOCK(); |
3589 | | | 3589 | |
3590 | if (flush) { | | 3590 | if (flush) { |
3591 | if (PV_BEEN_EXECD(flags)) | | 3591 | if (PV_BEEN_EXECD(flags)) |
3592 | pmap_tlb_flushID(pm); | | 3592 | pmap_tlb_flushID(pm); |
3593 | else | | 3593 | else |
3594 | if (PV_BEEN_REFD(flags)) | | 3594 | if (PV_BEEN_REFD(flags)) |
3595 | pmap_tlb_flushD(pm); | | 3595 | pmap_tlb_flushD(pm); |
3596 | } | | 3596 | } |
3597 | } | | 3597 | } |
3598 | | | 3598 | |
3599 | void | | 3599 | void |
3600 | pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) | | 3600 | pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) |
3601 | { | | 3601 | { |
3602 | struct l2_bucket *l2b; | | 3602 | struct l2_bucket *l2b; |
3603 | pt_entry_t *ptep; | | 3603 | pt_entry_t *ptep; |
3604 | vaddr_t next_bucket; | | 3604 | vaddr_t next_bucket; |
3605 | vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; | | 3605 | vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; |
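| | | | /* The first iteration may cover a partial page if sva is unaligned. */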
3606 | | | 3606 | |
3607 | NPDEBUG(PDB_EXEC, | | 3607 | NPDEBUG(PDB_EXEC, |
3608 | printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n", | | 3608 | printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n", |
3609 | pm, sva, eva)); | | 3609 | pm, sva, eva)); |
3610 | | | 3610 | |
3611 | PMAP_MAP_TO_HEAD_LOCK(); | | 3611 | PMAP_MAP_TO_HEAD_LOCK(); |
3612 | pmap_acquire_pmap_lock(pm); | | 3612 | pmap_acquire_pmap_lock(pm); |
3613 | | | 3613 | |
3614 | while (sva < eva) { | | 3614 | while (sva < eva) { |
3615 | next_bucket = L2_NEXT_BUCKET(sva); | | 3615 | next_bucket = L2_NEXT_BUCKET(sva); |
3616 | if (next_bucket > eva) | | 3616 | if (next_bucket > eva) |
3617 | next_bucket = eva; | | 3617 | next_bucket = eva; |
3618 | | | 3618 | |
3619 | l2b = pmap_get_l2_bucket(pm, sva); | | 3619 | l2b = pmap_get_l2_bucket(pm, sva); |
3620 | if (l2b == NULL) { | | 3620 | if (l2b == NULL) { |
3621 | sva = next_bucket; | | 3621 | sva = next_bucket; |
3622 | continue; | | 3622 | continue; |
3623 | } | | 3623 | } |
3624 | | | 3624 | |
3625 | for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; | | 3625 | for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; |
3626 | sva < next_bucket; | | 3626 | sva < next_bucket; |
3627 | sva += page_size, ptep++, page_size = PAGE_SIZE) { | | 3627 | sva += page_size, ptep++, page_size = PAGE_SIZE) { |
3628 | if (l2pte_valid(*ptep)) { | | 3628 | if (l2pte_valid(*ptep)) { |
3629 | cpu_icache_sync_range(sva, | | 3629 | cpu_icache_sync_range(sva, |
3630 | min(page_size, eva - sva)); | | 3630 | min(page_size, eva - sva)); |
3631 | } | | 3631 | } |
3632 | } | | 3632 | } |
3633 | } | | 3633 | } |
3634 | | | 3634 | |
3635 | pmap_release_pmap_lock(pm); | | 3635 | pmap_release_pmap_lock(pm); |
3636 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 3636 | PMAP_MAP_TO_HEAD_UNLOCK(); |
3637 | } | | 3637 | } |
3638 | | | 3638 | |
3639 | void | | 3639 | void |
3640 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) | | 3640 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) |
3641 | { | | 3641 | { |
3642 | | | 3642 | |
3643 | NPDEBUG(PDB_PROTECT, | | 3643 | NPDEBUG(PDB_PROTECT, |
3644 | printf("pmap_page_protect: pg %p (0x%08lx), prot 0x%x\n", | | 3644 | printf("pmap_page_protect: pg %p (0x%08lx), prot 0x%x\n", |
3645 | pg, VM_PAGE_TO_PHYS(pg), prot)); | | 3645 | pg, VM_PAGE_TO_PHYS(pg), prot)); |
3646 | | | 3646 | |
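| | | | /*
| | | |  * Cases fall through deliberately: without APX support the exec
| | | |  * permission cannot be revoked separately, so those cases drop
| | | |  * through to the weaker protection below.
| | | |  */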
3647 | switch(prot) { | | 3647 | switch(prot) { |
3648 | return; | | | |
3649 | case VM_PROT_READ|VM_PROT_WRITE: | | 3648 | case VM_PROT_READ|VM_PROT_WRITE: |
3650 | #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) | | 3649 | #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) |
3651 | pmap_clearbit(pg, PVF_EXEC); | | 3650 | pmap_clearbit(pg, PVF_EXEC); |
3652 | break; | | 3651 | break; |
3653 | #endif | | 3652 | #endif |
3654 | case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: | | 3653 | case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: |
3655 | break; | | 3654 | break; |
3656 | | | 3655 | |
3657 | case VM_PROT_READ: | | 3656 | case VM_PROT_READ: |
3658 | #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) | | 3657 | #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) |
3659 | pmap_clearbit(pg, PVF_WRITE|PVF_EXEC); | | 3658 | pmap_clearbit(pg, PVF_WRITE|PVF_EXEC); |
3660 | break; | | 3659 | break; |
3661 | #endif | | 3660 | #endif |
3662 | case VM_PROT_READ|VM_PROT_EXECUTE: | | 3661 | case VM_PROT_READ|VM_PROT_EXECUTE: |
3663 | pmap_clearbit(pg, PVF_WRITE); | | 3662 | pmap_clearbit(pg, PVF_WRITE); |
3664 | break; | | 3663 | break; |
3665 | | | 3664 | |
3666 | default: | | 3665 | default: |
3667 | pmap_page_remove(pg); | | 3666 | pmap_page_remove(pg); |
3668 | break; | | 3667 | break; |
3669 | } | | 3668 | } |
3670 | } | | 3669 | } |
3671 | | | 3670 | |
3672 | /* | | 3671 | /* |
3673 | * pmap_clear_modify: | | 3672 | * pmap_clear_modify: |
3674 | * | | 3673 | * |
3675 | * Clear the "modified" attribute for a page. | | 3674 | * Clear the "modified" attribute for a page. |
3676 | */ | | 3675 | */ |
3677 | bool | | 3676 | bool |
3678 | pmap_clear_modify(struct vm_page *pg) | | 3677 | pmap_clear_modify(struct vm_page *pg) |
3679 | { | | 3678 | { |
3680 | bool rv; | | 3679 | bool rv; |
3681 | | | 3680 | |
3682 | if (pg->mdpage.pvh_attrs & PVF_MOD) { | | 3681 | if (pg->mdpage.pvh_attrs & PVF_MOD) { |
3683 | rv = true; | | 3682 | rv = true; |
3684 | pmap_clearbit(pg, PVF_MOD); | | 3683 | pmap_clearbit(pg, PVF_MOD); |
3685 | } else | | 3684 | } else |
3686 | rv = false; | | 3685 | rv = false; |
3687 | | | 3686 | |
3688 | return (rv); | | 3687 | return (rv); |
3689 | } | | 3688 | } |
3690 | | | 3689 | |
3691 | /* | | 3690 | /* |
3692 | * pmap_clear_reference: | | 3691 | * pmap_clear_reference: |
3693 | * | | 3692 | * |
3694 | * Clear the "referenced" attribute for a page. | | 3693 | * Clear the "referenced" attribute for a page. |
3695 | */ | | 3694 | */ |
3696 | bool | | 3695 | bool |
3697 | pmap_clear_reference(struct vm_page *pg) | | 3696 | pmap_clear_reference(struct vm_page *pg) |
3698 | { | | 3697 | { |
3699 | bool rv; | | 3698 | bool rv; |
3700 | | | 3699 | |
3701 | if (pg->mdpage.pvh_attrs & PVF_REF) { | | 3700 | if (pg->mdpage.pvh_attrs & PVF_REF) { |
3702 | rv = true; | | 3701 | rv = true; |
3703 | pmap_clearbit(pg, PVF_REF); | | 3702 | pmap_clearbit(pg, PVF_REF); |
3704 | } else | | 3703 | } else |
3705 | rv = false; | | 3704 | rv = false; |
3706 | | | 3705 | |
3707 | return (rv); | | 3706 | return (rv); |
3708 | } | | 3707 | } |
3709 | | | 3708 | |
3710 | /* | | 3709 | /* |
3711 | * pmap_is_modified: | | 3710 | * pmap_is_modified: |
3712 | * | | 3711 | * |
3713 | * Test if a page has the "modified" attribute. | | 3712 | * Test if a page has the "modified" attribute. |
3714 | */ | | 3713 | */ |
3715 | /* See <arm/arm32/pmap.h> */ | | 3714 | /* See <arm/arm32/pmap.h> */ |
3716 | | | 3715 | |
3717 | /* | | 3716 | /* |
3718 | * pmap_is_referenced: | | 3717 | * pmap_is_referenced: |
3719 | * | | 3718 | * |
3720 | * Test if a page has the "referenced" attribute. | | 3719 | * Test if a page has the "referenced" attribute. |
3721 | */ | | 3720 | */ |
3722 | /* See <arm/arm32/pmap.h> */ | | 3721 | /* See <arm/arm32/pmap.h> */ |
3723 | | | 3722 | |
3724 | int | | 3723 | int |
3725 | pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) | | 3724 | pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) |
3726 | { | | 3725 | { |
3727 | struct l2_dtable *l2; | | 3726 | struct l2_dtable *l2; |
3728 | struct l2_bucket *l2b; | | 3727 | struct l2_bucket *l2b; |
3729 | pd_entry_t *pl1pd, l1pd; | | 3728 | pd_entry_t *pl1pd, l1pd; |
3730 | pt_entry_t *ptep, pte; | | 3729 | pt_entry_t *ptep, pte; |
3731 | paddr_t pa; | | 3730 | paddr_t pa; |
3732 | u_int l1idx; | | 3731 | u_int l1idx; |
3733 | int rv = 0; | | 3732 | int rv = 0; |
3734 | | | 3733 | |
3735 | PMAP_MAP_TO_HEAD_LOCK(); | | 3734 | PMAP_MAP_TO_HEAD_LOCK(); |
3736 | pmap_acquire_pmap_lock(pm); | | 3735 | pmap_acquire_pmap_lock(pm); |
3737 | | | 3736 | |
3738 | l1idx = L1_IDX(va); | | 3737 | l1idx = L1_IDX(va); |
3739 | | | 3738 | |
3740 | /* | | 3739 | /* |
3741 | * If there is no l2_dtable for this address, then the process | | 3740 | * If there is no l2_dtable for this address, then the process |
3742 | * has no business accessing it. | | 3741 | * has no business accessing it. |
3743 | * | | 3742 | * |
3744 | * Note: This will catch userland processes trying to access | | 3743 | * Note: This will catch userland processes trying to access |
3745 | * kernel addresses. | | 3744 | * kernel addresses. |
3746 | */ | | 3745 | */ |
3747 | l2 = pm->pm_l2[L2_IDX(l1idx)]; | | 3746 | l2 = pm->pm_l2[L2_IDX(l1idx)]; |
3748 | if (l2 == NULL) | | 3747 | if (l2 == NULL) |
3749 | goto out; | | 3748 | goto out; |
3750 | | | 3749 | |
3751 | /* | | 3750 | /* |
3752 | * Likewise if there is no L2 descriptor table | | 3751 | * Likewise if there is no L2 descriptor table |
3753 | */ | | 3752 | */ |
3754 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; | | 3753 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; |
3755 | if (l2b->l2b_kva == NULL) | | 3754 | if (l2b->l2b_kva == NULL) |
3756 | goto out; | | 3755 | goto out; |
3757 | | | 3756 | |
3758 | /* | | 3757 | /* |
3759 | * Check the PTE itself. | | 3758 | * Check the PTE itself. |
3760 | */ | | 3759 | */ |
3761 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 3760 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
3762 | pte = *ptep; | | 3761 | pte = *ptep; |
3763 | if (pte == 0) | | 3762 | if (pte == 0) |
3764 | goto out; | | 3763 | goto out; |
3765 | | | 3764 | |
3766 | /* | | 3765 | /* |
3767 | * Catch a userland access to the vector page mapped at 0x0 | | 3766 | * Catch a userland access to the vector page mapped at 0x0 |
3768 | */ | | 3767 | */ |
3769 | if (user && (pte & L2_S_PROT_U) == 0) | | 3768 | if (user && (pte & L2_S_PROT_U) == 0) |
3770 | goto out; | | 3769 | goto out; |
3771 | | | 3770 | |
3772 | pa = l2pte_pa(pte); | | 3771 | pa = l2pte_pa(pte); |
3773 | | | 3772 | |
3774 | if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) { | | 3773 | if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) { |
3775 | /* | | 3774 | /* |
3776 | * This looks like a good candidate for "page modified" | | 3775 | * This looks like a good candidate for "page modified" |
3777 | * emulation... | | 3776 | * emulation... |
3778 | */ | | 3777 | */ |
3779 | struct pv_entry *pv; | | 3778 | struct pv_entry *pv; |
3780 | struct vm_page *pg; | | 3779 | struct vm_page *pg; |
3781 | | | 3780 | |
3782 | /* Extract the physical address of the page */ | | 3781 | /* Extract the physical address of the page */ |
3783 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) | | 3782 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) |
3784 | goto out; | | 3783 | goto out; |
3785 | | | 3784 | |
3786 | /* Get the current flags for this page. */ | | 3785 | /* Get the current flags for this page. */ |
3787 | simple_lock(&pg->mdpage.pvh_slock); | | 3786 | simple_lock(&pg->mdpage.pvh_slock); |
3788 | | | 3787 | |
3789 | pv = pmap_find_pv(pg, pm, va); | | 3788 | pv = pmap_find_pv(pg, pm, va); |
3790 | if (pv == NULL) { | | 3789 | if (pv == NULL) { |
3791 | simple_unlock(&pg->mdpage.pvh_slock); | | 3790 | simple_unlock(&pg->mdpage.pvh_slock); |
3792 | goto out; | | 3791 | goto out; |
3793 | } | | 3792 | } |
3794 | | | 3793 | |
3795 | /* | | 3794 | /* |
3796 | * Do the flags say this page is writable? If not then it | | 3795 | * Do the flags say this page is writable? If not then it |
3797 | * is a genuine write fault. If yes then the write fault is | | 3796 | * is a genuine write fault. If yes then the write fault is |
3798 | * our fault as we did not reflect the write access in the | | 3797 | * our fault as we did not reflect the write access in the |
3799 | * PTE. Now that we know a write has occurred, we can correct this | | 3798 | * PTE. Now that we know a write has occurred, we can correct this
3800 | * and also set the modified bit | | 3799 | * and also set the modified bit |
3801 | */ | | 3800 | */ |
3802 | if ((pv->pv_flags & PVF_WRITE) == 0) { | | 3801 | if ((pv->pv_flags & PVF_WRITE) == 0) { |
3803 | simple_unlock(&pg->mdpage.pvh_slock); | | 3802 | simple_unlock(&pg->mdpage.pvh_slock); |
3804 | goto out; | | 3803 | goto out; |
3805 | } | | 3804 | } |
3806 | | | 3805 | |
3807 | NPDEBUG(PDB_FOLLOW, | | 3806 | NPDEBUG(PDB_FOLLOW, |
3808 | printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n", | | 3807 | printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n", |
3809 | pm, va, VM_PAGE_TO_PHYS(pg))); | | 3808 | pm, va, VM_PAGE_TO_PHYS(pg))); |
3810 | | | 3809 | |
3811 | pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD; | | 3810 | pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD; |
3812 | pv->pv_flags |= PVF_REF | PVF_MOD; | | 3811 | pv->pv_flags |= PVF_REF | PVF_MOD; |
3813 | #ifdef PMAP_CACHE_VIPT | | 3812 | #ifdef PMAP_CACHE_VIPT |
3814 | /* | | 3813 | /* |
3815 | * If there are cacheable mappings for this page, mark it dirty. | | 3814 | * If there are cacheable mappings for this page, mark it dirty. |
3816 | */ | | 3815 | */ |
3817 | if ((pg->mdpage.pvh_attrs & PVF_NC) == 0) | | 3816 | if ((pg->mdpage.pvh_attrs & PVF_NC) == 0) |
3818 | pg->mdpage.pvh_attrs |= PVF_DIRTY; | | 3817 | pg->mdpage.pvh_attrs |= PVF_DIRTY; |
3819 | #endif | | 3818 | #endif |
3820 | simple_unlock(&pg->mdpage.pvh_slock); | | 3819 | simple_unlock(&pg->mdpage.pvh_slock); |
3821 | | | 3820 | |
3822 | /* | | 3821 | /* |
3823 | * Re-enable write permissions for the page. No need to call | | 3822 | * Re-enable write permissions for the page. No need to call |
3824 | * pmap_vac_me_harder(), since this is just a | | 3823 | * pmap_vac_me_harder(), since this is just a |
3825 | * modified-emulation fault, and the PVF_WRITE bit isn't | | 3824 | * modified-emulation fault, and the PVF_WRITE bit isn't |
3826 | * changing. We've already set the cacheable bits based on | | 3825 | * changing. We've already set the cacheable bits based on |
3827 | * the assumption that we can write to this page. | | 3826 | * the assumption that we can write to this page. |
3828 | */ | | 3827 | */ |
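| | | | /* Convert the PTE to a valid small page and grant hardware write access. */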
3829 | *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W; | | 3828 | *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W; |
3830 | PTE_SYNC(ptep); | | 3829 | PTE_SYNC(ptep); |
3831 | rv = 1; | | 3830 | rv = 1; |
3832 | } else | | 3831 | } else |
3833 | if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { | | 3832 | if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { |
3834 | /* | | 3833 | /* |
3835 | * This looks like a good candidate for "page referenced" | | 3834 | * This looks like a good candidate for "page referenced" |
3836 | * emulation. | | 3835 | * emulation. |
3837 | */ | | 3836 | */ |
3838 | struct pv_entry *pv; | | 3837 | struct pv_entry *pv; |
3839 | struct vm_page *pg; | | 3838 | struct vm_page *pg; |
3840 | | | 3839 | |
3841 | /* Extract the physical address of the page */ | | 3840 | /* Extract the physical address of the page */ |
3842 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) | | 3841 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) |
3843 | goto out; | | 3842 | goto out; |
3844 | | | 3843 | |
3845 | /* Get the current flags for this page. */ | | 3844 | /* Get the current flags for this page. */ |
3846 | simple_lock(&pg->mdpage.pvh_slock); | | 3845 | simple_lock(&pg->mdpage.pvh_slock); |
3847 | | | 3846 | |
3848 | pv = pmap_find_pv(pg, pm, va); | | 3847 | pv = pmap_find_pv(pg, pm, va); |
3849 | if (pv == NULL) { | | 3848 | if (pv == NULL) { |
3850 | simple_unlock(&pg->mdpage.pvh_slock); | | 3849 | simple_unlock(&pg->mdpage.pvh_slock); |
3851 | goto out; | | 3850 | goto out; |
3852 | } | | 3851 | } |
3853 | | | 3852 | |
3854 | pg->mdpage.pvh_attrs |= PVF_REF; | | 3853 | pg->mdpage.pvh_attrs |= PVF_REF; |
3855 | pv->pv_flags |= PVF_REF; | | 3854 | pv->pv_flags |= PVF_REF; |
3856 | simple_unlock(&pg->mdpage.pvh_slock); | | 3855 | simple_unlock(&pg->mdpage.pvh_slock); |
3857 | | | 3856 | |
3858 | NPDEBUG(PDB_FOLLOW, | | 3857 | NPDEBUG(PDB_FOLLOW, |
3859 | printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n", | | 3858 | printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n", |
3860 | pm, va, VM_PAGE_TO_PHYS(pg))); | | 3859 | pm, va, VM_PAGE_TO_PHYS(pg))); |
3861 | | | 3860 | |
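| | | | /* Make the PTE a valid small page; protection bits are unchanged. */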
3862 | *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO; | | 3861 | *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO; |
3863 | PTE_SYNC(ptep); | | 3862 | PTE_SYNC(ptep); |
3864 | rv = 1; | | 3863 | rv = 1; |
3865 | } | | 3864 | } |
3866 | | | 3865 | |
3867 | /* | | 3866 | /* |
3868 | * We know there is a valid mapping here, so simply | | 3867 | * We know there is a valid mapping here, so simply |
3869 | * fix up the L1 if necessary. | | 3868 | * fix up the L1 if necessary. |
3870 | */ | | 3869 | */ |
3871 | pl1pd = &pm->pm_l1->l1_kva[l1idx]; | | 3870 | pl1pd = &pm->pm_l1->l1_kva[l1idx]; |
3872 | l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; | | 3871 | l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; |
3873 | if (*pl1pd != l1pd) { | | 3872 | if (*pl1pd != l1pd) { |
3874 | *pl1pd = l1pd; | | 3873 | *pl1pd = l1pd; |
3875 | PTE_SYNC(pl1pd); | | 3874 | PTE_SYNC(pl1pd); |
3876 | rv = 1; | | 3875 | rv = 1; |
3877 | } | | 3876 | } |
3878 | | | 3877 | |
3879 | #ifdef CPU_SA110 | | 3878 | #ifdef CPU_SA110 |
3880 | /* | | 3879 | /* |
3881 | * There are bugs in the rev K SA110. This is a check for one | | 3880 | * There are bugs in the rev K SA110. This is a check for one |
3882 | * of them. | | 3881 | * of them. |
3883 | */ | | 3882 | */ |
3884 | if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && | | 3883 | if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && |
3885 | curcpu()->ci_arm_cpurev < 3) { | | 3884 | curcpu()->ci_arm_cpurev < 3) { |
3886 | /* Always current pmap */ | | 3885 | /* Always current pmap */ |
3887 | if (l2pte_valid(pte)) { | | 3886 | if (l2pte_valid(pte)) { |
3888 | extern int kernel_debug; | | 3887 | extern int kernel_debug; |
3889 | if (kernel_debug & 1) { | | 3888 | if (kernel_debug & 1) { |
3890 | struct proc *p = curlwp->l_proc; | | 3889 | struct proc *p = curlwp->l_proc; |
3891 | printf("prefetch_abort: page is already " | | 3890 | printf("prefetch_abort: page is already " |
3892 | "mapped - pte=%p *pte=%08x\n", ptep, pte); | | 3891 | "mapped - pte=%p *pte=%08x\n", ptep, pte); |
3893 | printf("prefetch_abort: pc=%08lx proc=%p " | | 3892 | printf("prefetch_abort: pc=%08lx proc=%p " |
3894 | "process=%s\n", va, p, p->p_comm); | | 3893 | "process=%s\n", va, p, p->p_comm); |
3895 | printf("prefetch_abort: far=%08x fs=%x\n", | | 3894 | printf("prefetch_abort: far=%08x fs=%x\n", |
3896 | cpu_faultaddress(), cpu_faultstatus()); | | 3895 | cpu_faultaddress(), cpu_faultstatus()); |
3897 | } | | 3896 | } |
3898 | #ifdef DDB | | 3897 | #ifdef DDB |
3899 | if (kernel_debug & 2) | | 3898 | if (kernel_debug & 2) |
3900 | Debugger(); | | 3899 | Debugger(); |
3901 | #endif | | 3900 | #endif |
3902 | rv = 1; | | 3901 | rv = 1; |
3903 | } | | 3902 | } |
3904 | } | | 3903 | } |
3905 | #endif /* CPU_SA110 */ | | 3904 | #endif /* CPU_SA110 */ |
3906 | | | 3905 | |
3907 | #ifdef DEBUG | | 3906 | #ifdef DEBUG |
3908 | /* | | 3907 | /* |
3909 | * If 'rv == 0' at this point, it generally indicates that there is a | | 3908 | * If 'rv == 0' at this point, it generally indicates that there is a |
3910 | * stale TLB entry for the faulting address. This happens when two or | | 3909 | * stale TLB entry for the faulting address. This happens when two or |
3911 | * more processes are sharing an L1. Since we don't flush the TLB on | | 3910 | * more processes are sharing an L1. Since we don't flush the TLB on |
3912 | * a context switch between such processes, we can take domain faults | | 3911 | * a context switch between such processes, we can take domain faults |
3913 | * for mappings which exist at the same VA in both processes. EVEN IF | | 3912 | * for mappings which exist at the same VA in both processes. EVEN IF |
3914 | * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for | | 3913 | * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for |
3915 | * example. | | 3914 | * example. |
3916 | * | | 3915 | * |
3917 | * This is extremely likely to happen if pmap_enter() updated the L1 | | 3916 | * This is extremely likely to happen if pmap_enter() updated the L1 |
3918 | * entry for a recently entered mapping. In this case, the TLB is | | 3917 | * entry for a recently entered mapping. In this case, the TLB is |
3919 | * flushed for the new mapping, but there may still be TLB entries for | | 3918 | * flushed for the new mapping, but there may still be TLB entries for |
3920 | * other mappings belonging to other processes in the 1MB range | | 3919 | * other mappings belonging to other processes in the 1MB range |
3921 | * covered by the L1 entry. | | 3920 | * covered by the L1 entry. |
3922 | * | | 3921 | * |
3923 | * Since 'rv == 0', we know that the L1 already contains the correct | | 3922 | * Since 'rv == 0', we know that the L1 already contains the correct |
3924 | * value, so the fault must be due to a stale TLB entry. | | 3923 | * value, so the fault must be due to a stale TLB entry. |
3925 | * | | 3924 | * |
3926 | * Since we always need to flush the TLB anyway in the case where we | | 3925 | * Since we always need to flush the TLB anyway in the case where we |
3927 | * fixed up the L1, or frobbed the L2 PTE, we effectively deal with | | 3926 | * fixed up the L1, or frobbed the L2 PTE, we effectively deal with |
3928 | * stale TLB entries dynamically. | | 3927 | * stale TLB entries dynamically. |
3929 | * | | 3928 | * |
3930 | * However, the above condition can ONLY happen if the current L1 is | | 3929 | * However, the above condition can ONLY happen if the current L1 is |
3931 | * being shared. If it happens when the L1 is unshared, it indicates | | 3930 | * being shared. If it happens when the L1 is unshared, it indicates |
3932 | * that other parts of the pmap are not doing their job WRT managing | | 3931 | * that other parts of the pmap are not doing their job WRT managing |
3933 | * the TLB. | | 3932 | * the TLB. |
3934 | */ | | 3933 | */ |
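| | | | /* |
| | | |  * Hypothetical instance of the above: processes A and B share an L1. |
| | | |  * pmap_enter() on B's behalf rewrote the shared L1 slot and flushed |
| | | |  * B's new VA, but a TLB entry loaded while A ran still covers another |
| | | |  * page in the same 1MB section. When B touches that page it takes a |
| | | |  * domain fault, the L1 is found correct (so rv == 0 here), and only |
| | | |  * the unconditional cpu_tlb_flushID_SE() below evicts the stale entry. |
| | | |  */ |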
3935 | if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { | | 3934 | if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { |
3936 | extern int last_fault_code; | | 3935 | extern int last_fault_code; |
3937 | printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", | | 3936 | printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", |
3938 | pm, va, ftype); | | 3937 | pm, va, ftype); |
3939 | printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", | | 3938 | printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", |
3940 | l2, l2b, ptep, pl1pd); | | 3939 | l2, l2b, ptep, pl1pd); |
3941 | printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", | | 3940 | printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", |
3942 | pte, l1pd, last_fault_code); | | 3941 | pte, l1pd, last_fault_code); |
3943 | #ifdef DDB | | 3942 | #ifdef DDB |
3944 | Debugger(); | | 3943 | Debugger(); |
3945 | #endif | | 3944 | #endif |
3946 | } | | 3945 | } |
3947 | #endif | | 3946 | #endif |
3948 | | | 3947 | |
3949 | cpu_tlb_flushID_SE(va); | | 3948 | cpu_tlb_flushID_SE(va); |
3950 | cpu_cpwait(); | | 3949 | cpu_cpwait(); |
3951 | | | 3950 | |
3952 | rv = 1; | | 3951 | rv = 1; |
3953 | | | 3952 | |
3954 | out: | | 3953 | out: |
3955 | pmap_release_pmap_lock(pm); | | 3954 | pmap_release_pmap_lock(pm); |
3956 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 3955 | PMAP_MAP_TO_HEAD_UNLOCK(); |
3957 | | | 3956 | |
3958 | return (rv); | | 3957 | return (rv); |
3959 | } | | 3958 | } |
3960 | | | 3959 | |
3961 | /* | | 3960 | /* |
3962 | * pmap_collect: free resources held by a pmap | | 3961 | * pmap_collect: free resources held by a pmap |
3963 | * | | 3962 | * |
3964 | * => optional function. | | 3963 | * => optional function. |
3965 | * => called when a process is swapped out to free memory. | | 3964 | * => called when a process is swapped out to free memory. |
3966 | */ | | 3965 | */ |
3967 | void | | 3966 | void |
3968 | pmap_collect(pmap_t pm) | | 3967 | pmap_collect(pmap_t pm) |
3969 | { | | 3968 | { |
3970 | | | 3969 | |
3971 | #ifdef PMAP_CACHE_VIVT | | 3970 | #ifdef PMAP_CACHE_VIVT |
3972 | pmap_idcache_wbinv_all(pm); | | 3971 | pmap_idcache_wbinv_all(pm); |
3973 | #endif | | 3972 | #endif |
3974 | pm->pm_remove_all = true; | | 3973 | pm->pm_remove_all = true; |
3975 | pmap_do_remove(pm, VM_MIN_ADDRESS, VM_MAX_ADDRESS, 1); | | 3974 | pmap_do_remove(pm, VM_MIN_ADDRESS, VM_MAX_ADDRESS, 1); |
3976 | pmap_update(pm); | | 3975 | pmap_update(pm); |
3977 | PMAPCOUNT(collects); | | 3976 | PMAPCOUNT(collects); |
3978 | } | | 3977 | } |
3979 | | | 3978 | |
3980 | /* | | 3979 | /* |
3981 | * Routine: pmap_procwr | | 3980 | * Routine: pmap_procwr |
3982 | * | | 3981 | * |
3983 | * Function: | | 3982 | * Function: |
3984 |  * Synchronize the caches corresponding to [va, va+len) in process p. | | 3983 |  * Synchronize the caches corresponding to [va, va+len) in process p. |
3985 | * | | 3984 | * |
3986 | */ | | 3985 | */ |
3987 | void | | 3986 | void |
3988 | pmap_procwr(struct proc *p, vaddr_t va, int len) | | 3987 | pmap_procwr(struct proc *p, vaddr_t va, int len) |
3989 | { | | 3988 | { |
3990 | /* We only need to do anything if it is the current process. */ | | 3989 | /* We only need to do anything if it is the current process. */ |
3991 | if (p == curproc) | | 3990 | if (p == curproc) |
3992 | cpu_icache_sync_range(va, len); | | 3991 | cpu_icache_sync_range(va, len); |
3993 | } | | 3992 | } |
3994 | | | 3993 | |
3995 | /* | | 3994 | /* |
3996 | * Routine: pmap_unwire | | 3995 | * Routine: pmap_unwire |
3997 | * Function: Clear the wired attribute for a map/virtual-address pair. | | 3996 | * Function: Clear the wired attribute for a map/virtual-address pair. |
3998 | * | | 3997 | * |
3999 | * In/out conditions: | | 3998 | * In/out conditions: |
4000 | * The mapping must already exist in the pmap. | | 3999 | * The mapping must already exist in the pmap. |
4001 | */ | | 4000 | */ |
4002 | void | | 4001 | void |
4003 | pmap_unwire(pmap_t pm, vaddr_t va) | | 4002 | pmap_unwire(pmap_t pm, vaddr_t va) |
4004 | { | | 4003 | { |
4005 | struct l2_bucket *l2b; | | 4004 | struct l2_bucket *l2b; |
4006 | pt_entry_t *ptep, pte; | | 4005 | pt_entry_t *ptep, pte; |
4007 | struct vm_page *pg; | | 4006 | struct vm_page *pg; |
4008 | paddr_t pa; | | 4007 | paddr_t pa; |
4009 | | | 4008 | |
4010 | NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va)); | | 4009 | NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va)); |
4011 | | | 4010 | |
4012 | PMAP_MAP_TO_HEAD_LOCK(); | | 4011 | PMAP_MAP_TO_HEAD_LOCK(); |
4013 | pmap_acquire_pmap_lock(pm); | | 4012 | pmap_acquire_pmap_lock(pm); |
4014 | | | 4013 | |
4015 | l2b = pmap_get_l2_bucket(pm, va); | | 4014 | l2b = pmap_get_l2_bucket(pm, va); |
4016 | KDASSERT(l2b != NULL); | | 4015 | KDASSERT(l2b != NULL); |
4017 | | | 4016 | |
4018 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 4017 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
4019 | pte = *ptep; | | 4018 | pte = *ptep; |
4020 | | | 4019 | |
4021 | /* Extract the physical address of the page */ | | 4020 | /* Extract the physical address of the page */ |
4022 | pa = l2pte_pa(pte); | | 4021 | pa = l2pte_pa(pte); |
4023 | | | 4022 | |
4024 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { | | 4023 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { |
4025 | /* Update the wired bit in the pv entry for this page. */ | | 4024 | /* Update the wired bit in the pv entry for this page. */ |
4026 | simple_lock(&pg->mdpage.pvh_slock); | | 4025 | simple_lock(&pg->mdpage.pvh_slock); |
4027 | (void) pmap_modify_pv(pg, pm, va, PVF_WIRED, 0); | | 4026 | (void) pmap_modify_pv(pg, pm, va, PVF_WIRED, 0); |
4028 | simple_unlock(&pg->mdpage.pvh_slock); | | 4027 | simple_unlock(&pg->mdpage.pvh_slock); |
4029 | } | | 4028 | } |
4030 | | | 4029 | |
4031 | pmap_release_pmap_lock(pm); | | 4030 | pmap_release_pmap_lock(pm); |
4032 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 4031 | PMAP_MAP_TO_HEAD_UNLOCK(); |
4033 | } | | 4032 | } |
4034 | | | 4033 | |
4035 | void | | 4034 | void |
4036 | pmap_activate(struct lwp *l) | | 4035 | pmap_activate(struct lwp *l) |
4037 | { | | 4036 | { |
4038 | extern int block_userspace_access; | | 4037 | extern int block_userspace_access; |
4039 | pmap_t opm, npm, rpm; | | 4038 | pmap_t opm, npm, rpm; |
4040 | uint32_t odacr, ndacr; | | 4039 | uint32_t odacr, ndacr; |
4041 | int oldirqstate; | | 4040 | int oldirqstate; |
4042 | | | 4041 | |
4043 | /* | | 4042 | /* |
4044 |  * If we're activating an lwp other than the current one, or the | | 4043 |  * If we're activating an lwp other than the current one, or the |
4045 |  * current lwp's pmap is already active, just return. | | 4044 |  * current lwp's pmap is already active, just return. |
4046 | */ | | 4045 | */ |
4047 | if (l != curlwp || | | 4046 | if (l != curlwp || |
4048 | l->l_proc->p_vmspace->vm_map.pmap->pm_activated == true) | | 4047 | l->l_proc->p_vmspace->vm_map.pmap->pm_activated == true) |
4049 | return; | | 4048 | return; |
4050 | | | 4049 | |
4051 | npm = l->l_proc->p_vmspace->vm_map.pmap; | | 4050 | npm = l->l_proc->p_vmspace->vm_map.pmap; |
4052 | ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | | | 4051 | ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | |
4053 | (DOMAIN_CLIENT << (npm->pm_domain * 2)); | | 4052 | (DOMAIN_CLIENT << (npm->pm_domain * 2)); |
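| | | | /* |
| | | |  * DACR layout refresher: two bits per domain, DOMAIN_CLIENT == 01. |
| | | |  * For example, with PMAP_DOMAIN_KERNEL == 0 and a pmap owning |
| | | |  * domain 5: |
| | | |  * |
| | | |  *    ndacr = (1 << 0) | (1 << 10) = 0x00000401 |
| | | |  * |
| | | |  * i.e. client access for the kernel's and the new pmap's domains; |
| | | |  * every other domain faults, which is what drives pmap_fault_fixup(). |
| | | |  */ |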
4054 | | | 4053 | |
4055 | /* | | 4054 | /* |
4056 | * If TTB and DACR are unchanged, short-circuit all the | | 4055 | * If TTB and DACR are unchanged, short-circuit all the |
4057 | * TLB/cache management stuff. | | 4056 | * TLB/cache management stuff. |
4058 | */ | | 4057 | */ |
4059 | if (pmap_previous_active_lwp != NULL) { | | 4058 | if (pmap_previous_active_lwp != NULL) { |
4060 | opm = pmap_previous_active_lwp->l_proc->p_vmspace->vm_map.pmap; | | 4059 | opm = pmap_previous_active_lwp->l_proc->p_vmspace->vm_map.pmap; |
4061 | odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | | | 4060 | odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | |
4062 | (DOMAIN_CLIENT << (opm->pm_domain * 2)); | | 4061 | (DOMAIN_CLIENT << (opm->pm_domain * 2)); |
4063 | | | 4062 | |
4064 | if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr) | | 4063 | if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr) |
4065 | goto all_done; | | 4064 | goto all_done; |
4066 | } else | | 4065 | } else |
4067 | opm = NULL; | | 4066 | opm = NULL; |
4068 | | | 4067 | |
4069 | PMAPCOUNT(activations); | | 4068 | PMAPCOUNT(activations); |
4070 | block_userspace_access = 1; | | 4069 | block_userspace_access = 1; |
4071 | | | 4070 | |
4072 | /* | | 4071 | /* |
4073 | * If switching to a user vmspace which is different to the | | 4072 | * If switching to a user vmspace which is different to the |
4074 | * most recent one, and the most recent one is potentially | | 4073 | * most recent one, and the most recent one is potentially |
4075 | * live in the cache, we must write-back and invalidate the | | 4074 | * live in the cache, we must write-back and invalidate the |
4076 | * entire cache. | | 4075 | * entire cache. |
4077 | */ | | 4076 | */ |
4078 | rpm = pmap_recent_user; | | 4077 | rpm = pmap_recent_user; |
| | | 4078 | |
| | | 4079 | /* |
| | | 4080 | * XXXSCW: There's a corner case here which can leave turds in the cache as |
| | | 4081 | * reported in kern/41058. They're probably left over during tear-down and |
| | | 4082 | * switching away from an exiting process. Until the root cause is identified |
| | | 4083 | * and fixed, zap the cache when switching pmaps. This will result in a few |
| | | 4084 | * unnecessary cache flushes, but that's better than silently corrupting data. |
| | | 4085 | */ |
| | | 4086 | #if 0 |
4079 | if (npm != pmap_kernel() && rpm && npm != rpm && | | 4087 | if (npm != pmap_kernel() && rpm && npm != rpm && |
4080 | rpm->pm_cstate.cs_cache) { | | 4088 | rpm->pm_cstate.cs_cache) { |
4081 | rpm->pm_cstate.cs_cache = 0; | | 4089 | rpm->pm_cstate.cs_cache = 0; |
4082 | #ifdef PMAP_CACHE_VIVT | | 4090 | #ifdef PMAP_CACHE_VIVT |
4083 | cpu_idcache_wbinv_all(); | | 4091 | cpu_idcache_wbinv_all(); |
4084 | #endif | | 4092 | #endif |
4085 | } | | 4093 | } |
| | | 4094 | #else |
| | | 4095 | if (rpm) { |
| | | 4096 | rpm->pm_cstate.cs_cache = 0; |
| | | 4097 | if (npm == pmap_kernel()) |
| | | 4098 | pmap_recent_user = NULL; |
| | | 4099 | #ifdef PMAP_CACHE_VIVT |
| | | 4100 | cpu_idcache_wbinv_all(); |
| | | 4101 | #endif |
| | | 4102 | } |
| | | 4103 | #endif |
4086 | | | 4104 | |
4087 | /* No interrupts while we frob the TTB/DACR */ | | 4105 | /* No interrupts while we frob the TTB/DACR */ |
4088 | oldirqstate = disable_interrupts(IF32_bits); | | 4106 | oldirqstate = disable_interrupts(IF32_bits); |
4089 | | | 4107 | |
4090 | /* | | 4108 | /* |
4091 | * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1 | | 4109 | * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1 |
4092 | * entry corresponding to 'vector_page' in the incoming L1 table | | 4110 | * entry corresponding to 'vector_page' in the incoming L1 table |
4093 |  * before switching to it, otherwise subsequent interrupts/exceptions | | 4111 |  * before switching to it, otherwise subsequent interrupts/exceptions |
4094 | * (including domain faults!) will jump into hyperspace. | | 4112 | * (including domain faults!) will jump into hyperspace. |
4095 | */ | | 4113 | */ |
4096 | if (npm->pm_pl1vec != NULL) { | | 4114 | if (npm->pm_pl1vec != NULL) { |
4097 | cpu_tlb_flushID_SE((u_int)vector_page); | | 4115 | cpu_tlb_flushID_SE((u_int)vector_page); |
4098 | cpu_cpwait(); | | 4116 | cpu_cpwait(); |
4099 | *npm->pm_pl1vec = npm->pm_l1vec; | | 4117 | *npm->pm_pl1vec = npm->pm_l1vec; |
4100 | PTE_SYNC(npm->pm_pl1vec); | | 4118 | PTE_SYNC(npm->pm_pl1vec); |
4101 | } | | 4119 | } |
4102 | | | 4120 | |
4103 | cpu_domains(ndacr); | | 4121 | cpu_domains(ndacr); |
4104 | | | 4122 | |
4105 | if (npm == pmap_kernel() || npm == rpm) { | | 4123 | if (npm == pmap_kernel() || npm == rpm) { |
4106 | /* | | 4124 | /* |
4107 | * Switching to a kernel thread, or back to the | | 4125 | * Switching to a kernel thread, or back to the |
4108 | * same user vmspace as before... Simply update | | 4126 | * same user vmspace as before... Simply update |
4109 | * the TTB (no TLB flush required) | | 4127 | * the TTB (no TLB flush required) |
4110 | */ | | 4128 | */ |
4111 | __asm volatile("mcr p15, 0, %0, c2, c0, 0" :: | | 4129 | __asm volatile("mcr p15, 0, %0, c2, c0, 0" :: |
4112 | "r"(npm->pm_l1->l1_physaddr)); | | 4130 | "r"(npm->pm_l1->l1_physaddr)); |
4113 | cpu_cpwait(); | | 4131 | cpu_cpwait(); |
4114 | } else { | | 4132 | } else { |
4115 | /* | | 4133 | /* |
4116 | * Otherwise, update TTB and flush TLB | | 4134 | * Otherwise, update TTB and flush TLB |
4117 | */ | | 4135 | */ |
4118 | cpu_context_switch(npm->pm_l1->l1_physaddr); | | 4136 | cpu_context_switch(npm->pm_l1->l1_physaddr); |
4119 | if (rpm != NULL) | | 4137 | if (rpm != NULL) |
4120 | rpm->pm_cstate.cs_tlb = 0; | | 4138 | rpm->pm_cstate.cs_tlb = 0; |
4121 | } | | 4139 | } |
4122 | | | 4140 | |
4123 | restore_interrupts(oldirqstate); | | 4141 | restore_interrupts(oldirqstate); |
4124 | | | 4142 | |
4125 | block_userspace_access = 0; | | 4143 | block_userspace_access = 0; |
4126 | | | 4144 | |
4127 | all_done: | | 4145 | all_done: |
4128 | /* | | 4146 | /* |
4129 | * The new pmap is resident. Make sure it's marked | | 4147 | * The new pmap is resident. Make sure it's marked |
4130 | * as resident in the cache/TLB. | | 4148 | * as resident in the cache/TLB. |
4131 | */ | | 4149 | */ |
4132 | npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; | | 4150 | npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; |
4133 | if (npm != pmap_kernel()) | | 4151 | if (npm != pmap_kernel()) |
4134 | pmap_recent_user = npm; | | 4152 | pmap_recent_user = npm; |
4135 | | | 4153 | |
4136 | /* The old pmap is no longer active */ | | 4154 | /* The old pmap is no longer active */ |
4137 | if (opm != NULL) | | 4155 | if (opm != NULL) |
4138 | opm->pm_activated = false; | | 4156 | opm->pm_activated = false; |
4139 | | | 4157 | |
4140 | /* But the new one is */ | | 4158 | /* But the new one is */ |
4141 | npm->pm_activated = true; | | 4159 | npm->pm_activated = true; |
4142 | } | | 4160 | } |
4143 | | | 4161 | |
4144 | void | | 4162 | void |
4145 | pmap_deactivate(struct lwp *l) | | 4163 | pmap_deactivate(struct lwp *l) |
4146 | { | | 4164 | { |
4147 | | | 4165 | |
4148 | /* | | 4166 | /* |
4149 | * If the process is exiting, make sure pmap_activate() does | | 4167 | * If the process is exiting, make sure pmap_activate() does |
4150 | * a full MMU context-switch and cache flush, which we might | | 4168 | * a full MMU context-switch and cache flush, which we might |
4151 | * otherwise skip. See PR port-arm/38950. | | 4169 | * otherwise skip. See PR port-arm/38950. |
4152 | */ | | 4170 | */ |
4153 | if (l->l_proc->p_sflag & PS_WEXIT) | | 4171 | if (l->l_proc->p_sflag & PS_WEXIT) |
4154 | pmap_previous_active_lwp = NULL; | | 4172 | pmap_previous_active_lwp = NULL; |
4155 | | | 4173 | |
4156 | l->l_proc->p_vmspace->vm_map.pmap->pm_activated = false; | | 4174 | l->l_proc->p_vmspace->vm_map.pmap->pm_activated = false; |
4157 | } | | 4175 | } |
4158 | | | 4176 | |
4159 | void | | 4177 | void |
4160 | pmap_update(pmap_t pm) | | 4178 | pmap_update(pmap_t pm) |
4161 | { | | 4179 | { |
4162 | | | 4180 | |
4163 | if (pm->pm_remove_all) { | | 4181 | if (pm->pm_remove_all) { |
4164 | /* | | 4182 | /* |
4165 | * Finish up the pmap_remove_all() optimisation by flushing | | 4183 | * Finish up the pmap_remove_all() optimisation by flushing |
4166 | * the TLB. | | 4184 | * the TLB. |
4167 | */ | | 4185 | */ |
4168 | pmap_tlb_flushID(pm); | | 4186 | pmap_tlb_flushID(pm); |
4169 | pm->pm_remove_all = false; | | 4187 | pm->pm_remove_all = false; |
4170 | } | | 4188 | } |
4171 | | | 4189 | |
4172 | if (pmap_is_current(pm)) { | | 4190 | if (pmap_is_current(pm)) { |
4173 | /* | | 4191 | /* |
4174 | * If we're dealing with a current userland pmap, move its L1 | | 4192 | * If we're dealing with a current userland pmap, move its L1 |
4175 | * to the end of the LRU. | | 4193 | * to the end of the LRU. |
4176 | */ | | 4194 | */ |
4177 | if (pm != pmap_kernel()) | | 4195 | if (pm != pmap_kernel()) |
4178 | pmap_use_l1(pm); | | 4196 | pmap_use_l1(pm); |
4179 | | | 4197 | |
4180 | /* | | 4198 | /* |
4181 | * We can assume we're done with frobbing the cache/tlb for | | 4199 | * We can assume we're done with frobbing the cache/tlb for |
4182 | * now. Make sure any future pmap ops don't skip cache/tlb | | 4200 | * now. Make sure any future pmap ops don't skip cache/tlb |
4183 | * flushes. | | 4201 | * flushes. |
4184 | */ | | 4202 | */ |
4185 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; | | 4203 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; |
4186 | } | | 4204 | } |
4187 | | | 4205 | |
4188 | PMAPCOUNT(updates); | | 4206 | PMAPCOUNT(updates); |
4189 | | | 4207 | |
4190 | /* | | 4208 | /* |
4191 | * make sure TLB/cache operations have completed. | | 4209 | * make sure TLB/cache operations have completed. |
4192 | */ | | 4210 | */ |
4193 | cpu_cpwait(); | | 4211 | cpu_cpwait(); |
4194 | } | | 4212 | } |
4195 | | | 4213 | |
4196 | void | | 4214 | void |
4197 | pmap_remove_all(pmap_t pm) | | 4215 | pmap_remove_all(pmap_t pm) |
4198 | { | | 4216 | { |
4199 | | | 4217 | |
4200 | /* | | 4218 | /* |
4201 | * The vmspace described by this pmap is about to be torn down. | | 4219 | * The vmspace described by this pmap is about to be torn down. |
4202 | * Until pmap_update() is called, UVM will only make calls | | 4220 | * Until pmap_update() is called, UVM will only make calls |
4203 | * to pmap_remove(). We can make life much simpler by flushing | | 4221 | * to pmap_remove(). We can make life much simpler by flushing |
4204 | * the cache now, and deferring TLB invalidation to pmap_update(). | | 4222 | * the cache now, and deferring TLB invalidation to pmap_update(). |
4205 | */ | | 4223 | */ |
4206 | #ifdef PMAP_CACHE_VIVT | | 4224 | #ifdef PMAP_CACHE_VIVT |
4207 | pmap_idcache_wbinv_all(pm); | | 4225 | pmap_idcache_wbinv_all(pm); |
4208 | #endif | | 4226 | #endif |
4209 | pm->pm_remove_all = true; | | 4227 | pm->pm_remove_all = true; |
4210 | } | | 4228 | } |
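| | | | /* |
| | | |  * The teardown sequence this enables then looks roughly like: |
| | | |  * |
| | | |  *    pmap_remove_all(pm);        - wbinv the cache, set pm_remove_all |
| | | |  *    pmap_remove(pm, sva, eva);  - repeated by UVM; cache ops skipped |
| | | |  *    pmap_update(pm);            - one deferred pmap_tlb_flushID() |
| | | |  */ |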
4211 | | | 4229 | |
4212 | /* | | 4230 | /* |
4213 | * Retire the given physical map from service. | | 4231 | * Retire the given physical map from service. |
4214 | * Should only be called if the map contains no valid mappings. | | 4232 | * Should only be called if the map contains no valid mappings. |
4215 | */ | | 4233 | */ |
4216 | void | | 4234 | void |
4217 | pmap_destroy(pmap_t pm) | | 4235 | pmap_destroy(pmap_t pm) |
4218 | { | | 4236 | { |
4219 | u_int count; | | 4237 | u_int count; |
4220 | | | 4238 | |
4221 | if (pm == NULL) | | 4239 | if (pm == NULL) |
4222 | return; | | 4240 | return; |
4223 | | | 4241 | |
4224 | if (pm->pm_remove_all) { | | 4242 | if (pm->pm_remove_all) { |
4225 | pmap_tlb_flushID(pm); | | 4243 | pmap_tlb_flushID(pm); |
4226 | pm->pm_remove_all = false; | | 4244 | pm->pm_remove_all = false; |
4227 | } | | 4245 | } |
4228 | | | 4246 | |
4229 | /* | | 4247 | /* |
4230 | * Drop reference count | | 4248 | * Drop reference count |
4231 | */ | | 4249 | */ |
4232 | mutex_enter(&pm->pm_lock); | | 4250 | mutex_enter(&pm->pm_lock); |
4233 | count = --pm->pm_obj.uo_refs; | | 4251 | count = --pm->pm_obj.uo_refs; |
4234 | mutex_exit(&pm->pm_lock); | | 4252 | mutex_exit(&pm->pm_lock); |
4235 | if (count > 0) { | | 4253 | if (count > 0) { |
4236 | if (pmap_is_current(pm)) { | | 4254 | if (pmap_is_current(pm)) { |
4237 | if (pm != pmap_kernel()) | | 4255 | if (pm != pmap_kernel()) |
4238 | pmap_use_l1(pm); | | 4256 | pmap_use_l1(pm); |
4239 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; | | 4257 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; |
4240 | } | | 4258 | } |
4241 | return; | | 4259 | return; |
4242 | } | | 4260 | } |
4243 | | | 4261 | |
4244 | /* | | 4262 | /* |
4245 |  * Reference count is zero: free pmap resources and then free the pmap. | | 4263 |  * Reference count is zero: free pmap resources and then free the pmap. |
4246 | */ | | 4264 | */ |
4247 | | | 4265 | |
4248 | if (vector_page < KERNEL_BASE) { | | 4266 | if (vector_page < KERNEL_BASE) { |
4249 | KDASSERT(!pmap_is_current(pm)); | | 4267 | KDASSERT(!pmap_is_current(pm)); |
4250 | | | 4268 | |
4251 | /* Remove the vector page mapping */ | | 4269 | /* Remove the vector page mapping */ |
4252 | pmap_remove(pm, vector_page, vector_page + PAGE_SIZE); | | 4270 | pmap_remove(pm, vector_page, vector_page + PAGE_SIZE); |
4253 | pmap_update(pm); | | 4271 | pmap_update(pm); |
4254 | } | | 4272 | } |
4255 | | | 4273 | |
4256 | LIST_REMOVE(pm, pm_list); | | 4274 | LIST_REMOVE(pm, pm_list); |
4257 | | | 4275 | |
4258 | pmap_free_l1(pm); | | 4276 | pmap_free_l1(pm); |
4259 | | | 4277 | |
4260 | if (pmap_recent_user == pm) | | 4278 | if (pmap_recent_user == pm) |
4261 | pmap_recent_user = NULL; | | 4279 | pmap_recent_user = NULL; |
4262 | | | 4280 | |
4263 | UVM_OBJ_DESTROY(&pm->pm_obj); | | 4281 | UVM_OBJ_DESTROY(&pm->pm_obj); |
4264 | | | 4282 | |
4265 | /* return the pmap to the pool */ | | 4283 | /* return the pmap to the pool */ |
4266 | pool_cache_put(&pmap_cache, pm); | | 4284 | pool_cache_put(&pmap_cache, pm); |
4267 | } | | 4285 | } |
4268 | | | 4286 | |
4269 | | | 4287 | |
4270 | /* | | 4288 | /* |
4271 | * void pmap_reference(pmap_t pm) | | 4289 | * void pmap_reference(pmap_t pm) |
4272 | * | | 4290 | * |
4273 | * Add a reference to the specified pmap. | | 4291 | * Add a reference to the specified pmap. |
4274 | */ | | 4292 | */ |
4275 | void | | 4293 | void |
4276 | pmap_reference(pmap_t pm) | | 4294 | pmap_reference(pmap_t pm) |
4277 | { | | 4295 | { |
4278 | | | 4296 | |
4279 | if (pm == NULL) | | 4297 | if (pm == NULL) |
4280 | return; | | 4298 | return; |
4281 | | | 4299 | |
4282 | pmap_use_l1(pm); | | 4300 | pmap_use_l1(pm); |
4283 | | | 4301 | |
4284 | mutex_enter(&pm->pm_lock); | | 4302 | mutex_enter(&pm->pm_lock); |
4285 | pm->pm_obj.uo_refs++; | | 4303 | pm->pm_obj.uo_refs++; |
4286 | mutex_exit(&pm->pm_lock); | | 4304 | mutex_exit(&pm->pm_lock); |
4287 | } | | 4305 | } |
4288 | | | 4306 | |
4289 | #if ARM_MMU_V6 > 0 | | 4307 | #if ARM_MMU_V6 > 0 |
4290 | | | 4308 | |
4291 | static struct evcnt pmap_prefer_nochange_ev = | | 4309 | static struct evcnt pmap_prefer_nochange_ev = |
4292 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange"); | | 4310 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange"); |
4293 | static struct evcnt pmap_prefer_change_ev = | | 4311 | static struct evcnt pmap_prefer_change_ev = |
4294 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change"); | | 4312 | EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change"); |
4295 | | | 4313 | |
4296 | EVCNT_ATTACH_STATIC(pmap_prefer_change_ev); | | 4314 | EVCNT_ATTACH_STATIC(pmap_prefer_change_ev); |
4297 | EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev); | | 4315 | EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev); |
4298 | | | 4316 | |
4299 | void | | 4317 | void |
4300 | pmap_prefer(vaddr_t hint, vaddr_t *vap, int td) | | 4318 | pmap_prefer(vaddr_t hint, vaddr_t *vap, int td) |
4301 | { | | 4319 | { |
4302 | vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1); | | 4320 | vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1); |
4303 | vaddr_t va = *vap; | | 4321 | vaddr_t va = *vap; |
4304 | vaddr_t diff = (hint - va) & mask; | | 4322 | vaddr_t diff = (hint - va) & mask; |
4305 | if (diff == 0) { | | 4323 | if (diff == 0) { |
4306 | pmap_prefer_nochange_ev.ev_count++; | | 4324 | pmap_prefer_nochange_ev.ev_count++; |
4307 | } else { | | 4325 | } else { |
4308 | pmap_prefer_change_ev.ev_count++; | | 4326 | pmap_prefer_change_ev.ev_count++; |
4309 | if (__predict_false(td)) | | 4327 | if (__predict_false(td)) |
4310 | va -= mask + 1; | | 4328 | va -= mask + 1; |
4311 | *vap = va + diff; | | 4329 | *vap = va + diff; |
4312 | } | | 4330 | } |
4313 | } | | 4331 | } |
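| | | | /* |
| | | |  * Worked example with hypothetical values: assume 4KB pages and |
| | | |  * arm_cache_prefer_mask == 0x3000, so mask == 0x3fff. Then for |
| | | |  * hint == 0x00805000 and *vap == 0x00c23000: |
| | | |  * |
| | | |  *    diff = (0x00805000 - 0x00c23000) & 0x3fff = 0x2000 |
| | | |  *    *vap = 0x00c23000 + 0x2000 = 0x00c25000 |
| | | |  * |
| | | |  * which shares a cache color with the hint (both 0x1000 mod 0x4000). |
| | | |  * With td set, va is first backed off by mask + 1 so the adjusted |
| | | |  * address never exceeds the original. |
| | | |  */ |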
4314 | #endif /* ARM_MMU_V6 */ | | 4332 | #endif /* ARM_MMU_V6 */ |
4315 | | | 4333 | |
4316 | /* | | 4334 | /* |
4317 | * pmap_zero_page() | | 4335 | * pmap_zero_page() |
4318 | * | | 4336 | * |
4319 | * Zero a given physical page by mapping it at a page hook point. | | 4337 | * Zero a given physical page by mapping it at a page hook point. |
4320 |  * In doing the zero page op, the page we zero is mapped cacheable: on | | 4338 |  * In doing the zero page op, the page we zero is mapped cacheable: on |
4321 |  * StrongARM, accesses to non-cached pages are non-burst, which makes | | 4339 |  * StrongARM, accesses to non-cached pages are non-burst, which makes |
4322 |  * writing _any_ bulk data very slow. | | 4340 |  * writing _any_ bulk data very slow. |
4323 | */ | | 4341 | */ |
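| | | | /* |
| | | |  * On PMAP_CACHE_VIPT the hook mapping below is additionally offset |
| | | |  * by the page's last cache color. Illustratively, were |
| | | |  * arm_cache_prefer_mask 0x3000 and pvh_attrs carrying color 0x2000, |
| | | |  * the page would be zeroed through cdstp + 0x2000, leaving the fresh |
| | | |  * lines in the alias set a future same-color mapping will hit. |
| | | |  */ |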
4324 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 | | 4342 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 |
4325 | void | | 4343 | void |
4326 | pmap_zero_page_generic(paddr_t phys) | | 4344 | pmap_zero_page_generic(paddr_t phys) |
4327 | { | | 4345 | { |
4328 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) | | 4346 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) |
4329 | struct vm_page *pg = PHYS_TO_VM_PAGE(phys); | | 4347 | struct vm_page *pg = PHYS_TO_VM_PAGE(phys); |
4330 | #endif | | 4348 | #endif |
4331 | #ifdef PMAP_CACHE_VIPT | | 4349 | #ifdef PMAP_CACHE_VIPT |
4332 | /* Choose the last page color it had, if any */ | | 4350 | /* Choose the last page color it had, if any */ |
4333 | const vsize_t va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask; | | 4351 | const vsize_t va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask; |
4334 | #else | | 4352 | #else |
4335 | const vsize_t va_offset = 0; | | 4353 | const vsize_t va_offset = 0; |
4336 | #endif | | 4354 | #endif |
4337 | pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; | | 4355 | pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; |
4338 | | | 4356 | |
4339 | #ifdef DEBUG | | 4357 | #ifdef DEBUG |
4340 | if (!SLIST_EMPTY(&pg->mdpage.pvh_list)) | | 4358 | if (!SLIST_EMPTY(&pg->mdpage.pvh_list)) |
4341 | panic("pmap_zero_page: page has mappings"); | | 4359 | panic("pmap_zero_page: page has mappings"); |
4342 | #endif | | 4360 | #endif |
4343 | | | 4361 | |
4344 | KDASSERT((phys & PGOFSET) == 0); | | 4362 | KDASSERT((phys & PGOFSET) == 0); |
4345 | | | 4363 | |
4346 | /* | | 4364 | /* |
4347 | * Hook in the page, zero it, and purge the cache for that | | 4365 | * Hook in the page, zero it, and purge the cache for that |
4348 | * zeroed page. Invalidate the TLB as needed. | | 4366 | * zeroed page. Invalidate the TLB as needed. |
4349 | */ | | 4367 | */ |
4350 | *ptep = L2_S_PROTO | phys | | | 4368 | *ptep = L2_S_PROTO | phys | |
4351 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; | | 4369 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; |
4352 | PTE_SYNC(ptep); | | 4370 | PTE_SYNC(ptep); |
4353 | cpu_tlb_flushD_SE(cdstp + va_offset); | | 4371 | cpu_tlb_flushD_SE(cdstp + va_offset); |
4354 | cpu_cpwait(); | | 4372 | cpu_cpwait(); |
4355 | bzero_page(cdstp + va_offset); | | 4373 | bzero_page(cdstp + va_offset); |
4356 | /* | | 4374 | /* |
4357 | * Unmap the page. | | 4375 | * Unmap the page. |
4358 | */ | | 4376 | */ |
4359 | *ptep = 0; | | 4377 | *ptep = 0; |
4360 | PTE_SYNC(ptep); | | 4378 | PTE_SYNC(ptep); |
4361 | cpu_tlb_flushD_SE(cdstp + va_offset); | | 4379 | cpu_tlb_flushD_SE(cdstp + va_offset); |
4362 | #ifdef PMAP_CACHE_VIVT | | 4380 | #ifdef PMAP_CACHE_VIVT |
4363 | cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE); | | 4381 | cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE); |
4364 | #endif | | 4382 | #endif |
4365 | #ifdef PMAP_CACHE_VIPT | | 4383 | #ifdef PMAP_CACHE_VIPT |
4366 | /* | | 4384 | /* |
4367 | * This page is now cache resident so it now has a page color. | | 4385 | * This page is now cache resident so it now has a page color. |
4368 | * Any contents have been obliterated so clear the EXEC flag. | | 4386 | * Any contents have been obliterated so clear the EXEC flag. |
4369 | */ | | 4387 | */ |
4370 | if (!pmap_is_page_colored_p(pg)) { | | 4388 | if (!pmap_is_page_colored_p(pg)) { |
4371 | PMAPCOUNT(vac_color_new); | | 4389 | PMAPCOUNT(vac_color_new); |
4372 | pg->mdpage.pvh_attrs |= PVF_COLORED; | | 4390 | pg->mdpage.pvh_attrs |= PVF_COLORED; |
4373 | } | | 4391 | } |
4374 | if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) { | | 4392 | if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) { |
4375 | pg->mdpage.pvh_attrs &= ~PVF_EXEC; | | 4393 | pg->mdpage.pvh_attrs &= ~PVF_EXEC; |
4376 | PMAPCOUNT(exec_discarded_zero); | | 4394 | PMAPCOUNT(exec_discarded_zero); |
4377 | } | | 4395 | } |
4378 | pg->mdpage.pvh_attrs |= PVF_DIRTY; | | 4396 | pg->mdpage.pvh_attrs |= PVF_DIRTY; |
4379 | #endif | | 4397 | #endif |
4380 | } | | 4398 | } |
4381 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */ | | 4399 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */ |
4382 | | | 4400 | |
4383 | #if ARM_MMU_XSCALE == 1 | | 4401 | #if ARM_MMU_XSCALE == 1 |
4384 | void | | 4402 | void |
4385 | pmap_zero_page_xscale(paddr_t phys) | | 4403 | pmap_zero_page_xscale(paddr_t phys) |
4386 | { | | 4404 | { |
4387 | #ifdef DEBUG | | 4405 | #ifdef DEBUG |
4388 | struct vm_page *pg = PHYS_TO_VM_PAGE(phys); | | 4406 | struct vm_page *pg = PHYS_TO_VM_PAGE(phys); |
4389 | | | 4407 | |
4390 | if (!SLIST_EMPTY(&pg->mdpage.pvh_list)) | | 4408 | if (!SLIST_EMPTY(&pg->mdpage.pvh_list)) |
4391 | panic("pmap_zero_page: page has mappings"); | | 4409 | panic("pmap_zero_page: page has mappings"); |
4392 | #endif | | 4410 | #endif |
4393 | | | 4411 | |
4394 | KDASSERT((phys & PGOFSET) == 0); | | 4412 | KDASSERT((phys & PGOFSET) == 0); |
4395 | | | 4413 | |
4396 | /* | | 4414 | /* |
4397 | * Hook in the page, zero it, and purge the cache for that | | 4415 | * Hook in the page, zero it, and purge the cache for that |
4398 | * zeroed page. Invalidate the TLB as needed. | | 4416 | * zeroed page. Invalidate the TLB as needed. |
4399 | */ | | 4417 | */ |
4400 | *cdst_pte = L2_S_PROTO | phys | | | 4418 | *cdst_pte = L2_S_PROTO | phys | |
4401 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | | | 4419 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | |
4402 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ | | 4420 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ |
4403 | PTE_SYNC(cdst_pte); | | 4421 | PTE_SYNC(cdst_pte); |
4404 | cpu_tlb_flushD_SE(cdstp); | | 4422 | cpu_tlb_flushD_SE(cdstp); |
4405 | cpu_cpwait(); | | 4423 | cpu_cpwait(); |
4406 | bzero_page(cdstp); | | 4424 | bzero_page(cdstp); |
4407 | xscale_cache_clean_minidata(); | | 4425 | xscale_cache_clean_minidata(); |
4408 | } | | 4426 | } |
4409 | #endif /* ARM_MMU_XSCALE == 1 */ | | 4427 | #endif /* ARM_MMU_XSCALE == 1 */ |
4410 | | | 4428 | |
4411 | /* pmap_pageidlezero() | | 4429 | /* pmap_pageidlezero() |
4412 | * | | 4430 | * |
4413 | * The same as above, except that we assume that the page is not | | 4431 | * The same as above, except that we assume that the page is not |
4414 | * mapped. This means we never have to flush the cache first. Called | | 4432 | * mapped. This means we never have to flush the cache first. Called |
4415 | * from the idle loop. | | 4433 | * from the idle loop. |
4416 | */ | | 4434 | */ |
4417 | bool | | 4435 | bool |
4418 | pmap_pageidlezero(paddr_t phys) | | 4436 | pmap_pageidlezero(paddr_t phys) |
4419 | { | | 4437 | { |
4420 | unsigned int i; | | 4438 | unsigned int i; |
4421 | int *ptr; | | 4439 | int *ptr; |
4422 | bool rv = true; | | 4440 | bool rv = true; |
4423 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) | | 4441 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) |
4424 | struct vm_page * const pg = PHYS_TO_VM_PAGE(phys); | | 4442 | struct vm_page * const pg = PHYS_TO_VM_PAGE(phys); |
4425 | #endif | | 4443 | #endif |
4426 | #ifdef PMAP_CACHE_VIPT | | 4444 | #ifdef PMAP_CACHE_VIPT |
4427 | /* Choose the last page color it had, if any */ | | 4445 | /* Choose the last page color it had, if any */ |
4428 | const vsize_t va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask; | | 4446 | const vsize_t va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask; |
4429 | #else | | 4447 | #else |
4430 | const vsize_t va_offset = 0; | | 4448 | const vsize_t va_offset = 0; |
4431 | #endif | | 4449 | #endif |
4432 | pt_entry_t * const ptep = &csrc_pte[va_offset >> PGSHIFT]; | | 4450 | pt_entry_t * const ptep = &csrc_pte[va_offset >> PGSHIFT]; |
4433 | | | 4451 | |
4434 | | | 4452 | |
4435 | #ifdef DEBUG | | 4453 | #ifdef DEBUG |
4436 | if (!SLIST_EMPTY(&pg->mdpage.pvh_list)) | | 4454 | if (!SLIST_EMPTY(&pg->mdpage.pvh_list)) |
4437 | panic("pmap_pageidlezero: page has mappings"); | | 4455 | panic("pmap_pageidlezero: page has mappings"); |
4438 | #endif | | 4456 | #endif |
4439 | | | 4457 | |
4440 | KDASSERT((phys & PGOFSET) == 0); | | 4458 | KDASSERT((phys & PGOFSET) == 0); |
4441 | | | 4459 | |
4442 | /* | | 4460 | /* |
4443 | * Hook in the page, zero it, and purge the cache for that | | 4461 | * Hook in the page, zero it, and purge the cache for that |
4444 | * zeroed page. Invalidate the TLB as needed. | | 4462 | * zeroed page. Invalidate the TLB as needed. |
4445 | */ | | 4463 | */ |
4446 | *ptep = L2_S_PROTO | phys | | | 4464 | *ptep = L2_S_PROTO | phys | |
4447 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; | | 4465 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; |
4448 | PTE_SYNC(ptep); | | 4466 | PTE_SYNC(ptep); |
4449 | cpu_tlb_flushD_SE(cdstp + va_offset); | | 4467 | cpu_tlb_flushD_SE(cdstp + va_offset); |
4450 | cpu_cpwait(); | | 4468 | cpu_cpwait(); |
4451 | | | 4469 | |
4452 | for (i = 0, ptr = (int *)(cdstp + va_offset); | | 4470 | for (i = 0, ptr = (int *)(cdstp + va_offset); |
4453 | i < (PAGE_SIZE / sizeof(int)); i++) { | | 4471 | i < (PAGE_SIZE / sizeof(int)); i++) { |
4454 | if (sched_curcpu_runnable_p() != 0) { | | 4472 | if (sched_curcpu_runnable_p() != 0) { |
4455 | /* | | 4473 | /* |
4456 | * A process has become ready. Abort now, | | 4474 | * A process has become ready. Abort now, |
4457 | * so we don't keep it waiting while we | | 4475 | * so we don't keep it waiting while we |
4458 | * do slow memory access to finish this | | 4476 | * do slow memory access to finish this |
4459 | * page. | | 4477 | * page. |
4460 | */ | | 4478 | */ |
4461 | rv = false; | | 4479 | rv = false; |
4462 | break; | | 4480 | break; |
4463 | } | | 4481 | } |
4464 | *ptr++ = 0; | | 4482 | *ptr++ = 0; |
4465 | } | | 4483 | } |
4466 | | | 4484 | |
4467 | #ifdef PMAP_CACHE_VIVT | | 4485 | #ifdef PMAP_CACHE_VIVT |
4468 | if (rv) | | 4486 | if (rv) |
4469 | /* | | 4487 | /* |
4470 |  * If we aborted, we'll rezero this page again later, so don't | | 4488 |  * If we aborted, we'll rezero this page again later, so don't |
4471 |  * purge it unless we finished it. | | 4489 |  * purge it unless we finished it. |
4472 | */ | | 4490 | */ |
4473 | cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); | | 4491 | cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); |
4474 | #elif defined(PMAP_CACHE_VIPT) | | 4492 | #elif defined(PMAP_CACHE_VIPT) |
4475 | /* | | 4493 | /* |
4476 | * This page is now cache resident so it now has a page color. | | 4494 | * This page is now cache resident so it now has a page color. |
4477 | * Any contents have been obliterated so clear the EXEC flag. | | 4495 | * Any contents have been obliterated so clear the EXEC flag. |
4478 | */ | | 4496 | */ |
4479 | if (!pmap_is_page_colored_p(pg)) { | | 4497 | if (!pmap_is_page_colored_p(pg)) { |
4480 | PMAPCOUNT(vac_color_new); | | 4498 | PMAPCOUNT(vac_color_new); |
4481 | pg->mdpage.pvh_attrs |= PVF_COLORED; | | 4499 | pg->mdpage.pvh_attrs |= PVF_COLORED; |
4482 | } | | 4500 | } |
4483 | if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) { | | 4501 | if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) { |
4484 | pg->mdpage.pvh_attrs &= ~PVF_EXEC; | | 4502 | pg->mdpage.pvh_attrs &= ~PVF_EXEC; |
4485 | PMAPCOUNT(exec_discarded_zero); | | 4503 | PMAPCOUNT(exec_discarded_zero); |
4486 | } | | 4504 | } |
4487 | #endif | | 4505 | #endif |
4488 | /* | | 4506 | /* |
4489 | * Unmap the page. | | 4507 | * Unmap the page. |
4490 | */ | | 4508 | */ |
4491 | *ptep = 0; | | 4509 | *ptep = 0; |
4492 | PTE_SYNC(ptep); | | 4510 | PTE_SYNC(ptep); |
4493 | cpu_tlb_flushD_SE(cdstp + va_offset); | | 4511 | cpu_tlb_flushD_SE(cdstp + va_offset); |
4494 | | | 4512 | |
4495 | return (rv); | | 4513 | return (rv); |
4496 | } | | 4514 | } |
4497 | | | 4515 | |
4498 | /* | | 4516 | /* |
4499 | * pmap_copy_page() | | 4517 | * pmap_copy_page() |
4500 | * | | 4518 | * |
4501 | * Copy one physical page into another, by mapping the pages into | | 4519 | * Copy one physical page into another, by mapping the pages into |
4502 |  * hook points. The same comment regarding cacheability as in | | 4520 |  * hook points. The same comment regarding cacheability as in |
4503 | * pmap_zero_page also applies here. | | 4521 | * pmap_zero_page also applies here. |
4504 | */ | | 4522 | */ |
4505 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 | | 4523 | #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 |
4506 | void | | 4524 | void |
4507 | pmap_copy_page_generic(paddr_t src, paddr_t dst) | | 4525 | pmap_copy_page_generic(paddr_t src, paddr_t dst) |
4508 | { | | 4526 | { |
4509 | struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src); | | 4527 | struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src); |
4510 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) | | 4528 | #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) |
4511 | struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst); | | 4529 | struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst); |
4512 | #endif | | 4530 | #endif |
4513 | #ifdef PMAP_CACHE_VIPT | | 4531 | #ifdef PMAP_CACHE_VIPT |
4514 | const vsize_t src_va_offset = src_pg->mdpage.pvh_attrs & arm_cache_prefer_mask; | | 4532 | const vsize_t src_va_offset = src_pg->mdpage.pvh_attrs & arm_cache_prefer_mask; |
4515 | const vsize_t dst_va_offset = dst_pg->mdpage.pvh_attrs & arm_cache_prefer_mask; | | 4533 | const vsize_t dst_va_offset = dst_pg->mdpage.pvh_attrs & arm_cache_prefer_mask; |
4516 | #else | | 4534 | #else |
4517 | const vsize_t src_va_offset = 0; | | 4535 | const vsize_t src_va_offset = 0; |
4518 | const vsize_t dst_va_offset = 0; | | 4536 | const vsize_t dst_va_offset = 0; |
4519 | #endif | | 4537 | #endif |
4520 | pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT]; | | 4538 | pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT]; |
4521 | pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT]; | | 4539 | pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT]; |
4522 | | | 4540 | |
4523 | #ifdef DEBUG | | 4541 | #ifdef DEBUG |
4524 | if (!SLIST_EMPTY(&dst_pg->mdpage.pvh_list)) | | 4542 | if (!SLIST_EMPTY(&dst_pg->mdpage.pvh_list)) |
4525 | panic("pmap_copy_page: dst page has mappings"); | | 4543 | panic("pmap_copy_page: dst page has mappings"); |
4526 | #endif | | 4544 | #endif |
4527 | | | 4545 | |
4528 | #ifdef PMAP_CACHE_VIPT | | 4546 | #ifdef PMAP_CACHE_VIPT |
4529 | KASSERT(src_pg->mdpage.pvh_attrs & (PVF_COLORED|PVF_NC)); | | 4547 | KASSERT(src_pg->mdpage.pvh_attrs & (PVF_COLORED|PVF_NC)); |
4530 | #endif | | 4548 | #endif |
4531 | KDASSERT((src & PGOFSET) == 0); | | 4549 | KDASSERT((src & PGOFSET) == 0); |
4532 | KDASSERT((dst & PGOFSET) == 0); | | 4550 | KDASSERT((dst & PGOFSET) == 0); |
4533 | | | 4551 | |
4534 | /* | | 4552 | /* |
4535 | * Clean the source page. Hold the source page's lock for | | 4553 | * Clean the source page. Hold the source page's lock for |
4536 | * the duration of the copy so that no other mappings can | | 4554 | * the duration of the copy so that no other mappings can |
4537 | * be created while we have a potentially aliased mapping. | | 4555 | * be created while we have a potentially aliased mapping. |
4538 | */ | | 4556 | */ |
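| | | | /* |
| | | |  * On PMAP_CACHE_VIPT both hook mappings are colored as well: the |
| | | |  * source is mapped read-only at the source page's color (uncached if |
| | | |  * the page is PVF_NC), the destination at the destination page's |
| | | |  * color, so the copy reads lines that may already be hot and leaves |
| | | |  * the result where later mappings of dst will look for it. |
| | | |  */ |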
4539 | simple_lock(&src_pg->mdpage.pvh_slock); | | 4557 | simple_lock(&src_pg->mdpage.pvh_slock); |
4540 | #ifdef PMAP_CACHE_VIVT | | 4558 | #ifdef PMAP_CACHE_VIVT |
4541 | (void) pmap_clean_page(SLIST_FIRST(&src_pg->mdpage.pvh_list), true); | | 4559 | (void) pmap_clean_page(SLIST_FIRST(&src_pg->mdpage.pvh_list), true); |
4542 | #endif | | 4560 | #endif |
4543 | | | 4561 | |
4544 | /* | | 4562 | /* |
4545 | * Map the pages into the page hook points, copy them, and purge | | 4563 | * Map the pages into the page hook points, copy them, and purge |
4546 | * the cache for the appropriate page. Invalidate the TLB | | 4564 | * the cache for the appropriate page. Invalidate the TLB |
4547 | * as required. | | 4565 | * as required. |
4548 | */ | | 4566 | */ |
4549 | *src_ptep = L2_S_PROTO | | 4567 | *src_ptep = L2_S_PROTO |
4550 | | src | | 4568 | | src |
4551 | #ifdef PMAP_CACHE_VIPT | | 4569 | #ifdef PMAP_CACHE_VIPT |
4552 | | ((src_pg->mdpage.pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode) | | 4570 | | ((src_pg->mdpage.pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode) |
4553 | #endif | | 4571 | #endif |
4554 | #ifdef PMAP_CACHE_VIVT | | 4572 | #ifdef PMAP_CACHE_VIVT |
4555 | | pte_l2_s_cache_mode | | 4573 | | pte_l2_s_cache_mode |
4556 | #endif | | 4574 | #endif |
4557 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ); | | 4575 | | L2_S_PROT(PTE_KERNEL, VM_PROT_READ); |
4558 | *dst_ptep = L2_S_PROTO | dst | | | 4576 | *dst_ptep = L2_S_PROTO | dst | |
4559 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; | | 4577 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; |
4560 | PTE_SYNC(src_ptep); | | 4578 | PTE_SYNC(src_ptep); |
4561 | PTE_SYNC(dst_ptep); | | 4579 | PTE_SYNC(dst_ptep); |
4562 | cpu_tlb_flushD_SE(csrcp + src_va_offset); | | 4580 | cpu_tlb_flushD_SE(csrcp + src_va_offset); |
4563 | cpu_tlb_flushD_SE(cdstp + dst_va_offset); | | 4581 | cpu_tlb_flushD_SE(cdstp + dst_va_offset); |
4564 | cpu_cpwait(); | | 4582 | cpu_cpwait(); |
4565 | bcopy_page(csrcp + src_va_offset, cdstp + dst_va_offset); | | 4583 | bcopy_page(csrcp + src_va_offset, cdstp + dst_va_offset); |
4566 | #ifdef PMAP_CACHE_VIVT | | 4584 | #ifdef PMAP_CACHE_VIVT |
4567 | cpu_dcache_inv_range(csrcp + src_va_offset, PAGE_SIZE); | | 4585 | cpu_dcache_inv_range(csrcp + src_va_offset, PAGE_SIZE); |
4568 | #endif | | 4586 | #endif |
4569 | simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */ | | 4587 | simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */ |
4570 | #ifdef PMAP_CACHE_VIVT | | 4588 | #ifdef PMAP_CACHE_VIVT |
4571 | cpu_dcache_wbinv_range(cdstp + dst_va_offset, PAGE_SIZE); | | 4589 | cpu_dcache_wbinv_range(cdstp + dst_va_offset, PAGE_SIZE); |
4572 | #endif | | 4590 | #endif |
4573 | /* | | 4591 | /* |
4574 | * Unmap the pages. | | 4592 | * Unmap the pages. |
4575 | */ | | 4593 | */ |
4576 | *src_ptep = 0; | | 4594 | *src_ptep = 0; |
4577 | *dst_ptep = 0; | | 4595 | *dst_ptep = 0; |
4578 | PTE_SYNC(src_ptep); | | 4596 | PTE_SYNC(src_ptep); |
4579 | PTE_SYNC(dst_ptep); | | 4597 | PTE_SYNC(dst_ptep); |
4580 | cpu_tlb_flushD_SE(csrcp + src_va_offset); | | 4598 | cpu_tlb_flushD_SE(csrcp + src_va_offset); |
4581 | cpu_tlb_flushD_SE(cdstp + dst_va_offset); | | 4599 | cpu_tlb_flushD_SE(cdstp + dst_va_offset); |
4582 | #ifdef PMAP_CACHE_VIPT | | 4600 | #ifdef PMAP_CACHE_VIPT |
4583 | /* | | 4601 | /* |
4584 | * Now that the destination page is in the cache, mark it as colored. | | 4602 | * Now that the destination page is in the cache, mark it as colored. |
4585 | * If this was an exec page, discard it. | | 4603 | * If this was an exec page, discard it. |
4586 | */ | | 4604 | */ |
4587 | if (!pmap_is_page_colored_p(dst_pg)) { | | 4605 | if (!pmap_is_page_colored_p(dst_pg)) { |
4588 | PMAPCOUNT(vac_color_new); | | 4606 | PMAPCOUNT(vac_color_new); |
4589 | dst_pg->mdpage.pvh_attrs |= PVF_COLORED; | | 4607 | dst_pg->mdpage.pvh_attrs |= PVF_COLORED; |
4590 | } | | 4608 | } |
4591 | if (PV_IS_EXEC_P(dst_pg->mdpage.pvh_attrs)) { | | 4609 | if (PV_IS_EXEC_P(dst_pg->mdpage.pvh_attrs)) { |
4592 | dst_pg->mdpage.pvh_attrs &= ~PVF_EXEC; | | 4610 | dst_pg->mdpage.pvh_attrs &= ~PVF_EXEC; |
4593 | PMAPCOUNT(exec_discarded_copy); | | 4611 | PMAPCOUNT(exec_discarded_copy); |
4594 | } | | 4612 | } |
4595 | dst_pg->mdpage.pvh_attrs |= PVF_DIRTY; | | 4613 | dst_pg->mdpage.pvh_attrs |= PVF_DIRTY; |
4596 | #endif | | 4614 | #endif |
4597 | } | | 4615 | } |
4598 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */ | | 4616 | #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */ |
4599 | | | 4617 | |
4600 | #if ARM_MMU_XSCALE == 1 | | 4618 | #if ARM_MMU_XSCALE == 1 |
4601 | void | | 4619 | void |
4602 | pmap_copy_page_xscale(paddr_t src, paddr_t dst) | | 4620 | pmap_copy_page_xscale(paddr_t src, paddr_t dst) |
4603 | { | | 4621 | { |
4604 | struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); | | 4622 | struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); |
4605 | #ifdef DEBUG | | 4623 | #ifdef DEBUG |
4606 | struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); | | 4624 | struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); |
4607 | | | 4625 | |
4608 | if (!SLIST_EMPTY(&dst_pg->mdpage.pvh_list)) | | 4626 | if (!SLIST_EMPTY(&dst_pg->mdpage.pvh_list)) |
4609 | panic("pmap_copy_page: dst page has mappings"); | | 4627 | panic("pmap_copy_page: dst page has mappings"); |
4610 | #endif | | 4628 | #endif |
4611 | | | 4629 | |
4612 | KDASSERT((src & PGOFSET) == 0); | | 4630 | KDASSERT((src & PGOFSET) == 0); |
4613 | KDASSERT((dst & PGOFSET) == 0); | | 4631 | KDASSERT((dst & PGOFSET) == 0); |
4614 | | | 4632 | |
4615 | /* | | 4633 | /* |
4616 | * Clean the source page. Hold the source page's lock for | | 4634 | * Clean the source page. Hold the source page's lock for |
4617 | * the duration of the copy so that no other mappings can | | 4635 | * the duration of the copy so that no other mappings can |
4618 | * be created while we have a potentially aliased mapping. | | 4636 | * be created while we have a potentially aliased mapping. |
4619 | */ | | 4637 | */ |
4620 | simple_lock(&src_pg->mdpage.pvh_slock); | | 4638 | simple_lock(&src_pg->mdpage.pvh_slock); |
4621 | #ifdef PMAP_CACHE_VIVT | | 4639 | #ifdef PMAP_CACHE_VIVT |
4622 | (void) pmap_clean_page(SLIST_FIRST(&src_pg->mdpage.pvh_list), true); | | 4640 | (void) pmap_clean_page(SLIST_FIRST(&src_pg->mdpage.pvh_list), true); |
4623 | #endif | | 4641 | #endif |
4624 | | | 4642 | |
4625 | /* | | 4643 | /* |
4626 | * Map the pages into the page hook points, copy them, and purge | | 4644 | * Map the pages into the page hook points, copy them, and purge |
4627 | * the cache for the appropriate page. Invalidate the TLB | | 4645 | * the cache for the appropriate page. Invalidate the TLB |
4628 | * as required. | | 4646 | * as required. |
4629 | */ | | 4647 | */ |
4630 | *csrc_pte = L2_S_PROTO | src | | | 4648 | *csrc_pte = L2_S_PROTO | src | |
4631 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | | | 4649 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | |
4632 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ | | 4650 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ |
4633 | PTE_SYNC(csrc_pte); | | 4651 | PTE_SYNC(csrc_pte); |
4634 | *cdst_pte = L2_S_PROTO | dst | | | 4652 | *cdst_pte = L2_S_PROTO | dst | |
4635 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | | | 4653 | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | |
4636 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ | | 4654 | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ |
4637 | PTE_SYNC(cdst_pte); | | 4655 | PTE_SYNC(cdst_pte); |
4638 | cpu_tlb_flushD_SE(csrcp); | | 4656 | cpu_tlb_flushD_SE(csrcp); |
4639 | cpu_tlb_flushD_SE(cdstp); | | 4657 | cpu_tlb_flushD_SE(cdstp); |
4640 | cpu_cpwait(); | | 4658 | cpu_cpwait(); |
4641 | bcopy_page(csrcp, cdstp); | | 4659 | bcopy_page(csrcp, cdstp); |
4642 | simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */ | | 4660 | simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */ |
4643 | xscale_cache_clean_minidata(); | | 4661 | xscale_cache_clean_minidata(); |
4644 | } | | 4662 | } |
4645 | #endif /* ARM_MMU_XSCALE == 1 */ | | 4663 | #endif /* ARM_MMU_XSCALE == 1 */ |
4646 | | | 4664 | |
4647 | /* | | 4665 | /* |
4648 | * void pmap_virtual_space(vaddr_t *start, vaddr_t *end) | | 4666 | * void pmap_virtual_space(vaddr_t *start, vaddr_t *end) |
4649 | * | | 4667 | * |
4650 | * Return the start and end addresses of the kernel's virtual space. | | 4668 | * Return the start and end addresses of the kernel's virtual space. |
4651 |  * These values are set up in pmap_bootstrap and are updated as pages | | 4669 |  * These values are set up in pmap_bootstrap and are updated as pages |
4652 | * are allocated. | | 4670 | * are allocated. |
4653 | */ | | 4671 | */ |
4654 | void | | 4672 | void |
4655 | pmap_virtual_space(vaddr_t *start, vaddr_t *end) | | 4673 | pmap_virtual_space(vaddr_t *start, vaddr_t *end) |
4656 | { | | 4674 | { |
4657 | *start = virtual_avail; | | 4675 | *start = virtual_avail; |
4658 | *end = virtual_end; | | 4676 | *end = virtual_end; |
4659 | } | | 4677 | } |
4660 | | | 4678 | |
4661 | /* | | 4679 | /* |
4662 | * Helper function for pmap_grow_l2_bucket() | | 4680 | * Helper function for pmap_grow_l2_bucket() |
4663 | */ | | 4681 | */ |
4664 | static inline int | | 4682 | static inline int |
4665 | pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap) | | 4683 | pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap) |
4666 | { | | 4684 | { |
4667 | struct l2_bucket *l2b; | | 4685 | struct l2_bucket *l2b; |
4668 | pt_entry_t *ptep; | | 4686 | pt_entry_t *ptep; |
4669 | paddr_t pa; | | 4687 | paddr_t pa; |
4670 | | | 4688 | |
4671 | if (uvm.page_init_done == false) { | | 4689 | if (uvm.page_init_done == false) { |
4672 | #ifdef PMAP_STEAL_MEMORY | | 4690 | #ifdef PMAP_STEAL_MEMORY |
4673 | pv_addr_t pv; | | 4691 | pv_addr_t pv; |
4674 | pmap_boot_pagealloc(PAGE_SIZE, | | 4692 | pmap_boot_pagealloc(PAGE_SIZE, |
4675 | #ifdef PMAP_CACHE_VIPT | | 4693 | #ifdef PMAP_CACHE_VIPT |
4676 | arm_cache_prefer_mask, | | 4694 | arm_cache_prefer_mask, |
4677 | va & arm_cache_prefer_mask, | | 4695 | va & arm_cache_prefer_mask, |
4678 | #else | | 4696 | #else |
4679 | 0, 0, | | 4697 | 0, 0, |
4680 | #endif | | 4698 | #endif |
4681 | &pv); | | 4699 | &pv); |
4682 | pa = pv.pv_pa; | | 4700 | pa = pv.pv_pa; |
4683 | #else | | 4701 | #else |
4684 | if (uvm_page_physget(&pa) == false) | | 4702 | if (uvm_page_physget(&pa) == false) |
4685 | return (1); | | 4703 | return (1); |
4686 | #endif /* PMAP_STEAL_MEMORY */ | | 4704 | #endif /* PMAP_STEAL_MEMORY */ |
4687 | } else { | | 4705 | } else { |
4688 | struct vm_page *pg; | | 4706 | struct vm_page *pg; |
4689 | pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); | | 4707 | pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); |
4690 | if (pg == NULL) | | 4708 | if (pg == NULL) |
4691 | return (1); | | 4709 | return (1); |
4692 | pa = VM_PAGE_TO_PHYS(pg); | | 4710 | pa = VM_PAGE_TO_PHYS(pg); |
4693 | #ifdef PMAP_CACHE_VIPT | | 4711 | #ifdef PMAP_CACHE_VIPT |
4694 | /* | | 4712 | /* |
4695 | * This new page must not have any mappings. Enter it via | | 4713 | * This new page must not have any mappings. Enter it via |
4696 | * pmap_kenter_pa and let that routine do the hard work. | | 4714 | * pmap_kenter_pa and let that routine do the hard work. |
4697 | */ | | 4715 | */ |
4698 | KASSERT(SLIST_EMPTY(&pg->mdpage.pvh_list)); | | 4716 | KASSERT(SLIST_EMPTY(&pg->mdpage.pvh_list)); |
4699 | pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE); | | 4717 | pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE); |
4700 | #endif | | 4718 | #endif |
4701 | } | | 4719 | } |
4702 | | | 4720 | |
4703 | if (pap) | | 4721 | if (pap) |
4704 | *pap = pa; | | 4722 | *pap = pa; |
4705 | | | 4723 | |
4706 | PMAPCOUNT(pt_mappings); | | 4724 | PMAPCOUNT(pt_mappings); |
4707 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); | | 4725 | l2b = pmap_get_l2_bucket(pmap_kernel(), va); |
4708 | KDASSERT(l2b != NULL); | | 4726 | KDASSERT(l2b != NULL); |
4709 | | | 4727 | |
4710 | ptep = &l2b->l2b_kva[l2pte_index(va)]; | | 4728 | ptep = &l2b->l2b_kva[l2pte_index(va)]; |
4711 | *ptep = L2_S_PROTO | pa | cache_mode | | | 4729 | *ptep = L2_S_PROTO | pa | cache_mode | |
4712 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); | | 4730 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); |
4713 | PTE_SYNC(ptep); | | 4731 | PTE_SYNC(ptep); |
4714 | memset((void *)va, 0, PAGE_SIZE); | | 4732 | memset((void *)va, 0, PAGE_SIZE); |
4715 | return (0); | | 4733 | return (0); |
4716 | } | | 4734 | } |
4717 | | | 4735 | |
4718 | /* | | 4736 | /* |
4719 | * This is the same as pmap_alloc_l2_bucket(), except that it is only | | 4737 | * This is the same as pmap_alloc_l2_bucket(), except that it is only |
4720 | * used by pmap_growkernel(). | | 4738 | * used by pmap_growkernel(). |
4721 | */ | | 4739 | */ |
4722 | static inline struct l2_bucket * | | 4740 | static inline struct l2_bucket * |
4723 | pmap_grow_l2_bucket(pmap_t pm, vaddr_t va) | | 4741 | pmap_grow_l2_bucket(pmap_t pm, vaddr_t va) |
4724 | { | | 4742 | { |
4725 | struct l2_dtable *l2; | | 4743 | struct l2_dtable *l2; |
4726 | struct l2_bucket *l2b; | | 4744 | struct l2_bucket *l2b; |
4727 | u_short l1idx; | | 4745 | u_short l1idx; |
4728 | vaddr_t nva; | | 4746 | vaddr_t nva; |
4729 | | | 4747 | |
4730 | l1idx = L1_IDX(va); | | 4748 | l1idx = L1_IDX(va); |
4731 | | | 4749 | |
4732 | if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { | | 4750 | if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { |
4733 | /* | | 4751 | /* |
4734 | * No mapping at this address, as there is | | 4752 | * No mapping at this address, as there is |
4735 | * no entry in the L1 table. | | 4753 | * no entry in the L1 table. |
4736 | * Need to allocate a new l2_dtable. | | 4754 | * Need to allocate a new l2_dtable. |
4737 | */ | | 4755 | */ |
4738 | nva = pmap_kernel_l2dtable_kva; | | 4756 | nva = pmap_kernel_l2dtable_kva; |
4739 | if ((nva & PGOFSET) == 0) { | | 4757 | if ((nva & PGOFSET) == 0) { |
4740 | /* | | 4758 | /* |
4741 | * Need to allocate a backing page | | 4759 | * Need to allocate a backing page |
4742 | */ | | 4760 | */ |
4743 | if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) | | 4761 | if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) |
4744 | return (NULL); | | 4762 | return (NULL); |
4745 | } | | 4763 | } |
4746 | | | 4764 | |
4747 | l2 = (struct l2_dtable *)nva; | | 4765 | l2 = (struct l2_dtable *)nva; |
4748 | nva += sizeof(struct l2_dtable); | | 4766 | nva += sizeof(struct l2_dtable); |
4749 | | | 4767 | |
4750 | if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) { | | 4768 | if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) { |
4751 | /* | | 4769 | /* |
4752 | * The new l2_dtable straddles a page boundary. | | 4770 | * The new l2_dtable straddles a page boundary. |
4753 | * Map in another page to cover it. | | 4771 | * Map in another page to cover it. |
4754 | */ | | 4772 | */ |
4755 | if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) | | 4773 | if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) |
4756 | return (NULL); | | 4774 | return (NULL); |
4757 | } | | 4775 | } |
4758 | | | 4776 | |
4759 | pmap_kernel_l2dtable_kva = nva; | | 4777 | pmap_kernel_l2dtable_kva = nva; |
4760 | | | 4778 | |
4761 | /* | | 4779 | /* |
4762 | * Link it into the parent pmap | | 4780 | * Link it into the parent pmap |
4763 | */ | | 4781 | */ |
4764 | pm->pm_l2[L2_IDX(l1idx)] = l2; | | 4782 | pm->pm_l2[L2_IDX(l1idx)] = l2; |
4765 | } | | 4783 | } |
4766 | | | 4784 | |
4767 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; | | 4785 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; |
4768 | | | 4786 | |
4769 | /* | | 4787 | /* |
4770 | * Fetch pointer to the L2 page table associated with the address. | | 4788 | * Fetch pointer to the L2 page table associated with the address. |
4771 | */ | | 4789 | */ |
4772 | if (l2b->l2b_kva == NULL) { | | 4790 | if (l2b->l2b_kva == NULL) { |
4773 | pt_entry_t *ptep; | | 4791 | pt_entry_t *ptep; |
4774 | | | 4792 | |
4775 | /* | | 4793 | /* |
4776 | * No L2 page table has been allocated. Chances are, this | | 4794 | * No L2 page table has been allocated. Chances are, this |
4777 | * is because we just allocated the l2_dtable, above. | | 4795 | * is because we just allocated the l2_dtable, above. |
4778 | */ | | 4796 | */ |
4779 | nva = pmap_kernel_l2ptp_kva; | | 4797 | nva = pmap_kernel_l2ptp_kva; |
4780 | ptep = (pt_entry_t *)nva; | | 4798 | ptep = (pt_entry_t *)nva; |
4781 | if ((nva & PGOFSET) == 0) { | | 4799 | if ((nva & PGOFSET) == 0) { |
4782 | /* | | 4800 | /* |
4783 | * Need to allocate a backing page | | 4801 | * Need to allocate a backing page |
4784 | */ | | 4802 | */ |
4785 | if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, | | 4803 | if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, |
4786 | &pmap_kernel_l2ptp_phys)) | | 4804 | &pmap_kernel_l2ptp_phys)) |
4787 | return (NULL); | | 4805 | return (NULL); |
4788 | PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); | | 4806 | PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); |
4789 | } | | 4807 | } |
4790 | | | 4808 | |
4791 | l2->l2_occupancy++; | | 4809 | l2->l2_occupancy++; |
4792 | l2b->l2b_kva = ptep; | | 4810 | l2b->l2b_kva = ptep; |
4793 | l2b->l2b_l1idx = l1idx; | | 4811 | l2b->l2b_l1idx = l1idx; |
4794 | l2b->l2b_phys = pmap_kernel_l2ptp_phys; | | 4812 | l2b->l2b_phys = pmap_kernel_l2ptp_phys; |
4795 | | | 4813 | |
4796 | pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; | | 4814 | pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; |
4797 | pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; | | 4815 | pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; |
4798 | } | | 4816 | } |
4799 | | | 4817 | |
4800 | return (l2b); | | 4818 | return (l2b); |
4801 | } | | 4819 | } |
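| | | |
| /* | | | /*
|  * Both carve-outs above are simple bump allocators: the l2_dtable | | |  * Both carve-outs above are simple bump allocators: the l2_dtable
|  * cursor advances by sizeof(struct l2_dtable), the L2 page-table | | |  * cursor advances by sizeof(struct l2_dtable), the L2 page-table
|  * cursor by L2_TABLE_SIZE_REAL, and pmap_grow_map() supplies a fresh | | |  * cursor by L2_TABLE_SIZE_REAL, and pmap_grow_map() supplies a fresh
|  * backing page only when a cursor crosses a page boundary.  With the | | |  * backing page only when a cursor crosses a page boundary.  With the
|  * usual arm32 values (L2_TABLE_SIZE_REAL == 1KB, PAGE_SIZE == 4KB), | | |  * usual arm32 values (L2_TABLE_SIZE_REAL == 1KB, PAGE_SIZE == 4KB),
|  * four L2 tables share each backing page, so only every fourth L2 | | |  * four L2 tables share each backing page, so only every fourth L2
|  * table allocated here consumes new physical memory. | | |  * table allocated here consumes new physical memory.
|  */ | | |  */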
4802 | | | 4820 | |
4803 | vaddr_t | | 4821 | vaddr_t |
4804 | pmap_growkernel(vaddr_t maxkvaddr) | | 4822 | pmap_growkernel(vaddr_t maxkvaddr) |
4805 | { | | 4823 | { |
4806 | pmap_t kpm = pmap_kernel(); | | 4824 | pmap_t kpm = pmap_kernel(); |
4807 | struct l1_ttable *l1; | | 4825 | struct l1_ttable *l1; |
4808 | struct l2_bucket *l2b; | | 4826 | struct l2_bucket *l2b; |
4809 | pd_entry_t *pl1pd; | | 4827 | pd_entry_t *pl1pd; |
4810 | int s; | | 4828 | int s; |
4811 | | | 4829 | |
4812 | if (maxkvaddr <= pmap_curmaxkvaddr) | | 4830 | if (maxkvaddr <= pmap_curmaxkvaddr) |
4813 | goto out; /* we are OK */ | | 4831 | goto out; /* we are OK */ |
4814 | | | 4832 | |
4815 | NPDEBUG(PDB_GROWKERN, | | 4833 | NPDEBUG(PDB_GROWKERN, |
4816 | printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n", | | 4834 | printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n", |
4817 | pmap_curmaxkvaddr, maxkvaddr)); | | 4835 | pmap_curmaxkvaddr, maxkvaddr)); |
4818 | | | 4836 | |
4819 | KDASSERT(maxkvaddr <= virtual_end); | | 4837 | KDASSERT(maxkvaddr <= virtual_end); |
4820 | | | 4838 | |
4821 | /* | | 4839 | /* |
4822 | * whoops! we need to add kernel PTPs | | 4840 | * whoops! we need to add kernel PTPs |
4823 | */ | | 4841 | */ |
4824 | | | 4842 | |
4825 | s = splhigh(); /* to be safe */ | | 4843 | s = splhigh(); /* to be safe */ |
4826 | mutex_enter(&kpm->pm_lock); | | 4844 | mutex_enter(&kpm->pm_lock); |
4827 | | | 4845 | |
4828 | /* Map 1MB at a time */ | | 4846 | /* Map 1MB at a time */ |
4829 | for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) { | | 4847 | for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) { |
4830 | | | 4848 | |
4831 | l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); | | 4849 | l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); |
4832 | KDASSERT(l2b != NULL); | | 4850 | KDASSERT(l2b != NULL); |
4833 | | | 4851 | |
4834 | /* Distribute new L1 entry to all other L1s */ | | 4852 | /* Distribute new L1 entry to all other L1s */ |
4835 | SLIST_FOREACH(l1, &l1_list, l1_link) { | | 4853 | SLIST_FOREACH(l1, &l1_list, l1_link) { |
4836 | pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)]; | | 4854 | pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)]; |
4837 | *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | | | 4855 | *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | |
4838 | L1_C_PROTO; | | 4856 | L1_C_PROTO; |
4839 | PTE_SYNC(pl1pd); | | 4857 | PTE_SYNC(pl1pd); |
4840 | } | | 4858 | } |
4841 | } | | 4859 | } |
4842 | | | 4860 | |
4843 | /* | | 4861 | /* |
4844 | * Flush out the cache; this is expensive, but growkernel | | 4862 | * Flush out the cache; this is expensive, but growkernel
4845 | * happens rarely enough for it not to matter. | | 4863 | * happens rarely enough for it not to matter.
4846 | */ | | 4864 | */ |
4847 | cpu_dcache_wbinv_all(); | | 4865 | cpu_dcache_wbinv_all(); |
4848 | cpu_tlb_flushD(); | | 4866 | cpu_tlb_flushD(); |
4849 | cpu_cpwait(); | | 4867 | cpu_cpwait(); |
4850 | | | 4868 | |
4851 | mutex_exit(&kpm->pm_lock); | | 4869 | mutex_exit(&kpm->pm_lock); |
4852 | splx(s); | | 4870 | splx(s); |
4853 | | | 4871 | |
4854 | out: | | 4872 | out: |
4855 | return (pmap_curmaxkvaddr); | | 4873 | return (pmap_curmaxkvaddr); |
4856 | } | | 4874 | } |
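| | | |
| /* | | | /*
|  * A caller sketch (hypothetical; the real call sites are in uvm): the | | |  * A caller sketch (hypothetical; the real call sites are in uvm): the
|  * VM system invokes this when kernel VA must grow, and the new ceiling | | |  * VM system invokes this when kernel VA must grow, and the new ceiling
|  * comes back rounded up to a whole 1MB L1 section: | | |  * comes back rounded up to a whole 1MB L1 section:
|  * | | |  *
|  *	if (uvm_maxkaddr < va + size) | | |  *	if (uvm_maxkaddr < va + size)
|  *		uvm_maxkaddr = pmap_growkernel(va + size); | | |  *		uvm_maxkaddr = pmap_growkernel(va + size);
|  */ | | |  */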
4857 | | | 4875 | |
4858 | /************************ Utility routines ****************************/ | | 4876 | /************************ Utility routines ****************************/ |
4859 | | | 4877 | |
4860 | /* | | 4878 | /* |
4861 | * vector_page_setprot: | | 4879 | * vector_page_setprot: |
4862 | * | | 4880 | * |
4863 | * Manipulate the protection of the vector page. | | 4881 | * Manipulate the protection of the vector page. |
4864 | */ | | 4882 | */ |
4865 | void | | 4883 | void |
4866 | vector_page_setprot(int prot) | | 4884 | vector_page_setprot(int prot) |
4867 | { | | 4885 | { |
4868 | struct l2_bucket *l2b; | | 4886 | struct l2_bucket *l2b; |
4869 | pt_entry_t *ptep; | | 4887 | pt_entry_t *ptep; |
4870 | | | 4888 | |
4871 | l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); | | 4889 | l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); |
4872 | KDASSERT(l2b != NULL); | | 4890 | KDASSERT(l2b != NULL); |
4873 | | | 4891 | |
4874 | ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; | | 4892 | ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; |
4875 | | | 4893 | |
4876 | *ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); | | 4894 | *ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); |
4877 | PTE_SYNC(ptep); | | 4895 | PTE_SYNC(ptep); |
4878 | cpu_tlb_flushD_SE(vector_page); | | 4896 | cpu_tlb_flushD_SE(vector_page); |
4879 | cpu_cpwait(); | | 4897 | cpu_cpwait(); |
4880 | } | | 4898 | } |
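| | | |
| /* | | | /*
|  * Usage sketch (assumed; the callers live outside this file): the | | |  * Usage sketch (assumed; the callers live outside this file): the
|  * vector page normally stays read-only, so code that rewrites the | | |  * vector page normally stays read-only, so code that rewrites the
|  * exception vectors would bracket the stores: | | |  * exception vectors would bracket the stores:
|  * | | |  *
|  *	vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE); | | |  *	vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE);
|  *	... install the new vector instructions ... | | |  *	... install the new vector instructions ...
|  *	vector_page_setprot(VM_PROT_READ); | | |  *	vector_page_setprot(VM_PROT_READ);
|  */ | | |  */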
4881 | | | 4899 | |
4882 | /* | | 4900 | /* |
4883 | * Fetch pointers to the PDE/PTE for the given pmap/VA pair. | | 4901 | * Fetch pointers to the PDE/PTE for the given pmap/VA pair. |
4884 | * Returns true if the mapping exists, else false. | | 4902 | * Returns true if the mapping exists, else false. |
4885 | * | | 4903 | * |
4886 | * NOTE: This function is only used by a couple of arm-specific modules. | | 4904 | * NOTE: This function is only used by a couple of arm-specific modules. |
4887 | * It is not safe to take any pmap locks here, since we could be right | | 4905 | * It is not safe to take any pmap locks here, since we could be right |
4888 | * in the middle of debugging the pmap anyway... | | 4906 | * in the middle of debugging the pmap anyway... |
4889 | * | | 4907 | * |
4890 | * It is possible for this routine to return false even though a valid | | 4908 | * It is possible for this routine to return false even though a valid |
4891 | * mapping does exist. This is because we don't lock, so the metadata | | 4909 | * mapping does exist. This is because we don't lock, so the metadata |
4892 | * state may be inconsistent. | | 4910 | * state may be inconsistent. |
4893 | * | | 4911 | * |
4894 | * NOTE: We can return a NULL *ptp in the case where the L1 pde is | | 4912 | * NOTE: We can return a NULL *ptp in the case where the L1 pde is |
4895 | * a "section" mapping. | | 4913 | * a "section" mapping. |
4896 | */ | | 4914 | */ |
4897 | bool | | 4915 | bool |
4898 | pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp) | | 4916 | pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp) |
4899 | { | | 4917 | { |
4900 | struct l2_dtable *l2; | | 4918 | struct l2_dtable *l2; |
4901 | pd_entry_t *pl1pd, l1pd; | | 4919 | pd_entry_t *pl1pd, l1pd; |
4902 | pt_entry_t *ptep; | | 4920 | pt_entry_t *ptep; |
4903 | u_short l1idx; | | 4921 | u_short l1idx; |
4904 | | | 4922 | |
4905 | if (pm->pm_l1 == NULL) | | 4923 | if (pm->pm_l1 == NULL) |
4906 | return false; | | 4924 | return false; |
4907 | | | 4925 | |
4908 | l1idx = L1_IDX(va); | | 4926 | l1idx = L1_IDX(va); |
4909 | *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; | | 4927 | *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; |
4910 | l1pd = *pl1pd; | | 4928 | l1pd = *pl1pd; |
4911 | | | 4929 | |
4912 | if (l1pte_section_p(l1pd)) { | | 4930 | if (l1pte_section_p(l1pd)) { |
4913 | *ptp = NULL; | | 4931 | *ptp = NULL; |
4914 | return true; | | 4932 | return true; |
4915 | } | | 4933 | } |
4916 | | | 4934 | |
4917 | if (pm->pm_l2 == NULL) | | 4935 | if (pm->pm_l2 == NULL) |
4918 | return false; | | 4936 | return false; |
4919 | | | 4937 | |
4920 | l2 = pm->pm_l2[L2_IDX(l1idx)]; | | 4938 | l2 = pm->pm_l2[L2_IDX(l1idx)]; |
4921 | | | 4939 | |
4922 | if (l2 == NULL || | | 4940 | if (l2 == NULL || |
4923 | (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { | | 4941 | (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { |
4924 | return false; | | 4942 | return false; |
4925 | } | | 4943 | } |
4926 | | | 4944 | |
4927 | *ptp = &ptep[l2pte_index(va)]; | | 4945 | *ptp = &ptep[l2pte_index(va)]; |
4928 | return true; | | 4946 | return true; |
4929 | } | | 4947 | } |
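| | | |
| /* | | | /*
|  * A caller sketch (hypothetical) honouring both NOTEs above -- the | | |  * A caller sketch (hypothetical) honouring both NOTEs above -- the
|  * unlocked, possibly-stale metadata and the NULL *ptp section case: | | |  * unlocked, possibly-stale metadata and the NULL *ptp section case:
|  * | | |  *
|  *	pd_entry_t *pdep; | | |  *	pd_entry_t *pdep;
|  *	pt_entry_t *ptep; | | |  *	pt_entry_t *ptep;
|  *	if (!pmap_get_pde_pte(pm, va, &pdep, &ptep)) | | |  *	if (!pmap_get_pde_pte(pm, va, &pdep, &ptep))
|  *		return;		<- no mapping metadata found | | |  *		return;		<- no mapping metadata found
|  *	if (ptep == NULL) | | |  *	if (ptep == NULL)
|  *		... section mapping; the PA is derived from *pdep ... | | |  *		... section mapping; the PA is derived from *pdep ...
|  */ | | |  */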
4930 | | | 4948 | |
4931 | bool | | 4949 | bool |
4932 | pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp) | | 4950 | pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp) |
4933 | { | | 4951 | { |
4934 | u_short l1idx; | | 4952 | u_short l1idx; |
4935 | | | 4953 | |
4936 | if (pm->pm_l1 == NULL) | | 4954 | if (pm->pm_l1 == NULL) |
4937 | return false; | | 4955 | return false; |
4938 | | | 4956 | |
4939 | l1idx = L1_IDX(va); | | 4957 | l1idx = L1_IDX(va); |
4940 | *pdp = &pm->pm_l1->l1_kva[l1idx]; | | 4958 | *pdp = &pm->pm_l1->l1_kva[l1idx]; |
4941 | | | 4959 | |
4942 | return true; | | 4960 | return true; |
4943 | } | | 4961 | } |
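| | | |
| /* | | | /*
|  * Note the asymmetry with pmap_get_pde_pte(): this PDE-only flavour | | |  * Note the asymmetry with pmap_get_pde_pte(): this PDE-only flavour
|  * never inspects L2 state, so it returns true whenever the pmap has | | |  * never inspects L2 state, so it returns true whenever the pmap has
|  * an L1 at all; callers must still validate the pde type themselves. | | |  * an L1 at all; callers must still validate the pde type themselves.
|  */ | | |  */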
4944 | | | 4962 | |
4945 | /************************ Bootstrapping routines ****************************/ | | 4963 | /************************ Bootstrapping routines ****************************/ |
4946 | | | 4964 | |
4947 | static void | | 4965 | static void |
4948 | pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) | | 4966 | pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) |
4949 | { | | 4967 | { |
4950 | int i; | | 4968 | int i; |
4951 | | | 4969 | |
4952 | l1->l1_kva = l1pt; | | 4970 | l1->l1_kva = l1pt; |
4953 | l1->l1_domain_use_count = 0; | | 4971 | l1->l1_domain_use_count = 0; |
4954 | l1->l1_domain_first = 0; | | 4972 | l1->l1_domain_first = 0; |
4955 | | | 4973 | |
4956 | for (i = 0; i < PMAP_DOMAINS; i++) | | 4974 | for (i = 0; i < PMAP_DOMAINS; i++) |
4957 | l1->l1_domain_free[i] = i + 1; | | 4975 | l1->l1_domain_free[i] = i + 1; |
4958 | | | 4976 | |
4959 | /* | | 4977 | /* |
4960 | * Copy the kernel's L1 entries to each new L1. | | 4978 | * Copy the kernel's L1 entries to each new L1. |
4961 | */ | | 4979 | */ |
4962 | if (pmap_initialized) | | 4980 | if (pmap_initialized) |
4963 | memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE); | | 4981 | memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE); |
4964 | | | 4982 | |
4965 | if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt, | | 4983 | if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt, |
4966 | &l1->l1_physaddr) == false) | | 4984 | &l1->l1_physaddr) == false) |
4967 | panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); | | 4985 | panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); |
4968 | | | 4986 | |
4969 | SLIST_INSERT_HEAD(&l1_list, l1, l1_link); | | 4987 | SLIST_INSERT_HEAD(&l1_list, l1, l1_link); |
4970 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); | | 4988 | TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); |
4971 | } | | 4989 | } |
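| | | |
| /* | | | /*
|  * The l1_domain_free[] initialisation above threads an implicit free | | |  * The l1_domain_free[] initialisation above threads an implicit free
|  * list of hardware domain numbers through the array: l1_domain_first | | |  * list of hardware domain numbers through the array: l1_domain_first
|  * names the first free domain, and entry i names the one after i. | | |  * names the first free domain, and entry i names the one after i.
|  * A pop therefore looks like this sketch (the real allocator lives | | |  * A pop therefore looks like this sketch (the real allocator lives
|  * elsewhere in this file): | | |  * elsewhere in this file):
|  * | | |  *
|  *	dom = l1->l1_domain_first; | | |  *	dom = l1->l1_domain_first;
|  *	l1->l1_domain_first = l1->l1_domain_free[dom]; | | |  *	l1->l1_domain_first = l1->l1_domain_free[dom];
|  *	l1->l1_domain_use_count++; | | |  *	l1->l1_domain_use_count++;
|  */ | | |  */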
4972 | | | 4990 | |
4973 | /* | | 4991 | /* |
4974 | * pmap_bootstrap() is called from the board-specific initarm() routine | | 4992 | * pmap_bootstrap() is called from the board-specific initarm() routine |
4975 | * once the kernel L1/L2 descriptor tables have been set up. | | 4993 | * once the kernel L1/L2 descriptor tables have been set up.
4976 | * | | 4994 | * |
4977 | * This is a somewhat convoluted process since pmap bootstrap is, effectively, | | 4995 | * This is a somewhat convoluted process since pmap bootstrap is, effectively, |
4978 | * spread over a number of disparate files/functions. | | 4996 | * spread over a number of disparate files/functions. |
4979 | * | | 4997 | * |
4980 | * We work from the following inputs: | | 4998 | * We work from the following inputs:
4981 | * - kernel_l1pt (a global pv_addr_t, not a formal parameter) | | 4999 | * - kernel_l1pt (a global pv_addr_t, not a formal parameter)
4982 | * Its pv_va field gives the base of the kernel's L1 translation table. | | 5000 | * Its pv_va field gives the base of the kernel's L1 translation table.
4983 | * - vstart | | 5001 | * - vstart |
4984 | * 1MB-aligned start of managed kernel virtual memory. | | 5002 | * 1MB-aligned start of managed kernel virtual memory. |
4985 | * - vend | | 5003 | * - vend |
4986 | * 1MB-aligned end of managed kernel virtual memory. | | 5004 | * 1MB-aligned end of managed kernel virtual memory. |
4987 | * | | 5005 | * |
4988 | * We use the kernel L1 table to build the metadata (struct l1_ttable and | | 5006 | * We use the kernel L1 table to build the metadata (struct l1_ttable and
4989 | * struct l2_dtable) necessary to track kernel mappings. | | 5007 | * struct l2_dtable) necessary to track kernel mappings. |
4990 | */ | | 5008 | */ |
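| /* | | | /*
|  * A representative (hypothetical) call from a board's initarm() would | | |  * A representative (hypothetical) call from a board's initarm() would
|  * be something like: | | |  * be something like:
|  * | | |  *
|  *	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE); | | |  *	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);
|  * | | |  *
|  * with the global kernel_l1pt already filled in by early table setup. | | |  * with the global kernel_l1pt already filled in by early table setup.
|  */ | | |  */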
4991 | #define PMAP_STATIC_L2_SIZE 16 | | 5009 | #define PMAP_STATIC_L2_SIZE 16 |
4992 | void | | 5010 | void |
4993 | pmap_bootstrap(vaddr_t vstart, vaddr_t vend) | | 5011 | pmap_bootstrap(vaddr_t vstart, vaddr_t vend) |
4994 | { | | 5012 | { |
4995 | static struct l1_ttable static_l1; | | 5013 | static struct l1_ttable static_l1; |
4996 | static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; | | 5014 | static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; |
4997 | struct l1_ttable *l1 = &static_l1; | | 5015 | struct l1_ttable *l1 = &static_l1; |
4998 | struct l2_dtable *l2; | | 5016 | struct l2_dtable *l2; |
4999 | struct l2_bucket *l2b; | | 5017 | struct l2_bucket *l2b; |
5000 | pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va; | | 5018 | pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va; |
5001 | pmap_t pm = pmap_kernel(); | | 5019 | pmap_t pm = pmap_kernel(); |
5002 | pd_entry_t pde; | | 5020 | pd_entry_t pde; |
5003 | pt_entry_t *ptep; | | 5021 | pt_entry_t *ptep; |
5004 | paddr_t pa; | | 5022 | paddr_t pa; |
5005 | vaddr_t va; | | 5023 | vaddr_t va; |
5006 | vsize_t size; | | 5024 | vsize_t size; |
5007 | int nptes, l1idx, l2idx, l2next = 0; | | 5025 | int nptes, l1idx, l2idx, l2next = 0; |
5008 | | | 5026 | |
5009 | /* | | 5027 | /* |
5010 | * Initialise the kernel pmap object | | 5028 | * Initialise the kernel pmap object |
5011 | */ | | 5029 | */ |
5012 | pm->pm_l1 = l1; | | 5030 | pm->pm_l1 = l1; |
5013 | pm->pm_domain = PMAP_DOMAIN_KERNEL; | | 5031 | pm->pm_domain = PMAP_DOMAIN_KERNEL; |
5014 | pm->pm_activated = true; | | 5032 | pm->pm_activated = true; |
5015 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; | | 5033 | pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; |
5016 | UVM_OBJ_INIT(&pm->pm_obj, NULL, 1); | | 5034 | UVM_OBJ_INIT(&pm->pm_obj, NULL, 1); |
5017 | | | 5035 | |
5018 | /* | | 5036 | /* |
5019 | * Scan the L1 translation table created by initarm() and create | | 5037 | * Scan the L1 translation table created by initarm() and create |
5020 | * the required metadata for all valid mappings found in it. | | 5038 | * the required metadata for all valid mappings found in it. |
5021 | */ | | 5039 | */ |
5022 | for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { | | 5040 | for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { |
5023 | pde = l1pt[l1idx]; | | 5041 | pde = l1pt[l1idx]; |
5024 | | | 5042 | |
5025 | /* | | 5043 | /* |
5026 | * We're only interested in Coarse mappings. | | 5044 | * We're only interested in Coarse mappings. |
5027 | * pmap_extract() can deal with section mappings without | | 5045 | * pmap_extract() can deal with section mappings without |
5028 | * recourse to checking L2 metadata. | | 5046 | * recourse to checking L2 metadata. |
5029 | */ | | 5047 | */ |
5030 | if ((pde & L1_TYPE_MASK) != L1_TYPE_C) | | 5048 | if ((pde & L1_TYPE_MASK) != L1_TYPE_C) |
5031 | continue; | | 5049 | continue; |
5032 | | | 5050 | |
5033 | /* | | 5051 | /* |
5034 | * Look up the KVA of this L2 descriptor table | | 5052 | * Look up the KVA of this L2 descriptor table
5035 | */ | | 5053 | */ |
5036 | pa = (paddr_t)(pde & L1_C_ADDR_MASK); | | 5054 | pa = (paddr_t)(pde & L1_C_ADDR_MASK); |
5037 | ptep = (pt_entry_t *)kernel_pt_lookup(pa); | | 5055 | ptep = (pt_entry_t *)kernel_pt_lookup(pa); |
5038 | if (ptep == NULL) { | | 5056 | if (ptep == NULL) { |
5039 | panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", | | 5057 | panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", |
5040 | (u_int)l1idx << L1_S_SHIFT, pa); | | 5058 | (u_int)l1idx << L1_S_SHIFT, pa); |
5041 | } | | 5059 | } |
5042 | | | 5060 | |
5043 | /* | | 5061 | /* |
5044 | * Fetch the associated L2 metadata structure. | | 5062 | * Fetch the associated L2 metadata structure. |
5045 | * Allocate a new one if necessary. | | 5063 | * Allocate a new one if necessary. |
5046 | */ | | 5064 | */ |
5047 | if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { | | 5065 | if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { |
5048 | if (l2next == PMAP_STATIC_L2_SIZE) | | 5066 | if (l2next == PMAP_STATIC_L2_SIZE) |
5049 | panic("pmap_bootstrap: out of static L2s"); | | 5067 | panic("pmap_bootstrap: out of static L2s"); |
5050 | pm->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++]; | | 5068 | pm->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++]; |
5051 | } | | 5069 | } |
5052 | | | 5070 | |
5053 | /* | | 5071 | /* |
5054 | * One more L1 slot tracked... | | 5072 | * One more L1 slot tracked... |
5055 | */ | | 5073 | */ |
5056 | l2->l2_occupancy++; | | 5074 | l2->l2_occupancy++; |
5057 | | | 5075 | |
5058 | /* | | 5076 | /* |
5059 | * Fill in the details of the L2 descriptor in the | | 5077 | * Fill in the details of the L2 descriptor in the |
5060 | * appropriate bucket. | | 5078 | * appropriate bucket. |
5061 | */ | | 5079 | */ |
5062 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; | | 5080 | l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; |
5063 | l2b->l2b_kva = ptep; | | 5081 | l2b->l2b_kva = ptep; |
5064 | l2b->l2b_phys = pa; | | 5082 | l2b->l2b_phys = pa; |
5065 | l2b->l2b_l1idx = l1idx; | | 5083 | l2b->l2b_l1idx = l1idx; |
5066 | | | 5084 | |
5067 | /* | | 5085 | /* |
5068 | * Establish an initial occupancy count for this descriptor | | 5086 | * Establish an initial occupancy count for this descriptor |
5069 | */ | | 5087 | */ |
5070 | for (l2idx = 0; | | 5088 | for (l2idx = 0; |
5071 | l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); | | 5089 | l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); |
5072 | l2idx++) { | | 5090 | l2idx++) { |
5073 | if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { | | 5091 | if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { |
5074 | l2b->l2b_occupancy++; | | 5092 | l2b->l2b_occupancy++; |
5075 | } | | 5093 | } |
5076 | } | | 5094 | } |
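| /* | | | /*
|  * These counts are what later allow the pmap to free an L2 page | | |  * These counts are what later allow the pmap to free an L2 page
|  * table once its last valid PTE is removed, so every valid PTE | | |  * table once its last valid PTE is removed, so every valid PTE
|  * must be counted exactly once here. | | |  * must be counted exactly once here.
|  */ | | |  */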
5077 | | | 5095 | |
5078 | /* | | 5096 | /* |
5079 | * Make sure the descriptor itself has the correct cache mode. | | 5097 | * Make sure the descriptor itself has the correct cache mode. |
5080 | * If not, fix it, but whine about the problem. Port-meisters | | 5098 | * If not, fix it, but whine about the problem. Port-meisters |
5081 | * should consider this a clue to fix up their initarm() | | 5099 | * should consider this a clue to fix up their initarm() |
5082 | * function. :) | | 5100 | * function. :) |
5083 | */ | | 5101 | */ |
5084 | if (pmap_set_pt_cache_mode(l1pt, (vaddr_t)ptep)) { | | 5102 | if (pmap_set_pt_cache_mode(l1pt, (vaddr_t)ptep)) { |