| @@ -1,3916 +1,3926 @@ | | | @@ -1,3916 +1,3926 @@ |
1 | /* $NetBSD: pmap.c,v 1.287 2021/05/30 06:41:19 thorpej Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.288 2021/05/30 13:34:21 thorpej Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020 | | 4 | * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020 |
5 | * The NetBSD Foundation, Inc. | | 5 | * The NetBSD Foundation, Inc. |
6 | * All rights reserved. | | 6 | * All rights reserved. |
7 | * | | 7 | * |
8 | * This code is derived from software contributed to The NetBSD Foundation | | 8 | * This code is derived from software contributed to The NetBSD Foundation |
9 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 9 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
10 | * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius, | | 10 | * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius, |
11 | * and by Chris G. Demetriou. | | 11 | * and by Chris G. Demetriou. |
12 | * | | 12 | * |
13 | * Redistribution and use in source and binary forms, with or without | | 13 | * Redistribution and use in source and binary forms, with or without |
14 | * modification, are permitted provided that the following conditions | | 14 | * modification, are permitted provided that the following conditions |
15 | * are met: | | 15 | * are met: |
16 | * 1. Redistributions of source code must retain the above copyright | | 16 | * 1. Redistributions of source code must retain the above copyright |
17 | * notice, this list of conditions and the following disclaimer. | | 17 | * notice, this list of conditions and the following disclaimer. |
18 | * 2. Redistributions in binary form must reproduce the above copyright | | 18 | * 2. Redistributions in binary form must reproduce the above copyright |
19 | * notice, this list of conditions and the following disclaimer in the | | 19 | * notice, this list of conditions and the following disclaimer in the |
20 | * documentation and/or other materials provided with the distribution. | | 20 | * documentation and/or other materials provided with the distribution. |
21 | * | | 21 | * |
22 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 22 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
23 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 23 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
24 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 24 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
25 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 25 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
26 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 26 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
32 | * POSSIBILITY OF SUCH DAMAGE. | | 32 | * POSSIBILITY OF SUCH DAMAGE. |
33 | */ | | 33 | */ |
34 | | | 34 | |
35 | /* | | 35 | /* |
36 | * Copyright (c) 1991, 1993 | | 36 | * Copyright (c) 1991, 1993 |
37 | * The Regents of the University of California. All rights reserved. | | 37 | * The Regents of the University of California. All rights reserved. |
38 | * | | 38 | * |
39 | * This code is derived from software contributed to Berkeley by | | 39 | * This code is derived from software contributed to Berkeley by |
40 | * the Systems Programming Group of the University of Utah Computer | | 40 | * the Systems Programming Group of the University of Utah Computer |
41 | * Science Department. | | 41 | * Science Department. |
42 | * | | 42 | * |
43 | * Redistribution and use in source and binary forms, with or without | | 43 | * Redistribution and use in source and binary forms, with or without |
44 | * modification, are permitted provided that the following conditions | | 44 | * modification, are permitted provided that the following conditions |
45 | * are met: | | 45 | * are met: |
46 | * 1. Redistributions of source code must retain the above copyright | | 46 | * 1. Redistributions of source code must retain the above copyright |
47 | * notice, this list of conditions and the following disclaimer. | | 47 | * notice, this list of conditions and the following disclaimer. |
48 | * 2. Redistributions in binary form must reproduce the above copyright | | 48 | * 2. Redistributions in binary form must reproduce the above copyright |
49 | * notice, this list of conditions and the following disclaimer in the | | 49 | * notice, this list of conditions and the following disclaimer in the |
50 | * documentation and/or other materials provided with the distribution. | | 50 | * documentation and/or other materials provided with the distribution. |
51 | * 3. Neither the name of the University nor the names of its contributors | | 51 | * 3. Neither the name of the University nor the names of its contributors |
52 | * may be used to endorse or promote products derived from this software | | 52 | * may be used to endorse or promote products derived from this software |
53 | * without specific prior written permission. | | 53 | * without specific prior written permission. |
54 | * | | 54 | * |
55 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 55 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
56 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 56 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
57 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 57 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
58 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 58 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
59 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 59 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
60 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 60 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
61 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 61 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
62 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 62 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
63 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 63 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
64 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 64 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
65 | * SUCH DAMAGE. | | 65 | * SUCH DAMAGE. |
66 | * | | 66 | * |
67 | * @(#)pmap.c 8.6 (Berkeley) 5/27/94 | | 67 | * @(#)pmap.c 8.6 (Berkeley) 5/27/94 |
68 | */ | | 68 | */ |
69 | | | 69 | |
70 | /* | | 70 | /* |
71 | * DEC Alpha physical map management code. | | 71 | * DEC Alpha physical map management code. |
72 | * | | 72 | * |
73 | * History: | | 73 | * History: |
74 | * | | 74 | * |
75 | * This pmap started life as a Motorola 68851/68030 pmap, | | 75 | * This pmap started life as a Motorola 68851/68030 pmap, |
76 | * written by Mike Hibler at the University of Utah. | | 76 | * written by Mike Hibler at the University of Utah. |
77 | * | | 77 | * |
78 | * It was modified for the DEC Alpha by Chris Demetriou | | 78 | * It was modified for the DEC Alpha by Chris Demetriou |
79 | * at Carnegie Mellon University. | | 79 | * at Carnegie Mellon University. |
80 | * | | 80 | * |
81 | * Support for non-contiguous physical memory was added by | | 81 | * Support for non-contiguous physical memory was added by |
82 | * Jason R. Thorpe of the Numerical Aerospace Simulation | | 82 | * Jason R. Thorpe of the Numerical Aerospace Simulation |
83 | * Facility, NASA Ames Research Center and Chris Demetriou. | | 83 | * Facility, NASA Ames Research Center and Chris Demetriou. |
84 | * | | 84 | * |
85 | * Page table management and a major cleanup were undertaken | | 85 | * Page table management and a major cleanup were undertaken |
86 | * by Jason R. Thorpe, with lots of help from Ross Harvey of | | 86 | * by Jason R. Thorpe, with lots of help from Ross Harvey of |
87 | * Avalon Computer Systems and from Chris Demetriou. | | 87 | * Avalon Computer Systems and from Chris Demetriou. |
88 | * | | 88 | * |
89 | * Support for the new UVM pmap interface was written by | | 89 | * Support for the new UVM pmap interface was written by |
90 | * Jason R. Thorpe. | | 90 | * Jason R. Thorpe. |
91 | * | | 91 | * |
92 | * Support for ASNs was written by Jason R. Thorpe, again | | 92 | * Support for ASNs was written by Jason R. Thorpe, again |
93 | * with help from Chris Demetriou and Ross Harvey. | | 93 | * with help from Chris Demetriou and Ross Harvey. |
94 | * | | 94 | * |
95 | * The locking protocol was written by Jason R. Thorpe, | | 95 | * The locking protocol was written by Jason R. Thorpe, |
96 | * using Chuck Cranor's i386 pmap for UVM as a model. | | 96 | * using Chuck Cranor's i386 pmap for UVM as a model. |
97 | * | | 97 | * |
98 | * TLB shootdown code was written (and then subsequently | | 98 | * TLB shootdown code was written (and then subsequently |
99 | * rewritten some years later, borrowing some ideas from | | 99 | * rewritten some years later, borrowing some ideas from |
100 | * the x86 pmap) by Jason R. Thorpe. | | 100 | * the x86 pmap) by Jason R. Thorpe. |
101 | * | | 101 | * |
102 | * Multiprocessor modifications by Andrew Doran and | | 102 | * Multiprocessor modifications by Andrew Doran and |
103 | * Jason R. Thorpe. | | 103 | * Jason R. Thorpe. |
104 | * | | 104 | * |
105 | * Notes: | | 105 | * Notes: |
106 | * | | 106 | * |
107 | * All user page table access is done via K0SEG. Kernel | | 107 | * All user page table access is done via K0SEG. Kernel |
108 | * page table access is done via the recursive Virtual Page | | 108 | * page table access is done via the recursive Virtual Page |
109 | * Table because kernel PT pages are pre-allocated and never | | 109 | * Table because kernel PT pages are pre-allocated and never |
110 | * freed, so no VPT fault handling is required. | | 110 | * freed, so no VPT fault handling is required. |
111 | */ | | 111 | */ |
112 | | | 112 | |
113 | /* | | 113 | /* |
114 | * Manages physical address maps. | | 114 | * Manages physical address maps. |
115 | * | | 115 | * |
116 | * Since the information managed by this module is | | 116 | * Since the information managed by this module is |
117 | * also stored by the logical address mapping module, | | 117 | * also stored by the logical address mapping module, |
118 | * this module may throw away valid virtual-to-physical | | 118 | * this module may throw away valid virtual-to-physical |
119 | * mappings at almost any time. However, invalidations | | 119 | * mappings at almost any time. However, invalidations |
120 | * of virtual-to-physical mappings must be done as | | 120 | * of virtual-to-physical mappings must be done as |
121 | * requested. | | 121 | * requested. |
122 | * | | 122 | * |
123 | * In order to cope with hardware architectures which | | 123 | * In order to cope with hardware architectures which |
124 | * make virtual-to-physical map invalidates expensive, | | 124 | * make virtual-to-physical map invalidates expensive, |
125 | * this module may delay invalidation or reduced-protection | | 125 | * this module may delay invalidation or reduced-protection |
126 | * operations until such time as they are actually | | 126 | * operations until such time as they are actually |
127 | * necessary. This module is given full information as | | 127 | * necessary. This module is given full information as |
128 | * to which processors are currently using which maps, | | 128 | * to which processors are currently using which maps, |
129 | * and to when physical maps must be made correct. | | 129 | * and to when physical maps must be made correct. |
130 | */ | | 130 | */ |
131 | | | 131 | |
132 | #include "opt_lockdebug.h" | | 132 | #include "opt_lockdebug.h" |
133 | #include "opt_sysv.h" | | 133 | #include "opt_sysv.h" |
134 | #include "opt_multiprocessor.h" | | 134 | #include "opt_multiprocessor.h" |
135 | | | 135 | |
136 | #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ | | 136 | #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ |
137 | | | 137 | |
138 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.287 2021/05/30 06:41:19 thorpej Exp $"); | | 138 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.288 2021/05/30 13:34:21 thorpej Exp $"); |
139 | | | 139 | |
140 | #include <sys/param.h> | | 140 | #include <sys/param.h> |
141 | #include <sys/systm.h> | | 141 | #include <sys/systm.h> |
142 | #include <sys/kernel.h> | | 142 | #include <sys/kernel.h> |
143 | #include <sys/proc.h> | | 143 | #include <sys/proc.h> |
144 | #include <sys/malloc.h> | | 144 | #include <sys/malloc.h> |
145 | #include <sys/pool.h> | | 145 | #include <sys/pool.h> |
146 | #include <sys/buf.h> | | 146 | #include <sys/buf.h> |
147 | #include <sys/evcnt.h> | | 147 | #include <sys/evcnt.h> |
148 | #include <sys/atomic.h> | | 148 | #include <sys/atomic.h> |
149 | #include <sys/cpu.h> | | 149 | #include <sys/cpu.h> |
150 | | | 150 | |
151 | #include <uvm/uvm.h> | | 151 | #include <uvm/uvm.h> |
152 | | | 152 | |
153 | #if defined(MULTIPROCESSOR) | | 153 | #if defined(MULTIPROCESSOR) |
154 | #include <machine/rpb.h> | | 154 | #include <machine/rpb.h> |
155 | #endif | | 155 | #endif |
156 | | | 156 | |
157 | #ifdef DEBUG | | 157 | #ifdef DEBUG |
158 | #define PDB_FOLLOW 0x0001 | | 158 | #define PDB_FOLLOW 0x0001 |
159 | #define PDB_INIT 0x0002 | | 159 | #define PDB_INIT 0x0002 |
160 | #define PDB_ENTER 0x0004 | | 160 | #define PDB_ENTER 0x0004 |
161 | #define PDB_REMOVE 0x0008 | | 161 | #define PDB_REMOVE 0x0008 |
162 | #define PDB_CREATE 0x0010 | | 162 | #define PDB_CREATE 0x0010 |
163 | #define PDB_PTPAGE 0x0020 | | 163 | #define PDB_PTPAGE 0x0020 |
164 | #define PDB_ASN 0x0040 | | 164 | #define PDB_ASN 0x0040 |
165 | #define PDB_BITS 0x0080 | | 165 | #define PDB_BITS 0x0080 |
166 | #define PDB_COLLECT 0x0100 | | 166 | #define PDB_COLLECT 0x0100 |
167 | #define PDB_PROTECT 0x0200 | | 167 | #define PDB_PROTECT 0x0200 |
168 | #define PDB_BOOTSTRAP 0x1000 | | 168 | #define PDB_BOOTSTRAP 0x1000 |
169 | #define PDB_PARANOIA 0x2000 | | 169 | #define PDB_PARANOIA 0x2000 |
170 | #define PDB_WIRING 0x4000 | | 170 | #define PDB_WIRING 0x4000 |
171 | #define PDB_PVDUMP 0x8000 | | 171 | #define PDB_PVDUMP 0x8000 |
172 | | | 172 | |
173 | int debugmap = 0; | | 173 | int debugmap = 0; |
174 | int pmapdebug = PDB_PARANOIA; | | 174 | int pmapdebug = PDB_PARANOIA; |
175 | #endif | | 175 | #endif |
176 | | | 176 | |
177 | #if defined(MULTIPROCESSOR) | | 177 | #if defined(MULTIPROCESSOR) |
178 | #define PMAP_MP(x) x | | 178 | #define PMAP_MP(x) x |
179 | #else | | 179 | #else |
180 | #define PMAP_MP(x) __nothing | | 180 | #define PMAP_MP(x) __nothing |
181 | #endif /* MULTIPROCESSOR */ | | 181 | #endif /* MULTIPROCESSOR */ |
182 | | | 182 | |
183 | /* | | 183 | /* |
184 | * Given a map and a machine independent protection code, | | 184 | * Given a map and a machine independent protection code, |
185 | * convert to an alpha protection code. | | 185 | * convert to an alpha protection code. |
186 | */ | | 186 | */ |
187 | #define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p]) | | 187 | #define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p]) |
188 | static int protection_codes[2][8] __read_mostly; | | 188 | static int protection_codes[2][8] __read_mostly; |
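To make the table lookup concrete: the first index selects kernel (0) or user (1) mappings, and the second is the 3-bit machine-independent VM_PROT_* value. The standalone sketch below fills such a table. The VM_PROT_* and PG_* values here are illustrative stand-ins (the real definitions live in <uvm/uvm_prot.h> and <machine/pte.h>), and the exact bit combinations installed by the real pmap_bootstrap() differ:

    #include <stdio.h>

    /* Illustrative stand-ins for the MI protection bits. */
    #define VM_PROT_READ	0x1
    #define VM_PROT_WRITE	0x2
    #define VM_PROT_EXECUTE	0x4

    /* Illustrative stand-ins for Alpha PTE protection bits. */
    #define PG_KRE	0x0100		/* kernel read enable */
    #define PG_KWE	0x1000		/* kernel write enable */
    #define PG_URE	0x0200		/* user read enable */
    #define PG_UWE	0x2000		/* user write enable */

    static int protection_codes[2][8];

    static void
    init_protection_codes(void)
    {
    	for (int prot = 0; prot < 8; prot++) {
    		/* Row 0: kernel mappings. */
    		protection_codes[0][prot] =
    		    ((prot & VM_PROT_READ) ? PG_KRE : 0) |
    		    ((prot & VM_PROT_WRITE) ? PG_KWE : 0);
    		/* Row 1: user mappings (kept kernel-readable). */
    		protection_codes[1][prot] = PG_KRE |
    		    ((prot & VM_PROT_READ) ? PG_URE : 0) |
    		    ((prot & VM_PROT_WRITE) ? PG_UWE : 0);
    	}
    }

    int
    main(void)
    {
    	init_protection_codes();
    	printf("user r/w -> 0x%x\n",
    	    protection_codes[1][VM_PROT_READ | VM_PROT_WRITE]);
    	return 0;
    }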
189 | | | 189 | |
190 | /* | | 190 | /* |
191 | * kernel_lev1map: | | 191 | * kernel_lev1map: |
192 | * | | 192 | * |
193 | * Kernel level 1 page table. This maps all kernel level 2 | | 193 | * Kernel level 1 page table. This maps all kernel level 2 |
194 | * page table pages, and is used as a template for all user | | 194 | * page table pages, and is used as a template for all user |
195 | * pmap level 1 page tables. When a new user level 1 page | | 195 | * pmap level 1 page tables. When a new user level 1 page |
196 | * table is allocated, all kernel_lev1map PTEs for kernel | | 196 | * table is allocated, all kernel_lev1map PTEs for kernel |
197 | * addresses are copied to the new map. | | 197 | * addresses are copied to the new map. |
198 | * | | 198 | * |
199 | * The kernel also has an initial set of kernel level 2 page | | 199 | * The kernel also has an initial set of kernel level 2 page |
200 | * table pages. These map the kernel level 3 page table pages. | | 200 | * table pages. These map the kernel level 3 page table pages. |
201 | * As kernel level 3 page table pages are added, more level 2 | | 201 | * As kernel level 3 page table pages are added, more level 2 |
202 | * page table pages may be added to map them. These pages are | | 202 | * page table pages may be added to map them. These pages are |
203 | * never freed. | | 203 | * never freed. |
204 | * | | 204 | * |
205 | * Finally, the kernel also has an initial set of kernel level | | 205 | * Finally, the kernel also has an initial set of kernel level |
206 | * 3 page table pages. These map pages in K1SEG. More level | | 206 | * 3 page table pages. These map pages in K1SEG. More level |
207 | * 3 page table pages may be added at run-time if additional | | 207 | * 3 page table pages may be added at run-time if additional |
208 | * K1SEG address space is required. These pages are never freed. | | 208 | * K1SEG address space is required. These pages are never freed. |
209 | * | | 209 | * |
210 | * NOTE: When mappings are inserted into the kernel pmap, all | | 210 | * NOTE: When mappings are inserted into the kernel pmap, all |
211 | * level 2 and level 3 page table pages must already be allocated | | 211 | * level 2 and level 3 page table pages must already be allocated |
212 | * and mapped into the parent page table. | | 212 | * and mapped into the parent page table. |
213 | */ | | 213 | */ |
214 | pt_entry_t *kernel_lev1map __read_mostly; | | 214 | pt_entry_t *kernel_lev1map __read_mostly; |
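To make the "copied to the new map" step concrete, here is a minimal standalone sketch of initializing a fresh user level 1 table from kernel_lev1map. NPTEPG and KERN_L1_FIRST are illustrative stand-ins; the real code derives the boundary with l1pte_index(VM_MIN_KERNEL_ADDRESS) and also installs the recursive VPT entry:

    #include <stdint.h>
    #include <string.h>

    typedef uint64_t pt_entry_t;

    #define NPTEPG		1024	/* PTEs per 8K page-table page */
    #define KERN_L1_FIRST	512	/* first L1 slot for kernel VAs (stand-in) */

    /*
     * A new user lev1map: user slots start out invalid, kernel slots
     * are cloned from kernel_lev1map, so all pmaps share the kernel
     * L2/L3 page table pages by reference.
     */
    static void
    user_lev1map_init(pt_entry_t *new_l1, const pt_entry_t *kernel_l1)
    {
    	memset(new_l1, 0, KERN_L1_FIRST * sizeof(pt_entry_t));
    	memcpy(&new_l1[KERN_L1_FIRST], &kernel_l1[KERN_L1_FIRST],
    	    (NPTEPG - KERN_L1_FIRST) * sizeof(pt_entry_t));
    }

Because the kernel slots are shared by reference, adding a brand-new kernel L1 entry later requires propagating it to every user level 1 table, which is one reason the global pmap_all_pmaps list below exists.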
215 | | | 215 | |
216 | /* | | 216 | /* |
217 | * Virtual Page Table. | | 217 | * Virtual Page Table. |
218 | */ | | 218 | */ |
219 | static pt_entry_t *VPT __read_mostly; | | 219 | static pt_entry_t *VPT __read_mostly; |
220 | | | 220 | |
221 | static struct { | | 221 | static struct { |
222 | struct pmap k_pmap; | | 222 | struct pmap k_pmap; |
223 | } kernel_pmap_store __cacheline_aligned; | | 223 | } kernel_pmap_store __cacheline_aligned; |
224 | | | 224 | |
225 | struct pmap *const kernel_pmap_ptr = &kernel_pmap_store.k_pmap; | | 225 | struct pmap *const kernel_pmap_ptr = &kernel_pmap_store.k_pmap; |
226 | | | 226 | |
227 | /* PA of first available physical page */ | | 227 | /* PA of first available physical page */ |
228 | paddr_t avail_start __read_mostly; | | 228 | paddr_t avail_start __read_mostly; |
229 | | | 229 | |
230 | /* PA of last available physical page */ | | 230 | /* PA of last available physical page */ |
231 | paddr_t avail_end __read_mostly; | | 231 | paddr_t avail_end __read_mostly; |
232 | | | 232 | |
233 | /* VA of last avail page (end of kernel AS) */ | | 233 | /* VA of last avail page (end of kernel AS) */ |
234 | static vaddr_t virtual_end __read_mostly; | | 234 | static vaddr_t virtual_end __read_mostly; |
235 | | | 235 | |
236 | /* Has pmap_init completed? */ | | 236 | /* Has pmap_init completed? */ |
237 | static bool pmap_initialized __read_mostly; | | 237 | static bool pmap_initialized __read_mostly; |
238 | | | 238 | |
239 | /* Instrumentation */ | | 239 | /* Instrumentation */ |
240 | u_long pmap_pages_stolen __read_mostly; | | 240 | u_long pmap_pages_stolen __read_mostly; |
241 | | | 241 | |
242 | /* | | 242 | /* |
243 | * This variable contains the number of CPU IDs we need to allocate | | 243 | * This variable contains the number of CPU IDs we need to allocate |
244 | * space for when allocating the pmap structure. It is used to | | 244 | * space for when allocating the pmap structure. It is used to |
245 | * size a per-CPU array of ASN and ASN generation numbers. | | 245 | * size a per-CPU array of ASN and ASN generation numbers. |
246 | */ | | 246 | */ |
247 | static u_long pmap_ncpuids __read_mostly; | | 247 | static u_long pmap_ncpuids __read_mostly; |
248 | | | 248 | |
249 | #ifndef PMAP_PV_LOWAT | | 249 | #ifndef PMAP_PV_LOWAT |
250 | #define PMAP_PV_LOWAT 16 | | 250 | #define PMAP_PV_LOWAT 16 |
251 | #endif | | 251 | #endif |
252 | int pmap_pv_lowat __read_mostly = PMAP_PV_LOWAT; | | 252 | int pmap_pv_lowat __read_mostly = PMAP_PV_LOWAT; |
253 | | | 253 | |
254 | /* | | 254 | /* |
255 | * List of all pmaps, used to update them when e.g. additional kernel | | 255 | * List of all pmaps, used to update them when e.g. additional kernel |
256 | * page tables are allocated. This list is kept LRU-ordered by | | 256 | * page tables are allocated. This list is kept LRU-ordered by |
257 | * pmap_activate(). | | 257 | * pmap_activate(). |
258 | */ | | 258 | */ |
259 | static TAILQ_HEAD(, pmap) pmap_all_pmaps __cacheline_aligned; | | 259 | static TAILQ_HEAD(, pmap) pmap_all_pmaps __cacheline_aligned; |
260 | | | 260 | |
261 | /* | | 261 | /* |
262 | * The pools from which pmap structures and sub-structures are allocated. | | 262 | * The pools from which pmap structures and sub-structures are allocated. |
263 | */ | | 263 | */ |
264 | static struct pool_cache pmap_pmap_cache __read_mostly; | | 264 | static struct pool_cache pmap_pmap_cache __read_mostly; |
265 | static struct pool_cache pmap_l1pt_cache __read_mostly; | | 265 | static struct pool_cache pmap_l1pt_cache __read_mostly; |
266 | static struct pool_cache pmap_pv_cache __read_mostly; | | 266 | static struct pool_cache pmap_pv_cache __read_mostly; |
267 | | | 267 | |
268 | CTASSERT(offsetof(struct pmap, pm_percpu[0]) == COHERENCY_UNIT); | | 268 | CTASSERT(offsetof(struct pmap, pm_percpu[0]) == COHERENCY_UNIT); |
269 | CTASSERT(PMAP_SIZEOF(ALPHA_MAXPROCS) < ALPHA_PGBYTES); | | 269 | CTASSERT(PMAP_SIZEOF(ALPHA_MAXPROCS) < ALPHA_PGBYTES); |
270 | CTASSERT(sizeof(struct pmap_percpu) == COHERENCY_UNIT); | | 270 | CTASSERT(sizeof(struct pmap_percpu) == COHERENCY_UNIT); |
271 | | | 271 | |
272 | /* | | 272 | /* |
273 | * Address Space Numbers. | | 273 | * Address Space Numbers. |
274 | * | | 274 | * |
275 | * On many implementations of the Alpha architecture, the TLB entries and | | 275 | * On many implementations of the Alpha architecture, the TLB entries and |
276 | * I-cache blocks are tagged with a unique number within an implementation- | | 276 | * I-cache blocks are tagged with a unique number within an implementation- |
277 | * specified range. When a process context becomes active, the ASN is used | | 277 | * specified range. When a process context becomes active, the ASN is used |
278 | * to match TLB entries; if a TLB entry for a particular VA does not match | | 278 | * to match TLB entries; if a TLB entry for a particular VA does not match |
279 | * the current ASN, it is ignored (one could think of the processor as | | 279 | * the current ASN, it is ignored (one could think of the processor as |
280 | * having a collection of <max ASN> separate TLBs). This allows operating | | 280 | * having a collection of <max ASN> separate TLBs). This allows operating |
281 | * system software to skip the TLB flush that would otherwise be necessary | | 281 | * system software to skip the TLB flush that would otherwise be necessary |
282 | * at context switch time. | | 282 | * at context switch time. |
283 | * | | 283 | * |
284 | * Alpha PTEs have a bit in them (PG_ASM - Address Space Match) that | | 284 | * Alpha PTEs have a bit in them (PG_ASM - Address Space Match) that |
285 | * causes TLB entries to match any ASN. The PALcode also provides | | 285 | * causes TLB entries to match any ASN. The PALcode also provides |
286 | * a TBI (Translation Buffer Invalidate) operation that flushes all | | 286 | * a TBI (Translation Buffer Invalidate) operation that flushes all |
287 | * TLB entries that _do not_ have PG_ASM. We use this bit for kernel | | 287 | * TLB entries that _do not_ have PG_ASM. We use this bit for kernel |
288 | * mappings, so that invalidation of all user mappings does not invalidate | | 288 | * mappings, so that invalidation of all user mappings does not invalidate |
289 | * kernel mappings (which are consistent across all processes). | | 289 | * kernel mappings (which are consistent across all processes). |
290 | * | | 290 | * |
291 | * pmap_next_asn always indicates the next ASN to use. When | | 291 | * pmap_next_asn always indicates the next ASN to use. When |
292 | * pmap_next_asn exceeds pmap_max_asn, we start a new ASN generation. | | 292 | * pmap_next_asn exceeds pmap_max_asn, we start a new ASN generation. |
293 | * | | 293 | * |
294 | * When a new ASN generation is created, the per-process (i.e. non-PG_ASM) | | 294 | * When a new ASN generation is created, the per-process (i.e. non-PG_ASM) |
295 | * TLB entries and the I-cache are flushed, the generation number is bumped, | | 295 | * TLB entries and the I-cache are flushed, the generation number is bumped, |
296 | * and pmap_next_asn is changed to indicate the first non-reserved ASN. | | 296 | * and pmap_next_asn is changed to indicate the first non-reserved ASN. |
297 | * | | 297 | * |
298 | * We reserve ASN #0 for pmaps that use the global kernel_lev1map. This | | 298 | * We reserve ASN #0 for pmaps that use the global kernel_lev1map. This |
299 | * ensures that LWPs using the kernel pmap make no accidental accesses | | 299 | * ensures that LWPs using the kernel pmap make no accidental accesses |
300 | * to user space. This is important because | | 300 | * to user space. This is important because |
301 | * the PALcode may use the recursive VPT to service TLB misses. | | 301 | * the PALcode may use the recursive VPT to service TLB misses. |
302 | * | | 302 | * |
303 | * By reserving an ASN for the kernel, we are guaranteeing that an lwp | | 303 | * By reserving an ASN for the kernel, we are guaranteeing that an lwp |
304 | * will not see any valid user space TLB entries until it passes through | | 304 | * will not see any valid user space TLB entries until it passes through |
305 | * pmap_activate() for the first time. | | 305 | * pmap_activate() for the first time. |
306 | * | | 306 | * |
307 | * On processors that do not support ASNs, the PALcode invalidates | | 307 | * On processors that do not support ASNs, the PALcode invalidates |
308 | * non-ASM TLB entries automatically on swpctx. We completely skip | | 308 | * non-ASM TLB entries automatically on swpctx. We completely skip |
309 | * the ASN machinery in this case because the PALcode neither reads | | 309 | * the ASN machinery in this case because the PALcode neither reads |
310 | * nor writes that field of the HWPCB. | | 310 | * nor writes that field of the HWPCB. |
311 | */ | | 311 | */ |
312 | | | 312 | |
313 | /* max ASN supported by the system */ | | 313 | /* max ASN supported by the system */ |
314 | static u_int pmap_max_asn __read_mostly; | | 314 | static u_int pmap_max_asn __read_mostly; |
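The generation scheme described above can be modeled in isolation. In this sketch the names are illustrative (the real per-CPU state lives in struct pmap_percpu and the CPU's cpu_info); an address space's cached ASN is usable only while its recorded generation matches the CPU's current generation:

    #include <stdint.h>

    #define ASN_RESERVED_KERNEL	0	/* pmaps on kernel_lev1map */
    #define ASN_FIRST_USER	1	/* first non-reserved ASN */

    struct cpu_asn_state {
    	uint32_t next_asn;	/* next ASN to hand out */
    	uint64_t asn_gen;	/* current ASN generation */
    };

    static uint32_t
    asn_alloc(struct cpu_asn_state *st, uint32_t max_asn, uint64_t *gen)
    {
    	if (st->next_asn > max_asn) {
    		/*
    		 * ASNs exhausted: this is the point where the real
    		 * code flushes the non-ASM TLB entries and the
    		 * I-cache before starting a new generation.
    		 */
    		st->asn_gen++;
    		st->next_asn = ASN_FIRST_USER;
    	}
    	*gen = st->asn_gen;
    	return st->next_asn++;
    }

A pmap re-activated on the same CPU keeps its ASN as long as its stored generation still equals asn_gen; otherwise it is handed a fresh one as above.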
315 | | | 315 | |
316 | /* | | 316 | /* |
317 | * Locking: | | 317 | * Locking: |
318 | * | | 318 | * |
319 | * READ/WRITE LOCKS | | 319 | * READ/WRITE LOCKS |
320 | * ---------------- | | 320 | * ---------------- |
321 | * | | 321 | * |
322 | * * pmap_main_lock - This lock is used to prevent deadlock and/or | | 322 | * * pmap_main_lock - This lock is used to prevent deadlock and/or |
323 | * provide mutex access to the pmap module. Most operations lock | | 323 | * provide mutex access to the pmap module. Most operations lock |
324 | * the pmap first, then PV lists as needed. However, some operations, | | 324 | * the pmap first, then PV lists as needed. However, some operations, |
325 | * such as pmap_page_protect(), lock the PV lists before locking | | 325 | * such as pmap_page_protect(), lock the PV lists before locking |
326 | * the pmaps. To prevent deadlock, we require a mutex lock on the | | 326 | * the pmaps. To prevent deadlock, we require a mutex lock on the |
327 | * pmap module if locking in the PV->pmap direction. This is | | 327 | * pmap module if locking in the PV->pmap direction. This is |
328 | * implemented by acquiring a (shared) read lock on pmap_main_lock | | 328 | * implemented by acquiring a (shared) read lock on pmap_main_lock |
329 | * if locking pmap->PV and a (exclusive) write lock if locking in | | 329 | * if locking pmap->PV and a (exclusive) write lock if locking in |
330 | * the PV->pmap direction. Since only one thread can hold a write | | 330 | * the PV->pmap direction. Since only one thread can hold a write |
331 | * lock at a time, this provides the mutex. | | 331 | * lock at a time, this provides the mutex. |
332 | * | | 332 | * |
333 | * MUTEXES | | 333 | * MUTEXES |
334 | * ------- | | 334 | * ------- |
335 | * | | 335 | * |
336 | * * pmap lock (global hash) - These locks protect the pmap structures. | | 336 | * * pmap lock (global hash) - These locks protect the pmap structures. |
337 | * | | 337 | * |
338 | * * pmap activation lock (global hash) - These IPL_SCHED spin locks | | 338 | * * pmap activation lock (global hash) - These IPL_SCHED spin locks |
339 | * synchronize pmap_activate() and TLB shootdowns. This has a lock | | 339 | * synchronize pmap_activate() and TLB shootdowns. This has a lock |
340 | * ordering constraint with the tlb_lock: | | 340 | * ordering constraint with the tlb_lock: |
341 | * | | 341 | * |
342 | * tlb_lock -> pmap activation lock | | 342 | * tlb_lock -> pmap activation lock |
343 | * | | 343 | * |
344 | * * pvh_lock (global hash) - These locks protect the PV lists for | | 344 | * * pvh_lock (global hash) - These locks protect the PV lists for |
345 | * managed pages. | | 345 | * managed pages. |
346 | * | | 346 | * |
347 | * * tlb_lock - This IPL_VM lock serializes local and remote TLB | | 347 | * * tlb_lock - This IPL_VM lock serializes local and remote TLB |
348 | * invalidation. | | 348 | * invalidation. |
349 | * | | 349 | * |
350 | * * pmap_all_pmaps_lock - This lock protects the global list of | | 350 | * * pmap_all_pmaps_lock - This lock protects the global list of |
351 | * all pmaps. | | 351 | * all pmaps. |
352 | * | | 352 | * |
353 | * * pmap_growkernel_lock - This lock protects pmap_growkernel() | | 353 | * * pmap_growkernel_lock - This lock protects pmap_growkernel() |
354 | * and the virtual_end variable. | | 354 | * and the virtual_end variable. |
355 | * | | 355 | * |
356 | * There is a lock ordering constraint for pmap_growkernel_lock. | | 356 | * There is a lock ordering constraint for pmap_growkernel_lock. |
357 | * pmap_growkernel() acquires the locks in the following order: | | 357 | * pmap_growkernel() acquires the locks in the following order: |
358 | * | | 358 | * |
359 | * pmap_growkernel_lock (write) -> pmap_all_pmaps_lock -> | | 359 | * pmap_growkernel_lock (write) -> pmap_all_pmaps_lock -> |
360 | * pmap lock | | 360 | * pmap lock |
361 | * | | 361 | * |
362 | * We need to ensure consistency between user pmaps and the | | 362 | * We need to ensure consistency between user pmaps and the |
363 | * kernel_lev1map. For this reason, pmap_growkernel_lock must | | 363 | * kernel_lev1map. For this reason, pmap_growkernel_lock must |
364 | * be held to prevent kernel_lev1map changing across pmaps | | 364 | * be held to prevent kernel_lev1map changing across pmaps |
365 | * being added to / removed from the global pmaps list. | | 365 | * being added to / removed from the global pmaps list. |
366 | * | | 366 | * |
367 | * Address space number management (global ASN counters and per-pmap | | 367 | * Address space number management (global ASN counters and per-pmap |
368 | * ASN state) is not locked; it uses arrays of values indexed | | 368 | * ASN state) is not locked; it uses arrays of values indexed |
369 | * per-processor. | | 369 | * per-processor. |
370 | * | | 370 | * |
371 | * All internal functions which operate on a pmap are called | | 371 | * All internal functions which operate on a pmap are called |
372 | * with the pmap already locked by the caller (which will be | | 372 | * with the pmap already locked by the caller (which will be |
373 | * an interface function). | | 373 | * an interface function). |
374 | */ | | 374 | */ |
375 | static krwlock_t pmap_main_lock __cacheline_aligned; | | 375 | static krwlock_t pmap_main_lock __cacheline_aligned; |
376 | static kmutex_t pmap_all_pmaps_lock __cacheline_aligned; | | 376 | static kmutex_t pmap_all_pmaps_lock __cacheline_aligned; |
377 | static krwlock_t pmap_growkernel_lock __cacheline_aligned; | | 377 | static krwlock_t pmap_growkernel_lock __cacheline_aligned; |
378 | | | 378 | |
379 | #define PMAP_MAP_TO_HEAD_LOCK() rw_enter(&pmap_main_lock, RW_READER) | | 379 | #define PMAP_MAP_TO_HEAD_LOCK() rw_enter(&pmap_main_lock, RW_READER) |
380 | #define PMAP_MAP_TO_HEAD_UNLOCK() rw_exit(&pmap_main_lock) | | 380 | #define PMAP_MAP_TO_HEAD_UNLOCK() rw_exit(&pmap_main_lock) |
381 | #define PMAP_HEAD_TO_MAP_LOCK() rw_enter(&pmap_main_lock, RW_WRITER) | | 381 | #define PMAP_HEAD_TO_MAP_LOCK() rw_enter(&pmap_main_lock, RW_WRITER) |
382 | #define PMAP_HEAD_TO_MAP_UNLOCK() rw_exit(&pmap_main_lock) | | 382 | #define PMAP_HEAD_TO_MAP_UNLOCK() rw_exit(&pmap_main_lock) |
383 | | | 383 | |
384 | static union { | | 384 | static union { |
385 | kmutex_t lock; | | 385 | kmutex_t lock; |
386 | uint8_t pad[COHERENCY_UNIT]; | | 386 | uint8_t pad[COHERENCY_UNIT]; |
387 | } pmap_pvh_locks[64] __cacheline_aligned; | | 387 | } pmap_pvh_locks[64] __cacheline_aligned; |
388 | | | 388 | |
389 | #define PVH_LOCK_HASH(pg) \ | | 389 | #define PVH_LOCK_HASH(pg) \ |
390 | ((((uintptr_t)(pg)) >> 6) & 63) | | 390 | ((((uintptr_t)(pg)) >> 6) & 63) |
391 | | | 391 | |
392 | static inline kmutex_t * | | 392 | static inline kmutex_t * |
393 | pmap_pvh_lock(struct vm_page *pg) | | 393 | pmap_pvh_lock(struct vm_page *pg) |
394 | { | | 394 | { |
395 | return &pmap_pvh_locks[PVH_LOCK_HASH(pg)].lock; | | 395 | return &pmap_pvh_locks[PVH_LOCK_HASH(pg)].lock; |
396 | } | | 396 | } |
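Usage pattern for the hashed PV locks: the hash discards the low six bits of the vm_page pointer and uses the next six to pick one of 64 mutexes, spreading adjacent vm_page structures across buckets. An illustrative caller (pmap_pvh_lock() is the real function above; mutex_enter()/mutex_exit() are the standard NetBSD primitives):

    static void
    example_pv_list_op(struct vm_page *pg)
    {
    	kmutex_t * const lock = pmap_pvh_lock(pg);

    	mutex_enter(lock);
    	/* ... examine or modify pg's PV list here ... */
    	mutex_exit(lock);
    }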
397 | | | 397 | |
398 | static union { | | 398 | static union { |
399 | struct { | | 399 | struct { |
400 | kmutex_t lock; | | 400 | kmutex_t lock; |
401 | kmutex_t activation_lock; | | 401 | kmutex_t activation_lock; |
402 | } locks; | | 402 | } locks; |
403 | uint8_t pad[COHERENCY_UNIT]; | | 403 | uint8_t pad[COHERENCY_UNIT]; |
404 | } pmap_pmap_locks[64] __cacheline_aligned; | | 404 | } pmap_pmap_locks[64] __cacheline_aligned; |
405 | | | 405 | |
406 | #define PMAP_LOCK_HASH(pm) \ | | 406 | #define PMAP_LOCK_HASH(pm) \ |
407 | ((((uintptr_t)(pm)) >> 6) & 63) | | 407 | ((((uintptr_t)(pm)) >> 6) & 63) |
408 | | | 408 | |
409 | static inline kmutex_t * | | 409 | static inline kmutex_t * |
410 | pmap_pmap_lock(pmap_t const pmap) | | 410 | pmap_pmap_lock(pmap_t const pmap) |
411 | { | | 411 | { |
412 | return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.lock; | | 412 | return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.lock; |
413 | } | | 413 | } |
414 | | | 414 | |
415 | static inline kmutex_t * | | 415 | static inline kmutex_t * |
416 | pmap_activation_lock(pmap_t const pmap) | | 416 | pmap_activation_lock(pmap_t const pmap) |
417 | { | | 417 | { |
418 | return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.activation_lock; | | 418 | return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.activation_lock; |
419 | } | | 419 | } |
420 | | | 420 | |
421 | #define PMAP_LOCK(pmap) mutex_enter(pmap_pmap_lock(pmap)) | | 421 | #define PMAP_LOCK(pmap) mutex_enter(pmap_pmap_lock(pmap)) |
422 | #define PMAP_UNLOCK(pmap) mutex_exit(pmap_pmap_lock(pmap)) | | 422 | #define PMAP_UNLOCK(pmap) mutex_exit(pmap_pmap_lock(pmap)) |
423 | | | 423 | |
424 | #define PMAP_ACT_LOCK(pmap) mutex_spin_enter(pmap_activation_lock(pmap)) | | 424 | #define PMAP_ACT_LOCK(pmap) mutex_spin_enter(pmap_activation_lock(pmap)) |
425 | #define PMAP_ACT_TRYLOCK(pmap) mutex_tryenter(pmap_activation_lock(pmap)) | | 425 | #define PMAP_ACT_TRYLOCK(pmap) mutex_tryenter(pmap_activation_lock(pmap)) |
426 | #define PMAP_ACT_UNLOCK(pmap) mutex_spin_exit(pmap_activation_lock(pmap)) | | 426 | #define PMAP_ACT_UNLOCK(pmap) mutex_spin_exit(pmap_activation_lock(pmap)) |
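The two lock orderings described in the locking notes above look like this in use (illustrative skeletons, bodies elided; the macros are the real ones defined here):

    /* pmap -> PV direction: shared hold on pmap_main_lock. */
    static void
    example_map_to_head(pmap_t pmap)
    {
    	PMAP_MAP_TO_HEAD_LOCK();
    	PMAP_LOCK(pmap);
    	/* ... lock individual PV lists as needed ... */
    	PMAP_UNLOCK(pmap);
    	PMAP_MAP_TO_HEAD_UNLOCK();
    }

    /* PV -> pmap direction (e.g. pmap_page_protect()): exclusive hold. */
    static void
    example_head_to_map(struct vm_page *pg)
    {
    	PMAP_HEAD_TO_MAP_LOCK();
    	mutex_enter(pmap_pvh_lock(pg));
    	/* ... walk the PV list, locking each pmap found on it ... */
    	mutex_exit(pmap_pvh_lock(pg));
    	PMAP_HEAD_TO_MAP_UNLOCK();
    }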
427 | | | 427 | |
428 | #if defined(MULTIPROCESSOR) | | 428 | #if defined(MULTIPROCESSOR) |
429 | #define pmap_all_cpus() cpus_running | | 429 | #define pmap_all_cpus() cpus_running |
430 | #else | | 430 | #else |
431 | #define pmap_all_cpus() ~0UL | | 431 | #define pmap_all_cpus() ~0UL |
432 | #endif /* MULTIPROCESSOR */ | | 432 | #endif /* MULTIPROCESSOR */ |
433 | | | 433 | |
434 | /* | | 434 | /* |
435 | * Generic routine for freeing pages on a pmap_pagelist back to | | 435 | * Generic routine for freeing pages on a pmap_pagelist back to |
436 | * the system. | | 436 | * the system. |
437 | */ | | 437 | */ |
438 | static void | | 438 | static void |
439 | pmap_pagelist_free(struct pmap_pagelist * const list) | | 439 | pmap_pagelist_free(struct pmap_pagelist * const list) |
440 | { | | 440 | { |
441 | struct vm_page *pg; | | 441 | struct vm_page *pg; |
442 | | | 442 | |
443 | while ((pg = LIST_FIRST(list)) != NULL) { | | 443 | while ((pg = LIST_FIRST(list)) != NULL) { |
444 | LIST_REMOVE(pg, pageq.list); | | 444 | LIST_REMOVE(pg, pageq.list); |
445 | uvm_pagefree(pg); | | 445 | uvm_pagefree(pg); |
446 | } | | 446 | } |
447 | } | | 447 | } |
448 | | | 448 | |
449 | /* | | 449 | /* |
450 | * TLB management. | | 450 | * TLB management. |
451 | * | | 451 | * |
452 | * TLB invalidations need to be performed on local and remote CPUs | | 452 | * TLB invalidations need to be performed on local and remote CPUs |
453 | * whenever parts of the PTE that the hardware or PALcode understands | | 453 | * whenever parts of the PTE that the hardware or PALcode understands |
454 | * change. In order to amortize the cost of these operations, we will | | 454 | * change. In order to amortize the cost of these operations, we will |
455 | * queue up to 8 addresses to invalidate in a batch. Any more than | | 455 | * queue up to 8 addresses to invalidate in a batch. Any more than |
456 | * that, and we will hit the entire TLB. | | 456 | * that, and we will hit the entire TLB. |
457 | * | | 457 | * |
458 | * Some things that add complexity: | | 458 | * Some things that add complexity: |
459 | * | | 459 | * |
460 | * ==> ASNs. A CPU may have valid TLB entries for other than the current | | 460 | * ==> ASNs. A CPU may have valid TLB entries for other than the current |
461 | * address space. We can only invalidate TLB entries for the current | | 461 | * address space. We can only invalidate TLB entries for the current |
462 | * address space, so when asked to invalidate a VA for the non-current | | 462 | * address space, so when asked to invalidate a VA for the non-current |
463 | * pmap on a given CPU, we simply invalidate the ASN for that (pmap, CPU) | | 463 | * pmap on a given CPU, we simply invalidate the ASN for that (pmap, CPU) |
464 | * tuple so that a new one is allocated on the next activation on that | | 464 | * tuple so that a new one is allocated on the next activation on that |
465 | * CPU. N.B. that for CPUs that don't implement ASNs, SWPCTX does all | | 465 | * CPU. N.B. that for CPUs that don't implement ASNs, SWPCTX does all |
466 | * the work necessary, so we can skip some work in the pmap module | | 466 | * the work necessary, so we can skip some work in the pmap module |
467 | * itself. | | 467 | * itself. |
468 | * | | 468 | * |
469 | * When a pmap is activated on a given CPU, we set a corresponding | | 469 | * When a pmap is activated on a given CPU, we set a corresponding |
470 | * bit in pmap::pm_cpus, indicating that it potentially has valid | | 470 | * bit in pmap::pm_cpus, indicating that it potentially has valid |
471 | * TLB entries for that address space. This bitmap is then used to | | 471 | * TLB entries for that address space. This bitmap is then used to |
472 | * determine which remote CPUs need to be notified of invalidations. | | 472 | * determine which remote CPUs need to be notified of invalidations. |
473 | * The bit is cleared when the ASN is invalidated on that CPU. | | 473 | * The bit is cleared when the ASN is invalidated on that CPU. |
474 | * | | 474 | * |
475 | * In order to serialize with activating an address space on a | | 475 | * In order to serialize with activating an address space on a |
476 | * given CPU (so that we can reliably send notifications only to | | 476 | * given CPU (so that we can reliably send notifications only to |
477 | * relevant remote CPUs), we acquire the pmap lock in pmap_activate() | | 477 | * relevant remote CPUs), we acquire the pmap lock in pmap_activate() |
478 | * and also hold the lock while remote shootdowns take place. | | 478 | * and also hold the lock while remote shootdowns take place. |
479 | * This does not apply to the kernel pmap; all CPUs are notified about | | 479 | * This does not apply to the kernel pmap; all CPUs are notified about |
480 | * invalidations for the kernel pmap, and the pmap lock is not held | | 480 | * invalidations for the kernel pmap, and the pmap lock is not held |
481 | * in pmap_activate() for the kernel pmap. | | 481 | * in pmap_activate() for the kernel pmap. |
482 | * | | 482 | * |
483 | * ==> P->V operations (e.g. pmap_page_protect()) may require sending | | 483 | * ==> P->V operations (e.g. pmap_page_protect()) may require sending |
484 | * invalidations for multiple address spaces. We only track one | | 484 | * invalidations for multiple address spaces. We only track one |
485 | * address space at a time, and if we encounter more than one, then | | 485 | * address space at a time, and if we encounter more than one, then |
486 | * the notification each CPU gets is to hit the entire TLB. Note | | 486 | * the notification each CPU gets is to hit the entire TLB. Note |
487 | * also that we can't serialize with pmap_activate() in this case, | | 487 | * also that we can't serialize with pmap_activate() in this case, |
488 | * so all CPUs will get the notification, and they check when | | 488 | * so all CPUs will get the notification, and they check when |
489 | * processing the notification if the pmap is current on that CPU. | | 489 | * processing the notification if the pmap is current on that CPU. |
490 | * | | 490 | * |
491 | * Invalidation information is gathered into a pmap_tlb_context structure | | 491 | * Invalidation information is gathered into a pmap_tlb_context structure |
492 | * that includes room for 8 VAs, the pmap the VAs belong to, a bitmap of | | 492 | * that includes room for 8 VAs, the pmap the VAs belong to, a bitmap of |
493 | * CPUs to be notified, and a list for PT pages that are freed during | | 493 | * CPUs to be notified, and a list for PT pages that are freed during |
494 | * removal of mappings. The number of valid addresses in the list as | | 494 | * removal of mappings. The number of valid addresses in the list as |
495 | * well as flags are squeezed into the lower bits of the first two VAs. | | 495 | * well as flags are squeezed into the lower bits of the first two VAs. |
496 | * Storage for this structure is allocated on the stack. We need to be | | 496 | * Storage for this structure is allocated on the stack. We need to be |
497 | * careful to keep the size of this structure under control. | | 497 | * careful to keep the size of this structure under control. |
498 | * | | 498 | * |
499 | * When notifying remote CPUs, we acquire the tlb_lock (which also | | 499 | * When notifying remote CPUs, we acquire the tlb_lock (which also |
500 | * blocks IPIs), record the pointer to our context structure, set a | | 500 | * blocks IPIs), record the pointer to our context structure, set a |
501 | * global bitmap of CPUs to be notified, and then send the IPIs to | | 501 | * global bitmap of CPUs to be notified, and then send the IPIs to |
502 | * each victim. While the other CPUs are in-flight, we then perform | | 502 | * each victim. While the other CPUs are in-flight, we then perform |
503 | * any invalidations necessary on the local CPU. Once that is done, | | 503 | * any invalidations necessary on the local CPU. Once that is done, |
504 | * we then wait for the global context pointer to be cleared, which | | 504 | * we then wait for the global context pointer to be cleared, which |
505 | * will be done by the final remote CPU to complete its work. This | | 505 | * will be done by the final remote CPU to complete its work. This |
506 | * method reduces cache line contention during processing. | | 506 | * method reduces cache line contention during processing. |
507 | * | | 507 | * |
508 | * When removing mappings in user pmaps, this implementation frees page | | 508 | * When removing mappings in user pmaps, this implementation frees page |
509 | * table pages back to the VM system once they contain no valid mappings. | | 509 | * table pages back to the VM system once they contain no valid mappings. |
510 | * As we do this, we must be sure to invalidate TLB entries that the | | 510 | * As we do this, we must be sure to invalidate TLB entries that the |
511 | * CPU might hold for the respective recursive VPT mappings. This must | | 511 | * CPU might hold for the respective recursive VPT mappings. This must |
512 | * be done whenever an L1 or L2 PTE is invalidated. Until these VPT | | 512 | * be done whenever an L1 or L2 PTE is invalidated. Until these VPT |
513 | * translations are invalidated, the PT pages must not be reused. For | | 513 | * translations are invalidated, the PT pages must not be reused. For |
514 | * this reason, we keep a list of freed PT pages in the context structure | | 514 | * this reason, we keep a list of freed PT pages in the context structure |
515 | * and drain them off once all invalidations are complete. | | 515 | * and drain them off once all invalidations are complete. |
516 | * | | 516 | * |
517 | * NOTE: The value of TLB_CTX_MAXVA is tuned to accommodate the UBC | | 517 | * NOTE: The value of TLB_CTX_MAXVA is tuned to accommodate the UBC |
518 | * window size (defined as 64KB on alpha in <machine/vmparam.h>). | | 518 | * window size (defined as 64KB on alpha in <machine/vmparam.h>). |
519 | */ | | 519 | */ |
520 | | | 520 | |
521 | #define TLB_CTX_MAXVA 8 | | 521 | #define TLB_CTX_MAXVA 8 |
522 | #define TLB_CTX_ALLVA PAGE_MASK | | 522 | #define TLB_CTX_ALLVA PAGE_MASK |
523 | | | 523 | |
524 | #define TLB_CTX_F_ASM __BIT(0) | | 524 | #define TLB_CTX_F_ASM __BIT(0) |
525 | #define TLB_CTX_F_IMB __BIT(1) | | 525 | #define TLB_CTX_F_IMB __BIT(1) |
526 | #define TLB_CTX_F_KIMB __BIT(2) | | 526 | #define TLB_CTX_F_KIMB __BIT(2) |
527 | #define TLB_CTX_F_PV __BIT(3) | | 527 | #define TLB_CTX_F_PV __BIT(3) |
528 | #define TLB_CTX_F_MULTI __BIT(4) | | 528 | #define TLB_CTX_F_MULTI __BIT(4) |
529 | | | 529 | |
530 | #define TLB_CTX_COUNT(ctx) ((ctx)->t_addrdata[0] & PAGE_MASK) | | 530 | #define TLB_CTX_COUNT(ctx) ((ctx)->t_addrdata[0] & PAGE_MASK) |
531 | #define TLB_CTX_INC_COUNT(ctx) (ctx)->t_addrdata[0]++ | | 531 | #define TLB_CTX_INC_COUNT(ctx) (ctx)->t_addrdata[0]++ |
532 | #define TLB_CTX_SET_ALLVA(ctx) (ctx)->t_addrdata[0] |= TLB_CTX_ALLVA | | 532 | #define TLB_CTX_SET_ALLVA(ctx) (ctx)->t_addrdata[0] |= TLB_CTX_ALLVA |
533 | | | 533 | |
534 | #define TLB_CTX_FLAGS(ctx) ((ctx)->t_addrdata[1] & PAGE_MASK) | | 534 | #define TLB_CTX_FLAGS(ctx) ((ctx)->t_addrdata[1] & PAGE_MASK) |
535 | #define TLB_CTX_SET_FLAG(ctx, f) (ctx)->t_addrdata[1] |= (f) | | 535 | #define TLB_CTX_SET_FLAG(ctx, f) (ctx)->t_addrdata[1] |= (f) |
536 | | | 536 | |
537 | #define TLB_CTX_VA(ctx, i) ((ctx)->t_addrdata[(i)] & ~PAGE_MASK) | | 537 | #define TLB_CTX_VA(ctx, i) ((ctx)->t_addrdata[(i)] & ~PAGE_MASK) |
538 | #define TLB_CTX_SETVA(ctx, i, va) \ | | 538 | #define TLB_CTX_SETVA(ctx, i, va) \ |
539 | (ctx)->t_addrdata[(i)] = (va) | ((ctx)->t_addrdata[(i)] & PAGE_MASK) | | 539 | (ctx)->t_addrdata[(i)] = (va) | ((ctx)->t_addrdata[(i)] & PAGE_MASK) |
540 | | | 540 | |
541 | struct pmap_tlb_context { | | 541 | struct pmap_tlb_context { |
542 | uintptr_t t_addrdata[TLB_CTX_MAXVA]; | | 542 | uintptr_t t_addrdata[TLB_CTX_MAXVA]; |
543 | pmap_t t_pmap; | | 543 | pmap_t t_pmap; |
544 | struct pmap_pagelist t_freeptq; | | 544 | struct pmap_pagelist t_freeptq; |
545 | }; | | 545 | }; |
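Because every recorded VA is page-aligned, the low PAGE_MASK bits of t_addrdata[0] and t_addrdata[1] are free to hold the count and the flags, which is exactly what the TLB_CTX_* macros above implement. A standalone model of the packing (PAGE_SHIFT value assumed for alpha's 8K pages):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT	13				/* 8K pages */
    #define PAGE_MASK	((1UL << PAGE_SHIFT) - 1)

    int
    main(void)
    {
    	uintptr_t addrdata[2] = { 0, 0 };
    	const uintptr_t va = 0x120004000UL;	/* page-aligned VA */

    	/* TLB_CTX_SETVA: install the VA, preserving the low bits. */
    	addrdata[0] = va | (addrdata[0] & PAGE_MASK);
    	/* TLB_CTX_INC_COUNT: the count lives in those low bits. */
    	addrdata[0]++;

    	assert((addrdata[0] & ~PAGE_MASK) == va);	/* VA intact */
    	assert((addrdata[0] & PAGE_MASK) == 1);		/* count == 1 */
    	return 0;
    }

Since the count never exceeds TLB_CTX_MAXVA (8), a count field equal to TLB_CTX_ALLVA (the full PAGE_MASK) unambiguously means "invalidate everything", which is what TLB_CTX_SET_ALLVA produces when the batch overflows.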
546 | | | 546 | |
547 | static struct { | | 547 | static struct { |
548 | kmutex_t lock; | | 548 | kmutex_t lock; |
549 | struct evcnt events; | | 549 | struct evcnt events; |
550 | } tlb_shootdown __cacheline_aligned; | | 550 | } tlb_shootdown __cacheline_aligned; |
551 | #define tlb_lock tlb_shootdown.lock | | 551 | #define tlb_lock tlb_shootdown.lock |
552 | #define tlb_evcnt tlb_shootdown.events | | 552 | #define tlb_evcnt tlb_shootdown.events |
553 | #if defined(MULTIPROCESSOR) | | 553 | #if defined(MULTIPROCESSOR) |
554 | static const struct pmap_tlb_context *tlb_context __cacheline_aligned; | | 554 | static const struct pmap_tlb_context *tlb_context __cacheline_aligned; |
555 | static unsigned long tlb_pending __cacheline_aligned; | | 555 | static unsigned long tlb_pending __cacheline_aligned; |
556 | #endif /* MULTIPROCESSOR */ | | 556 | #endif /* MULTIPROCESSOR */ |
557 | | | 557 | |
558 | #if defined(TLB_STATS) | | 558 | #if defined(TLB_STATS) |
559 | #define TLB_COUNT_DECL(cnt) static struct evcnt tlb_stat_##cnt | | 559 | #define TLB_COUNT_DECL(cnt) static struct evcnt tlb_stat_##cnt |
560 | #define TLB_COUNT(cnt) atomic_inc_64(&tlb_stat_##cnt .ev_count) | | 560 | #define TLB_COUNT(cnt) atomic_inc_64(&tlb_stat_##cnt .ev_count) |
561 | #define TLB_COUNT_ATTACH(cnt) \ | | 561 | #define TLB_COUNT_ATTACH(cnt) \ |
562 | evcnt_attach_dynamic_nozero(&tlb_stat_##cnt, EVCNT_TYPE_MISC, \ | | 562 | evcnt_attach_dynamic_nozero(&tlb_stat_##cnt, EVCNT_TYPE_MISC, \ |
563 | NULL, "TLB", #cnt) | | 563 | NULL, "TLB", #cnt) |
564 | | | 564 | |
565 | TLB_COUNT_DECL(invalidate_multi_tbia); | | 565 | TLB_COUNT_DECL(invalidate_multi_tbia); |
566 | TLB_COUNT_DECL(invalidate_multi_tbiap); | | 566 | TLB_COUNT_DECL(invalidate_multi_tbiap); |
567 | TLB_COUNT_DECL(invalidate_multi_imb); | | 567 | TLB_COUNT_DECL(invalidate_multi_imb); |
568 | | | 568 | |
569 | TLB_COUNT_DECL(invalidate_kern_tbia); | | 569 | TLB_COUNT_DECL(invalidate_kern_tbia); |
570 | TLB_COUNT_DECL(invalidate_kern_tbis); | | 570 | TLB_COUNT_DECL(invalidate_kern_tbis); |
571 | TLB_COUNT_DECL(invalidate_kern_imb); | | 571 | TLB_COUNT_DECL(invalidate_kern_imb); |
572 | | | 572 | |
573 | TLB_COUNT_DECL(invalidate_user_not_current); | | 573 | TLB_COUNT_DECL(invalidate_user_not_current); |
574 | TLB_COUNT_DECL(invalidate_user_lazy_imb); | | 574 | TLB_COUNT_DECL(invalidate_user_lazy_imb); |
575 | TLB_COUNT_DECL(invalidate_user_tbiap); | | 575 | TLB_COUNT_DECL(invalidate_user_tbiap); |
576 | TLB_COUNT_DECL(invalidate_user_tbis); | | 576 | TLB_COUNT_DECL(invalidate_user_tbis); |
577 | | | 577 | |
578 | TLB_COUNT_DECL(shootdown_kernel); | | 578 | TLB_COUNT_DECL(shootdown_kernel); |
579 | TLB_COUNT_DECL(shootdown_user); | | 579 | TLB_COUNT_DECL(shootdown_user); |
580 | TLB_COUNT_DECL(shootdown_imb); | | 580 | TLB_COUNT_DECL(shootdown_imb); |
581 | TLB_COUNT_DECL(shootdown_kimb); | | 581 | TLB_COUNT_DECL(shootdown_kimb); |
582 | TLB_COUNT_DECL(shootdown_overflow); | | 582 | TLB_COUNT_DECL(shootdown_overflow); |
583 | | | 583 | |
584 | TLB_COUNT_DECL(shootdown_all_user); | | 584 | TLB_COUNT_DECL(shootdown_all_user); |
585 | TLB_COUNT_DECL(shootdown_all_user_imb); | | 585 | TLB_COUNT_DECL(shootdown_all_user_imb); |
586 | | | 586 | |
587 | TLB_COUNT_DECL(shootdown_pv); | | 587 | TLB_COUNT_DECL(shootdown_pv); |
588 | TLB_COUNT_DECL(shootdown_pv_multi); | | 588 | TLB_COUNT_DECL(shootdown_pv_multi); |
589 | | | 589 | |
590 | TLB_COUNT_DECL(shootnow_over_notify); | | 590 | TLB_COUNT_DECL(shootnow_over_notify); |
591 | TLB_COUNT_DECL(shootnow_remote); | | 591 | TLB_COUNT_DECL(shootnow_remote); |
592 | | | 592 | |
593 | TLB_COUNT_DECL(reason_remove_kernel); | | 593 | TLB_COUNT_DECL(reason_remove_kernel); |
594 | TLB_COUNT_DECL(reason_remove_user); | | 594 | TLB_COUNT_DECL(reason_remove_user); |
595 | TLB_COUNT_DECL(reason_page_protect_read); | | 595 | TLB_COUNT_DECL(reason_page_protect_read); |
596 | TLB_COUNT_DECL(reason_page_protect_none); | | 596 | TLB_COUNT_DECL(reason_page_protect_none); |
597 | TLB_COUNT_DECL(reason_protect); | | 597 | TLB_COUNT_DECL(reason_protect); |
598 | TLB_COUNT_DECL(reason_enter_kernel); | | 598 | TLB_COUNT_DECL(reason_enter_kernel); |
599 | TLB_COUNT_DECL(reason_enter_user); | | 599 | TLB_COUNT_DECL(reason_enter_user); |
600 | TLB_COUNT_DECL(reason_kenter); | | 600 | TLB_COUNT_DECL(reason_kenter); |
601 | TLB_COUNT_DECL(reason_enter_l2pt_delref); | | 601 | TLB_COUNT_DECL(reason_enter_l2pt_delref); |
602 | TLB_COUNT_DECL(reason_enter_l3pt_delref); | | 602 | TLB_COUNT_DECL(reason_enter_l3pt_delref); |
603 | TLB_COUNT_DECL(reason_kremove); | | 603 | TLB_COUNT_DECL(reason_kremove); |
604 | TLB_COUNT_DECL(reason_clear_modify); | | 604 | TLB_COUNT_DECL(reason_clear_modify); |
605 | TLB_COUNT_DECL(reason_clear_reference); | | 605 | TLB_COUNT_DECL(reason_clear_reference); |
606 | TLB_COUNT_DECL(reason_emulate_reference); | | 606 | TLB_COUNT_DECL(reason_emulate_reference); |
607 | | | 607 | |
608 | TLB_COUNT_DECL(asn_reuse); | | 608 | TLB_COUNT_DECL(asn_reuse); |
609 | TLB_COUNT_DECL(asn_newgen); | | 609 | TLB_COUNT_DECL(asn_newgen); |
610 | TLB_COUNT_DECL(asn_assign); | | 610 | TLB_COUNT_DECL(asn_assign); |
611 | | | 611 | |
612 | TLB_COUNT_DECL(activate_both_change); | | 612 | TLB_COUNT_DECL(activate_both_change); |
613 | TLB_COUNT_DECL(activate_asn_change); | | 613 | TLB_COUNT_DECL(activate_asn_change); |
614 | TLB_COUNT_DECL(activate_ptbr_change); | | 614 | TLB_COUNT_DECL(activate_ptbr_change); |
615 | TLB_COUNT_DECL(activate_swpctx); | | 615 | TLB_COUNT_DECL(activate_swpctx); |
616 | TLB_COUNT_DECL(activate_skip_swpctx); | | 616 | TLB_COUNT_DECL(activate_skip_swpctx); |
617 | | | 617 | |
618 | #else /* ! TLB_STATS */ | | 618 | #else /* ! TLB_STATS */ |
619 | #define TLB_COUNT(cnt) __nothing | | 619 | #define TLB_COUNT(cnt) __nothing |
620 | #define TLB_COUNT_ATTACH(cnt) __nothing | | 620 | #define TLB_COUNT_ATTACH(cnt) __nothing |
621 | #endif /* TLB_STATS */ | | 621 | #endif /* TLB_STATS */ |
622 | | | 622 | |
623 | static void | | 623 | static void |
624 | pmap_tlb_init(void) | | 624 | pmap_tlb_init(void) |
625 | { | | 625 | { |
626 | /* mutex is initialized in pmap_bootstrap(). */ | | 626 | /* mutex is initialized in pmap_bootstrap(). */ |
627 | | | 627 | |
628 | evcnt_attach_dynamic_nozero(&tlb_evcnt, EVCNT_TYPE_MISC, | | 628 | evcnt_attach_dynamic_nozero(&tlb_evcnt, EVCNT_TYPE_MISC, |
629 | NULL, "TLB", "shootdown"); | | 629 | NULL, "TLB", "shootdown"); |
630 | | | 630 | |
631 | TLB_COUNT_ATTACH(invalidate_multi_tbia); | | 631 | TLB_COUNT_ATTACH(invalidate_multi_tbia); |
632 | TLB_COUNT_ATTACH(invalidate_multi_tbiap); | | 632 | TLB_COUNT_ATTACH(invalidate_multi_tbiap); |
633 | TLB_COUNT_ATTACH(invalidate_multi_imb); | | 633 | TLB_COUNT_ATTACH(invalidate_multi_imb); |
634 | | | 634 | |
635 | TLB_COUNT_ATTACH(invalidate_kern_tbia); | | 635 | TLB_COUNT_ATTACH(invalidate_kern_tbia); |
636 | TLB_COUNT_ATTACH(invalidate_kern_tbis); | | 636 | TLB_COUNT_ATTACH(invalidate_kern_tbis); |
637 | TLB_COUNT_ATTACH(invalidate_kern_imb); | | 637 | TLB_COUNT_ATTACH(invalidate_kern_imb); |
638 | | | 638 | |
639 | TLB_COUNT_ATTACH(invalidate_user_not_current); | | 639 | TLB_COUNT_ATTACH(invalidate_user_not_current); |
640 | TLB_COUNT_ATTACH(invalidate_user_lazy_imb); | | 640 | TLB_COUNT_ATTACH(invalidate_user_lazy_imb); |
641 | TLB_COUNT_ATTACH(invalidate_user_tbiap); | | 641 | TLB_COUNT_ATTACH(invalidate_user_tbiap); |
642 | TLB_COUNT_ATTACH(invalidate_user_tbis); | | 642 | TLB_COUNT_ATTACH(invalidate_user_tbis); |
643 | | | 643 | |
644 | TLB_COUNT_ATTACH(shootdown_kernel); | | 644 | TLB_COUNT_ATTACH(shootdown_kernel); |
645 | TLB_COUNT_ATTACH(shootdown_user); | | 645 | TLB_COUNT_ATTACH(shootdown_user); |
646 | TLB_COUNT_ATTACH(shootdown_imb); | | 646 | TLB_COUNT_ATTACH(shootdown_imb); |
647 | TLB_COUNT_ATTACH(shootdown_kimb); | | 647 | TLB_COUNT_ATTACH(shootdown_kimb); |
648 | TLB_COUNT_ATTACH(shootdown_overflow); | | 648 | TLB_COUNT_ATTACH(shootdown_overflow); |
649 | | | 649 | |
650 | TLB_COUNT_ATTACH(shootdown_all_user); | | 650 | TLB_COUNT_ATTACH(shootdown_all_user); |
651 | TLB_COUNT_ATTACH(shootdown_all_user_imb); | | 651 | TLB_COUNT_ATTACH(shootdown_all_user_imb); |
652 | | | 652 | |
653 | TLB_COUNT_ATTACH(shootdown_pv); | | 653 | TLB_COUNT_ATTACH(shootdown_pv); |
654 | TLB_COUNT_ATTACH(shootdown_pv_multi); | | 654 | TLB_COUNT_ATTACH(shootdown_pv_multi); |
655 | | | 655 | |
656 | TLB_COUNT_ATTACH(shootnow_over_notify); | | 656 | TLB_COUNT_ATTACH(shootnow_over_notify); |
657 | TLB_COUNT_ATTACH(shootnow_remote); | | 657 | TLB_COUNT_ATTACH(shootnow_remote); |
658 | | | 658 | |
659 | TLB_COUNT_ATTACH(reason_remove_kernel); | | 659 | TLB_COUNT_ATTACH(reason_remove_kernel); |
660 | TLB_COUNT_ATTACH(reason_remove_user); | | 660 | TLB_COUNT_ATTACH(reason_remove_user); |
661 | TLB_COUNT_ATTACH(reason_page_protect_read); | | 661 | TLB_COUNT_ATTACH(reason_page_protect_read); |
662 | TLB_COUNT_ATTACH(reason_page_protect_none); | | 662 | TLB_COUNT_ATTACH(reason_page_protect_none); |
663 | TLB_COUNT_ATTACH(reason_protect); | | 663 | TLB_COUNT_ATTACH(reason_protect); |
664 | TLB_COUNT_ATTACH(reason_enter_kernel); | | 664 | TLB_COUNT_ATTACH(reason_enter_kernel); |
665 | TLB_COUNT_ATTACH(reason_enter_user); | | 665 | TLB_COUNT_ATTACH(reason_enter_user); |
666 | TLB_COUNT_ATTACH(reason_kenter); | | 666 | TLB_COUNT_ATTACH(reason_kenter); |
667 | TLB_COUNT_ATTACH(reason_enter_l2pt_delref); | | 667 | TLB_COUNT_ATTACH(reason_enter_l2pt_delref); |
668 | TLB_COUNT_ATTACH(reason_enter_l3pt_delref); | | 668 | TLB_COUNT_ATTACH(reason_enter_l3pt_delref); |
669 | TLB_COUNT_ATTACH(reason_kremove); | | 669 | TLB_COUNT_ATTACH(reason_kremove); |
670 | TLB_COUNT_ATTACH(reason_clear_modify); | | 670 | TLB_COUNT_ATTACH(reason_clear_modify); |
671 | TLB_COUNT_ATTACH(reason_clear_reference); | | 671 | TLB_COUNT_ATTACH(reason_clear_reference); |
| | | | TLB_COUNT_ATTACH(reason_emulate_reference);	/* declared above; attach appears to have been missing */ |
672 | | | 672 | |
673 | TLB_COUNT_ATTACH(asn_reuse); | | 673 | TLB_COUNT_ATTACH(asn_reuse); |
674 | TLB_COUNT_ATTACH(asn_newgen); | | 674 | TLB_COUNT_ATTACH(asn_newgen); |
675 | TLB_COUNT_ATTACH(asn_assign); | | 675 | TLB_COUNT_ATTACH(asn_assign); |
676 | | | 676 | |
677 | TLB_COUNT_ATTACH(activate_both_change); | | 677 | TLB_COUNT_ATTACH(activate_both_change); |
678 | TLB_COUNT_ATTACH(activate_asn_change); | | 678 | TLB_COUNT_ATTACH(activate_asn_change); |
679 | TLB_COUNT_ATTACH(activate_ptbr_change); | | 679 | TLB_COUNT_ATTACH(activate_ptbr_change); |
680 | TLB_COUNT_ATTACH(activate_swpctx); | | 680 | TLB_COUNT_ATTACH(activate_swpctx); |
681 | TLB_COUNT_ATTACH(activate_skip_swpctx); | | 681 | TLB_COUNT_ATTACH(activate_skip_swpctx); |
682 | } | | 682 | } |
683 | | | 683 | |
684 | static inline void | | 684 | static inline void |
685 | pmap_tlb_context_init(struct pmap_tlb_context * const tlbctx, uintptr_t flags) | | 685 | pmap_tlb_context_init(struct pmap_tlb_context * const tlbctx, uintptr_t flags) |
686 | { | | 686 | { |
687 | /* Initialize the minimum number of fields. */ | | 687 | /* Initialize the minimum number of fields. */ |
688 | tlbctx->t_addrdata[0] = 0; | | 688 | tlbctx->t_addrdata[0] = 0; |
689 | tlbctx->t_addrdata[1] = flags; | | 689 | tlbctx->t_addrdata[1] = flags; |
690 | tlbctx->t_pmap = NULL; | | 690 | tlbctx->t_pmap = NULL; |
691 | LIST_INIT(&tlbctx->t_freeptq); | | 691 | LIST_INIT(&tlbctx->t_freeptq); |
692 | } | | 692 | } |
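| | | | |
| | | | /* |
| | | |  * Illustrative calling pattern for the TLB context (a sketch of |
| | | |  * how the routines below are typically used together, not a |
| | | |  * verbatim excerpt from any one caller): |
| | | |  * |
| | | |  *	struct pmap_tlb_context tlbctx; |
| | | |  * |
| | | |  *	pmap_tlb_context_init(&tlbctx, 0); |
| | | |  *	...update PTEs, calling pmap_tlb_shootdown() for each VA... |
| | | |  *	pmap_tlb_shootnow(&tlbctx); |
| | | |  *	pmap_tlb_ptpage_drain(&tlbctx);	(frees any queued PT pages) |
| | | |  */ |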
693 | | | 693 | |
694 | static void | | 694 | static void |
695 | pmap_tlb_shootdown_internal(pmap_t const pmap, vaddr_t const va, | | 695 | pmap_tlb_shootdown_internal(pmap_t const pmap, vaddr_t const va, |
696 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) | | 696 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) |
697 | { | | 697 | { |
698 | KASSERT(pmap != NULL); | | 698 | KASSERT(pmap != NULL); |
699 | KASSERT((va & PAGE_MASK) == 0); | | 699 | KASSERT((va & PAGE_MASK) == 0); |
700 | | | 700 | |
701 | /* | | 701 | /* |
702 | * Figure out who needs to hear about this, and the scope | | 702 | * Figure out who needs to hear about this, and the scope |
703 | * of an all-entries invalidate. | | 703 | * of an all-entries invalidate. |
704 | */ | | 704 | */ |
705 | if (pmap == pmap_kernel()) { | | 705 | if (pmap == pmap_kernel()) { |
706 | TLB_COUNT(shootdown_kernel); | | 706 | TLB_COUNT(shootdown_kernel); |
707 | KASSERT(pte_bits & PG_ASM); | | 707 | KASSERT(pte_bits & PG_ASM); |
708 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_ASM); | | 708 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_ASM); |
709 | | | 709 | |
710 | /* Note if an I-stream sync is also needed. */ | | 710 | /* Note if an I-stream sync is also needed. */ |
711 | if (pte_bits & PG_EXEC) { | | 711 | if (pte_bits & PG_EXEC) { |
712 | TLB_COUNT(shootdown_kimb); | | 712 | TLB_COUNT(shootdown_kimb); |
713 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_KIMB); | | 713 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_KIMB); |
714 | } | | 714 | } |
715 | } else { | | 715 | } else { |
716 | TLB_COUNT(shootdown_user); | | 716 | TLB_COUNT(shootdown_user); |
717 | KASSERT((pte_bits & PG_ASM) == 0); | | 717 | KASSERT((pte_bits & PG_ASM) == 0); |
718 | | | 718 | |
719 | /* Note if an I-stream sync is also needed. */ | | 719 | /* Note if an I-stream sync is also needed. */ |
720 | if (pte_bits & PG_EXEC) { | | 720 | if (pte_bits & PG_EXEC) { |
721 | TLB_COUNT(shootdown_imb); | | 721 | TLB_COUNT(shootdown_imb); |
722 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB); | | 722 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB); |
723 | } | | 723 | } |
724 | } | | 724 | } |
725 | | | 725 | |
726 | KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap); | | 726 | KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap); |
727 | tlbctx->t_pmap = pmap; | | 727 | tlbctx->t_pmap = pmap; |
728 | | | 728 | |
729 | /* | | 729 | /* |
730 | * If we're already at the max, just tell each active CPU | | 730 | * If we're already at the max, just tell each active CPU |
731 | * to nail everything. | | 731 | * to nail everything. |
732 | */ | | 732 | */ |
733 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); | | 733 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); |
734 | if (count > TLB_CTX_MAXVA) { | | 734 | if (count > TLB_CTX_MAXVA) { |
735 | return; | | 735 | return; |
736 | } | | 736 | } |
737 | if (count == TLB_CTX_MAXVA) { | | 737 | if (count == TLB_CTX_MAXVA) { |
738 | TLB_COUNT(shootdown_overflow); | | 738 | TLB_COUNT(shootdown_overflow); |
739 | TLB_CTX_SET_ALLVA(tlbctx); | | 739 | TLB_CTX_SET_ALLVA(tlbctx); |
740 | return; | | 740 | return; |
741 | } | | 741 | } |
742 | | | 742 | |
743 | TLB_CTX_SETVA(tlbctx, count, va); | | 743 | TLB_CTX_SETVA(tlbctx, count, va); |
744 | TLB_CTX_INC_COUNT(tlbctx); | | 744 | TLB_CTX_INC_COUNT(tlbctx); |
745 | } | | 745 | } |
746 | | | 746 | |
747 | static void | | 747 | static void |
748 | pmap_tlb_shootdown(pmap_t const pmap, vaddr_t const va, | | 748 | pmap_tlb_shootdown(pmap_t const pmap, vaddr_t const va, |
749 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) | | 749 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) |
750 | { | | 750 | { |
751 | KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) == 0); | | 751 | KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) == 0); |
752 | pmap_tlb_shootdown_internal(pmap, va, pte_bits, tlbctx); | | 752 | pmap_tlb_shootdown_internal(pmap, va, pte_bits, tlbctx); |
753 | } | | 753 | } |
754 | | | 754 | |
755 | static void | | 755 | static void |
756 | pmap_tlb_shootdown_all_user(pmap_t const pmap, pt_entry_t const pte_bits, | | 756 | pmap_tlb_shootdown_all_user(pmap_t const pmap, pt_entry_t const pte_bits, |
757 | struct pmap_tlb_context * const tlbctx) | | 757 | struct pmap_tlb_context * const tlbctx) |
758 | { | | 758 | { |
759 | KASSERT(pmap != pmap_kernel()); | | 759 | KASSERT(pmap != pmap_kernel()); |
760 | | | 760 | |
761 | TLB_COUNT(shootdown_all_user); | | 761 | TLB_COUNT(shootdown_all_user); |
762 | | | 762 | |
763 | /* Note if an I-stream sync is also needed. */ | | 763 | /* Note if an I-stream sync is also needed. */ |
764 | if (pte_bits & PG_EXEC) { | | 764 | if (pte_bits & PG_EXEC) { |
765 | TLB_COUNT(shootdown_all_user_imb); | | 765 | TLB_COUNT(shootdown_all_user_imb); |
766 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB); | | 766 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB); |
767 | } | | 767 | } |
768 | | | 768 | |
769 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) { | | 769 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) { |
770 | if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap) { | | 770 | if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap) { |
771 | if (tlbctx->t_pmap == NULL) { | | 771 | if (tlbctx->t_pmap == NULL) { |
772 | pmap_reference(pmap); | | 772 | pmap_reference(pmap); |
773 | tlbctx->t_pmap = pmap; | | 773 | tlbctx->t_pmap = pmap; |
774 | } | | 774 | } |
775 | } else { | | 775 | } else { |
776 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_MULTI); | | 776 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_MULTI); |
777 | } | | 777 | } |
778 | } else { | | 778 | } else { |
779 | KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap); | | 779 | KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap); |
780 | tlbctx->t_pmap = pmap; | | 780 | tlbctx->t_pmap = pmap; |
781 | } | | 781 | } |
782 | | | 782 | |
783 | TLB_CTX_SET_ALLVA(tlbctx); | | 783 | TLB_CTX_SET_ALLVA(tlbctx); |
784 | } | | 784 | } |
785 | | | 785 | |
786 | static void | | 786 | static void |
787 | pmap_tlb_shootdown_pv(pmap_t const pmap, vaddr_t const va, | | 787 | pmap_tlb_shootdown_pv(pmap_t const pmap, vaddr_t const va, |
788 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) | | 788 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) |
789 | { | | 789 | { |
790 | | | 790 | |
791 | KASSERT(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV); | | 791 | KASSERT(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV); |
792 | | | 792 | |
793 | TLB_COUNT(shootdown_pv); | | 793 | TLB_COUNT(shootdown_pv); |
794 | | | 794 | |
795 | if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap) { | | 795 | if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap) { |
796 | if (tlbctx->t_pmap == NULL) { | | 796 | if (tlbctx->t_pmap == NULL) { |
797 | pmap_reference(pmap); | | 797 | pmap_reference(pmap); |
798 | tlbctx->t_pmap = pmap; | | 798 | tlbctx->t_pmap = pmap; |
799 | } | | 799 | } |
800 | pmap_tlb_shootdown_internal(pmap, va, pte_bits, tlbctx); | | 800 | pmap_tlb_shootdown_internal(pmap, va, pte_bits, tlbctx); |
801 | } else { | | 801 | } else { |
802 | TLB_COUNT(shootdown_pv_multi); | | 802 | TLB_COUNT(shootdown_pv_multi); |
803 | uintptr_t flags = TLB_CTX_F_MULTI; | | 803 | uintptr_t flags = TLB_CTX_F_MULTI; |
804 | if (pmap == pmap_kernel()) { | | 804 | if (pmap == pmap_kernel()) { |
805 | KASSERT(pte_bits & PG_ASM); | | 805 | KASSERT(pte_bits & PG_ASM); |
806 | flags |= TLB_CTX_F_ASM; | | 806 | flags |= TLB_CTX_F_ASM; |
807 | } else { | | 807 | } else { |
808 | KASSERT((pte_bits & PG_ASM) == 0); | | 808 | KASSERT((pte_bits & PG_ASM) == 0); |
809 | } | | 809 | } |
810 | | | 810 | |
811 | /* | | 811 | /* |
812 | * No need to distinguish between kernel and user IMB | | 812 | * No need to distinguish between kernel and user IMB |
813 | * here; see pmap_tlb_invalidate_multi(). | | 813 | * here; see pmap_tlb_invalidate_multi(). |
814 | */ | | 814 | */ |
815 | if (pte_bits & PG_EXEC) { | | 815 | if (pte_bits & PG_EXEC) { |
816 | flags |= TLB_CTX_F_IMB; | | 816 | flags |= TLB_CTX_F_IMB; |
817 | } | | 817 | } |
818 | TLB_CTX_SET_ALLVA(tlbctx); | | 818 | TLB_CTX_SET_ALLVA(tlbctx); |
819 | TLB_CTX_SET_FLAG(tlbctx, flags); | | 819 | TLB_CTX_SET_FLAG(tlbctx, flags); |
820 | } | | 820 | } |
821 | } | | 821 | } |
822 | | | 822 | |
823 | static void | | 823 | static void |
824 | pmap_tlb_invalidate_multi(const struct pmap_tlb_context * const tlbctx) | | 824 | pmap_tlb_invalidate_multi(const struct pmap_tlb_context * const tlbctx) |
825 | { | | 825 | { |
826 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) { | | 826 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) { |
827 | TLB_COUNT(invalidate_multi_tbia); | | 827 | TLB_COUNT(invalidate_multi_tbia); |
828 | ALPHA_TBIA(); | | 828 | ALPHA_TBIA(); |
829 | } else { | | 829 | } else { |
830 | TLB_COUNT(invalidate_multi_tbiap); | | 830 | TLB_COUNT(invalidate_multi_tbiap); |
831 | ALPHA_TBIAP(); | | 831 | ALPHA_TBIAP(); |
832 | } | | 832 | } |
833 | if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_IMB | TLB_CTX_F_KIMB)) { | | 833 | if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_IMB | TLB_CTX_F_KIMB)) { |
834 | TLB_COUNT(invalidate_multi_imb); | | 834 | TLB_COUNT(invalidate_multi_imb); |
835 | alpha_pal_imb(); | | 835 | alpha_pal_imb(); |
836 | } | | 836 | } |
837 | } | | 837 | } |
838 | | | 838 | |
839 | static void | | 839 | static void |
840 | pmap_tlb_invalidate_kernel(const struct pmap_tlb_context * const tlbctx) | | 840 | pmap_tlb_invalidate_kernel(const struct pmap_tlb_context * const tlbctx) |
841 | { | | 841 | { |
842 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); | | 842 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); |
843 | | | 843 | |
844 | if (count == TLB_CTX_ALLVA) { | | 844 | if (count == TLB_CTX_ALLVA) { |
845 | TLB_COUNT(invalidate_kern_tbia); | | 845 | TLB_COUNT(invalidate_kern_tbia); |
846 | ALPHA_TBIA(); | | 846 | ALPHA_TBIA(); |
847 | } else { | | 847 | } else { |
848 | TLB_COUNT(invalidate_kern_tbis); | | 848 | TLB_COUNT(invalidate_kern_tbis); |
849 | for (uintptr_t i = 0; i < count; i++) { | | 849 | for (uintptr_t i = 0; i < count; i++) { |
850 | ALPHA_TBIS(TLB_CTX_VA(tlbctx, i)); | | 850 | ALPHA_TBIS(TLB_CTX_VA(tlbctx, i)); |
851 | } | | 851 | } |
852 | } | | 852 | } |
853 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_KIMB) { | | 853 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_KIMB) { |
854 | TLB_COUNT(invalidate_kern_imb); | | 854 | TLB_COUNT(invalidate_kern_imb); |
855 | alpha_pal_imb(); | | 855 | alpha_pal_imb(); |
856 | } | | 856 | } |
857 | } | | 857 | } |
858 | | | 858 | |
859 | static void | | 859 | static void |
860 | pmap_tlb_invalidate(const struct pmap_tlb_context * const tlbctx, | | 860 | pmap_tlb_invalidate(const struct pmap_tlb_context * const tlbctx, |
861 | const struct cpu_info * const ci) | | 861 | const struct cpu_info * const ci) |
862 | { | | 862 | { |
863 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); | | 863 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); |
864 | | | 864 | |
865 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_MULTI) { | | 865 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_MULTI) { |
866 | pmap_tlb_invalidate_multi(tlbctx); | | 866 | pmap_tlb_invalidate_multi(tlbctx); |
867 | return; | | 867 | return; |
868 | } | | 868 | } |
869 | | | 869 | |
870 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) { | | 870 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) { |
871 | pmap_tlb_invalidate_kernel(tlbctx); | | 871 | pmap_tlb_invalidate_kernel(tlbctx); |
872 | return; | | 872 | return; |
873 | } | | 873 | } |
874 | | | 874 | |
875 | KASSERT(kpreempt_disabled()); | | 875 | KASSERT(kpreempt_disabled()); |
876 | | | 876 | |
877 | pmap_t const pmap = tlbctx->t_pmap; | | 877 | pmap_t const pmap = tlbctx->t_pmap; |
878 | KASSERT(pmap != NULL); | | 878 | KASSERT(pmap != NULL); |
879 | | | 879 | |
880 | if (__predict_false(pmap != ci->ci_pmap)) { | | 880 | if (__predict_false(pmap != ci->ci_pmap)) { |
881 | TLB_COUNT(invalidate_user_not_current); | | 881 | TLB_COUNT(invalidate_user_not_current); |
882 | | | 882 | |
883 | /* | | 883 | /* |
884 | * For CPUs that don't implement ASNs, the SWPCTX call | | 884 | * For CPUs that don't implement ASNs, the SWPCTX call |
885 | * does all of the TLB invalidation work for us. | | 885 | * does all of the TLB invalidation work for us. |
886 | */ | | 886 | */ |
887 | if (__predict_false(pmap_max_asn == 0)) { | | 887 | if (__predict_false(pmap_max_asn == 0)) { |
888 | return; | | 888 | return; |
889 | } | | 889 | } |
890 | | | 890 | |
891 | const u_long cpu_mask = 1UL << ci->ci_cpuid; | | 891 | const u_long cpu_mask = 1UL << ci->ci_cpuid; |
892 | | | 892 | |
893 | /* | | 893 | /* |
894 | * We cannot directly invalidate the TLB in this case, | | 894 | * We cannot directly invalidate the TLB in this case, |
895 | * so force allocation of a new ASN when the pmap becomes | | 895 | * so force allocation of a new ASN when the pmap becomes |
896 | * active again. | | 896 | * active again. |
897 | */ | | 897 | */ |
898 | pmap->pm_percpu[ci->ci_cpuid].pmc_asngen = PMAP_ASNGEN_INVALID; | | 898 | pmap->pm_percpu[ci->ci_cpuid].pmc_asngen = PMAP_ASNGEN_INVALID; |
899 | atomic_and_ulong(&pmap->pm_cpus, ~cpu_mask); | | 899 | atomic_and_ulong(&pmap->pm_cpus, ~cpu_mask); |
900 | | | 900 | |
901 | /* | | 901 | /* |
902 | * This isn't strictly necessary; when we allocate a | | 902 | * This isn't strictly necessary; when we allocate a |
903 | * new ASN, we're going to clear this bit and skip | | 903 | * new ASN, we're going to clear this bit and skip |
904 | * syncing the I-stream. But we will keep this bit | | 904 | * syncing the I-stream. But we will keep this bit |
905 | * of accounting for internal consistency. | | 905 | * of accounting for internal consistency. |
906 | */ | | 906 | */ |
907 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { | | 907 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { |
908 | pmap->pm_percpu[ci->ci_cpuid].pmc_needisync = 1; | | 908 | pmap->pm_percpu[ci->ci_cpuid].pmc_needisync = 1; |
909 | } | | 909 | } |
910 | return; | | 910 | return; |
911 | } | | 911 | } |
912 | | | 912 | |
913 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { | | 913 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { |
914 | TLB_COUNT(invalidate_user_lazy_imb); | | 914 | TLB_COUNT(invalidate_user_lazy_imb); |
915 | pmap->pm_percpu[ci->ci_cpuid].pmc_needisync = 1; | | 915 | pmap->pm_percpu[ci->ci_cpuid].pmc_needisync = 1; |
916 | } | | 916 | } |
917 | | | 917 | |
918 | if (count == TLB_CTX_ALLVA) { | | 918 | if (count == TLB_CTX_ALLVA) { |
919 | /* | | 919 | /* |
920 | * Another option here for CPUs that implement ASNs is | | 920 | * Another option here for CPUs that implement ASNs is |
921 | * to allocate a new ASN and do a SWPCTX. That's almost | | 921 | * to allocate a new ASN and do a SWPCTX. That's almost |
922 | * certainly faster than a TBIAP, but would require us | | 922 | * certainly faster than a TBIAP, but would require us |
923 | * to synchronize against IPIs in pmap_activate(). | | 923 | * to synchronize against IPIs in pmap_activate(). |
924 | */ | | 924 | */ |
925 | TLB_COUNT(invalidate_user_tbiap); | | 925 | TLB_COUNT(invalidate_user_tbiap); |
926 | KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) == 0); | | 926 | KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) == 0); |
927 | ALPHA_TBIAP(); | | 927 | ALPHA_TBIAP(); |
928 | } else { | | 928 | } else { |
929 | TLB_COUNT(invalidate_user_tbis); | | 929 | TLB_COUNT(invalidate_user_tbis); |
930 | for (uintptr_t i = 0; i < count; i++) { | | 930 | for (uintptr_t i = 0; i < count; i++) { |
931 | ALPHA_TBIS(TLB_CTX_VA(tlbctx, i)); | | 931 | ALPHA_TBIS(TLB_CTX_VA(tlbctx, i)); |
932 | } | | 932 | } |
933 | } | | 933 | } |
934 | } | | 934 | } |
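| | | | |
| | | | /* |
| | | |  * To summarize the cases handled above: a multi-pmap context takes |
| | | |  * the wide TBIA/TBIAP path; a kernel (ASM) context is handled by |
| | | |  * pmap_tlb_invalidate_kernel(); a user pmap that is not active on |
| | | |  * this CPU is invalidated lazily by marking its ASN generation |
| | | |  * invalid; and an active user pmap gets per-VA TBIS calls, or a |
| | | |  * TBIAP if the VA list overflowed. |
| | | |  */ |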
935 | | | 935 | |
936 | static void | | 936 | static void |
937 | pmap_tlb_shootnow(const struct pmap_tlb_context * const tlbctx) | | 937 | pmap_tlb_shootnow(const struct pmap_tlb_context * const tlbctx) |
938 | { | | 938 | { |
939 | | | 939 | |
940 | if (TLB_CTX_COUNT(tlbctx) == 0) { | | 940 | if (TLB_CTX_COUNT(tlbctx) == 0) { |
941 | /* No work to do. */ | | 941 | /* No work to do. */ |
942 | return; | | 942 | return; |
943 | } | | 943 | } |
944 | | | 944 | |
945 | /* | | 945 | /* |
946 | * Acquire the shootdown mutex. This will also block IPL_VM | | 946 | * Acquire the shootdown mutex. This will also block IPL_VM |
947 | * interrupts and disable preemption. It is critically important | | 947 | * interrupts and disable preemption. It is critically important |
948 | * that IPIs not be blocked in this routine. | | 948 | * that IPIs not be blocked in this routine. |
949 | */ | | 949 | */ |
950 | KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) < ALPHA_PSL_IPL_CLOCK); | | 950 | KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) < ALPHA_PSL_IPL_CLOCK); |
951 | mutex_spin_enter(&tlb_lock); | | 951 | mutex_spin_enter(&tlb_lock); |
952 | tlb_evcnt.ev_count++; | | 952 | tlb_evcnt.ev_count++; |
953 | | | 953 | |
954 | const struct cpu_info *ci = curcpu(); | | 954 | const struct cpu_info *ci = curcpu(); |
955 | const u_long this_cpu = 1UL << ci->ci_cpuid; | | 955 | const u_long this_cpu = 1UL << ci->ci_cpuid; |
956 | u_long active_cpus; | | 956 | u_long active_cpus; |
957 | bool activation_locked, activation_lock_tried; | | 957 | bool activation_locked, activation_lock_tried; |
958 | | | 958 | |
959 | /* | | 959 | /* |
960 | * Figure out who to notify. If it's for the kernel or | | 960 | * Figure out who to notify. If it's for the kernel or |
961 | * multiple address spaces, we notify everybody. If | | 961 | * multiple address spaces, we notify everybody. If |
962 | * it's a single user pmap, then we try to acquire the | | 962 | * it's a single user pmap, then we try to acquire the |
963 | * activation lock so we can get an accurate accounting | | 963 | * activation lock so we can get an accurate accounting |
964 | * of who needs to be notified. If we can't acquire | | 964 | * of who needs to be notified. If we can't acquire |
965 | * the activation lock, then just notify everyone and | | 965 | * the activation lock, then just notify everyone and |
966 | * let them sort it out when they process the IPI. | | 966 | * let them sort it out when they process the IPI. |
967 | */ | | 967 | */ |
968 | if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_ASM | TLB_CTX_F_MULTI)) { | | 968 | if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_ASM | TLB_CTX_F_MULTI)) { |
969 | active_cpus = pmap_all_cpus(); | | 969 | active_cpus = pmap_all_cpus(); |
970 | activation_locked = false; | | 970 | activation_locked = false; |
971 | activation_lock_tried = false; | | 971 | activation_lock_tried = false; |
972 | } else { | | 972 | } else { |
973 | KASSERT(tlbctx->t_pmap != NULL); | | 973 | KASSERT(tlbctx->t_pmap != NULL); |
974 | activation_locked = PMAP_ACT_TRYLOCK(tlbctx->t_pmap); | | 974 | activation_locked = PMAP_ACT_TRYLOCK(tlbctx->t_pmap); |
975 | if (__predict_true(activation_locked)) { | | 975 | if (__predict_true(activation_locked)) { |
976 | active_cpus = tlbctx->t_pmap->pm_cpus; | | 976 | active_cpus = tlbctx->t_pmap->pm_cpus; |
977 | } else { | | 977 | } else { |
978 | TLB_COUNT(shootnow_over_notify); | | 978 | TLB_COUNT(shootnow_over_notify); |
979 | active_cpus = pmap_all_cpus(); | | 979 | active_cpus = pmap_all_cpus(); |
980 | } | | 980 | } |
981 | activation_lock_tried = true; | | 981 | activation_lock_tried = true; |
982 | } | | 982 | } |
983 | | | 983 | |
984 | #if defined(MULTIPROCESSOR) | | 984 | #if defined(MULTIPROCESSOR) |
985 | /* | | 985 | /* |
986 | * If there are remote CPUs that need to do work, get them | | 986 | * If there are remote CPUs that need to do work, get them |
987 | * started now. | | 987 | * started now. |
988 | */ | | 988 | */ |
989 | const u_long remote_cpus = active_cpus & ~this_cpu; | | 989 | const u_long remote_cpus = active_cpus & ~this_cpu; |
990 | KASSERT(tlb_context == NULL); | | 990 | KASSERT(tlb_context == NULL); |
991 | if (remote_cpus) { | | 991 | if (remote_cpus) { |
992 | TLB_COUNT(shootnow_remote); | | 992 | TLB_COUNT(shootnow_remote); |
993 | tlb_context = tlbctx; | | 993 | tlb_context = tlbctx; |
994 | tlb_pending = remote_cpus; | | 994 | tlb_pending = remote_cpus; |
995 | alpha_multicast_ipi(remote_cpus, ALPHA_IPI_SHOOTDOWN); | | 995 | alpha_multicast_ipi(remote_cpus, ALPHA_IPI_SHOOTDOWN); |
996 | } | | 996 | } |
997 | #endif /* MULTIPROCESSOR */ | | 997 | #endif /* MULTIPROCESSOR */ |
998 | | | 998 | |
999 | /* | | 999 | /* |
1000 | * Now that the remotes have been notified, release the | | 1000 | * Now that the remotes have been notified, release the |
1001 | * activation lock. | | 1001 | * activation lock. |
1002 | */ | | 1002 | */ |
1003 | if (activation_lock_tried) { | | 1003 | if (activation_lock_tried) { |
1004 | if (activation_locked) { | | 1004 | if (activation_locked) { |
1005 | KASSERT(tlbctx->t_pmap != NULL); | | 1005 | KASSERT(tlbctx->t_pmap != NULL); |
1006 | PMAP_ACT_UNLOCK(tlbctx->t_pmap); | | 1006 | PMAP_ACT_UNLOCK(tlbctx->t_pmap); |
1007 | } | | 1007 | } |
1008 | /* | | 1008 | /* |
1009 | * When we tried to acquire the activation lock, we | | 1009 | * When we tried to acquire the activation lock, we |
1010 | * raised IPL to IPL_SCHED (even if we ultimately | | 1010 | * raised IPL to IPL_SCHED (even if we ultimately |
1011 | * failed to acquire the lock), which blocks out IPIs. | | 1011 | * failed to acquire the lock), which blocks out IPIs. |
1012 | * Force our IPL back down to IPL_VM so that we can | | 1012 | * Force our IPL back down to IPL_VM so that we can |
1013 | * receive IPIs. | | 1013 | * receive IPIs. |
1014 | */ | | 1014 | */ |
1015 | alpha_pal_swpipl(IPL_VM); | | 1015 | alpha_pal_swpipl(IPL_VM); |
1016 | } | | 1016 | } |
1017 | | | 1017 | |
1018 | /* | | 1018 | /* |
1019 | * Do any work that we might need to do. We don't need to | | 1019 | * Do any work that we might need to do. We don't need to |
1020 | * synchronize with activation here because we know that | | 1020 | * synchronize with activation here because we know that |
1021 | * for the current CPU, activation status will not change. | | 1021 | * for the current CPU, activation status will not change. |
1022 | */ | | 1022 | */ |
1023 | if (active_cpus & this_cpu) { | | 1023 | if (active_cpus & this_cpu) { |
1024 | pmap_tlb_invalidate(tlbctx, ci); | | 1024 | pmap_tlb_invalidate(tlbctx, ci); |
1025 | } | | 1025 | } |
1026 | | | 1026 | |
1027 | #if defined(MULTIPROCESSOR) | | 1027 | #if defined(MULTIPROCESSOR) |
1028 | /* Wait for remote CPUs to finish. */ | | 1028 | /* Wait for remote CPUs to finish. */ |
1029 | if (remote_cpus) { | | 1029 | if (remote_cpus) { |
1030 | int backoff = SPINLOCK_BACKOFF_MIN; | | 1030 | int backoff = SPINLOCK_BACKOFF_MIN; |
1031 | u_int spins = 0; | | 1031 | u_int spins = 0; |
1032 | | | 1032 | |
1033 | while (atomic_load_acquire(&tlb_context) != NULL) { | | 1033 | while (atomic_load_acquire(&tlb_context) != NULL) { |
1034 | SPINLOCK_BACKOFF(backoff); | | 1034 | SPINLOCK_BACKOFF(backoff); |
1035 | if (spins++ > 0x0fffffff) { | | 1035 | if (spins++ > 0x0fffffff) { |
1036 | printf("TLB LOCAL MASK = 0x%016lx\n", | | 1036 | printf("TLB LOCAL MASK = 0x%016lx\n", |
1037 | this_cpu); | | 1037 | this_cpu); |
1038 | printf("TLB REMOTE MASK = 0x%016lx\n", | | 1038 | printf("TLB REMOTE MASK = 0x%016lx\n", |
1039 | remote_cpus); | | 1039 | remote_cpus); |
1040 | printf("TLB REMOTE PENDING = 0x%016lx\n", | | 1040 | printf("TLB REMOTE PENDING = 0x%016lx\n", |
1041 | tlb_pending); | | 1041 | tlb_pending); |
1042 | printf("TLB CONTEXT = %p\n", tlb_context); | | 1042 | printf("TLB CONTEXT = %p\n", tlb_context); |
1043 | printf("TLB LOCAL IPL = %lu\n", | | 1043 | printf("TLB LOCAL IPL = %lu\n", |
1044 | alpha_pal_rdps() & ALPHA_PSL_IPL_MASK); | | 1044 | alpha_pal_rdps() & ALPHA_PSL_IPL_MASK); |
1045 | panic("pmap_tlb_shootnow"); | | 1045 | panic("pmap_tlb_shootnow"); |
1046 | } | | 1046 | } |
1047 | } | | 1047 | } |
1048 | } | | 1048 | } |
1049 | KASSERT(tlb_context == NULL); | | 1049 | KASSERT(tlb_context == NULL); |
1050 | #endif /* MULTIPROCESSOR */ | | 1050 | #endif /* MULTIPROCESSOR */ |
1051 | | | 1051 | |
1052 | mutex_spin_exit(&tlb_lock); | | 1052 | mutex_spin_exit(&tlb_lock); |
1053 | | | 1053 | |
1054 | if (__predict_false(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV)) { | | 1054 | if (__predict_false(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV)) { |
1055 | /* | | 1055 | /* |
1056 | * P->V TLB operations may operate on multiple pmaps. | | 1056 | * P->V TLB operations may operate on multiple pmaps. |
1057 | * The shootdown takes a reference on the first pmap it | | 1057 | * The shootdown takes a reference on the first pmap it |
1058 | * encounters, in order to prevent it from disappearing, | | 1058 | * encounters, in order to prevent it from disappearing, |
1059 | * in the hope that we end up with a single-pmap P->V | | 1059 | * in the hope that we end up with a single-pmap P->V |
1060 | * operation (instrumentation shows this is not rare). | | 1060 | * operation (instrumentation shows this is not rare). |
1061 | * | | 1061 | * |
1062 | * Once this shootdown is finished globally, we need to | | 1062 | * Once this shootdown is finished globally, we need to |
1063 | * release this extra reference. | | 1063 | * release this extra reference. |
1064 | */ | | 1064 | */ |
1065 | KASSERT(tlbctx->t_pmap != NULL); | | 1065 | KASSERT(tlbctx->t_pmap != NULL); |
1066 | pmap_destroy(tlbctx->t_pmap); | | 1066 | pmap_destroy(tlbctx->t_pmap); |
1067 | } | | 1067 | } |
1068 | } | | 1068 | } |
1069 | | | 1069 | |
1070 | #if defined(MULTIPROCESSOR) | | 1070 | #if defined(MULTIPROCESSOR) |
1071 | void | | 1071 | void |
1072 | pmap_tlb_shootdown_ipi(struct cpu_info * const ci, | | 1072 | pmap_tlb_shootdown_ipi(struct cpu_info * const ci, |
1074 | struct trapframe * const tf __unused) | | 1074 | struct trapframe * const tf __unused) |
1075 | { | | 1075 | { |
1076 | KASSERT(tlb_context != NULL); | | 1076 | KASSERT(tlb_context != NULL); |
1077 | pmap_tlb_invalidate(tlb_context, ci); | | 1077 | pmap_tlb_invalidate(tlb_context, ci); |
1078 | if (atomic_and_ulong_nv(&tlb_pending, ~(1UL << ci->ci_cpuid)) == 0) { | | 1078 | if (atomic_and_ulong_nv(&tlb_pending, ~(1UL << ci->ci_cpuid)) == 0) { |
1079 | atomic_store_release(&tlb_context, NULL); | | 1079 | atomic_store_release(&tlb_context, NULL); |
1080 | } | | 1080 | } |
1081 | } | | 1081 | } |
1082 | #endif /* MULTIPROCESSOR */ | | 1082 | #endif /* MULTIPROCESSOR */ |
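| | | | |
| | | | /* |
| | | |  * Memory-ordering note: the atomic_store_release() of tlb_context |
| | | |  * in pmap_tlb_shootdown_ipi() pairs with the atomic_load_acquire() |
| | | |  * in the wait loop of pmap_tlb_shootnow(), so the initiating CPU |
| | | |  * observes all remote invalidations as complete before it releases |
| | | |  * tlb_lock. |
| | | |  */ |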
1083 | | | 1083 | |
1084 | static __inline void | | 1084 | static __inline void |
1085 | pmap_tlb_ptpage_drain(struct pmap_tlb_context * const tlbctx) | | 1085 | pmap_tlb_ptpage_drain(struct pmap_tlb_context * const tlbctx) |
1086 | { | | 1086 | { |
1087 | pmap_pagelist_free(&tlbctx->t_freeptq); | | 1087 | pmap_pagelist_free(&tlbctx->t_freeptq); |
1088 | } | | 1088 | } |
1089 | | | 1089 | |
1090 | /* | | 1090 | /* |
1091 | * Internal routines | | 1091 | * Internal routines |
1092 | */ | | 1092 | */ |
1093 | static void alpha_protection_init(void); | | 1093 | static void alpha_protection_init(void); |
1094 | static pt_entry_t pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool, | | 1094 | static pt_entry_t pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool, |
1095 | pv_entry_t *, | | 1095 | pv_entry_t *, |
1096 | struct pmap_tlb_context *); | | 1096 | struct pmap_tlb_context *); |
1097 | static void pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t, | | 1097 | static void pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t, |
1098 | struct pmap_tlb_context *); | | 1098 | struct pmap_tlb_context *); |
1099 | | | 1099 | |
1100 | /* | | 1100 | /* |
1101 | * PT page management functions. | | 1101 | * PT page management functions. |
1102 | */ | | 1102 | */ |
1103 | static int pmap_ptpage_alloc(pmap_t, pt_entry_t *, int); | | 1103 | static int pmap_ptpage_alloc(pmap_t, pt_entry_t *, int); |
1104 | static void pmap_ptpage_free(pmap_t, pt_entry_t *, | | 1104 | static void pmap_ptpage_free(pmap_t, pt_entry_t *, |
1105 | struct pmap_tlb_context *); | | 1105 | struct pmap_tlb_context *); |
1106 | static void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, | | 1106 | static void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, |
1107 | struct pmap_tlb_context *); | | 1107 | struct pmap_tlb_context *); |
1108 | static void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, | | 1108 | static void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, |
1109 | struct pmap_tlb_context *); | | 1109 | struct pmap_tlb_context *); |
1110 | static void pmap_l1pt_delref(pmap_t, pt_entry_t *); | | 1110 | static void pmap_l1pt_delref(pmap_t, pt_entry_t *); |
1111 | | | 1111 | |
1112 | static void *pmap_l1pt_alloc(struct pool *, int); | | 1112 | static void *pmap_l1pt_alloc(struct pool *, int); |
1113 | static void pmap_l1pt_free(struct pool *, void *); | | 1113 | static void pmap_l1pt_free(struct pool *, void *); |
1114 | | | 1114 | |
1115 | static struct pool_allocator pmap_l1pt_allocator = { | | 1115 | static struct pool_allocator pmap_l1pt_allocator = { |
1116 | pmap_l1pt_alloc, pmap_l1pt_free, 0, | | 1116 | pmap_l1pt_alloc, pmap_l1pt_free, 0, |
1117 | }; | | 1117 | }; |
1118 | | | 1118 | |
1119 | static int pmap_l1pt_ctor(void *, void *, int); | | 1119 | static int pmap_l1pt_ctor(void *, void *, int); |
1120 | | | 1120 | |
1121 | /* | | 1121 | /* |
1122 | * PV table management functions. | | 1122 | * PV table management functions. |
1123 | */ | | 1123 | */ |
1124 | static int pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, pt_entry_t *, | | 1124 | static int pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, pt_entry_t *, |
1125 | bool, pv_entry_t); | | 1125 | bool, pv_entry_t); |
1126 | static void pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool, | | 1126 | static void pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool, |
1127 | pv_entry_t *); | | 1127 | pv_entry_t *); |
1128 | static void *pmap_pv_page_alloc(struct pool *, int); | | 1128 | static void *pmap_pv_page_alloc(struct pool *, int); |
1129 | static void pmap_pv_page_free(struct pool *, void *); | | 1129 | static void pmap_pv_page_free(struct pool *, void *); |
1130 | | | 1130 | |
1131 | static struct pool_allocator pmap_pv_page_allocator = { | | 1131 | static struct pool_allocator pmap_pv_page_allocator = { |
1132 | pmap_pv_page_alloc, pmap_pv_page_free, 0, | | 1132 | pmap_pv_page_alloc, pmap_pv_page_free, 0, |
1133 | }; | | 1133 | }; |
1134 | | | 1134 | |
1135 | #ifdef DEBUG | | 1135 | #ifdef DEBUG |
1136 | void pmap_pv_dump(paddr_t); | | 1136 | void pmap_pv_dump(paddr_t); |
1137 | #endif | | 1137 | #endif |
1138 | | | 1138 | |
1139 | #define pmap_pv_alloc() pool_cache_get(&pmap_pv_cache, PR_NOWAIT) | | 1139 | #define pmap_pv_alloc() pool_cache_get(&pmap_pv_cache, PR_NOWAIT) |
1140 | #define pmap_pv_free(pv) pool_cache_put(&pmap_pv_cache, (pv)) | | 1140 | #define pmap_pv_free(pv) pool_cache_put(&pmap_pv_cache, (pv)) |
1141 | | | 1141 | |
1142 | /* | | 1142 | /* |
1143 | * ASN management functions. | | 1143 | * ASN management functions. |
1144 | */ | | 1144 | */ |
1145 | static u_int pmap_asn_alloc(pmap_t, struct cpu_info *); | | 1145 | static u_int pmap_asn_alloc(pmap_t, struct cpu_info *); |
1146 | | | 1146 | |
1147 | /* | | 1147 | /* |
1148 | * Misc. functions. | | 1148 | * Misc. functions. |
1149 | */ | | 1149 | */ |
1150 | static struct vm_page *pmap_physpage_alloc(int); | | 1150 | static struct vm_page *pmap_physpage_alloc(int); |
1151 | static void pmap_physpage_free(paddr_t); | | 1151 | static void pmap_physpage_free(paddr_t); |
1152 | static int pmap_physpage_addref(void *); | | 1152 | static int pmap_physpage_addref(void *); |
1153 | static int pmap_physpage_delref(void *); | | 1153 | static int pmap_physpage_delref(void *); |
1154 | | | 1154 | |
1155 | static bool vtophys_internal(vaddr_t, paddr_t *p); | | 1155 | static bool vtophys_internal(vaddr_t, paddr_t *p); |
1156 | | | 1156 | |
1157 | /* | | 1157 | /* |
1158 | * PMAP_KERNEL_PTE: | | 1158 | * PMAP_KERNEL_PTE: |
1159 | * | | 1159 | * |
1160 | * Get a kernel PTE. | | 1160 | * Get a kernel PTE. |
1161 | * | | 1161 | * |
1162 | * If debugging, do a table walk. If not debugging, just use | | 1162 | * If debugging, do a table walk. If not debugging, just use |
1163 | * the Virtual Page Table, since all kernel page tables are | | 1163 | * the Virtual Page Table, since all kernel page tables are |
1164 | * pre-allocated and mapped in. | | 1164 | * pre-allocated and mapped in. |
1165 | */ | | 1165 | */ |
1166 | #ifdef DEBUG | | 1166 | #ifdef DEBUG |
1167 | #define PMAP_KERNEL_PTE(va) \ | | 1167 | #define PMAP_KERNEL_PTE(va) \ |
1168 | ({ \ | | 1168 | ({ \ |
1169 | pt_entry_t *l1pte_, *l2pte_; \ | | 1169 | pt_entry_t *l1pte_, *l2pte_; \ |
1170 | \ | | 1170 | \ |
1171 | l1pte_ = pmap_l1pte(kernel_lev1map, va); \ | | 1171 | l1pte_ = pmap_l1pte(kernel_lev1map, va); \ |
1172 | if (pmap_pte_v(l1pte_) == 0) { \ | | 1172 | if (pmap_pte_v(l1pte_) == 0) { \ |
1173 | printf("kernel level 1 PTE not valid, va 0x%lx " \ | | 1173 | printf("kernel level 1 PTE not valid, va 0x%lx " \ |
1174 | "(line %d)\n", (va), __LINE__); \ | | 1174 | "(line %d)\n", (va), __LINE__); \ |
1175 | panic("PMAP_KERNEL_PTE"); \ | | 1175 | panic("PMAP_KERNEL_PTE"); \ |
1176 | } \ | | 1176 | } \ |
1177 | l2pte_ = pmap_l2pte(kernel_lev1map, va, l1pte_); \ | | 1177 | l2pte_ = pmap_l2pte(kernel_lev1map, va, l1pte_); \ |
1178 | if (pmap_pte_v(l2pte_) == 0) { \ | | 1178 | if (pmap_pte_v(l2pte_) == 0) { \ |
1179 | printf("kernel level 2 PTE not valid, va 0x%lx " \ | | 1179 | printf("kernel level 2 PTE not valid, va 0x%lx " \ |
1180 | "(line %d)\n", (va), __LINE__); \ | | 1180 | "(line %d)\n", (va), __LINE__); \ |
1181 | panic("PMAP_KERNEL_PTE"); \ | | 1181 | panic("PMAP_KERNEL_PTE"); \ |
1182 | } \ | | 1182 | } \ |
1183 | pmap_l3pte(kernel_lev1map, va, l2pte_); \ | | 1183 | pmap_l3pte(kernel_lev1map, va, l2pte_); \ |
1184 | }) | | 1184 | }) |
1185 | #else | | 1185 | #else |
1186 | #define PMAP_KERNEL_PTE(va) (&VPT[VPT_INDEX((va))]) | | 1186 | #define PMAP_KERNEL_PTE(va) (&VPT[VPT_INDEX((va))]) |
1187 | #endif | | 1187 | #endif |
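| | | | |
| | | | /* |
| | | |  * Example (illustrative only) of using the macro above to fetch |
| | | |  * and check a kernel PTE: |
| | | |  * |
| | | |  *	pt_entry_t * const pte = PMAP_KERNEL_PTE(va); |
| | | |  *	if (pmap_pte_v(pte)) { |
| | | |  *		...the mapping for va is valid; operate on *pte... |
| | | |  *	} |
| | | |  */ |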
1188 | | | 1188 | |
1189 | /* | | 1189 | /* |
1190 | * PMAP_STAT_{INCR,DECR}: | | 1190 | * PMAP_STAT_{INCR,DECR}: |
1191 | * | | 1191 | * |
1192 | * Increment or decrement a pmap statistic. | | 1192 | * Increment or decrement a pmap statistic. |
1193 | */ | | 1193 | */ |
1194 | #define PMAP_STAT_INCR(s, v) atomic_add_long((unsigned long *)(&(s)), (v)) | | 1194 | #define PMAP_STAT_INCR(s, v) atomic_add_long((unsigned long *)(&(s)), (v)) |
1195 | #define PMAP_STAT_DECR(s, v) atomic_add_long((unsigned long *)(&(s)), -(v)) | | 1195 | #define PMAP_STAT_DECR(s, v) atomic_add_long((unsigned long *)(&(s)), -(v)) |
1196 | | | 1196 | |
1197 | /* | | 1197 | /* |
1198 | * pmap_init_cpu: | | 1198 | * pmap_init_cpu: |
1199 | * | | 1199 | * |
1200 | * Initialize pmap data in the cpu_info. | | 1200 | * Initialize pmap data in the cpu_info. |
1201 | */ | | 1201 | */ |
1202 | void | | 1202 | void |
1203 | pmap_init_cpu(struct cpu_info * const ci) | | 1203 | pmap_init_cpu(struct cpu_info * const ci) |
1204 | { | | 1204 | { |
1205 | pmap_t const pmap = pmap_kernel(); | | 1205 | pmap_t const pmap = pmap_kernel(); |
1206 | | | 1206 | |
1207 | /* All CPUs start out using the kernel pmap. */ | | 1207 | /* All CPUs start out using the kernel pmap. */ |
1208 | atomic_or_ulong(&pmap->pm_cpus, 1UL << ci->ci_cpuid); | | 1208 | atomic_or_ulong(&pmap->pm_cpus, 1UL << ci->ci_cpuid); |
1209 | pmap_reference(pmap); | | 1209 | pmap_reference(pmap); |
1210 | ci->ci_pmap = pmap; | | 1210 | ci->ci_pmap = pmap; |
1211 | | | 1211 | |
1212 | /* Initialize ASN allocation logic. */ | | 1212 | /* Initialize ASN allocation logic. */ |
1213 | ci->ci_next_asn = PMAP_ASN_FIRST_USER; | | 1213 | ci->ci_next_asn = PMAP_ASN_FIRST_USER; |
1214 | ci->ci_asn_gen = PMAP_ASNGEN_INITIAL; | | 1214 | ci->ci_asn_gen = PMAP_ASNGEN_INITIAL; |
1215 | } | | 1215 | } |
1216 | | | 1216 | |
1217 | /* | | 1217 | /* |
1218 | * pmap_bootstrap: | | 1218 | * pmap_bootstrap: |
1219 | * | | 1219 | * |
1220 | * Bootstrap the system to run with virtual memory. | | 1220 | * Bootstrap the system to run with virtual memory. |
1221 | * | | 1221 | * |
1222 | * Note: no locking is necessary in this function. | | 1222 | * Note: no locking is necessary in this function. |
1223 | */ | | 1223 | */ |
1224 | void | | 1224 | void |
1225 | pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids) | | 1225 | pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids) |
1226 | { | | 1226 | { |
1227 | vsize_t lev2mapsize, lev3mapsize; | | 1227 | vsize_t lev2mapsize, lev3mapsize; |
1228 | pt_entry_t *lev2map, *lev3map; | | 1228 | pt_entry_t *lev2map, *lev3map; |
1229 | pt_entry_t pte; | | 1229 | pt_entry_t pte; |
1230 | vsize_t bufsz; | | 1230 | vsize_t bufsz; |
1231 | struct pcb *pcb; | | 1231 | struct pcb *pcb; |
1232 | int i; | | 1232 | int i; |
1233 | | | 1233 | |
1234 | #ifdef DEBUG | | 1234 | #ifdef DEBUG |
1235 | if (pmapdebug & (PDB_FOLLOW|PDB_BOOTSTRAP)) | | 1235 | if (pmapdebug & (PDB_FOLLOW|PDB_BOOTSTRAP)) |
1236 | printf("pmap_bootstrap(0x%lx, %u)\n", ptaddr, maxasn); | | 1236 | printf("pmap_bootstrap(0x%lx, %u)\n", ptaddr, maxasn); |
1237 | #endif | | 1237 | #endif |
1238 | | | 1238 | |
1239 | /* | | 1239 | /* |
1240 | * Compute the number of pages kmem_arena will have. | | 1240 | * Compute the number of pages kmem_arena will have. |
1241 | */ | | 1241 | */ |
1242 | kmeminit_nkmempages(); | | 1242 | kmeminit_nkmempages(); |
1243 | | | 1243 | |
1244 | /* | | 1244 | /* |
1245 | * Figure out how many initial PTE's are necessary to map the | | 1245 | * Figure out how many initial PTE's are necessary to map the |
1246 | * kernel. We also reserve space for kmem_alloc_pageable() | | 1246 | * kernel. We also reserve space for kmem_alloc_pageable() |
1247 | * for vm_fork(). | | 1247 | * for vm_fork(). |
1248 | */ | | 1248 | */ |
1249 | | | 1249 | |
1250 | /* Get size of buffer cache and set an upper limit */ | | 1250 | /* Get size of buffer cache and set an upper limit */ |
1251 | bufsz = buf_memcalc(); | | 1251 | bufsz = buf_memcalc(); |
1252 | buf_setvalimit(bufsz); | | 1252 | buf_setvalimit(bufsz); |
1253 | | | 1253 | |
1254 | lev3mapsize = | | 1254 | lev3mapsize = |
1255 | (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) + | | 1255 | (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) + |
1256 | bufsz + 16 * NCARGS + pager_map_size) / PAGE_SIZE + | | 1256 | bufsz + 16 * NCARGS + pager_map_size) / PAGE_SIZE + |
1257 | (maxproc * UPAGES) + nkmempages; | | 1257 | (maxproc * UPAGES) + nkmempages; |
1258 | | | 1258 | |
1259 | lev3mapsize = roundup(lev3mapsize, NPTEPG); | | 1259 | lev3mapsize = roundup(lev3mapsize, NPTEPG); |
1260 | | | 1260 | |
1261 | /* | | 1261 | /* |
1262 | * Initialize `FYI' variables. Note we're relying on | | 1262 | * Initialize `FYI' variables. Note we're relying on |
1263 | * the fact that BSEARCH sorts the vm_physmem[] array | | 1263 | * the fact that BSEARCH sorts the vm_physmem[] array |
1264 | * for us. | | 1264 | * for us. |
1265 | */ | | 1265 | */ |
1266 | avail_start = ptoa(uvm_physseg_get_avail_start(uvm_physseg_get_first())); | | 1266 | avail_start = ptoa(uvm_physseg_get_avail_start(uvm_physseg_get_first())); |
1267 | avail_end = ptoa(uvm_physseg_get_avail_end(uvm_physseg_get_last())); | | 1267 | avail_end = ptoa(uvm_physseg_get_avail_end(uvm_physseg_get_last())); |
1268 | virtual_end = VM_MIN_KERNEL_ADDRESS + lev3mapsize * PAGE_SIZE; | | 1268 | virtual_end = VM_MIN_KERNEL_ADDRESS + lev3mapsize * PAGE_SIZE; |
1269 | | | 1269 | |
1270 | #if 0 | | 1270 | #if 0 |
1271 | printf("avail_start = 0x%lx\n", avail_start); | | 1271 | printf("avail_start = 0x%lx\n", avail_start); |
1272 | printf("avail_end = 0x%lx\n", avail_end); | | 1272 | printf("avail_end = 0x%lx\n", avail_end); |
1273 | printf("virtual_end = 0x%lx\n", virtual_end); | | 1273 | printf("virtual_end = 0x%lx\n", virtual_end); |
1274 | #endif | | 1274 | #endif |
1275 | | | 1275 | |
1276 | /* | | 1276 | /* |
1277 | * Allocate a level 1 PTE table for the kernel. | | 1277 | * Allocate a level 1 PTE table for the kernel. |
1278 | * This is always one page long. | | 1278 | * This is always one page long. |
1279 | * IF THIS IS NOT A MULTIPLE OF PAGE_SIZE, ALL WILL GO TO HELL. | | 1279 | * IF THIS IS NOT A MULTIPLE OF PAGE_SIZE, ALL WILL GO TO HELL. |
1280 | */ | | 1280 | */ |
1281 | kernel_lev1map = (pt_entry_t *) | | 1281 | kernel_lev1map = (pt_entry_t *) |
1282 | uvm_pageboot_alloc(sizeof(pt_entry_t) * NPTEPG); | | 1282 | uvm_pageboot_alloc(sizeof(pt_entry_t) * NPTEPG); |
1283 | | | 1283 | |
1284 | /* | | 1284 | /* |
1285 | * Allocate a level 2 PTE table for the kernel. | | 1285 | * Allocate a level 2 PTE table for the kernel. |
1286 | * These must map all of the level3 PTEs. | | 1286 | * These must map all of the level3 PTEs. |
1287 | * IF THIS IS NOT A MULTIPLE OF PAGE_SIZE, ALL WILL GO TO HELL. | | 1287 | * IF THIS IS NOT A MULTIPLE OF PAGE_SIZE, ALL WILL GO TO HELL. |
1288 | */ | | 1288 | */ |
1289 | lev2mapsize = roundup(howmany(lev3mapsize, NPTEPG), NPTEPG); | | 1289 | lev2mapsize = roundup(howmany(lev3mapsize, NPTEPG), NPTEPG); |
1290 | lev2map = (pt_entry_t *) | | 1290 | lev2map = (pt_entry_t *) |
1291 | uvm_pageboot_alloc(sizeof(pt_entry_t) * lev2mapsize); | | 1291 | uvm_pageboot_alloc(sizeof(pt_entry_t) * lev2mapsize); |
1292 | | | 1292 | |
1293 | /* | | 1293 | /* |
1294 | * Allocate a level 3 PTE table for the kernel. | | 1294 | * Allocate a level 3 PTE table for the kernel. |
1295 | * Contains lev3mapsize PTEs. | | 1295 | * Contains lev3mapsize PTEs. |
1296 | */ | | 1296 | */ |
1297 | lev3map = (pt_entry_t *) | | 1297 | lev3map = (pt_entry_t *) |
1298 | uvm_pageboot_alloc(sizeof(pt_entry_t) * lev3mapsize); | | 1298 | uvm_pageboot_alloc(sizeof(pt_entry_t) * lev3mapsize); |
1299 | | | 1299 | |
1300 | /* | | 1300 | /* |
1301 | * Set up level 1 page table | | 1301 | * Set up level 1 page table |
1302 | */ | | 1302 | */ |
1303 | | | 1303 | |
1304 | /* Map all of the level 2 pte pages */ | | 1304 | /* Map all of the level 2 pte pages */ |
1305 | for (i = 0; i < howmany(lev2mapsize, NPTEPG); i++) { | | 1305 | for (i = 0; i < howmany(lev2mapsize, NPTEPG); i++) { |
1306 | pte = (ALPHA_K0SEG_TO_PHYS(((vaddr_t)lev2map) + | | 1306 | pte = (ALPHA_K0SEG_TO_PHYS(((vaddr_t)lev2map) + |
1307 | (i*PAGE_SIZE)) >> PGSHIFT) << PG_SHIFT; | | 1307 | (i*PAGE_SIZE)) >> PGSHIFT) << PG_SHIFT; |
1308 | pte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; | | 1308 | pte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; |
1309 | kernel_lev1map[l1pte_index(VM_MIN_KERNEL_ADDRESS + | | 1309 | kernel_lev1map[l1pte_index(VM_MIN_KERNEL_ADDRESS + |
1310 | (i*PAGE_SIZE*NPTEPG*NPTEPG))] = pte; | | 1310 | (i*PAGE_SIZE*NPTEPG*NPTEPG))] = pte; |
1311 | } | | 1311 | } |
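| | | | |
| | | | /* |
| | | |  * For scale (assuming the usual 8KB Alpha page): NPTEPG == |
| | | |  * PAGE_SIZE / sizeof(pt_entry_t) == 8192 / 8 == 1024, so one L3 |
| | | |  * PT page maps 1024 * 8KB == 8MB, and one L1 entry (an entire L2 |
| | | |  * PT page) covers 1024 * 8MB == 8GB -- hence the |
| | | |  * i*PAGE_SIZE*NPTEPG*NPTEPG stride in the loop above. |
| | | |  */ |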
1312 | | | 1312 | |
1313 | /* Map the virtual page table */ | | 1313 | /* Map the virtual page table */ |
1314 | pte = (ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT) | | 1314 | pte = (ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT) |
1315 | << PG_SHIFT; | | 1315 | << PG_SHIFT; |
1316 | pte |= PG_V | PG_KRE | PG_KWE; /* NOTE NO ASM */ | | 1316 | pte |= PG_V | PG_KRE | PG_KWE; /* NOTE NO ASM */ |
1317 | kernel_lev1map[l1pte_index(VPTBASE)] = pte; | | 1317 | kernel_lev1map[l1pte_index(VPTBASE)] = pte; |
1318 | VPT = (pt_entry_t *)VPTBASE; | | 1318 | VPT = (pt_entry_t *)VPTBASE; |
1319 | | | 1319 | |
1320 | /* | | 1320 | /* |
1321 | * Set up level 2 page table. | | 1321 | * Set up level 2 page table. |
1322 | */ | | 1322 | */ |
1323 | /* Map all of the level 3 pte pages */ | | 1323 | /* Map all of the level 3 pte pages */ |
1324 | for (i = 0; i < howmany(lev3mapsize, NPTEPG); i++) { | | 1324 | for (i = 0; i < howmany(lev3mapsize, NPTEPG); i++) { |
1325 | pte = (ALPHA_K0SEG_TO_PHYS(((vaddr_t)lev3map) + | | 1325 | pte = (ALPHA_K0SEG_TO_PHYS(((vaddr_t)lev3map) + |
1326 | (i*PAGE_SIZE)) >> PGSHIFT) << PG_SHIFT; | | 1326 | (i*PAGE_SIZE)) >> PGSHIFT) << PG_SHIFT; |
1327 | pte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; | | 1327 | pte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; |
1328 | lev2map[l2pte_index(VM_MIN_KERNEL_ADDRESS+ | | 1328 | lev2map[l2pte_index(VM_MIN_KERNEL_ADDRESS+ |
1329 | (i*PAGE_SIZE*NPTEPG))] = pte; | | 1329 | (i*PAGE_SIZE*NPTEPG))] = pte; |
1330 | } | | 1330 | } |
1331 | | | 1331 | |
1332 | /* Initialize the pmap_growkernel_lock. */ | | 1332 | /* Initialize the pmap_growkernel_lock. */ |
1333 | rw_init(&pmap_growkernel_lock); | | 1333 | rw_init(&pmap_growkernel_lock); |
1334 | | | 1334 | |
1335 | /* | | 1335 | /* |
1336 | * Set up level three page table (lev3map) | | 1336 | * Set up level three page table (lev3map) |
1337 | */ | | 1337 | */ |
1338 | /* Nothing to do; it's already zero'd */ | | 1338 | /* Nothing to do; it's already zero'd */ |
1339 | | | 1339 | |
1340 | /* | | 1340 | /* |
1341 | * Initialize the pmap pools and list. | | 1341 | * Initialize the pmap pools and list. |
1342 | */ | | 1342 | */ |
1343 | pmap_ncpuids = ncpuids; | | 1343 | pmap_ncpuids = ncpuids; |
1344 | pool_cache_bootstrap(&pmap_pmap_cache, PMAP_SIZEOF(pmap_ncpuids), | | 1344 | pool_cache_bootstrap(&pmap_pmap_cache, PMAP_SIZEOF(pmap_ncpuids), |
1345 | COHERENCY_UNIT, 0, 0, "pmap", NULL, IPL_NONE, NULL, NULL, NULL); | | 1345 | COHERENCY_UNIT, 0, 0, "pmap", NULL, IPL_NONE, NULL, NULL, NULL); |
1346 | pool_cache_bootstrap(&pmap_l1pt_cache, PAGE_SIZE, 0, 0, 0, "pmapl1pt", | | 1346 | pool_cache_bootstrap(&pmap_l1pt_cache, PAGE_SIZE, 0, 0, 0, "pmapl1pt", |
1347 | &pmap_l1pt_allocator, IPL_NONE, pmap_l1pt_ctor, NULL, NULL); | | 1347 | &pmap_l1pt_allocator, IPL_NONE, pmap_l1pt_ctor, NULL, NULL); |
1348 | pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0, | | 1348 | pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0, |
1349 | PR_LARGECACHE, "pmappv", &pmap_pv_page_allocator, IPL_NONE, NULL, | | 1349 | PR_LARGECACHE, "pmappv", &pmap_pv_page_allocator, IPL_NONE, NULL, |
1350 | NULL, NULL); | | 1350 | NULL, NULL); |
1351 | | | 1351 | |
1352 | TAILQ_INIT(&pmap_all_pmaps); | | 1352 | TAILQ_INIT(&pmap_all_pmaps); |
1353 | | | 1353 | |
1354 | /* Initialize the ASN logic. See also pmap_init_cpu(). */ | | 1354 | /* Initialize the ASN logic. See also pmap_init_cpu(). */ |
1355 | pmap_max_asn = maxasn; | | 1355 | pmap_max_asn = maxasn; |
1356 | | | 1356 | |
1357 | /* | | 1357 | /* |
1358 | * Initialize the locks. | | 1358 | * Initialize the locks. |
1359 | */ | | 1359 | */ |
1360 | rw_init(&pmap_main_lock); | | 1360 | rw_init(&pmap_main_lock); |
1361 | mutex_init(&pmap_all_pmaps_lock, MUTEX_DEFAULT, IPL_NONE); | | 1361 | mutex_init(&pmap_all_pmaps_lock, MUTEX_DEFAULT, IPL_NONE); |
1362 | for (i = 0; i < __arraycount(pmap_pvh_locks); i++) { | | 1362 | for (i = 0; i < __arraycount(pmap_pvh_locks); i++) { |
1363 | mutex_init(&pmap_pvh_locks[i].lock, MUTEX_DEFAULT, IPL_NONE); | | 1363 | mutex_init(&pmap_pvh_locks[i].lock, MUTEX_DEFAULT, IPL_NONE); |
1364 | } | | 1364 | } |
1365 | for (i = 0; i < __arraycount(pmap_pmap_locks); i++) { | | 1365 | for (i = 0; i < __arraycount(pmap_pmap_locks); i++) { |
1366 | mutex_init(&pmap_pmap_locks[i].locks.lock, | | 1366 | mutex_init(&pmap_pmap_locks[i].locks.lock, |
1367 | MUTEX_DEFAULT, IPL_NONE); | | 1367 | MUTEX_DEFAULT, IPL_NONE); |
1368 | mutex_init(&pmap_pmap_locks[i].locks.activation_lock, | | 1368 | mutex_init(&pmap_pmap_locks[i].locks.activation_lock, |
1369 | MUTEX_SPIN, IPL_SCHED); | | 1369 | MUTEX_SPIN, IPL_SCHED); |
1370 | } | | 1370 | } |
1371 | | | 1371 | |
1372 | /* | | 1372 | /* |
1373 | * This must block any interrupt from which a TLB shootdown | | 1373 | * This must block any interrupt from which a TLB shootdown |
1374 | * could be issued, but must NOT block IPIs. | | 1374 | * could be issued, but must NOT block IPIs. |
1375 | */ | | 1375 | */ |
1376 | mutex_init(&tlb_lock, MUTEX_SPIN, IPL_VM); | | 1376 | mutex_init(&tlb_lock, MUTEX_SPIN, IPL_VM); |
1377 | | | 1377 | |
1378 | /* | | 1378 | /* |
1379 | * Initialize kernel pmap. Note that all kernel mappings | | 1379 | * Initialize kernel pmap. Note that all kernel mappings |
1380 | * have PG_ASM set, so the ASN doesn't really matter for | | 1380 | * have PG_ASM set, so the ASN doesn't really matter for |
1381 | * the kernel pmap. Also, since the kernel pmap always | | 1381 | * the kernel pmap. Also, since the kernel pmap always |
1382 | * references kernel_lev1map, it always has an invalid ASN | | 1382 | * references kernel_lev1map, it always has an invalid ASN |
1383 | * generation. | | 1383 | * generation. |
1384 | */ | | 1384 | */ |
1385 | memset(pmap_kernel(), 0, sizeof(struct pmap)); | | 1385 | memset(pmap_kernel(), 0, sizeof(struct pmap)); |
1386 | LIST_INIT(&pmap_kernel()->pm_ptpages); | | 1386 | LIST_INIT(&pmap_kernel()->pm_ptpages); |
| | | 1387 | LIST_INIT(&pmap_kernel()->pm_pvents); |
1387 | atomic_store_relaxed(&pmap_kernel()->pm_count, 1); | | 1388 | atomic_store_relaxed(&pmap_kernel()->pm_count, 1); |
1388 | /* Kernel pmap does not have per-CPU info. */ | | 1389 | /* Kernel pmap does not have per-CPU info. */ |
1389 | TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list); | | 1390 | TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list); |
1390 | | | 1391 | |
1391 | /* | | 1392 | /* |
1392 | * Set up lwp0's PCB such that the ptbr points to the right place | | 1393 | * Set up lwp0's PCB such that the ptbr points to the right place |
1393 | * and has the kernel pmap's (really unused) ASN. | | 1394 | * and has the kernel pmap's (really unused) ASN. |
1394 | */ | | 1395 | */ |
1395 | pcb = lwp_getpcb(&lwp0); | | 1396 | pcb = lwp_getpcb(&lwp0); |
1396 | pcb->pcb_hw.apcb_ptbr = | | 1397 | pcb->pcb_hw.apcb_ptbr = |
1397 | ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT; | | 1398 | ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT; |
1398 | pcb->pcb_hw.apcb_asn = PMAP_ASN_KERNEL; | | 1399 | pcb->pcb_hw.apcb_asn = PMAP_ASN_KERNEL; |
1399 | | | 1400 | |
1400 | struct cpu_info * const ci = curcpu(); | | 1401 | struct cpu_info * const ci = curcpu(); |
1401 | pmap_init_cpu(ci); | | 1402 | pmap_init_cpu(ci); |
1402 | } | | 1403 | } |
1403 | | | 1404 | |
1404 | /* | | 1405 | /* |
1405 | * pmap_virtual_space: [ INTERFACE ] | | 1406 | * pmap_virtual_space: [ INTERFACE ] |
1406 | * | | 1407 | * |
1407 | * Define the initial bounds of the kernel virtual address space. | | 1408 | * Define the initial bounds of the kernel virtual address space. |
1408 | */ | | 1409 | */ |
1409 | void | | 1410 | void |
1410 | pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) | | 1411 | pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) |
1411 | { | | 1412 | { |
1412 | | | 1413 | |
1413 | *vstartp = VM_MIN_KERNEL_ADDRESS; /* kernel is in K0SEG */ | | 1414 | *vstartp = VM_MIN_KERNEL_ADDRESS; /* kernel is in K0SEG */ |
1414 | *vendp = VM_MAX_KERNEL_ADDRESS; /* we use pmap_growkernel */ | | 1415 | *vendp = VM_MAX_KERNEL_ADDRESS; /* we use pmap_growkernel */ |
1415 | } | | 1416 | } |
1416 | | | 1417 | |
1417 | /* | | 1418 | /* |
1418 | * pmap_steal_memory: [ INTERFACE ] | | 1419 | * pmap_steal_memory: [ INTERFACE ] |
1419 | * | | 1420 | * |
1420 | * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()). | | 1421 | * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()). |
1421 | * This function allows for early dynamic memory allocation until the | | 1422 | * This function allows for early dynamic memory allocation until the |
1422 | * virtual memory system has been bootstrapped. After that point, either | | 1423 | * virtual memory system has been bootstrapped. After that point, either |
1423 | * kmem_alloc or malloc should be used. This function works by stealing | | 1424 | * kmem_alloc or malloc should be used. This function works by stealing |
1424 | * pages from the (to be) managed page pool, then implicitly mapping the | | 1425 | * pages from the (to be) managed page pool, then implicitly mapping the |
1425 | * pages (by using their k0seg addresses) and zeroing them. | | 1426 | * pages (by using their k0seg addresses) and zeroing them. |
1426 | * | | 1427 | * |
1427 | * It may be used once the physical memory segments have been pre-loaded | | 1428 | * It may be used once the physical memory segments have been pre-loaded |
1428 | * into the vm_physmem[] array. Early memory allocation MUST use this | | 1429 | * into the vm_physmem[] array. Early memory allocation MUST use this |
1429 | * interface! This cannot be used after vm_page_startup(), and will | | 1430 | * interface! This cannot be used after vm_page_startup(), and will |
1430 | * generate a panic if tried. | | 1431 | * generate a panic if tried. |
1431 | * | | 1432 | * |
1432 | * Note that this memory will never be freed, and in essence it is wired | | 1433 | * Note that this memory will never be freed, and in essence it is wired |
1433 | * down. | | 1434 | * down. |
1434 | * | | 1435 | * |
1435 | * We must adjust *vstartp and/or *vendp iff we use address space | | 1436 | * We must adjust *vstartp and/or *vendp iff we use address space |
1436 | * from the kernel virtual address range defined by pmap_virtual_space(). | | 1437 | * from the kernel virtual address range defined by pmap_virtual_space(). |
1437 | * | | 1438 | * |
1438 | * Note: no locking is necessary in this function. | | 1439 | * Note: no locking is necessary in this function. |
1439 | */ | | 1440 | */ |
1440 | vaddr_t | | 1441 | vaddr_t |
1441 | pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) | | 1442 | pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) |
1442 | { | | 1443 | { |
1443 | int npgs; | | 1444 | int npgs; |
1444 | vaddr_t va; | | 1445 | vaddr_t va; |
1445 | paddr_t pa; | | 1446 | paddr_t pa; |
1446 | | | 1447 | |
1447 | uvm_physseg_t bank; | | 1448 | uvm_physseg_t bank; |
1448 | | | 1449 | |
1449 | size = round_page(size); | | 1450 | size = round_page(size); |
1450 | npgs = atop(size); | | 1451 | npgs = atop(size); |
1451 | | | 1452 | |
1452 | #if 0 | | 1453 | #if 0 |
1453 | printf("PSM: size 0x%lx (npgs 0x%x)\n", size, npgs); | | 1454 | printf("PSM: size 0x%lx (npgs 0x%x)\n", size, npgs); |
1454 | #endif | | 1455 | #endif |
1455 | | | 1456 | |
1456 | for (bank = uvm_physseg_get_first(); | | 1457 | for (bank = uvm_physseg_get_first(); |
1457 | uvm_physseg_valid_p(bank); | | 1458 | uvm_physseg_valid_p(bank); |
1458 | bank = uvm_physseg_get_next(bank)) { | | 1459 | bank = uvm_physseg_get_next(bank)) { |
1459 | if (uvm.page_init_done == true) | | 1460 | if (uvm.page_init_done == true) |
1460 | panic("pmap_steal_memory: called _after_ bootstrap"); | | 1461 | panic("pmap_steal_memory: called _after_ bootstrap"); |
1461 | | | 1462 | |
1462 | #if 0 | | 1463 | #if 0 |
1463 | printf(" bank %d: avail_start 0x%"PRIxPADDR", start 0x%"PRIxPADDR", " | | 1464 | printf(" bank %d: avail_start 0x%"PRIxPADDR", start 0x%"PRIxPADDR", " |
1464 | "avail_end 0x%"PRIxPADDR"\n", bank, uvm_physseg_get_avail_start(bank), | | 1465 | "avail_end 0x%"PRIxPADDR"\n", bank, uvm_physseg_get_avail_start(bank), |
1465 | uvm_physseg_get_start(bank), uvm_physseg_get_avail_end(bank)); | | 1466 | uvm_physseg_get_start(bank), uvm_physseg_get_avail_end(bank)); |
1466 | #endif | | 1467 | #endif |
1467 | | | 1468 | |
1468 | if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank) || | | 1469 | if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank) || |
1469 | uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) | | 1470 | uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) |
1470 | continue; | | 1471 | continue; |
1471 | | | 1472 | |
1472 | #if 0 | | 1473 | #if 0 |
1473 | printf(" avail_end - avail_start = 0x%"PRIxPADDR"\n", | | 1474 | printf(" avail_end - avail_start = 0x%"PRIxPADDR"\n", |
1474 | uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)); | | 1475 | uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)); |
1475 | #endif | | 1476 | #endif |
1476 | | | 1477 | |
1477 | if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) | | 1478 | if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) |
1478 | < npgs) | | 1479 | < npgs) |
1479 | continue; | | 1480 | continue; |
1480 | | | 1481 | |
1481 | /* | | 1482 | /* |
1482 | * There are enough pages here; steal them! | | 1483 | * There are enough pages here; steal them! |
1483 | */ | | 1484 | */ |
1484 | pa = ptoa(uvm_physseg_get_start(bank)); | | 1485 | pa = ptoa(uvm_physseg_get_start(bank)); |
1485 | uvm_physseg_unplug(atop(pa), npgs); | | 1486 | uvm_physseg_unplug(atop(pa), npgs); |
1486 | | | 1487 | |
1487 | va = ALPHA_PHYS_TO_K0SEG(pa); | | 1488 | va = ALPHA_PHYS_TO_K0SEG(pa); |
1488 | memset((void *)va, 0, size); | | 1489 | memset((void *)va, 0, size); |
1489 | pmap_pages_stolen += npgs; | | 1490 | pmap_pages_stolen += npgs; |
1490 | return (va); | | 1491 | return (va); |
1491 | } | | 1492 | } |
1492 | | | 1493 | |
1493 | /* | | 1494 | /* |
1494 | * If we got here, there was no memory left. | | 1495 | * If we got here, there was no memory left.
1495 | */ | | 1496 | */ |
1496 | panic("pmap_steal_memory: no memory to steal"); | | 1497 | panic("pmap_steal_memory: no memory to steal"); |
1497 | } | | 1498 | } |
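
/*
 * Editor's sketch (not part of the original diff): a minimal
 * illustration, under stated assumptions, of how early boot code
 * might call pmap_steal_memory() before uvm is up.  The helper
 * name is hypothetical.
 */
#if 0
static void *
early_alloc_example(size_t nbytes)
{
	vaddr_t vstart, vend;

	pmap_virtual_space(&vstart, &vend);
	/* Returns a zeroed, wired, k0seg-mapped region (or panics). */
	return (void *)pmap_steal_memory(round_page(nbytes), &vstart, &vend);
}
#endif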
1498 | | | 1499 | |
1499 | /* | | 1500 | /* |
1500 | * pmap_init: [ INTERFACE ] | | 1501 | * pmap_init: [ INTERFACE ] |
1501 | * | | 1502 | * |
1502 | * Initialize the pmap module. Called by vm_init(), to initialize any | | 1503 | * Initialize the pmap module. Called by vm_init(), to initialize any |
1503 | * structures that the pmap system needs to map virtual memory. | | 1504 | * structures that the pmap system needs to map virtual memory. |
1504 | * | | 1505 | * |
1505 | * Note: no locking is necessary in this function. | | 1506 | * Note: no locking is necessary in this function. |
1506 | */ | | 1507 | */ |
1507 | void | | 1508 | void |
1508 | pmap_init(void) | | 1509 | pmap_init(void) |
1509 | { | | 1510 | { |
1510 | | | 1511 | |
1511 | #ifdef DEBUG | | 1512 | #ifdef DEBUG |
1512 | if (pmapdebug & PDB_FOLLOW) | | 1513 | if (pmapdebug & PDB_FOLLOW) |
1513 | printf("pmap_init()\n"); | | 1514 | printf("pmap_init()\n"); |
1514 | #endif | | 1515 | #endif |
1515 | | | 1516 | |
1516 | /* initialize protection array */ | | 1517 | /* initialize protection array */ |
1517 | alpha_protection_init(); | | 1518 | alpha_protection_init(); |
1518 | | | 1519 | |
1519 | /* Initialize TLB handling. */ | | 1520 | /* Initialize TLB handling. */ |
1520 | pmap_tlb_init(); | | 1521 | pmap_tlb_init(); |
1521 | | | 1522 | |
1522 | /* | | 1523 | /* |
1523 | * Set a low water mark on the pv_entry pool, so that we are | | 1524 | * Set a low water mark on the pv_entry pool, so that we are |
1524 | * more likely to have these around even in extreme memory | | 1525 | * more likely to have these around even in extreme memory |
1525 | * starvation. | | 1526 | * starvation. |
1526 | */ | | 1527 | */ |
1527 | pool_cache_setlowat(&pmap_pv_cache, pmap_pv_lowat); | | 1528 | pool_cache_setlowat(&pmap_pv_cache, pmap_pv_lowat); |
1528 | | | 1529 | |
1529 | /* | | 1530 | /* |
1530 | * Now it is safe to enable pv entry recording. | | 1531 | * Now it is safe to enable pv entry recording. |
1531 | */ | | 1532 | */ |
1532 | pmap_initialized = true; | | 1533 | pmap_initialized = true; |
1533 | | | 1534 | |
1534 | #if 0 | | 1535 | #if 0 |
1535 | for (uvm_physseg_t bank = uvm_physseg_get_first(); | | 1536 | for (uvm_physseg_t bank = uvm_physseg_get_first(); |
1536 | uvm_physseg_valid_p(bank); | | 1537 | uvm_physseg_valid_p(bank); |
1537 | bank = uvm_physseg_get_next(bank)) { | | 1538 | bank = uvm_physseg_get_next(bank)) { |
1538 | printf("bank %d\n", bank); | | 1539 | printf("bank %d\n", bank); |
1539 | printf("\tstart = 0x%lx\n", ptoa(uvm_physseg_get_start(bank))); | | 1540 | printf("\tstart = 0x%lx\n", ptoa(uvm_physseg_get_start(bank))); |
1540 | printf("\tend = 0x%lx\n", ptoa(uvm_physseg_get_end(bank))); | | 1541 | printf("\tend = 0x%lx\n", ptoa(uvm_physseg_get_end(bank))); |
1541 | printf("\tavail_start = 0x%lx\n", | | 1542 | printf("\tavail_start = 0x%lx\n", |
1542 | ptoa(uvm_physseg_get_avail_start(bank))); | | 1543 | ptoa(uvm_physseg_get_avail_start(bank))); |
1543 | printf("\tavail_end = 0x%lx\n", | | 1544 | printf("\tavail_end = 0x%lx\n", |
1544 | ptoa(uvm_physseg_get_avail_end(bank))); | | 1545 | ptoa(uvm_physseg_get_avail_end(bank))); |
1545 | } | | 1546 | } |
1546 | #endif | | 1547 | #endif |
1547 | } | | 1548 | } |
1548 | | | 1549 | |
1549 | /* | | 1550 | /* |
1550 | * pmap_create: [ INTERFACE ] | | 1551 | * pmap_create: [ INTERFACE ] |
1551 | * | | 1552 | * |
1552 | * Create and return a physical map. | | 1553 | * Create and return a physical map. |
1553 | * | | 1554 | * |
1554 | * Note: no locking is necessary in this function. | | 1555 | * Note: no locking is necessary in this function. |
1555 | */ | | 1556 | */ |
1556 | pmap_t | | 1557 | pmap_t |
1557 | pmap_create(void) | | 1558 | pmap_create(void) |
1558 | { | | 1559 | { |
1559 | pmap_t pmap; | | 1560 | pmap_t pmap; |
1560 | pt_entry_t *lev1map; | | 1561 | pt_entry_t *lev1map; |
1561 | int i; | | 1562 | int i; |
1562 | | | 1563 | |
1563 | #ifdef DEBUG | | 1564 | #ifdef DEBUG |
1564 | if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) | | 1565 | if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) |
1565 | printf("pmap_create()\n"); | | 1566 | printf("pmap_create()\n"); |
1566 | #endif | | 1567 | #endif |
1567 | | | 1568 | |
1568 | pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK); | | 1569 | pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK); |
1569 | memset(pmap, 0, sizeof(*pmap)); | | 1570 | memset(pmap, 0, sizeof(*pmap)); |
1570 | LIST_INIT(&pmap->pm_ptpages); | | 1571 | LIST_INIT(&pmap->pm_ptpages); |
| | | 1572 | LIST_INIT(&pmap->pm_pvents); |
1571 | | | 1573 | |
1572 | atomic_store_relaxed(&pmap->pm_count, 1); | | 1574 | atomic_store_relaxed(&pmap->pm_count, 1); |
1573 | | | 1575 | |
1574 | try_again: | | 1576 | try_again: |
1575 | rw_enter(&pmap_growkernel_lock, RW_READER); | | 1577 | rw_enter(&pmap_growkernel_lock, RW_READER); |
1576 | | | 1578 | |
1577 | lev1map = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT); | | 1579 | lev1map = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT); |
1578 | if (__predict_false(lev1map == NULL)) { | | 1580 | if (__predict_false(lev1map == NULL)) { |
1579 | rw_exit(&pmap_growkernel_lock); | | 1581 | rw_exit(&pmap_growkernel_lock); |
1580 | (void) kpause("pmap_create", false, hz >> 2, NULL); | | 1582 | (void) kpause("pmap_create", false, hz >> 2, NULL); |
1581 | goto try_again; | | 1583 | goto try_again; |
1582 | } | | 1584 | } |
1583 | | | 1585 | |
1584 | /* | | 1586 | /* |
1585 | * There are only kernel mappings at this point; give the pmap | | 1587 | * There are only kernel mappings at this point; give the pmap |
1586 | * the kernel ASN. This will be initialized to correct values | | 1588 | * the kernel ASN. This will be initialized to correct values |
1587 | * when the pmap is activated. | | 1589 | * when the pmap is activated. |
1588 | * | | 1590 | * |
1589 | * We stash a pointer to the pmap's lev1map in each CPU's | | 1591 | * We stash a pointer to the pmap's lev1map in each CPU's |
1590 | * private data. It remains constant for the life of the | | 1592 | * private data. It remains constant for the life of the |
1591 | * pmap, and gives us more room in the shared pmap structure. | | 1593 | * pmap, and gives us more room in the shared pmap structure. |
1592 | */ | | 1594 | */ |
1593 | for (i = 0; i < pmap_ncpuids; i++) { | | 1595 | for (i = 0; i < pmap_ncpuids; i++) { |
1594 | pmap->pm_percpu[i].pmc_asn = PMAP_ASN_KERNEL; | | 1596 | pmap->pm_percpu[i].pmc_asn = PMAP_ASN_KERNEL; |
1595 | pmap->pm_percpu[i].pmc_asngen = PMAP_ASNGEN_INVALID; | | 1597 | pmap->pm_percpu[i].pmc_asngen = PMAP_ASNGEN_INVALID; |
1596 | pmap->pm_percpu[i].pmc_lev1map = lev1map; | | 1598 | pmap->pm_percpu[i].pmc_lev1map = lev1map; |
1597 | } | | 1599 | } |
1598 | | | 1600 | |
1599 | mutex_enter(&pmap_all_pmaps_lock); | | 1601 | mutex_enter(&pmap_all_pmaps_lock); |
1600 | TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list); | | 1602 | TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list); |
1601 | mutex_exit(&pmap_all_pmaps_lock); | | 1603 | mutex_exit(&pmap_all_pmaps_lock); |
1602 | | | 1604 | |
1603 | rw_exit(&pmap_growkernel_lock); | | 1605 | rw_exit(&pmap_growkernel_lock); |
1604 | | | 1606 | |
1605 | return (pmap); | | 1607 | return (pmap); |
1606 | } | | 1608 | } |
1607 | | | 1609 | |
1608 | /* | | 1610 | /* |
1609 | * pmap_destroy: [ INTERFACE ] | | 1611 | * pmap_destroy: [ INTERFACE ] |
1610 | * | | 1612 | * |
1611 | * Drop the reference count on the specified pmap, releasing | | 1613 | * Drop the reference count on the specified pmap, releasing |
1612 | * all resources if the reference count drops to zero. | | 1614 | * all resources if the reference count drops to zero. |
1613 | */ | | 1615 | */ |
1614 | void | | 1616 | void |
1615 | pmap_destroy(pmap_t pmap) | | 1617 | pmap_destroy(pmap_t pmap) |
1616 | { | | 1618 | { |
1617 | | | 1619 | |
1618 | #ifdef DEBUG | | 1620 | #ifdef DEBUG |
1619 | if (pmapdebug & PDB_FOLLOW) | | 1621 | if (pmapdebug & PDB_FOLLOW) |
1620 | printf("pmap_destroy(%p)\n", pmap); | | 1622 | printf("pmap_destroy(%p)\n", pmap); |
1621 | #endif | | 1623 | #endif |
1622 | | | 1624 | |
1623 | PMAP_MP(membar_exit()); | | 1625 | PMAP_MP(membar_exit()); |
1624 | KASSERT(atomic_load_relaxed(&pmap->pm_count) > 0); | | 1626 | KASSERT(atomic_load_relaxed(&pmap->pm_count) > 0); |
1625 | if (atomic_dec_uint_nv(&pmap->pm_count) > 0) | | 1627 | if (atomic_dec_uint_nv(&pmap->pm_count) > 0) |
1626 | return; | | 1628 | return; |
1627 | | | 1629 | |
1628 | pt_entry_t *lev1map = pmap_lev1map(pmap); | | 1630 | pt_entry_t *lev1map = pmap_lev1map(pmap); |
1629 | int i; | | 1631 | int i; |
1630 | | | 1632 | |
1631 | rw_enter(&pmap_growkernel_lock, RW_READER); | | 1633 | rw_enter(&pmap_growkernel_lock, RW_READER); |
1632 | | | 1634 | |
1633 | /* | | 1635 | /* |
1634 | * Remove it from the global list of all pmaps. | | 1636 | * Remove it from the global list of all pmaps. |
1635 | */ | | 1637 | */ |
1636 | mutex_enter(&pmap_all_pmaps_lock); | | 1638 | mutex_enter(&pmap_all_pmaps_lock); |
1637 | TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list); | | 1639 | TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list); |
1638 | mutex_exit(&pmap_all_pmaps_lock); | | 1640 | mutex_exit(&pmap_all_pmaps_lock); |
1639 | | | 1641 | |
1640 | pool_cache_put(&pmap_l1pt_cache, lev1map); | | 1642 | pool_cache_put(&pmap_l1pt_cache, lev1map); |
1641 | #ifdef DIAGNOSTIC | | 1643 | #ifdef DIAGNOSTIC |
1642 | for (i = 0; i < pmap_ncpuids; i++) { | | 1644 | for (i = 0; i < pmap_ncpuids; i++) { |
1643 | pmap->pm_percpu[i].pmc_lev1map = (pt_entry_t *)0xdeadbeefUL; | | 1645 | pmap->pm_percpu[i].pmc_lev1map = (pt_entry_t *)0xdeadbeefUL; |
1644 | } | | 1646 | } |
1645 | #endif /* DIAGNOSTIC */ | | 1647 | #endif /* DIAGNOSTIC */ |
1646 | | | 1648 | |
1647 | rw_exit(&pmap_growkernel_lock); | | 1649 | rw_exit(&pmap_growkernel_lock); |
1648 | | | 1650 | |
1649 | pool_cache_put(&pmap_pmap_cache, pmap); | | 1651 | pool_cache_put(&pmap_pmap_cache, pmap); |
1650 | } | | 1652 | } |
1651 | | | 1653 | |
1652 | /* | | 1654 | /* |
1653 | * pmap_reference: [ INTERFACE ] | | 1655 | * pmap_reference: [ INTERFACE ] |
1654 | * | | 1656 | * |
1655 | * Add a reference to the specified pmap. | | 1657 | * Add a reference to the specified pmap. |
1656 | */ | | 1658 | */ |
1657 | void | | 1659 | void |
1658 | pmap_reference(pmap_t pmap) | | 1660 | pmap_reference(pmap_t pmap) |
1659 | { | | 1661 | { |
1660 | unsigned int newcount __diagused; | | 1662 | unsigned int newcount __diagused; |
1661 | | | 1663 | |
1662 | #ifdef DEBUG | | 1664 | #ifdef DEBUG |
1663 | if (pmapdebug & PDB_FOLLOW) | | 1665 | if (pmapdebug & PDB_FOLLOW) |
1664 | printf("pmap_reference(%p)\n", pmap); | | 1666 | printf("pmap_reference(%p)\n", pmap); |
1665 | #endif | | 1667 | #endif |
1666 | | | 1668 | |
1667 | newcount = atomic_inc_uint_nv(&pmap->pm_count); | | 1669 | newcount = atomic_inc_uint_nv(&pmap->pm_count); |
1668 | KASSERT(newcount != 0); | | 1670 | KASSERT(newcount != 0); |
1669 | PMAP_MP(membar_enter()); | | 1671 | PMAP_MP(membar_enter()); |
1670 | } | | 1672 | } |
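
/*
 * Editor's sketch (not part of the original diff): the reference
 * counting protocol implemented by pmap_create(), pmap_reference()
 * and pmap_destroy(), shown for a hypothetical second owner.
 */
#if 0
	pmap_t pm = pmap_create();	/* pm_count == 1 */
	pmap_reference(pm);		/* pm_count == 2, second owner */
	pmap_destroy(pm);		/* pm_count == 1, pmap still live */
	pmap_destroy(pm);		/* pm_count == 0, resources freed */
#endif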
1671 | | | 1673 | |
1672 | /* | | 1674 | /* |
1673 | * pmap_remove: [ INTERFACE ] | | 1675 | * pmap_remove: [ INTERFACE ] |
1674 | * | | 1676 | * |
1675 | * Remove the given range of addresses from the specified map. | | 1677 | * Remove the given range of addresses from the specified map. |
1676 | * | | 1678 | * |
1677 | * It is assumed that the start and end are properly | | 1679 | * It is assumed that the start and end are properly |
1678 | * rounded to the page size. | | 1680 | * rounded to the page size. |
1679 | */ | | 1681 | */ |
1680 | static void | | 1682 | static void |
1681 | pmap_remove_internal(pmap_t pmap, vaddr_t sva, vaddr_t eva, | | 1683 | pmap_remove_internal(pmap_t pmap, vaddr_t sva, vaddr_t eva, |
1682 | struct pmap_tlb_context * const tlbctx) | | 1684 | struct pmap_tlb_context * const tlbctx) |
1683 | { | | 1685 | { |
1684 | pt_entry_t *l1pte, *l2pte, *l3pte; | | 1686 | pt_entry_t *l1pte, *l2pte, *l3pte; |
1685 | pt_entry_t *saved_l2pte, *saved_l3pte; | | 1687 | pt_entry_t *saved_l2pte, *saved_l3pte; |
1686 | vaddr_t l1eva, l2eva, l3vptva; | | 1688 | vaddr_t l1eva, l2eva, l3vptva; |
1687 | pt_entry_t pte_bits; | | 1689 | pt_entry_t pte_bits; |
1688 | | | 1690 | |
1689 | #ifdef DEBUG | | 1691 | #ifdef DEBUG |
1690 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) | | 1692 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) |
1691 | printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva); | | 1693 | printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva); |
1692 | #endif | | 1694 | #endif |
1693 | | | 1695 | |
1694 | /* | | 1696 | /* |
1695 | * If this is the kernel pmap, we can use a faster method | | 1697 | * If this is the kernel pmap, we can use a faster method |
1696 | * for accessing the PTEs (since the PT pages are always | | 1698 | * for accessing the PTEs (since the PT pages are always |
1697 | * resident). | | 1699 | * resident). |
1698 | * | | 1700 | * |
1699 | * Note that this routine should NEVER be called from an | | 1701 | * Note that this routine should NEVER be called from an |
1700 | * interrupt context; pmap_kremove() is used for that. | | 1702 | * interrupt context; pmap_kremove() is used for that. |
1701 | */ | | 1703 | */ |
1702 | if (pmap == pmap_kernel()) { | | 1704 | if (pmap == pmap_kernel()) { |
1703 | PMAP_MAP_TO_HEAD_LOCK(); | | 1705 | PMAP_MAP_TO_HEAD_LOCK(); |
1704 | PMAP_LOCK(pmap); | | 1706 | PMAP_LOCK(pmap); |
1705 | | | 1707 | |
1706 | while (sva < eva) { | | 1708 | while (sva < eva) { |
1707 | l3pte = PMAP_KERNEL_PTE(sva); | | 1709 | l3pte = PMAP_KERNEL_PTE(sva); |
1708 | if (pmap_pte_v(l3pte)) { | | 1710 | if (pmap_pte_v(l3pte)) { |
1709 | pte_bits = pmap_remove_mapping(pmap, sva, | | 1711 | pte_bits = pmap_remove_mapping(pmap, sva, |
1710 | l3pte, true, NULL, tlbctx); | | 1712 | l3pte, true, NULL, tlbctx); |
1711 | pmap_tlb_shootdown(pmap, sva, pte_bits, | | 1713 | pmap_tlb_shootdown(pmap, sva, pte_bits, |
1712 | tlbctx); | | 1714 | tlbctx); |
1713 | } | | 1715 | } |
1714 | sva += PAGE_SIZE; | | 1716 | sva += PAGE_SIZE; |
1715 | } | | 1717 | } |
1716 | | | 1718 | |
1717 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 1719 | PMAP_MAP_TO_HEAD_UNLOCK(); |
1718 | PMAP_UNLOCK(pmap); | | 1720 | PMAP_UNLOCK(pmap); |
1719 | pmap_tlb_shootnow(tlbctx); | | 1721 | pmap_tlb_shootnow(tlbctx); |
1720 | /* kernel PT pages are never freed. */ | | 1722 | /* kernel PT pages are never freed. */ |
1721 | KASSERT(LIST_EMPTY(&tlbctx->t_freeptq)); | | 1723 | KASSERT(LIST_EMPTY(&tlbctx->t_freeptq)); |
1722 | TLB_COUNT(reason_remove_kernel); | | 1724 | TLB_COUNT(reason_remove_kernel); |
1723 | | | 1725 | |
1724 | return; | | 1726 | return; |
1725 | } | | 1727 | } |
1726 | | | 1728 | |
1727 | pt_entry_t * const lev1map = pmap_lev1map(pmap); | | 1729 | pt_entry_t * const lev1map = pmap_lev1map(pmap); |
1728 | | | 1730 | |
1729 | KASSERT(sva < VM_MAXUSER_ADDRESS); | | 1731 | KASSERT(sva < VM_MAXUSER_ADDRESS); |
1730 | KASSERT(eva <= VM_MAXUSER_ADDRESS); | | 1732 | KASSERT(eva <= VM_MAXUSER_ADDRESS); |
1731 | KASSERT(lev1map != kernel_lev1map); | | 1733 | KASSERT(lev1map != kernel_lev1map); |
1732 | | | 1734 | |
1733 | PMAP_MAP_TO_HEAD_LOCK(); | | 1735 | PMAP_MAP_TO_HEAD_LOCK(); |
1734 | PMAP_LOCK(pmap); | | 1736 | PMAP_LOCK(pmap); |
1735 | | | 1737 | |
1736 | l1pte = pmap_l1pte(lev1map, sva); | | 1738 | l1pte = pmap_l1pte(lev1map, sva); |
1737 | | | 1739 | |
1738 | for (; sva < eva; sva = l1eva, l1pte++) { | | 1740 | for (; sva < eva; sva = l1eva, l1pte++) { |
1739 | l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE; | | 1741 | l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE; |
1740 | if (pmap_pte_v(l1pte)) { | | 1742 | if (pmap_pte_v(l1pte)) { |
1741 | saved_l2pte = l2pte = pmap_l2pte(lev1map, sva, l1pte); | | 1743 | saved_l2pte = l2pte = pmap_l2pte(lev1map, sva, l1pte); |
1742 | | | 1744 | |
1743 | /* | | 1745 | /* |
1744 | * Add a reference to the L2 table so it won't | | 1746 | * Add a reference to the L2 table so it won't |
1745 | * get removed from under us. | | 1747 | * get removed from under us. |
1746 | */ | | 1748 | */ |
1747 | pmap_physpage_addref(saved_l2pte); | | 1749 | pmap_physpage_addref(saved_l2pte); |
1748 | | | 1750 | |
1749 | for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) { | | 1751 | for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) { |
1750 | l2eva = | | 1752 | l2eva = |
1751 | alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE; | | 1753 | alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE; |
1752 | if (pmap_pte_v(l2pte)) { | | 1754 | if (pmap_pte_v(l2pte)) { |
1753 | saved_l3pte = l3pte = | | 1755 | saved_l3pte = l3pte = |
1754 | pmap_l3pte(lev1map, sva, l2pte); | | 1756 | pmap_l3pte(lev1map, sva, l2pte); |
1755 | | | 1757 | |
1756 | /* | | 1758 | /* |
1757 | * Add a reference to the L3 table so | | 1759 | * Add a reference to the L3 table so |
1758 | * it won't get removed from under us. | | 1760 | * it won't get removed from under us. |
1759 | */ | | 1761 | */ |
1760 | pmap_physpage_addref(saved_l3pte); | | 1762 | pmap_physpage_addref(saved_l3pte); |
1761 | | | 1763 | |
1762 | /* | | 1764 | /* |
1763 | * Remember this sva; if the L3 table | | 1765 | * Remember this sva; if the L3 table |
1764 | * gets removed, we need to invalidate | | 1766 | * gets removed, we need to invalidate |
1765 | * the VPT TLB entry for it. | | 1767 | * the VPT TLB entry for it. |
1766 | */ | | 1768 | */ |
1767 | l3vptva = sva; | | 1769 | l3vptva = sva; |
1768 | | | 1770 | |
1769 | for (; sva < l2eva && sva < eva; | | 1771 | for (; sva < l2eva && sva < eva; |
1770 | sva += PAGE_SIZE, l3pte++) { | | 1772 | sva += PAGE_SIZE, l3pte++) { |
1771 | if (!pmap_pte_v(l3pte)) { | | 1773 | if (!pmap_pte_v(l3pte)) { |
1772 | continue; | | 1774 | continue; |
1773 | } | | 1775 | } |
1774 | pte_bits = | | 1776 | pte_bits = |
1775 | pmap_remove_mapping( | | 1777 | pmap_remove_mapping( |
1776 | pmap, sva, | | 1778 | pmap, sva, |
1777 | l3pte, true, | | 1779 | l3pte, true, |
1778 | NULL, tlbctx); | | 1780 | NULL, tlbctx); |
1779 | pmap_tlb_shootdown(pmap, | | 1781 | pmap_tlb_shootdown(pmap, |
1780 | sva, pte_bits, tlbctx); | | 1782 | sva, pte_bits, tlbctx); |
1781 | } | | 1783 | } |
1782 | | | 1784 | |
1783 | /* | | 1785 | /* |
1784 | * Remove the reference to the L3 | | 1786 | * Remove the reference to the L3 |
1785 | * table that we added above. This | | 1787 | * table that we added above. This |
1786 | * may free the L3 table. | | 1788 | * may free the L3 table. |
1787 | */ | | 1789 | */ |
1788 | pmap_l3pt_delref(pmap, l3vptva, | | 1790 | pmap_l3pt_delref(pmap, l3vptva, |
1789 | saved_l3pte, tlbctx); | | 1791 | saved_l3pte, tlbctx); |
1790 | } | | 1792 | } |
1791 | } | | 1793 | } |
1792 | | | 1794 | |
1793 | /* | | 1795 | /* |
1794 | * Remove the reference to the L2 table that we | | 1796 | * Remove the reference to the L2 table that we |
1795 | * added above. This may free the L2 table. | | 1797 | * added above. This may free the L2 table. |
1796 | */ | | 1798 | */ |
1797 | pmap_l2pt_delref(pmap, l1pte, saved_l2pte, tlbctx); | | 1799 | pmap_l2pt_delref(pmap, l1pte, saved_l2pte, tlbctx); |
1798 | } | | 1800 | } |
1799 | } | | 1801 | } |
1800 | | | 1802 | |
1801 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 1803 | PMAP_MAP_TO_HEAD_UNLOCK(); |
1802 | PMAP_UNLOCK(pmap); | | 1804 | PMAP_UNLOCK(pmap); |
1803 | pmap_tlb_shootnow(tlbctx); | | 1805 | pmap_tlb_shootnow(tlbctx); |
1804 | pmap_tlb_ptpage_drain(tlbctx); | | 1806 | pmap_tlb_ptpage_drain(tlbctx); |
1805 | TLB_COUNT(reason_remove_user); | | 1807 | TLB_COUNT(reason_remove_user); |
1806 | } | | 1808 | } |
1807 | | | 1809 | |
1808 | void | | 1810 | void |
1809 | pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) | | 1811 | pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) |
1810 | { | | 1812 | { |
1811 | struct pmap_tlb_context tlbctx; | | 1813 | struct pmap_tlb_context tlbctx; |
1812 | | | 1814 | |
1813 | pmap_tlb_context_init(&tlbctx, 0); | | 1815 | pmap_tlb_context_init(&tlbctx, 0); |
1814 | pmap_remove_internal(pmap, sva, eva, &tlbctx); | | 1816 | pmap_remove_internal(pmap, sva, eva, &tlbctx); |
1815 | } | | 1817 | } |
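
/*
 * Editor's sketch (not part of the original diff): pmap_remove()
 * assumes a page-rounded range, so a caller unmapping an arbitrary
 * byte extent would round it first, e.g.:
 */
#if 0
	pmap_remove(pm, trunc_page(start), round_page(start + len));
#endif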
1816 | | | 1818 | |
1817 | /* | | 1819 | /* |
1818 | * pmap_page_protect: [ INTERFACE ] | | 1820 | * pmap_page_protect: [ INTERFACE ] |
1819 | * | | 1821 | * |
1820 | * Lower the permission for all mappings to a given page to | | 1822 | * Lower the permission for all mappings to a given page to |
1821 | * the permissions specified. | | 1823 | * the permissions specified. |
1822 | */ | | 1824 | */ |
1823 | void | | 1825 | void |
1824 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) | | 1826 | pmap_page_protect(struct vm_page *pg, vm_prot_t prot) |
1825 | { | | 1827 | { |
1826 | pv_entry_t pv, nextpv; | | 1828 | pv_entry_t pv, nextpv; |
1827 | pt_entry_t opte; | | 1829 | pt_entry_t opte; |
1828 | kmutex_t *lock; | | 1830 | kmutex_t *lock; |
1829 | struct pmap_tlb_context tlbctx; | | 1831 | struct pmap_tlb_context tlbctx; |
1830 | | | 1832 | |
1831 | #ifdef DEBUG | | 1833 | #ifdef DEBUG |
1832 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 1834 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
1833 | | | 1835 | |
1834 | | | 1836 | |
1835 | if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || | | 1837 | if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || |
1836 | (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) | | 1838 | (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) |
1837 | printf("pmap_page_protect(%p, %x)\n", pg, prot); | | 1839 | printf("pmap_page_protect(%p, %x)\n", pg, prot); |
1838 | #endif | | 1840 | #endif |
1839 | | | 1841 | |
1840 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); | | 1842 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); |
1841 | | | 1843 | |
1842 | switch (prot) { | | 1844 | switch (prot) { |
1843 | case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: | | 1845 | case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: |
1844 | case VM_PROT_READ|VM_PROT_WRITE: | | 1846 | case VM_PROT_READ|VM_PROT_WRITE: |
1845 | return; | | 1847 | return; |
1846 | | | 1848 | |
1847 | /* copy_on_write */ | | 1849 | /* copy_on_write */ |
1848 | case VM_PROT_READ|VM_PROT_EXECUTE: | | 1850 | case VM_PROT_READ|VM_PROT_EXECUTE: |
1849 | case VM_PROT_READ: | | 1851 | case VM_PROT_READ: |
1850 | PMAP_HEAD_TO_MAP_LOCK(); | | 1852 | PMAP_HEAD_TO_MAP_LOCK(); |
1851 | lock = pmap_pvh_lock(pg); | | 1853 | lock = pmap_pvh_lock(pg); |
1852 | mutex_enter(lock); | | 1854 | mutex_enter(lock); |
1853 | for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) { | | 1855 | for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) { |
1854 | PMAP_LOCK(pv->pv_pmap); | | 1856 | PMAP_LOCK(pv->pv_pmap); |
1855 | opte = atomic_load_relaxed(pv->pv_pte); | | 1857 | opte = atomic_load_relaxed(pv->pv_pte); |
1856 | if (opte & (PG_KWE | PG_UWE)) { | | 1858 | if (opte & (PG_KWE | PG_UWE)) { |
1857 | atomic_store_relaxed(pv->pv_pte, | | 1859 | atomic_store_relaxed(pv->pv_pte, |
1858 | opte & ~(PG_KWE | PG_UWE)); | | 1860 | opte & ~(PG_KWE | PG_UWE)); |
1859 | pmap_tlb_shootdown_pv(pv->pv_pmap, pv->pv_va, | | 1861 | pmap_tlb_shootdown_pv(pv->pv_pmap, pv->pv_va, |
1860 | opte, &tlbctx); | | 1862 | opte, &tlbctx); |
1861 | } | | 1863 | } |
1862 | PMAP_UNLOCK(pv->pv_pmap); | | 1864 | PMAP_UNLOCK(pv->pv_pmap); |
1863 | } | | 1865 | } |
1864 | mutex_exit(lock); | | 1866 | mutex_exit(lock); |
1865 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 1867 | PMAP_HEAD_TO_MAP_UNLOCK(); |
1866 | pmap_tlb_shootnow(&tlbctx); | | 1868 | pmap_tlb_shootnow(&tlbctx); |
1867 | TLB_COUNT(reason_page_protect_read); | | 1869 | TLB_COUNT(reason_page_protect_read); |
1868 | return; | | 1870 | return; |
1869 | | | 1871 | |
1870 | /* remove_all */ | | 1872 | /* remove_all */ |
1871 | default: | | 1873 | default: |
1872 | break; | | 1874 | break; |
1873 | } | | 1875 | } |
1874 | | | 1876 | |
1875 | PMAP_HEAD_TO_MAP_LOCK(); | | 1877 | PMAP_HEAD_TO_MAP_LOCK(); |
1876 | lock = pmap_pvh_lock(pg); | | 1878 | lock = pmap_pvh_lock(pg); |
1877 | mutex_enter(lock); | | 1879 | mutex_enter(lock); |
1878 | for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = nextpv) { | | 1880 | for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = nextpv) { |
1879 | pt_entry_t pte_bits; | | 1881 | pt_entry_t pte_bits; |
1880 | pmap_t pmap; | | 1882 | pmap_t pmap; |
1881 | vaddr_t va; | | 1883 | vaddr_t va; |
1882 | | | 1884 | |
1883 | nextpv = pv->pv_next; | | 1885 | nextpv = pv->pv_next; |
1884 | | | 1886 | |
1885 | PMAP_LOCK(pv->pv_pmap); | | 1887 | PMAP_LOCK(pv->pv_pmap); |
1886 | pmap = pv->pv_pmap; | | 1888 | pmap = pv->pv_pmap; |
1887 | va = pv->pv_va; | | 1889 | va = pv->pv_va; |
1888 | pte_bits = pmap_remove_mapping(pmap, va, pv->pv_pte, | | 1890 | pte_bits = pmap_remove_mapping(pmap, va, pv->pv_pte, |
1889 | false, NULL, &tlbctx); | | 1891 | false, NULL, &tlbctx); |
1890 | pmap_tlb_shootdown_pv(pmap, va, pte_bits, &tlbctx); | | 1892 | pmap_tlb_shootdown_pv(pmap, va, pte_bits, &tlbctx); |
1891 | PMAP_UNLOCK(pv->pv_pmap); | | 1893 | PMAP_UNLOCK(pv->pv_pmap); |
1892 | } | | 1894 | } |
1893 | mutex_exit(lock); | | 1895 | mutex_exit(lock); |
1894 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 1896 | PMAP_HEAD_TO_MAP_UNLOCK(); |
1895 | pmap_tlb_shootnow(&tlbctx); | | 1897 | pmap_tlb_shootnow(&tlbctx); |
1896 | pmap_tlb_ptpage_drain(&tlbctx); | | 1898 | pmap_tlb_ptpage_drain(&tlbctx); |
1897 | TLB_COUNT(reason_page_protect_none); | | 1899 | TLB_COUNT(reason_page_protect_none); |
1898 | } | | 1900 | } |
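
/*
 * Editor's sketch (not part of the original diff): the two
 * interesting cases of pmap_page_protect() as issued by upper-layer
 * VM code.  A read-only protection write-protects every mapping of
 * the page (the copy-on-write case above); VM_PROT_NONE removes
 * every mapping outright.
 */
#if 0
	pmap_page_protect(pg, VM_PROT_READ);	/* write-protect all */
	pmap_page_protect(pg, VM_PROT_NONE);	/* remove all mappings */
#endif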
1899 | | | 1901 | |
1900 | /* | | 1902 | /* |
1901 | * pmap_protect: [ INTERFACE ] | | 1903 | * pmap_protect: [ INTERFACE ] |
1902 | * | | 1904 | * |
1903 | * Set the physical protection on the specified range of this map | | 1905 | * Set the physical protection on the specified range of this map |
1904 | * as requested. | | 1906 | * as requested. |
1905 | */ | | 1907 | */ |
1906 | void | | 1908 | void |
1907 | pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) | | 1909 | pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) |
1908 | { | | 1910 | { |
1909 | pt_entry_t *l1pte, *l2pte, *l3pte, opte; | | 1911 | pt_entry_t *l1pte, *l2pte, *l3pte, opte; |
1910 | vaddr_t l1eva, l2eva; | | 1912 | vaddr_t l1eva, l2eva; |
1911 | struct pmap_tlb_context tlbctx; | | 1913 | struct pmap_tlb_context tlbctx; |
1912 | | | 1914 | |
1913 | #ifdef DEBUG | | 1915 | #ifdef DEBUG |
1914 | if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) | | 1916 | if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) |
1915 | printf("pmap_protect(%p, %lx, %lx, %x)\n", | | 1917 | printf("pmap_protect(%p, %lx, %lx, %x)\n", |
1916 | pmap, sva, eva, prot); | | 1918 | pmap, sva, eva, prot); |
1917 | #endif | | 1919 | #endif |
1918 | | | 1920 | |
1919 | pmap_tlb_context_init(&tlbctx, 0); | | 1921 | pmap_tlb_context_init(&tlbctx, 0); |
1920 | | | 1922 | |
1921 | if ((prot & VM_PROT_READ) == VM_PROT_NONE) { | | 1923 | if ((prot & VM_PROT_READ) == VM_PROT_NONE) { |
1922 | pmap_remove_internal(pmap, sva, eva, &tlbctx); | | 1924 | pmap_remove_internal(pmap, sva, eva, &tlbctx); |
1923 | return; | | 1925 | return; |
1924 | } | | 1926 | } |
1925 | | | 1927 | |
1926 | const pt_entry_t bits = pte_prot(pmap, prot); | | 1928 | const pt_entry_t bits = pte_prot(pmap, prot); |
1927 | pt_entry_t * const lev1map = pmap_lev1map(pmap); | | 1929 | pt_entry_t * const lev1map = pmap_lev1map(pmap); |
1928 | | | 1930 | |
1929 | PMAP_LOCK(pmap); | | 1931 | PMAP_LOCK(pmap); |
1930 | | | 1932 | |
1931 | l1pte = pmap_l1pte(lev1map, sva); | | 1933 | l1pte = pmap_l1pte(lev1map, sva); |
1932 | for (; sva < eva; sva = l1eva, l1pte++) { | | 1934 | for (; sva < eva; sva = l1eva, l1pte++) { |
1933 | l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE; | | 1935 | l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE; |
1934 | if (pmap_pte_v(l1pte)) { | | 1936 | if (pmap_pte_v(l1pte)) { |
1935 | l2pte = pmap_l2pte(lev1map, sva, l1pte); | | 1937 | l2pte = pmap_l2pte(lev1map, sva, l1pte); |
1936 | for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) { | | 1938 | for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) { |
1937 | l2eva = | | 1939 | l2eva = |
1938 | alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE; | | 1940 | alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE; |
1939 | if (pmap_pte_v(l2pte)) { | | 1941 | if (pmap_pte_v(l2pte)) { |
1940 | l3pte = pmap_l3pte(lev1map, sva, l2pte); | | 1942 | l3pte = pmap_l3pte(lev1map, sva, l2pte); |
1941 | for (; sva < l2eva && sva < eva; | | 1943 | for (; sva < l2eva && sva < eva; |
1942 | sva += PAGE_SIZE, l3pte++) { | | 1944 | sva += PAGE_SIZE, l3pte++) { |
1943 | if (pmap_pte_v(l3pte) && | | 1945 | if (pmap_pte_v(l3pte) && |
1944 | pmap_pte_prot_chg(l3pte, | | 1946 | pmap_pte_prot_chg(l3pte, |
1945 | bits)) { | | 1947 | bits)) { |
1946 | opte = atomic_load_relaxed(l3pte); | | 1948 | opte = atomic_load_relaxed(l3pte); |
1947 | pmap_pte_set_prot(l3pte, | | 1949 | pmap_pte_set_prot(l3pte, |
1948 | bits); | | 1950 | bits); |
1949 | pmap_tlb_shootdown(pmap, | | 1951 | pmap_tlb_shootdown(pmap, |
1950 | sva, opte, &tlbctx); | | 1952 | sva, opte, &tlbctx); |
1951 | } | | 1953 | } |
1952 | } | | 1954 | } |
1953 | } | | 1955 | } |
1954 | } | | 1956 | } |
1955 | } | | 1957 | } |
1956 | } | | 1958 | } |
1957 | | | 1959 | |
1958 | PMAP_UNLOCK(pmap); | | 1960 | PMAP_UNLOCK(pmap); |
1959 | pmap_tlb_shootnow(&tlbctx); | | 1961 | pmap_tlb_shootnow(&tlbctx); |
1960 | TLB_COUNT(reason_protect); | | 1962 | TLB_COUNT(reason_protect); |
1961 | } | | 1963 | } |
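
/*
 * Editor's sketch (not part of the original diff): note the fast
 * path above -- a protection that removes read access degenerates
 * into pmap_remove_internal(), while a downgrade such as this one
 * walks the L1/L2/L3 tables and rewrites only the PTEs whose
 * protection actually changes:
 */
#if 0
	pmap_protect(pm, va, va + PAGE_SIZE, VM_PROT_READ);	/* -> R/O */
#endif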
1962 | | | 1964 | |
1963 | /* | | 1965 | /* |
1964 | * pmap_enter_tlb_shootdown: | | 1966 | * pmap_enter_tlb_shootdown: |
1965 | * | | 1967 | * |
1966 | * Carry out a TLB shootdown on behalf of a pmap_enter() | | 1968 | * Carry out a TLB shootdown on behalf of a pmap_enter() |
1967 | * or a pmap_kenter_pa(). This is factored out separately | | 1969 | * or a pmap_kenter_pa(). This is factored out separately |
1968 | * because we expect it to be an uncommon case. | | 1970 | * because we expect it to be an uncommon case.
1969 | */ | | 1971 | */ |
1970 | static void __noinline | | 1972 | static void __noinline |
1971 | pmap_enter_tlb_shootdown(pmap_t const pmap, vaddr_t const va, | | 1973 | pmap_enter_tlb_shootdown(pmap_t const pmap, vaddr_t const va, |
1972 | pt_entry_t const pte_bits, bool locked) | | 1974 | pt_entry_t const pte_bits, bool locked) |
1973 | { | | 1975 | { |
1974 | struct pmap_tlb_context tlbctx; | | 1976 | struct pmap_tlb_context tlbctx; |
1975 | | | 1977 | |
1976 | pmap_tlb_context_init(&tlbctx, 0); | | 1978 | pmap_tlb_context_init(&tlbctx, 0); |
1977 | pmap_tlb_shootdown(pmap, va, pte_bits, &tlbctx); | | 1979 | pmap_tlb_shootdown(pmap, va, pte_bits, &tlbctx); |
1978 | if (locked) { | | 1980 | if (locked) { |
1979 | PMAP_UNLOCK(pmap); | | 1981 | PMAP_UNLOCK(pmap); |
1980 | } | | 1982 | } |
1981 | pmap_tlb_shootnow(&tlbctx); | | 1983 | pmap_tlb_shootnow(&tlbctx); |
1982 | } | | 1984 | } |
1983 | | | 1985 | |
1984 | /* | | 1986 | /* |
1985 | * pmap_enter_l2pt_delref: | | 1987 | * pmap_enter_l2pt_delref: |
1986 | * | | 1988 | * |
1987 | * Release a reference on an L2 PT page for pmap_enter(). | | 1989 | * Release a reference on an L2 PT page for pmap_enter(). |
1988 | * This is factored out separately because we expect it | | 1990 | * This is factored out separately because we expect it
1989 | * to be a rare case. | | 1991 | * to be a rare case. |
1990 | */ | | 1992 | */ |
1991 | static void __noinline | | 1993 | static void __noinline |
1992 | pmap_enter_l2pt_delref(pmap_t const pmap, pt_entry_t * const l1pte, | | 1994 | pmap_enter_l2pt_delref(pmap_t const pmap, pt_entry_t * const l1pte, |
1993 | pt_entry_t * const l2pte) | | 1995 | pt_entry_t * const l2pte) |
1994 | { | | 1996 | { |
1995 | struct pmap_tlb_context tlbctx; | | 1997 | struct pmap_tlb_context tlbctx; |
1996 | | | 1998 | |
1997 | /* | | 1999 | /* |
1998 | * PALcode may have tried to service a TLB miss with | | 2000 | * PALcode may have tried to service a TLB miss with |
1999 | * this L2 PTE, so we need to make sure we don't actually | | 2001 | * this L2 PTE, so we need to make sure we don't actually
2000 | * free the PT page until we've shot down any TLB entries | | 2002 | * free the PT page until we've shot down any TLB entries
2001 | * for this VPT index. | | 2003 | * for this VPT index. |
2002 | */ | | 2004 | */ |
2003 | | | 2005 | |
2004 | pmap_tlb_context_init(&tlbctx, 0); | | 2006 | pmap_tlb_context_init(&tlbctx, 0); |
2005 | pmap_l2pt_delref(pmap, l1pte, l2pte, &tlbctx); | | 2007 | pmap_l2pt_delref(pmap, l1pte, l2pte, &tlbctx); |
2006 | PMAP_UNLOCK(pmap); | | 2008 | PMAP_UNLOCK(pmap); |
2007 | pmap_tlb_shootnow(&tlbctx); | | 2009 | pmap_tlb_shootnow(&tlbctx); |
2008 | pmap_tlb_ptpage_drain(&tlbctx); | | 2010 | pmap_tlb_ptpage_drain(&tlbctx); |
2009 | TLB_COUNT(reason_enter_l2pt_delref); | | 2011 | TLB_COUNT(reason_enter_l2pt_delref); |
2010 | } | | 2012 | } |
2011 | | | 2013 | |
2012 | /* | | 2014 | /* |
2013 | * pmap_enter_l3pt_delref: | | 2015 | * pmap_enter_l3pt_delref: |
2014 | * | | 2016 | * |
2015 | * Release a reference on an L3 PT page for pmap_enter(). | | 2017 | * Release a reference on an L3 PT page for pmap_enter(). |
2016 | * This is factored out separately because we expect it | | 2018 | * This is factored out separately because we expect it
2017 | * to be a rare case. | | 2019 | * to be a rare case. |
2018 | */ | | 2020 | */ |
2019 | static void __noinline | | 2021 | static void __noinline |
2020 | pmap_enter_l3pt_delref(pmap_t const pmap, vaddr_t const va, | | 2022 | pmap_enter_l3pt_delref(pmap_t const pmap, vaddr_t const va, |
2021 | pt_entry_t * const pte) | | 2023 | pt_entry_t * const pte) |
2022 | { | | 2024 | { |
2023 | struct pmap_tlb_context tlbctx; | | 2025 | struct pmap_tlb_context tlbctx; |
2024 | | | 2026 | |
2025 | /* | | 2027 | /* |
2026 | * PALcode may have tried to service a TLB miss with | | 2028 | * PALcode may have tried to service a TLB miss with |
2027 | * this PTE, so we need to make sure we don't actually | | 2029 | * this PTE, so we need to make sure we don't actually
2028 | * free the PT page until we've shot down any TLB entries | | 2030 | * free the PT page until we've shot down any TLB entries
2029 | * for this VPT index. | | 2031 | * for this VPT index. |
2030 | */ | | 2032 | */ |
2031 | | | 2033 | |
2032 | pmap_tlb_context_init(&tlbctx, 0); | | 2034 | pmap_tlb_context_init(&tlbctx, 0); |
2033 | pmap_l3pt_delref(pmap, va, pte, &tlbctx); | | 2035 | pmap_l3pt_delref(pmap, va, pte, &tlbctx); |
2034 | PMAP_UNLOCK(pmap); | | 2036 | PMAP_UNLOCK(pmap); |
2035 | pmap_tlb_shootnow(&tlbctx); | | 2037 | pmap_tlb_shootnow(&tlbctx); |
2036 | pmap_tlb_ptpage_drain(&tlbctx); | | 2038 | pmap_tlb_ptpage_drain(&tlbctx); |
2037 | TLB_COUNT(reason_enter_l3pt_delref); | | 2039 | TLB_COUNT(reason_enter_l3pt_delref); |
2038 | } | | 2040 | } |
2039 | | | 2041 | |
2040 | /* | | 2042 | /* |
2041 | * pmap_enter: [ INTERFACE ] | | 2043 | * pmap_enter: [ INTERFACE ] |
2042 | * | | 2044 | * |
2043 | * Insert the given physical page (p) at | | 2045 | * Insert the given physical page (p) at |
2044 | * the specified virtual address (v) in the | | 2046 | * the specified virtual address (v) in the |
2045 | * target physical map with the protection requested. | | 2047 | * target physical map with the protection requested. |
2046 | * | | 2048 | * |
2047 | * If specified, the page will be wired down, meaning | | 2049 | * If specified, the page will be wired down, meaning |
2048 | * that the related pte cannot be reclaimed. | | 2050 | * that the related pte cannot be reclaimed.
2049 | * | | 2051 | * |
2050 | * Note: This is the only routine which MAY NOT lazy-evaluate | | 2052 | * Note: This is the only routine which MAY NOT lazy-evaluate |
2051 | * or lose information. That is, this routine must actually | | 2053 | * or lose information. That is, this routine must actually |
2052 | * insert this page into the given map NOW. | | 2054 | * insert this page into the given map NOW. |
2053 | */ | | 2055 | */ |
2054 | int | | 2056 | int |
2055 | pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 2057 | pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
2056 | { | | 2058 | { |
2057 | pt_entry_t *pte, npte, opte; | | 2059 | pt_entry_t *pte, npte, opte; |
2058 | pv_entry_t opv = NULL; | | 2060 | pv_entry_t opv = NULL; |
2059 | paddr_t opa; | | 2061 | paddr_t opa; |
2060 | bool tflush = false; | | 2062 | bool tflush = false; |
2061 | int error = 0; | | 2063 | int error = 0; |
2062 | kmutex_t *lock; | | 2064 | kmutex_t *lock; |
2063 | | | 2065 | |
2064 | #ifdef DEBUG | | 2066 | #ifdef DEBUG |
2065 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) | | 2067 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) |
2066 | printf("pmap_enter(%p, %lx, %lx, %x, %x)\n", | | 2068 | printf("pmap_enter(%p, %lx, %lx, %x, %x)\n", |
2067 | pmap, va, pa, prot, flags); | | 2069 | pmap, va, pa, prot, flags); |
2068 | #endif | | 2070 | #endif |
2069 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); | | 2071 | struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); |
2070 | const bool wired = (flags & PMAP_WIRED) != 0; | | 2072 | const bool wired = (flags & PMAP_WIRED) != 0; |
2071 | | | 2073 | |
2072 | PMAP_MAP_TO_HEAD_LOCK(); | | 2074 | PMAP_MAP_TO_HEAD_LOCK(); |
2073 | PMAP_LOCK(pmap); | | 2075 | PMAP_LOCK(pmap); |
2074 | | | 2076 | |
2075 | if (pmap == pmap_kernel()) { | | 2077 | if (pmap == pmap_kernel()) { |
2076 | KASSERT(va >= VM_MIN_KERNEL_ADDRESS); | | 2078 | KASSERT(va >= VM_MIN_KERNEL_ADDRESS); |
2077 | pte = PMAP_KERNEL_PTE(va); | | 2079 | pte = PMAP_KERNEL_PTE(va); |
2078 | } else { | | 2080 | } else { |
2079 | pt_entry_t *l1pte, *l2pte; | | 2081 | pt_entry_t *l1pte, *l2pte; |
2080 | pt_entry_t * const lev1map = pmap_lev1map(pmap); | | 2082 | pt_entry_t * const lev1map = pmap_lev1map(pmap); |
2081 | | | 2083 | |
2082 | KASSERT(va < VM_MAXUSER_ADDRESS); | | 2084 | KASSERT(va < VM_MAXUSER_ADDRESS); |
2083 | KASSERT(lev1map != kernel_lev1map); | | 2085 | KASSERT(lev1map != kernel_lev1map); |
2084 | | | 2086 | |
2085 | /* | | 2087 | /* |
2086 | * Check to see if the level 1 PTE is valid, and | | 2088 | * Check to see if the level 1 PTE is valid, and |
2087 | * allocate a new level 2 page table page if it's not. | | 2089 | * allocate a new level 2 page table page if it's not. |
2088 | * A reference will be added to the level 2 table when | | 2090 | * A reference will be added to the level 2 table when |
2089 | * the level 3 table is created. | | 2091 | * the level 3 table is created. |
2090 | */ | | 2092 | */ |
2091 | l1pte = pmap_l1pte(lev1map, va); | | 2093 | l1pte = pmap_l1pte(lev1map, va); |
2092 | if (pmap_pte_v(l1pte) == 0) { | | 2094 | if (pmap_pte_v(l1pte) == 0) { |
2093 | pmap_physpage_addref(l1pte); | | 2095 | pmap_physpage_addref(l1pte); |
2094 | error = pmap_ptpage_alloc(pmap, l1pte, PGU_L2PT); | | 2096 | error = pmap_ptpage_alloc(pmap, l1pte, PGU_L2PT); |
2095 | if (error) { | | 2097 | if (error) { |
2096 | pmap_l1pt_delref(pmap, l1pte); | | 2098 | pmap_l1pt_delref(pmap, l1pte); |
2097 | if (flags & PMAP_CANFAIL) | | 2099 | if (flags & PMAP_CANFAIL) |
2098 | goto out; | | 2100 | goto out; |
2099 | panic("pmap_enter: unable to create L2 PT " | | 2101 | panic("pmap_enter: unable to create L2 PT " |
2100 | "page"); | | 2102 | "page"); |
2101 | } | | 2103 | } |
2102 | #ifdef DEBUG | | 2104 | #ifdef DEBUG |
2103 | if (pmapdebug & PDB_PTPAGE) | | 2105 | if (pmapdebug & PDB_PTPAGE) |
2104 | printf("pmap_enter: new level 2 table at " | | 2106 | printf("pmap_enter: new level 2 table at " |
2105 | "0x%lx\n", pmap_pte_pa(l1pte)); | | 2107 | "0x%lx\n", pmap_pte_pa(l1pte)); |
2106 | #endif | | 2108 | #endif |
2107 | } | | 2109 | } |
2108 | | | 2110 | |
2109 | /* | | 2111 | /* |
2110 | * Check to see if the level 2 PTE is valid, and | | 2112 | * Check to see if the level 2 PTE is valid, and |
2111 | * allocate a new level 3 page table page if it's not. | | 2113 | * allocate a new level 3 page table page if it's not. |
2112 | * A reference will be added to the level 3 table when | | 2114 | * A reference will be added to the level 3 table when |
2113 | * the mapping is validated. | | 2115 | * the mapping is validated. |
2114 | */ | | 2116 | */ |
2115 | l2pte = pmap_l2pte(lev1map, va, l1pte); | | 2117 | l2pte = pmap_l2pte(lev1map, va, l1pte); |
2116 | if (pmap_pte_v(l2pte) == 0) { | | 2118 | if (pmap_pte_v(l2pte) == 0) { |
2117 | pmap_physpage_addref(l2pte); | | 2119 | pmap_physpage_addref(l2pte); |
2118 | error = pmap_ptpage_alloc(pmap, l2pte, PGU_L3PT); | | 2120 | error = pmap_ptpage_alloc(pmap, l2pte, PGU_L3PT); |
2119 | if (error) { | | 2121 | if (error) { |
2120 | /* unlocks pmap */ | | 2122 | /* unlocks pmap */ |
2121 | pmap_enter_l2pt_delref(pmap, l1pte, l2pte); | | 2123 | pmap_enter_l2pt_delref(pmap, l1pte, l2pte); |
2122 | if (flags & PMAP_CANFAIL) { | | 2124 | if (flags & PMAP_CANFAIL) { |
2123 | PMAP_LOCK(pmap); | | 2125 | PMAP_LOCK(pmap); |
2124 | goto out; | | 2126 | goto out; |
2125 | } | | 2127 | } |
2126 | panic("pmap_enter: unable to create L3 PT " | | 2128 | panic("pmap_enter: unable to create L3 PT " |
2127 | "page"); | | 2129 | "page"); |
2128 | } | | 2130 | } |
2129 | #ifdef DEBUG | | 2131 | #ifdef DEBUG |
2130 | if (pmapdebug & PDB_PTPAGE) | | 2132 | if (pmapdebug & PDB_PTPAGE) |
2131 | printf("pmap_enter: new level 3 table at " | | 2133 | printf("pmap_enter: new level 3 table at " |
2132 | "0x%lx\n", pmap_pte_pa(l2pte)); | | 2134 | "0x%lx\n", pmap_pte_pa(l2pte)); |
2133 | #endif | | 2135 | #endif |
2134 | } | | 2136 | } |
2135 | | | 2137 | |
2136 | /* | | 2138 | /* |
2137 | * Get the PTE that will map the page. | | 2139 | * Get the PTE that will map the page. |
2138 | */ | | 2140 | */ |
2139 | pte = pmap_l3pte(lev1map, va, l2pte); | | 2141 | pte = pmap_l3pte(lev1map, va, l2pte); |
2140 | } | | 2142 | } |
2141 | | | 2143 | |
2142 | /* Remember the entire old PTE; used for the TBI check later. */ | | 2144 | /* Remember the entire old PTE; used for the TBI check later. */
2143 | opte = atomic_load_relaxed(pte); | | 2145 | opte = atomic_load_relaxed(pte); |
2144 | | | 2146 | |
2145 | /* | | 2147 | /* |
2146 | * Check to see if the old mapping is valid. If not, validate the | | 2148 | * Check to see if the old mapping is valid. If not, validate the |
2147 | * new one immediately. | | 2149 | * new one immediately. |
2148 | */ | | 2150 | */ |
2149 | if ((opte & PG_V) == 0) { | | 2151 | if ((opte & PG_V) == 0) { |
2150 | /* No TLB invalidations needed for new mappings. */ | | 2152 | /* No TLB invalidations needed for new mappings. */
2151 | | | 2153 | |
2152 | if (pmap != pmap_kernel()) { | | 2154 | if (pmap != pmap_kernel()) { |
2153 | /* | | 2155 | /* |
2154 | * New mappings gain a reference on the level 3 | | 2156 | * New mappings gain a reference on the level 3 |
2155 | * table. | | 2157 | * table. |
2156 | */ | | 2158 | */ |
2157 | pmap_physpage_addref(pte); | | 2159 | pmap_physpage_addref(pte); |
2158 | } | | 2160 | } |
2159 | goto validate_enterpv; | | 2161 | goto validate_enterpv; |
2160 | } | | 2162 | } |
2161 | | | 2163 | |
2162 | opa = pmap_pte_pa(pte); | | 2164 | opa = pmap_pte_pa(pte); |
2163 | | | 2165 | |
2164 | if (opa == pa) { | | 2166 | if (opa == pa) { |
2165 | /* | | 2167 | /* |
2166 | * Mapping has not changed; must be a protection or | | 2168 | * Mapping has not changed; must be a protection or |
2167 | * wiring change. | | 2169 | * wiring change. |
2168 | */ | | 2170 | */ |
2169 | if (pmap_pte_w_chg(pte, wired ? PG_WIRED : 0)) { | | 2171 | if (pmap_pte_w_chg(pte, wired ? PG_WIRED : 0)) { |
2170 | #ifdef DEBUG | | 2172 | #ifdef DEBUG |
2171 | if (pmapdebug & PDB_ENTER) | | 2173 | if (pmapdebug & PDB_ENTER) |
2172 | printf("pmap_enter: wiring change -> %d\n", | | 2174 | printf("pmap_enter: wiring change -> %d\n", |
2173 | wired); | | 2175 | wired); |
2174 | #endif | | 2176 | #endif |
2175 | /* Adjust the wiring count. */ | | 2177 | /* Adjust the wiring count. */ |
2176 | if (wired) | | 2178 | if (wired) |
2177 | PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1); | | 2179 | PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1); |
2178 | else | | 2180 | else |
2179 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); | | 2181 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); |
2180 | } | | 2182 | } |
2181 | | | 2183 | |
2182 | /* Set the PTE. */ | | 2184 | /* Set the PTE. */ |
2183 | goto validate; | | 2185 | goto validate; |
2184 | } | | 2186 | } |
2185 | | | 2187 | |
2186 | /* | | 2188 | /* |
2187 | * The mapping has changed. We need to invalidate the | | 2189 | * The mapping has changed. We need to invalidate the |
2188 | * old mapping before creating the new one. | | 2190 | * old mapping before creating the new one. |
2189 | */ | | 2191 | */ |
2190 | #ifdef DEBUG | | 2192 | #ifdef DEBUG |
2191 | if (pmapdebug & PDB_ENTER) | | 2193 | if (pmapdebug & PDB_ENTER) |
2192 | printf("pmap_enter: removing old mapping 0x%lx\n", va); | | 2194 | printf("pmap_enter: removing old mapping 0x%lx\n", va); |
2193 | #endif | | 2195 | #endif |
2194 | if (pmap != pmap_kernel()) { | | 2196 | if (pmap != pmap_kernel()) { |
2195 | /* | | 2197 | /* |
2196 | * Gain an extra reference on the level 3 table. | | 2198 | * Gain an extra reference on the level 3 table. |
2197 | * pmap_remove_mapping() will delete a reference, | | 2199 | * pmap_remove_mapping() will delete a reference, |
2198 | * and we don't want the table to be erroneously | | 2200 | * and we don't want the table to be erroneously |
2199 | * freed. | | 2201 | * freed. |
2200 | */ | | 2202 | */ |
2201 | pmap_physpage_addref(pte); | | 2203 | pmap_physpage_addref(pte); |
2202 | } | | 2204 | } |
2203 | /* Already have the bits from opte above. */ | | 2205 | /* Already have the bits from opte above. */ |
2204 | (void) pmap_remove_mapping(pmap, va, pte, true, &opv, NULL); | | 2206 | (void) pmap_remove_mapping(pmap, va, pte, true, &opv, NULL); |
2205 | | | 2207 | |
2206 | validate_enterpv: | | 2208 | validate_enterpv: |
2207 | /* Enter the mapping into the pv_table if appropriate. */ | | 2209 | /* Enter the mapping into the pv_table if appropriate. */ |
2208 | if (pg != NULL) { | | 2210 | if (pg != NULL) { |
2209 | error = pmap_pv_enter(pmap, pg, va, pte, true, opv); | | 2211 | error = pmap_pv_enter(pmap, pg, va, pte, true, opv); |
2210 | if (error) { | | 2212 | if (error) { |
2211 | /* This can only fail if opv == NULL */ | | 2213 | /* This can only fail if opv == NULL */ |
2212 | KASSERT(opv == NULL); | | 2214 | KASSERT(opv == NULL); |
2213 | | | 2215 | |
2214 | /* unlocks pmap */ | | 2216 | /* unlocks pmap */ |
2215 | pmap_enter_l3pt_delref(pmap, va, pte); | | 2217 | pmap_enter_l3pt_delref(pmap, va, pte); |
2216 | if (flags & PMAP_CANFAIL) { | | 2218 | if (flags & PMAP_CANFAIL) { |
2217 | PMAP_LOCK(pmap); | | 2219 | PMAP_LOCK(pmap); |
2218 | goto out; | | 2220 | goto out; |
2219 | } | | 2221 | } |
2220 | panic("pmap_enter: unable to enter mapping in PV " | | 2222 | panic("pmap_enter: unable to enter mapping in PV " |
2221 | "table"); | | 2223 | "table"); |
2222 | } | | 2224 | } |
2223 | opv = NULL; | | 2225 | opv = NULL; |
2224 | } | | 2226 | } |
2225 | | | 2227 | |
2226 | /* Increment counters. */ | | 2228 | /* Increment counters. */ |
2227 | PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1); | | 2229 | PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1); |
2228 | if (wired) | | 2230 | if (wired) |
2229 | PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1); | | 2231 | PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1); |
2230 | | | 2232 | |
2231 | validate: | | 2233 | validate: |
2232 | /* Build the new PTE. */ | | 2234 | /* Build the new PTE. */ |
2233 | npte = ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap, prot) | PG_V; | | 2235 | npte = ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap, prot) | PG_V; |
2234 | if (pg != NULL) { | | 2236 | if (pg != NULL) { |
2235 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 2237 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
2236 | uintptr_t attrs = 0; | | 2238 | uintptr_t attrs = 0; |
2237 | | | 2239 | |
2238 | KASSERT(((flags & VM_PROT_ALL) & ~prot) == 0); | | 2240 | KASSERT(((flags & VM_PROT_ALL) & ~prot) == 0); |
2239 | | | 2241 | |
2240 | if (flags & VM_PROT_WRITE) | | 2242 | if (flags & VM_PROT_WRITE) |
2241 | attrs |= (PGA_REFERENCED|PGA_MODIFIED); | | 2243 | attrs |= (PGA_REFERENCED|PGA_MODIFIED); |
2242 | else if (flags & VM_PROT_ALL) | | 2244 | else if (flags & VM_PROT_ALL) |
2243 | attrs |= PGA_REFERENCED; | | 2245 | attrs |= PGA_REFERENCED; |
2244 | | | 2246 | |
2245 | lock = pmap_pvh_lock(pg); | | 2247 | lock = pmap_pvh_lock(pg); |
2246 | mutex_enter(lock); | | 2248 | mutex_enter(lock); |
2247 | md->pvh_listx |= attrs; | | 2249 | md->pvh_listx |= attrs; |
2248 | mutex_exit(lock); | | 2250 | mutex_exit(lock); |
2249 | | | 2251 | |
2250 | /* Set up referenced/modified emulation for new mapping. */ | | 2252 | /* Set up referenced/modified emulation for new mapping. */ |
2251 | if ((attrs & PGA_REFERENCED) == 0) | | 2253 | if ((attrs & PGA_REFERENCED) == 0) |
2252 | npte |= PG_FOR | PG_FOW | PG_FOE; | | 2254 | npte |= PG_FOR | PG_FOW | PG_FOE; |
2253 | else if ((attrs & PGA_MODIFIED) == 0) | | 2255 | else if ((attrs & PGA_MODIFIED) == 0) |
2254 | npte |= PG_FOW; | | 2256 | npte |= PG_FOW; |
2255 | | | 2257 | |
2256 | /* | | 2258 | /* |
2257 | * Mapping was entered on PV list. | | 2259 | * Mapping was entered on PV list. |
2258 | */ | | 2260 | */ |
2259 | npte |= PG_PVLIST; | | 2261 | npte |= PG_PVLIST; |
2260 | } | | 2262 | } |
2261 | if (wired) | | 2263 | if (wired) |
2262 | npte |= PG_WIRED; | | 2264 | npte |= PG_WIRED; |
2263 | #ifdef DEBUG | | 2265 | #ifdef DEBUG |
2264 | if (pmapdebug & PDB_ENTER) | | 2266 | if (pmapdebug & PDB_ENTER) |
2265 | printf("pmap_enter: new pte = 0x%lx\n", npte); | | 2267 | printf("pmap_enter: new pte = 0x%lx\n", npte); |
2266 | #endif | | 2268 | #endif |
2267 | | | 2269 | |
2268 | /* | | 2270 | /* |
2269 | * If the HW / PALcode portion of the new PTE is the same as the | | 2271 | * If the HW / PALcode portion of the new PTE is the same as the |
2270 | * old PTE, no TBI is necessary. | | 2272 | * old PTE, no TBI is necessary. |
2271 | */ | | 2273 | */ |
2272 | if (opte & PG_V) { | | 2274 | if (opte & PG_V) { |
2273 | tflush = PG_PALCODE(opte) != PG_PALCODE(npte); | | 2275 | tflush = PG_PALCODE(opte) != PG_PALCODE(npte); |
2274 | } | | 2276 | } |
2275 | | | 2277 | |
2276 | /* Set the new PTE. */ | | 2278 | /* Set the new PTE. */ |
2277 | atomic_store_relaxed(pte, npte); | | 2279 | atomic_store_relaxed(pte, npte); |
2278 | | | 2280 | |
2279 | out: | | 2281 | out: |
2280 | PMAP_MAP_TO_HEAD_UNLOCK(); | | 2282 | PMAP_MAP_TO_HEAD_UNLOCK(); |
2281 | | | 2283 | |
2282 | /* | | 2284 | /* |
2283 | * Invalidate the TLB entry for this VA and any appropriate | | 2285 | * Invalidate the TLB entry for this VA and any appropriate |
2284 | * caches. | | 2286 | * caches. |
2285 | */ | | 2287 | */ |
2286 | if (tflush) { | | 2288 | if (tflush) { |
2287 | /* unlocks pmap */ | | 2289 | /* unlocks pmap */ |
2288 | pmap_enter_tlb_shootdown(pmap, va, opte, true); | | 2290 | pmap_enter_tlb_shootdown(pmap, va, opte, true); |
2289 | if (pmap == pmap_kernel()) { | | 2291 | if (pmap == pmap_kernel()) { |
2290 | TLB_COUNT(reason_enter_kernel); | | 2292 | TLB_COUNT(reason_enter_kernel); |
2291 | } else { | | 2293 | } else { |
2292 | TLB_COUNT(reason_enter_user); | | 2294 | TLB_COUNT(reason_enter_user); |
2293 | } | | 2295 | } |
2294 | } else { | | 2296 | } else { |
2295 | PMAP_UNLOCK(pmap); | | 2297 | PMAP_UNLOCK(pmap); |
2296 | } | | 2298 | } |
2297 | | | 2299 | |
2298 | if (opv) | | 2300 | if (opv) |
2299 | pmap_pv_free(opv); | | 2301 | pmap_pv_free(opv); |
2300 | | | 2302 | |
2301 | return error; | | 2303 | return error; |
2302 | } | | 2304 | } |
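
/*
 * Editor's sketch (not part of the original diff): how the PTE built
 * in pmap_enter() packs the page frame number.  Assuming alpha's
 * 8 KB pages (PGSHIFT == 13) and the PFN field at bit PG_SHIFT, a
 * hypothetical pa of 0x2344000 gives PFN 0x11a2, shifted into place
 * and OR'd with the protection bits and PG_V:
 */
#if 0
	paddr_t pa = 0x2344000UL;			/* PFN 0x11a2 */
	pt_entry_t npte = ((pa >> PGSHIFT) << PG_SHIFT) |
	    pte_prot(pmap, VM_PROT_READ | VM_PROT_WRITE) | PG_V;
#endif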
2303 | | | 2305 | |
2304 | /* | | 2306 | /* |
2305 | * pmap_kenter_pa: [ INTERFACE ] | | 2307 | * pmap_kenter_pa: [ INTERFACE ] |
2306 | * | | 2308 | * |
2307 | * Enter a va -> pa mapping into the kernel pmap without any | | 2309 | * Enter a va -> pa mapping into the kernel pmap without any |
2308 | * physical->virtual tracking. | | 2310 | * physical->virtual tracking. |
2309 | * | | 2311 | * |
2310 | * Note: no locking is necessary in this function. | | 2312 | * Note: no locking is necessary in this function. |
2311 | */ | | 2313 | */ |
2312 | void | | 2314 | void |
2313 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 2315 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
2314 | { | | 2316 | { |
2315 | pmap_t const pmap = pmap_kernel(); | | 2317 | pmap_t const pmap = pmap_kernel(); |
2316 | | | 2318 | |
2317 | #ifdef DEBUG | | 2319 | #ifdef DEBUG |
2318 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) | | 2320 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) |
2319 | printf("pmap_kenter_pa(%lx, %lx, %x)\n", | | 2321 | printf("pmap_kenter_pa(%lx, %lx, %x)\n", |
2320 | va, pa, prot); | | 2322 | va, pa, prot); |
2321 | #endif | | 2323 | #endif |
2322 | | | 2324 | |
2323 | KASSERT(va >= VM_MIN_KERNEL_ADDRESS); | | 2325 | KASSERT(va >= VM_MIN_KERNEL_ADDRESS); |
2324 | | | 2326 | |
2325 | pt_entry_t * const pte = PMAP_KERNEL_PTE(va); | | 2327 | pt_entry_t * const pte = PMAP_KERNEL_PTE(va); |
2326 | | | 2328 | |
2327 | /* Build the new PTE. */ | | 2329 | /* Build the new PTE. */ |
2328 | const pt_entry_t npte = | | 2330 | const pt_entry_t npte = |
2329 | ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap_kernel(), prot) | | | 2331 | ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap_kernel(), prot) | |
2330 | PG_V | PG_WIRED; | | 2332 | PG_V | PG_WIRED; |
2331 | | | 2333 | |
2332 | /* Set the new PTE. */ | | 2334 | /* Set the new PTE. */ |
2333 | const pt_entry_t opte = atomic_load_relaxed(pte); | | 2335 | const pt_entry_t opte = atomic_load_relaxed(pte); |
2334 | atomic_store_relaxed(pte, npte); | | 2336 | atomic_store_relaxed(pte, npte); |
2335 | | | 2337 | |
2336 | PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1); | | 2338 | PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1); |
2337 | PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1); | | 2339 | PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1); |
2338 | | | 2340 | |
2339 | /* | | 2341 | /* |
2340 | * There should not have been anything mapped here previously, | | 2342 | * There should not have been anything mapped here previously,
2341 | * so we can skip TLB shootdowns, etc. in the common case. | | 2343 | * so we can skip TLB shootdowns, etc. in the common case. |

2342 | */ | | 2344 | */ |
2343 | if (__predict_false(opte & PG_V)) { | | 2345 | if (__predict_false(opte & PG_V)) { |
2344 | const pt_entry_t diff = npte ^ opte; | | 2346 | const pt_entry_t diff = npte ^ opte; |
2345 | | | 2347 | |
2346 | printf_nolog("%s: mapping already present\n", __func__); | | 2348 | printf_nolog("%s: mapping already present\n", __func__); |
2347 | PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1); | | 2349 | PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1); |
2348 | if (diff & PG_WIRED) | | 2350 | if (diff & PG_WIRED) |
2349 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); | | 2351 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); |
2350 | /* XXX Can't handle this case. */ | | 2352 | /* XXX Can't handle this case. */ |
2351 | if (diff & PG_PVLIST) | | 2353 | if (diff & PG_PVLIST) |
2352 | panic("pmap_kenter_pa: old mapping was managed"); | | 2354 | panic("pmap_kenter_pa: old mapping was managed"); |
2353 | | | 2355 | |
2354 | pmap_enter_tlb_shootdown(pmap_kernel(), va, opte, false); | | 2356 | pmap_enter_tlb_shootdown(pmap_kernel(), va, opte, false); |
2355 | TLB_COUNT(reason_kenter); | | 2357 | TLB_COUNT(reason_kenter); |
2356 | } | | 2358 | } |
2357 | } | | 2359 | } |
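/*
 * Illustrative sketch (editorial, not in the source): the PTE built
 * above stores the page frame number in the hardware PFN field, so --
 * assuming PG_SHIFT is the PFN field offset and PGSHIFT the page
 * shift -- construction and decomposition are symmetric:
 */
static inline pt_entry_t
example_mkpte(paddr_t pa, pt_entry_t prot)
{
	return ((pa >> PGSHIFT) << PG_SHIFT) | prot | PG_V | PG_WIRED;
}

static inline paddr_t
example_pte_to_pa(pt_entry_t pte)
{
	return (pte >> PG_SHIFT) << PGSHIFT;	/* cf. pmap_pte_pa() */
}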
2358 | | | 2360 | |
2359 | /* | | 2361 | /* |
2360 | * pmap_kremove: [ INTERFACE ] | | 2362 | * pmap_kremove: [ INTERFACE ] |
2361 | * | | 2363 | * |
2362 | * Remove a mapping entered with pmap_kenter_pa() starting at va, | | 2364 | * Remove a mapping entered with pmap_kenter_pa() starting at va, |
2363 | * for size bytes (assumed to be page-rounded). | | 2365 | * for size bytes (assumed to be page-rounded).
2364 | */ | | 2366 | */ |
2365 | void | | 2367 | void |
2366 | pmap_kremove(vaddr_t va, vsize_t size) | | 2368 | pmap_kremove(vaddr_t va, vsize_t size) |
2367 | { | | 2369 | { |
2368 | pt_entry_t *pte, opte; | | 2370 | pt_entry_t *pte, opte; |
2369 | pmap_t const pmap = pmap_kernel(); | | 2371 | pmap_t const pmap = pmap_kernel(); |
2370 | struct pmap_tlb_context tlbctx; | | 2372 | struct pmap_tlb_context tlbctx; |
2371 | int count = 0; | | 2373 | int count = 0; |
2372 | | | 2374 | |
2373 | #ifdef DEBUG | | 2375 | #ifdef DEBUG |
2374 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) | | 2376 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) |
2375 | printf("pmap_kremove(%lx, %lx)\n", | | 2377 | printf("pmap_kremove(%lx, %lx)\n", |
2376 | va, size); | | 2378 | va, size); |
2377 | #endif | | 2379 | #endif |
2378 | | | 2380 | |
2379 | pmap_tlb_context_init(&tlbctx, 0); | | 2381 | pmap_tlb_context_init(&tlbctx, 0); |
2380 | | | 2382 | |
2381 | KASSERT(va >= VM_MIN_KERNEL_ADDRESS); | | 2383 | KASSERT(va >= VM_MIN_KERNEL_ADDRESS); |
2382 | | | 2384 | |
2383 | for (; size != 0; size -= PAGE_SIZE, va += PAGE_SIZE) { | | 2385 | for (; size != 0; size -= PAGE_SIZE, va += PAGE_SIZE) { |
2384 | pte = PMAP_KERNEL_PTE(va); | | 2386 | pte = PMAP_KERNEL_PTE(va); |
2385 | opte = atomic_load_relaxed(pte); | | 2387 | opte = atomic_load_relaxed(pte); |
2386 | if (opte & PG_V) { | | 2388 | if (opte & PG_V) { |
2387 | KASSERT((opte & PG_PVLIST) == 0); | | 2389 | KASSERT((opte & PG_PVLIST) == 0); |
2388 | | | 2390 | |
2389 | /* Zap the mapping. */ | | 2391 | /* Zap the mapping. */ |
2390 | atomic_store_relaxed(pte, PG_NV); | | 2392 | atomic_store_relaxed(pte, PG_NV); |
2391 | pmap_tlb_shootdown(pmap, va, opte, &tlbctx); | | 2393 | pmap_tlb_shootdown(pmap, va, opte, &tlbctx); |
2392 | | | 2394 | |
2393 | count++; | | 2395 | count++; |
2394 | } | | 2396 | } |
2395 | } | | 2397 | } |
2396 | | | 2398 | |
2397 | /* Update stats. */ | | 2399 | /* Update stats. */ |
2398 | if (__predict_true(count != 0)) { | | 2400 | if (__predict_true(count != 0)) { |
2399 | PMAP_STAT_DECR(pmap->pm_stats.resident_count, count); | | 2401 | PMAP_STAT_DECR(pmap->pm_stats.resident_count, count); |
2400 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, count); | | 2402 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, count); |
2401 | } | | 2403 | } |
2402 | | | 2404 | |
2403 | pmap_tlb_shootnow(&tlbctx); | | 2405 | pmap_tlb_shootnow(&tlbctx); |
2404 | TLB_COUNT(reason_kremove); | | 2406 | TLB_COUNT(reason_kremove); |
2405 | } | | 2407 | } |
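/*
 * Hedged usage sketch (editorial, not in the source): a typical caller
 * wires down a run of physical pages with pmap_kenter_pa() and later
 * tears the whole run down with one pmap_kremove() over the same
 * range.  The helper name and its arguments are hypothetical.
 */
static void
example_kenter_range(vaddr_t kva, paddr_t pa, vsize_t size)
{
	for (vsize_t off = 0; off < size; off += PAGE_SIZE)
		pmap_kenter_pa(kva + off, pa + off,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	/* ... use the mapping via kva ... */
	pmap_kremove(kva, size);	/* size must be page-rounded */
}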
2406 | | | 2408 | |
2407 | /* | | 2409 | /* |
2408 | * pmap_unwire: [ INTERFACE ] | | 2410 | * pmap_unwire: [ INTERFACE ] |
2409 | * | | 2411 | * |
2410 | * Clear the wired attribute for a map/virtual-address pair. | | 2412 | * Clear the wired attribute for a map/virtual-address pair. |
2411 | * | | 2413 | * |
2412 | * The mapping must already exist in the pmap. | | 2414 | * The mapping must already exist in the pmap. |
2413 | */ | | 2415 | */ |
2414 | void | | 2416 | void |
2415 | pmap_unwire(pmap_t pmap, vaddr_t va) | | 2417 | pmap_unwire(pmap_t pmap, vaddr_t va) |
2416 | { | | 2418 | { |
2417 | pt_entry_t *pte; | | 2419 | pt_entry_t *pte; |
2418 | | | 2420 | |
2419 | #ifdef DEBUG | | 2421 | #ifdef DEBUG |
2420 | if (pmapdebug & PDB_FOLLOW) | | 2422 | if (pmapdebug & PDB_FOLLOW) |
2421 | printf("pmap_unwire(%p, %lx)\n", pmap, va); | | 2423 | printf("pmap_unwire(%p, %lx)\n", pmap, va); |
2422 | #endif | | 2424 | #endif |
2423 | | | 2425 | |
2424 | PMAP_LOCK(pmap); | | 2426 | PMAP_LOCK(pmap); |
2425 | | | 2427 | |
2426 | pte = pmap_l3pte(pmap_lev1map(pmap), va, NULL); | | 2428 | pte = pmap_l3pte(pmap_lev1map(pmap), va, NULL); |
2427 | | | 2429 | |
2428 | KASSERT(pte != NULL); | | 2430 | KASSERT(pte != NULL); |
2429 | KASSERT(pmap_pte_v(pte)); | | 2431 | KASSERT(pmap_pte_v(pte)); |
2430 | | | 2432 | |
2431 | /* | | 2433 | /* |
2432 | * If wiring actually changed (always?), clear the wire bit and | | 2434 | * If wiring actually changed (always?), clear the wire bit and
2433 | * update the wire count. Note that wiring is not a hardware | | 2435 | * update the wire count. Note that wiring is not a hardware |
2434 | * characteristic so there is no need to invalidate the TLB. | | 2436 | * characteristic so there is no need to invalidate the TLB. |
2435 | */ | | 2437 | */ |
2436 | if (pmap_pte_w_chg(pte, 0)) { | | 2438 | if (pmap_pte_w_chg(pte, 0)) { |
2437 | pmap_pte_set_w(pte, false); | | 2439 | pmap_pte_set_w(pte, false); |
2438 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); | | 2440 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); |
2439 | } | | 2441 | } |
2440 | #ifdef DEBUG | | 2442 | #ifdef DEBUG |
2441 | else { | | 2443 | else { |
2442 | printf("pmap_unwire: wiring for pmap %p va 0x%lx " | | 2444 | printf("pmap_unwire: wiring for pmap %p va 0x%lx " |
2443 | "didn't change!\n", pmap, va); | | 2445 | "didn't change!\n", pmap, va); |
2444 | } | | 2446 | } |
2445 | #endif | | 2447 | #endif |
2446 | | | 2448 | |
2447 | PMAP_UNLOCK(pmap); | | 2449 | PMAP_UNLOCK(pmap); |
2448 | } | | 2450 | } |
2449 | | | 2451 | |
2450 | /* | | 2452 | /* |
2451 | * pmap_extract: [ INTERFACE ] | | 2453 | * pmap_extract: [ INTERFACE ] |
2452 | * | | 2454 | * |
2453 | * Extract the physical address associated with the given | | 2455 | * Extract the physical address associated with the given |
2454 | * pmap/virtual address pair. | | 2456 | * pmap/virtual address pair. |
2455 | */ | | 2457 | */ |
2456 | bool | | 2458 | bool |
2457 | pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) | | 2459 | pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) |
2458 | { | | 2460 | { |
2459 | pt_entry_t *l1pte, *l2pte, *l3pte; | | 2461 | pt_entry_t *l1pte, *l2pte, *l3pte; |
2460 | paddr_t pa; | | 2462 | paddr_t pa; |
2461 | | | 2463 | |
2462 | #ifdef DEBUG | | 2464 | #ifdef DEBUG |
2463 | if (pmapdebug & PDB_FOLLOW) | | 2465 | if (pmapdebug & PDB_FOLLOW) |
2464 | printf("pmap_extract(%p, %lx) -> ", pmap, va); | | 2466 | printf("pmap_extract(%p, %lx) -> ", pmap, va); |
2465 | #endif | | 2467 | #endif |
2466 | | | 2468 | |
2467 | /* | | 2469 | /* |
2468 | * Take a faster path for the kernel pmap. Avoids locking, | | 2470 | * Take a faster path for the kernel pmap. Avoids locking, |
2469 | * handles K0SEG. | | 2471 | * handles K0SEG. |
2470 | */ | | 2472 | */ |
2471 | if (__predict_true(pmap == pmap_kernel())) { | | 2473 | if (__predict_true(pmap == pmap_kernel())) { |
2472 | if (__predict_true(vtophys_internal(va, pap))) { | | 2474 | if (__predict_true(vtophys_internal(va, pap))) { |
2473 | #ifdef DEBUG | | 2475 | #ifdef DEBUG |
2474 | if (pmapdebug & PDB_FOLLOW) | | 2476 | if (pmapdebug & PDB_FOLLOW) |
2475 | printf("0x%lx (kernel vtophys)\n", pap != NULL ? *pap : 0UL); | | 2477 | printf("0x%lx (kernel vtophys)\n", pap != NULL ? *pap : 0UL);
2476 | #endif | | 2478 | #endif |
2477 | return true; | | 2479 | return true; |
2478 | } | | 2480 | } |
2479 | #ifdef DEBUG | | 2481 | #ifdef DEBUG |
2480 | if (pmapdebug & PDB_FOLLOW) | | 2482 | if (pmapdebug & PDB_FOLLOW) |
2481 | printf("failed (kernel vtophys)\n"); | | 2483 | printf("failed (kernel vtophys)\n"); |
2482 | #endif | | 2484 | #endif |
2483 | return false; | | 2485 | return false; |
2484 | } | | 2486 | } |
2485 | | | 2487 | |
2486 | pt_entry_t * const lev1map = pmap_lev1map(pmap); | | 2488 | pt_entry_t * const lev1map = pmap_lev1map(pmap); |
2487 | | | 2489 | |
2488 | PMAP_LOCK(pmap); | | 2490 | PMAP_LOCK(pmap); |
2489 | | | 2491 | |
2490 | l1pte = pmap_l1pte(lev1map, va); | | 2492 | l1pte = pmap_l1pte(lev1map, va); |
2491 | if (pmap_pte_v(l1pte) == 0) | | 2493 | if (pmap_pte_v(l1pte) == 0) |
2492 | goto out; | | 2494 | goto out; |
2493 | | | 2495 | |
2494 | l2pte = pmap_l2pte(lev1map, va, l1pte); | | 2496 | l2pte = pmap_l2pte(lev1map, va, l1pte); |
2495 | if (pmap_pte_v(l2pte) == 0) | | 2497 | if (pmap_pte_v(l2pte) == 0) |
2496 | goto out; | | 2498 | goto out; |
2497 | | | 2499 | |
2498 | l3pte = pmap_l3pte(lev1map, va, l2pte); | | 2500 | l3pte = pmap_l3pte(lev1map, va, l2pte); |
2499 | if (pmap_pte_v(l3pte) == 0) | | 2501 | if (pmap_pte_v(l3pte) == 0) |
2500 | goto out; | | 2502 | goto out; |
2501 | | | 2503 | |
2502 | pa = pmap_pte_pa(l3pte) | (va & PGOFSET); | | 2504 | pa = pmap_pte_pa(l3pte) | (va & PGOFSET); |
2503 | PMAP_UNLOCK(pmap); | | 2505 | PMAP_UNLOCK(pmap); |
2504 | if (pap != NULL) | | 2506 | if (pap != NULL) |
2505 | *pap = pa; | | 2507 | *pap = pa; |
2506 | #ifdef DEBUG | | 2508 | #ifdef DEBUG |
2507 | if (pmapdebug & PDB_FOLLOW) | | 2509 | if (pmapdebug & PDB_FOLLOW) |
2508 | printf("0x%lx\n", pa); | | 2510 | printf("0x%lx\n", pa); |
2509 | #endif | | 2511 | #endif |
2510 | return (true); | | 2512 | return (true); |
2511 | | | 2513 | |
2512 | out: | | 2514 | out: |
2513 | PMAP_UNLOCK(pmap); | | 2515 | PMAP_UNLOCK(pmap); |
2514 | #ifdef DEBUG | | 2516 | #ifdef DEBUG |
2515 | if (pmapdebug & PDB_FOLLOW) | | 2517 | if (pmapdebug & PDB_FOLLOW) |
2516 | printf("failed\n"); | | 2518 | printf("failed\n"); |
2517 | #endif | | 2519 | #endif |
2518 | return (false); | | 2520 | return (false); |
2519 | } | | 2521 | } |
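/*
 * Hedged usage sketch (editorial, not in the source): a false return
 * from pmap_extract() means "no valid mapping", not a hard error, so
 * callers must check it before trusting the physical address.  The
 * helper below is hypothetical.
 */
static paddr_t
example_extract_or_panic(pmap_t pm, vaddr_t va)
{
	paddr_t pa;

	if (!pmap_extract(pm, va, &pa))
		panic("example_extract: va 0x%lx not mapped", va);
	return pa;
}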
2520 | | | 2522 | |
2521 | /* | | 2523 | /* |
2522 | * pmap_copy: [ INTERFACE ] | | 2524 | * pmap_copy: [ INTERFACE ] |
2523 | * | | 2525 | * |
2524 | * Copy the mapping range specified by src_addr/len | | 2526 | * Copy the mapping range specified by src_addr/len |
2525 | * from the source map to the range dst_addr/len | | 2527 | * from the source map to the range dst_addr/len |
2526 | * in the destination map. | | 2528 | * in the destination map. |
2527 | * | | 2529 | * |
2528 | * This routine is only advisory and need not do anything. | | 2530 | * This routine is only advisory and need not do anything. |
2529 | */ | | 2531 | */ |
2530 | /* call deleted in <machine/pmap.h> */ | | 2532 | /* call deleted in <machine/pmap.h> */ |
2531 | | | 2533 | |
2532 | /* | | 2534 | /* |
2533 | * pmap_update: [ INTERFACE ] | | 2535 | * pmap_update: [ INTERFACE ] |
2534 | * | | 2536 | * |
2535 | * Require that all active physical maps contain no | | 2537 | * Require that all active physical maps contain no |
2536 | * incorrect entries NOW, by processing any deferred | | 2538 | * incorrect entries NOW, by processing any deferred |
2537 | * pmap operations. | | 2539 | * pmap operations. |
2538 | */ | | 2540 | */ |
2539 | /* call deleted in <machine/pmap.h> */ | | 2541 | /* call deleted in <machine/pmap.h> */ |
2540 | | | 2542 | |
2541 | /* | | 2543 | /* |
2542 | * pmap_activate: [ INTERFACE ] | | 2544 | * pmap_activate: [ INTERFACE ] |
2543 | * | | 2545 | * |
2544 | * Activate the pmap used by the specified process. This includes | | 2546 | * Activate the pmap used by the specified process. This includes |
2545 | * reloading the MMU context of the current process, and marking | | 2547 | * reloading the MMU context of the current process, and marking |
2546 | * the pmap in use by the processor. | | 2548 | * the pmap in use by the processor. |
2547 | */ | | 2549 | */ |
2548 | void | | 2550 | void |
2549 | pmap_activate(struct lwp *l) | | 2551 | pmap_activate(struct lwp *l) |
2550 | { | | 2552 | { |
2551 | struct pmap * const pmap = l->l_proc->p_vmspace->vm_map.pmap; | | 2553 | struct pmap * const pmap = l->l_proc->p_vmspace->vm_map.pmap; |
2552 | struct pcb * const pcb = lwp_getpcb(l); | | 2554 | struct pcb * const pcb = lwp_getpcb(l); |
2553 | | | 2555 | |
2554 | #ifdef DEBUG | | 2556 | #ifdef DEBUG |
2555 | if (pmapdebug & PDB_FOLLOW) | | 2557 | if (pmapdebug & PDB_FOLLOW) |
2556 | printf("pmap_activate(%p)\n", l); | | 2558 | printf("pmap_activate(%p)\n", l); |
2557 | #endif | | 2559 | #endif |
2558 | | | 2560 | |
2559 | KASSERT(kpreempt_disabled()); | | 2561 | KASSERT(kpreempt_disabled()); |
2560 | | | 2562 | |
2561 | struct cpu_info * const ci = curcpu(); | | 2563 | struct cpu_info * const ci = curcpu(); |
2562 | | | 2564 | |
2563 | KASSERT(l == ci->ci_curlwp); | | 2565 | KASSERT(l == ci->ci_curlwp); |
2564 | | | 2566 | |
2565 | u_long const old_ptbr = pcb->pcb_hw.apcb_ptbr; | | 2567 | u_long const old_ptbr = pcb->pcb_hw.apcb_ptbr; |
2566 | u_int const old_asn = pcb->pcb_hw.apcb_asn; | | 2568 | u_int const old_asn = pcb->pcb_hw.apcb_asn; |
2567 | | | 2569 | |
2568 | /* | | 2570 | /* |
2569 | * We hold the activation lock to synchronize with TLB shootdown. | | 2571 | * We hold the activation lock to synchronize with TLB shootdown. |
2570 | * The kernel pmap does not require this synchronization because shootdowns | | 2572 | * The kernel pmap does not require this synchronization because shootdowns
2571 | * for the kernel pmap are always sent to all CPUs. | | 2573 | * for the kernel pmap are always sent to all CPUs. |
2572 | */ | | 2574 | */ |
2573 | if (pmap != pmap_kernel()) { | | 2575 | if (pmap != pmap_kernel()) { |
2574 | PMAP_ACT_LOCK(pmap); | | 2576 | PMAP_ACT_LOCK(pmap); |
2575 | pcb->pcb_hw.apcb_asn = pmap_asn_alloc(pmap, ci); | | 2577 | pcb->pcb_hw.apcb_asn = pmap_asn_alloc(pmap, ci); |
2576 | atomic_or_ulong(&pmap->pm_cpus, (1UL << ci->ci_cpuid)); | | 2578 | atomic_or_ulong(&pmap->pm_cpus, (1UL << ci->ci_cpuid)); |
2577 | } else { | | 2579 | } else { |
2578 | pcb->pcb_hw.apcb_asn = PMAP_ASN_KERNEL; | | 2580 | pcb->pcb_hw.apcb_asn = PMAP_ASN_KERNEL; |
2579 | } | | 2581 | } |
2580 | pcb->pcb_hw.apcb_ptbr = | | 2582 | pcb->pcb_hw.apcb_ptbr = |
2581 | ALPHA_K0SEG_TO_PHYS((vaddr_t)pmap_lev1map(pmap)) >> PGSHIFT; | | 2583 | ALPHA_K0SEG_TO_PHYS((vaddr_t)pmap_lev1map(pmap)) >> PGSHIFT; |
2582 | | | 2584 | |
2583 | /* | | 2585 | /* |
2584 | * Check to see if the ASN or page table base has changed; if | | 2586 | * Check to see if the ASN or page table base has changed; if |
2585 | * so, switch to our own context again so that it will take | | 2587 | * so, switch to our own context again so that it will take |
2586 | * effect. | | 2588 | * effect. |
2587 | * | | 2589 | * |
2588 | * We test ASN first because it's the most likely value to change. | | 2590 | * We test ASN first because it's the most likely value to change. |
2589 | */ | | 2591 | */ |
2590 | if (old_asn != pcb->pcb_hw.apcb_asn || | | 2592 | if (old_asn != pcb->pcb_hw.apcb_asn || |
2591 | old_ptbr != pcb->pcb_hw.apcb_ptbr) { | | 2593 | old_ptbr != pcb->pcb_hw.apcb_ptbr) { |
2592 | if (old_asn != pcb->pcb_hw.apcb_asn && | | 2594 | if (old_asn != pcb->pcb_hw.apcb_asn && |
2593 | old_ptbr != pcb->pcb_hw.apcb_ptbr) { | | 2595 | old_ptbr != pcb->pcb_hw.apcb_ptbr) { |
2594 | TLB_COUNT(activate_both_change); | | 2596 | TLB_COUNT(activate_both_change); |
2595 | } else if (old_asn != pcb->pcb_hw.apcb_asn) { | | 2597 | } else if (old_asn != pcb->pcb_hw.apcb_asn) { |
2596 | TLB_COUNT(activate_asn_change); | | 2598 | TLB_COUNT(activate_asn_change); |
2597 | } else { | | 2599 | } else { |
2598 | TLB_COUNT(activate_ptbr_change); | | 2600 | TLB_COUNT(activate_ptbr_change); |
2599 | } | | 2601 | } |
2600 | (void) alpha_pal_swpctx((u_long)l->l_md.md_pcbpaddr); | | 2602 | (void) alpha_pal_swpctx((u_long)l->l_md.md_pcbpaddr); |
2601 | TLB_COUNT(activate_swpctx); | | 2603 | TLB_COUNT(activate_swpctx); |
2602 | } else { | | 2604 | } else { |
2603 | TLB_COUNT(activate_skip_swpctx); | | 2605 | TLB_COUNT(activate_skip_swpctx); |
2604 | } | | 2606 | } |
2605 | | | 2607 | |
2606 | pmap_reference(pmap); | | 2608 | pmap_reference(pmap); |
2607 | ci->ci_pmap = pmap; | | 2609 | ci->ci_pmap = pmap; |
2608 | | | 2610 | |
2609 | if (pmap != pmap_kernel()) { | | 2611 | if (pmap != pmap_kernel()) { |
2610 | PMAP_ACT_UNLOCK(pmap); | | 2612 | PMAP_ACT_UNLOCK(pmap); |
2611 | } | | 2613 | } |
2612 | } | | 2614 | } |
2613 | | | 2615 | |
2614 | /* | | 2616 | /* |
2615 | * pmap_deactivate: [ INTERFACE ] | | 2617 | * pmap_deactivate: [ INTERFACE ] |
2616 | * | | 2618 | * |
2617 | * Mark that the pmap used by the specified process is no longer | | 2619 | * Mark that the pmap used by the specified process is no longer |
2618 | * in use by the processor. | | 2620 | * in use by the processor. |
2619 | */ | | 2621 | */ |
2620 | void | | 2622 | void |
2621 | pmap_deactivate(struct lwp *l) | | 2623 | pmap_deactivate(struct lwp *l) |
2622 | { | | 2624 | { |
2623 | struct pmap * const pmap = l->l_proc->p_vmspace->vm_map.pmap; | | 2625 | struct pmap * const pmap = l->l_proc->p_vmspace->vm_map.pmap; |
2624 | | | 2626 | |
2625 | #ifdef DEBUG | | 2627 | #ifdef DEBUG |
2626 | if (pmapdebug & PDB_FOLLOW) | | 2628 | if (pmapdebug & PDB_FOLLOW) |
2627 | printf("pmap_deactivate(%p)\n", l); | | 2629 | printf("pmap_deactivate(%p)\n", l); |
2628 | #endif | | 2630 | #endif |
2629 | | | 2631 | |
2630 | KASSERT(kpreempt_disabled()); | | 2632 | KASSERT(kpreempt_disabled()); |
2631 | | | 2633 | |
2632 | struct cpu_info * const ci = curcpu(); | | 2634 | struct cpu_info * const ci = curcpu(); |
2633 | | | 2635 | |
2634 | KASSERT(l == ci->ci_curlwp); | | 2636 | KASSERT(l == ci->ci_curlwp); |
2635 | KASSERT(pmap == ci->ci_pmap); | | 2637 | KASSERT(pmap == ci->ci_pmap); |
2636 | | | 2638 | |
2637 | /* | | 2639 | /* |
2638 | * There is no need to switch to a different PTBR here, | | 2640 | * There is no need to switch to a different PTBR here, |
2639 | * because a pmap_activate() or SWPCTX is guaranteed | | 2641 | * because a pmap_activate() or SWPCTX is guaranteed |
2640 | * before whatever lev1map we're on now is invalidated | | 2642 | * before whatever lev1map we're on now is invalidated |
2641 | * or before user space is accessed again. | | 2643 | * or before user space is accessed again. |
2642 | * | | 2644 | * |
2643 | * Because only kernel mappings will be accessed before the | | 2645 | * Because only kernel mappings will be accessed before the |
2644 | * next pmap_activate() call, we consider our CPU to be on | | 2646 | * next pmap_activate() call, we consider our CPU to be on |
2645 | * the kernel pmap. | | 2647 | * the kernel pmap. |
2646 | */ | | 2648 | */ |
2647 | ci->ci_pmap = pmap_kernel(); | | 2649 | ci->ci_pmap = pmap_kernel(); |
2648 | KASSERT(atomic_load_relaxed(&pmap->pm_count) > 1); | | 2650 | KASSERT(atomic_load_relaxed(&pmap->pm_count) > 1); |
2649 | pmap_destroy(pmap); | | 2651 | pmap_destroy(pmap); |
2650 | } | | 2652 | } |
2651 | | | 2653 | |
2652 | /* | | 2654 | /* |
2653 | * pmap_zero_page: [ INTERFACE ] | | 2655 | * pmap_zero_page: [ INTERFACE ] |
2654 | * | | 2656 | * |
2655 | * Zero the specified (machine independent) page by mapping the page | | 2657 | * Zero the specified (machine independent) page by mapping the page |
2656 | * into virtual memory and clearing its contents, one machine dependent | | 2658 | * into virtual memory and clearing its contents, one machine dependent
2657 | * page at a time. | | 2659 | * page at a time. |
2658 | * | | 2660 | * |
2659 | * Note: no locking is necessary in this function. | | 2661 | * Note: no locking is necessary in this function. |
2660 | */ | | 2662 | */ |
2661 | void | | 2663 | void |
2662 | pmap_zero_page(paddr_t phys) | | 2664 | pmap_zero_page(paddr_t phys) |
2663 | { | | 2665 | { |
2664 | u_long *p0, *p1, *pend; | | 2666 | u_long *p0, *p1, *pend; |
2665 | | | 2667 | |
2666 | #ifdef DEBUG | | 2668 | #ifdef DEBUG |
2667 | if (pmapdebug & PDB_FOLLOW) | | 2669 | if (pmapdebug & PDB_FOLLOW) |
2668 | printf("pmap_zero_page(%lx)\n", phys); | | 2670 | printf("pmap_zero_page(%lx)\n", phys); |
2669 | #endif | | 2671 | #endif |
2670 | | | 2672 | |
2671 | p0 = (u_long *)ALPHA_PHYS_TO_K0SEG(phys); | | 2673 | p0 = (u_long *)ALPHA_PHYS_TO_K0SEG(phys); |
2672 | p1 = NULL; | | 2674 | p1 = NULL; |
2673 | pend = (u_long *)((u_long)p0 + PAGE_SIZE); | | 2675 | pend = (u_long *)((u_long)p0 + PAGE_SIZE); |
2674 | | | 2676 | |
2675 | /* | | 2677 | /* |
2676 | * Unroll the loop a bit, doing 16 quadwords per iteration. | | 2678 | * Unroll the loop a bit, doing 16 quadwords per iteration. |
2677 | * Do only 8 back-to-back stores, and alternate registers. | | 2679 | * Do only 8 back-to-back stores, and alternate registers. |
2678 | */ | | 2680 | */ |
2679 | do { | | 2681 | do { |
2680 | __asm volatile( | | 2682 | __asm volatile( |
2681 | "# BEGIN loop body\n" | | 2683 | "# BEGIN loop body\n" |
2682 | " addq %2, (8 * 8), %1 \n" | | 2684 | " addq %2, (8 * 8), %1 \n" |
2683 | " stq $31, (0 * 8)(%0) \n" | | 2685 | " stq $31, (0 * 8)(%0) \n" |
2684 | " stq $31, (1 * 8)(%0) \n" | | 2686 | " stq $31, (1 * 8)(%0) \n" |
2685 | " stq $31, (2 * 8)(%0) \n" | | 2687 | " stq $31, (2 * 8)(%0) \n" |
2686 | " stq $31, (3 * 8)(%0) \n" | | 2688 | " stq $31, (3 * 8)(%0) \n" |
2687 | " stq $31, (4 * 8)(%0) \n" | | 2689 | " stq $31, (4 * 8)(%0) \n" |
2688 | " stq $31, (5 * 8)(%0) \n" | | 2690 | " stq $31, (5 * 8)(%0) \n" |
2689 | " stq $31, (6 * 8)(%0) \n" | | 2691 | " stq $31, (6 * 8)(%0) \n" |
2690 | " stq $31, (7 * 8)(%0) \n" | | 2692 | " stq $31, (7 * 8)(%0) \n" |
2691 | " \n" | | 2693 | " \n" |
2692 | " addq %3, (8 * 8), %0 \n" | | 2694 | " addq %3, (8 * 8), %0 \n" |
2693 | " stq $31, (0 * 8)(%1) \n" | | 2695 | " stq $31, (0 * 8)(%1) \n" |
2694 | " stq $31, (1 * 8)(%1) \n" | | 2696 | " stq $31, (1 * 8)(%1) \n" |
2695 | " stq $31, (2 * 8)(%1) \n" | | 2697 | " stq $31, (2 * 8)(%1) \n" |
2696 | " stq $31, (3 * 8)(%1) \n" | | 2698 | " stq $31, (3 * 8)(%1) \n" |
2697 | " stq $31, (4 * 8)(%1) \n" | | 2699 | " stq $31, (4 * 8)(%1) \n" |
2698 | " stq $31, (5 * 8)(%1) \n" | | 2700 | " stq $31, (5 * 8)(%1) \n" |
2699 | " stq $31, (6 * 8)(%1) \n" | | 2701 | " stq $31, (6 * 8)(%1) \n" |
2700 | " stq $31, (7 * 8)(%1) \n" | | 2702 | " stq $31, (7 * 8)(%1) \n" |
2701 | " # END loop body" | | 2703 | " # END loop body" |
2702 | : "=r" (p0), "=r" (p1) | | 2704 | : "=r" (p0), "=r" (p1) |
2703 | : "0" (p0), "1" (p1) | | 2705 | : "0" (p0), "1" (p1) |
2704 | : "memory"); | | 2706 | : "memory"); |
2705 | } while (p0 < pend); | | 2707 | } while (p0 < pend); |
2706 | } | | 2708 | } |
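/*
 * Illustrative sketch (editorial, not in the source): in portable C
 * the unrolled loop above is equivalent to the following; the assembly
 * exists only to control store scheduling (8 back-to-back stq's per
 * base register, alternating registers so the two address increments
 * overlap the stores).  Each iteration zeroes 16 quadwords (128 bytes).
 */
static void
example_zero_page_c(u_long *p)
{
	u_long * const pend = p + PAGE_SIZE / sizeof(u_long);

	while (p < pend) {
		for (int i = 0; i < 16; i++)
			p[i] = 0;
		p += 16;
	}
}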
2707 | | | 2709 | |
2708 | /* | | 2710 | /* |
2709 | * pmap_copy_page: [ INTERFACE ] | | 2711 | * pmap_copy_page: [ INTERFACE ] |
2710 | * | | 2712 | * |
2711 | * Copy the specified (machine independent) page by mapping the page | | 2713 | * Copy the specified (machine independent) page by mapping the page |
2712 | * into virtual memory and using memcpy to copy the page, one machine | | 2714 | * into virtual memory and using memcpy to copy the page, one machine |
2713 | * dependent page at a time. | | 2715 | * dependent page at a time. |
2714 | * | | 2716 | * |
2715 | * Note: no locking is necessary in this function. | | 2717 | * Note: no locking is necessary in this function. |
2716 | */ | | 2718 | */ |
2717 | void | | 2719 | void |
2718 | pmap_copy_page(paddr_t src, paddr_t dst) | | 2720 | pmap_copy_page(paddr_t src, paddr_t dst) |
2719 | { | | 2721 | { |
2720 | const void *s; | | 2722 | const void *s; |
2721 | void *d; | | 2723 | void *d; |
2722 | | | 2724 | |
2723 | #ifdef DEBUG | | 2725 | #ifdef DEBUG |
2724 | if (pmapdebug & PDB_FOLLOW) | | 2726 | if (pmapdebug & PDB_FOLLOW) |
2725 | printf("pmap_copy_page(%lx, %lx)\n", src, dst); | | 2727 | printf("pmap_copy_page(%lx, %lx)\n", src, dst); |
2726 | #endif | | 2728 | #endif |
2727 | s = (const void *)ALPHA_PHYS_TO_K0SEG(src); | | 2729 | s = (const void *)ALPHA_PHYS_TO_K0SEG(src); |
2728 | d = (void *)ALPHA_PHYS_TO_K0SEG(dst); | | 2730 | d = (void *)ALPHA_PHYS_TO_K0SEG(dst); |
2729 | memcpy(d, s, PAGE_SIZE); | | 2731 | memcpy(d, s, PAGE_SIZE); |
2730 | } | | 2732 | } |
2731 | | | 2733 | |
2732 | /* | | 2734 | /* |
2733 | * pmap_pageidlezero: [ INTERFACE ] | | 2735 | * pmap_pageidlezero: [ INTERFACE ] |
2734 | * | | 2736 | * |
2735 | * Page zero'er for the idle loop. Returns true if the | | 2737 | * Page zero'er for the idle loop. Returns true if the |
2736 | * page was zero'd, false if we aborted for some reason. | | 2738 | * page was zero'd, false if we aborted for some reason.
2737 | */ | | 2739 | */ |
2738 | bool | | 2740 | bool |
2739 | pmap_pageidlezero(paddr_t pa) | | 2741 | pmap_pageidlezero(paddr_t pa) |
2740 | { | | 2742 | { |
2741 | u_long *ptr; | | 2743 | u_long *ptr; |
2742 | int i, cnt = PAGE_SIZE / sizeof(u_long); | | 2744 | int i, cnt = PAGE_SIZE / sizeof(u_long); |
2743 | | | 2745 | |
2744 | for (i = 0, ptr = (u_long *) ALPHA_PHYS_TO_K0SEG(pa); i < cnt; i++) { | | 2746 | for (i = 0, ptr = (u_long *) ALPHA_PHYS_TO_K0SEG(pa); i < cnt; i++) { |
2745 | if (sched_curcpu_runnable_p()) { | | 2747 | if (sched_curcpu_runnable_p()) { |
2746 | /* | | 2748 | /* |
2747 | * An LWP has become ready. Abort now, | | 2749 | * An LWP has become ready. Abort now, |
2748 | * so we don't keep it waiting while we | | 2750 | * so we don't keep it waiting while we |
2749 | * finish zeroing the page. | | 2751 | * finish zeroing the page. |
2750 | */ | | 2752 | */ |
2751 | return (false); | | 2753 | return (false); |
2752 | } | | 2754 | } |
2753 | *ptr++ = 0; | | 2755 | *ptr++ = 0; |
2754 | } | | 2756 | } |
2755 | | | 2757 | |
2756 | return (true); | | 2758 | return (true); |
2757 | } | | 2759 | } |
2758 | | | 2760 | |
2759 | /* | | 2761 | /* |
2760 | * pmap_clear_modify: [ INTERFACE ] | | 2762 | * pmap_clear_modify: [ INTERFACE ] |
2761 | * | | 2763 | * |
2762 | * Clear the modify bits on the specified physical page. | | 2764 | * Clear the modify bits on the specified physical page. |
2763 | */ | | 2765 | */ |
2764 | bool | | 2766 | bool |
2765 | pmap_clear_modify(struct vm_page *pg) | | 2767 | pmap_clear_modify(struct vm_page *pg) |
2766 | { | | 2768 | { |
2767 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 2769 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
2768 | bool rv = false; | | 2770 | bool rv = false; |
2769 | kmutex_t *lock; | | 2771 | kmutex_t *lock; |
2770 | struct pmap_tlb_context tlbctx; | | 2772 | struct pmap_tlb_context tlbctx; |
2771 | | | 2773 | |
2772 | #ifdef DEBUG | | 2774 | #ifdef DEBUG |
2773 | if (pmapdebug & PDB_FOLLOW) | | 2775 | if (pmapdebug & PDB_FOLLOW) |
2774 | printf("pmap_clear_modify(%p)\n", pg); | | 2776 | printf("pmap_clear_modify(%p)\n", pg); |
2775 | #endif | | 2777 | #endif |
2776 | | | 2778 | |
2777 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); | | 2779 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); |
2778 | | | 2780 | |
2779 | PMAP_HEAD_TO_MAP_LOCK(); | | 2781 | PMAP_HEAD_TO_MAP_LOCK(); |
2780 | lock = pmap_pvh_lock(pg); | | 2782 | lock = pmap_pvh_lock(pg); |
2781 | mutex_enter(lock); | | 2783 | mutex_enter(lock); |
2782 | | | 2784 | |
2783 | if (md->pvh_listx & PGA_MODIFIED) { | | 2785 | if (md->pvh_listx & PGA_MODIFIED) { |
2784 | rv = true; | | 2786 | rv = true; |
2785 | pmap_changebit(pg, PG_FOW, ~0UL, &tlbctx); | | 2787 | pmap_changebit(pg, PG_FOW, ~0UL, &tlbctx); |
2786 | md->pvh_listx &= ~PGA_MODIFIED; | | 2788 | md->pvh_listx &= ~PGA_MODIFIED; |
2787 | } | | 2789 | } |
2788 | | | 2790 | |
2789 | mutex_exit(lock); | | 2791 | mutex_exit(lock); |
2790 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 2792 | PMAP_HEAD_TO_MAP_UNLOCK(); |
2791 | | | 2793 | |
2792 | pmap_tlb_shootnow(&tlbctx); | | 2794 | pmap_tlb_shootnow(&tlbctx); |
2793 | TLB_COUNT(reason_clear_modify); | | 2795 | TLB_COUNT(reason_clear_modify); |
2794 | | | 2796 | |
2795 | return (rv); | | 2797 | return (rv); |
2796 | } | | 2798 | } |
2797 | | | 2799 | |
2798 | /* | | 2800 | /* |
2799 | * pmap_clear_reference: [ INTERFACE ] | | 2801 | * pmap_clear_reference: [ INTERFACE ] |
2800 | * | | 2802 | * |
2801 | * Clear the reference bit on the specified physical page. | | 2803 | * Clear the reference bit on the specified physical page. |
2802 | */ | | 2804 | */ |
2803 | bool | | 2805 | bool |
2804 | pmap_clear_reference(struct vm_page *pg) | | 2806 | pmap_clear_reference(struct vm_page *pg) |
2805 | { | | 2807 | { |
2806 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 2808 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
2807 | bool rv = false; | | 2809 | bool rv = false; |
2808 | kmutex_t *lock; | | 2810 | kmutex_t *lock; |
2809 | struct pmap_tlb_context tlbctx; | | 2811 | struct pmap_tlb_context tlbctx; |
2810 | | | 2812 | |
2811 | #ifdef DEBUG | | 2813 | #ifdef DEBUG |
2812 | if (pmapdebug & PDB_FOLLOW) | | 2814 | if (pmapdebug & PDB_FOLLOW) |
2813 | printf("pmap_clear_reference(%p)\n", pg); | | 2815 | printf("pmap_clear_reference(%p)\n", pg); |
2814 | #endif | | 2816 | #endif |
2815 | | | 2817 | |
2816 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); | | 2818 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); |
2817 | | | 2819 | |
2818 | PMAP_HEAD_TO_MAP_LOCK(); | | 2820 | PMAP_HEAD_TO_MAP_LOCK(); |
2819 | lock = pmap_pvh_lock(pg); | | 2821 | lock = pmap_pvh_lock(pg); |
2820 | mutex_enter(lock); | | 2822 | mutex_enter(lock); |
2821 | | | 2823 | |
2822 | if (md->pvh_listx & PGA_REFERENCED) { | | 2824 | if (md->pvh_listx & PGA_REFERENCED) { |
2823 | rv = true; | | 2825 | rv = true; |
2824 | pmap_changebit(pg, PG_FOR | PG_FOW | PG_FOE, ~0UL, &tlbctx); | | 2826 | pmap_changebit(pg, PG_FOR | PG_FOW | PG_FOE, ~0UL, &tlbctx); |
2825 | md->pvh_listx &= ~PGA_REFERENCED; | | 2827 | md->pvh_listx &= ~PGA_REFERENCED; |
2826 | } | | 2828 | } |
2827 | | | 2829 | |
2828 | mutex_exit(lock); | | 2830 | mutex_exit(lock); |
2829 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 2831 | PMAP_HEAD_TO_MAP_UNLOCK(); |
2830 | | | 2832 | |
2831 | pmap_tlb_shootnow(&tlbctx); | | 2833 | pmap_tlb_shootnow(&tlbctx); |
2832 | TLB_COUNT(reason_clear_reference); | | 2834 | TLB_COUNT(reason_clear_reference); |
2833 | | | 2835 | |
2834 | return (rv); | | 2836 | return (rv); |
2835 | } | | 2837 | } |
2836 | | | 2838 | |
2837 | /* | | 2839 | /* |
2838 | * pmap_is_referenced: [ INTERFACE ] | | 2840 | * pmap_is_referenced: [ INTERFACE ] |
2839 | * | | 2841 | * |
2840 | * Return whether or not the specified physical page is referenced | | 2842 | * Return whether or not the specified physical page is referenced |
2841 | * by any physical maps. | | 2843 | * by any physical maps. |
2842 | */ | | 2844 | */ |
2843 | /* See <machine/pmap.h> */ | | 2845 | /* See <machine/pmap.h> */ |
2844 | | | 2846 | |
2845 | /* | | 2847 | /* |
2846 | * pmap_is_modified: [ INTERFACE ] | | 2848 | * pmap_is_modified: [ INTERFACE ] |
2847 | * | | 2849 | * |
2848 | * Return whether or not the specified physical page is modified | | 2850 | * Return whether or not the specified physical page is modified |
2849 | * by any physical maps. | | 2851 | * by any physical maps. |
2850 | */ | | 2852 | */ |
2851 | /* See <machine/pmap.h> */ | | 2853 | /* See <machine/pmap.h> */ |
2852 | | | 2854 | |
2853 | /* | | 2855 | /* |
2854 | * pmap_phys_address: [ INTERFACE ] | | 2856 | * pmap_phys_address: [ INTERFACE ] |
2855 | * | | 2857 | * |
2856 | * Return the physical address corresponding to the specified | | 2858 | * Return the physical address corresponding to the specified |
2857 | * cookie. Used by the device pager to decode a device driver's | | 2859 | * cookie. Used by the device pager to decode a device driver's |
2858 | * mmap entry point return value. | | 2860 | * mmap entry point return value. |
2859 | * | | 2861 | * |
2860 | * Note: no locking is necessary in this function. | | 2862 | * Note: no locking is necessary in this function. |
2861 | */ | | 2863 | */ |
2862 | paddr_t | | 2864 | paddr_t |
2863 | pmap_phys_address(paddr_t ppn) | | 2865 | pmap_phys_address(paddr_t ppn) |
2864 | { | | 2866 | { |
2865 | | | 2867 | |
2866 | return (alpha_ptob(ppn)); | | 2868 | return (alpha_ptob(ppn)); |
2867 | } | | 2869 | } |
2868 | | | 2870 | |
2869 | /* | | 2871 | /* |
2870 | * Miscellaneous support routines follow | | 2872 | * Miscellaneous support routines follow |
2871 | */ | | 2873 | */ |
2872 | | | 2874 | |
2873 | /* | | 2875 | /* |
2874 | * alpha_protection_init: | | 2876 | * alpha_protection_init: |
2875 | * | | 2877 | * |
2876 | * Initialize Alpha protection code array. | | 2878 | * Initialize Alpha protection code array. |
2877 | * | | 2879 | * |
2878 | * Note: no locking is necessary in this function. | | 2880 | * Note: no locking is necessary in this function. |
2879 | */ | | 2881 | */ |
2880 | static void | | 2882 | static void |
2881 | alpha_protection_init(void) | | 2883 | alpha_protection_init(void) |
2882 | { | | 2884 | { |
2883 | int prot, *kp, *up; | | 2885 | int prot, *kp, *up; |
2884 | | | 2886 | |
2885 | kp = protection_codes[0]; | | 2887 | kp = protection_codes[0]; |
2886 | up = protection_codes[1]; | | 2888 | up = protection_codes[1]; |
2887 | | | 2889 | |
2888 | for (prot = 0; prot < 8; prot++) { | | 2890 | for (prot = 0; prot < 8; prot++) { |
2889 | kp[prot] = PG_ASM; | | 2891 | kp[prot] = PG_ASM; |
2890 | up[prot] = 0; | | 2892 | up[prot] = 0; |
2891 | | | 2893 | |
2892 | if (prot & VM_PROT_READ) { | | 2894 | if (prot & VM_PROT_READ) { |
2893 | kp[prot] |= PG_KRE; | | 2895 | kp[prot] |= PG_KRE; |
2894 | up[prot] |= PG_KRE | PG_URE; | | 2896 | up[prot] |= PG_KRE | PG_URE; |
2895 | } | | 2897 | } |
2896 | if (prot & VM_PROT_WRITE) { | | 2898 | if (prot & VM_PROT_WRITE) { |
2897 | kp[prot] |= PG_KWE; | | 2899 | kp[prot] |= PG_KWE; |
2898 | up[prot] |= PG_KWE | PG_UWE; | | 2900 | up[prot] |= PG_KWE | PG_UWE; |
2899 | } | | 2901 | } |
2900 | if (prot & VM_PROT_EXECUTE) { | | 2902 | if (prot & VM_PROT_EXECUTE) { |
2901 | kp[prot] |= PG_EXEC | PG_KRE; | | 2903 | kp[prot] |= PG_EXEC | PG_KRE; |
2902 | up[prot] |= PG_EXEC | PG_KRE | PG_URE; | | 2904 | up[prot] |= PG_EXEC | PG_KRE | PG_URE; |
2903 | } else { | | 2905 | } else { |
2904 | kp[prot] |= PG_FOE; | | 2906 | kp[prot] |= PG_FOE; |
2905 | up[prot] |= PG_FOE; | | 2907 | up[prot] |= PG_FOE; |
2906 | } | | 2908 | } |
2907 | } | | 2909 | } |
2908 | } | | 2910 | } |
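/*
 * Illustrative sketch (editorial, not in the source): the table built
 * above is indexed first by kernel (0) / user (1) pmap and then by the
 * low three VM_PROT_* bits, so a pte_prot()-style lookup (the exact
 * helper name here is hypothetical) is just:
 */
static inline int
example_prot_lookup(pmap_t pm, vm_prot_t prot)
{
	return protection_codes[pm == pmap_kernel() ? 0 : 1][prot & 7];
}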
2909 | | | 2911 | |
2910 | /* | | 2912 | /* |
2911 | * pmap_remove_mapping: | | 2913 | * pmap_remove_mapping: |
2912 | * | | 2914 | * |
2913 | * Invalidate a single page denoted by pmap/va. | | 2915 | * Invalidate a single page denoted by pmap/va. |
2914 | * | | 2916 | * |
2915 | * If (pte != NULL), it is the already computed PTE for the page. | | 2917 | * If (pte != NULL), it is the already computed PTE for the page. |
2916 | * | | 2918 | * |
2917 | * Note: locking in this function is complicated by the fact | | 2919 | * Note: locking in this function is complicated by the fact |
2918 | * that we can be called when the PV list is already locked | | 2920 | * that we can be called when the PV list is already locked
2919 | * (by pmap_page_protect()). In this case, the caller must be | | 2921 | * (by pmap_page_protect()). In this case, the caller must be
2920 | * careful to get the next PV entry while we remove this entry | | 2922 | * careful to get the next PV entry while we remove this entry |
2921 | * from beneath it. We assume that the pmap itself is already | | 2923 | * from beneath it. We assume that the pmap itself is already |
2922 | * locked; dolock applies only to the PV list. | | 2924 | * locked; dolock applies only to the PV list. |
2923 | * | | 2925 | * |
2924 | * Returns important PTE bits that the caller needs to check for | | 2926 | * Returns important PTE bits that the caller needs to check for |
2925 | * TLB / I-stream invalidation purposes. | | 2927 | * TLB / I-stream invalidation purposes. |
2926 | */ | | 2928 | */ |
2927 | static pt_entry_t | | 2929 | static pt_entry_t |
2928 | pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, | | 2930 | pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, |
2929 | bool dolock, pv_entry_t *opvp, struct pmap_tlb_context * const tlbctx) | | 2931 | bool dolock, pv_entry_t *opvp, struct pmap_tlb_context * const tlbctx) |
2930 | { | | 2932 | { |
2931 | pt_entry_t opte; | | 2933 | pt_entry_t opte; |
2932 | paddr_t pa; | | 2934 | paddr_t pa; |
2933 | struct vm_page *pg; /* if != NULL, page is managed */ | | 2935 | struct vm_page *pg; /* if != NULL, page is managed */ |
2934 | | | 2936 | |
2935 | #ifdef DEBUG | | 2937 | #ifdef DEBUG |
2936 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) | | 2938 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) |
2937 | printf("pmap_remove_mapping(%p, %lx, %p, %d, %p)\n", | | 2939 | printf("pmap_remove_mapping(%p, %lx, %p, %d, %p)\n", |
2938 | pmap, va, pte, dolock, opvp); | | 2940 | pmap, va, pte, dolock, opvp); |
2939 | #endif | | 2941 | #endif |
2940 | | | 2942 | |
2941 | /* | | 2943 | /* |
2942 | * PTE not provided, compute it from pmap and va. | | 2944 | * PTE not provided, compute it from pmap and va. |
2943 | */ | | 2945 | */ |
2944 | if (pte == NULL) { | | 2946 | if (pte == NULL) { |
2945 | pte = pmap_l3pte(pmap_lev1map(pmap), va, NULL); | | 2947 | pte = pmap_l3pte(pmap_lev1map(pmap), va, NULL); |
2946 | if (pmap_pte_v(pte) == 0) | | 2948 | if (pmap_pte_v(pte) == 0) |
2947 | return 0; | | 2949 | return 0; |
2948 | } | | 2950 | } |
2949 | | | 2951 | |
2950 | opte = *pte; | | 2952 | opte = *pte; |
2951 | | | 2953 | |
2952 | pa = PG_PFNUM(opte) << PGSHIFT; | | 2954 | pa = PG_PFNUM(opte) << PGSHIFT; |
2953 | | | 2955 | |
2954 | /* | | 2956 | /* |
2955 | * Update statistics | | 2957 | * Update statistics |
2956 | */ | | 2958 | */ |
2957 | if (pmap_pte_w(pte)) | | 2959 | if (pmap_pte_w(pte)) |
2958 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); | | 2960 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); |
2959 | PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1); | | 2961 | PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1); |
2960 | | | 2962 | |
2961 | /* | | 2963 | /* |
2962 | * Invalidate the PTE after saving the reference/modify info. | | 2964 | * Invalidate the PTE after saving the reference/modify info.
2963 | */ | | 2965 | */ |
2964 | #ifdef DEBUG | | 2966 | #ifdef DEBUG |
2965 | if (pmapdebug & PDB_REMOVE) | | 2967 | if (pmapdebug & PDB_REMOVE) |
2966 | printf("remove: invalidating pte at %p\n", pte); | | 2968 | printf("remove: invalidating pte at %p\n", pte); |
2967 | #endif | | 2969 | #endif |
2968 | atomic_store_relaxed(pte, PG_NV); | | 2970 | atomic_store_relaxed(pte, PG_NV); |
2969 | | | 2971 | |
2970 | /* | | 2972 | /* |
2971 | * If we're removing a user mapping, check to see if we | | 2973 | * If we're removing a user mapping, check to see if we |
2972 | * can free page table pages. | | 2974 | * can free page table pages. |
2973 | */ | | 2975 | */ |
2974 | if (pmap != pmap_kernel()) { | | 2976 | if (pmap != pmap_kernel()) { |
2975 | /* | | 2977 | /* |
2976 | * Delete the reference on the level 3 table. It will | | 2978 | * Delete the reference on the level 3 table. It will |
2977 | * delete references on the level 2 and 1 tables as | | 2979 | * delete references on the level 2 and 1 tables as |
2978 | * appropriate. | | 2980 | * appropriate. |
2979 | */ | | 2981 | */ |
2980 | pmap_l3pt_delref(pmap, va, pte, tlbctx); | | 2982 | pmap_l3pt_delref(pmap, va, pte, tlbctx); |
2981 | } | | 2983 | } |
2982 | | | 2984 | |
2983 | if (opte & PG_PVLIST) { | | 2985 | if (opte & PG_PVLIST) { |
2984 | /* | | 2986 | /* |
2985 | * Remove it from the PV table. | | 2987 | * Remove it from the PV table. |
2986 | */ | | 2988 | */ |
2987 | pg = PHYS_TO_VM_PAGE(pa); | | 2989 | pg = PHYS_TO_VM_PAGE(pa); |
2988 | KASSERT(pg != NULL); | | 2990 | KASSERT(pg != NULL); |
2989 | pmap_pv_remove(pmap, pg, va, dolock, opvp); | | 2991 | pmap_pv_remove(pmap, pg, va, dolock, opvp); |
2990 | KASSERT(opvp == NULL || *opvp != NULL); | | 2992 | KASSERT(opvp == NULL || *opvp != NULL); |
2991 | } | | 2993 | } |
2992 | | | 2994 | |
2993 | return opte & (PG_V | PG_ASM | PG_EXEC); | | 2995 | return opte & (PG_V | PG_ASM | PG_EXEC); |
2994 | } | | 2996 | } |
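/*
 * Hedged sketch (editorial, not in the source): the PG_V | PG_ASM |
 * PG_EXEC bits returned above are what a caller needs to decide on
 * TLB and I-stream invalidation; a caller might take this shape
 * (hypothetical, for illustration only):
 *
 *	const pt_entry_t bits =
 *	    pmap_remove_mapping(pmap, va, pte, true, NULL, &tlbctx);
 *	if (bits & PG_V)
 *		pmap_tlb_shootdown(pmap, va, bits, &tlbctx);
 */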
2995 | | | 2997 | |
2996 | /* | | 2998 | /* |
2997 | * pmap_changebit: | | 2999 | * pmap_changebit: |
2998 | * | | 3000 | * |
2999 | * Set or clear the specified PTE bits for all mappings on the | | 3001 | * Set or clear the specified PTE bits for all mappings on the |
3000 | * specified page. | | 3002 | * specified page. |
3001 | * | | 3003 | * |
3002 | * Note: we assume that the pv_head is already locked, and that | | 3004 | * Note: we assume that the pv_head is already locked, and that |
3003 | * the caller has acquired a PV->pmap mutex so that we can lock | | 3005 | * the caller has acquired a PV->pmap mutex so that we can lock |
3004 | * the pmaps as we encounter them. | | 3006 | * the pmaps as we encounter them. |
3005 | */ | | 3007 | */ |
3006 | static void | | 3008 | static void |
3007 | pmap_changebit(struct vm_page *pg, pt_entry_t set, pt_entry_t mask, | | 3009 | pmap_changebit(struct vm_page *pg, pt_entry_t set, pt_entry_t mask, |
3008 | struct pmap_tlb_context * const tlbctx) | | 3010 | struct pmap_tlb_context * const tlbctx) |
3009 | { | | 3011 | { |
3010 | pv_entry_t pv; | | 3012 | pv_entry_t pv; |
3011 | pt_entry_t *pte, npte, opte; | | 3013 | pt_entry_t *pte, npte, opte; |
3012 | | | 3014 | |
3013 | #ifdef DEBUG | | 3015 | #ifdef DEBUG |
3014 | if (pmapdebug & PDB_BITS) | | 3016 | if (pmapdebug & PDB_BITS) |
3015 | printf("pmap_changebit(%p, 0x%lx, 0x%lx)\n", | | 3017 | printf("pmap_changebit(%p, 0x%lx, 0x%lx)\n", |
3016 | pg, set, mask); | | 3018 | pg, set, mask); |
3017 | #endif | | 3019 | #endif |
3018 | | | 3020 | |
3019 | /* | | 3021 | /* |
3020 | * Loop over all current mappings, setting/clearing as appropriate. | | 3022 | * Loop over all current mappings, setting/clearing as appropriate.
3021 | */ | | 3023 | */ |
3022 | for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) { | | 3024 | for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) { |
3023 | PMAP_LOCK(pv->pv_pmap); | | 3025 | PMAP_LOCK(pv->pv_pmap); |
3024 | | | 3026 | |
3025 | pte = pv->pv_pte; | | 3027 | pte = pv->pv_pte; |
3026 | | | 3028 | |
3027 | opte = atomic_load_relaxed(pte); | | 3029 | opte = atomic_load_relaxed(pte); |
3028 | npte = (opte | set) & mask; | | 3030 | npte = (opte | set) & mask; |
3029 | if (npte != opte) { | | 3031 | if (npte != opte) { |
3030 | atomic_store_relaxed(pte, npte); | | 3032 | atomic_store_relaxed(pte, npte); |
3031 | pmap_tlb_shootdown_pv(pv->pv_pmap, pv->pv_va, | | 3033 | pmap_tlb_shootdown_pv(pv->pv_pmap, pv->pv_va, |
3032 | opte, tlbctx); | | 3034 | opte, tlbctx); |
3033 | } | | 3035 | } |
3034 | PMAP_UNLOCK(pv->pv_pmap); | | 3036 | PMAP_UNLOCK(pv->pv_pmap); |
3035 | } | | 3037 | } |
3036 | } | | 3038 | } |
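/*
 * Worked example (editorial, not in the source): the single update
 * rule is
 *
 *	npte = (opte | set) & mask;
 *
 * so `set' names bits to turn on and a complemented `mask' names bits
 * to turn off.  In this file, pmap_clear_modify() passes
 * (set = PG_FOW, mask = ~0UL) to re-arm write-fault emulation, and
 * pmap_emulate_reference() passes (set = 0, mask = ~faultoff) to clear
 * the fault-on bits it has just serviced.
 */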
3037 | | | 3039 | |
3038 | /* | | 3040 | /* |
3039 | * pmap_emulate_reference: | | 3041 | * pmap_emulate_reference: |
3040 | * | | 3042 | * |
3041 | * Emulate reference and/or modified bit hits. | | 3043 | * Emulate reference and/or modified bit hits. |
3042 | * Return 1 if this was an execute fault on a non-exec mapping, | | 3044 | * Return 1 if this was an execute fault on a non-exec mapping, |
3043 | * otherwise return 0. | | 3045 | * otherwise return 0. |
3044 | */ | | 3046 | */ |
3045 | int | | 3047 | int |
3046 | pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int type) | | 3048 | pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int type) |
3047 | { | | 3049 | { |
3048 | struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap; | | 3050 | struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap; |
3049 | pt_entry_t faultoff, *pte; | | 3051 | pt_entry_t faultoff, *pte; |
3050 | struct vm_page *pg; | | 3052 | struct vm_page *pg; |
3051 | paddr_t pa; | | 3053 | paddr_t pa; |
3052 | bool didlock = false; | | 3054 | bool didlock = false; |
3053 | bool exec = false; | | 3055 | bool exec = false; |
3054 | kmutex_t *lock; | | 3056 | kmutex_t *lock; |
3055 | | | 3057 | |
3056 | #ifdef DEBUG | | 3058 | #ifdef DEBUG |
3057 | if (pmapdebug & PDB_FOLLOW) | | 3059 | if (pmapdebug & PDB_FOLLOW) |
3058 | printf("pmap_emulate_reference: %p, 0x%lx, %d, %d\n", | | 3060 | printf("pmap_emulate_reference: %p, 0x%lx, %d, %d\n", |
3059 | l, v, user, type); | | 3061 | l, v, user, type); |
3060 | #endif | | 3062 | #endif |
3061 | | | 3063 | |
3062 | /* | | 3064 | /* |
3063 | * Convert process and virtual address to physical address. | | 3065 | * Convert process and virtual address to physical address. |
3064 | */ | | 3066 | */ |
3065 | if (v >= VM_MIN_KERNEL_ADDRESS) { | | 3067 | if (v >= VM_MIN_KERNEL_ADDRESS) { |
3066 | if (user) | | 3068 | if (user) |
3067 | panic("pmap_emulate_reference: user ref to kernel"); | | 3069 | panic("pmap_emulate_reference: user ref to kernel"); |
3068 | /* | | 3070 | /* |
3069 | * No need to lock here; kernel PT pages never go away. | | 3071 | * No need to lock here; kernel PT pages never go away. |
3070 | */ | | 3072 | */ |
3071 | pte = PMAP_KERNEL_PTE(v); | | 3073 | pte = PMAP_KERNEL_PTE(v); |
3072 | } else { | | 3074 | } else { |
3073 | #ifdef DIAGNOSTIC | | 3075 | #ifdef DIAGNOSTIC |
3074 | if (l == NULL) | | 3076 | if (l == NULL) |
3075 | panic("pmap_emulate_reference: bad proc"); | | 3077 | panic("pmap_emulate_reference: bad proc"); |
3076 | if (l->l_proc->p_vmspace == NULL) | | 3078 | if (l->l_proc->p_vmspace == NULL) |
3077 | panic("pmap_emulate_reference: bad p_vmspace"); | | 3079 | panic("pmap_emulate_reference: bad p_vmspace"); |
3078 | #endif | | 3080 | #endif |
3079 | PMAP_LOCK(pmap); | | 3081 | PMAP_LOCK(pmap); |
3080 | didlock = true; | | 3082 | didlock = true; |
3081 | pte = pmap_l3pte(pmap_lev1map(pmap), v, NULL); | | 3083 | pte = pmap_l3pte(pmap_lev1map(pmap), v, NULL); |
3082 | /* | | 3084 | /* |
3083 | * We'll unlock below where we're done with the PTE. | | 3085 | * We'll unlock below where we're done with the PTE. |
3084 | */ | | 3086 | */ |
3085 | } | | 3087 | } |
3086 | exec = pmap_pte_exec(pte); | | 3088 | exec = pmap_pte_exec(pte); |
3087 | if (!exec && type == ALPHA_MMCSR_FOE) { | | 3089 | if (!exec && type == ALPHA_MMCSR_FOE) { |
3088 | if (didlock) | | 3090 | if (didlock) |
3089 | PMAP_UNLOCK(pmap); | | 3091 | PMAP_UNLOCK(pmap); |
3090 | return (1); | | 3092 | return (1); |
3091 | } | | 3093 | } |
3092 | #ifdef DEBUG | | 3094 | #ifdef DEBUG |
3093 | if (pmapdebug & PDB_FOLLOW) { | | 3095 | if (pmapdebug & PDB_FOLLOW) { |
3094 | printf("\tpte = %p, ", pte); | | 3096 | printf("\tpte = %p, ", pte); |
3095 | printf("*pte = 0x%lx\n", *pte); | | 3097 | printf("*pte = 0x%lx\n", *pte); |
3096 | } | | 3098 | } |
3097 | #endif | | 3099 | #endif |
3098 | | | 3100 | |
3099 | pa = pmap_pte_pa(pte); | | 3101 | pa = pmap_pte_pa(pte); |
3100 | | | 3102 | |
3101 | /* | | 3103 | /* |
3102 | * We're now done with the PTE. If it was a user pmap, unlock | | 3104 | * We're now done with the PTE. If it was a user pmap, unlock |
3103 | * it now. | | 3105 | * it now. |
3104 | */ | | 3106 | */ |
3105 | if (didlock) | | 3107 | if (didlock) |
3106 | PMAP_UNLOCK(pmap); | | 3108 | PMAP_UNLOCK(pmap); |
3107 | | | 3109 | |
3108 | #ifdef DEBUG | | 3110 | #ifdef DEBUG |
3109 | if (pmapdebug & PDB_FOLLOW) | | 3111 | if (pmapdebug & PDB_FOLLOW) |
3110 | printf("\tpa = 0x%lx\n", pa); | | 3112 | printf("\tpa = 0x%lx\n", pa); |
3111 | #endif | | 3113 | #endif |
3112 | #ifdef DIAGNOSTIC | | 3114 | #ifdef DIAGNOSTIC |
3113 | if (!uvm_pageismanaged(pa)) | | 3115 | if (!uvm_pageismanaged(pa)) |
3114 | panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): " | | 3116 | panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): " |
3115 | "pa 0x%lx not managed", l, v, user, type, pa); | | 3117 | "pa 0x%lx not managed", l, v, user, type, pa); |
3116 | #endif | | 3118 | #endif |
3117 | | | 3119 | |
3118 | /* | | 3120 | /* |
3119 | * Twiddle the appropriate bits to reflect the reference | | 3121 | * Twiddle the appropriate bits to reflect the reference |
3120 | * and/or modification. | | 3122 | * and/or modification.
3121 | * | | 3123 | * |
3122 | * The rules: | | 3124 | * The rules: |
3123 | * (1) always mark page as used, and | | 3125 | * (1) always mark page as used, and |
3124 | * (2) if it was a write fault, mark page as modified. | | 3126 | * (2) if it was a write fault, mark page as modified. |
3125 | */ | | 3127 | */ |
3126 | pg = PHYS_TO_VM_PAGE(pa); | | 3128 | pg = PHYS_TO_VM_PAGE(pa); |
3127 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 3129 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
3128 | struct pmap_tlb_context tlbctx; | | 3130 | struct pmap_tlb_context tlbctx; |
3129 | | | 3131 | |
3130 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); | | 3132 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); |
3131 | | | 3133 | |
3132 | PMAP_HEAD_TO_MAP_LOCK(); | | 3134 | PMAP_HEAD_TO_MAP_LOCK(); |
3133 | lock = pmap_pvh_lock(pg); | | 3135 | lock = pmap_pvh_lock(pg); |
3134 | mutex_enter(lock); | | 3136 | mutex_enter(lock); |
3135 | | | 3137 | |
3136 | if (type == ALPHA_MMCSR_FOW) { | | 3138 | if (type == ALPHA_MMCSR_FOW) { |
3137 | md->pvh_listx |= (PGA_REFERENCED|PGA_MODIFIED); | | 3139 | md->pvh_listx |= (PGA_REFERENCED|PGA_MODIFIED); |
3138 | faultoff = PG_FOR | PG_FOW; | | 3140 | faultoff = PG_FOR | PG_FOW; |
3139 | } else { | | 3141 | } else { |
3140 | md->pvh_listx |= PGA_REFERENCED; | | 3142 | md->pvh_listx |= PGA_REFERENCED; |
3141 | faultoff = PG_FOR; | | 3143 | faultoff = PG_FOR; |
3142 | if (exec) { | | 3144 | if (exec) { |
3143 | faultoff |= PG_FOE; | | 3145 | faultoff |= PG_FOE; |
3144 | } | | 3146 | } |
3145 | } | | 3147 | } |
3146 | pmap_changebit(pg, 0, ~faultoff, &tlbctx); | | 3148 | pmap_changebit(pg, 0, ~faultoff, &tlbctx); |
3147 | | | 3149 | |
3148 | mutex_exit(lock); | | 3150 | mutex_exit(lock); |
3149 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 3151 | PMAP_HEAD_TO_MAP_UNLOCK(); |
3150 | | | 3152 | |
3151 | pmap_tlb_shootnow(&tlbctx); | | 3153 | pmap_tlb_shootnow(&tlbctx); |
3152 | TLB_COUNT(reason_emulate_reference); | | 3154 | TLB_COUNT(reason_emulate_reference); |
3153 | | | 3155 | |
3154 | return (0); | | 3156 | return (0); |
3155 | } | | 3157 | } |
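/*
 * Worked example (editorial, not in the source): the attribute and
 * fault-bit selection above follows this table; PG_FOE is cleared only
 * for executable mappings, so an instruction fetch from a non-exec
 * page keeps faulting (and is rejected earlier via the return of 1):
 *
 *	fault type		attrs set		faultoff (cleared)
 *	ALPHA_MMCSR_FOW		REFERENCED+MODIFIED	PG_FOR | PG_FOW
 *	ALPHA_MMCSR_FOR/FOE	REFERENCED		PG_FOR [| PG_FOE]
 */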
3156 | | | 3158 | |
3157 | #ifdef DEBUG | | 3159 | #ifdef DEBUG |
3158 | /* | | 3160 | /* |
3159 | * pmap_pv_dump: | | 3161 | * pmap_pv_dump: |
3160 | * | | 3162 | * |
3161 | * Dump the physical->virtual data for the specified page. | | 3163 | * Dump the physical->virtual data for the specified page. |
3162 | */ | | 3164 | */ |
3163 | void | | 3165 | void |
3164 | pmap_pv_dump(paddr_t pa) | | 3166 | pmap_pv_dump(paddr_t pa) |
3165 | { | | 3167 | { |
3166 | struct vm_page *pg; | | 3168 | struct vm_page *pg; |
3167 | struct vm_page_md *md; | | 3169 | struct vm_page_md *md; |
3168 | pv_entry_t pv; | | 3170 | pv_entry_t pv; |
3169 | kmutex_t *lock; | | 3171 | kmutex_t *lock; |
3170 | | | 3172 | |
3171 | pg = PHYS_TO_VM_PAGE(pa); | | 3173 | pg = PHYS_TO_VM_PAGE(pa); |
3172 | md = VM_PAGE_TO_MD(pg); | | 3174 | md = VM_PAGE_TO_MD(pg); |
3173 | | | 3175 | |
3174 | lock = pmap_pvh_lock(pg); | | 3176 | lock = pmap_pvh_lock(pg); |
3175 | mutex_enter(lock); | | 3177 | mutex_enter(lock); |
3176 | | | 3178 | |
3177 | printf("pa 0x%lx (attrs = 0x%x):\n", pa, md->pvh_listx & PGA_ATTRS); | | 3179 | printf("pa 0x%lx (attrs = 0x%x):\n", pa, md->pvh_listx & PGA_ATTRS); |
3178 | for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) | | 3180 | for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) |
3179 | printf(" pmap %p, va 0x%lx\n", | | 3181 | printf(" pmap %p, va 0x%lx\n", |
3180 | pv->pv_pmap, pv->pv_va); | | 3182 | pv->pv_pmap, pv->pv_va); |
3181 | printf("\n"); | | 3183 | printf("\n"); |
3182 | | | 3184 | |
3183 | mutex_exit(lock); | | 3185 | mutex_exit(lock); |
3184 | } | | 3186 | } |
3185 | #endif | | 3187 | #endif |
3186 | | | 3188 | |
3187 | /* | | 3189 | /* |
3188 | * vtophys: | | 3190 | * vtophys: |
3189 | * | | 3191 | * |
3190 | * Return the physical address corresponding to the K0SEG or | | 3192 | * Return the physical address corresponding to the K0SEG or |
3191 | * K1SEG address provided. | | 3193 | * K1SEG address provided. |
3192 | * | | 3194 | * |
3193 | * Note: no locking is necessary in this function. | | 3195 | * Note: no locking is necessary in this function. |
3194 | */ | | 3196 | */ |
3195 | static bool | | 3197 | static bool |
3196 | vtophys_internal(vaddr_t const vaddr, paddr_t * const pap) | | 3198 | vtophys_internal(vaddr_t const vaddr, paddr_t * const pap) |
3197 | { | | 3199 | { |
3198 | paddr_t pa; | | 3200 | paddr_t pa; |
3199 | | | 3201 | |
3200 | KASSERT(vaddr >= ALPHA_K0SEG_BASE); | | 3202 | KASSERT(vaddr >= ALPHA_K0SEG_BASE); |
3201 | | | 3203 | |
3202 | if (vaddr <= ALPHA_K0SEG_END) { | | 3204 | if (vaddr <= ALPHA_K0SEG_END) { |
3203 | pa = ALPHA_K0SEG_TO_PHYS(vaddr); | | 3205 | pa = ALPHA_K0SEG_TO_PHYS(vaddr); |
3204 | } else { | | 3206 | } else { |
3205 | pt_entry_t * const pte = PMAP_KERNEL_PTE(vaddr); | | 3207 | pt_entry_t * const pte = PMAP_KERNEL_PTE(vaddr); |
3206 | if (__predict_false(! pmap_pte_v(pte))) { | | 3208 | if (__predict_false(! pmap_pte_v(pte))) { |
3207 | return false; | | 3209 | return false; |
3208 | } | | 3210 | } |
3209 | pa = pmap_pte_pa(pte) | (vaddr & PGOFSET); | | 3211 | pa = pmap_pte_pa(pte) | (vaddr & PGOFSET); |
3210 | } | | 3212 | } |
3211 | | | 3213 | |
3212 | if (pap != NULL) { | | 3214 | if (pap != NULL) { |
3213 | *pap = pa; | | 3215 | *pap = pa; |
3214 | } | | 3216 | } |
3215 | | | 3217 | |
3216 | return true; | | 3218 | return true; |
3217 | } | | 3219 | } |
3218 | | | 3220 | |
3219 | paddr_t | | 3221 | paddr_t |
3220 | vtophys(vaddr_t const vaddr) | | 3222 | vtophys(vaddr_t const vaddr) |
3221 | { | | 3223 | { |
3222 | paddr_t pa; | | 3224 | paddr_t pa; |
3223 | | | 3225 | |
3224 | if (__predict_false(! vtophys_internal(vaddr, &pa))) | | 3226 | if (__predict_false(! vtophys_internal(vaddr, &pa))) |
3225 | pa = 0; | | 3227 | pa = 0; |
3226 | return pa; | | 3228 | return pa; |
3227 | } | | 3229 | } |
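/*
 * Hedged note (editorial, not in the source): K0SEG is a direct window
 * onto physical memory, so its translation is the pure arithmetic
 *
 *	pa = ALPHA_K0SEG_TO_PHYS(vaddr)
 *
 * (in effect stripping the segment base bits); only K1SEG addresses
 * fall through to the kernel-PTE lookup in vtophys_internal(), which
 * is why vtophys() needs no locking in either case.
 */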
3228 | | | 3230 | |
3229 | /******************** pv_entry management ********************/ | | 3231 | /******************** pv_entry management ********************/ |
3230 | | | 3232 | |
3231 | /* | | 3233 | /* |
3232 | * pmap_pv_enter: | | 3234 | * pmap_pv_enter: |
3233 | * | | 3235 | * |
3234 | * Add a physical->virtual entry to the pv_table. | | 3236 | * Add a physical->virtual entry to the pv_table. |
3235 | */ | | 3237 | */ |
3236 | static int | | 3238 | static int |
3237 | pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte, | | 3239 | pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte, |
3238 | bool dolock, pv_entry_t newpv) | | 3240 | bool dolock, pv_entry_t newpv) |
3239 | { | | 3241 | { |
3240 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 3242 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
3241 | kmutex_t *lock; | | 3243 | kmutex_t *lock; |
3242 | | | 3244 | |
3243 | /* | | 3245 | /* |
3244 | * Allocate and fill in the new pv_entry. | | 3246 | * Allocate and fill in the new pv_entry. |
3245 | */ | | 3247 | */ |
3246 | if (newpv == NULL) { | | 3248 | if (newpv == NULL) { |
3247 | newpv = pmap_pv_alloc(); | | 3249 | newpv = pmap_pv_alloc(); |
3248 | if (newpv == NULL) | | 3250 | if (newpv == NULL) |
3249 | return ENOMEM; | | 3251 | return ENOMEM; |
3250 | } | | 3252 | } |
3251 | newpv->pv_va = va; | | 3253 | newpv->pv_va = va; |
3252 | newpv->pv_pmap = pmap; | | 3254 | newpv->pv_pmap = pmap; |
3253 | newpv->pv_pte = pte; | | 3255 | newpv->pv_pte = pte; |
3254 | | | 3256 | |
3255 | if (dolock) { | | 3257 | if (dolock) { |
3256 | lock = pmap_pvh_lock(pg); | | 3258 | lock = pmap_pvh_lock(pg); |
3257 | mutex_enter(lock); | | 3259 | mutex_enter(lock); |
3258 | } | | 3260 | } |
3259 | | | 3261 | |
3260 | #ifdef DEBUG | | 3262 | #ifdef DEBUG |
3261 | { | | 3263 | { |
3262 | pv_entry_t pv; | | 3264 | pv_entry_t pv; |
3263 | /* | | 3265 | /* |
3264 | * Make sure the entry doesn't already exist. | | 3266 | * Make sure the entry doesn't already exist. |
3265 | */ | | 3267 | */ |
3266 | for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) { | | 3268 | for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) { |
3267 | if (pmap == pv->pv_pmap && va == pv->pv_va) { | | 3269 | if (pmap == pv->pv_pmap && va == pv->pv_va) { |
3268 | printf("pmap = %p, va = 0x%lx\n", pmap, va); | | 3270 | printf("pmap = %p, va = 0x%lx\n", pmap, va); |
3269 | panic("pmap_pv_enter: already in pv table"); | | 3271 | panic("pmap_pv_enter: already in pv table"); |
3270 | } | | 3272 | } |
3271 | } | | 3273 | } |
3272 | } | | 3274 | } |
3273 | #endif | | 3275 | #endif |
3274 | | | 3276 | |
3275 | /* | | 3277 | /* |
3276 | * ...and put it in the list. | | 3278 | * ...and put it in the list. |
3277 | */ | | 3279 | */ |
3278 | uintptr_t const attrs = md->pvh_listx & PGA_ATTRS; | | 3280 | uintptr_t const attrs = md->pvh_listx & PGA_ATTRS; |
3279 | newpv->pv_next = (struct pv_entry *)(md->pvh_listx & ~PGA_ATTRS); | | 3281 | newpv->pv_next = (struct pv_entry *)(md->pvh_listx & ~PGA_ATTRS); |
3280 | md->pvh_listx = (uintptr_t)newpv | attrs; | | 3282 | md->pvh_listx = (uintptr_t)newpv | attrs; |
| | | 3283 | LIST_INSERT_HEAD(&pmap->pm_pvents, newpv, pv_link); |
3281 | | | 3284 | |
3282 | if (dolock) { | | 3285 | if (dolock) { |
3283 | mutex_exit(lock); | | 3286 | mutex_exit(lock); |
3284 | } | | 3287 | } |
3285 | | | 3288 | |
3286 | return 0; | | 3289 | return 0; |
3287 | } | | 3290 | } |
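/*
 * Editor's note: the right-hand side of this diff additionally threads
 * each new pv_entry onto a per-pmap list (pm_pvents) via LIST_INSERT_HEAD.
 * A sketch of what that presumably enables -- walking every mapping
 * owned by one pmap without consulting the per-page pv lists:
 */
#if 0	/* illustrative only */
	pv_entry_t it;
	LIST_FOREACH(it, &pmap->pm_pvents, pv_link) {
		printf("pmap %p maps va 0x%lx via pte %p\n",
		    it->pv_pmap, it->pv_va, it->pv_pte);
	}
#endif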
3288 | | | 3291 | |
3289 | /* | | 3292 | /* |
3290 | * pmap_pv_remove: | | 3293 | * pmap_pv_remove: |
3291 | * | | 3294 | * |
3292 | * Remove a physical->virtual entry from the pv_table. | | 3295 | * Remove a physical->virtual entry from the pv_table. |
3293 | */ | | 3296 | */ |
3294 | static void | | 3297 | static void |
3295 | pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock, | | 3298 | pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock, |
3296 | pv_entry_t *opvp) | | 3299 | pv_entry_t *opvp) |
3297 | { | | 3300 | { |
3298 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 3301 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
3299 | pv_entry_t pv, *pvp; | | 3302 | pv_entry_t pv, *pvp; |
3300 | kmutex_t *lock; | | 3303 | kmutex_t *lock; |
3301 | | | 3304 | |
3302 | if (dolock) { | | 3305 | if (dolock) { |
3303 | lock = pmap_pvh_lock(pg); | | 3306 | lock = pmap_pvh_lock(pg); |
3304 | mutex_enter(lock); | | 3307 | mutex_enter(lock); |
3305 | } else { | | 3308 | } else { |
3306 | lock = NULL; /* XXX stupid gcc */ | | 3309 | lock = NULL; /* XXX stupid gcc */ |
3307 | } | | 3310 | } |
3308 | | | 3311 | |
3309 | /* | | 3312 | /* |
3310 | * Find the entry to remove. | | 3313 | * Find the entry to remove. |
3311 | */ | | 3314 | */ |
3312 | for (pvp = (struct pv_entry **)&md->pvh_listx, pv = VM_MDPAGE_PVS(pg); | | 3315 | for (pvp = (struct pv_entry **)&md->pvh_listx, pv = VM_MDPAGE_PVS(pg); |
3313 | pv != NULL; pvp = &pv->pv_next, pv = *pvp) | | 3316 | pv != NULL; pvp = &pv->pv_next, pv = *pvp) |
3314 | if (pmap == pv->pv_pmap && va == pv->pv_va) | | 3317 | if (pmap == pv->pv_pmap && va == pv->pv_va) |
3315 | break; | | 3318 | break; |
3316 | | | 3319 | |
3317 | KASSERT(pv != NULL); | | 3320 | KASSERT(pv != NULL); |
3318 | | | 3321 | |
| | | 3322 | /* |
| | | 3323 | * The page attributes are in the lower 2 bits of the first |
| | | 3324 | * PV entry pointer. Rather than comparing the pointer address |
| | | 3325 | * and branching, we just always preserve what might be there |
| | | 3326 | * (either attribute bits or zero bits). |
| | | 3327 | */ |
3319 | *pvp = (pv_entry_t)((uintptr_t)pv->pv_next | | | 3328 | *pvp = (pv_entry_t)((uintptr_t)pv->pv_next | |
3320 | (((uintptr_t)*pvp) & PGA_ATTRS)); | | 3329 | (((uintptr_t)*pvp) & PGA_ATTRS)); |
| | | 3330 | LIST_REMOVE(pv, pv_link); |
3321 | | | 3331 | |
3322 | if (dolock) { | | 3332 | if (dolock) { |
3323 | mutex_exit(lock); | | 3333 | mutex_exit(lock); |
3324 | } | | 3334 | } |
3325 | | | 3335 | |
3326 | if (opvp != NULL) | | 3336 | if (opvp != NULL) |
3327 | *opvp = pv; | | 3337 | *opvp = pv; |
3328 | else | | 3338 | else |
3329 | pmap_pv_free(pv); | | 3339 | pmap_pv_free(pv); |
3330 | } | | 3340 | } |
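/*
 * Editor's note: an illustrative sketch of the pointer-tagging scheme
 * the comment above describes, distilled from the insert/remove code.
 * PGA_ATTRS masks the low attribute bits stored in pvh_listx alongside
 * the list-head pointer; this is not original code.
 */
#if 0	/* illustrative only */
	/* Decompose the tagged word... */
	pv_entry_t head = (pv_entry_t)(md->pvh_listx & ~PGA_ATTRS);
	uintptr_t attrs = md->pvh_listx & PGA_ATTRS;
	/* ...and recompose it after updating the head pointer. */
	md->pvh_listx = (uintptr_t)head | attrs;
#endif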
3331 | | | 3341 | |
3332 | /* | | 3342 | /* |
3333 | * pmap_pv_page_alloc: | | 3343 | * pmap_pv_page_alloc: |
3334 | * | | 3344 | * |
3335 | * Allocate a page for the pv_entry pool. | | 3345 | * Allocate a page for the pv_entry pool. |
3336 | */ | | 3346 | */ |
3337 | static void * | | 3347 | static void * |
3338 | pmap_pv_page_alloc(struct pool *pp, int flags) | | 3348 | pmap_pv_page_alloc(struct pool *pp, int flags) |
3339 | { | | 3349 | { |
3340 | struct vm_page * const pg = pmap_physpage_alloc(PGU_PVENT); | | 3350 | struct vm_page * const pg = pmap_physpage_alloc(PGU_PVENT); |
3341 | if (__predict_false(pg == NULL)) { | | 3351 | if (__predict_false(pg == NULL)) { |
3342 | return NULL; | | 3352 | return NULL; |
3343 | } | | 3353 | } |
3344 | return (void *)ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(pg)); | | 3354 | return (void *)ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(pg)); |
3345 | } | | 3355 | } |
3346 | | | 3356 | |
3347 | /* | | 3357 | /* |
3348 | * pmap_pv_page_free: | | 3358 | * pmap_pv_page_free: |
3349 | * | | 3359 | * |
3350 | * Free a pv_entry pool page. | | 3360 | * Free a pv_entry pool page. |
3351 | */ | | 3361 | */ |
3352 | static void | | 3362 | static void |
3353 | pmap_pv_page_free(struct pool *pp, void *v) | | 3363 | pmap_pv_page_free(struct pool *pp, void *v) |
3354 | { | | 3364 | { |
3355 | | | 3365 | |
3356 | pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t)v)); | | 3366 | pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t)v)); |
3357 | } | | 3367 | } |
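/*
 * Editor's note: these two hooks match NetBSD's struct pool_allocator
 * interface.  A sketch of how they are presumably wired together (the
 * variable name below is an assumption, not taken from this file):
 */
#if 0	/* illustrative only */
	static struct pool_allocator pmap_pv_page_allocator = {
		.pa_alloc = pmap_pv_page_alloc,
		.pa_free = pmap_pv_page_free,
		.pa_pagesz = PAGE_SIZE,
	};
#endif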
3358 | | | 3368 | |
3359 | /******************** misc. functions ********************/ | | 3369 | /******************** misc. functions ********************/ |
3360 | | | 3370 | |
3361 | /* | | 3371 | /* |
3362 | * Pages that are in-use as page table pages should never be part | | 3372 | * Pages that are in-use as page table pages should never be part |
3363 | * of a UVM loan, so we'll use that field for our PT page reference | | 3373 | * of a UVM loan, so we'll use that field for our PT page reference |
3364 | * count. | | 3374 | * count. |
3365 | */ | | 3375 | */ |
3366 | #define PHYSPAGE_REFCNT(pg) atomic_load_relaxed(&(pg)->loan_count) | | 3376 | #define PHYSPAGE_REFCNT(pg) atomic_load_relaxed(&(pg)->loan_count) |
3367 | #define PHYSPAGE_REFCNT_INC(pg) atomic_inc_uint_nv(&(pg)->loan_count) | | 3377 | #define PHYSPAGE_REFCNT_INC(pg) atomic_inc_uint_nv(&(pg)->loan_count) |
3368 | #define PHYSPAGE_REFCNT_DEC(pg) atomic_dec_uint_nv(&(pg)->loan_count) | | 3378 | #define PHYSPAGE_REFCNT_DEC(pg) atomic_dec_uint_nv(&(pg)->loan_count) |
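/*
 * Editor's note: a sketch of the intended discipline for these macros,
 * inferred from pmap_physpage_addref()/pmap_physpage_delref() below;
 * not part of the original source.
 */
#if 0	/* illustrative only */
	/* Count one reference per valid PTE inside the PT page... */
	(void) PHYSPAGE_REFCNT_INC(pg);		/* returns the new count */
	/* ...and free the PT page once the last PTE goes away. */
	if (PHYSPAGE_REFCNT_DEC(pg) == 0)
		pmap_physpage_free(VM_PAGE_TO_PHYS(pg));
#endif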
3369 | | | 3379 | |
3370 | /* | | 3380 | /* |
3371 | * pmap_physpage_alloc: | | 3381 | * pmap_physpage_alloc: |
3372 | * | | 3382 | * |
3373 | * Allocate a single page from the VM system and return the | | 3383 | * Allocate a single page from the VM system and return the |
3374 | * physical address for that page. | | 3384 | * physical address for that page. |
3375 | */ | | 3385 | */ |
3376 | static struct vm_page * | | 3386 | static struct vm_page * |
3377 | pmap_physpage_alloc(int usage) | | 3387 | pmap_physpage_alloc(int usage) |
3378 | { | | 3388 | { |
3379 | struct vm_page *pg; | | 3389 | struct vm_page *pg; |
3380 | | | 3390 | |
3381 | /* | | 3391 | /* |
3382 | * Don't ask for a zero'd page in the L1PT case -- we will | | 3392 | * Don't ask for a zero'd page in the L1PT case -- we will |
3383 | * properly initialize it in the constructor. | | 3393 | * properly initialize it in the constructor. |
3384 | */ | | 3394 | */ |
3385 | | | 3395 | |
3386 | pg = uvm_pagealloc(NULL, 0, NULL, usage == PGU_L1PT ? | | 3396 | pg = uvm_pagealloc(NULL, 0, NULL, usage == PGU_L1PT ? |
3387 | UVM_PGA_USERESERVE : UVM_PGA_USERESERVE|UVM_PGA_ZERO); | | 3397 | UVM_PGA_USERESERVE : UVM_PGA_USERESERVE|UVM_PGA_ZERO); |
3388 | if (pg != NULL) { | | 3398 | if (pg != NULL) { |
3389 | KASSERT(PHYSPAGE_REFCNT(pg) == 0); | | 3399 | KASSERT(PHYSPAGE_REFCNT(pg) == 0); |
3390 | } | | 3400 | } |
3391 | return pg; | | 3401 | return pg; |
3392 | } | | 3402 | } |
3393 | | | 3403 | |
3394 | /* | | 3404 | /* |
3395 | * pmap_physpage_free: | | 3405 | * pmap_physpage_free: |
3396 | * | | 3406 | * |
3397 | * Free the single page table page at the specified physical address. | | 3407 | * Free the single page table page at the specified physical address. |
3398 | */ | | 3408 | */ |
3399 | static void | | 3409 | static void |
3400 | pmap_physpage_free(paddr_t pa) | | 3410 | pmap_physpage_free(paddr_t pa) |
3401 | { | | 3411 | { |
3402 | struct vm_page *pg; | | 3412 | struct vm_page *pg; |
3403 | | | 3413 | |
3404 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) | | 3414 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) |
3405 | panic("pmap_physpage_free: bogus physical page address"); | | 3415 | panic("pmap_physpage_free: bogus physical page address"); |
3406 | | | 3416 | |
3407 | KASSERT(PHYSPAGE_REFCNT(pg) == 0); | | 3417 | KASSERT(PHYSPAGE_REFCNT(pg) == 0); |
3408 | | | 3418 | |
3409 | uvm_pagefree(pg); | | 3419 | uvm_pagefree(pg); |
3410 | } | | 3420 | } |
3411 | | | 3421 | |
3412 | /* | | 3422 | /* |
3413 | * pmap_physpage_addref: | | 3423 | * pmap_physpage_addref: |
3414 | * | | 3424 | * |
3415 | * Add a reference to the specified special use page. | | 3425 | * Add a reference to the specified special use page. |
3416 | */ | | 3426 | */ |
3417 | static int | | 3427 | static int |
3418 | pmap_physpage_addref(void *kva) | | 3428 | pmap_physpage_addref(void *kva) |
3419 | { | | 3429 | { |
3420 | struct vm_page *pg; | | 3430 | struct vm_page *pg; |
3421 | paddr_t pa; | | 3431 | paddr_t pa; |
3422 | | | 3432 | |
3423 | pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva)); | | 3433 | pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva)); |
3424 | pg = PHYS_TO_VM_PAGE(pa); | | 3434 | pg = PHYS_TO_VM_PAGE(pa); |
3425 | | | 3435 | |
3426 | KASSERT(PHYSPAGE_REFCNT(pg) < UINT32_MAX); | | 3436 | KASSERT(PHYSPAGE_REFCNT(pg) < UINT32_MAX); |
3427 | | | 3437 | |
3428 | return PHYSPAGE_REFCNT_INC(pg); | | 3438 | return PHYSPAGE_REFCNT_INC(pg); |
3429 | } | | 3439 | } |
3430 | | | 3440 | |
3431 | /* | | 3441 | /* |
3432 | * pmap_physpage_delref: | | 3442 | * pmap_physpage_delref: |
3433 | * | | 3443 | * |
3434 | * Delete a reference to the specified special use page. | | 3444 | * Delete a reference to the specified special use page. |
3435 | */ | | 3445 | */ |
3436 | static int | | 3446 | static int |
3437 | pmap_physpage_delref(void *kva) | | 3447 | pmap_physpage_delref(void *kva) |
3438 | { | | 3448 | { |
3439 | struct vm_page *pg; | | 3449 | struct vm_page *pg; |
3440 | paddr_t pa; | | 3450 | paddr_t pa; |
3441 | | | 3451 | |
3442 | pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva)); | | 3452 | pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva)); |
3443 | pg = PHYS_TO_VM_PAGE(pa); | | 3453 | pg = PHYS_TO_VM_PAGE(pa); |
3444 | | | 3454 | |
3445 | KASSERT(PHYSPAGE_REFCNT(pg) != 0); | | 3455 | KASSERT(PHYSPAGE_REFCNT(pg) != 0); |
3446 | | | 3456 | |
3447 | return PHYSPAGE_REFCNT_DEC(pg); | | 3457 | return PHYSPAGE_REFCNT_DEC(pg); |
3448 | } | | 3458 | } |
3449 | | | 3459 | |
3450 | /******************** page table page management ********************/ | | 3460 | /******************** page table page management ********************/ |
3451 | | | 3461 | |
3452 | static bool | | 3462 | static bool |
3453 | pmap_kptpage_alloc(paddr_t *pap) | | 3463 | pmap_kptpage_alloc(paddr_t *pap) |
3454 | { | | 3464 | { |
3455 | if (uvm.page_init_done == false) { | | 3465 | if (uvm.page_init_done == false) { |
3456 | /* | | 3466 | /* |
3457 | * We're growing the kernel pmap early (from | | 3467 | * We're growing the kernel pmap early (from |
3458 | * uvm_pageboot_alloc()). This case must | | 3468 | * uvm_pageboot_alloc()). This case must |
3459 | * be handled a little differently. | | 3469 | * be handled a little differently. |
3460 | */ | | 3470 | */ |
3461 | *pap = ALPHA_K0SEG_TO_PHYS( | | 3471 | *pap = ALPHA_K0SEG_TO_PHYS( |
3462 | pmap_steal_memory(PAGE_SIZE, NULL, NULL)); | | 3472 | pmap_steal_memory(PAGE_SIZE, NULL, NULL)); |
3463 | return true; | | 3473 | return true; |
3464 | } | | 3474 | } |
3465 | | | 3475 | |
3466 | struct vm_page * const pg = pmap_physpage_alloc(PGU_NORMAL); | | 3476 | struct vm_page * const pg = pmap_physpage_alloc(PGU_NORMAL); |
3467 | if (__predict_true(pg != NULL)) { | | 3477 | if (__predict_true(pg != NULL)) { |
3468 | *pap = VM_PAGE_TO_PHYS(pg); | | 3478 | *pap = VM_PAGE_TO_PHYS(pg); |
3469 | return true; | | 3479 | return true; |
3470 | } | | 3480 | } |
3471 | return false; | | 3481 | return false; |
3472 | } | | 3482 | } |
3473 | | | 3483 | |
3474 | /* | | 3484 | /* |
3475 | * pmap_growkernel: [ INTERFACE ] | | 3485 | * pmap_growkernel: [ INTERFACE ] |
3476 | * | | 3486 | * |
3477 | * Grow the kernel address space. This is a hint from the | | 3487 | * Grow the kernel address space. This is a hint from the |
3478 | * upper layer to pre-allocate more kernel PT pages. | | 3488 | * upper layer to pre-allocate more kernel PT pages. |
3479 | */ | | 3489 | */ |
3480 | vaddr_t | | 3490 | vaddr_t |
3481 | pmap_growkernel(vaddr_t maxkvaddr) | | 3491 | pmap_growkernel(vaddr_t maxkvaddr) |
3482 | { | | 3492 | { |
3483 | struct pmap *pm; | | 3493 | struct pmap *pm; |
3484 | paddr_t ptaddr; | | 3494 | paddr_t ptaddr; |
3485 | pt_entry_t *l1pte, *l2pte, pte; | | 3495 | pt_entry_t *l1pte, *l2pte, pte; |
3486 | pt_entry_t *lev1map; | | 3496 | pt_entry_t *lev1map; |
3487 | vaddr_t va; | | 3497 | vaddr_t va; |
3488 | int l1idx; | | 3498 | int l1idx; |
3489 | | | 3499 | |
3490 | rw_enter(&pmap_growkernel_lock, RW_WRITER); | | 3500 | rw_enter(&pmap_growkernel_lock, RW_WRITER); |
3491 | | | 3501 | |
3492 | if (maxkvaddr <= virtual_end) | | 3502 | if (maxkvaddr <= virtual_end) |
3493 | goto out; /* we are OK */ | | 3503 | goto out; /* we are OK */ |
3494 | | | 3504 | |
3495 | va = virtual_end; | | 3505 | va = virtual_end; |
3496 | | | 3506 | |
3497 | while (va < maxkvaddr) { | | 3507 | while (va < maxkvaddr) { |
3498 | /* | | 3508 | /* |
3499 | * If there is no valid L1 PTE (i.e. no L2 PT page), | | 3509 | * If there is no valid L1 PTE (i.e. no L2 PT page), |
3500 | * allocate a new L2 PT page and insert it into the | | 3510 | * allocate a new L2 PT page and insert it into the |
3501 | * L1 map. | | 3511 | * L1 map. |
3502 | */ | | 3512 | */ |
3503 | l1pte = pmap_l1pte(kernel_lev1map, va); | | 3513 | l1pte = pmap_l1pte(kernel_lev1map, va); |
3504 | if (pmap_pte_v(l1pte) == 0) { | | 3514 | if (pmap_pte_v(l1pte) == 0) { |
3505 | if (!pmap_kptpage_alloc(&ptaddr)) | | 3515 | if (!pmap_kptpage_alloc(&ptaddr)) |
3506 | goto die; | | 3516 | goto die; |
3507 | pte = (atop(ptaddr) << PG_SHIFT) | | | 3517 | pte = (atop(ptaddr) << PG_SHIFT) | |
3508 | PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; | | 3518 | PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; |
3509 | *l1pte = pte; | | 3519 | *l1pte = pte; |
3510 | | | 3520 | |
3511 | l1idx = l1pte_index(va); | | 3521 | l1idx = l1pte_index(va); |
3512 | | | 3522 | |
3513 | /* Update all the user pmaps. */ | | 3523 | /* Update all the user pmaps. */ |
3514 | mutex_enter(&pmap_all_pmaps_lock); | | 3524 | mutex_enter(&pmap_all_pmaps_lock); |
3515 | for (pm = TAILQ_FIRST(&pmap_all_pmaps); | | 3525 | for (pm = TAILQ_FIRST(&pmap_all_pmaps); |
3516 | pm != NULL; pm = TAILQ_NEXT(pm, pm_list)) { | | 3526 | pm != NULL; pm = TAILQ_NEXT(pm, pm_list)) { |
3517 | /* Skip the kernel pmap. */ | | 3527 | /* Skip the kernel pmap. */ |
3518 | if (pm == pmap_kernel()) | | 3528 | if (pm == pmap_kernel()) |
3519 | continue; | | 3529 | continue; |
3520 | | | 3530 | |
3521 | /* | | 3531 | /* |
3522 | * Any pmaps published on the global list | | 3532 | * Any pmaps published on the global list |
3523 | * should never be referencing kernel_lev1map. | | 3533 | * should never be referencing kernel_lev1map. |
3524 | */ | | 3534 | */ |
3525 | lev1map = pmap_lev1map(pm); | | 3535 | lev1map = pmap_lev1map(pm); |
3526 | KASSERT(lev1map != kernel_lev1map); | | 3536 | KASSERT(lev1map != kernel_lev1map); |
3527 | | | 3537 | |
3528 | PMAP_LOCK(pm); | | 3538 | PMAP_LOCK(pm); |
3529 | lev1map[l1idx] = pte; | | 3539 | lev1map[l1idx] = pte; |
3530 | PMAP_UNLOCK(pm); | | 3540 | PMAP_UNLOCK(pm); |
3531 | } | | 3541 | } |
3532 | mutex_exit(&pmap_all_pmaps_lock); | | 3542 | mutex_exit(&pmap_all_pmaps_lock); |
3533 | } | | 3543 | } |
3534 | | | 3544 | |
3535 | /* | | 3545 | /* |
3536 | * Have an L2 PT page now, add the L3 PT page. | | 3546 | * Have an L2 PT page now, add the L3 PT page. |
3537 | */ | | 3547 | */ |
3538 | l2pte = pmap_l2pte(kernel_lev1map, va, l1pte); | | 3548 | l2pte = pmap_l2pte(kernel_lev1map, va, l1pte); |
3539 | KASSERT(pmap_pte_v(l2pte) == 0); | | 3549 | KASSERT(pmap_pte_v(l2pte) == 0); |
3540 | if (!pmap_kptpage_alloc(&ptaddr)) | | 3550 | if (!pmap_kptpage_alloc(&ptaddr)) |
3541 | goto die; | | 3551 | goto die; |
3542 | *l2pte = (atop(ptaddr) << PG_SHIFT) | | | 3552 | *l2pte = (atop(ptaddr) << PG_SHIFT) | |
3543 | PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; | | 3553 | PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; |
3544 | va += ALPHA_L2SEG_SIZE; | | 3554 | va += ALPHA_L2SEG_SIZE; |
3545 | } | | 3555 | } |
3546 | | | 3556 | |
3547 | /* Invalidate the L1 PT cache. */ | | 3557 | /* Invalidate the L1 PT cache. */ |
3548 | pool_cache_invalidate(&pmap_l1pt_cache); | | 3558 | pool_cache_invalidate(&pmap_l1pt_cache); |
3549 | | | 3559 | |
3550 | virtual_end = va; | | 3560 | virtual_end = va; |
3551 | | | 3561 | |
3552 | out: | | 3562 | out: |
3553 | rw_exit(&pmap_growkernel_lock); | | 3563 | rw_exit(&pmap_growkernel_lock); |
3554 | | | 3564 | |
3555 | return (virtual_end); | | 3565 | return (virtual_end); |
3556 | | | 3566 | |
3557 | die: | | 3567 | die: |
3558 | panic("pmap_growkernel: out of memory"); | | 3568 | panic("pmap_growkernel: out of memory"); |
3559 | } | | 3569 | } |
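/*
 * Editor's note: the loop above advances va by ALPHA_L2SEG_SIZE per new
 * L3 PT page.  Worked numbers, assuming the usual Alpha layout of 8 KB
 * pages and 8-byte PTEs: one PT page holds 8192 / 8 = 1024 PTEs, so
 * each L3 page maps 1024 * 8 KB = 8 MB of KVA, and each L2 page spans
 * 1024 * 8 MB = 8 GB.  A compile-time check of that relation (sketch):
 */
#if 0	/* illustrative only */
	CTASSERT(ALPHA_L2SEG_SIZE ==
	    (PAGE_SIZE / sizeof(pt_entry_t)) * PAGE_SIZE);
#endif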
3560 | | | 3570 | |
3561 | /* | | 3571 | /* |
3562 | * pmap_l1pt_ctor: | | 3572 | * pmap_l1pt_ctor: |
3563 | * | | 3573 | * |
3564 | * Pool cache constructor for L1 PT pages. | | 3574 | * Pool cache constructor for L1 PT pages. |
3565 | * | | 3575 | * |
3566 | * Note: The growkernel lock is held across allocations | | 3576 | * Note: The growkernel lock is held across allocations |
3567 | * from our pool_cache, so we don't need to acquire it | | 3577 | * from our pool_cache, so we don't need to acquire it |
3568 | * ourselves. | | 3578 | * ourselves. |
3569 | */ | | 3579 | */ |
3570 | static int | | 3580 | static int |
3571 | pmap_l1pt_ctor(void *arg, void *object, int flags) | | 3581 | pmap_l1pt_ctor(void *arg, void *object, int flags) |
3572 | { | | 3582 | { |
3573 | pt_entry_t *l1pt = object, pte; | | 3583 | pt_entry_t *l1pt = object, pte; |
3574 | int i; | | 3584 | int i; |
3575 | | | 3585 | |
3576 | /* | | 3586 | /* |
3577 | * Initialize the new level 1 table by zeroing the | | 3587 | * Initialize the new level 1 table by zeroing the |
3578 | * user portion and copying the kernel mappings into | | 3588 | * user portion and copying the kernel mappings into |
3579 | * the kernel portion. | | 3589 | * the kernel portion. |
3580 | */ | | 3590 | */ |
3581 | for (i = 0; i < l1pte_index(VM_MIN_KERNEL_ADDRESS); i++) | | 3591 | for (i = 0; i < l1pte_index(VM_MIN_KERNEL_ADDRESS); i++) |
3582 | l1pt[i] = 0; | | 3592 | l1pt[i] = 0; |
3583 | | | 3593 | |
3584 | for (i = l1pte_index(VM_MIN_KERNEL_ADDRESS); | | 3594 | for (i = l1pte_index(VM_MIN_KERNEL_ADDRESS); |
3585 | i <= l1pte_index(VM_MAX_KERNEL_ADDRESS); i++) | | 3595 | i <= l1pte_index(VM_MAX_KERNEL_ADDRESS); i++) |
3586 | l1pt[i] = kernel_lev1map[i]; | | 3596 | l1pt[i] = kernel_lev1map[i]; |
3587 | | | 3597 | |
3588 | /* | | 3598 | /* |
3589 | * Now, map the new virtual page table. NOTE: NO ASM! | | 3599 | * Now, map the new virtual page table. NOTE: NO ASM! |
3590 | */ | | 3600 | */ |
3591 | pte = ((ALPHA_K0SEG_TO_PHYS((vaddr_t) l1pt) >> PGSHIFT) << PG_SHIFT) | | | 3601 | pte = ((ALPHA_K0SEG_TO_PHYS((vaddr_t) l1pt) >> PGSHIFT) << PG_SHIFT) | |
3592 | PG_V | PG_KRE | PG_KWE; | | 3602 | PG_V | PG_KRE | PG_KWE; |
3593 | l1pt[l1pte_index(VPTBASE)] = pte; | | 3603 | l1pt[l1pte_index(VPTBASE)] = pte; |
3594 | | | 3604 | |
3595 | return (0); | | 3605 | return (0); |
3596 | } | | 3606 | } |
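/*
 * Editor's note: a rough sketch (not original code) of what the
 * self-referencing VPTBASE entry installed above provides -- the page
 * tables become visible as a linear array of L3 PTEs in virtual space,
 * so the PTE for a mapped va can be fetched with a single load.  The
 * indexing below ignores Alpha sign-extension details and is an
 * assumption about the layout.
 */
#if 0	/* illustrative only */
	pt_entry_t * const vpt = (pt_entry_t *)VPTBASE;
	const vaddr_t va = VM_MIN_KERNEL_ADDRESS;	/* any mapped va */
	pt_entry_t pte = vpt[va >> PGSHIFT];		/* L3 PTE for va */
#endif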
3597 | | | 3607 | |
3598 | /* | | 3608 | /* |
3599 | * pmap_l1pt_alloc: | | 3609 | * pmap_l1pt_alloc: |
3600 | * | | 3610 | * |
3601 | * Page allocator for L1 PT pages. | | 3611 | * Page allocator for L1 PT pages.
3602 | */ | | 3612 | */ |
3603 | static void * | | 3613 | static void * |
3604 | pmap_l1pt_alloc(struct pool *pp, int flags) | | 3614 | pmap_l1pt_alloc(struct pool *pp, int flags) |
3605 | { | | 3615 | { |
3606 | /* | | 3616 | /* |
3607 | * Attempt to allocate a free page. | | 3617 | * Attempt to allocate a free page. |
3608 | */ | | 3618 | */ |
3609 | struct vm_page * const pg = pmap_physpage_alloc(PGU_L1PT); | | 3619 | struct vm_page * const pg = pmap_physpage_alloc(PGU_L1PT); |
3610 | if (__predict_false(pg == NULL)) { | | 3620 | if (__predict_false(pg == NULL)) { |
3611 | return NULL; | | 3621 | return NULL; |
3612 | } | | 3622 | } |
3613 | return (void *)ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(pg)); | | 3623 | return (void *)ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(pg)); |
3614 | } | | 3624 | } |
3615 | | | 3625 | |
3616 | /* | | 3626 | /* |
3617 | * pmap_l1pt_free: | | 3627 | * pmap_l1pt_free: |
3618 | * | | 3628 | * |
3619 | * Page freer for L1 PT pages. | | 3629 | * Page freer for L1 PT pages. |
3620 | */ | | 3630 | */ |
3621 | static void | | 3631 | static void |
3622 | pmap_l1pt_free(struct pool *pp, void *v) | | 3632 | pmap_l1pt_free(struct pool *pp, void *v) |
3623 | { | | 3633 | { |
3624 | | | 3634 | |
3625 | pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t) v)); | | 3635 | pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t) v)); |
3626 | } | | 3636 | } |
3627 | | | 3637 | |
3628 | /* | | 3638 | /* |
3629 | * pmap_ptpage_alloc: | | 3639 | * pmap_ptpage_alloc: |
3630 | * | | 3640 | * |
3631 | * Allocate a level 2 or level 3 page table page for a user | | 3641 | * Allocate a level 2 or level 3 page table page for a user |
3632 | * pmap, and initialize the PTE that references it. | | 3642 | * pmap, and initialize the PTE that references it. |
3633 | * | | 3643 | * |
3634 | * Note: the pmap must already be locked. | | 3644 | * Note: the pmap must already be locked. |
3635 | */ | | 3645 | */ |
3636 | static int | | 3646 | static int |
3637 | pmap_ptpage_alloc(pmap_t pmap, pt_entry_t * const pte, int const usage) | | 3647 | pmap_ptpage_alloc(pmap_t pmap, pt_entry_t * const pte, int const usage) |
3638 | { | | 3648 | { |
3639 | /* | | 3649 | /* |
3640 | * Allocate the page table page. | | 3650 | * Allocate the page table page. |
3641 | */ | | 3651 | */ |
3642 | struct vm_page * const pg = pmap_physpage_alloc(usage); | | 3652 | struct vm_page * const pg = pmap_physpage_alloc(usage); |
3643 | if (__predict_false(pg == NULL)) { | | 3653 | if (__predict_false(pg == NULL)) { |
3644 | return ENOMEM; | | 3654 | return ENOMEM; |
3645 | } | | 3655 | } |
3646 | | | 3656 | |
3647 | LIST_INSERT_HEAD(&pmap->pm_ptpages, pg, pageq.list); | | 3657 | LIST_INSERT_HEAD(&pmap->pm_ptpages, pg, pageq.list); |
3648 | | | 3658 | |
3649 | /* | | 3659 | /* |
3650 | * Initialize the referencing PTE. | | 3660 | * Initialize the referencing PTE. |
3651 | */ | | 3661 | */ |
3652 | const pt_entry_t npte = ((VM_PAGE_TO_PHYS(pg) >> PGSHIFT) << PG_SHIFT) | | | 3662 | const pt_entry_t npte = ((VM_PAGE_TO_PHYS(pg) >> PGSHIFT) << PG_SHIFT) | |
3653 | PG_V | PG_KRE | PG_KWE | PG_WIRED; | | 3663 | PG_V | PG_KRE | PG_KWE | PG_WIRED; |
3654 | | | 3664 | |
3655 | atomic_store_relaxed(pte, npte); | | 3665 | atomic_store_relaxed(pte, npte); |
3656 | | | 3666 | |
3657 | return (0); | | 3667 | return (0); |
3658 | } | | 3668 | } |
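/*
 * Editor's note: a sketch of the PFN encoding used in the npte
 * construction above -- the page-frame number lands in the high bits at
 * PG_SHIFT, and shifting back down recovers the (page-aligned) physical
 * address.  Not part of the original source.
 */
#if 0	/* illustrative only */
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
	const pt_entry_t enc = ((pa >> PGSHIFT) << PG_SHIFT) | PG_V;
	KASSERT((paddr_t)((enc >> PG_SHIFT) << PGSHIFT) == pa);
#endif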
3659 | | | 3669 | |
3660 | /* | | 3670 | /* |
3661 | * pmap_ptpage_free: | | 3671 | * pmap_ptpage_free: |
3662 | * | | 3672 | * |
3663 | * Free the level 2 or level 3 page table page referenced | | 3673 | * Free the level 2 or level 3 page table page referenced |
3664 | * by the provided PTE. | | 3674 | * by the provided PTE.
3665 | * | | 3675 | * |
3666 | * Note: the pmap must already be locked. | | 3676 | * Note: the pmap must already be locked. |
3667 | */ | | 3677 | */ |
3668 | static void | | 3678 | static void |
3669 | pmap_ptpage_free(pmap_t pmap, pt_entry_t * const pte, | | 3679 | pmap_ptpage_free(pmap_t pmap, pt_entry_t * const pte, |
3670 | struct pmap_tlb_context * const tlbctx) | | 3680 | struct pmap_tlb_context * const tlbctx) |
3671 | { | | 3681 | { |
3672 | | | 3682 | |
3673 | /* | | 3683 | /* |
3674 | * Extract the physical address of the page from the PTE | | 3684 | * Extract the physical address of the page from the PTE |
3675 | * and clear the entry. | | 3685 | * and clear the entry. |
3676 | */ | | 3686 | */ |
3677 | const paddr_t ptpa = pmap_pte_pa(pte); | | 3687 | const paddr_t ptpa = pmap_pte_pa(pte); |
3678 | atomic_store_relaxed(pte, PG_NV); | | 3688 | atomic_store_relaxed(pte, PG_NV); |
3679 | | | 3689 | |
3680 | struct vm_page * const pg = PHYS_TO_VM_PAGE(ptpa); | | 3690 | struct vm_page * const pg = PHYS_TO_VM_PAGE(ptpa); |
3681 | KASSERT(pg != NULL); | | 3691 | KASSERT(pg != NULL); |
3682 | | | 3692 | |
3683 | KASSERT(PHYSPAGE_REFCNT(pg) == 0); | | 3693 | KASSERT(PHYSPAGE_REFCNT(pg) == 0); |
3684 | #ifdef DEBUG | | 3694 | #ifdef DEBUG |
3685 | pmap_zero_page(ptpa); | | 3695 | pmap_zero_page(ptpa); |
3686 | #endif | | 3696 | #endif |
3687 | | | 3697 | |
3688 | LIST_REMOVE(pg, pageq.list); | | 3698 | LIST_REMOVE(pg, pageq.list); |
3689 | LIST_INSERT_HEAD(&tlbctx->t_freeptq, pg, pageq.list); | | 3699 | LIST_INSERT_HEAD(&tlbctx->t_freeptq, pg, pageq.list); |
3690 | } | | 3700 | } |
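/*
 * Editor's note: freed PT pages are queued on tlbctx->t_freeptq rather
 * than released here, presumably so the physical pages cannot be reused
 * until the associated TLB shootdown has completed.  A sketch of the
 * drain step that would follow (where exactly this file performs it is
 * not shown in this excerpt):
 */
#if 0	/* illustrative only */
	struct vm_page *ptpg;
	while ((ptpg = LIST_FIRST(&tlbctx->t_freeptq)) != NULL) {
		LIST_REMOVE(ptpg, pageq.list);
		pmap_physpage_free(VM_PAGE_TO_PHYS(ptpg));
	}
#endif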
3691 | | | 3701 | |
3692 | /* | | 3702 | /* |
3693 | * pmap_l3pt_delref: | | 3703 | * pmap_l3pt_delref: |
3694 | * | | 3704 | * |
3695 | * Delete a reference on a level 3 PT page. If the reference drops | | 3705 | * Delete a reference on a level 3 PT page. If the reference drops |
3696 | * to zero, free it. | | 3706 | * to zero, free it. |
3697 | * | | 3707 | * |
3698 | * Note: the pmap must already be locked. | | 3708 | * Note: the pmap must already be locked. |
3699 | */ | | 3709 | */ |
3700 | static void | | 3710 | static void |
3701 | pmap_l3pt_delref(pmap_t pmap, vaddr_t va, pt_entry_t *l3pte, | | 3711 | pmap_l3pt_delref(pmap_t pmap, vaddr_t va, pt_entry_t *l3pte, |
3702 | struct pmap_tlb_context * const tlbctx) | | 3712 | struct pmap_tlb_context * const tlbctx) |
3703 | { | | 3713 | { |
3704 | pt_entry_t *l1pte, *l2pte; | | 3714 | pt_entry_t *l1pte, *l2pte; |
3705 | pt_entry_t * const lev1map = pmap_lev1map(pmap); | | 3715 | pt_entry_t * const lev1map = pmap_lev1map(pmap); |
3706 | | | 3716 | |
3707 | l1pte = pmap_l1pte(lev1map, va); | | 3717 | l1pte = pmap_l1pte(lev1map, va); |
3708 | l2pte = pmap_l2pte(lev1map, va, l1pte); | | 3718 | l2pte = pmap_l2pte(lev1map, va, l1pte); |
3709 | | | 3719 | |
3710 | #ifdef DIAGNOSTIC | | 3720 | #ifdef DIAGNOSTIC |
3711 | if (pmap == pmap_kernel()) | | 3721 | if (pmap == pmap_kernel()) |
3712 | panic("pmap_l3pt_delref: kernel pmap"); | | 3722 | panic("pmap_l3pt_delref: kernel pmap"); |
3713 | #endif | | 3723 | #endif |
3714 | | | 3724 | |
3715 | if (pmap_physpage_delref(l3pte) == 0) { | | 3725 | if (pmap_physpage_delref(l3pte) == 0) { |
3716 | /* | | 3726 | /* |
3717 | * No more mappings; we can free the level 3 table. | | 3727 | * No more mappings; we can free the level 3 table. |
3718 | */ | | 3728 | */ |
3719 | #ifdef DEBUG | | 3729 | #ifdef DEBUG |
3720 | if (pmapdebug & PDB_PTPAGE) | | 3730 | if (pmapdebug & PDB_PTPAGE) |
3721 | printf("pmap_l3pt_delref: freeing level 3 table at " | | 3731 | printf("pmap_l3pt_delref: freeing level 3 table at " |
3722 | "0x%lx\n", pmap_pte_pa(l2pte)); | | 3732 | "0x%lx\n", pmap_pte_pa(l2pte)); |
3723 | #endif | | 3733 | #endif |
3724 | /* | | 3734 | /* |
3725 | * The caller may pass a NULL tlbctx if it knows the last | | 3735 | * The caller may pass a NULL tlbctx if it knows the last
3726 | * reference won't be dropped. | | 3736 | * reference won't be dropped.
3727 | */ | | 3737 | */ |
3728 | KASSERT(tlbctx != NULL); | | 3738 | KASSERT(tlbctx != NULL); |
3729 | pmap_ptpage_free(pmap, l2pte, tlbctx); | | 3739 | pmap_ptpage_free(pmap, l2pte, tlbctx); |
3730 | | | 3740 | |
3731 | /* | | 3741 | /* |
3732 | * We've freed a level 3 table, so we must invalidate | | 3742 | * We've freed a level 3 table, so we must invalidate |
3733 | * any now-stale TLB entries for the corresponding VPT | | 3743 | * any now-stale TLB entries for the corresponding VPT |
3734 | * VA range. Easiest way to guarantee this is to hit | | 3744 | * VA range. Easiest way to guarantee this is to hit |
3735 | * all of the user TLB entries. | | 3745 | * all of the user TLB entries. |
3736 | */ | | 3746 | */ |
3737 | pmap_tlb_shootdown_all_user(pmap, PG_V, tlbctx); | | 3747 | pmap_tlb_shootdown_all_user(pmap, PG_V, tlbctx); |
3738 | | | 3748 | |
3739 | /* | | 3749 | /* |
3740 | * We've freed a level 3 table, so delete the reference | | 3750 | * We've freed a level 3 table, so delete the reference |
3741 | * on the level 2 table. | | 3751 | * on the level 2 table. |
3742 | */ | | 3752 | */ |
3743 | pmap_l2pt_delref(pmap, l1pte, l2pte, tlbctx); | | 3753 | pmap_l2pt_delref(pmap, l1pte, l2pte, tlbctx); |
3744 | } | | 3754 | } |
3745 | } | | 3755 | } |
3746 | | | 3756 | |
3747 | /* | | 3757 | /* |
3748 | * pmap_l2pt_delref: | | 3758 | * pmap_l2pt_delref: |
3749 | * | | 3759 | * |
3750 | * Delete a reference on a level 2 PT page. If the reference drops | | 3760 | * Delete a reference on a level 2 PT page. If the reference drops |
3751 | * to zero, free it. | | 3761 | * to zero, free it. |
3752 | * | | 3762 | * |
3753 | * Note: the pmap must already be locked. | | 3763 | * Note: the pmap must already be locked. |
3754 | */ | | 3764 | */ |
3755 | static void | | 3765 | static void |
3756 | pmap_l2pt_delref(pmap_t pmap, pt_entry_t *l1pte, pt_entry_t *l2pte, | | 3766 | pmap_l2pt_delref(pmap_t pmap, pt_entry_t *l1pte, pt_entry_t *l2pte, |
3757 | struct pmap_tlb_context * const tlbctx) | | 3767 | struct pmap_tlb_context * const tlbctx) |
3758 | { | | 3768 | { |
3759 | | | 3769 | |
3760 | #ifdef DIAGNOSTIC | | 3770 | #ifdef DIAGNOSTIC |
3761 | if (pmap == pmap_kernel()) | | 3771 | if (pmap == pmap_kernel()) |
3762 | panic("pmap_l2pt_delref: kernel pmap"); | | 3772 | panic("pmap_l2pt_delref: kernel pmap"); |
3763 | #endif | | 3773 | #endif |
3764 | | | 3774 | |
3765 | if (pmap_physpage_delref(l2pte) == 0) { | | 3775 | if (pmap_physpage_delref(l2pte) == 0) { |
3766 | /* | | 3776 | /* |
3767 | * No more mappings in this segment; we can free the | | 3777 | * No more mappings in this segment; we can free the |
3768 | * level 2 table. | | 3778 | * level 2 table. |
3769 | */ | | 3779 | */ |
3770 | #ifdef DEBUG | | 3780 | #ifdef DEBUG |
3771 | if (pmapdebug & PDB_PTPAGE) | | 3781 | if (pmapdebug & PDB_PTPAGE) |
3772 | printf("pmap_l2pt_delref: freeing level 2 table at " | | 3782 | printf("pmap_l2pt_delref: freeing level 2 table at " |
3773 | "0x%lx\n", pmap_pte_pa(l1pte)); | | 3783 | "0x%lx\n", pmap_pte_pa(l1pte)); |
3774 | #endif | | 3784 | #endif |
3775 | /* | | 3785 | /* |
3776 | * The caller may pass a NULL tlbctx if it knows the last | | 3786 | * The caller may pass a NULL tlbctx if it knows the last
3777 | * reference won't be dropped. | | 3787 | * reference won't be dropped.
3778 | */ | | 3788 | */ |
3779 | KASSERT(tlbctx != NULL); | | 3789 | KASSERT(tlbctx != NULL); |
3780 | pmap_ptpage_free(pmap, l1pte, tlbctx); | | 3790 | pmap_ptpage_free(pmap, l1pte, tlbctx); |
3781 | | | 3791 | |
3782 | /* | | 3792 | /* |
3783 | * We've freed a level 2 table, so we must invalidate | | 3793 | * We've freed a level 2 table, so we must invalidate |
3784 | * any now-stale TLB entries for the corresponding VPT | | 3794 | * any now-stale TLB entries for the corresponding VPT |
3785 | * VA range. Easiest way to guarantee this is to hit | | 3795 | * VA range. Easiest way to guarantee this is to hit |
3786 | * all of the user TLB entries. | | 3796 | * all of the user TLB entries. |
3787 | */ | | 3797 | */ |
3788 | pmap_tlb_shootdown_all_user(pmap, PG_V, tlbctx); | | 3798 | pmap_tlb_shootdown_all_user(pmap, PG_V, tlbctx); |
3789 | | | 3799 | |
3790 | /* | | 3800 | /* |
3791 | * We've freed a level 2 table, so delete the reference | | 3801 | * We've freed a level 2 table, so delete the reference |
3792 | * on the level 1 table. | | 3802 | * on the level 1 table. |
3793 | */ | | 3803 | */ |
3794 | pmap_l1pt_delref(pmap, l1pte); | | 3804 | pmap_l1pt_delref(pmap, l1pte); |
3795 | } | | 3805 | } |
3796 | } | | 3806 | } |
3797 | | | 3807 | |
3798 | /* | | 3808 | /* |
3799 | * pmap_l1pt_delref: | | 3809 | * pmap_l1pt_delref: |
3800 | * | | 3810 | * |
3801 | * Delete a reference on a level 1 PT page. | | 3811 | * Delete a reference on a level 1 PT page. |
3802 | */ | | 3812 | */ |
3803 | static void | | 3813 | static void |
3804 | pmap_l1pt_delref(pmap_t pmap, pt_entry_t *l1pte) | | 3814 | pmap_l1pt_delref(pmap_t pmap, pt_entry_t *l1pte) |
3805 | { | | 3815 | { |
3806 | | | 3816 | |
3807 | KASSERT(pmap != pmap_kernel()); | | 3817 | KASSERT(pmap != pmap_kernel()); |
3808 | | | 3818 | |
3809 | (void)pmap_physpage_delref(l1pte); | | 3819 | (void)pmap_physpage_delref(l1pte); |
3810 | } | | 3820 | } |
3811 | | | 3821 | |
3812 | /******************** Address Space Number management ********************/ | | 3822 | /******************** Address Space Number management ********************/ |
3813 | | | 3823 | |
3814 | /* | | 3824 | /* |
3815 | * pmap_asn_alloc: | | 3825 | * pmap_asn_alloc: |
3816 | * | | 3826 | * |
3817 | * Allocate and assign an ASN to the specified pmap. | | 3827 | * Allocate and assign an ASN to the specified pmap. |
3818 | * | | 3828 | * |
3819 | * Note: the pmap must already be locked. This may be called from | | 3829 | * Note: the pmap must already be locked. This may be called from |
3820 | * an interprocessor interrupt, and in that case, the sender of | | 3830 | * an interprocessor interrupt, and in that case, the sender of |
3821 | * the IPI has the pmap lock. | | 3831 | * the IPI has the pmap lock. |
3822 | */ | | 3832 | */ |
3823 | static u_int | | 3833 | static u_int |
3824 | pmap_asn_alloc(pmap_t const pmap, struct cpu_info * const ci) | | 3834 | pmap_asn_alloc(pmap_t const pmap, struct cpu_info * const ci) |
3825 | { | | 3835 | { |
3826 | | | 3836 | |
3827 | #ifdef DEBUG | | 3837 | #ifdef DEBUG |
3828 | if (pmapdebug & (PDB_FOLLOW|PDB_ASN)) | | 3838 | if (pmapdebug & (PDB_FOLLOW|PDB_ASN)) |
3829 | printf("pmap_asn_alloc(%p)\n", pmap); | | 3839 | printf("pmap_asn_alloc(%p)\n", pmap); |
3830 | #endif | | 3840 | #endif |
3831 | | | 3841 | |
3832 | KASSERT(pmap != pmap_kernel()); | | 3842 | KASSERT(pmap != pmap_kernel()); |
3833 | KASSERT(pmap->pm_percpu[ci->ci_cpuid].pmc_lev1map != kernel_lev1map); | | 3843 | KASSERT(pmap->pm_percpu[ci->ci_cpuid].pmc_lev1map != kernel_lev1map); |
3834 | KASSERT(kpreempt_disabled()); | | 3844 | KASSERT(kpreempt_disabled()); |
3835 | | | 3845 | |
3836 | /* No work to do if the CPU does not implement ASNs. */ | | 3846 | /* No work to do if the CPU does not implement ASNs. */
3837 | if (pmap_max_asn == 0) | | 3847 | if (pmap_max_asn == 0) |
3838 | return 0; | | 3848 | return 0; |
3839 | | | 3849 | |
3840 | struct pmap_percpu * const pmc = &pmap->pm_percpu[ci->ci_cpuid]; | | 3850 | struct pmap_percpu * const pmc = &pmap->pm_percpu[ci->ci_cpuid]; |
3841 | | | 3851 | |
3842 | /* | | 3852 | /* |
3843 | * Hopefully, we can continue using the one we have... | | 3853 | * Hopefully, we can continue using the one we have... |
3844 | * | | 3854 | * |
3845 | * N.B. the generation check will fail the first time | | 3855 | * N.B. the generation check will fail the first time |
3846 | * any pmap is activated on a given CPU, because we start | | 3856 | * any pmap is activated on a given CPU, because we start |
3847 | * the generation counter at 1, but initialize pmaps with | | 3857 | * the generation counter at 1, but initialize pmaps with |
3848 | * 0; this forces the first ASN allocation to occur. | | 3858 | * 0; this forces the first ASN allocation to occur. |
3849 | */ | | 3859 | */ |
3850 | if (pmc->pmc_asngen == ci->ci_asn_gen) { | | 3860 | if (pmc->pmc_asngen == ci->ci_asn_gen) { |
3851 | #ifdef DEBUG | | 3861 | #ifdef DEBUG |
3852 | if (pmapdebug & PDB_ASN) | | 3862 | if (pmapdebug & PDB_ASN) |
3853 | printf("pmap_asn_alloc: same generation, keeping %u\n", | | 3863 | printf("pmap_asn_alloc: same generation, keeping %u\n", |
3854 | pmc->pmc_asn); | | 3864 | pmc->pmc_asn); |
3855 | #endif | | 3865 | #endif |
3856 | TLB_COUNT(asn_reuse); | | 3866 | TLB_COUNT(asn_reuse); |
3857 | return pmc->pmc_asn; | | 3867 | return pmc->pmc_asn; |
3858 | } | | 3868 | } |
3859 | | | 3869 | |
3860 | /* | | 3870 | /* |
3861 | * Need to assign a new ASN. Grab the next one, incrementing | | 3871 | * Need to assign a new ASN. Grab the next one, incrementing |
3862 | * the generation number if we have to. | | 3872 | * the generation number if we have to. |
3863 | */ | | 3873 | */ |
3864 | if (ci->ci_next_asn > pmap_max_asn) { | | 3874 | if (ci->ci_next_asn > pmap_max_asn) { |
3865 | /* | | 3875 | /* |
3866 | * Invalidate all non-PG_ASM TLB entries and the | | 3876 | * Invalidate all non-PG_ASM TLB entries and the |
3867 | * I-cache, and bump the generation number. | | 3877 | * I-cache, and bump the generation number. |
3868 | */ | | 3878 | */ |
3869 | ALPHA_TBIAP(); | | 3879 | ALPHA_TBIAP(); |
3870 | alpha_pal_imb(); | | 3880 | alpha_pal_imb(); |
3871 | | | 3881 | |
3872 | ci->ci_next_asn = PMAP_ASN_FIRST_USER; | | 3882 | ci->ci_next_asn = PMAP_ASN_FIRST_USER; |
3873 | ci->ci_asn_gen++; | | 3883 | ci->ci_asn_gen++; |
3874 | TLB_COUNT(asn_newgen); | | 3884 | TLB_COUNT(asn_newgen); |
3875 | | | 3885 | |
3876 | /* | | 3886 | /* |
3877 | * Make sure the generation number doesn't wrap. We could | | 3887 | * Make sure the generation number doesn't wrap. We could |
3878 | * handle this scenario by traversing all of the pmaps, | | 3888 | * handle this scenario by traversing all of the pmaps, |
3879 | * and invalidating the generation number on those which | | 3889 | * and invalidating the generation number on those which |
3880 | * are not currently in use by this processor. | | 3890 | * are not currently in use by this processor. |
3881 | * | | 3891 | * |
3882 | * However... considering that we're using an unsigned 64-bit | | 3892 | * However... considering that we're using an unsigned 64-bit |
3883 | * integer for generation numbers, we won't wrap for | | 3893 | * integer for generation numbers, we won't wrap for
3884 | * approximately 75 billion years on a 128-ASN CPU | | 3894 | * approximately 75 billion years on a 128-ASN CPU
3885 | * (assuming 1000 switch operations per second). | | 3895 | * (assuming 1000 switch operations per second).
3886 | * | | 3896 | * |
3887 | * So, we don't bother. | | 3897 | * So, we don't bother. |
3888 | */ | | 3898 | */ |
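		/*
		 * (Editor's check of that figure, under the stated
		 * assumptions: with 127 usable user ASNs per generation
		 * and 1000 switches/sec, the generation bumps at most
		 * 1000 / 127 ~= 7.9 times/sec, and 2^64 / 7.9 ~= 2.3e18
		 * seconds ~= 74 billion years.)
		 */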
3889 | KASSERT(ci->ci_asn_gen != PMAP_ASNGEN_INVALID); | | 3899 | KASSERT(ci->ci_asn_gen != PMAP_ASNGEN_INVALID); |
3890 | #ifdef DEBUG | | 3900 | #ifdef DEBUG |
3891 | if (pmapdebug & PDB_ASN) | | 3901 | if (pmapdebug & PDB_ASN) |
3892 | printf("pmap_asn_alloc: generation bumped to %lu\n", | | 3902 | printf("pmap_asn_alloc: generation bumped to %lu\n", |
3893 | ci->ci_asn_gen); | | 3903 | ci->ci_asn_gen);
3894 | #endif | | 3904 | #endif |
3895 | } | | 3905 | } |
3896 | | | 3906 | |
3897 | /* | | 3907 | /* |
3898 | * Assign the new ASN and validate the generation number. | | 3908 | * Assign the new ASN and validate the generation number. |
3899 | */ | | 3909 | */ |
3900 | pmc->pmc_asn = ci->ci_next_asn++; | | 3910 | pmc->pmc_asn = ci->ci_next_asn++; |
3901 | pmc->pmc_asngen = ci->ci_asn_gen; | | 3911 | pmc->pmc_asngen = ci->ci_asn_gen; |
3902 | TLB_COUNT(asn_assign); | | 3912 | TLB_COUNT(asn_assign); |
3903 | | | 3913 | |
3904 | /* | | 3914 | /* |
3905 | * We have a new ASN, so we can skip any pending I-stream sync | | 3915 | * We have a new ASN, so we can skip any pending I-stream sync |
3906 | * on the way back out to user space. | | 3916 | * on the way back out to user space. |
3907 | */ | | 3917 | */ |
3908 | pmc->pmc_needisync = 0; | | 3918 | pmc->pmc_needisync = 0; |
3909 | | | 3919 | |
3910 | #ifdef DEBUG | | 3920 | #ifdef DEBUG |
3911 | if (pmapdebug & PDB_ASN) | | 3921 | if (pmapdebug & PDB_ASN) |
3912 | printf("pmap_asn_alloc: assigning %u to pmap %p\n", | | 3922 | printf("pmap_asn_alloc: assigning %u to pmap %p\n", |
3913 | pmc->pmc_asn, pmap); | | 3923 | pmc->pmc_asn, pmap); |
3914 | #endif | | 3924 | #endif |
3915 | return pmc->pmc_asn; | | 3925 | return pmc->pmc_asn; |
3916 | } | | 3926 | } |
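/*
 * Editor's note: a sketch of how the returned ASN is presumably consumed
 * at context-switch time -- loaded into the hardware PCB next to the
 * page-table base.  The pcb field names follow the alpha struct pcb, but
 * treat this whole fragment as an assumption, not original code.
 */
#if 0	/* illustrative only */
	struct pcb * const pcb = lwp_getpcb(curlwp);
	/* "pmap" is the pmap of the lwp being switched to. */
	pcb->pcb_hw.apcb_asn = pmap_asn_alloc(pmap, curcpu());
#endif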