| @@ -1,1137 +1,1137 @@ | | | @@ -1,1137 +1,1137 @@ |
1 | /* $NetBSD: pmap.c,v 1.284 2021/05/30 01:41:45 thorpej Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.285 2021/05/30 04:04:26 thorpej Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020 | | 4 | * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020 |
5 | * The NetBSD Foundation, Inc. | | 5 | * The NetBSD Foundation, Inc. |
6 | * All rights reserved. | | 6 | * All rights reserved. |
7 | * | | 7 | * |
8 | * This code is derived from software contributed to The NetBSD Foundation | | 8 | * This code is derived from software contributed to The NetBSD Foundation |
9 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 9 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
10 | * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius, | | 10 | * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius, |
11 | * and by Chris G. Demetriou. | | 11 | * and by Chris G. Demetriou. |
12 | * | | 12 | * |
13 | * Redistribution and use in source and binary forms, with or without | | 13 | * Redistribution and use in source and binary forms, with or without |
14 | * modification, are permitted provided that the following conditions | | 14 | * modification, are permitted provided that the following conditions |
15 | * are met: | | 15 | * are met: |
16 | * 1. Redistributions of source code must retain the above copyright | | 16 | * 1. Redistributions of source code must retain the above copyright |
17 | * notice, this list of conditions and the following disclaimer. | | 17 | * notice, this list of conditions and the following disclaimer. |
18 | * 2. Redistributions in binary form must reproduce the above copyright | | 18 | * 2. Redistributions in binary form must reproduce the above copyright |
19 | * notice, this list of conditions and the following disclaimer in the | | 19 | * notice, this list of conditions and the following disclaimer in the |
20 | * documentation and/or other materials provided with the distribution. | | 20 | * documentation and/or other materials provided with the distribution. |
21 | * | | 21 | * |
22 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 22 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
23 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 23 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
24 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 24 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
25 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 25 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
26 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 26 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
32 | * POSSIBILITY OF SUCH DAMAGE. | | 32 | * POSSIBILITY OF SUCH DAMAGE. |
33 | */ | | 33 | */ |
34 | | | 34 | |
35 | /* | | 35 | /* |
36 | * Copyright (c) 1991, 1993 | | 36 | * Copyright (c) 1991, 1993 |
37 | * The Regents of the University of California. All rights reserved. | | 37 | * The Regents of the University of California. All rights reserved. |
38 | * | | 38 | * |
39 | * This code is derived from software contributed to Berkeley by | | 39 | * This code is derived from software contributed to Berkeley by |
40 | * the Systems Programming Group of the University of Utah Computer | | 40 | * the Systems Programming Group of the University of Utah Computer |
41 | * Science Department. | | 41 | * Science Department. |
42 | * | | 42 | * |
43 | * Redistribution and use in source and binary forms, with or without | | 43 | * Redistribution and use in source and binary forms, with or without |
44 | * modification, are permitted provided that the following conditions | | 44 | * modification, are permitted provided that the following conditions |
45 | * are met: | | 45 | * are met: |
46 | * 1. Redistributions of source code must retain the above copyright | | 46 | * 1. Redistributions of source code must retain the above copyright |
47 | * notice, this list of conditions and the following disclaimer. | | 47 | * notice, this list of conditions and the following disclaimer. |
48 | * 2. Redistributions in binary form must reproduce the above copyright | | 48 | * 2. Redistributions in binary form must reproduce the above copyright |
49 | * notice, this list of conditions and the following disclaimer in the | | 49 | * notice, this list of conditions and the following disclaimer in the |
50 | * documentation and/or other materials provided with the distribution. | | 50 | * documentation and/or other materials provided with the distribution. |
51 | * 3. Neither the name of the University nor the names of its contributors | | 51 | * 3. Neither the name of the University nor the names of its contributors |
52 | * may be used to endorse or promote products derived from this software | | 52 | * may be used to endorse or promote products derived from this software |
53 | * without specific prior written permission. | | 53 | * without specific prior written permission. |
54 | * | | 54 | * |
55 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 55 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
56 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 56 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
57 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 57 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
58 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 58 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
59 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 59 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
60 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 60 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
61 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 61 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
62 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 62 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
63 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 63 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
64 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 64 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
65 | * SUCH DAMAGE. | | 65 | * SUCH DAMAGE. |
66 | * | | 66 | * |
67 | * @(#)pmap.c 8.6 (Berkeley) 5/27/94 | | 67 | * @(#)pmap.c 8.6 (Berkeley) 5/27/94 |
68 | */ | | 68 | */ |
69 | | | 69 | |
70 | /* | | 70 | /* |
71 | * DEC Alpha physical map management code. | | 71 | * DEC Alpha physical map management code. |
72 | * | | 72 | * |
73 | * History: | | 73 | * History: |
74 | * | | 74 | * |
75 | * This pmap started life as a Motorola 68851/68030 pmap, | | 75 | * This pmap started life as a Motorola 68851/68030 pmap, |
76 | * written by Mike Hibler at the University of Utah. | | 76 | * written by Mike Hibler at the University of Utah. |
77 | * | | 77 | * |
78 | * It was modified for the DEC Alpha by Chris Demetriou | | 78 | * It was modified for the DEC Alpha by Chris Demetriou |
79 | * at Carnegie Mellon University. | | 79 | * at Carnegie Mellon University. |
80 | * | | 80 | * |
81 | * Support for non-contiguous physical memory was added by | | 81 | * Support for non-contiguous physical memory was added by |
82 | * Jason R. Thorpe of the Numerical Aerospace Simulation | | 82 | * Jason R. Thorpe of the Numerical Aerospace Simulation |
83 | * Facility, NASA Ames Research Center and Chris Demetriou. | | 83 | * Facility, NASA Ames Research Center and Chris Demetriou. |
84 | * | | 84 | * |
85 | * Page table management and a major cleanup were undertaken | | 85 | * Page table management and a major cleanup were undertaken |
86 | * by Jason R. Thorpe, with lots of help from Ross Harvey of | | 86 | * by Jason R. Thorpe, with lots of help from Ross Harvey of |
87 | * Avalon Computer Systems and from Chris Demetriou. | | 87 | * Avalon Computer Systems and from Chris Demetriou. |
88 | * | | 88 | * |
89 | * Support for the new UVM pmap interface was written by | | 89 | * Support for the new UVM pmap interface was written by |
90 | * Jason R. Thorpe. | | 90 | * Jason R. Thorpe. |
91 | * | | 91 | * |
92 | * Support for ASNs was written by Jason R. Thorpe, again | | 92 | * Support for ASNs was written by Jason R. Thorpe, again |
93 | * with help from Chris Demetriou and Ross Harvey. | | 93 | * with help from Chris Demetriou and Ross Harvey. |
94 | * | | 94 | * |
95 | * The locking protocol was written by Jason R. Thorpe, | | 95 | * The locking protocol was written by Jason R. Thorpe, |
96 | * using Chuck Cranor's i386 pmap for UVM as a model. | | 96 | * using Chuck Cranor's i386 pmap for UVM as a model. |
97 | * | | 97 | * |
98 | * TLB shootdown code was written (and then subsequently | | 98 | * TLB shootdown code was written (and then subsequently |
99 | * rewritten some years later, borrowing some ideas from | | 99 | * rewritten some years later, borrowing some ideas from |
100 | * the x86 pmap) by Jason R. Thorpe. | | 100 | * the x86 pmap) by Jason R. Thorpe. |
101 | * | | 101 | * |
102 | * Multiprocessor modifications by Andrew Doran and | | 102 | * Multiprocessor modifications by Andrew Doran and |
103 | * Jason R. Thorpe. | | 103 | * Jason R. Thorpe. |
104 | * | | 104 | * |
105 | * Notes: | | 105 | * Notes: |
106 | * | | 106 | * |
107 | * All user page table access is done via K0SEG. Kernel | | 107 | * All user page table access is done via K0SEG. Kernel |
108 | * page table access is done via the recursive Virtual Page | | 108 | * page table access is done via the recursive Virtual Page |
109 | * Table because kernel PT pages are pre-allocated and never | | 109 | * Table because kernel PT pages are pre-allocated and never |
110 | * freed, so no VPT fault handling is required. | | 110 | * freed, so no VPT fault handling is required. |
111 | */ | | 111 | */ |
112 | | | 112 | |
113 | /* | | 113 | /* |
114 | * Manages physical address maps. | | 114 | * Manages physical address maps. |
115 | * | | 115 | * |
116 | * Since the information managed by this module is | | 116 | * Since the information managed by this module is |
117 | * also stored by the logical address mapping module, | | 117 | * also stored by the logical address mapping module, |
118 | * this module may throw away valid virtual-to-physical | | 118 | * this module may throw away valid virtual-to-physical |
119 | * mappings at almost any time. However, invalidations | | 119 | * mappings at almost any time. However, invalidations |
120 | * of virtual-to-physical mappings must be done as | | 120 | * of virtual-to-physical mappings must be done as |
121 | * requested. | | 121 | * requested. |
122 | * | | 122 | * |
123 | * In order to cope with hardware architectures which | | 123 | * In order to cope with hardware architectures which |
124 | * make virtual-to-physical map invalidates expensive, | | 124 | * make virtual-to-physical map invalidates expensive, |
125 | * this module may delay invalidation or reduced-protection | | 125 | * this module may delay invalidation or reduced-protection |
126 | * operations until such time as they are actually | | 126 | * operations until such time as they are actually |
127 | * necessary. This module is given full information as | | 127 | * necessary. This module is given full information as |
128 | * to which processors are currently using which maps, | | 128 | * to which processors are currently using which maps, |
129 | * and to when physical maps must be made correct. | | 129 | * and to when physical maps must be made correct. |
130 | */ | | 130 | */ |
131 | | | 131 | |
132 | #include "opt_lockdebug.h" | | 132 | #include "opt_lockdebug.h" |
133 | #include "opt_sysv.h" | | 133 | #include "opt_sysv.h" |
134 | #include "opt_multiprocessor.h" | | 134 | #include "opt_multiprocessor.h" |
135 | | | 135 | |
136 | #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ | | 136 | #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ |
137 | | | 137 | |
138 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.284 2021/05/30 01:41:45 thorpej Exp $"); | | 138 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.285 2021/05/30 04:04:26 thorpej Exp $"); |
139 | | | 139 | |
140 | #include <sys/param.h> | | 140 | #include <sys/param.h> |
141 | #include <sys/systm.h> | | 141 | #include <sys/systm.h> |
142 | #include <sys/kernel.h> | | 142 | #include <sys/kernel.h> |
143 | #include <sys/proc.h> | | 143 | #include <sys/proc.h> |
144 | #include <sys/malloc.h> | | 144 | #include <sys/malloc.h> |
145 | #include <sys/pool.h> | | 145 | #include <sys/pool.h> |
146 | #include <sys/buf.h> | | 146 | #include <sys/buf.h> |
147 | #include <sys/evcnt.h> | | 147 | #include <sys/evcnt.h> |
148 | #include <sys/atomic.h> | | 148 | #include <sys/atomic.h> |
149 | #include <sys/cpu.h> | | 149 | #include <sys/cpu.h> |
150 | | | 150 | |
151 | #include <uvm/uvm.h> | | 151 | #include <uvm/uvm.h> |
152 | | | 152 | |
153 | #if defined(MULTIPROCESSOR) | | 153 | #if defined(MULTIPROCESSOR) |
154 | #include <machine/rpb.h> | | 154 | #include <machine/rpb.h> |
155 | #endif | | 155 | #endif |
156 | | | 156 | |
157 | #ifdef DEBUG | | 157 | #ifdef DEBUG |
158 | #define PDB_FOLLOW 0x0001 | | 158 | #define PDB_FOLLOW 0x0001 |
159 | #define PDB_INIT 0x0002 | | 159 | #define PDB_INIT 0x0002 |
160 | #define PDB_ENTER 0x0004 | | 160 | #define PDB_ENTER 0x0004 |
161 | #define PDB_REMOVE 0x0008 | | 161 | #define PDB_REMOVE 0x0008 |
162 | #define PDB_CREATE 0x0010 | | 162 | #define PDB_CREATE 0x0010 |
163 | #define PDB_PTPAGE 0x0020 | | 163 | #define PDB_PTPAGE 0x0020 |
164 | #define PDB_ASN 0x0040 | | 164 | #define PDB_ASN 0x0040 |
165 | #define PDB_BITS 0x0080 | | 165 | #define PDB_BITS 0x0080 |
166 | #define PDB_COLLECT 0x0100 | | 166 | #define PDB_COLLECT 0x0100 |
167 | #define PDB_PROTECT 0x0200 | | 167 | #define PDB_PROTECT 0x0200 |
168 | #define PDB_BOOTSTRAP 0x1000 | | 168 | #define PDB_BOOTSTRAP 0x1000 |
169 | #define PDB_PARANOIA 0x2000 | | 169 | #define PDB_PARANOIA 0x2000 |
170 | #define PDB_WIRING 0x4000 | | 170 | #define PDB_WIRING 0x4000 |
171 | #define PDB_PVDUMP 0x8000 | | 171 | #define PDB_PVDUMP 0x8000 |
172 | | | 172 | |
173 | int debugmap = 0; | | 173 | int debugmap = 0; |
174 | int pmapdebug = PDB_PARANOIA; | | 174 | int pmapdebug = PDB_PARANOIA; |
175 | #endif | | 175 | #endif |
176 | | | 176 | |
177 | #if defined(MULTIPROCESSOR) | | 177 | #if defined(MULTIPROCESSOR) |
178 | #define PMAP_MP(x) x | | 178 | #define PMAP_MP(x) x |
179 | #else | | 179 | #else |
180 | #define PMAP_MP(x) __nothing | | 180 | #define PMAP_MP(x) __nothing |
181 | #endif /* MULTIPROCESSOR */ | | 181 | #endif /* MULTIPROCESSOR */ |
182 | | | 182 | |
183 | /* | | 183 | /* |
184 | * Given a map and a machine independent protection code, | | 184 | * Given a map and a machine independent protection code, |
185 | * convert to an alpha protection code. | | 185 | * convert to an alpha protection code. |
186 | */ | | 186 | */ |
187 | #define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p]) | | 187 | #define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p]) |
188 | static int protection_codes[2][8] __read_mostly; | | 188 | static int protection_codes[2][8] __read_mostly; |
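The table is indexed first by kernel (0) vs. user (1), then by the three MI
VM_PROT_* bits. As a minimal sketch of how such a table could be filled in
at bootstrap time, assuming the usual alpha PTE protection bits (PG_ASM,
PG_KRE, PG_KWE, PG_URE, PG_UWE); the function name and exact policy here are
illustrative, not this file's actual bootstrap code:

	/* Hypothetical sketch, not the actual pmap_bootstrap() logic. */
	static void
	pmap_init_prot_codes_sketch(void)
	{
		int prot;

		for (prot = 0; prot < 8; prot++) {
			int kcode = PG_ASM;	/* kernel mappings match any ASN */
			int ucode = 0;

			if (prot & VM_PROT_READ) {
				kcode |= PG_KRE;
				ucode |= PG_URE;
			}
			if (prot & VM_PROT_WRITE) {
				kcode |= PG_KWE;
				ucode |= PG_UWE;
			}
			if (prot & VM_PROT_EXECUTE) {
				/* Assumes execute implies read permission. */
				kcode |= PG_KRE;
				ucode |= PG_URE;
			}
			protection_codes[0][prot] = kcode;	/* pmap_kernel() */
			protection_codes[1][prot] = ucode;	/* user pmaps */
		}
	}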
189 | | | 189 | |
190 | /* | | 190 | /* |
191 | * kernel_lev1map: | | 191 | * kernel_lev1map: |
192 | * | | 192 | * |
193 | * Kernel level 1 page table. This maps all kernel level 2 | | 193 | * Kernel level 1 page table. This maps all kernel level 2 |
194 | * page table pages, and is used as a template for all user | | 194 | * page table pages, and is used as a template for all user |
195 | * pmap level 1 page tables. When a new user level 1 page | | 195 | * pmap level 1 page tables. When a new user level 1 page |
196 | * table is allocated, all kernel_lev1map PTEs for kernel | | 196 | * table is allocated, all kernel_lev1map PTEs for kernel |
197 | * addresses are copied to the new map. | | 197 | * addresses are copied to the new map. |
198 | * | | 198 | * |
199 | * The kernel also has an initial set of kernel level 2 page | | 199 | * The kernel also has an initial set of kernel level 2 page |
200 | * table pages. These map the kernel level 3 page table pages. | | 200 | * table pages. These map the kernel level 3 page table pages. |
201 | * As kernel level 3 page table pages are added, more level 2 | | 201 | * As kernel level 3 page table pages are added, more level 2 |
202 | * page table pages may be added to map them. These pages are | | 202 | * page table pages may be added to map them. These pages are |
203 | * never freed. | | 203 | * never freed. |
204 | * | | 204 | * |
205 | * Finally, the kernel also has an initial set of kernel level | | 205 | * Finally, the kernel also has an initial set of kernel level |
206 | * 3 page table pages. These map pages in K1SEG. More level | | 206 | * 3 page table pages. These map pages in K1SEG. More level |
207 | * 3 page table pages may be added at run-time if additional | | 207 | * 3 page table pages may be added at run-time if additional |
208 | * K1SEG address space is required. These pages are never freed. | | 208 | * K1SEG address space is required. These pages are never freed. |
209 | * | | 209 | * |
210 | * NOTE: When mappings are inserted into the kernel pmap, all | | 210 | * NOTE: When mappings are inserted into the kernel pmap, all |
211 | * level 2 and level 3 page table pages must already be allocated | | 211 | * level 2 and level 3 page table pages must already be allocated |
212 | * and mapped into the parent page table. | | 212 | * and mapped into the parent page table. |
213 | */ | | 213 | */ |
214 | pt_entry_t *kernel_lev1map __read_mostly; | | 214 | pt_entry_t *kernel_lev1map __read_mostly; |
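Each user level 1 page table therefore starts life as a copy of the kernel
entries in kernel_lev1map. A hedged sketch of that copy step, assuming
l1pte_index(), NPTEPG, and VM_MIN_KERNEL_ADDRESS from the alpha headers; in
this file the real work is done by the pool cache constructor for level 1
page tables (which, as an assumption here, also installs the recursive VPT
self-mapping):

	/*
	 * Illustrative only: populate a freshly allocated user level 1
	 * page table from the kernel_lev1map template.
	 */
	static void
	pmap_l1pt_copy_sketch(pt_entry_t *l1pt)
	{
		const u_long k0 = l1pte_index(VM_MIN_KERNEL_ADDRESS);
		u_long i;

		/* The user portion starts out with no valid mappings. */
		memset(l1pt, 0, k0 * sizeof(pt_entry_t));

		/* The kernel portion mirrors the template. */
		for (i = k0; i < NPTEPG; i++)
			l1pt[i] = kernel_lev1map[i];
	}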
215 | | | 215 | |
216 | /* | | 216 | /* |
217 | * Virtual Page Table. | | 217 | * Virtual Page Table. |
218 | */ | | 218 | */ |
219 | static pt_entry_t *VPT __read_mostly; | | 219 | static pt_entry_t *VPT __read_mostly; |
220 | | | 220 | |
221 | static struct { | | 221 | static struct { |
222 | struct pmap k_pmap; | | 222 | struct pmap k_pmap; |
223 | } kernel_pmap_store __cacheline_aligned; | | 223 | } kernel_pmap_store __cacheline_aligned; |
224 | | | 224 | |
225 | struct pmap *const kernel_pmap_ptr = &kernel_pmap_store.k_pmap; | | 225 | struct pmap *const kernel_pmap_ptr = &kernel_pmap_store.k_pmap; |
226 | | | 226 | |
227 | /* PA of first available physical page */ | | 227 | /* PA of first available physical page */ |
228 | paddr_t avail_start __read_mostly; | | 228 | paddr_t avail_start __read_mostly; |
229 | | | 229 | |
230 | /* PA of last available physical page */ | | 230 | /* PA of last available physical page */ |
231 | paddr_t avail_end __read_mostly; | | 231 | paddr_t avail_end __read_mostly; |
232 | | | 232 | |
233 | /* VA of last avail page (end of kernel AS) */ | | 233 | /* VA of last avail page (end of kernel AS) */ |
234 | static vaddr_t virtual_end __read_mostly; | | 234 | static vaddr_t virtual_end __read_mostly; |
235 | | | 235 | |
236 | /* Has pmap_init completed? */ | | 236 | /* Has pmap_init completed? */ |
237 | static bool pmap_initialized __read_mostly; | | 237 | static bool pmap_initialized __read_mostly; |
238 | | | 238 | |
239 | /* Instrumentation */ | | 239 | /* Instrumentation */ |
240 | u_long pmap_pages_stolen __read_mostly; | | 240 | u_long pmap_pages_stolen __read_mostly; |
241 | | | 241 | |
242 | /* | | 242 | /* |
243 | * This variable contains the number of CPU IDs we need to allocate | | 243 | * This variable contains the number of CPU IDs we need to allocate |
244 | * space for when allocating the pmap structure. It is used to | | 244 | * space for when allocating the pmap structure. It is used to |
245 | * size a per-CPU array of ASN and ASN generation numbers. | | 245 | * size a per-CPU array of ASN and ASN generation numbers. |
246 | */ | | 246 | */ |
247 | static u_long pmap_ncpuids __read_mostly; | | 247 | static u_long pmap_ncpuids __read_mostly; |
248 | | | 248 | |
249 | #ifndef PMAP_PV_LOWAT | | 249 | #ifndef PMAP_PV_LOWAT |
250 | #define PMAP_PV_LOWAT 16 | | 250 | #define PMAP_PV_LOWAT 16 |
251 | #endif | | 251 | #endif |
252 | int pmap_pv_lowat __read_mostly = PMAP_PV_LOWAT; | | 252 | int pmap_pv_lowat __read_mostly = PMAP_PV_LOWAT; |
253 | | | 253 | |
254 | /* | | 254 | /* |
255 | * List of all pmaps, used to update them when e.g. additional kernel | | 255 | * List of all pmaps, used to update them when e.g. additional kernel |
256 | * page tables are allocated. This list is kept LRU-ordered by | | 256 | * page tables are allocated. This list is kept LRU-ordered by |
257 | * pmap_activate(). | | 257 | * pmap_activate(). |
258 | */ | | 258 | */ |
259 | static TAILQ_HEAD(, pmap) pmap_all_pmaps __cacheline_aligned; | | 259 | static TAILQ_HEAD(, pmap) pmap_all_pmaps __cacheline_aligned; |
260 | | | 260 | |
261 | /* | | 261 | /* |
262 | * The pools from which pmap structures and sub-structures are allocated. | | 262 | * The pools from which pmap structures and sub-structures are allocated. |
263 | */ | | 263 | */ |
264 | static struct pool_cache pmap_pmap_cache __read_mostly; | | 264 | static struct pool_cache pmap_pmap_cache __read_mostly; |
265 | static struct pool_cache pmap_l1pt_cache __read_mostly; | | 265 | static struct pool_cache pmap_l1pt_cache __read_mostly; |
266 | static struct pool_cache pmap_pv_cache __read_mostly; | | 266 | static struct pool_cache pmap_pv_cache __read_mostly; |
267 | | | 267 | |
268 | CTASSERT(offsetof(struct pmap, pm_percpu[0]) == COHERENCY_UNIT); | | 268 | CTASSERT(offsetof(struct pmap, pm_percpu[0]) == COHERENCY_UNIT); |
269 | CTASSERT(PMAP_SIZEOF(ALPHA_MAXPROCS) < ALPHA_PGBYTES); | | 269 | CTASSERT(PMAP_SIZEOF(ALPHA_MAXPROCS) < ALPHA_PGBYTES); |
270 | CTASSERT(sizeof(struct pmap_percpu) == COHERENCY_UNIT); | | 270 | CTASSERT(sizeof(struct pmap_percpu) == COHERENCY_UNIT); |
271 | | | 271 | |
272 | /* | | 272 | /* |
273 | * Address Space Numbers. | | 273 | * Address Space Numbers. |
274 | * | | 274 | * |
275 | * On many implementations of the Alpha architecture, the TLB entries and | | 275 | * On many implementations of the Alpha architecture, the TLB entries and |
276 | * I-cache blocks are tagged with a unique number within an implementation- | | 276 | * I-cache blocks are tagged with a unique number within an implementation- |
277 | * specified range. When a process context becomes active, the ASN is used | | 277 | * specified range. When a process context becomes active, the ASN is used |
278 | * to match TLB entries; if a TLB entry for a particular VA does not match | | 278 | * to match TLB entries; if a TLB entry for a particular VA does not match |
279 | * the current ASN, it is ignored (one could think of the processor as | | 279 | * the current ASN, it is ignored (one could think of the processor as |
280 | * having a collection of <max ASN> separate TLBs). This allows operating | | 280 | * having a collection of <max ASN> separate TLBs). This allows operating |
281 | * system software to skip the TLB flush that would otherwise be necessary | | 281 | * system software to skip the TLB flush that would otherwise be necessary |
282 | * at context switch time. | | 282 | * at context switch time. |
283 | * | | 283 | * |
284 | * Alpha PTEs have a bit in them (PG_ASM - Address Space Match) that | | 284 | * Alpha PTEs have a bit in them (PG_ASM - Address Space Match) that |
285 | * causes TLB entries to match any ASN. The PALcode also provides | | 285 | * causes TLB entries to match any ASN. The PALcode also provides |
286 | * a TBI (Translation Buffer Invalidate) operation that flushes all | | 286 | * a TBI (Translation Buffer Invalidate) operation that flushes all |
287 | * TLB entries that _do not_ have PG_ASM. We use this bit for kernel | | 287 | * TLB entries that _do not_ have PG_ASM. We use this bit for kernel |
288 | * mappings, so that invalidation of all user mappings does not invalidate | | 288 | * mappings, so that invalidation of all user mappings does not invalidate |
289 | * kernel mappings (which are consistent across all processes). | | 289 | * kernel mappings (which are consistent across all processes). |
290 | * | | 290 | * |
291 | * pmap_next_asn always indicates the next ASN to use. When | | 291 | * pmap_next_asn always indicates the next ASN to use. When |
292 | * pmap_next_asn exceeds pmap_max_asn, we start a new ASN generation. | | 292 | * pmap_next_asn exceeds pmap_max_asn, we start a new ASN generation. |
293 | * | | 293 | * |
294 | * When a new ASN generation is created, the per-process (i.e. non-PG_ASM) | | 294 | * When a new ASN generation is created, the per-process (i.e. non-PG_ASM) |
295 | * TLB entries and the I-cache are flushed, the generation number is bumped, | | 295 | * TLB entries and the I-cache are flushed, the generation number is bumped, |
296 | * and pmap_next_asn is changed to indicate the first non-reserved ASN. | | 296 | * and pmap_next_asn is changed to indicate the first non-reserved ASN. |
297 | * | | 297 | * |
298 | * We reserve ASN #0 for pmaps that use the global kernel_lev1map. This | | 298 | * We reserve ASN #0 for pmaps that use the global kernel_lev1map. This |
299 | * ensures that LWPs using the kernel pmap make no accidental accesses | | 299 | * ensures that LWPs using the kernel pmap make no accidental accesses |
300 | * to user space. This is important because | | 300 | * to user space. This is important because |
301 | * the PALcode may use the recursive VPT to service TLB misses. | | 301 | * the PALcode may use the recursive VPT to service TLB misses. |
302 | * | | 302 | * |
303 | * By reserving an ASN for the kernel, we are guaranteeing that an lwp | | 303 | * By reserving an ASN for the kernel, we are guaranteeing that an lwp |
304 | * will not see any valid user space TLB entries until it passes through | | 304 | * will not see any valid user space TLB entries until it passes through |
305 | * pmap_activate() for the first time. | | 305 | * pmap_activate() for the first time. |
306 | * | | 306 | * |
307 | * On processors that do not support ASNs, the PALcode invalidates | | 307 | * On processors that do not support ASNs, the PALcode invalidates |
308 | * non-ASM TLB entries automatically on swpctx. We completely skip | | 308 | * non-ASM TLB entries automatically on swpctx. We completely skip |
309 | * the ASN machinery in this case because the PALcode neither reads | | 309 | * the ASN machinery in this case because the PALcode neither reads |
310 | * nor writes that field of the HWPCB. | | 310 | * nor writes that field of the HWPCB. |
311 | */ | | 311 | */ |
312 | | | 312 | |
313 | /* max ASN supported by the system */ | | 313 | /* max ASN supported by the system */ |
314 | static u_int pmap_max_asn __read_mostly; | | 314 | static u_int pmap_max_asn __read_mostly; |
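A hedged sketch of the generation scheme described above; the per-CPU field
names (ci_next_asn, ci_asn_gen) and the helper shape are assumptions for
illustration, not this file's actual ASN allocator:

	/* Illustrative only; field and function names are assumed. */
	static u_int
	pmap_asn_alloc_sketch(struct cpu_info *ci)
	{
		if (ci->ci_next_asn > pmap_max_asn) {
			/*
			 * Out of ASNs: flush all non-ASM TLB entries and
			 * the I-cache, bump the generation, and restart
			 * after the reserved kernel ASN (#0).
			 */
			ALPHA_TBIAP();
			alpha_pal_imb();
			ci->ci_asn_gen++;
			ci->ci_next_asn = 1;	/* ASN 0 reserved for kernel */
		}
		return ci->ci_next_asn++;
	}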
315 | | | 315 | |
316 | /* | | 316 | /* |
317 | * Locking: | | 317 | * Locking: |
318 | * | | 318 | * |
319 | * READ/WRITE LOCKS | | 319 | * READ/WRITE LOCKS |
320 | * ---------------- | | 320 | * ---------------- |
321 | * | | 321 | * |
322 | * * pmap_main_lock - This lock is used to prevent deadlock and/or | | 322 | * * pmap_main_lock - This lock is used to prevent deadlock and/or |
323 | * provide mutex access to the pmap module. Most operations lock | | 323 | * provide mutex access to the pmap module. Most operations lock |
324 | * the pmap first, then PV lists as needed. However, some operations, | | 324 | * the pmap first, then PV lists as needed. However, some operations, |
325 | * such as pmap_page_protect(), lock the PV lists before locking | | 325 | * such as pmap_page_protect(), lock the PV lists before locking |
326 | * the pmaps. To prevent deadlock, we require a mutex lock on the | | 326 | * the pmaps. To prevent deadlock, we require a mutex lock on the |
327 | * pmap module if locking in the PV->pmap direction. This is | | 327 | * pmap module if locking in the PV->pmap direction. This is |
328 | * implemented by acquiring a (shared) read lock on pmap_main_lock | | 328 | * implemented by acquiring a (shared) read lock on pmap_main_lock |
329 | * if locking pmap->PV and an (exclusive) write lock if locking in | | 329 | * if locking pmap->PV and an (exclusive) write lock if locking in |
330 | * the PV->pmap direction. Since only one thread can hold a write | | 330 | * the PV->pmap direction. Since only one thread can hold a write |
331 | * lock at a time, this provides the mutex. | | 331 | * lock at a time, this provides the mutex. |
332 | * | | 332 | * |
333 | * MUTEXES | | 333 | * MUTEXES |
334 | * ------- | | 334 | * ------- |
335 | * | | 335 | * |
336 | * * pmap lock (global hash) - These locks protect the pmap structures. | | 336 | * * pmap lock (global hash) - These locks protect the pmap structures. |
337 | * | | 337 | * |
338 | * * pmap activation lock (global hash) - These IPL_SCHED spin locks | | 338 | * * pmap activation lock (global hash) - These IPL_SCHED spin locks |
339 | * synchronize pmap_activate() and TLB shootdowns. This has a lock | | 339 | * synchronize pmap_activate() and TLB shootdowns. This has a lock |
340 | * ordering constraint with the tlb_lock: | | 340 | * ordering constraint with the tlb_lock: |
341 | * | | 341 | * |
342 | * tlb_lock -> pmap activation lock | | 342 | * tlb_lock -> pmap activation lock |
343 | * | | 343 | * |
344 | * * pvh_lock (global hash) - These locks protect the PV lists for | | 344 | * * pvh_lock (global hash) - These locks protect the PV lists for |
345 | * managed pages. | | 345 | * managed pages. |
346 | * | | 346 | * |
347 | * * tlb_lock - This IPL_VM lock serializes local and remote TLB | | 347 | * * tlb_lock - This IPL_VM lock serializes local and remote TLB |
348 | * invalidation. | | 348 | * invalidation. |
349 | * | | 349 | * |
350 | * * pmap_all_pmaps_lock - This lock protects the global list of | | 350 | * * pmap_all_pmaps_lock - This lock protects the global list of |
351 | * all pmaps. | | 351 | * all pmaps. |
352 | * | | 352 | * |
353 | * * pmap_growkernel_lock - This lock protects pmap_growkernel() | | 353 | * * pmap_growkernel_lock - This lock protects pmap_growkernel() |
354 | * and the virtual_end variable. | | 354 | * and the virtual_end variable. |
355 | * | | 355 | * |
356 | * There is a lock ordering constraint for pmap_growkernel_lock. | | 356 | * There is a lock ordering constraint for pmap_growkernel_lock. |
357 | * pmap_growkernel() acquires the locks in the following order: | | 357 | * pmap_growkernel() acquires the locks in the following order: |
358 | * | | 358 | * |
359 | * pmap_growkernel_lock (write) -> pmap_all_pmaps_lock -> | | 359 | * pmap_growkernel_lock (write) -> pmap_all_pmaps_lock -> |
360 | * pmap lock | | 360 | * pmap lock |
361 | * | | 361 | * |
362 | * We need to ensure consistency between user pmaps and the | | 362 | * We need to ensure consistency between user pmaps and the |
363 | * kernel_lev1map. For this reason, pmap_growkernel_lock must | | 363 | * kernel_lev1map. For this reason, pmap_growkernel_lock must |
364 | * be held to prevent kernel_lev1map changing across pmaps | | 364 | * be held to prevent kernel_lev1map changing across pmaps |
365 | * being added to / removed from the global pmaps list. | | 365 | * being added to / removed from the global pmaps list. |
366 | * | | 366 | * |
367 | * Address space number management (global ASN counters and per-pmap | | 367 | * Address space number management (global ASN counters and per-pmap |
368 | * ASN state) is not locked; it uses arrays of values indexed | | 368 | * ASN state) is not locked; it uses arrays of values indexed |
369 | * per-processor. | | 369 | * per-processor. |
370 | * | | 370 | * |
371 | * All internal functions which operate on a pmap are called | | 371 | * All internal functions which operate on a pmap are called |
372 | * with the pmap already locked by the caller (which will be | | 372 | * with the pmap already locked by the caller (which will be |
373 | * an interface function). | | 373 | * an interface function). |
374 | */ | | 374 | */ |
375 | static krwlock_t pmap_main_lock __cacheline_aligned; | | 375 | static krwlock_t pmap_main_lock __cacheline_aligned; |
376 | static kmutex_t pmap_all_pmaps_lock __cacheline_aligned; | | 376 | static kmutex_t pmap_all_pmaps_lock __cacheline_aligned; |
377 | static krwlock_t pmap_growkernel_lock __cacheline_aligned; | | 377 | static krwlock_t pmap_growkernel_lock __cacheline_aligned; |
378 | | | 378 | |
379 | #define PMAP_MAP_TO_HEAD_LOCK() rw_enter(&pmap_main_lock, RW_READER) | | 379 | #define PMAP_MAP_TO_HEAD_LOCK() rw_enter(&pmap_main_lock, RW_READER) |
380 | #define PMAP_MAP_TO_HEAD_UNLOCK() rw_exit(&pmap_main_lock) | | 380 | #define PMAP_MAP_TO_HEAD_UNLOCK() rw_exit(&pmap_main_lock) |
381 | #define PMAP_HEAD_TO_MAP_LOCK() rw_enter(&pmap_main_lock, RW_WRITER) | | 381 | #define PMAP_HEAD_TO_MAP_LOCK() rw_enter(&pmap_main_lock, RW_WRITER) |
382 | #define PMAP_HEAD_TO_MAP_UNLOCK() rw_exit(&pmap_main_lock) | | 382 | #define PMAP_HEAD_TO_MAP_UNLOCK() rw_exit(&pmap_main_lock) |
383 | | | 383 | |
384 | static union { | | 384 | static union { |
385 | kmutex_t lock; | | 385 | kmutex_t lock; |
386 | uint8_t pad[COHERENCY_UNIT]; | | 386 | uint8_t pad[COHERENCY_UNIT]; |
387 | } pmap_pvh_locks[64] __cacheline_aligned; | | 387 | } pmap_pvh_locks[64] __cacheline_aligned; |
388 | | | 388 | |
389 | #define PVH_LOCK_HASH(pg) \ | | 389 | #define PVH_LOCK_HASH(pg) \ |
390 | ((((uintptr_t)(pg)) >> 6) & 63) | | 390 | ((((uintptr_t)(pg)) >> 6) & 63) |
391 | | | 391 | |
392 | static inline kmutex_t * | | 392 | static inline kmutex_t * |
393 | pmap_pvh_lock(struct vm_page *pg) | | 393 | pmap_pvh_lock(struct vm_page *pg) |
394 | { | | 394 | { |
395 | return &pmap_pvh_locks[PVH_LOCK_HASH(pg)].lock; | | 395 | return &pmap_pvh_locks[PVH_LOCK_HASH(pg)].lock; |
396 | } | | 396 | } |
397 | | | 397 | |
398 | static union { | | 398 | static union { |
399 | struct { | | 399 | struct { |
400 | kmutex_t lock; | | 400 | kmutex_t lock; |
401 | kmutex_t activation_lock; | | 401 | kmutex_t activation_lock; |
402 | } locks; | | 402 | } locks; |
403 | uint8_t pad[COHERENCY_UNIT]; | | 403 | uint8_t pad[COHERENCY_UNIT]; |
404 | } pmap_pmap_locks[64] __cacheline_aligned; | | 404 | } pmap_pmap_locks[64] __cacheline_aligned; |
405 | | | 405 | |
406 | #define PMAP_LOCK_HASH(pm) \ | | 406 | #define PMAP_LOCK_HASH(pm) \ |
407 | ((((uintptr_t)(pm)) >> 6) & 63) | | 407 | ((((uintptr_t)(pm)) >> 6) & 63) |
408 | | | 408 | |
409 | static inline kmutex_t * | | 409 | static inline kmutex_t * |
410 | pmap_pmap_lock(pmap_t const pmap) | | 410 | pmap_pmap_lock(pmap_t const pmap) |
411 | { | | 411 | { |
412 | return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.lock; | | 412 | return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.lock; |
413 | } | | 413 | } |
414 | | | 414 | |
415 | static inline kmutex_t * | | 415 | static inline kmutex_t * |
416 | pmap_activation_lock(pmap_t const pmap) | | 416 | pmap_activation_lock(pmap_t const pmap) |
417 | { | | 417 | { |
418 | return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.activation_lock; | | 418 | return &pmap_pmap_locks[PMAP_LOCK_HASH(pmap)].locks.activation_lock; |
419 | } | | 419 | } |
420 | | | 420 | |
421 | #define PMAP_LOCK(pmap) mutex_enter(pmap_pmap_lock(pmap)) | | 421 | #define PMAP_LOCK(pmap) mutex_enter(pmap_pmap_lock(pmap)) |
422 | #define PMAP_UNLOCK(pmap) mutex_exit(pmap_pmap_lock(pmap)) | | 422 | #define PMAP_UNLOCK(pmap) mutex_exit(pmap_pmap_lock(pmap)) |
423 | | | 423 | |
424 | #define PMAP_ACT_LOCK(pmap) mutex_spin_enter(pmap_activation_lock(pmap)) | | 424 | #define PMAP_ACT_LOCK(pmap) mutex_spin_enter(pmap_activation_lock(pmap)) |
425 | #define PMAP_ACT_TRYLOCK(pmap) mutex_tryenter(pmap_activation_lock(pmap)) | | 425 | #define PMAP_ACT_TRYLOCK(pmap) mutex_tryenter(pmap_activation_lock(pmap)) |
426 | #define PMAP_ACT_UNLOCK(pmap) mutex_spin_exit(pmap_activation_lock(pmap)) | | 426 | #define PMAP_ACT_UNLOCK(pmap) mutex_spin_exit(pmap_activation_lock(pmap)) |
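Putting the pieces together, here is a hedged sketch of the PV->pmap
direction described in the locking notes above, loosely modeled on what
pmap_page_protect() must do; the PV-list walk and the pv_next/pv_pmap field
names are assumptions for illustration:

	/* Illustrative only; the PV list layout is assumed. */
	static void
	pmap_pv_walk_sketch(struct vm_page *pg)
	{
		kmutex_t * const lock = pmap_pvh_lock(pg);

		PMAP_HEAD_TO_MAP_LOCK();  /* exclusive: PV->pmap direction */
		mutex_enter(lock);
		/*
		 * With the module mutex held exclusively, it is now safe
		 * to lock each pmap found on the page's PV list:
		 *
		 *	for (pv = ...; pv != NULL; pv = pv->pv_next) {
		 *		PMAP_LOCK(pv->pv_pmap);
		 *		... invalidate or downgrade the mapping ...
		 *		PMAP_UNLOCK(pv->pv_pmap);
		 *	}
		 */
		mutex_exit(lock);
		PMAP_HEAD_TO_MAP_UNLOCK();
	}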
427 | | | 427 | |
428 | #if defined(MULTIPROCESSOR) | | 428 | #if defined(MULTIPROCESSOR) |
429 | #define pmap_all_cpus() cpus_running | | 429 | #define pmap_all_cpus() cpus_running |
430 | #else | | 430 | #else |
431 | #define pmap_all_cpus() ~0UL | | 431 | #define pmap_all_cpus() ~0UL |
432 | #endif /* MULTIPROCESSOR */ | | 432 | #endif /* MULTIPROCESSOR */ |
433 | | | 433 | |
434 | /* | | 434 | /* |
435 | * Generic routine for freeing pages on a pmap_pagelist back to | | 435 | * Generic routine for freeing pages on a pmap_pagelist back to |
436 | * the system. | | 436 | * the system. |
437 | */ | | 437 | */ |
438 | static void | | 438 | static void |
439 | pmap_pagelist_free(struct pmap_pagelist * const list) | | 439 | pmap_pagelist_free(struct pmap_pagelist * const list) |
440 | { | | 440 | { |
441 | struct vm_page *pg; | | 441 | struct vm_page *pg; |
442 | | | 442 | |
443 | while ((pg = LIST_FIRST(list)) != NULL) { | | 443 | while ((pg = LIST_FIRST(list)) != NULL) { |
444 | LIST_REMOVE(pg, pageq.list); | | 444 | LIST_REMOVE(pg, pageq.list); |
445 | uvm_pagefree(pg); | | 445 | uvm_pagefree(pg); |
446 | } | | 446 | } |
447 | } | | 447 | } |
448 | | | 448 | |
449 | /* | | 449 | /* |
450 | * TLB management. | | 450 | * TLB management. |
451 | * | | 451 | * |
452 | * TLB invalidations need to be performed on local and remote CPUs | | 452 | * TLB invalidations need to be performed on local and remote CPUs |
453 | * whenever parts of the PTE that the hardware or PALcode understands | | 453 | * whenever parts of the PTE that the hardware or PALcode understands |
454 | * change. In order to amortize the cost of these operations, we will | | 454 | * change. In order to amortize the cost of these operations, we will |
455 | * queue up to 8 addresses to invalidate in a batch. Any more than | | 455 | * queue up to 8 addresses to invalidate in a batch. Any more than |
456 | * that, and we will hit the entire TLB. | | 456 | * that, and we will hit the entire TLB. |
457 | * | | 457 | * |
458 | * Some things that add complexity: | | 458 | * Some things that add complexity: |
459 | * | | 459 | * |
460 | * ==> ASNs. A CPU may have valid TLB entries for other than the current | | 460 | * ==> ASNs. A CPU may have valid TLB entries for other than the current |
461 | * address space. We can only invalidate TLB entries for the current | | 461 | * address space. We can only invalidate TLB entries for the current |
462 | * address space, so when asked to invalidate a VA for the non-current | | 462 | * address space, so when asked to invalidate a VA for the non-current |
463 | * pmap on a given CPU, we simply invalidate the ASN for that (pmap, CPU) | | 463 | * pmap on a given CPU, we simply invalidate the ASN for that (pmap, CPU) |
464 | * tuple so that a new one is allocated on the next activation on that | | 464 | * tuple so that a new one is allocated on the next activation on that |
465 | * CPU. N.B. that for CPUs that don't implement ASNs, SWPCTX does all | | 465 | * CPU. N.B. that for CPUs that don't implement ASNs, SWPCTX does all |
466 | * the work necessary, so we can skip some work in the pmap module | | 466 | * the work necessary, so we can skip some work in the pmap module |
467 | * itself. | | 467 | * itself. |
468 | * | | 468 | * |
469 | * When a pmap is activated on a given CPU, we set a corresponding | | 469 | * When a pmap is activated on a given CPU, we set a corresponding |
470 | * bit in pmap::pm_cpus, indicating that it potentially has valid | | 470 | * bit in pmap::pm_cpus, indicating that it potentially has valid |
471 | * TLB entries for that address space. This bitmap is then used to | | 471 | * TLB entries for that address space. This bitmap is then used to |
472 | * determine which remote CPUs need to be notified of invalidations. | | 472 | * determine which remote CPUs need to be notified of invalidations. |
473 | * The bit is cleared when the ASN is invalidated on that CPU. | | 473 | * The bit is cleared when the ASN is invalidated on that CPU. |
474 | * | | 474 | * |
475 | * In order to serialize with activating an address space on a | | 475 | * In order to serialize with activating an address space on a |
476 | * given CPU (so that we can reliably send notifications only to | | 476 | * given CPU (so that we can reliably send notifications only to |
477 | * relevant remote CPUs), we acquire the pmap lock in pmap_activate() | | 477 | * relevant remote CPUs), we acquire the pmap lock in pmap_activate() |
478 | * and also hold the lock while remote shootdowns take place. | | 478 | * and also hold the lock while remote shootdowns take place. |
479 | * This does not apply to the kernel pmap; all CPUs are notified about | | 479 | * This does not apply to the kernel pmap; all CPUs are notified about |
480 | * invalidations for the kernel pmap, and the pmap lock is not held | | 480 | * invalidations for the kernel pmap, and the pmap lock is not held |
481 | * in pmap_activate() for the kernel pmap. | | 481 | * in pmap_activate() for the kernel pmap. |
482 | * | | 482 | * |
483 | * ==> P->V operations (e.g. pmap_page_protect()) may require sending | | 483 | * ==> P->V operations (e.g. pmap_page_protect()) may require sending |
484 | * invalidations for multiple address spaces. We only track one | | 484 | * invalidations for multiple address spaces. We only track one |
485 | * address space at a time, and if we encounter more than one, then | | 485 | * address space at a time, and if we encounter more than one, then |
486 | * the notification each CPU gets is to hit the entire TLB. Note | | 486 | * the notification each CPU gets is to hit the entire TLB. Note |
487 | * also that we can't serialize with pmap_activate() in this case, | | 487 | * also that we can't serialize with pmap_activate() in this case, |
488 | * so all CPUs will get the notification, and they check when | | 488 | * so all CPUs will get the notification, and they check when |
489 | * processing the notification if the pmap is current on that CPU. | | 489 | * processing the notification if the pmap is current on that CPU. |
490 | * | | 490 | * |
491 | * Invalidation information is gathered into a pmap_tlb_context structure | | 491 | * Invalidation information is gathered into a pmap_tlb_context structure |
492 | * that includes room for 8 VAs, the pmap the VAs belong to, a bitmap of | | 492 | * that includes room for 8 VAs, the pmap the VAs belong to, a bitmap of |
493 | * CPUs to be notified, and a list for PT pages that are freed during | | 493 | * CPUs to be notified, and a list for PT pages that are freed during |
494 | * removal of mappings. The number of valid addresses in the list as | | 494 | * removal of mappings. The number of valid addresses in the list as |
495 | * well as flags are squeezed into the lower bits of the first two VAs. | | 495 | * well as flags are squeezed into the lower bits of the first two VAs. |
496 | * Storage for this structure is allocated on the stack. We need to be | | 496 | * Storage for this structure is allocated on the stack. We need to be |
497 | * careful to keep the size of this structure under control. | | 497 | * careful to keep the size of this structure under control. |
498 | * | | 498 | * |
499 | * When notifying remote CPUs, we acquire the tlb_lock (which also | | 499 | * When notifying remote CPUs, we acquire the tlb_lock (which also |
500 | * blocks IPIs), record the pointer to our context structure, set a | | 500 | * blocks IPIs), record the pointer to our context structure, set a |
501 | * global bitmap of CPUs to be notified, and then send the IPIs to | | 501 | * global bitmap of CPUs to be notified, and then send the IPIs to |
502 | * each victim. While the other CPUs are in-flight, we then perform | | 502 | * each victim. While the other CPUs are in-flight, we then perform |
503 | * any invalidations necessary on the local CPU. Once that is done, | | 503 | * any invalidations necessary on the local CPU. Once that is done, |
504 | * we then wait for the global context pointer to be cleared, which | | 504 | * we then wait for the global context pointer to be cleared, which |
505 | * will be done by the final remote CPU to complete its work. This | | 505 | * will be done by the final remote CPU to complete its work. This |
506 | * method reduces cache line contention during processing. | | 506 | * method reduces cache line contention during processing. |
507 | * | | 507 | * |
508 | * When removing mappings in user pmaps, this implementation frees page | | 508 | * When removing mappings in user pmaps, this implementation frees page |
509 | * table pages back to the VM system once they contain no valid mappings. | | 509 | * table pages back to the VM system once they contain no valid mappings. |
510 | * As we do this, we must invalidate TLB entries that the | | 510 | * As we do this, we must invalidate TLB entries that the |
511 | * CPU might hold for the respective recursive VPT mappings. This must | | 511 | * CPU might hold for the respective recursive VPT mappings. This must |
512 | * be done whenever an L1 or L2 PTE is invalidated. Until these VPT | | 512 | * be done whenever an L1 or L2 PTE is invalidated. Until these VPT |
513 | * translations are invalidated, the PT pages must not be reused. For | | 513 | * translations are invalidated, the PT pages must not be reused. For |
514 | * this reason, we keep a list of freed PT pages in the context structure | | 514 | * this reason, we keep a list of freed PT pages in the context structure |
515 | * and drain them off once all invalidations are complete. | | 515 | * and drain them off once all invalidations are complete. |
516 | * | | 516 | * |
517 | * NOTE: The value of TLB_CTX_MAXVA is tuned to accommodate the UBC | | 517 | * NOTE: The value of TLB_CTX_MAXVA is tuned to accommodate the UBC |
518 | * window size (defined as 64KB on alpha in <machine/vmparam.h>). | | 518 | * window size (defined as 64KB on alpha in <machine/vmparam.h>). |
519 | */ | | 519 | */ |
520 | | | 520 | |
521 | #define TLB_CTX_MAXVA 8 | | 521 | #define TLB_CTX_MAXVA 8 |
522 | #define TLB_CTX_ALLVA PAGE_MASK | | 522 | #define TLB_CTX_ALLVA PAGE_MASK |
523 | | | 523 | |
524 | #define TLB_CTX_F_ASM __BIT(0) | | 524 | #define TLB_CTX_F_ASM __BIT(0) |
525 | #define TLB_CTX_F_IMB __BIT(1) | | 525 | #define TLB_CTX_F_IMB __BIT(1) |
526 | #define TLB_CTX_F_KIMB __BIT(2) | | 526 | #define TLB_CTX_F_KIMB __BIT(2) |
527 | #define TLB_CTX_F_PV __BIT(3) | | 527 | #define TLB_CTX_F_PV __BIT(3) |
528 | #define TLB_CTX_F_MULTI __BIT(4) | | 528 | #define TLB_CTX_F_MULTI __BIT(4) |
529 | | | 529 | |
530 | #define TLB_CTX_COUNT(ctx) ((ctx)->t_addrdata[0] & PAGE_MASK) | | 530 | #define TLB_CTX_COUNT(ctx) ((ctx)->t_addrdata[0] & PAGE_MASK) |
531 | #define TLB_CTX_INC_COUNT(ctx) (ctx)->t_addrdata[0]++ | | 531 | #define TLB_CTX_INC_COUNT(ctx) (ctx)->t_addrdata[0]++ |
532 | #define TLB_CTX_SET_ALLVA(ctx) (ctx)->t_addrdata[0] |= TLB_CTX_ALLVA | | 532 | #define TLB_CTX_SET_ALLVA(ctx) (ctx)->t_addrdata[0] |= TLB_CTX_ALLVA |
533 | | | 533 | |
534 | #define TLB_CTX_FLAGS(ctx) ((ctx)->t_addrdata[1] & PAGE_MASK) | | 534 | #define TLB_CTX_FLAGS(ctx) ((ctx)->t_addrdata[1] & PAGE_MASK) |
535 | #define TLB_CTX_SET_FLAG(ctx, f) (ctx)->t_addrdata[1] |= (f) | | 535 | #define TLB_CTX_SET_FLAG(ctx, f) (ctx)->t_addrdata[1] |= (f) |
536 | | | 536 | |
537 | #define TLB_CTX_VA(ctx, i) ((ctx)->t_addrdata[(i)] & ~PAGE_MASK) | | 537 | #define TLB_CTX_VA(ctx, i) ((ctx)->t_addrdata[(i)] & ~PAGE_MASK) |
538 | #define TLB_CTX_SETVA(ctx, i, va) \ | | 538 | #define TLB_CTX_SETVA(ctx, i, va) \ |
539 | (ctx)->t_addrdata[(i)] = (va) | ((ctx)->t_addrdata[(i)] & PAGE_MASK) | | 539 | (ctx)->t_addrdata[(i)] = (va) | ((ctx)->t_addrdata[(i)] & PAGE_MASK) |
540 | | | 540 | |
541 | struct pmap_tlb_context { | | 541 | struct pmap_tlb_context { |
542 | uintptr_t t_addrdata[TLB_CTX_MAXVA]; | | 542 | uintptr_t t_addrdata[TLB_CTX_MAXVA]; |
543 | pmap_t t_pmap; | | 543 | pmap_t t_pmap; |
544 | struct pmap_pagelist t_freeptq; | | 544 | struct pmap_pagelist t_freeptq; |
545 | }; | | 545 | }; |
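A short sketch of queueing one VA with the packing macros above, including
the overflow case where the batch falls back to hitting the entire TLB; the
helper name is illustrative, and the real logic lives in the shootdown
routines later in this file:

	/*
	 * Illustrative sketch: queue one VA in a pmap_tlb_context using
	 * the packing macros defined above.
	 */
	static void
	pmap_tlb_ctx_queue_sketch(struct pmap_tlb_context *ctx, vaddr_t va)
	{
		const uintptr_t count = TLB_CTX_COUNT(ctx);

		if (count >= TLB_CTX_MAXVA) {
			/* Batch full (or already ALLVA): hit the whole TLB. */
			TLB_CTX_SET_ALLVA(ctx);
			return;
		}
		/* The page-aligned VA shares its slot with count/flag bits. */
		TLB_CTX_SETVA(ctx, count, trunc_page(va));
		TLB_CTX_INC_COUNT(ctx);
	}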
546 | | | 546 | |
547 | static struct { | | 547 | static struct { |
548 | kmutex_t lock; | | 548 | kmutex_t lock; |
549 | struct evcnt events; | | 549 | struct evcnt events; |
550 | } tlb_shootdown __cacheline_aligned; | | 550 | } tlb_shootdown __cacheline_aligned; |
551 | #define tlb_lock tlb_shootdown.lock | | 551 | #define tlb_lock tlb_shootdown.lock |
552 | #define tlb_evcnt tlb_shootdown.events | | 552 | #define tlb_evcnt tlb_shootdown.events |
553 | #if defined(MULTIPROCESSOR) | | 553 | #if defined(MULTIPROCESSOR) |
554 | static const struct pmap_tlb_context *tlb_context __cacheline_aligned; | | 554 | static const struct pmap_tlb_context *tlb_context __cacheline_aligned; |
555 | static unsigned long tlb_pending __cacheline_aligned; | | 555 | static unsigned long tlb_pending __cacheline_aligned; |
556 | #endif /* MULTIPROCESSOR */ | | 556 | #endif /* MULTIPROCESSOR */ |
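And a hedged sketch of the remote-notification protocol from the comment
above: record the context, mark the victims, IPI them, invalidate locally,
then wait for the last remote CPU to clear the pointer. The IPI dispatch is
elided and pmap_tlb_invalidate_local() is an assumed name; the real shootnow
path also handles victim selection and TLB_STATS accounting:

	#if defined(MULTIPROCESSOR)
	/* Assumed helper: perform this CPU's share of the invalidations. */
	static void pmap_tlb_invalidate_local(const struct pmap_tlb_context *);

	/* Illustrative sketch only, not the file's actual shootnow code. */
	static void
	pmap_tlb_shootnow_sketch(const struct pmap_tlb_context *ctx,
	    unsigned long victims)
	{
		mutex_spin_enter(&tlb_lock);	/* IPL_VM: also blocks IPIs */
		tlb_context = ctx;
		tlb_pending = victims;
		/* ... send the shootdown IPI to each CPU set in victims ... */

		/* Invalidate locally while the remote CPUs do their work. */
		pmap_tlb_invalidate_local(ctx);

		/* The final remote CPU to finish clears tlb_context. */
		while (atomic_load_acquire(&tlb_context) != NULL) {
			/* spin */
		}
		mutex_spin_exit(&tlb_lock);
	}
	#endif /* MULTIPROCESSOR */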
557 | | | 557 | |
558 | #if defined(TLB_STATS) | | 558 | #if defined(TLB_STATS) |
559 | #define TLB_COUNT_DECL(cnt) static struct evcnt tlb_stat_##cnt | | 559 | #define TLB_COUNT_DECL(cnt) static struct evcnt tlb_stat_##cnt |
560 | #define TLB_COUNT(cnt) atomic_inc_64(&tlb_stat_##cnt .ev_count) | | 560 | #define TLB_COUNT(cnt) atomic_inc_64(&tlb_stat_##cnt .ev_count) |
561 | #define TLB_COUNT_ATTACH(cnt) \ | | 561 | #define TLB_COUNT_ATTACH(cnt) \ |
562 | evcnt_attach_dynamic_nozero(&tlb_stat_##cnt, EVCNT_TYPE_MISC, \ | | 562 | evcnt_attach_dynamic_nozero(&tlb_stat_##cnt, EVCNT_TYPE_MISC, \ |
563 | NULL, "TLB", #cnt) | | 563 | NULL, "TLB", #cnt) |
564 | | | 564 | |
565 | TLB_COUNT_DECL(invalidate_multi_tbia); | | 565 | TLB_COUNT_DECL(invalidate_multi_tbia); |
566 | TLB_COUNT_DECL(invalidate_multi_tbiap); | | 566 | TLB_COUNT_DECL(invalidate_multi_tbiap); |
567 | TLB_COUNT_DECL(invalidate_multi_imb); | | 567 | TLB_COUNT_DECL(invalidate_multi_imb); |
568 | | | 568 | |
569 | TLB_COUNT_DECL(invalidate_kern_tbia); | | 569 | TLB_COUNT_DECL(invalidate_kern_tbia); |
570 | TLB_COUNT_DECL(invalidate_kern_tbis); | | 570 | TLB_COUNT_DECL(invalidate_kern_tbis); |
571 | TLB_COUNT_DECL(invalidate_kern_imb); | | 571 | TLB_COUNT_DECL(invalidate_kern_imb); |
572 | | | 572 | |
573 | TLB_COUNT_DECL(invalidate_user_not_current); | | 573 | TLB_COUNT_DECL(invalidate_user_not_current); |
574 | TLB_COUNT_DECL(invalidate_user_lazy_imb); | | 574 | TLB_COUNT_DECL(invalidate_user_lazy_imb); |
575 | TLB_COUNT_DECL(invalidate_user_tbiap); | | 575 | TLB_COUNT_DECL(invalidate_user_tbiap); |
576 | TLB_COUNT_DECL(invalidate_user_tbis); | | 576 | TLB_COUNT_DECL(invalidate_user_tbis); |
577 | | | 577 | |
578 | TLB_COUNT_DECL(shootdown_kernel); | | 578 | TLB_COUNT_DECL(shootdown_kernel); |
579 | TLB_COUNT_DECL(shootdown_user); | | 579 | TLB_COUNT_DECL(shootdown_user); |
580 | TLB_COUNT_DECL(shootdown_imb); | | 580 | TLB_COUNT_DECL(shootdown_imb); |
581 | TLB_COUNT_DECL(shootdown_kimb); | | 581 | TLB_COUNT_DECL(shootdown_kimb); |
582 | TLB_COUNT_DECL(shootdown_overflow); | | 582 | TLB_COUNT_DECL(shootdown_overflow); |
583 | | | 583 | |
584 | TLB_COUNT_DECL(shootdown_all_user); | | 584 | TLB_COUNT_DECL(shootdown_all_user); |
585 | TLB_COUNT_DECL(shootdown_all_user_imb); | | 585 | TLB_COUNT_DECL(shootdown_all_user_imb); |
586 | | | 586 | |
587 | TLB_COUNT_DECL(shootdown_pv); | | 587 | TLB_COUNT_DECL(shootdown_pv); |
588 | TLB_COUNT_DECL(shootdown_pv_multi); | | 588 | TLB_COUNT_DECL(shootdown_pv_multi); |
589 | | | 589 | |
590 | TLB_COUNT_DECL(shootnow_over_notify); | | 590 | TLB_COUNT_DECL(shootnow_over_notify); |
591 | TLB_COUNT_DECL(shootnow_remote); | | 591 | TLB_COUNT_DECL(shootnow_remote); |
592 | | | 592 | |
593 | TLB_COUNT_DECL(reason_remove_kernel); | | 593 | TLB_COUNT_DECL(reason_remove_kernel); |
594 | TLB_COUNT_DECL(reason_remove_user); | | 594 | TLB_COUNT_DECL(reason_remove_user); |
595 | TLB_COUNT_DECL(reason_page_protect_read); | | 595 | TLB_COUNT_DECL(reason_page_protect_read); |
596 | TLB_COUNT_DECL(reason_page_protect_none); | | 596 | TLB_COUNT_DECL(reason_page_protect_none); |
597 | TLB_COUNT_DECL(reason_protect); | | 597 | TLB_COUNT_DECL(reason_protect); |
598 | TLB_COUNT_DECL(reason_enter_kernel); | | 598 | TLB_COUNT_DECL(reason_enter_kernel); |
599 | TLB_COUNT_DECL(reason_enter_user); | | 599 | TLB_COUNT_DECL(reason_enter_user); |
600 | TLB_COUNT_DECL(reason_kenter); | | 600 | TLB_COUNT_DECL(reason_kenter); |
601 | TLB_COUNT_DECL(reason_enter_l2pt_delref); | | 601 | TLB_COUNT_DECL(reason_enter_l2pt_delref); |
602 | TLB_COUNT_DECL(reason_enter_l3pt_delref); | | 602 | TLB_COUNT_DECL(reason_enter_l3pt_delref); |
603 | TLB_COUNT_DECL(reason_kremove); | | 603 | TLB_COUNT_DECL(reason_kremove); |
604 | TLB_COUNT_DECL(reason_clear_modify); | | 604 | TLB_COUNT_DECL(reason_clear_modify); |
605 | TLB_COUNT_DECL(reason_clear_reference); | | 605 | TLB_COUNT_DECL(reason_clear_reference); |
606 | TLB_COUNT_DECL(reason_emulate_reference); | | 606 | TLB_COUNT_DECL(reason_emulate_reference); |
607 | | | 607 | |
608 | TLB_COUNT_DECL(asn_reuse); | | 608 | TLB_COUNT_DECL(asn_reuse); |
609 | TLB_COUNT_DECL(asn_newgen); | | 609 | TLB_COUNT_DECL(asn_newgen); |
610 | TLB_COUNT_DECL(asn_assign); | | 610 | TLB_COUNT_DECL(asn_assign); |
611 | | | 611 | |
612 | TLB_COUNT_DECL(activate_both_change); | | 612 | TLB_COUNT_DECL(activate_both_change); |
613 | TLB_COUNT_DECL(activate_asn_change); | | 613 | TLB_COUNT_DECL(activate_asn_change); |
614 | TLB_COUNT_DECL(activate_ptbr_change); | | 614 | TLB_COUNT_DECL(activate_ptbr_change); |
615 | TLB_COUNT_DECL(activate_swpctx); | | 615 | TLB_COUNT_DECL(activate_swpctx); |
616 | TLB_COUNT_DECL(activate_skip_swpctx); | | 616 | TLB_COUNT_DECL(activate_skip_swpctx); |
617 | | | 617 | |
618 | #else /* ! TLB_STATS */ | | 618 | #else /* ! TLB_STATS */ |
619 | #define TLB_COUNT(cnt) __nothing | | 619 | #define TLB_COUNT(cnt) __nothing |
620 | #define TLB_COUNT_ATTACH(cnt) __nothing | | 620 | #define TLB_COUNT_ATTACH(cnt) __nothing |
621 | #endif /* TLB_STATS */ | | 621 | #endif /* TLB_STATS */ |
622 | | | 622 | |
623 | static void | | 623 | static void |
624 | pmap_tlb_init(void) | | 624 | pmap_tlb_init(void) |
625 | { | | 625 | { |
626 | /* mutex is initialized in pmap_bootstrap(). */ | | 626 | /* mutex is initialized in pmap_bootstrap(). */ |
627 | | | 627 | |
628 | evcnt_attach_dynamic_nozero(&tlb_evcnt, EVCNT_TYPE_MISC, | | 628 | evcnt_attach_dynamic_nozero(&tlb_evcnt, EVCNT_TYPE_MISC, |
629 | NULL, "TLB", "shootdown"); | | 629 | NULL, "TLB", "shootdown"); |
630 | | | 630 | |
631 | TLB_COUNT_ATTACH(invalidate_multi_tbia); | | 631 | TLB_COUNT_ATTACH(invalidate_multi_tbia); |
632 | TLB_COUNT_ATTACH(invalidate_multi_tbiap); | | 632 | TLB_COUNT_ATTACH(invalidate_multi_tbiap); |
633 | TLB_COUNT_ATTACH(invalidate_multi_imb); | | 633 | TLB_COUNT_ATTACH(invalidate_multi_imb); |
634 | | | 634 | |
635 | TLB_COUNT_ATTACH(invalidate_kern_tbia); | | 635 | TLB_COUNT_ATTACH(invalidate_kern_tbia); |
636 | TLB_COUNT_ATTACH(invalidate_kern_tbis); | | 636 | TLB_COUNT_ATTACH(invalidate_kern_tbis); |
637 | TLB_COUNT_ATTACH(invalidate_kern_imb); | | 637 | TLB_COUNT_ATTACH(invalidate_kern_imb); |
638 | | | 638 | |
639 | TLB_COUNT_ATTACH(invalidate_user_not_current); | | 639 | TLB_COUNT_ATTACH(invalidate_user_not_current); |
640 | TLB_COUNT_ATTACH(invalidate_user_lazy_imb); | | 640 | TLB_COUNT_ATTACH(invalidate_user_lazy_imb); |
641 | TLB_COUNT_ATTACH(invalidate_user_tbiap); | | 641 | TLB_COUNT_ATTACH(invalidate_user_tbiap); |
642 | TLB_COUNT_ATTACH(invalidate_user_tbis); | | 642 | TLB_COUNT_ATTACH(invalidate_user_tbis); |
643 | | | 643 | |
644 | TLB_COUNT_ATTACH(shootdown_kernel); | | 644 | TLB_COUNT_ATTACH(shootdown_kernel); |
645 | TLB_COUNT_ATTACH(shootdown_user); | | 645 | TLB_COUNT_ATTACH(shootdown_user); |
646 | TLB_COUNT_ATTACH(shootdown_imb); | | 646 | TLB_COUNT_ATTACH(shootdown_imb); |
647 | TLB_COUNT_ATTACH(shootdown_kimb); | | 647 | TLB_COUNT_ATTACH(shootdown_kimb); |
648 | TLB_COUNT_ATTACH(shootdown_overflow); | | 648 | TLB_COUNT_ATTACH(shootdown_overflow); |
649 | | | 649 | |
650 | TLB_COUNT_ATTACH(shootdown_all_user); | | 650 | TLB_COUNT_ATTACH(shootdown_all_user); |
651 | TLB_COUNT_ATTACH(shootdown_all_user_imb); | | 651 | TLB_COUNT_ATTACH(shootdown_all_user_imb); |
652 | | | 652 | |
653 | TLB_COUNT_ATTACH(shootdown_pv); | | 653 | TLB_COUNT_ATTACH(shootdown_pv); |
654 | TLB_COUNT_ATTACH(shootdown_pv_multi); | | 654 | TLB_COUNT_ATTACH(shootdown_pv_multi); |
655 | | | 655 | |
656 | TLB_COUNT_ATTACH(shootnow_over_notify); | | 656 | TLB_COUNT_ATTACH(shootnow_over_notify); |
657 | TLB_COUNT_ATTACH(shootnow_remote); | | 657 | TLB_COUNT_ATTACH(shootnow_remote); |
658 | | | 658 | |
659 | TLB_COUNT_ATTACH(reason_remove_kernel); | | 659 | TLB_COUNT_ATTACH(reason_remove_kernel); |
660 | TLB_COUNT_ATTACH(reason_remove_user); | | 660 | TLB_COUNT_ATTACH(reason_remove_user); |
661 | TLB_COUNT_ATTACH(reason_page_protect_read); | | 661 | TLB_COUNT_ATTACH(reason_page_protect_read); |
662 | TLB_COUNT_ATTACH(reason_page_protect_none); | | 662 | TLB_COUNT_ATTACH(reason_page_protect_none); |
663 | TLB_COUNT_ATTACH(reason_protect); | | 663 | TLB_COUNT_ATTACH(reason_protect); |
664 | TLB_COUNT_ATTACH(reason_enter_kernel); | | 664 | TLB_COUNT_ATTACH(reason_enter_kernel); |
665 | TLB_COUNT_ATTACH(reason_enter_user); | | 665 | TLB_COUNT_ATTACH(reason_enter_user); |
666 | TLB_COUNT_ATTACH(reason_kenter); | | 666 | TLB_COUNT_ATTACH(reason_kenter); |
667 | TLB_COUNT_ATTACH(reason_enter_l2pt_delref); | | 667 | TLB_COUNT_ATTACH(reason_enter_l2pt_delref); |
668 | TLB_COUNT_ATTACH(reason_enter_l3pt_delref); | | 668 | TLB_COUNT_ATTACH(reason_enter_l3pt_delref); |
669 | TLB_COUNT_ATTACH(reason_kremove); | | 669 | TLB_COUNT_ATTACH(reason_kremove); |
670 | TLB_COUNT_ATTACH(reason_clear_modify); | | 670 | TLB_COUNT_ATTACH(reason_clear_modify); |
671 | TLB_COUNT_ATTACH(reason_clear_reference); | | 671 | TLB_COUNT_ATTACH(reason_clear_reference); |
672 | | | 672 | |
673 | TLB_COUNT_ATTACH(asn_reuse); | | 673 | TLB_COUNT_ATTACH(asn_reuse); |
674 | TLB_COUNT_ATTACH(asn_newgen); | | 674 | TLB_COUNT_ATTACH(asn_newgen); |
675 | TLB_COUNT_ATTACH(asn_assign); | | 675 | TLB_COUNT_ATTACH(asn_assign); |
676 | | | 676 | |
677 | TLB_COUNT_ATTACH(activate_both_change); | | 677 | TLB_COUNT_ATTACH(activate_both_change); |
678 | TLB_COUNT_ATTACH(activate_asn_change); | | 678 | TLB_COUNT_ATTACH(activate_asn_change); |
679 | TLB_COUNT_ATTACH(activate_ptbr_change); | | 679 | TLB_COUNT_ATTACH(activate_ptbr_change); |
680 | TLB_COUNT_ATTACH(activate_swpctx); | | 680 | TLB_COUNT_ATTACH(activate_swpctx); |
681 | TLB_COUNT_ATTACH(activate_skip_swpctx); | | 681 | TLB_COUNT_ATTACH(activate_skip_swpctx); |
682 | } | | 682 | } |
683 | | | 683 | |
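| /* | | | /*
|  * pmap_tlb_context_init: | | |  * pmap_tlb_context_init:
|  * | | |  *
|  *	Initialize a TLB shootdown context: zero the VA count, record | | |  *	Initialize a TLB shootdown context: zero the VA count, record
|  *	the caller's flags, and start with no pmap and an empty queue | | |  *	the caller's flags, and start with no pmap and an empty queue
|  *	of PT pages to free. | | |  *	of PT pages to free.
|  */ | | |  */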
684 | static inline void | | 684 | static inline void |
685 | pmap_tlb_context_init(struct pmap_tlb_context * const tlbctx, uintptr_t flags) | | 685 | pmap_tlb_context_init(struct pmap_tlb_context * const tlbctx, uintptr_t flags) |
686 | { | | 686 | { |
687 | /* Initialize the minimum number of fields. */ | | 687 | /* Initialize the minimum number of fields. */ |
688 | tlbctx->t_addrdata[0] = 0; | | 688 | tlbctx->t_addrdata[0] = 0; |
689 | tlbctx->t_addrdata[1] = flags; | | 689 | tlbctx->t_addrdata[1] = flags; |
690 | tlbctx->t_pmap = NULL; | | 690 | tlbctx->t_pmap = NULL; |
691 | LIST_INIT(&tlbctx->t_freeptq); | | 691 | LIST_INIT(&tlbctx->t_freeptq); |
692 | } | | 692 | } |
693 | | | 693 | |
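| /* | | | /*
|  * pmap_tlb_shootdown_internal: | | |  * pmap_tlb_shootdown_internal:
|  * | | |  *
|  *	Record a single-page shootdown in the context: note the scope | | |  *	Record a single-page shootdown in the context: note the scope
|  *	(kernel/ASM vs. user) and any I-stream sync needed, remember | | |  *	(kernel/ASM vs. user) and any I-stream sync needed, remember
|  *	the target pmap, and append the VA, escalating to an | | |  *	the target pmap, and append the VA, escalating to an
|  *	all-entries invalidation once the VA list overflows. | | |  *	all-entries invalidation once the VA list overflows.
|  */ | | |  */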
694 | static void | | 694 | static void |
695 | pmap_tlb_shootdown_internal(pmap_t const pmap, vaddr_t const va, | | 695 | pmap_tlb_shootdown_internal(pmap_t const pmap, vaddr_t const va, |
696 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) | | 696 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) |
697 | { | | 697 | { |
698 | KASSERT(pmap != NULL); | | 698 | KASSERT(pmap != NULL); |
699 | KASSERT((va & PAGE_MASK) == 0); | | 699 | KASSERT((va & PAGE_MASK) == 0); |
700 | | | 700 | |
701 | /* | | 701 | /* |
702 | * Figure out who needs to hear about this, and the scope | | 702 | * Figure out who needs to hear about this, and the scope |
703 | * of an all-entries invalidate. | | 703 | * of an all-entries invalidate. |
704 | */ | | 704 | */ |
705 | if (pmap == pmap_kernel()) { | | 705 | if (pmap == pmap_kernel()) { |
706 | TLB_COUNT(shootdown_kernel); | | 706 | TLB_COUNT(shootdown_kernel); |
707 | KASSERT(pte_bits & PG_ASM); | | 707 | KASSERT(pte_bits & PG_ASM); |
708 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_ASM); | | 708 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_ASM); |
709 | | | 709 | |
710 | /* Note if an I-stream sync is also needed. */ | | 710 | /* Note if an I-stream sync is also needed. */ |
711 | if (pte_bits & PG_EXEC) { | | 711 | if (pte_bits & PG_EXEC) { |
712 | TLB_COUNT(shootdown_kimb); | | 712 | TLB_COUNT(shootdown_kimb); |
713 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_KIMB); | | 713 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_KIMB); |
714 | } | | 714 | } |
715 | } else { | | 715 | } else { |
716 | TLB_COUNT(shootdown_user); | | 716 | TLB_COUNT(shootdown_user); |
717 | KASSERT((pte_bits & PG_ASM) == 0); | | 717 | KASSERT((pte_bits & PG_ASM) == 0); |
718 | | | 718 | |
719 | /* Note if an I-stream sync is also needed. */ | | 719 | /* Note if an I-stream sync is also needed. */ |
720 | if (pte_bits & PG_EXEC) { | | 720 | if (pte_bits & PG_EXEC) { |
721 | TLB_COUNT(shootdown_imb); | | 721 | TLB_COUNT(shootdown_imb); |
722 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB); | | 722 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB); |
723 | } | | 723 | } |
724 | } | | 724 | } |
725 | | | 725 | |
726 | KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap); | | 726 | KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap); |
727 | tlbctx->t_pmap = pmap; | | 727 | tlbctx->t_pmap = pmap; |
728 | | | 728 | |
729 | /* | | 729 | /* |
730 | * If we're already at the max, just tell each active CPU | | 730 | * If we're already at the max, just tell each active CPU |
731 | * to nail everything. | | 731 | * to nail everything. |
732 | */ | | 732 | */ |
733 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); | | 733 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); |
734 | if (count > TLB_CTX_MAXVA) { | | 734 | if (count > TLB_CTX_MAXVA) { |
735 | return; | | 735 | return; |
736 | } | | 736 | } |
737 | if (count == TLB_CTX_MAXVA) { | | 737 | if (count == TLB_CTX_MAXVA) { |
738 | TLB_COUNT(shootdown_overflow); | | 738 | TLB_COUNT(shootdown_overflow); |
739 | TLB_CTX_SET_ALLVA(tlbctx); | | 739 | TLB_CTX_SET_ALLVA(tlbctx); |
740 | return; | | 740 | return; |
741 | } | | 741 | } |
742 | | | 742 | |
743 | TLB_CTX_SETVA(tlbctx, count, va); | | 743 | TLB_CTX_SETVA(tlbctx, count, va); |
744 | TLB_CTX_INC_COUNT(tlbctx); | | 744 | TLB_CTX_INC_COUNT(tlbctx); |
745 | } | | 745 | } |
746 | | | 746 | |
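| /* | | | /*
|  * pmap_tlb_shootdown: | | |  * pmap_tlb_shootdown:
|  * | | |  *
|  *	Queue a single-page shootdown in a non-P->V context. | | |  *	Queue a single-page shootdown in a non-P->V context.
|  */ | | |  */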
747 | static void | | 747 | static void |
748 | pmap_tlb_shootdown(pmap_t const pmap, vaddr_t const va, | | 748 | pmap_tlb_shootdown(pmap_t const pmap, vaddr_t const va, |
749 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) | | 749 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) |
750 | { | | 750 | { |
751 | KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) == 0); | | 751 | KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) == 0); |
752 | pmap_tlb_shootdown_internal(pmap, va, pte_bits, tlbctx); | | 752 | pmap_tlb_shootdown_internal(pmap, va, pte_bits, tlbctx); |
753 | } | | 753 | } |
754 | | | 754 | |
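| /* | | | /*
|  * pmap_tlb_shootdown_all_user: | | |  * pmap_tlb_shootdown_all_user:
|  * | | |  *
|  *	Queue an all-entries shootdown for a user pmap, noting any | | |  *	Queue an all-entries shootdown for a user pmap, noting any
|  *	I-stream sync needed and handling the multiple-pmap case | | |  *	I-stream sync needed and handling the multiple-pmap case
|  *	for P->V contexts. | | |  *	for P->V contexts.
|  */ | | |  */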
755 | static void | | 755 | static void |
756 | pmap_tlb_shootdown_all_user(pmap_t const pmap, pt_entry_t const pte_bits, | | 756 | pmap_tlb_shootdown_all_user(pmap_t const pmap, pt_entry_t const pte_bits, |
757 | struct pmap_tlb_context * const tlbctx) | | 757 | struct pmap_tlb_context * const tlbctx) |
758 | { | | 758 | { |
759 | KASSERT(pmap != pmap_kernel()); | | 759 | KASSERT(pmap != pmap_kernel()); |
760 | | | 760 | |
761 | TLB_COUNT(shootdown_all_user); | | 761 | TLB_COUNT(shootdown_all_user); |
762 | | | 762 | |
763 | /* Note if an I-stream sync is also needed. */ | | 763 | /* Note if an I-stream sync is also needed. */ |
764 | if (pte_bits & PG_EXEC) { | | 764 | if (pte_bits & PG_EXEC) { |
765 | TLB_COUNT(shootdown_all_user_imb); | | 765 | TLB_COUNT(shootdown_all_user_imb); |
766 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB); | | 766 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB); |
767 | } | | 767 | } |
768 | | | 768 | |
769 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) { | | 769 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) { |
770 | if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap) { | | 770 | if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap) { |
771 | if (tlbctx->t_pmap == NULL) { | | 771 | if (tlbctx->t_pmap == NULL) { |
772 | pmap_reference(pmap); | | 772 | pmap_reference(pmap); |
773 | tlbctx->t_pmap = pmap; | | 773 | tlbctx->t_pmap = pmap; |
774 | } | | 774 | } |
775 | } else { | | 775 | } else { |
776 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_MULTI); | | 776 | TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_MULTI); |
777 | } | | 777 | } |
778 | } else { | | 778 | } else { |
779 | KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap); | | 779 | KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap); |
780 | tlbctx->t_pmap = pmap; | | 780 | tlbctx->t_pmap = pmap; |
781 | } | | 781 | } |
782 | | | 782 | |
783 | TLB_CTX_SET_ALLVA(tlbctx); | | 783 | TLB_CTX_SET_ALLVA(tlbctx); |
784 | } | | 784 | } |
785 | | | 785 | |
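| /* | | | /*
|  * pmap_tlb_shootdown_pv: | | |  * pmap_tlb_shootdown_pv:
|  * | | |  *
|  *	Queue a shootdown on behalf of a P->V operation, which may | | |  *	Queue a shootdown on behalf of a P->V operation, which may
|  *	span multiple pmaps: the first pmap seen is referenced and | | |  *	span multiple pmaps: the first pmap seen is referenced and
|  *	recorded; any additional pmap escalates the context to an | | |  *	recorded; any additional pmap escalates the context to an
|  *	all-entries, notify-everyone operation. | | |  *	all-entries, notify-everyone operation.
|  */ | | |  */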
786 | static void | | 786 | static void |
787 | pmap_tlb_shootdown_pv(pmap_t const pmap, vaddr_t const va, | | 787 | pmap_tlb_shootdown_pv(pmap_t const pmap, vaddr_t const va, |
788 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) | | 788 | pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx) |
789 | { | | 789 | { |
790 | | | 790 | |
791 | KASSERT(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV); | | 791 | KASSERT(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV); |
792 | | | 792 | |
793 | TLB_COUNT(shootdown_pv); | | 793 | TLB_COUNT(shootdown_pv); |
794 | | | 794 | |
795 | if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap) { | | 795 | if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap) { |
796 | if (tlbctx->t_pmap == NULL) { | | 796 | if (tlbctx->t_pmap == NULL) { |
797 | pmap_reference(pmap); | | 797 | pmap_reference(pmap); |
798 | tlbctx->t_pmap = pmap; | | 798 | tlbctx->t_pmap = pmap; |
799 | } | | 799 | } |
800 | pmap_tlb_shootdown_internal(pmap, va, pte_bits, tlbctx); | | 800 | pmap_tlb_shootdown_internal(pmap, va, pte_bits, tlbctx); |
801 | } else { | | 801 | } else { |
802 | TLB_COUNT(shootdown_pv_multi); | | 802 | TLB_COUNT(shootdown_pv_multi); |
803 | uintptr_t flags = TLB_CTX_F_MULTI; | | 803 | uintptr_t flags = TLB_CTX_F_MULTI; |
804 | if (pmap == pmap_kernel()) { | | 804 | if (pmap == pmap_kernel()) { |
805 | KASSERT(pte_bits & PG_ASM); | | 805 | KASSERT(pte_bits & PG_ASM); |
806 | flags |= TLB_CTX_F_ASM; | | 806 | flags |= TLB_CTX_F_ASM; |
807 | } else { | | 807 | } else { |
808 | KASSERT((pte_bits & PG_ASM) == 0); | | 808 | KASSERT((pte_bits & PG_ASM) == 0); |
809 | } | | 809 | } |
810 | | | 810 | |
811 | /* | | 811 | /* |
812 | * No need to distinguish between kernel and user IMB | | 812 | * No need to distinguish between kernel and user IMB |
813 | * here; see pmap_tlb_invalidate_multi(). | | 813 | * here; see pmap_tlb_invalidate_multi(). |
814 | */ | | 814 | */ |
815 | if (pte_bits & PG_EXEC) { | | 815 | if (pte_bits & PG_EXEC) { |
816 | flags |= TLB_CTX_F_IMB; | | 816 | flags |= TLB_CTX_F_IMB; |
817 | } | | 817 | } |
818 | TLB_CTX_SET_ALLVA(tlbctx); | | 818 | TLB_CTX_SET_ALLVA(tlbctx); |
819 | TLB_CTX_SET_FLAG(tlbctx, flags); | | 819 | TLB_CTX_SET_FLAG(tlbctx, flags); |
820 | } | | 820 | } |
821 | } | | 821 | } |
822 | | | 822 | |
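| /* | | | /*
|  * pmap_tlb_invalidate_multi: | | |  * pmap_tlb_invalidate_multi:
|  * | | |  *
|  *	Invalidate for a multi-pmap context: TBIA if kernel (ASM) | | |  *	Invalidate for a multi-pmap context: TBIA if kernel (ASM)
|  *	mappings are involved, TBIAP otherwise, plus an IMB if any | | |  *	mappings are involved, TBIAP otherwise, plus an IMB if any
|  *	I-stream sync was requested. | | |  *	I-stream sync was requested.
|  */ | | |  */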
823 | static void | | 823 | static void |
824 | pmap_tlb_invalidate_multi(const struct pmap_tlb_context * const tlbctx) | | 824 | pmap_tlb_invalidate_multi(const struct pmap_tlb_context * const tlbctx) |
825 | { | | 825 | { |
826 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) { | | 826 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) { |
827 | TLB_COUNT(invalidate_multi_tbia); | | 827 | TLB_COUNT(invalidate_multi_tbia); |
828 | ALPHA_TBIA(); | | 828 | ALPHA_TBIA(); |
829 | } else { | | 829 | } else { |
830 | TLB_COUNT(invalidate_multi_tbiap); | | 830 | TLB_COUNT(invalidate_multi_tbiap); |
831 | ALPHA_TBIAP(); | | 831 | ALPHA_TBIAP(); |
832 | } | | 832 | } |
833 | if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_IMB | TLB_CTX_F_KIMB)) { | | 833 | if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_IMB | TLB_CTX_F_KIMB)) { |
834 | TLB_COUNT(invalidate_multi_imb); | | 834 | TLB_COUNT(invalidate_multi_imb); |
835 | alpha_pal_imb(); | | 835 | alpha_pal_imb(); |
836 | } | | 836 | } |
837 | } | | 837 | } |
838 | | | 838 | |
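| /* | | | /*
|  * pmap_tlb_invalidate_kernel: | | |  * pmap_tlb_invalidate_kernel:
|  * | | |  *
|  *	Invalidate kernel mappings on the current CPU: TBIA for an | | |  *	Invalidate kernel mappings on the current CPU: TBIA for an
|  *	all-VAs context, otherwise a TBIS per recorded VA, plus an | | |  *	all-VAs context, otherwise a TBIS per recorded VA, plus an
|  *	IMB if a kernel I-stream sync was requested. | | |  *	IMB if a kernel I-stream sync was requested.
|  */ | | |  */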
839 | static void | | 839 | static void |
840 | pmap_tlb_invalidate_kernel(const struct pmap_tlb_context * const tlbctx) | | 840 | pmap_tlb_invalidate_kernel(const struct pmap_tlb_context * const tlbctx) |
841 | { | | 841 | { |
842 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); | | 842 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); |
843 | | | 843 | |
844 | if (count == TLB_CTX_ALLVA) { | | 844 | if (count == TLB_CTX_ALLVA) { |
845 | TLB_COUNT(invalidate_kern_tbia); | | 845 | TLB_COUNT(invalidate_kern_tbia); |
846 | ALPHA_TBIA(); | | 846 | ALPHA_TBIA(); |
847 | } else { | | 847 | } else { |
848 | TLB_COUNT(invalidate_kern_tbis); | | 848 | TLB_COUNT(invalidate_kern_tbis); |
849 | for (uintptr_t i = 0; i < count; i++) { | | 849 | for (uintptr_t i = 0; i < count; i++) { |
850 | ALPHA_TBIS(TLB_CTX_VA(tlbctx, i)); | | 850 | ALPHA_TBIS(TLB_CTX_VA(tlbctx, i)); |
851 | } | | 851 | } |
852 | } | | 852 | } |
853 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_KIMB) { | | 853 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_KIMB) { |
854 | TLB_COUNT(invalidate_kern_imb); | | 854 | TLB_COUNT(invalidate_kern_imb); |
855 | alpha_pal_imb(); | | 855 | alpha_pal_imb(); |
856 | } | | 856 | } |
857 | } | | 857 | } |
858 | | | 858 | |
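| /* | | | /*
|  * pmap_tlb_invalidate: | | |  * pmap_tlb_invalidate:
|  * | | |  *
|  *	Process an invalidation context on the current CPU, | | |  *	Process an invalidation context on the current CPU,
|  *	dispatching to the multi-pmap and kernel cases, and otherwise | | |  *	dispatching to the multi-pmap and kernel cases, and otherwise
|  *	handling a single user pmap (lazily, by forcing a new ASN, | | |  *	handling a single user pmap (lazily, by forcing a new ASN,
|  *	when that pmap is not the one active on this CPU). | | |  *	when that pmap is not the one active on this CPU).
|  */ | | |  */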
859 | static void | | 859 | static void |
860 | pmap_tlb_invalidate(const struct pmap_tlb_context * const tlbctx, | | 860 | pmap_tlb_invalidate(const struct pmap_tlb_context * const tlbctx, |
861 | const struct cpu_info * const ci) | | 861 | const struct cpu_info * const ci) |
862 | { | | 862 | { |
863 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); | | 863 | const uintptr_t count = TLB_CTX_COUNT(tlbctx); |
864 | | | 864 | |
865 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_MULTI) { | | 865 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_MULTI) { |
866 | pmap_tlb_invalidate_multi(tlbctx); | | 866 | pmap_tlb_invalidate_multi(tlbctx); |
867 | return; | | 867 | return; |
868 | } | | 868 | } |
869 | | | 869 | |
870 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) { | | 870 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) { |
871 | pmap_tlb_invalidate_kernel(tlbctx); | | 871 | pmap_tlb_invalidate_kernel(tlbctx); |
872 | return; | | 872 | return; |
873 | } | | 873 | } |
874 | | | 874 | |
875 | KASSERT(kpreempt_disabled()); | | 875 | KASSERT(kpreempt_disabled()); |
876 | | | 876 | |
877 | pmap_t const pmap = tlbctx->t_pmap; | | 877 | pmap_t const pmap = tlbctx->t_pmap; |
878 | KASSERT(pmap != NULL); | | 878 | KASSERT(pmap != NULL); |
879 | | | 879 | |
880 | if (__predict_false(pmap != ci->ci_pmap)) { | | 880 | if (__predict_false(pmap != ci->ci_pmap)) { |
881 | TLB_COUNT(invalidate_user_not_current); | | 881 | TLB_COUNT(invalidate_user_not_current); |
882 | | | 882 | |
883 | /* | | 883 | /* |
884 | * For CPUs that don't implement ASNs, the SWPCTX call | | 884 | * For CPUs that don't implement ASNs, the SWPCTX call |
885 | * does all of the TLB invalidation work for us. | | 885 | * does all of the TLB invalidation work for us. |
886 | */ | | 886 | */ |
887 | if (__predict_false(pmap_max_asn == 0)) { | | 887 | if (__predict_false(pmap_max_asn == 0)) { |
888 | return; | | 888 | return; |
889 | } | | 889 | } |
890 | | | 890 | |
891 | const u_long cpu_mask = 1UL << ci->ci_cpuid; | | 891 | const u_long cpu_mask = 1UL << ci->ci_cpuid; |
892 | | | 892 | |
893 | /* | | 893 | /* |
894 | * We cannot directly invalidate the TLB in this case, | | 894 | * We cannot directly invalidate the TLB in this case, |
895 | * so force allocation of a new ASN when the pmap becomes | | 895 | * so force allocation of a new ASN when the pmap becomes |
896 | * active again. | | 896 | * active again. |
897 | */ | | 897 | */ |
898 | pmap->pm_percpu[ci->ci_cpuid].pmc_asngen = PMAP_ASNGEN_INVALID; | | 898 | pmap->pm_percpu[ci->ci_cpuid].pmc_asngen = PMAP_ASNGEN_INVALID; |
899 | atomic_and_ulong(&pmap->pm_cpus, ~cpu_mask); | | 899 | atomic_and_ulong(&pmap->pm_cpus, ~cpu_mask); |
900 | | | 900 | |
901 | /* | | 901 | /* |
902 | * This isn't strictly necessary; when we allocate a | | 902 | * This isn't strictly necessary; when we allocate a |
903 | * new ASN, we're going to clear this bit and skip | | 903 | * new ASN, we're going to clear this bit and skip |
904 | * syncing the I-stream. But we will keep this bit | | 904 | * syncing the I-stream. But we will keep this bit |
905 | * of accounting for internal consistency. | | 905 | * of accounting for internal consistency. |
906 | */ | | 906 | */ |
907 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { | | 907 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { |
908 | pmap->pm_percpu[ci->ci_cpuid].pmc_needisync = 1; | | 908 | pmap->pm_percpu[ci->ci_cpuid].pmc_needisync = 1; |
909 | } | | 909 | } |
910 | return; | | 910 | return; |
911 | } | | 911 | } |
912 | | | 912 | |
913 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { | | 913 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { |
914 | TLB_COUNT(invalidate_user_lazy_imb); | | 914 | TLB_COUNT(invalidate_user_lazy_imb); |
915 | pmap->pm_percpu[ci->ci_cpuid].pmc_needisync = 1; | | 915 | pmap->pm_percpu[ci->ci_cpuid].pmc_needisync = 1; |
916 | } | | 916 | } |
917 | | | 917 | |
918 | if (count == TLB_CTX_ALLVA) { | | 918 | if (count == TLB_CTX_ALLVA) { |
919 | /* | | 919 | /* |
920 | * Another option here for CPUs that implement ASNs is | | 920 | * Another option here for CPUs that implement ASNs is |
921 | * to allocate a new ASN and do a SWPCTX. That's almost | | 921 | * to allocate a new ASN and do a SWPCTX. That's almost |
922 | * certainly faster than a TBIAP, but would require us | | 922 | * certainly faster than a TBIAP, but would require us |
923 | * to synchronize against IPIs in pmap_activate(). | | 923 | * to synchronize against IPIs in pmap_activate(). |
924 | */ | | 924 | */ |
925 | TLB_COUNT(invalidate_user_tbiap); | | 925 | TLB_COUNT(invalidate_user_tbiap); |
926 | KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) == 0); | | 926 | KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_ASM) == 0); |
927 | ALPHA_TBIAP(); | | 927 | ALPHA_TBIAP(); |
928 | } else { | | 928 | } else { |
929 | TLB_COUNT(invalidate_user_tbis); | | 929 | TLB_COUNT(invalidate_user_tbis); |
930 | for (uintptr_t i = 0; i < count; i++) { | | 930 | for (uintptr_t i = 0; i < count; i++) { |
931 | ALPHA_TBIS(TLB_CTX_VA(tlbctx, i)); | | 931 | ALPHA_TBIS(TLB_CTX_VA(tlbctx, i)); |
932 | } | | 932 | } |
933 | } | | 933 | } |
934 | } | | 934 | } |
935 | | | 935 | |
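| /* | | | /*
|  * pmap_tlb_shootnow: | | |  * pmap_tlb_shootnow:
|  * | | |  *
|  *	Process the shootdown described by the context: notify the | | |  *	Process the shootdown described by the context: notify the
|  *	CPUs that need to participate, perform the local | | |  *	CPUs that need to participate, perform the local
|  *	invalidation, and wait for the remote CPUs to finish. | | |  *	invalidation, and wait for the remote CPUs to finish.
|  */ | | |  */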
936 | static void | | 936 | static void |
937 | pmap_tlb_shootnow(const struct pmap_tlb_context * const tlbctx) | | 937 | pmap_tlb_shootnow(const struct pmap_tlb_context * const tlbctx) |
938 | { | | 938 | { |
939 | | | 939 | |
940 | if (TLB_CTX_COUNT(tlbctx) == 0) { | | 940 | if (TLB_CTX_COUNT(tlbctx) == 0) { |
941 | /* No work to do. */ | | 941 | /* No work to do. */ |
942 | return; | | 942 | return; |
943 | } | | 943 | } |
944 | | | 944 | |
945 | /* | | 945 | /* |
946 | * Acquire the shootdown mutex. This will also block IPL_VM | | 946 | * Acquire the shootdown mutex. This will also block IPL_VM |
947 | * interrupts and disable preemption. It is critically important | | 947 | * interrupts and disable preemption. It is critically important |
948 | * that IPIs not be blocked in this routine. | | 948 | * that IPIs not be blocked in this routine. |
949 | */ | | 949 | */ |
950 | KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) < ALPHA_PSL_IPL_CLOCK); | | 950 | KASSERT((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) < ALPHA_PSL_IPL_CLOCK); |
951 | mutex_spin_enter(&tlb_lock); | | 951 | mutex_spin_enter(&tlb_lock); |
952 | tlb_evcnt.ev_count++; | | 952 | tlb_evcnt.ev_count++; |
953 | | | 953 | |
954 | const struct cpu_info *ci = curcpu(); | | 954 | const struct cpu_info *ci = curcpu(); |
955 | const u_long this_cpu = 1UL << ci->ci_cpuid; | | 955 | const u_long this_cpu = 1UL << ci->ci_cpuid; |
956 | u_long active_cpus; | | 956 | u_long active_cpus; |
957 | bool activation_locked, activation_lock_tried; | | 957 | bool activation_locked, activation_lock_tried; |
958 | | | 958 | |
959 | /* | | 959 | /* |
960 | * Figure out who to notify. If it's for the kernel or | | 960 | * Figure out who to notify. If it's for the kernel or |
961 | * multiple address spaces, we notify everybody. If | | 961 | * multiple address spaces, we notify everybody. If
962 | * it's a single user pmap, then we try to acquire the | | 962 | * it's a single user pmap, then we try to acquire the |
963 | * activation lock so we can get an accurate accounting | | 963 | * activation lock so we can get an accurate accounting |
964 | * of who needs to be notified. If we can't acquire | | 964 | * of who needs to be notified. If we can't acquire |
965 | * the activation lock, then just notify everyone and | | 965 | * the activation lock, then just notify everyone and |
966 | * let them sort it out when they process the IPI. | | 966 | * let them sort it out when they process the IPI. |
967 | */ | | 967 | */ |
968 | if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_ASM | TLB_CTX_F_MULTI)) { | | 968 | if (TLB_CTX_FLAGS(tlbctx) & (TLB_CTX_F_ASM | TLB_CTX_F_MULTI)) { |
969 | active_cpus = pmap_all_cpus(); | | 969 | active_cpus = pmap_all_cpus(); |
970 | activation_locked = false; | | 970 | activation_locked = false; |
971 | activation_lock_tried = false; | | 971 | activation_lock_tried = false; |
972 | } else { | | 972 | } else { |
973 | KASSERT(tlbctx->t_pmap != NULL); | | 973 | KASSERT(tlbctx->t_pmap != NULL); |
974 | activation_locked = PMAP_ACT_TRYLOCK(tlbctx->t_pmap); | | 974 | activation_locked = PMAP_ACT_TRYLOCK(tlbctx->t_pmap); |
975 | if (__predict_true(activation_locked)) { | | 975 | if (__predict_true(activation_locked)) { |
976 | active_cpus = tlbctx->t_pmap->pm_cpus; | | 976 | active_cpus = tlbctx->t_pmap->pm_cpus; |
977 | } else { | | 977 | } else { |
978 | TLB_COUNT(shootnow_over_notify); | | 978 | TLB_COUNT(shootnow_over_notify); |
979 | active_cpus = pmap_all_cpus(); | | 979 | active_cpus = pmap_all_cpus(); |
980 | } | | 980 | } |
981 | activation_lock_tried = true; | | 981 | activation_lock_tried = true; |
982 | } | | 982 | } |
983 | | | 983 | |
984 | #if defined(MULTIPROCESSOR) | | 984 | #if defined(MULTIPROCESSOR) |
985 | /* | | 985 | /* |
986 | * If there are remote CPUs that need to do work, get them | | 986 | * If there are remote CPUs that need to do work, get them |
987 | * started now. | | 987 | * started now. |
988 | */ | | 988 | */ |
989 | const u_long remote_cpus = active_cpus & ~this_cpu; | | 989 | const u_long remote_cpus = active_cpus & ~this_cpu; |
990 | KASSERT(tlb_context == NULL); | | 990 | KASSERT(tlb_context == NULL); |
991 | if (remote_cpus) { | | 991 | if (remote_cpus) { |
992 | TLB_COUNT(shootnow_remote); | | 992 | TLB_COUNT(shootnow_remote); |
993 | tlb_context = tlbctx; | | 993 | tlb_context = tlbctx; |
994 | tlb_pending = remote_cpus; | | 994 | tlb_pending = remote_cpus; |
995 | alpha_multicast_ipi(remote_cpus, ALPHA_IPI_SHOOTDOWN); | | 995 | alpha_multicast_ipi(remote_cpus, ALPHA_IPI_SHOOTDOWN); |
996 | } | | 996 | } |
997 | #endif /* MULTIPROCESSOR */ | | 997 | #endif /* MULTIPROCESSOR */ |
998 | | | 998 | |
999 | /* | | 999 | /* |
1000 | * Now that the remotes have been notified, release the | | 1000 | * Now that the remotes have been notified, release the |
1001 | * activation lock. | | 1001 | * activation lock. |
1002 | */ | | 1002 | */ |
1003 | if (activation_lock_tried) { | | 1003 | if (activation_lock_tried) { |
1004 | if (activation_locked) { | | 1004 | if (activation_locked) { |
1005 | KASSERT(tlbctx->t_pmap != NULL); | | 1005 | KASSERT(tlbctx->t_pmap != NULL); |
1006 | PMAP_ACT_UNLOCK(tlbctx->t_pmap); | | 1006 | PMAP_ACT_UNLOCK(tlbctx->t_pmap); |
1007 | } | | 1007 | } |
1008 | /* | | 1008 | /* |
1009 | * When we tried to acquire the activation lock, we | | 1009 | * When we tried to acquire the activation lock, we |
1010 | * raised IPL to IPL_SCHED (even if we ultimately | | 1010 | * raised IPL to IPL_SCHED (even if we ultimately |
1011 | * failed to acquire the lock), which blocks out IPIs. | | 1011 | * failed to acquire the lock), which blocks out IPIs. |
1012 | * Force our IPL back down to IPL_VM so that we can | | 1012 | * Force our IPL back down to IPL_VM so that we can |
1013 | * receive IPIs. | | 1013 | * receive IPIs. |
1014 | */ | | 1014 | */ |
1015 | alpha_pal_swpipl(IPL_VM); | | 1015 | alpha_pal_swpipl(IPL_VM); |
1016 | } | | 1016 | } |
1017 | | | 1017 | |
1018 | /* | | 1018 | /* |
1019 | * Do any work that we might need to do. We don't need to | | 1019 | * Do any work that we might need to do. We don't need to |
1020 | * synchronize with activation here because we know that | | 1020 | * synchronize with activation here because we know that |
1021 | * for the current CPU, activation status will not change. | | 1021 | * for the current CPU, activation status will not change. |
1022 | */ | | 1022 | */ |
1023 | if (active_cpus & this_cpu) { | | 1023 | if (active_cpus & this_cpu) { |
1024 | pmap_tlb_invalidate(tlbctx, ci); | | 1024 | pmap_tlb_invalidate(tlbctx, ci); |
1025 | } | | 1025 | } |
1026 | | | 1026 | |
1027 | #if defined(MULTIPROCESSOR) | | 1027 | #if defined(MULTIPROCESSOR) |
1028 | /* Wait for remote CPUs to finish. */ | | 1028 | /* Wait for remote CPUs to finish. */ |
1029 | if (remote_cpus) { | | 1029 | if (remote_cpus) { |
1030 | int backoff = SPINLOCK_BACKOFF_MIN; | | 1030 | int backoff = SPINLOCK_BACKOFF_MIN; |
1031 | u_int spins = 0; | | 1031 | u_int spins = 0; |
1032 | | | 1032 | |
1033 | while (atomic_load_acquire(&tlb_context) != NULL) { | | 1033 | while (atomic_load_acquire(&tlb_context) != NULL) { |
1034 | SPINLOCK_BACKOFF(backoff); | | 1034 | SPINLOCK_BACKOFF(backoff); |
1035 | if (spins++ > 0x0fffffff) { | | 1035 | if (spins++ > 0x0fffffff) { |
1036 | printf("TLB LOCAL MASK = 0x%016lx\n", | | 1036 | printf("TLB LOCAL MASK = 0x%016lx\n", |
1037 | this_cpu); | | 1037 | this_cpu); |
1038 | printf("TLB REMOTE MASK = 0x%016lx\n", | | 1038 | printf("TLB REMOTE MASK = 0x%016lx\n", |
1039 | remote_cpus); | | 1039 | remote_cpus); |
1040 | printf("TLB REMOTE PENDING = 0x%016lx\n", | | 1040 | printf("TLB REMOTE PENDING = 0x%016lx\n", |
1041 | tlb_pending); | | 1041 | tlb_pending); |
1042 | printf("TLB CONTEXT = %p\n", tlb_context); | | 1042 | printf("TLB CONTEXT = %p\n", tlb_context); |
1043 | printf("TLB LOCAL IPL = %lu\n", | | 1043 | printf("TLB LOCAL IPL = %lu\n", |
1044 | alpha_pal_rdps() & ALPHA_PSL_IPL_MASK); | | 1044 | alpha_pal_rdps() & ALPHA_PSL_IPL_MASK); |
1045 | panic("pmap_tlb_shootnow"); | | 1045 | panic("pmap_tlb_shootnow"); |
1046 | } | | 1046 | } |
1047 | } | | 1047 | } |
1048 | } | | 1048 | } |
1049 | KASSERT(tlb_context == NULL); | | 1049 | KASSERT(tlb_context == NULL); |
1050 | #endif /* MULTIPROCESSOR */ | | 1050 | #endif /* MULTIPROCESSOR */ |
1051 | | | 1051 | |
1052 | mutex_spin_exit(&tlb_lock); | | 1052 | mutex_spin_exit(&tlb_lock); |
1053 | | | 1053 | |
1054 | if (__predict_false(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV)) { | | 1054 | if (__predict_false(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV)) { |
1055 | /* | | 1055 | /* |
1056 | * P->V TLB operations may operate on multiple pmaps. | | 1056 | * P->V TLB operations may operate on multiple pmaps. |
1057 | * The shootdown takes a reference on the first pmap it | | 1057 | * The shootdown takes a reference on the first pmap it |
1058 | * encounters, in order to prevent it from disappearing, | | 1058 | * encounters, in order to prevent it from disappearing, |
1059 | * in the hope that we end up with a single-pmap P->V | | 1059 | * in the hope that we end up with a single-pmap P->V |
1060 | * operation (instrumentation shows this is not rare). | | 1060 | * operation (instrumentation shows this is not rare). |
1061 | * | | 1061 | * |
1062 | * Once this shootdown is finished globally, we need to | | 1062 | * Once this shootdown is finished globally, we need to |
1063 | * release this extra reference. | | 1063 | * release this extra reference. |
1064 | */ | | 1064 | */ |
1065 | KASSERT(tlbctx->t_pmap != NULL); | | 1065 | KASSERT(tlbctx->t_pmap != NULL); |
1066 | pmap_destroy(tlbctx->t_pmap); | | 1066 | pmap_destroy(tlbctx->t_pmap); |
1067 | } | | 1067 | } |
1068 | } | | 1068 | } |
1069 | | | 1069 | |
1070 | #if defined(MULTIPROCESSOR) | | 1070 | #if defined(MULTIPROCESSOR) |
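| /* | | | /*
|  * pmap_tlb_shootdown_ipi: | | |  * pmap_tlb_shootdown_ipi:
|  * | | |  *
|  *	IPI handler for TLB shootdowns: invalidate using the global | | |  *	IPI handler for TLB shootdowns: invalidate using the global
|  *	context, clear this CPU's bit in the pending mask, and | | |  *	context, clear this CPU's bit in the pending mask, and
|  *	release the context when the last CPU finishes. | | |  *	release the context when the last CPU finishes.
|  */ | | |  */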
1071 | void | | 1071 | void |
1073 | pmap_tlb_shootdown_ipi(struct cpu_info * const ci, | | 1073 | pmap_tlb_shootdown_ipi(struct cpu_info * const ci,
1074 |     struct trapframe * const tf __unused) | | 1074 |     struct trapframe * const tf __unused)
1075 | { | | 1075 | { |
1076 | KASSERT(tlb_context != NULL); | | 1076 | KASSERT(tlb_context != NULL); |
1077 | pmap_tlb_invalidate(tlb_context, ci); | | 1077 | pmap_tlb_invalidate(tlb_context, ci); |
1078 | if (atomic_and_ulong_nv(&tlb_pending, ~(1UL << ci->ci_cpuid)) == 0) { | | 1078 | if (atomic_and_ulong_nv(&tlb_pending, ~(1UL << ci->ci_cpuid)) == 0) { |
1079 | atomic_store_release(&tlb_context, NULL); | | 1079 | atomic_store_release(&tlb_context, NULL); |
1080 | } | | 1080 | } |
1081 | } | | 1081 | } |
1082 | #endif /* MULTIPROCESSOR */ | | 1082 | #endif /* MULTIPROCESSOR */ |
1083 | | | 1083 | |
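| /* | | | /*
|  * pmap_tlb_ptpage_drain: | | |  * pmap_tlb_ptpage_drain:
|  * | | |  *
|  *	Free any PT pages queued on the context for deferred release. | | |  *	Free any PT pages queued on the context for deferred release.
|  */ | | |  */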
1084 | static __inline void | | 1084 | static __inline void |
1085 | pmap_tlb_ptpage_drain(struct pmap_tlb_context * const tlbctx) | | 1085 | pmap_tlb_ptpage_drain(struct pmap_tlb_context * const tlbctx) |
1086 | { | | 1086 | { |
1087 | pmap_pagelist_free(&tlbctx->t_freeptq); | | 1087 | pmap_pagelist_free(&tlbctx->t_freeptq); |
1088 | } | | 1088 | } |
1089 | | | 1089 | |
1090 | /* | | 1090 | /* |
1091 | * Internal routines | | 1091 | * Internal routines |
1092 | */ | | 1092 | */ |
1093 | static void alpha_protection_init(void); | | 1093 | static void alpha_protection_init(void); |
1094 | static pt_entry_t pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool, | | 1094 | static pt_entry_t pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool, |
1095 | pv_entry_t *, | | 1095 | pv_entry_t *, |
1096 | struct pmap_tlb_context *); | | 1096 | struct pmap_tlb_context *); |
1097 | static void pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t, | | 1097 | static void pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t, |
1098 | struct pmap_tlb_context *); | | 1098 | struct pmap_tlb_context *); |
1099 | | | 1099 | |
1100 | /* | | 1100 | /* |
1101 | * PT page management functions. | | 1101 | * PT page management functions. |
1102 | */ | | 1102 | */ |
1103 | static int pmap_ptpage_alloc(pmap_t, pt_entry_t *, int); | | 1103 | static int pmap_ptpage_alloc(pmap_t, pt_entry_t *, int); |
1104 | static void pmap_ptpage_free(pmap_t, pt_entry_t *, | | 1104 | static void pmap_ptpage_free(pmap_t, pt_entry_t *, |
1105 | struct pmap_tlb_context *); | | 1105 | struct pmap_tlb_context *); |
1106 | static void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, | | 1106 | static void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, |
1107 | struct pmap_tlb_context *); | | 1107 | struct pmap_tlb_context *); |
1108 | static void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, | | 1108 | static void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, |
1109 | struct pmap_tlb_context *); | | 1109 | struct pmap_tlb_context *); |
1110 | static void pmap_l1pt_delref(pmap_t, pt_entry_t *); | | 1110 | static void pmap_l1pt_delref(pmap_t, pt_entry_t *); |
1111 | | | 1111 | |
1112 | static void *pmap_l1pt_alloc(struct pool *, int); | | 1112 | static void *pmap_l1pt_alloc(struct pool *, int); |
1113 | static void pmap_l1pt_free(struct pool *, void *); | | 1113 | static void pmap_l1pt_free(struct pool *, void *); |
1114 | | | 1114 | |
1115 | static struct pool_allocator pmap_l1pt_allocator = { | | 1115 | static struct pool_allocator pmap_l1pt_allocator = { |
1116 | pmap_l1pt_alloc, pmap_l1pt_free, 0, | | 1116 | pmap_l1pt_alloc, pmap_l1pt_free, 0, |
1117 | }; | | 1117 | }; |
1118 | | | 1118 | |
1119 | static int pmap_l1pt_ctor(void *, void *, int); | | 1119 | static int pmap_l1pt_ctor(void *, void *, int); |
1120 | | | 1120 | |
1121 | /* | | 1121 | /* |
1122 | * PV table management functions. | | 1122 | * PV table management functions. |
1123 | */ | | 1123 | */ |
1124 | static int pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, pt_entry_t *, | | 1124 | static int pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, pt_entry_t *, |
1125 | bool, pv_entry_t); | | 1125 | bool, pv_entry_t); |
1126 | static void pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool, | | 1126 | static void pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool, |
1127 | pv_entry_t *); | | 1127 | pv_entry_t *); |
1128 | static void *pmap_pv_page_alloc(struct pool *, int); | | 1128 | static void *pmap_pv_page_alloc(struct pool *, int); |
1129 | static void pmap_pv_page_free(struct pool *, void *); | | 1129 | static void pmap_pv_page_free(struct pool *, void *); |
1130 | | | 1130 | |
1131 | static struct pool_allocator pmap_pv_page_allocator = { | | 1131 | static struct pool_allocator pmap_pv_page_allocator = { |
1132 | pmap_pv_page_alloc, pmap_pv_page_free, 0, | | 1132 | pmap_pv_page_alloc, pmap_pv_page_free, 0, |
1133 | }; | | 1133 | }; |
1134 | | | 1134 | |
1135 | #ifdef DEBUG | | 1135 | #ifdef DEBUG |
1136 | void pmap_pv_dump(paddr_t); | | 1136 | void pmap_pv_dump(paddr_t); |
1137 | #endif | | 1137 | #endif |
| @@ -2362,1562 +2362,1554 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v | | | @@ -2362,1562 +2362,1554 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v |
2362 | * Remove a mapping entered with pmap_kenter_pa() starting at va, | | 2362 | * Remove a mapping entered with pmap_kenter_pa() starting at va, |
2363 | * for size bytes (assumed to be page rounded). | | 2363 | * for size bytes (assumed to be page rounded). |
2364 | */ | | 2364 | */ |
2365 | void | | 2365 | void |
2366 | pmap_kremove(vaddr_t va, vsize_t size) | | 2366 | pmap_kremove(vaddr_t va, vsize_t size) |
2367 | { | | 2367 | { |
2368 | pt_entry_t *pte, opte; | | 2368 | pt_entry_t *pte, opte; |
2369 | pmap_t const pmap = pmap_kernel(); | | 2369 | pmap_t const pmap = pmap_kernel(); |
2370 | struct pmap_tlb_context tlbctx; | | 2370 | struct pmap_tlb_context tlbctx; |
2371 | int count = 0; | | 2371 | int count = 0; |
2372 | | | 2372 | |
2373 | #ifdef DEBUG | | 2373 | #ifdef DEBUG |
2374 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) | | 2374 | if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) |
2375 | printf("pmap_kremove(%lx, %lx)\n", | | 2375 | printf("pmap_kremove(%lx, %lx)\n", |
2376 | va, size); | | 2376 | va, size); |
2377 | #endif | | 2377 | #endif |
2378 | | | 2378 | |
2379 | pmap_tlb_context_init(&tlbctx, 0); | | 2379 | pmap_tlb_context_init(&tlbctx, 0); |
2380 | | | 2380 | |
2381 | KASSERT(va >= VM_MIN_KERNEL_ADDRESS); | | 2381 | KASSERT(va >= VM_MIN_KERNEL_ADDRESS); |
2382 | | | 2382 | |
2383 | for (; size != 0; size -= PAGE_SIZE, va += PAGE_SIZE) { | | 2383 | for (; size != 0; size -= PAGE_SIZE, va += PAGE_SIZE) { |
2384 | pte = PMAP_KERNEL_PTE(va); | | 2384 | pte = PMAP_KERNEL_PTE(va); |
2385 | opte = atomic_load_relaxed(pte); | | 2385 | opte = atomic_load_relaxed(pte); |
2386 | if (opte & PG_V) { | | 2386 | if (opte & PG_V) { |
2387 | KASSERT((opte & PG_PVLIST) == 0); | | 2387 | KASSERT((opte & PG_PVLIST) == 0); |
2388 | | | 2388 | |
2389 | /* Zap the mapping. */ | | 2389 | /* Zap the mapping. */ |
2390 | atomic_store_relaxed(pte, PG_NV); | | 2390 | atomic_store_relaxed(pte, PG_NV); |
2391 | pmap_tlb_shootdown(pmap, va, opte, &tlbctx); | | 2391 | pmap_tlb_shootdown(pmap, va, opte, &tlbctx); |
2392 | | | 2392 | |
2393 | count++; | | 2393 | count++; |
2394 | } | | 2394 | } |
2395 | } | | 2395 | } |
2396 | | | 2396 | |
2397 | /* Update stats. */ | | 2397 | /* Update stats. */ |
2398 | if (__predict_true(count != 0)) { | | 2398 | if (__predict_true(count != 0)) { |
2399 | PMAP_STAT_DECR(pmap->pm_stats.resident_count, count); | | 2399 | PMAP_STAT_DECR(pmap->pm_stats.resident_count, count); |
2400 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, count); | | 2400 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, count); |
2401 | } | | 2401 | } |
2402 | | | 2402 | |
2403 | pmap_tlb_shootnow(&tlbctx); | | 2403 | pmap_tlb_shootnow(&tlbctx); |
2404 | TLB_COUNT(reason_kremove); | | 2404 | TLB_COUNT(reason_kremove); |
2405 | } | | 2405 | } |
2406 | | | 2406 | |
2407 | /* | | 2407 | /* |
2408 | * pmap_unwire: [ INTERFACE ] | | 2408 | * pmap_unwire: [ INTERFACE ] |
2409 | * | | 2409 | * |
2410 | * Clear the wired attribute for a map/virtual-address pair. | | 2410 | * Clear the wired attribute for a map/virtual-address pair. |
2411 | * | | 2411 | * |
2412 | * The mapping must already exist in the pmap. | | 2412 | * The mapping must already exist in the pmap. |
2413 | */ | | 2413 | */ |
2414 | void | | 2414 | void |
2415 | pmap_unwire(pmap_t pmap, vaddr_t va) | | 2415 | pmap_unwire(pmap_t pmap, vaddr_t va) |
2416 | { | | 2416 | { |
2417 | pt_entry_t *pte; | | 2417 | pt_entry_t *pte; |
2418 | | | 2418 | |
2419 | #ifdef DEBUG | | 2419 | #ifdef DEBUG |
2420 | if (pmapdebug & PDB_FOLLOW) | | 2420 | if (pmapdebug & PDB_FOLLOW) |
2421 | printf("pmap_unwire(%p, %lx)\n", pmap, va); | | 2421 | printf("pmap_unwire(%p, %lx)\n", pmap, va); |
2422 | #endif | | 2422 | #endif |
2423 | | | 2423 | |
2424 | PMAP_LOCK(pmap); | | 2424 | PMAP_LOCK(pmap); |
2425 | | | 2425 | |
2426 | pte = pmap_l3pte(pmap_lev1map(pmap), va, NULL); | | 2426 | pte = pmap_l3pte(pmap_lev1map(pmap), va, NULL); |
2427 | | | 2427 | |
2428 | KASSERT(pte != NULL); | | 2428 | KASSERT(pte != NULL); |
2429 | KASSERT(pmap_pte_v(pte)); | | 2429 | KASSERT(pmap_pte_v(pte)); |
2430 | | | 2430 | |
2431 | /* | | 2431 | /* |
2432 | * If wiring actually changed (it always should), clear the wire bit and | | 2432 | * If wiring actually changed (it always should), clear the wire bit and
2433 | * update the wire count. Note that wiring is not a hardware | | 2433 | * update the wire count. Note that wiring is not a hardware |
2434 | * characteristic so there is no need to invalidate the TLB. | | 2434 | * characteristic so there is no need to invalidate the TLB. |
2435 | */ | | 2435 | */ |
2436 | if (pmap_pte_w_chg(pte, 0)) { | | 2436 | if (pmap_pte_w_chg(pte, 0)) { |
2437 | pmap_pte_set_w(pte, false); | | 2437 | pmap_pte_set_w(pte, false); |
2438 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); | | 2438 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); |
2439 | } | | 2439 | } |
2440 | #ifdef DEBUG | | 2440 | #ifdef DEBUG |
2441 | else { | | 2441 | else { |
2442 | printf("pmap_unwire: wiring for pmap %p va 0x%lx " | | 2442 | printf("pmap_unwire: wiring for pmap %p va 0x%lx " |
2443 | "didn't change!\n", pmap, va); | | 2443 | "didn't change!\n", pmap, va); |
2444 | } | | 2444 | } |
2445 | #endif | | 2445 | #endif |
2446 | | | 2446 | |
2447 | PMAP_UNLOCK(pmap); | | 2447 | PMAP_UNLOCK(pmap); |
2448 | } | | 2448 | } |
2449 | | | 2449 | |
2450 | /* | | 2450 | /* |
2451 | * pmap_extract: [ INTERFACE ] | | 2451 | * pmap_extract: [ INTERFACE ] |
2452 | * | | 2452 | * |
2453 | * Extract the physical address associated with the given | | 2453 | * Extract the physical address associated with the given |
2454 | * pmap/virtual address pair. | | 2454 | * pmap/virtual address pair. |
2455 | */ | | 2455 | */ |
2456 | bool | | 2456 | bool |
2457 | pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) | | 2457 | pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) |
2458 | { | | 2458 | { |
2459 | pt_entry_t *l1pte, *l2pte, *l3pte; | | 2459 | pt_entry_t *l1pte, *l2pte, *l3pte; |
2460 | paddr_t pa; | | 2460 | paddr_t pa; |
2461 | | | 2461 | |
2462 | #ifdef DEBUG | | 2462 | #ifdef DEBUG |
2463 | if (pmapdebug & PDB_FOLLOW) | | 2463 | if (pmapdebug & PDB_FOLLOW) |
2464 | printf("pmap_extract(%p, %lx) -> ", pmap, va); | | 2464 | printf("pmap_extract(%p, %lx) -> ", pmap, va); |
2465 | #endif | | 2465 | #endif |
2466 | | | 2466 | |
2467 | /* | | 2467 | /* |
2468 | * Take a faster path for the kernel pmap. Avoids locking, | | 2468 | * Take a faster path for the kernel pmap. Avoids locking, |
2469 | * handles K0SEG. | | 2469 | * handles K0SEG. |
2470 | */ | | 2470 | */ |
2471 | if (__predict_true(pmap == pmap_kernel())) { | | 2471 | if (__predict_true(pmap == pmap_kernel())) { |
2472 | if (__predict_true(vtophys_internal(va, pap))) { | | 2472 | if (__predict_true(vtophys_internal(va, pap))) { |
2473 | #ifdef DEBUG | | 2473 | #ifdef DEBUG |
2474 | if (pmapdebug & PDB_FOLLOW) | | 2474 | if (pmapdebug & PDB_FOLLOW) |
2475 | printf("0x%lx (kernel vtophys)\n", pa); | | 2475 | printf("0x%lx (kernel vtophys)\n", pa); |
2476 | #endif | | 2476 | #endif |
2477 | return true; | | 2477 | return true; |
2478 | } | | 2478 | } |
2479 | #ifdef DEBUG | | 2479 | #ifdef DEBUG |
2480 | if (pmapdebug & PDB_FOLLOW) | | 2480 | if (pmapdebug & PDB_FOLLOW) |
2481 | printf("failed (kernel vtophys)\n"); | | 2481 | printf("failed (kernel vtophys)\n"); |
2482 | #endif | | 2482 | #endif |
2483 | return false; | | 2483 | return false; |
2484 | } | | 2484 | } |
2485 | | | 2485 | |
2486 | pt_entry_t * const lev1map = pmap_lev1map(pmap); | | 2486 | pt_entry_t * const lev1map = pmap_lev1map(pmap); |
2487 | | | 2487 | |
2488 | PMAP_LOCK(pmap); | | 2488 | PMAP_LOCK(pmap); |
2489 | | | 2489 | |
2490 | l1pte = pmap_l1pte(lev1map, va); | | 2490 | l1pte = pmap_l1pte(lev1map, va); |
2491 | if (pmap_pte_v(l1pte) == 0) | | 2491 | if (pmap_pte_v(l1pte) == 0) |
2492 | goto out; | | 2492 | goto out; |
2493 | | | 2493 | |
2494 | l2pte = pmap_l2pte(lev1map, va, l1pte); | | 2494 | l2pte = pmap_l2pte(lev1map, va, l1pte); |
2495 | if (pmap_pte_v(l2pte) == 0) | | 2495 | if (pmap_pte_v(l2pte) == 0) |
2496 | goto out; | | 2496 | goto out; |
2497 | | | 2497 | |
2498 | l3pte = pmap_l3pte(lev1map, va, l2pte); | | 2498 | l3pte = pmap_l3pte(lev1map, va, l2pte); |
2499 | if (pmap_pte_v(l3pte) == 0) | | 2499 | if (pmap_pte_v(l3pte) == 0) |
2500 | goto out; | | 2500 | goto out; |
2501 | | | 2501 | |
2502 | pa = pmap_pte_pa(l3pte) | (va & PGOFSET); | | 2502 | pa = pmap_pte_pa(l3pte) | (va & PGOFSET); |
2503 | PMAP_UNLOCK(pmap); | | 2503 | PMAP_UNLOCK(pmap); |
2504 | if (pap != NULL) | | 2504 | if (pap != NULL) |
2505 | *pap = pa; | | 2505 | *pap = pa; |
2506 | #ifdef DEBUG | | 2506 | #ifdef DEBUG |
2507 | if (pmapdebug & PDB_FOLLOW) | | 2507 | if (pmapdebug & PDB_FOLLOW) |
2508 | printf("0x%lx\n", pa); | | 2508 | printf("0x%lx\n", pa); |
2509 | #endif | | 2509 | #endif |
2510 | return (true); | | 2510 | return (true); |
2511 | | | 2511 | |
2512 | out: | | 2512 | out: |
2513 | PMAP_UNLOCK(pmap); | | 2513 | PMAP_UNLOCK(pmap); |
2514 | #ifdef DEBUG | | 2514 | #ifdef DEBUG |
2515 | if (pmapdebug & PDB_FOLLOW) | | 2515 | if (pmapdebug & PDB_FOLLOW) |
2516 | printf("failed\n"); | | 2516 | printf("failed\n"); |
2517 | #endif | | 2517 | #endif |
2518 | return (false); | | 2518 | return (false); |
2519 | } | | 2519 | } |
2520 | | | 2520 | |
2521 | /* | | 2521 | /* |
2522 | * pmap_copy: [ INTERFACE ] | | 2522 | * pmap_copy: [ INTERFACE ] |
2523 | * | | 2523 | * |
2524 | * Copy the mapping range specified by src_addr/len | | 2524 | * Copy the mapping range specified by src_addr/len |
2525 | * from the source map to the range dst_addr/len | | 2525 | * from the source map to the range dst_addr/len |
2526 | * in the destination map. | | 2526 | * in the destination map. |
2527 | * | | 2527 | * |
2528 | * This routine is only advisory and need not do anything. | | 2528 | * This routine is only advisory and need not do anything. |
2529 | */ | | 2529 | */ |
2530 | /* call deleted in <machine/pmap.h> */ | | 2530 | /* call deleted in <machine/pmap.h> */ |
2531 | | | 2531 | |
2532 | /* | | 2532 | /* |
2533 | * pmap_update: [ INTERFACE ] | | 2533 | * pmap_update: [ INTERFACE ] |
2534 | * | | 2534 | * |
2535 | * Require that all active physical maps contain no | | 2535 | * Require that all active physical maps contain no |
2536 | * incorrect entries NOW, by processing any deferred | | 2536 | * incorrect entries NOW, by processing any deferred |
2537 | * pmap operations. | | 2537 | * pmap operations. |
2538 | */ | | 2538 | */ |
2539 | /* call deleted in <machine/pmap.h> */ | | 2539 | /* call deleted in <machine/pmap.h> */ |
2540 | | | 2540 | |
2541 | /* | | 2541 | /* |
2542 | * pmap_activate: [ INTERFACE ] | | 2542 | * pmap_activate: [ INTERFACE ] |
2543 | * | | 2543 | * |
2544 | * Activate the pmap used by the specified process. This includes | | 2544 | * Activate the pmap used by the specified process. This includes |
2545 | * reloading the MMU context of the current process, and marking | | 2545 | * reloading the MMU context of the current process, and marking |
2546 | * the pmap in use by the processor. | | 2546 | * the pmap in use by the processor. |
2547 | */ | | 2547 | */ |
2548 | void | | 2548 | void |
2549 | pmap_activate(struct lwp *l) | | 2549 | pmap_activate(struct lwp *l) |
2550 | { | | 2550 | { |
2551 | struct pmap * const pmap = l->l_proc->p_vmspace->vm_map.pmap; | | 2551 | struct pmap * const pmap = l->l_proc->p_vmspace->vm_map.pmap; |
2552 | struct pcb * const pcb = lwp_getpcb(l); | | 2552 | struct pcb * const pcb = lwp_getpcb(l); |
2553 | | | 2553 | |
2554 | #ifdef DEBUG | | 2554 | #ifdef DEBUG |
2555 | if (pmapdebug & PDB_FOLLOW) | | 2555 | if (pmapdebug & PDB_FOLLOW) |
2556 | printf("pmap_activate(%p)\n", l); | | 2556 | printf("pmap_activate(%p)\n", l); |
2557 | #endif | | 2557 | #endif |
2558 | | | 2558 | |
2559 | KASSERT(kpreempt_disabled()); | | 2559 | KASSERT(kpreempt_disabled()); |
2560 | | | 2560 | |
2561 | struct cpu_info * const ci = curcpu(); | | 2561 | struct cpu_info * const ci = curcpu(); |
2562 | | | 2562 | |
2563 | KASSERT(l == ci->ci_curlwp); | | 2563 | KASSERT(l == ci->ci_curlwp); |
2564 | | | 2564 | |
2565 | u_long const old_ptbr = pcb->pcb_hw.apcb_ptbr; | | 2565 | u_long const old_ptbr = pcb->pcb_hw.apcb_ptbr; |
2566 | u_int const old_asn = pcb->pcb_hw.apcb_asn; | | 2566 | u_int const old_asn = pcb->pcb_hw.apcb_asn; |
2567 | | | 2567 | |
2568 | /* | | 2568 | /* |
2569 | * We hold the activation lock to synchronize with TLB shootdown. | | 2569 | * We hold the activation lock to synchronize with TLB shootdown. |
2570 | * The kernel pmap does not require those tests because shootdowns | | 2570 | * The kernel pmap does not require those tests because shootdowns |
2571 | * for the kernel pmap are always sent to all CPUs. | | 2571 | * for the kernel pmap are always sent to all CPUs. |
2572 | */ | | 2572 | */ |
2573 | if (pmap != pmap_kernel()) { | | 2573 | if (pmap != pmap_kernel()) { |
2574 | PMAP_ACT_LOCK(pmap); | | 2574 | PMAP_ACT_LOCK(pmap); |
2575 | pcb->pcb_hw.apcb_asn = pmap_asn_alloc(pmap, ci); | | 2575 | pcb->pcb_hw.apcb_asn = pmap_asn_alloc(pmap, ci); |
2576 | atomic_or_ulong(&pmap->pm_cpus, (1UL << ci->ci_cpuid)); | | 2576 | atomic_or_ulong(&pmap->pm_cpus, (1UL << ci->ci_cpuid)); |
2577 | } else { | | 2577 | } else { |
2578 | pcb->pcb_hw.apcb_asn = PMAP_ASN_KERNEL; | | 2578 | pcb->pcb_hw.apcb_asn = PMAP_ASN_KERNEL; |
2579 | } | | 2579 | } |
2580 | pcb->pcb_hw.apcb_ptbr = | | 2580 | pcb->pcb_hw.apcb_ptbr = |
2581 | ALPHA_K0SEG_TO_PHYS((vaddr_t)pmap_lev1map(pmap)) >> PGSHIFT; | | 2581 | ALPHA_K0SEG_TO_PHYS((vaddr_t)pmap_lev1map(pmap)) >> PGSHIFT; |
2582 | | | 2582 | |
2583 | /* | | 2583 | /* |
2584 | * Check to see if the ASN or page table base has changed; if | | 2584 | * Check to see if the ASN or page table base has changed; if |
2585 | * so, switch to our own context again so that it will take | | 2585 | * so, switch to our own context again so that it will take |
2586 | * effect. | | 2586 | * effect. |
2587 | * | | 2587 | * |
2588 | * We test ASN first because it's the most likely value to change. | | 2588 | * We test ASN first because it's the most likely value to change. |
2589 | */ | | 2589 | */ |
2590 | if (old_asn != pcb->pcb_hw.apcb_asn || | | 2590 | if (old_asn != pcb->pcb_hw.apcb_asn || |
2591 | old_ptbr != pcb->pcb_hw.apcb_ptbr) { | | 2591 | old_ptbr != pcb->pcb_hw.apcb_ptbr) { |
2592 | if (old_asn != pcb->pcb_hw.apcb_asn && | | 2592 | if (old_asn != pcb->pcb_hw.apcb_asn && |
2593 | old_ptbr != pcb->pcb_hw.apcb_ptbr) { | | 2593 | old_ptbr != pcb->pcb_hw.apcb_ptbr) { |
2594 | TLB_COUNT(activate_both_change); | | 2594 | TLB_COUNT(activate_both_change); |
2595 | } else if (old_asn != pcb->pcb_hw.apcb_asn) { | | 2595 | } else if (old_asn != pcb->pcb_hw.apcb_asn) { |
2596 | TLB_COUNT(activate_asn_change); | | 2596 | TLB_COUNT(activate_asn_change); |
2597 | } else { | | 2597 | } else { |
2598 | TLB_COUNT(activate_ptbr_change); | | 2598 | TLB_COUNT(activate_ptbr_change); |
2599 | } | | 2599 | } |
2600 | (void) alpha_pal_swpctx((u_long)l->l_md.md_pcbpaddr); | | 2600 | (void) alpha_pal_swpctx((u_long)l->l_md.md_pcbpaddr); |
2601 | TLB_COUNT(activate_swpctx); | | 2601 | TLB_COUNT(activate_swpctx); |
2602 | } else { | | 2602 | } else { |
2603 | TLB_COUNT(activate_skip_swpctx); | | 2603 | TLB_COUNT(activate_skip_swpctx); |
2604 | } | | 2604 | } |
2605 | | | 2605 | |
2606 | pmap_reference(pmap); | | 2606 | pmap_reference(pmap); |
2607 | ci->ci_pmap = pmap; | | 2607 | ci->ci_pmap = pmap; |
2608 | | | 2608 | |
2609 | if (pmap != pmap_kernel()) { | | 2609 | if (pmap != pmap_kernel()) { |
2610 | PMAP_ACT_UNLOCK(pmap); | | 2610 | PMAP_ACT_UNLOCK(pmap); |
2611 | } | | 2611 | } |
2612 | } | | 2612 | } |
2613 | | | 2613 | |
2614 | /* | | 2614 | /* |
2615 | * pmap_deactivate: [ INTERFACE ] | | 2615 | * pmap_deactivate: [ INTERFACE ] |
2616 | * | | 2616 | * |
2617 | * Mark that the pmap used by the specified process is no longer | | 2617 | * Mark that the pmap used by the specified process is no longer |
2618 | * in use by the processor. | | 2618 | * in use by the processor. |
2619 | */ | | 2619 | */ |
2620 | void | | 2620 | void |
2621 | pmap_deactivate(struct lwp *l) | | 2621 | pmap_deactivate(struct lwp *l) |
2622 | { | | 2622 | { |
2623 | struct pmap * const pmap = l->l_proc->p_vmspace->vm_map.pmap; | | 2623 | struct pmap * const pmap = l->l_proc->p_vmspace->vm_map.pmap; |
2624 | | | 2624 | |
2625 | #ifdef DEBUG | | 2625 | #ifdef DEBUG |
2626 | if (pmapdebug & PDB_FOLLOW) | | 2626 | if (pmapdebug & PDB_FOLLOW) |
2627 | printf("pmap_deactivate(%p)\n", l); | | 2627 | printf("pmap_deactivate(%p)\n", l); |
2628 | #endif | | 2628 | #endif |
2629 | | | 2629 | |
2630 | KASSERT(kpreempt_disabled()); | | 2630 | KASSERT(kpreempt_disabled()); |
2631 | | | 2631 | |
2632 | struct cpu_info * const ci = curcpu(); | | 2632 | struct cpu_info * const ci = curcpu(); |
2633 | | | 2633 | |
2634 | KASSERT(l == ci->ci_curlwp); | | 2634 | KASSERT(l == ci->ci_curlwp); |
2635 | KASSERT(pmap == ci->ci_pmap); | | 2635 | KASSERT(pmap == ci->ci_pmap); |
2636 | | | 2636 | |
2637 | /* | | 2637 | /* |
2638 | * There is no need to switch to a different PTBR here, | | 2638 | * There is no need to switch to a different PTBR here, |
2639 | * because a pmap_activate() or SWPCTX is guaranteed | | 2639 | * because a pmap_activate() or SWPCTX is guaranteed |
2640 | * before whatever lev1map we're on now is invalidated | | 2640 | * before whatever lev1map we're on now is invalidated |
2641 | * or before user space is accessed again. | | 2641 | * or before user space is accessed again. |
2642 | * | | 2642 | * |
2643 | * Because only kernel mappings will be accessed before the | | 2643 | * Because only kernel mappings will be accessed before the |
2644 | * next pmap_activate() call, we consider our CPU to be on | | 2644 | * next pmap_activate() call, we consider our CPU to be on |
2645 | * the kernel pmap. | | 2645 | * the kernel pmap. |
2646 | */ | | 2646 | */ |
2647 | ci->ci_pmap = pmap_kernel(); | | 2647 | ci->ci_pmap = pmap_kernel(); |
2648 | KASSERT(atomic_load_relaxed(&pmap->pm_count) > 1); | | 2648 | KASSERT(atomic_load_relaxed(&pmap->pm_count) > 1); |
2649 | pmap_destroy(pmap); | | 2649 | pmap_destroy(pmap); |
2650 | } | | 2650 | } |
2651 | | | 2651 | |
2652 | /* | | 2652 | /* |
2653 | * pmap_zero_page: [ INTERFACE ] | | 2653 | * pmap_zero_page: [ INTERFACE ] |
2654 | * | | 2654 | * |
2655 | * Zero the specified (machine independent) page by mapping the page | | 2655 | * Zero the specified (machine independent) page by mapping the page |
2656 | * into virtual memory and clear its contents, one machine dependent | | 2656 | * into virtual memory and clear its contents, one machine dependent |
2657 | * page at a time. | | 2657 | * page at a time. |
2658 | * | | 2658 | * |
2659 | * Note: no locking is necessary in this function. | | 2659 | * Note: no locking is necessary in this function. |
2660 | */ | | 2660 | */ |
2661 | void | | 2661 | void |
2662 | pmap_zero_page(paddr_t phys) | | 2662 | pmap_zero_page(paddr_t phys) |
2663 | { | | 2663 | { |
2664 | u_long *p0, *p1, *pend; | | 2664 | u_long *p0, *p1, *pend; |
2665 | | | 2665 | |
2666 | #ifdef DEBUG | | 2666 | #ifdef DEBUG |
2667 | if (pmapdebug & PDB_FOLLOW) | | 2667 | if (pmapdebug & PDB_FOLLOW) |
2668 | printf("pmap_zero_page(%lx)\n", phys); | | 2668 | printf("pmap_zero_page(%lx)\n", phys); |
2669 | #endif | | 2669 | #endif |
2670 | | | 2670 | |
2671 | p0 = (u_long *)ALPHA_PHYS_TO_K0SEG(phys); | | 2671 | p0 = (u_long *)ALPHA_PHYS_TO_K0SEG(phys); |
2672 | p1 = NULL; | | 2672 | p1 = NULL; |
2673 | pend = (u_long *)((u_long)p0 + PAGE_SIZE); | | 2673 | pend = (u_long *)((u_long)p0 + PAGE_SIZE); |
2674 | | | 2674 | |
2675 | /* | | 2675 | /* |
2676 | * Unroll the loop a bit, doing 16 quadwords per iteration. | | 2676 | * Unroll the loop a bit, doing 16 quadwords per iteration. |
2677 | * Do only 8 back-to-back stores, and alternate registers. | | 2677 | * Do only 8 back-to-back stores, and alternate registers. |
2678 | */ | | 2678 | */ |
2679 | do { | | 2679 | do { |
2680 | __asm volatile( | | 2680 | __asm volatile( |
2681 | "# BEGIN loop body\n" | | 2681 | "# BEGIN loop body\n" |
2682 | " addq %2, (8 * 8), %1 \n" | | 2682 | " addq %2, (8 * 8), %1 \n" |
2683 | " stq $31, (0 * 8)(%0) \n" | | 2683 | " stq $31, (0 * 8)(%0) \n" |
2684 | " stq $31, (1 * 8)(%0) \n" | | 2684 | " stq $31, (1 * 8)(%0) \n" |
2685 | " stq $31, (2 * 8)(%0) \n" | | 2685 | " stq $31, (2 * 8)(%0) \n" |
2686 | " stq $31, (3 * 8)(%0) \n" | | 2686 | " stq $31, (3 * 8)(%0) \n" |
2687 | " stq $31, (4 * 8)(%0) \n" | | 2687 | " stq $31, (4 * 8)(%0) \n" |
2688 | " stq $31, (5 * 8)(%0) \n" | | 2688 | " stq $31, (5 * 8)(%0) \n" |
2689 | " stq $31, (6 * 8)(%0) \n" | | 2689 | " stq $31, (6 * 8)(%0) \n" |
2690 | " stq $31, (7 * 8)(%0) \n" | | 2690 | " stq $31, (7 * 8)(%0) \n" |
2691 | " \n" | | 2691 | " \n" |
2692 | " addq %3, (8 * 8), %0 \n" | | 2692 | " addq %3, (8 * 8), %0 \n" |
2693 | " stq $31, (0 * 8)(%1) \n" | | 2693 | " stq $31, (0 * 8)(%1) \n" |
2694 | " stq $31, (1 * 8)(%1) \n" | | 2694 | " stq $31, (1 * 8)(%1) \n" |
2695 | " stq $31, (2 * 8)(%1) \n" | | 2695 | " stq $31, (2 * 8)(%1) \n" |
2696 | " stq $31, (3 * 8)(%1) \n" | | 2696 | " stq $31, (3 * 8)(%1) \n" |
2697 | " stq $31, (4 * 8)(%1) \n" | | 2697 | " stq $31, (4 * 8)(%1) \n" |
2698 | " stq $31, (5 * 8)(%1) \n" | | 2698 | " stq $31, (5 * 8)(%1) \n" |
2699 | " stq $31, (6 * 8)(%1) \n" | | 2699 | " stq $31, (6 * 8)(%1) \n" |
2700 | " stq $31, (7 * 8)(%1) \n" | | 2700 | " stq $31, (7 * 8)(%1) \n" |
2701 | " # END loop body" | | 2701 | " # END loop body" |
2702 | : "=r" (p0), "=r" (p1) | | 2702 | : "=r" (p0), "=r" (p1) |
2703 | : "0" (p0), "1" (p1) | | 2703 | : "0" (p0), "1" (p1) |
2704 | : "memory"); | | 2704 | : "memory"); |
2705 | } while (p0 < pend); | | 2705 | } while (p0 < pend); |
2706 | } | | 2706 | } |
2707 | | | 2707 | |
2708 | /* | | 2708 | /* |
2709 | * pmap_copy_page: [ INTERFACE ] | | 2709 | * pmap_copy_page: [ INTERFACE ] |
2710 | * | | 2710 | * |
2711 | * Copy the specified (machine independent) page by mapping the page | | 2711 | * Copy the specified (machine independent) page by mapping the page |
2712 | * into virtual memory and using memcpy to copy the page, one machine | | 2712 | * into virtual memory and using memcpy to copy the page, one machine |
2713 | * dependent page at a time. | | 2713 | * dependent page at a time. |
2714 | * | | 2714 | * |
2715 | * Note: no locking is necessary in this function. | | 2715 | * Note: no locking is necessary in this function. |
2716 | */ | | 2716 | */ |
2717 | void | | 2717 | void |
2718 | pmap_copy_page(paddr_t src, paddr_t dst) | | 2718 | pmap_copy_page(paddr_t src, paddr_t dst) |
2719 | { | | 2719 | { |
2720 | const void *s; | | 2720 | const void *s; |
2721 | void *d; | | 2721 | void *d; |
2722 | | | 2722 | |
2723 | #ifdef DEBUG | | 2723 | #ifdef DEBUG |
2724 | if (pmapdebug & PDB_FOLLOW) | | 2724 | if (pmapdebug & PDB_FOLLOW) |
2725 | printf("pmap_copy_page(%lx, %lx)\n", src, dst); | | 2725 | printf("pmap_copy_page(%lx, %lx)\n", src, dst); |
2726 | #endif | | 2726 | #endif |
2727 | s = (const void *)ALPHA_PHYS_TO_K0SEG(src); | | 2727 | s = (const void *)ALPHA_PHYS_TO_K0SEG(src); |
2728 | d = (void *)ALPHA_PHYS_TO_K0SEG(dst); | | 2728 | d = (void *)ALPHA_PHYS_TO_K0SEG(dst); |
2729 | memcpy(d, s, PAGE_SIZE); | | 2729 | memcpy(d, s, PAGE_SIZE); |
2730 | } | | 2730 | } |
2731 | | | 2731 | |
2732 | /* | | 2732 | /* |
2733 | * pmap_pageidlezero: [ INTERFACE ] | | 2733 | * pmap_pageidlezero: [ INTERFACE ] |
2734 | * | | 2734 | * |
2735 | * Page zero'er for the idle loop. Returns true if the | | 2735 | * Page zero'er for the idle loop. Returns true if the |
2736 | * page was zero'd, false if we aborted for some reason. | | 2736 | * page was zero'd, false if we aborted for some reason. |
2737 | */ | | 2737 | */ |
2738 | bool | | 2738 | bool |
2739 | pmap_pageidlezero(paddr_t pa) | | 2739 | pmap_pageidlezero(paddr_t pa) |
2740 | { | | 2740 | { |
2741 | u_long *ptr; | | 2741 | u_long *ptr; |
2742 | int i, cnt = PAGE_SIZE / sizeof(u_long); | | 2742 | int i, cnt = PAGE_SIZE / sizeof(u_long); |
2743 | | | 2743 | |
2744 | for (i = 0, ptr = (u_long *) ALPHA_PHYS_TO_K0SEG(pa); i < cnt; i++) { | | 2744 | for (i = 0, ptr = (u_long *) ALPHA_PHYS_TO_K0SEG(pa); i < cnt; i++) { |
2745 | if (sched_curcpu_runnable_p()) { | | 2745 | if (sched_curcpu_runnable_p()) { |
2746 | /* | | 2746 | /* |
2747 | * An LWP has become ready. Abort now, | | 2747 | * An LWP has become ready. Abort now, |
2748 | * so we don't keep it waiting while we | | 2748 | * so we don't keep it waiting while we |
2749 | * finish zeroing the page. | | 2749 | * finish zeroing the page. |
2750 | */ | | 2750 | */ |
2751 | return (false); | | 2751 | return (false); |
2752 | } | | 2752 | } |
2753 | *ptr++ = 0; | | 2753 | *ptr++ = 0; |
2754 | } | | 2754 | } |
2755 | | | 2755 | |
2756 | return (true); | | 2756 | return (true); |
2757 | } | | 2757 | } |
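/*
 * Editorial note: a minimal sketch of the intended calling pattern
 * (hypothetical caller -- curcpu_is_idle(), next_zero_candidate()
 * and page_mark_zeroed() are invented names; the real consumer is
 * UVM's idle-page machinery):
 */
#if 0	/* editorial illustration -- not compiled */
	while (curcpu_is_idle()) {
		paddr_t pa = next_zero_candidate();
		if (pa == 0 || !pmap_pageidlezero(pa))
			break;		/* work arrived; stop zeroing */
		page_mark_zeroed(pa);
	}
#endif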
2758 | | | 2758 | |
2759 | /* | | 2759 | /* |
2760 | * pmap_clear_modify: [ INTERFACE ] | | 2760 | * pmap_clear_modify: [ INTERFACE ] |
2761 | * | | 2761 | * |
2762 | * Clear the modify bits on the specified physical page. | | 2762 | * Clear the modify bits on the specified physical page. |
2763 | */ | | 2763 | */ |
2764 | bool | | 2764 | bool |
2765 | pmap_clear_modify(struct vm_page *pg) | | 2765 | pmap_clear_modify(struct vm_page *pg) |
2766 | { | | 2766 | { |
2767 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 2767 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
2768 | bool rv = false; | | 2768 | bool rv = false; |
2769 | kmutex_t *lock; | | 2769 | kmutex_t *lock; |
2770 | struct pmap_tlb_context tlbctx; | | 2770 | struct pmap_tlb_context tlbctx; |
2771 | | | 2771 | |
2772 | #ifdef DEBUG | | 2772 | #ifdef DEBUG |
2773 | if (pmapdebug & PDB_FOLLOW) | | 2773 | if (pmapdebug & PDB_FOLLOW) |
2774 | printf("pmap_clear_modify(%p)\n", pg); | | 2774 | printf("pmap_clear_modify(%p)\n", pg); |
2775 | #endif | | 2775 | #endif |
2776 | | | 2776 | |
2777 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); | | 2777 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); |
2778 | | | 2778 | |
2779 | PMAP_HEAD_TO_MAP_LOCK(); | | 2779 | PMAP_HEAD_TO_MAP_LOCK(); |
2780 | lock = pmap_pvh_lock(pg); | | 2780 | lock = pmap_pvh_lock(pg); |
2781 | mutex_enter(lock); | | 2781 | mutex_enter(lock); |
2782 | | | 2782 | |
2783 | if (md->pvh_attrs & PGA_MODIFIED) { | | 2783 | if (md->pvh_attrs & PGA_MODIFIED) { |
2784 | rv = true; | | 2784 | rv = true; |
2785 | pmap_changebit(pg, PG_FOW, ~0UL, &tlbctx); | | 2785 | pmap_changebit(pg, PG_FOW, ~0UL, &tlbctx); |
2786 | md->pvh_attrs &= ~PGA_MODIFIED; | | 2786 | md->pvh_attrs &= ~PGA_MODIFIED; |
2787 | } | | 2787 | } |
2788 | | | 2788 | |
2789 | mutex_exit(lock); | | 2789 | mutex_exit(lock); |
2790 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 2790 | PMAP_HEAD_TO_MAP_UNLOCK(); |
2791 | | | 2791 | |
2792 | pmap_tlb_shootnow(&tlbctx); | | 2792 | pmap_tlb_shootnow(&tlbctx); |
2793 | TLB_COUNT(reason_clear_modify); | | 2793 | TLB_COUNT(reason_clear_modify); |
2794 | | | 2794 | |
2795 | return (rv); | | 2795 | return (rv); |
2796 | } | | 2796 | } |
2797 | | | 2797 | |
2798 | /* | | 2798 | /* |
2799 | * pmap_clear_reference: [ INTERFACE ] | | 2799 | * pmap_clear_reference: [ INTERFACE ] |
2800 | * | | 2800 | * |
2801 | * Clear the reference bit on the specified physical page. | | 2801 | * Clear the reference bit on the specified physical page. |
2802 | */ | | 2802 | */ |
2803 | bool | | 2803 | bool |
2804 | pmap_clear_reference(struct vm_page *pg) | | 2804 | pmap_clear_reference(struct vm_page *pg) |
2805 | { | | 2805 | { |
2806 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 2806 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
2807 | bool rv = false; | | 2807 | bool rv = false; |
2808 | kmutex_t *lock; | | 2808 | kmutex_t *lock; |
2809 | struct pmap_tlb_context tlbctx; | | 2809 | struct pmap_tlb_context tlbctx; |
2810 | | | 2810 | |
2811 | #ifdef DEBUG | | 2811 | #ifdef DEBUG |
2812 | if (pmapdebug & PDB_FOLLOW) | | 2812 | if (pmapdebug & PDB_FOLLOW) |
2813 | printf("pmap_clear_reference(%p)\n", pg); | | 2813 | printf("pmap_clear_reference(%p)\n", pg); |
2814 | #endif | | 2814 | #endif |
2815 | | | 2815 | |
2816 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); | | 2816 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); |
2817 | | | 2817 | |
2818 | PMAP_HEAD_TO_MAP_LOCK(); | | 2818 | PMAP_HEAD_TO_MAP_LOCK(); |
2819 | lock = pmap_pvh_lock(pg); | | 2819 | lock = pmap_pvh_lock(pg); |
2820 | mutex_enter(lock); | | 2820 | mutex_enter(lock); |
2821 | | | 2821 | |
2822 | if (md->pvh_attrs & PGA_REFERENCED) { | | 2822 | if (md->pvh_attrs & PGA_REFERENCED) { |
2823 | rv = true; | | 2823 | rv = true; |
2824 | pmap_changebit(pg, PG_FOR | PG_FOW | PG_FOE, ~0UL, &tlbctx); | | 2824 | pmap_changebit(pg, PG_FOR | PG_FOW | PG_FOE, ~0UL, &tlbctx); |
2825 | md->pvh_attrs &= ~PGA_REFERENCED; | | 2825 | md->pvh_attrs &= ~PGA_REFERENCED; |
2826 | } | | 2826 | } |
2827 | | | 2827 | |
2828 | mutex_exit(lock); | | 2828 | mutex_exit(lock); |
2829 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 2829 | PMAP_HEAD_TO_MAP_UNLOCK(); |
2830 | | | 2830 | |
2831 | pmap_tlb_shootnow(&tlbctx); | | 2831 | pmap_tlb_shootnow(&tlbctx); |
2832 | TLB_COUNT(reason_clear_reference); | | 2832 | TLB_COUNT(reason_clear_reference); |
2833 | | | 2833 | |
2834 | return (rv); | | 2834 | return (rv); |
2835 | } | | 2835 | } |
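/*
 * Editorial note: Alpha has no hardware referenced/modified bits,
 * so the two functions above emulate them with the fault-on bits:
 * clearing "modified" re-arms PG_FOW, and clearing "referenced"
 * re-arms PG_FOR | PG_FOW | PG_FOE.  The next access to the page
 * then traps into pmap_emulate_reference(), which records
 * PGA_REFERENCED / PGA_MODIFIED and clears the fault bits again.
 */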
2836 | | | 2836 | |
2837 | /* | | 2837 | /* |
2838 | * pmap_is_referenced: [ INTERFACE ] | | 2838 | * pmap_is_referenced: [ INTERFACE ] |
2839 | * | | 2839 | * |
2840 | * Return whether or not the specified physical page is referenced | | 2840 | * Return whether or not the specified physical page is referenced |
2841 | * by any physical maps. | | 2841 | * by any physical maps. |
2842 | */ | | 2842 | */ |
2843 | /* See <machine/pmap.h> */ | | 2843 | /* See <machine/pmap.h> */ |
2844 | | | 2844 | |
2845 | /* | | 2845 | /* |
2846 | * pmap_is_modified: [ INTERFACE ] | | 2846 | * pmap_is_modified: [ INTERFACE ] |
2847 | * | | 2847 | * |
2848 | * Return whether or not the specified physical page is modified | | 2848 | * Return whether or not the specified physical page is modified |
2849 | * by any physical maps. | | 2849 | * by any physical maps. |
2850 | */ | | 2850 | */ |
2851 | /* See <machine/pmap.h> */ | | 2851 | /* See <machine/pmap.h> */ |
2852 | | | 2852 | |
2853 | /* | | 2853 | /* |
2854 | * pmap_phys_address: [ INTERFACE ] | | 2854 | * pmap_phys_address: [ INTERFACE ] |
2855 | * | | 2855 | * |
2856 | * Return the physical address corresponding to the specified | | 2856 | * Return the physical address corresponding to the specified |
2857 | * cookie. Used by the device pager to decode a device driver's | | 2857 | * cookie. Used by the device pager to decode a device driver's |
2858 | * mmap entry point return value. | | 2858 | * mmap entry point return value. |
2859 | * | | 2859 | * |
2860 | * Note: no locking is necessary in this function. | | 2860 | * Note: no locking is necessary in this function. |
2861 | */ | | 2861 | */ |
2862 | paddr_t | | 2862 | paddr_t |
2863 | pmap_phys_address(paddr_t ppn) | | 2863 | pmap_phys_address(paddr_t ppn) |
2864 | { | | 2864 | { |
2865 | | | 2865 | |
2866 | return (alpha_ptob(ppn)); | | 2866 | return (alpha_ptob(ppn)); |
2867 | } | | 2867 | } |
2868 | | | 2868 | |
2869 | /* | | 2869 | /* |
2870 | * Miscellaneous support routines follow | | 2870 | * Miscellaneous support routines follow |
2871 | */ | | 2871 | */ |
2872 | | | 2872 | |
2873 | /* | | 2873 | /* |
2874 | * alpha_protection_init: | | 2874 | * alpha_protection_init: |
2875 | * | | 2875 | * |
2876 | * Initialize Alpha protection code array. | | 2876 | * Initialize Alpha protection code array. |
2877 | * | | 2877 | * |
2878 | * Note: no locking is necessary in this function. | | 2878 | * Note: no locking is necessary in this function. |
2879 | */ | | 2879 | */ |
2880 | static void | | 2880 | static void |
2881 | alpha_protection_init(void) | | 2881 | alpha_protection_init(void) |
2882 | { | | 2882 | { |
2883 | int prot, *kp, *up; | | 2883 | int prot, *kp, *up; |
2884 | | | 2884 | |
2885 | kp = protection_codes[0]; | | 2885 | kp = protection_codes[0]; |
2886 | up = protection_codes[1]; | | 2886 | up = protection_codes[1]; |
2887 | | | 2887 | |
2888 | for (prot = 0; prot < 8; prot++) { | | 2888 | for (prot = 0; prot < 8; prot++) { |
2889 | kp[prot] = PG_ASM; | | 2889 | kp[prot] = PG_ASM; |
2890 | up[prot] = 0; | | 2890 | up[prot] = 0; |
2891 | | | 2891 | |
2892 | if (prot & VM_PROT_READ) { | | 2892 | if (prot & VM_PROT_READ) { |
2893 | kp[prot] |= PG_KRE; | | 2893 | kp[prot] |= PG_KRE; |
2894 | up[prot] |= PG_KRE | PG_URE; | | 2894 | up[prot] |= PG_KRE | PG_URE; |
2895 | } | | 2895 | } |
2896 | if (prot & VM_PROT_WRITE) { | | 2896 | if (prot & VM_PROT_WRITE) { |
2897 | kp[prot] |= PG_KWE; | | 2897 | kp[prot] |= PG_KWE; |
2898 | up[prot] |= PG_KWE | PG_UWE; | | 2898 | up[prot] |= PG_KWE | PG_UWE; |
2899 | } | | 2899 | } |
2900 | if (prot & VM_PROT_EXECUTE) { | | 2900 | if (prot & VM_PROT_EXECUTE) { |
2901 | kp[prot] |= PG_EXEC | PG_KRE; | | 2901 | kp[prot] |= PG_EXEC | PG_KRE; |
2902 | up[prot] |= PG_EXEC | PG_KRE | PG_URE; | | 2902 | up[prot] |= PG_EXEC | PG_KRE | PG_URE; |
2903 | } else { | | 2903 | } else { |
2904 | kp[prot] |= PG_FOE; | | 2904 | kp[prot] |= PG_FOE; |
2905 | up[prot] |= PG_FOE; | | 2905 | up[prot] |= PG_FOE; |
2906 | } | | 2906 | } |
2907 | } | | 2907 | } |
2908 | } | | 2908 | } |
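/*
 * Editorial note: a worked example of the table built above.  For
 * prot = VM_PROT_READ | VM_PROT_WRITE (no execute), the loop yields:
 *
 *	kernel:	PG_ASM | PG_KRE | PG_KWE | PG_FOE
 *	user:	PG_KRE | PG_URE | PG_KWE | PG_UWE | PG_FOE
 *
 * User entries also carry the kernel enable bits so that kernel
 * accesses to the user mapping work, and PG_FOE on non-executable
 * entries makes any instruction fetch trap.
 */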
2909 | | | 2909 | |
2910 | /* | | 2910 | /* |
2911 | * pmap_remove_mapping: | | 2911 | * pmap_remove_mapping: |
2912 | * | | 2912 | * |
2913 | * Invalidate a single page denoted by pmap/va. | | 2913 | * Invalidate a single page denoted by pmap/va. |
2914 | * | | 2914 | * |
2915 | * If (pte != NULL), it is the already computed PTE for the page. | | 2915 | * If (pte != NULL), it is the already computed PTE for the page. |
2916 | * | | 2916 | * |
2917 | * Note: locking in this function is complicated by the fact | | 2917 | * Note: locking in this function is complicated by the fact |
2918 | * that we can be called when the PV list is already locked | | 2918 | * that we can be called when the PV list is already locked |
2919 | * (by pmap_page_protect()). In this case, the caller must be | | 2919 | * (by pmap_page_protect()). In this case, the caller must be |
2920 | * careful to get the next PV entry while we remove this entry | | 2920 | * careful to get the next PV entry while we remove this entry |
2921 | * from beneath it. We assume that the pmap itself is already | | 2921 | * from beneath it. We assume that the pmap itself is already |
2922 | * locked; dolock applies only to the PV list. | | 2922 | * locked; dolock applies only to the PV list. |
2923 | * | | 2923 | * |
2924 | * Returns important PTE bits that the caller needs to check for | | 2924 | * Returns important PTE bits that the caller needs to check for |
2925 | * TLB / I-stream invalidation purposes. | | 2925 | * TLB / I-stream invalidation purposes. |
2926 | */ | | 2926 | */ |
2927 | static pt_entry_t | | 2927 | static pt_entry_t |
2928 | pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, | | 2928 | pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, |
2929 | bool dolock, pv_entry_t *opvp, struct pmap_tlb_context * const tlbctx) | | 2929 | bool dolock, pv_entry_t *opvp, struct pmap_tlb_context * const tlbctx) |
2930 | { | | 2930 | { |
2931 | pt_entry_t opte; | | 2931 | pt_entry_t opte; |
2932 | paddr_t pa; | | 2932 | paddr_t pa; |
2933 | struct vm_page *pg; /* if != NULL, page is managed */ | | 2933 | struct vm_page *pg; /* if != NULL, page is managed */ |
2934 | | | 2934 | |
2935 | #ifdef DEBUG | | 2935 | #ifdef DEBUG |
2936 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) | | 2936 | if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) |
2937 | printf("pmap_remove_mapping(%p, %lx, %p, %d, %p)\n", | | 2937 | printf("pmap_remove_mapping(%p, %lx, %p, %d, %p)\n", |
2938 | pmap, va, pte, dolock, opvp); | | 2938 | pmap, va, pte, dolock, opvp); |
2939 | #endif | | 2939 | #endif |
2940 | | | 2940 | |
2941 | /* | | 2941 | /* |
2942 | * PTE not provided, compute it from pmap and va. | | 2942 | * PTE not provided, compute it from pmap and va. |
2943 | */ | | 2943 | */ |
2944 | if (pte == NULL) { | | 2944 | if (pte == NULL) { |
2945 | pte = pmap_l3pte(pmap_lev1map(pmap), va, NULL); | | 2945 | pte = pmap_l3pte(pmap_lev1map(pmap), va, NULL); |
2946 | if (pmap_pte_v(pte) == 0) | | 2946 | if (pmap_pte_v(pte) == 0) |
2947 | return 0; | | 2947 | return 0; |
2948 | } | | 2948 | } |
2949 | | | 2949 | |
2950 | opte = *pte; | | 2950 | opte = *pte; |
2951 | | | 2951 | |
2952 | pa = PG_PFNUM(opte) << PGSHIFT; | | 2952 | pa = PG_PFNUM(opte) << PGSHIFT; |
2953 | | | 2953 | |
2954 | /* | | 2954 | /* |
2955 | * Update statistics | | 2955 | * Update statistics |
2956 | */ | | 2956 | */ |
2957 | if (pmap_pte_w(pte)) | | 2957 | if (pmap_pte_w(pte)) |
2958 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); | | 2958 | PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1); |
2959 | PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1); | | 2959 | PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1); |
2960 | | | 2960 | |
2961 | /* | | 2961 | /* |
2962 | * Invalidate the PTE after saving the reference/modify info. | | 2962 | * Invalidate the PTE after saving the reference/modify info. |
2963 | */ | | 2963 | */ |
2964 | #ifdef DEBUG | | 2964 | #ifdef DEBUG |
2965 | if (pmapdebug & PDB_REMOVE) | | 2965 | if (pmapdebug & PDB_REMOVE) |
2966 | printf("remove: invalidating pte at %p\n", pte); | | 2966 | printf("remove: invalidating pte at %p\n", pte); |
2967 | #endif | | 2967 | #endif |
2968 | atomic_store_relaxed(pte, PG_NV); | | 2968 | atomic_store_relaxed(pte, PG_NV); |
2969 | | | 2969 | |
2970 | /* | | 2970 | /* |
2971 | * If we're removing a user mapping, check to see if we | | 2971 | * If we're removing a user mapping, check to see if we |
2972 | * can free page table pages. | | 2972 | * can free page table pages. |
2973 | */ | | 2973 | */ |
2974 | if (pmap != pmap_kernel()) { | | 2974 | if (pmap != pmap_kernel()) { |
2975 | /* | | 2975 | /* |
2976 | * Delete the reference on the level 3 table. It will | | 2976 | * Delete the reference on the level 3 table. It will |
2977 | * delete references on the level 2 and 1 tables as | | 2977 | * delete references on the level 2 and 1 tables as |
2978 | * appropriate. | | 2978 | * appropriate. |
2979 | */ | | 2979 | */ |
2980 | pmap_l3pt_delref(pmap, va, pte, tlbctx); | | 2980 | pmap_l3pt_delref(pmap, va, pte, tlbctx); |
2981 | } | | 2981 | } |
2982 | | | 2982 | |
2983 | if (opte & PG_PVLIST) { | | 2983 | if (opte & PG_PVLIST) { |
2984 | /* | | 2984 | /* |
2985 | * Remove it from the PV table. | | 2985 | * Remove it from the PV table. |
2986 | */ | | 2986 | */ |
2987 | pg = PHYS_TO_VM_PAGE(pa); | | 2987 | pg = PHYS_TO_VM_PAGE(pa); |
2988 | KASSERT(pg != NULL); | | 2988 | KASSERT(pg != NULL); |
2989 | pmap_pv_remove(pmap, pg, va, dolock, opvp); | | 2989 | pmap_pv_remove(pmap, pg, va, dolock, opvp); |
2990 | KASSERT(opvp == NULL || *opvp != NULL); | | 2990 | KASSERT(opvp == NULL || *opvp != NULL); |
2991 | } | | 2991 | } |
2992 | | | 2992 | |
2993 | return opte & (PG_V | PG_ASM | PG_EXEC); | | 2993 | return opte & (PG_V | PG_ASM | PG_EXEC); |
2994 | } | | 2994 | } |
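/*
 * Editorial note: what the returned bits mean to the caller, as an
 * editorial restatement of the contract above: PG_V says a live
 * mapping was actually removed (so some TLB invalidation is needed
 * at all), PG_ASM says the TLB entry was a global one rather than
 * per-ASN, and PG_EXEC says the I-stream may need to be
 * synchronized after the invalidation.
 */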
2995 | | | 2995 | |
2996 | /* | | 2996 | /* |
2997 | * pmap_changebit: | | 2997 | * pmap_changebit: |
2998 | * | | 2998 | * |
2999 | * Set or clear the specified PTE bits for all mappings on the | | 2999 | * Set or clear the specified PTE bits for all mappings on the |
3000 | * specified page. | | 3000 | * specified page. |
3001 | * | | 3001 | * |
3002 | * Note: we assume that the pv_head is already locked, and that | | 3002 | * Note: we assume that the pv_head is already locked, and that |
3003 | * the caller has acquired a PV->pmap mutex so that we can lock | | 3003 | * the caller has acquired a PV->pmap mutex so that we can lock |
3004 | * the pmaps as we encounter them. | | 3004 | * the pmaps as we encounter them. |
3005 | */ | | 3005 | */ |
3006 | static void | | 3006 | static void |
3007 | pmap_changebit(struct vm_page *pg, pt_entry_t set, pt_entry_t mask, | | 3007 | pmap_changebit(struct vm_page *pg, pt_entry_t set, pt_entry_t mask, |
3008 | struct pmap_tlb_context * const tlbctx) | | 3008 | struct pmap_tlb_context * const tlbctx) |
3009 | { | | 3009 | { |
3010 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 3010 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
3011 | pv_entry_t pv; | | 3011 | pv_entry_t pv; |
3012 | pt_entry_t *pte, npte, opte; | | 3012 | pt_entry_t *pte, npte, opte; |
3013 | | | 3013 | |
3014 | #ifdef DEBUG | | 3014 | #ifdef DEBUG |
3015 | if (pmapdebug & PDB_BITS) | | 3015 | if (pmapdebug & PDB_BITS) |
3016 | printf("pmap_changebit(%p, 0x%lx, 0x%lx)\n", | | 3016 | printf("pmap_changebit(%p, 0x%lx, 0x%lx)\n", |
3017 | pg, set, mask); | | 3017 | pg, set, mask); |
3018 | #endif | | 3018 | #endif |
3019 | | | 3019 | |
3020 | /* | | 3020 | /* |
3021 | * Loop over all current mappings, setting/clearing as appropriate. | | 3021 | * Loop over all current mappings, setting/clearing as appropriate. |
3022 | */ | | 3022 | */ |
3023 | for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) { | | 3023 | for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) { |
3024 | PMAP_LOCK(pv->pv_pmap); | | 3024 | PMAP_LOCK(pv->pv_pmap); |
3025 | | | 3025 | |
3026 | pte = pv->pv_pte; | | 3026 | pte = pv->pv_pte; |
3027 | | | 3027 | |
3028 | opte = atomic_load_relaxed(pte); | | 3028 | opte = atomic_load_relaxed(pte); |
3029 | npte = (opte | set) & mask; | | 3029 | npte = (opte | set) & mask; |
3030 | if (npte != opte) { | | 3030 | if (npte != opte) { |
3031 | atomic_store_relaxed(pte, npte); | | 3031 | atomic_store_relaxed(pte, npte); |
3032 | pmap_tlb_shootdown_pv(pv->pv_pmap, pv->pv_va, | | 3032 | pmap_tlb_shootdown_pv(pv->pv_pmap, pv->pv_va, |
3033 | opte, tlbctx); | | 3033 | opte, tlbctx); |
3034 | } | | 3034 | } |
3035 | PMAP_UNLOCK(pv->pv_pmap); | | 3035 | PMAP_UNLOCK(pv->pv_pmap); |
3036 | } | | 3036 | } |
3037 | } | | 3037 | } |
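/*
 * Editorial note: the (set, mask) convention above, restated with
 * the two call shapes that appear in this file:
 *
 *	pmap_changebit(pg, PG_FOW, ~0UL, ...)	sets PG_FOW:
 *		npte = opte | PG_FOW
 *	pmap_changebit(pg, 0, ~faultoff, ...)	clears faultoff:
 *		npte = opte & ~faultoff
 *
 * Bits to set go in 'set'; bits to clear are omitted from 'mask'.
 */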
3038 | | | 3038 | |
3039 | /* | | 3039 | /* |
3040 | * pmap_emulate_reference: | | 3040 | * pmap_emulate_reference: |
3041 | * | | 3041 | * |
3042 | * Emulate reference and/or modified bit hits. | | 3042 | * Emulate reference and/or modified bit hits. |
3043 | * Return 1 if this was an execute fault on a non-exec mapping, | | 3043 | * Return 1 if this was an execute fault on a non-exec mapping, |
3044 | * otherwise return 0. | | 3044 | * otherwise return 0. |
3045 | */ | | 3045 | */ |
3046 | int | | 3046 | int |
3047 | pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int type) | | 3047 | pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int type) |
3048 | { | | 3048 | { |
3049 | struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap; | | 3049 | struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap; |
3050 | pt_entry_t faultoff, *pte; | | 3050 | pt_entry_t faultoff, *pte; |
3051 | struct vm_page *pg; | | 3051 | struct vm_page *pg; |
3052 | paddr_t pa; | | 3052 | paddr_t pa; |
3053 | bool didlock = false; | | 3053 | bool didlock = false; |
3054 | bool exec = false; | | 3054 | bool exec = false; |
3055 | kmutex_t *lock; | | 3055 | kmutex_t *lock; |
3056 | | | 3056 | |
3057 | #ifdef DEBUG | | 3057 | #ifdef DEBUG |
3058 | if (pmapdebug & PDB_FOLLOW) | | 3058 | if (pmapdebug & PDB_FOLLOW) |
3059 | printf("pmap_emulate_reference: %p, 0x%lx, %d, %d\n", | | 3059 | printf("pmap_emulate_reference: %p, 0x%lx, %d, %d\n", |
3060 | l, v, user, type); | | 3060 | l, v, user, type); |
3061 | #endif | | 3061 | #endif |
3062 | | | 3062 | |
3063 | /* | | 3063 | /* |
3064 | * Convert process and virtual address to physical address. | | 3064 | * Convert process and virtual address to physical address. |
3065 | */ | | 3065 | */ |
3066 | if (v >= VM_MIN_KERNEL_ADDRESS) { | | 3066 | if (v >= VM_MIN_KERNEL_ADDRESS) { |
3067 | if (user) | | 3067 | if (user) |
3068 | panic("pmap_emulate_reference: user ref to kernel"); | | 3068 | panic("pmap_emulate_reference: user ref to kernel"); |
3069 | /* | | 3069 | /* |
3070 | * No need to lock here; kernel PT pages never go away. | | 3070 | * No need to lock here; kernel PT pages never go away. |
3071 | */ | | 3071 | */ |
3072 | pte = PMAP_KERNEL_PTE(v); | | 3072 | pte = PMAP_KERNEL_PTE(v); |
3073 | } else { | | 3073 | } else { |
3074 | #ifdef DIAGNOSTIC | | 3074 | #ifdef DIAGNOSTIC |
3075 | if (l == NULL) | | 3075 | if (l == NULL) |
3076 | panic("pmap_emulate_reference: bad proc"); | | 3076 | panic("pmap_emulate_reference: bad proc"); |
3077 | if (l->l_proc->p_vmspace == NULL) | | 3077 | if (l->l_proc->p_vmspace == NULL) |
3078 | panic("pmap_emulate_reference: bad p_vmspace"); | | 3078 | panic("pmap_emulate_reference: bad p_vmspace"); |
3079 | #endif | | 3079 | #endif |
3080 | PMAP_LOCK(pmap); | | 3080 | PMAP_LOCK(pmap); |
3081 | didlock = true; | | 3081 | didlock = true; |
3082 | pte = pmap_l3pte(pmap_lev1map(pmap), v, NULL); | | 3082 | pte = pmap_l3pte(pmap_lev1map(pmap), v, NULL); |
3083 | /* | | 3083 | /* |
3084 | * We'll unlock below where we're done with the PTE. | | 3084 | * We'll unlock below where we're done with the PTE. |
3085 | */ | | 3085 | */ |
3086 | } | | 3086 | } |
3087 | exec = pmap_pte_exec(pte); | | 3087 | exec = pmap_pte_exec(pte); |
3088 | if (!exec && type == ALPHA_MMCSR_FOE) { | | 3088 | if (!exec && type == ALPHA_MMCSR_FOE) { |
3089 | if (didlock) | | 3089 | if (didlock) |
3090 | PMAP_UNLOCK(pmap); | | 3090 | PMAP_UNLOCK(pmap); |
3091 | return (1); | | 3091 | return (1); |
3092 | } | | 3092 | } |
3093 | #ifdef DEBUG | | 3093 | #ifdef DEBUG |
3094 | if (pmapdebug & PDB_FOLLOW) { | | 3094 | if (pmapdebug & PDB_FOLLOW) { |
3095 | printf("\tpte = %p, ", pte); | | 3095 | printf("\tpte = %p, ", pte); |
3096 | printf("*pte = 0x%lx\n", *pte); | | 3096 | printf("*pte = 0x%lx\n", *pte); |
3097 | } | | 3097 | } |
3098 | #endif | | 3098 | #endif |
3099 | | | 3099 | |
3100 | pa = pmap_pte_pa(pte); | | 3100 | pa = pmap_pte_pa(pte); |
3101 | | | 3101 | |
3102 | /* | | 3102 | /* |
3103 | * We're now done with the PTE. If it was a user pmap, unlock | | 3103 | * We're now done with the PTE. If it was a user pmap, unlock |
3104 | * it now. | | 3104 | * it now. |
3105 | */ | | 3105 | */ |
3106 | if (didlock) | | 3106 | if (didlock) |
3107 | PMAP_UNLOCK(pmap); | | 3107 | PMAP_UNLOCK(pmap); |
3108 | | | 3108 | |
3109 | #ifdef DEBUG | | 3109 | #ifdef DEBUG |
3110 | if (pmapdebug & PDB_FOLLOW) | | 3110 | if (pmapdebug & PDB_FOLLOW) |
3111 | printf("\tpa = 0x%lx\n", pa); | | 3111 | printf("\tpa = 0x%lx\n", pa); |
3112 | #endif | | 3112 | #endif |
3113 | #ifdef DIAGNOSTIC | | 3113 | #ifdef DIAGNOSTIC |
3114 | if (!uvm_pageismanaged(pa)) | | 3114 | if (!uvm_pageismanaged(pa)) |
3115 | panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): " | | 3115 | panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): " |
3116 | "pa 0x%lx not managed", l, v, user, type, pa); | | 3116 | "pa 0x%lx not managed", l, v, user, type, pa); |
3117 | #endif | | 3117 | #endif |
3118 | | | 3118 | |
3119 | /* | | 3119 | /* |
3120 | * Twiddle the appropriate bits to reflect the reference | | 3120 | * Twiddle the appropriate bits to reflect the reference |
3121 | * and/or modification. | | 3121 | * and/or modification. |
3122 | * | | 3122 | * |
3123 | * The rules: | | 3123 | * The rules: |
3124 | * (1) always mark page as used, and | | 3124 | * (1) always mark page as used, and |
3125 | * (2) if it was a write fault, mark page as modified. | | 3125 | * (2) if it was a write fault, mark page as modified. |
3126 | */ | | 3126 | */ |
3127 | pg = PHYS_TO_VM_PAGE(pa); | | 3127 | pg = PHYS_TO_VM_PAGE(pa); |
3128 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 3128 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
3129 | struct pmap_tlb_context tlbctx; | | 3129 | struct pmap_tlb_context tlbctx; |
3130 | | | 3130 | |
3131 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); | | 3131 | pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV); |
3132 | | | 3132 | |
3133 | PMAP_HEAD_TO_MAP_LOCK(); | | 3133 | PMAP_HEAD_TO_MAP_LOCK(); |
3134 | lock = pmap_pvh_lock(pg); | | 3134 | lock = pmap_pvh_lock(pg); |
3135 | mutex_enter(lock); | | 3135 | mutex_enter(lock); |
3136 | | | 3136 | |
3137 | if (type == ALPHA_MMCSR_FOW) { | | 3137 | if (type == ALPHA_MMCSR_FOW) { |
3138 | md->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED); | | 3138 | md->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED); |
3139 | faultoff = PG_FOR | PG_FOW; | | 3139 | faultoff = PG_FOR | PG_FOW; |
3140 | } else { | | 3140 | } else { |
3141 | md->pvh_attrs |= PGA_REFERENCED; | | 3141 | md->pvh_attrs |= PGA_REFERENCED; |
3142 | faultoff = PG_FOR; | | 3142 | faultoff = PG_FOR; |
3143 | if (exec) { | | 3143 | if (exec) { |
3144 | faultoff |= PG_FOE; | | 3144 | faultoff |= PG_FOE; |
3145 | } | | 3145 | } |
3146 | } | | 3146 | } |
3147 | pmap_changebit(pg, 0, ~faultoff, &tlbctx); | | 3147 | pmap_changebit(pg, 0, ~faultoff, &tlbctx); |
3148 | | | 3148 | |
3149 | mutex_exit(lock); | | 3149 | mutex_exit(lock); |
3150 | PMAP_HEAD_TO_MAP_UNLOCK(); | | 3150 | PMAP_HEAD_TO_MAP_UNLOCK(); |
3151 | | | 3151 | |
3152 | pmap_tlb_shootnow(&tlbctx); | | 3152 | pmap_tlb_shootnow(&tlbctx); |
3153 | TLB_COUNT(reason_emulate_reference); | | 3153 | TLB_COUNT(reason_emulate_reference); |
3154 | | | 3154 | |
3155 | return (0); | | 3155 | return (0); |
3156 | } | | 3156 | } |
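/*
 * Editorial note: summary of the attribute/fault-bit rules
 * implemented above (a restatement of the code, not new policy):
 *
 *	fault			attrs set		bits cleared
 *	ALPHA_MMCSR_FOW		REFERENCED, MODIFIED	PG_FOR | PG_FOW
 *	FOR/FOE, exec page	REFERENCED		PG_FOR | PG_FOE
 *	FOR/FOE, non-exec	REFERENCED		PG_FOR
 *
 * PG_FOW stays armed in the non-write cases so that a later write
 * still traps and can mark the page modified.
 */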
3157 | | | 3157 | |
3158 | #ifdef DEBUG | | 3158 | #ifdef DEBUG |
3159 | /* | | 3159 | /* |
3160 | * pmap_pv_dump: | | 3160 | * pmap_pv_dump: |
3161 | * | | 3161 | * |
3162 | * Dump the physical->virtual data for the specified page. | | 3162 | * Dump the physical->virtual data for the specified page. |
3163 | */ | | 3163 | */ |
3164 | void | | 3164 | void |
3165 | pmap_pv_dump(paddr_t pa) | | 3165 | pmap_pv_dump(paddr_t pa) |
3166 | { | | 3166 | { |
3167 | struct vm_page *pg; | | 3167 | struct vm_page *pg; |
3168 | struct vm_page_md *md; | | 3168 | struct vm_page_md *md; |
3169 | pv_entry_t pv; | | 3169 | pv_entry_t pv; |
3170 | kmutex_t *lock; | | 3170 | kmutex_t *lock; |
3171 | | | 3171 | |
3172 | pg = PHYS_TO_VM_PAGE(pa); | | 3172 | pg = PHYS_TO_VM_PAGE(pa); |
3173 | md = VM_PAGE_TO_MD(pg); | | 3173 | md = VM_PAGE_TO_MD(pg); |
3174 | | | 3174 | |
3175 | lock = pmap_pvh_lock(pg); | | 3175 | lock = pmap_pvh_lock(pg); |
3176 | mutex_enter(lock); | | 3176 | mutex_enter(lock); |
3177 | | | 3177 | |
3178 | printf("pa 0x%lx (attrs = 0x%x):\n", pa, md->pvh_attrs); | | 3178 | printf("pa 0x%lx (attrs = 0x%x):\n", pa, md->pvh_attrs); |
3179 | for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) | | 3179 | for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) |
3180 | printf(" pmap %p, va 0x%lx\n", | | 3180 | printf(" pmap %p, va 0x%lx\n", |
3181 | pv->pv_pmap, pv->pv_va); | | 3181 | pv->pv_pmap, pv->pv_va); |
3182 | printf("\n"); | | 3182 | printf("\n"); |
3183 | | | 3183 | |
3184 | mutex_exit(lock); | | 3184 | mutex_exit(lock); |
3185 | } | | 3185 | } |
3186 | #endif | | 3186 | #endif |
3187 | | | 3187 | |
3188 | /* | | 3188 | /* |
3189 | * vtophys: | | 3189 | * vtophys: |
3190 | * | | 3190 | * |
3191 | * Return the physical address corresponding to the K0SEG or | | 3191 | * Return the physical address corresponding to the K0SEG or |
3192 | * K1SEG address provided. | | 3192 | * K1SEG address provided. |
3193 | * | | 3193 | * |
3194 | * Note: no locking is necessary in this function. | | 3194 | * Note: no locking is necessary in this function. |
3195 | */ | | 3195 | */ |
3196 | static bool | | 3196 | static bool |
3197 | vtophys_internal(vaddr_t const vaddr, paddr_t * const pap) | | 3197 | vtophys_internal(vaddr_t const vaddr, paddr_t * const pap) |
3198 | { | | 3198 | { |
3199 | paddr_t pa; | | 3199 | paddr_t pa; |
3200 | | | 3200 | |
3201 | KASSERT(vaddr >= ALPHA_K0SEG_BASE); | | 3201 | KASSERT(vaddr >= ALPHA_K0SEG_BASE); |
3202 | | | 3202 | |
3203 | if (vaddr <= ALPHA_K0SEG_END) { | | 3203 | if (vaddr <= ALPHA_K0SEG_END) { |
3204 | pa = ALPHA_K0SEG_TO_PHYS(vaddr); | | 3204 | pa = ALPHA_K0SEG_TO_PHYS(vaddr); |
3205 | } else { | | 3205 | } else { |
3206 | pt_entry_t * const pte = PMAP_KERNEL_PTE(vaddr); | | 3206 | pt_entry_t * const pte = PMAP_KERNEL_PTE(vaddr); |
3207 | if (__predict_false(! pmap_pte_v(pte))) { | | 3207 | if (__predict_false(! pmap_pte_v(pte))) { |
3208 | return false; | | 3208 | return false; |
3209 | } | | 3209 | } |
3210 | pa = pmap_pte_pa(pte) | (vaddr & PGOFSET); | | 3210 | pa = pmap_pte_pa(pte) | (vaddr & PGOFSET); |
3211 | } | | 3211 | } |
3212 | | | 3212 | |
3213 | if (pap != NULL) { | | 3213 | if (pap != NULL) { |
3214 | *pap = pa; | | 3214 | *pap = pa; |
3215 | } | | 3215 | } |
3216 | | | 3216 | |
3217 | return true; | | 3217 | return true; |
3218 | } | | 3218 | } |
3219 | | | 3219 | |
3220 | paddr_t | | 3220 | paddr_t |
3221 | vtophys(vaddr_t const vaddr) | | 3221 | vtophys(vaddr_t const vaddr) |
3222 | { | | 3222 | { |
3223 | paddr_t pa; | | 3223 | paddr_t pa; |
3224 | | | 3224 | |
3225 | if (__predict_false(! vtophys_internal(vaddr, &pa))) | | 3225 | if (__predict_false(! vtophys_internal(vaddr, &pa))) |
3226 | pa = 0; | | 3226 | pa = 0; |
3227 | return pa; | | 3227 | return pa; |
3228 | } | | 3228 | } |
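/*
 * Editorial note: a minimal usage sketch.  For a direct-mapped
 * K0SEG address the translation is pure arithmetic; K1SEG
 * addresses go through the kernel PTEs, and an unmapped one
 * yields 0:
 */
#if 0	/* editorial illustration -- not compiled */
	paddr_t pa = VM_PAGE_TO_PHYS(pg);	/* some managed page */
	vaddr_t kva = ALPHA_PHYS_TO_K0SEG(pa);
	KASSERT(vtophys(kva) == pa);
#endif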
3229 | | | 3229 | |
3230 | /******************** pv_entry management ********************/ | | 3230 | /******************** pv_entry management ********************/ |
3231 | | | 3231 | |
3232 | /* | | 3232 | /* |
3233 | * pmap_pv_enter: | | 3233 | * pmap_pv_enter: |
3234 | * | | 3234 | * |
3235 | * Add a physical->virtual entry to the pv_table. | | 3235 | * Add a physical->virtual entry to the pv_table. |
3236 | */ | | 3236 | */ |
3237 | static int | | 3237 | static int |
3238 | pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte, | | 3238 | pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte, |
3239 | bool dolock, pv_entry_t newpv) | | 3239 | bool dolock, pv_entry_t newpv) |
3240 | { | | 3240 | { |
3241 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 3241 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
3242 | kmutex_t *lock; | | 3242 | kmutex_t *lock; |
3243 | | | 3243 | |
3244 | /* | | 3244 | /* |
3245 | * Allocate and fill in the new pv_entry. | | 3245 | * Allocate and fill in the new pv_entry. |
3246 | */ | | 3246 | */ |
3247 | if (newpv == NULL) { | | 3247 | if (newpv == NULL) { |
3248 | newpv = pmap_pv_alloc(); | | 3248 | newpv = pmap_pv_alloc(); |
3249 | if (newpv == NULL) | | 3249 | if (newpv == NULL) |
3250 | return ENOMEM; | | 3250 | return ENOMEM; |
3251 | } | | 3251 | } |
3252 | newpv->pv_va = va; | | 3252 | newpv->pv_va = va; |
3253 | newpv->pv_pmap = pmap; | | 3253 | newpv->pv_pmap = pmap; |
3254 | newpv->pv_pte = pte; | | 3254 | newpv->pv_pte = pte; |
3255 | | | 3255 | |
3256 | if (dolock) { | | 3256 | if (dolock) { |
3257 | lock = pmap_pvh_lock(pg); | | 3257 | lock = pmap_pvh_lock(pg); |
3258 | mutex_enter(lock); | | 3258 | mutex_enter(lock); |
3259 | } | | 3259 | } |
3260 | | | 3260 | |
3261 | #ifdef DEBUG | | 3261 | #ifdef DEBUG |
3262 | { | | 3262 | { |
3263 | pv_entry_t pv; | | 3263 | pv_entry_t pv; |
3264 | /* | | 3264 | /* |
3265 | * Make sure the entry doesn't already exist. | | 3265 | * Make sure the entry doesn't already exist. |
3266 | */ | | 3266 | */ |
3267 | for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) { | | 3267 | for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) { |
3268 | if (pmap == pv->pv_pmap && va == pv->pv_va) { | | 3268 | if (pmap == pv->pv_pmap && va == pv->pv_va) { |
3269 | printf("pmap = %p, va = 0x%lx\n", pmap, va); | | 3269 | printf("pmap = %p, va = 0x%lx\n", pmap, va); |
3270 | panic("pmap_pv_enter: already in pv table"); | | 3270 | panic("pmap_pv_enter: already in pv table"); |
3271 | } | | 3271 | } |
3272 | } | | 3272 | } |
3273 | } | | 3273 | } |
3274 | #endif | | 3274 | #endif |
3275 | | | 3275 | |
3276 | /* | | 3276 | /* |
3277 | * ...and put it in the list. | | 3277 | * ...and put it in the list. |
3278 | */ | | 3278 | */ |
3279 | newpv->pv_next = md->pvh_list; | | 3279 | newpv->pv_next = md->pvh_list; |
3280 | md->pvh_list = newpv; | | 3280 | md->pvh_list = newpv; |
3281 | | | 3281 | |
3282 | if (dolock) { | | 3282 | if (dolock) { |
3283 | mutex_exit(lock); | | 3283 | mutex_exit(lock); |
3284 | } | | 3284 | } |
3285 | | | 3285 | |
3286 | return 0; | | 3286 | return 0; |
3287 | } | | 3287 | } |
3288 | | | 3288 | |
3289 | /* | | 3289 | /* |
3290 | * pmap_pv_remove: | | 3290 | * pmap_pv_remove: |
3291 | * | | 3291 | * |
3292 | * Remove a physical->virtual entry from the pv_table. | | 3292 | * Remove a physical->virtual entry from the pv_table. |
3293 | */ | | 3293 | */ |
3294 | static void | | 3294 | static void |
3295 | pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock, | | 3295 | pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock, |
3296 | pv_entry_t *opvp) | | 3296 | pv_entry_t *opvp) |
3297 | { | | 3297 | { |
3298 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | 3298 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); |
3299 | pv_entry_t pv, *pvp; | | 3299 | pv_entry_t pv, *pvp; |
3300 | kmutex_t *lock; | | 3300 | kmutex_t *lock; |
3301 | | | 3301 | |
3302 | if (dolock) { | | 3302 | if (dolock) { |
3303 | lock = pmap_pvh_lock(pg); | | 3303 | lock = pmap_pvh_lock(pg); |
3304 | mutex_enter(lock); | | 3304 | mutex_enter(lock); |
3305 | } else { | | 3305 | } else { |
3306 | lock = NULL; /* XXX stupid gcc */ | | 3306 | lock = NULL; /* XXX stupid gcc */ |
3307 | } | | 3307 | } |
3308 | | | 3308 | |
3309 | /* | | 3309 | /* |
3310 | * Find the entry to remove. | | 3310 | * Find the entry to remove. |
3311 | */ | | 3311 | */ |
3312 | for (pvp = &md->pvh_list, pv = *pvp; | | 3312 | for (pvp = &md->pvh_list, pv = *pvp; |
3313 | pv != NULL; pvp = &pv->pv_next, pv = *pvp) | | 3313 | pv != NULL; pvp = &pv->pv_next, pv = *pvp) |
3314 | if (pmap == pv->pv_pmap && va == pv->pv_va) | | 3314 | if (pmap == pv->pv_pmap && va == pv->pv_va) |
3315 | break; | | 3315 | break; |
3316 | | | 3316 | |
3317 | KASSERT(pv != NULL); | | 3317 | KASSERT(pv != NULL); |
3318 | | | 3318 | |
3319 | *pvp = pv->pv_next; | | 3319 | *pvp = pv->pv_next; |
3320 | | | 3320 | |
3321 | if (dolock) { | | 3321 | if (dolock) { |
3322 | mutex_exit(lock); | | 3322 | mutex_exit(lock); |
3323 | } | | 3323 | } |
3324 | | | 3324 | |
3325 | if (opvp != NULL) | | 3325 | if (opvp != NULL) |
3326 | *opvp = pv; | | 3326 | *opvp = pv; |
3327 | else | | 3327 | else |
3328 | pmap_pv_free(pv); | | 3328 | pmap_pv_free(pv); |
3329 | } | | 3329 | } |
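/*
 * Editorial note: the search loop above uses the pointer-to-pointer
 * idiom, so unlinking needs no head-of-list special case: pvp
 * always points at the link (either &md->pvh_list or some entry's
 * pv_next field) that currently holds pv, and "*pvp = pv->pv_next"
 * splices the entry out wherever it sits.
 */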
3330 | | | 3330 | |
3331 | /* | | 3331 | /* |
3332 | * pmap_pv_page_alloc: | | 3332 | * pmap_pv_page_alloc: |
3333 | * | | 3333 | * |
3334 | * Allocate a page for the pv_entry pool. | | 3334 | * Allocate a page for the pv_entry pool. |
3335 | */ | | 3335 | */ |
3336 | static void * | | 3336 | static void * |
3337 | pmap_pv_page_alloc(struct pool *pp, int flags) | | 3337 | pmap_pv_page_alloc(struct pool *pp, int flags) |
3338 | { | | 3338 | { |
3339 | struct vm_page * const pg = pmap_physpage_alloc(PGU_PVENT); | | 3339 | struct vm_page * const pg = pmap_physpage_alloc(PGU_PVENT); |
3340 | if (__predict_false(pg == NULL)) { | | 3340 | if (__predict_false(pg == NULL)) { |
3341 | return NULL; | | 3341 | return NULL; |
3342 | } | | 3342 | } |
3343 | return (void *)ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(pg)); | | 3343 | return (void *)ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(pg)); |
3344 | } | | 3344 | } |
3345 | | | 3345 | |
3346 | /* | | 3346 | /* |
3347 | * pmap_pv_page_free: | | 3347 | * pmap_pv_page_free: |
3348 | * | | 3348 | * |
3349 | * Free a pv_entry pool page. | | 3349 | * Free a pv_entry pool page. |
3350 | */ | | 3350 | */ |
3351 | static void | | 3351 | static void |
3352 | pmap_pv_page_free(struct pool *pp, void *v) | | 3352 | pmap_pv_page_free(struct pool *pp, void *v) |
3353 | { | | 3353 | { |
3354 | | | 3354 | |
3355 | pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t)v)); | | 3355 | pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t)v)); |
3356 | } | | 3356 | } |
3357 | | | 3357 | |
3358 | /******************** misc. functions ********************/ | | 3358 | /******************** misc. functions ********************/ |
3359 | | | 3359 | |
3360 | /* | | 3360 | /* |
| | | 3361 | * Pages that are in-use as page table pages should never be part |
| | | 3362 | * of a UVM loan, so we'll use that field for our PT page reference |
| | | 3363 | * count. |
| | | 3364 | */ |
| | | 3365 | #define PHYSPAGE_REFCNT(pg) atomic_load_relaxed(&(pg)->loan_count) |
| | | 3366 | #define PHYSPAGE_REFCNT_INC(pg) atomic_inc_uint_nv(&(pg)->loan_count) |
| | | 3367 | #define PHYSPAGE_REFCNT_DEC(pg) atomic_dec_uint_nv(&(pg)->loan_count) |
| | | 3368 | |
| | | 3369 | /* |
3361 | * pmap_physpage_alloc: | | 3370 | * pmap_physpage_alloc: |
3362 | * | | 3371 | * |
3363 | * Allocate a single page from the VM system and return the | | 3372 | * Allocate a single page from the VM system and return the |
3364 | * physical address for that page. | | 3373 | * physical address for that page. |
3365 | */ | | 3374 | */ |
3366 | static struct vm_page * | | 3375 | static struct vm_page * |
3367 | pmap_physpage_alloc(int usage) | | 3376 | pmap_physpage_alloc(int usage) |
3368 | { | | 3377 | { |
3369 | struct vm_page *pg; | | 3378 | struct vm_page *pg; |
3370 | | | 3379 | |
3371 | /* | | 3380 | /* |
3372 | * Don't ask for a zero'd page in the L1PT case -- we will | | 3381 | * Don't ask for a zero'd page in the L1PT case -- we will |
3373 | * properly initialize it in the constructor. | | 3382 | * properly initialize it in the constructor. |
3374 | */ | | 3383 | */ |
3375 | | | 3384 | |
3376 | pg = uvm_pagealloc(NULL, 0, NULL, usage == PGU_L1PT ? | | 3385 | pg = uvm_pagealloc(NULL, 0, NULL, usage == PGU_L1PT ? |
3377 | UVM_PGA_USERESERVE : UVM_PGA_USERESERVE|UVM_PGA_ZERO); | | 3386 | UVM_PGA_USERESERVE : UVM_PGA_USERESERVE|UVM_PGA_ZERO); |
3378 | if (pg != NULL) { | | 3387 | if (pg != NULL) { |
3379 | #ifdef DEBUG | | 3388 | KASSERT(PHYSPAGE_REFCNT(pg) == 0); |
3380 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | | |
3381 | if (md->pvh_refcnt != 0) { | | | |
3382 | printf("pmap_physpage_alloc: page 0x%lx has " | | | |
3383 | "%d references\n", pa, md->pvh_refcnt); | | | |
3384 | panic("pmap_physpage_alloc"); | | | |
3385 | } | | | |
3386 | #endif | | | |
3387 | } | | 3389 | } |
3388 | return pg; | | 3390 | return pg; |
3389 | } | | 3391 | } |
3390 | | | 3392 | |
3391 | /* | | 3393 | /* |
3392 | * pmap_physpage_free: | | 3394 | * pmap_physpage_free: |
3393 | * | | 3395 | * |
3394 | * Free the single page table page at the specified physical address. | | 3396 | * Free the single page table page at the specified physical address. |
3395 | */ | | 3397 | */ |
3396 | static void | | 3398 | static void |
3397 | pmap_physpage_free(paddr_t pa) | | 3399 | pmap_physpage_free(paddr_t pa) |
3398 | { | | 3400 | { |
3399 | struct vm_page *pg; | | 3401 | struct vm_page *pg; |
3400 | | | 3402 | |
3401 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) | | 3403 | if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) |
3402 | panic("pmap_physpage_free: bogus physical page address"); | | 3404 | panic("pmap_physpage_free: bogus physical page address"); |
3403 | | | 3405 | |
3404 | #ifdef DEBUG | | 3406 | KASSERT(PHYSPAGE_REFCNT(pg) == 0); |
3405 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | | |
3406 | if (md->pvh_refcnt != 0) | | | |
3407 | panic("pmap_physpage_free: page still has references"); | | | |
3408 | #endif | | | |
3409 | | | 3407 | |
3410 | uvm_pagefree(pg); | | 3408 | uvm_pagefree(pg); |
3411 | } | | 3409 | } |
3412 | | | 3410 | |
3413 | /* | | 3411 | /* |
3414 | * pmap_physpage_addref: | | 3412 | * pmap_physpage_addref: |
3415 | * | | 3413 | * |
3416 | * Add a reference to the specified special use page. | | 3414 | * Add a reference to the specified special use page. |
3417 | */ | | 3415 | */ |
3418 | static int | | 3416 | static int |
3419 | pmap_physpage_addref(void *kva) | | 3417 | pmap_physpage_addref(void *kva) |
3420 | { | | 3418 | { |
3421 | struct vm_page *pg; | | 3419 | struct vm_page *pg; |
3422 | struct vm_page_md *md; | | | |
3423 | paddr_t pa; | | 3420 | paddr_t pa; |
3424 | | | 3421 | |
3425 | pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva)); | | 3422 | pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva)); |
3426 | pg = PHYS_TO_VM_PAGE(pa); | | 3423 | pg = PHYS_TO_VM_PAGE(pa); |
3427 | md = VM_PAGE_TO_MD(pg); | | | |
3428 | | | 3424 | |
3429 | KASSERT((int)md->pvh_refcnt >= 0); | | 3425 | KASSERT(PHYSPAGE_REFCNT(pg) < UINT32_MAX); |
3430 | | | 3426 | |
3431 | return atomic_inc_uint_nv(&md->pvh_refcnt); | | 3427 | return PHYSPAGE_REFCNT_INC(pg); |
3432 | } | | 3428 | } |
3433 | | | 3429 | |
3434 | /* | | 3430 | /* |
3435 | * pmap_physpage_delref: | | 3431 | * pmap_physpage_delref: |
3436 | * | | 3432 | * |
3437 | * Delete a reference to the specified special use page. | | 3433 | * Delete a reference to the specified special use page. |
3438 | */ | | 3434 | */ |
3439 | static int | | 3435 | static int |
3440 | pmap_physpage_delref(void *kva) | | 3436 | pmap_physpage_delref(void *kva) |
3441 | { | | 3437 | { |
3442 | struct vm_page *pg; | | 3438 | struct vm_page *pg; |
3443 | struct vm_page_md *md; | | | |
3444 | paddr_t pa; | | 3439 | paddr_t pa; |
3445 | | | 3440 | |
3446 | pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva)); | | 3441 | pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva)); |
3447 | pg = PHYS_TO_VM_PAGE(pa); | | 3442 | pg = PHYS_TO_VM_PAGE(pa); |
3448 | md = VM_PAGE_TO_MD(pg); | | | |
3449 | | | 3443 | |
3450 | KASSERT((int)md->pvh_refcnt > 0); | | 3444 | KASSERT(PHYSPAGE_REFCNT(pg) != 0); |
3451 | | | 3445 | |
3452 | return atomic_dec_uint_nv(&md->pvh_refcnt); | | 3446 | return PHYSPAGE_REFCNT_DEC(pg); |
3453 | } | | 3447 | } |
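/*
 * Editorial note: lifecycle of the borrowed loan_count counter, as
 * a sketch using the names from pmap_l3pt_delref() below (the PT
 * page is freed once its count of valid PTEs returns to zero):
 */
#if 0	/* editorial illustration -- not compiled */
	if (pmap_physpage_delref(l3pte) == 0) {
		/* Last mapping gone; the L3 PT page can be freed. */
		pmap_ptpage_free(pmap, l2pte, tlbctx);
	}
#endif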
3454 | | | 3448 | |
3455 | /******************** page table page management ********************/ | | 3449 | /******************** page table page management ********************/ |
3456 | | | 3450 | |
3457 | static bool | | 3451 | static bool |
3458 | pmap_kptpage_alloc(paddr_t *pap) | | 3452 | pmap_kptpage_alloc(paddr_t *pap) |
3459 | { | | 3453 | { |
3460 | if (uvm.page_init_done == false) { | | 3454 | if (uvm.page_init_done == false) { |
3461 | /* | | 3455 | /* |
3462 | * We're growing the kernel pmap early (from | | 3456 | * We're growing the kernel pmap early (from |
3463 | * uvm_pageboot_alloc()). This case must | | 3457 | * uvm_pageboot_alloc()). This case must |
3464 | * be handled a little differently. | | 3458 | * be handled a little differently. |
3465 | */ | | 3459 | */ |
3466 | *pap = ALPHA_K0SEG_TO_PHYS( | | 3460 | *pap = ALPHA_K0SEG_TO_PHYS( |
3467 | pmap_steal_memory(PAGE_SIZE, NULL, NULL)); | | 3461 | pmap_steal_memory(PAGE_SIZE, NULL, NULL)); |
3468 | return true; | | 3462 | return true; |
3469 | } | | 3463 | } |
3470 | | | 3464 | |
3471 | struct vm_page * const pg = pmap_physpage_alloc(PGU_NORMAL); | | 3465 | struct vm_page * const pg = pmap_physpage_alloc(PGU_NORMAL); |
3472 | if (__predict_true(pg != NULL)) { | | 3466 | if (__predict_true(pg != NULL)) { |
3473 | *pap = VM_PAGE_TO_PHYS(pg); | | 3467 | *pap = VM_PAGE_TO_PHYS(pg); |
3474 | return true; | | 3468 | return true; |
3475 | } | | 3469 | } |
3476 | return false; | | 3470 | return false; |
3477 | } | | 3471 | } |
3478 | | | 3472 | |
3479 | /* | | 3473 | /* |
3480 | * pmap_growkernel: [ INTERFACE ] | | 3474 | * pmap_growkernel: [ INTERFACE ] |
3481 | * | | 3475 | * |
3482 | * Grow the kernel address space. This is a hint from the | | 3476 | * Grow the kernel address space. This is a hint from the |
3483 | * upper layer to pre-allocate more kernel PT pages. | | 3477 | * upper layer to pre-allocate more kernel PT pages. |
3484 | */ | | 3478 | */ |
3485 | vaddr_t | | 3479 | vaddr_t |
3486 | pmap_growkernel(vaddr_t maxkvaddr) | | 3480 | pmap_growkernel(vaddr_t maxkvaddr) |
3487 | { | | 3481 | { |
3488 | struct pmap *pm; | | 3482 | struct pmap *pm; |
3489 | paddr_t ptaddr; | | 3483 | paddr_t ptaddr; |
3490 | pt_entry_t *l1pte, *l2pte, pte; | | 3484 | pt_entry_t *l1pte, *l2pte, pte; |
3491 | pt_entry_t *lev1map; | | 3485 | pt_entry_t *lev1map; |
3492 | vaddr_t va; | | 3486 | vaddr_t va; |
3493 | int l1idx; | | 3487 | int l1idx; |
3494 | | | 3488 | |
3495 | rw_enter(&pmap_growkernel_lock, RW_WRITER); | | 3489 | rw_enter(&pmap_growkernel_lock, RW_WRITER); |
3496 | | | 3490 | |
3497 | if (maxkvaddr <= virtual_end) | | 3491 | if (maxkvaddr <= virtual_end) |
3498 | goto out; /* we are OK */ | | 3492 | goto out; /* we are OK */ |
3499 | | | 3493 | |
3500 | va = virtual_end; | | 3494 | va = virtual_end; |
3501 | | | 3495 | |
3502 | while (va < maxkvaddr) { | | 3496 | while (va < maxkvaddr) { |
3503 | /* | | 3497 | /* |
3504 | * If there is no valid L1 PTE (i.e. no L2 PT page), | | 3498 | * If there is no valid L1 PTE (i.e. no L2 PT page), |
3505 | * allocate a new L2 PT page and insert it into the | | 3499 | * allocate a new L2 PT page and insert it into the |
3506 | * L1 map. | | 3500 | * L1 map. |
3507 | */ | | 3501 | */ |
3508 | l1pte = pmap_l1pte(kernel_lev1map, va); | | 3502 | l1pte = pmap_l1pte(kernel_lev1map, va); |
3509 | if (pmap_pte_v(l1pte) == 0) { | | 3503 | if (pmap_pte_v(l1pte) == 0) { |
3510 | if (!pmap_kptpage_alloc(&ptaddr)) | | 3504 | if (!pmap_kptpage_alloc(&ptaddr)) |
3511 | goto die; | | 3505 | goto die; |
3512 | pte = (atop(ptaddr) << PG_SHIFT) | | | 3506 | pte = (atop(ptaddr) << PG_SHIFT) | |
3513 | PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; | | 3507 | PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; |
3514 | *l1pte = pte; | | 3508 | *l1pte = pte; |
3515 | | | 3509 | |
3516 | l1idx = l1pte_index(va); | | 3510 | l1idx = l1pte_index(va); |
3517 | | | 3511 | |
3518 | /* Update all the user pmaps. */ | | 3512 | /* Update all the user pmaps. */ |
3519 | mutex_enter(&pmap_all_pmaps_lock); | | 3513 | mutex_enter(&pmap_all_pmaps_lock); |
3520 | for (pm = TAILQ_FIRST(&pmap_all_pmaps); | | 3514 | for (pm = TAILQ_FIRST(&pmap_all_pmaps); |
3521 | pm != NULL; pm = TAILQ_NEXT(pm, pm_list)) { | | 3515 | pm != NULL; pm = TAILQ_NEXT(pm, pm_list)) { |
3522 | /* Skip the kernel pmap. */ | | 3516 | /* Skip the kernel pmap. */ |
3523 | if (pm == pmap_kernel()) | | 3517 | if (pm == pmap_kernel()) |
3524 | continue; | | 3518 | continue; |
3525 | | | 3519 | |
3526 | /* | | 3520 | /* |
3527 | * Any pmaps published on the global list | | 3521 | * Any pmaps published on the global list |
3528 | * should never be referencing kernel_lev1map. | | 3522 | * should never be referencing kernel_lev1map. |
3529 | */ | | 3523 | */ |
3530 | lev1map = pmap_lev1map(pm); | | 3524 | lev1map = pmap_lev1map(pm); |
3531 | KASSERT(lev1map != kernel_lev1map); | | 3525 | KASSERT(lev1map != kernel_lev1map); |
3532 | | | 3526 | |
3533 | PMAP_LOCK(pm); | | 3527 | PMAP_LOCK(pm); |
3534 | lev1map[l1idx] = pte; | | 3528 | lev1map[l1idx] = pte; |
3535 | PMAP_UNLOCK(pm); | | 3529 | PMAP_UNLOCK(pm); |
3536 | } | | 3530 | } |
3537 | mutex_exit(&pmap_all_pmaps_lock); | | 3531 | mutex_exit(&pmap_all_pmaps_lock); |
3538 | } | | 3532 | } |
3539 | | | 3533 | |
3540 | /* | | 3534 | /* |
3541 | * Have an L2 PT page now, add the L3 PT page. | | 3535 | * Have an L2 PT page now, add the L3 PT page. |
3542 | */ | | 3536 | */ |
3543 | l2pte = pmap_l2pte(kernel_lev1map, va, l1pte); | | 3537 | l2pte = pmap_l2pte(kernel_lev1map, va, l1pte); |
3544 | KASSERT(pmap_pte_v(l2pte) == 0); | | 3538 | KASSERT(pmap_pte_v(l2pte) == 0); |
3545 | if (!pmap_kptpage_alloc(&ptaddr)) | | 3539 | if (!pmap_kptpage_alloc(&ptaddr)) |
3546 | goto die; | | 3540 | goto die; |
3547 | *l2pte = (atop(ptaddr) << PG_SHIFT) | | | 3541 | *l2pte = (atop(ptaddr) << PG_SHIFT) | |
3548 | PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; | | 3542 | PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED; |
3549 | va += ALPHA_L2SEG_SIZE; | | 3543 | va += ALPHA_L2SEG_SIZE; |
3550 | } | | 3544 | } |
3551 | | | 3545 | |
3552 | /* Invalidate the L1 PT cache. */ | | 3546 | /* Invalidate the L1 PT cache. */ |
3553 | pool_cache_invalidate(&pmap_l1pt_cache); | | 3547 | pool_cache_invalidate(&pmap_l1pt_cache); |
3554 | | | 3548 | |
3555 | virtual_end = va; | | 3549 | virtual_end = va; |
3556 | | | 3550 | |
3557 | out: | | 3551 | out: |
3558 | rw_exit(&pmap_growkernel_lock); | | 3552 | rw_exit(&pmap_growkernel_lock); |
3559 | | | 3553 | |
3560 | return (virtual_end); | | 3554 | return (virtual_end); |
3561 | | | 3555 | |
3562 | die: | | 3556 | die: |
3563 | panic("pmap_growkernel: out of memory"); | | 3557 | panic("pmap_growkernel: out of memory"); |
3564 | } | | 3558 | } |
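/*
 * Editorial note: the growth arithmetic above, under the usual
 * Alpha geometry of 8 KB pages and 1024 8-byte PTEs per PT page
 * (assumed here for illustration): one L3 PT page maps one L2
 * segment, ALPHA_L2SEG_SIZE = 1024 * 8 KB = 8 MB of KVA, and one
 * L2 PT page covers 1024 such segments (8 GB).  The loop therefore
 * allocates one L3 page per 8 MB of growth, plus an L2 page
 * roughly every 8 GB, until virtual_end covers maxkvaddr.
 */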
3565 | | | 3559 | |
3566 | /* | | 3560 | /* |
3567 | * pmap_l1pt_ctor: | | 3561 | * pmap_l1pt_ctor: |
3568 | * | | 3562 | * |
3569 | * Pool cache constructor for L1 PT pages. | | 3563 | * Pool cache constructor for L1 PT pages. |
3570 | * | | 3564 | * |
3571 | * Note: The growkernel lock is held across allocations | | 3565 | * Note: The growkernel lock is held across allocations |
3572 | * from our pool_cache, so we don't need to acquire it | | 3566 | * from our pool_cache, so we don't need to acquire it |
3573 | * ourselves. | | 3567 | * ourselves. |
3574 | */ | | 3568 | */ |
3575 | static int | | 3569 | static int |
3576 | pmap_l1pt_ctor(void *arg, void *object, int flags) | | 3570 | pmap_l1pt_ctor(void *arg, void *object, int flags) |
3577 | { | | 3571 | { |
3578 | pt_entry_t *l1pt = object, pte; | | 3572 | pt_entry_t *l1pt = object, pte; |
3579 | int i; | | 3573 | int i; |
3580 | | | 3574 | |
3581 | /* | | 3575 | /* |
3582 | * Initialize the new level 1 table by zeroing the | | 3576 | * Initialize the new level 1 table by zeroing the |
3583 | * user portion and copying the kernel mappings into | | 3577 | * user portion and copying the kernel mappings into |
3584 | * the kernel portion. | | 3578 | * the kernel portion. |
3585 | */ | | 3579 | */ |
3586 | for (i = 0; i < l1pte_index(VM_MIN_KERNEL_ADDRESS); i++) | | 3580 | for (i = 0; i < l1pte_index(VM_MIN_KERNEL_ADDRESS); i++) |
3587 | l1pt[i] = 0; | | 3581 | l1pt[i] = 0; |
3588 | | | 3582 | |
3589 | for (i = l1pte_index(VM_MIN_KERNEL_ADDRESS); | | 3583 | for (i = l1pte_index(VM_MIN_KERNEL_ADDRESS); |
3590 | i <= l1pte_index(VM_MAX_KERNEL_ADDRESS); i++) | | 3584 | i <= l1pte_index(VM_MAX_KERNEL_ADDRESS); i++) |
3591 | l1pt[i] = kernel_lev1map[i]; | | 3585 | l1pt[i] = kernel_lev1map[i]; |
3592 | | | 3586 | |
3593 | /* | | 3587 | /* |
3594 | * Now, map the new virtual page table. NOTE: NO ASM! | | 3588 | * Now, map the new virtual page table. NOTE: NO ASM! |
3595 | */ | | 3589 | */ |
3596 | pte = ((ALPHA_K0SEG_TO_PHYS((vaddr_t) l1pt) >> PGSHIFT) << PG_SHIFT) | | | 3590 | pte = ((ALPHA_K0SEG_TO_PHYS((vaddr_t) l1pt) >> PGSHIFT) << PG_SHIFT) | |
3597 | PG_V | PG_KRE | PG_KWE; | | 3591 | PG_V | PG_KRE | PG_KWE; |
3598 | l1pt[l1pte_index(VPTBASE)] = pte; | | 3592 | l1pt[l1pte_index(VPTBASE)] = pte; |
3599 | | | 3593 | |
3600 | return (0); | | 3594 | return (0); |
3601 | } | | 3595 | } |
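/*
 * Editorial note: the VPTBASE entry installed above points the new
 * L1 table at its own physical page -- a recursive self-map, which
 * is what implements the linear "virtual page table": a lookup that
 * recurses through this entry bottoms out on the PTEs themselves.
 * It is installed without PG_ASM (hence the "NO ASM!" note) because
 * the L1 contents differ per address space, so the translation must
 * not persist in the TLB as a global entry.
 */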
3602 | | | 3596 | |
3603 | /* | | 3597 | /* |
3604 | * pmap_l1pt_alloc: | | 3598 | * pmap_l1pt_alloc: |
3605 | * | | 3599 | * |
3606 | * Page allocator for L1 PT pages. | | 3600 | * Page allocator for L1 PT pages. |
3607 | */ | | 3601 | */ |
3608 | static void * | | 3602 | static void * |
3609 | pmap_l1pt_alloc(struct pool *pp, int flags) | | 3603 | pmap_l1pt_alloc(struct pool *pp, int flags) |
3610 | { | | 3604 | { |
3611 | /* | | 3605 | /* |
3612 | * Attempt to allocate a free page. | | 3606 | * Attempt to allocate a free page. |
3613 | */ | | 3607 | */ |
3614 | struct vm_page * const pg = pmap_physpage_alloc(PGU_L1PT); | | 3608 | struct vm_page * const pg = pmap_physpage_alloc(PGU_L1PT); |
3615 | if (__predict_false(pg == NULL)) { | | 3609 | if (__predict_false(pg == NULL)) { |
3616 | return NULL; | | 3610 | return NULL; |
3617 | } | | 3611 | } |
3618 | return (void *)ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(pg)); | | 3612 | return (void *)ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(pg)); |
3619 | } | | 3613 | } |
3620 | | | 3614 | |
3621 | /* | | 3615 | /* |
3622 | * pmap_l1pt_free: | | 3616 | * pmap_l1pt_free: |
3623 | * | | 3617 | * |
3624 | * Page freer for L1 PT pages. | | 3618 | * Page freer for L1 PT pages. |
3625 | */ | | 3619 | */ |
3626 | static void | | 3620 | static void |
3627 | pmap_l1pt_free(struct pool *pp, void *v) | | 3621 | pmap_l1pt_free(struct pool *pp, void *v) |
3628 | { | | 3622 | { |
3629 | | | 3623 | |
3630 | pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t) v)); | | 3624 | pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t) v)); |
3631 | } | | 3625 | } |
3632 | | | 3626 | |
3633 | /* | | 3627 | /* |
3634 | * pmap_ptpage_alloc: | | 3628 | * pmap_ptpage_alloc: |
3635 | * | | 3629 | * |
3636 | * Allocate a level 2 or level 3 page table page for a user | | 3630 | * Allocate a level 2 or level 3 page table page for a user |
3637 | * pmap, and initialize the PTE that references it. | | 3631 | * pmap, and initialize the PTE that references it. |
3638 | * | | 3632 | * |
3639 | * Note: the pmap must already be locked. | | 3633 | * Note: the pmap must already be locked. |
3640 | */ | | 3634 | */ |
3641 | static int | | 3635 | static int |
3642 | pmap_ptpage_alloc(pmap_t pmap, pt_entry_t * const pte, int const usage) | | 3636 | pmap_ptpage_alloc(pmap_t pmap, pt_entry_t * const pte, int const usage) |
3643 | { | | 3637 | { |
3644 | /* | | 3638 | /* |
3645 | * Allocate the page table page. | | 3639 | * Allocate the page table page. |
3646 | */ | | 3640 | */ |
3647 | struct vm_page * const pg = pmap_physpage_alloc(usage); | | 3641 | struct vm_page * const pg = pmap_physpage_alloc(usage); |
3648 | if (__predict_false(pg == NULL)) { | | 3642 | if (__predict_false(pg == NULL)) { |
3649 | return ENOMEM; | | 3643 | return ENOMEM; |
3650 | } | | 3644 | } |
3651 | | | 3645 | |
3652 | LIST_INSERT_HEAD(&pmap->pm_ptpages, pg, pageq.list); | | 3646 | LIST_INSERT_HEAD(&pmap->pm_ptpages, pg, pageq.list); |
3653 | | | 3647 | |
3654 | /* | | 3648 | /* |
3655 | * Initialize the referencing PTE. | | 3649 | * Initialize the referencing PTE. |
3656 | */ | | 3650 | */ |
3657 | const pt_entry_t npte = ((VM_PAGE_TO_PHYS(pg) >> PGSHIFT) << PG_SHIFT) | | | 3651 | const pt_entry_t npte = ((VM_PAGE_TO_PHYS(pg) >> PGSHIFT) << PG_SHIFT) | |
3658 | PG_V | PG_KRE | PG_KWE | PG_WIRED; | | 3652 | PG_V | PG_KRE | PG_KWE | PG_WIRED; |
3659 | | | 3653 | |
3660 | atomic_store_relaxed(pte, npte); | | 3654 | atomic_store_relaxed(pte, npte); |
3661 | | | 3655 | |
3662 | return (0); | | 3656 | return (0); |
3663 | } | | 3657 | } |
3664 | | | 3658 | |
3665 | /* | | 3659 | /* |
3666 | * pmap_ptpage_free: | | 3660 | * pmap_ptpage_free: |
3667 | * | | 3661 | * |
3668 | * Free the level 2 or level 3 page table page referenced | | 3662 | * Free the level 2 or level 3 page table page referenced |
3669 | * by the provided PTE. | | 3663 | * by the provided PTE. |
3670 | * | | 3664 | * |
3671 | * Note: the pmap must already be locked. | | 3665 | * Note: the pmap must already be locked. |
3672 | */ | | 3666 | */ |
3673 | static void | | 3667 | static void |
3674 | pmap_ptpage_free(pmap_t pmap, pt_entry_t * const pte, | | 3668 | pmap_ptpage_free(pmap_t pmap, pt_entry_t * const pte, |
3675 | struct pmap_tlb_context * const tlbctx) | | 3669 | struct pmap_tlb_context * const tlbctx) |
3676 | { | | 3670 | { |
3677 | | | 3671 | |
3678 | /* | | 3672 | /* |
3679 | * Extract the physical address of the page from the PTE | | 3673 | * Extract the physical address of the page from the PTE |
3680 | * and clear the entry. | | 3674 | * and clear the entry. |
3681 | */ | | 3675 | */ |
3682 | const paddr_t ptpa = pmap_pte_pa(pte); | | 3676 | const paddr_t ptpa = pmap_pte_pa(pte); |
3683 | atomic_store_relaxed(pte, PG_NV); | | 3677 | atomic_store_relaxed(pte, PG_NV); |
3684 | | | 3678 | |
3685 | struct vm_page * const pg = PHYS_TO_VM_PAGE(ptpa); | | 3679 | struct vm_page * const pg = PHYS_TO_VM_PAGE(ptpa); |
3686 | KASSERT(pg != NULL); | | 3680 | KASSERT(pg != NULL); |
3687 | | | 3681 | |
| | | 3682 | KASSERT(PHYSPAGE_REFCNT(pg) == 0); |
3688 | #ifdef DEBUG | | 3683 | #ifdef DEBUG |
3689 | struct vm_page_md * const md = VM_PAGE_TO_MD(pg); | | | |
3690 | KDASSERT(md->pvh_refcnt == 0); | | | |
3691 | | | | |
3692 | pmap_zero_page(ptpa); | | 3684 | pmap_zero_page(ptpa); |
3693 | #endif | | 3685 | #endif |
3694 | | | 3686 | |
3695 | LIST_REMOVE(pg, pageq.list); | | 3687 | LIST_REMOVE(pg, pageq.list); |
3696 | LIST_INSERT_HEAD(&tlbctx->t_freeptq, pg, pageq.list); | | 3688 | LIST_INSERT_HEAD(&tlbctx->t_freeptq, pg, pageq.list); |
3697 | } | | 3689 | } |
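| | | | /*
| | | |  * Note the page is only queued here, not freed: it must stay
| | | |  * allocated until the TLB shootdown completes.  A hedged sketch of
| | | |  * the drain step a caller performs afterwards (the helper name is
| | | |  * illustrative):
| | | |  *
| | | |  *	static void
| | | |  *	pmap_tlb_ptpage_drain(struct pmap_tlb_context * const tlbctx)
| | | |  *	{
| | | |  *		struct vm_page *pg;
| | | |  *
| | | |  *		while ((pg = LIST_FIRST(&tlbctx->t_freeptq)) != NULL) {
| | | |  *			LIST_REMOVE(pg, pageq.list);
| | | |  *			pmap_physpage_free(VM_PAGE_TO_PHYS(pg));
| | | |  *		}
| | | |  *	}
| | | |  */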
3698 | | | 3690 | |
3699 | /* | | 3691 | /* |
3700 | * pmap_l3pt_delref: | | 3692 | * pmap_l3pt_delref: |
3701 | * | | 3693 | * |
3702 | * Delete a reference on a level 3 PT page. If the reference drops | | 3694 | * Delete a reference on a level 3 PT page. If the reference drops |
3703 | * to zero, free it. | | 3695 | * to zero, free it. |
3704 | * | | 3696 | * |
3705 | * Note: the pmap must already be locked. | | 3697 | * Note: the pmap must already be locked. |
3706 | */ | | 3698 | */ |
3707 | static void | | 3699 | static void |
3708 | pmap_l3pt_delref(pmap_t pmap, vaddr_t va, pt_entry_t *l3pte, | | 3700 | pmap_l3pt_delref(pmap_t pmap, vaddr_t va, pt_entry_t *l3pte, |
3709 | struct pmap_tlb_context * const tlbctx) | | 3701 | struct pmap_tlb_context * const tlbctx) |
3710 | { | | 3702 | { |
3711 | pt_entry_t *l1pte, *l2pte; | | 3703 | pt_entry_t *l1pte, *l2pte; |
3712 | pt_entry_t * const lev1map = pmap_lev1map(pmap); | | 3704 | pt_entry_t * const lev1map = pmap_lev1map(pmap); |
3713 | | | 3705 | |
3714 | l1pte = pmap_l1pte(lev1map, va); | | 3706 | l1pte = pmap_l1pte(lev1map, va); |
3715 | l2pte = pmap_l2pte(lev1map, va, l1pte); | | 3707 | l2pte = pmap_l2pte(lev1map, va, l1pte); |
3716 | | | 3708 | |
3717 | #ifdef DIAGNOSTIC | | 3709 | #ifdef DIAGNOSTIC |
3718 | if (pmap == pmap_kernel()) | | 3710 | if (pmap == pmap_kernel()) |
3719 | panic("pmap_l3pt_delref: kernel pmap"); | | 3711 | panic("pmap_l3pt_delref: kernel pmap"); |
3720 | #endif | | 3712 | #endif |
3721 | | | 3713 | |
3722 | if (pmap_physpage_delref(l3pte) == 0) { | | 3714 | if (pmap_physpage_delref(l3pte) == 0) { |
3723 | /* | | 3715 | /* |
3724 | * No more mappings; we can free the level 3 table. | | 3716 | * No more mappings; we can free the level 3 table. |
3725 | */ | | 3717 | */ |
3726 | #ifdef DEBUG | | 3718 | #ifdef DEBUG |
3727 | if (pmapdebug & PDB_PTPAGE) | | 3719 | if (pmapdebug & PDB_PTPAGE) |
3728 | printf("pmap_l3pt_delref: freeing level 3 table at " | | 3720 | printf("pmap_l3pt_delref: freeing level 3 table at " |
3729 | "0x%lx\n", pmap_pte_pa(l2pte)); | | 3721 | "0x%lx\n", pmap_pte_pa(l2pte)); |
3730 | #endif | | 3722 | #endif |
3731 | /* | | 3723 | /* |
3732 | * You can pass NULL if you know the last reference won't | | 3724 | * You can pass NULL if you know the last reference won't
3733 | * be dropped. | | 3725 | * be dropped. |
3734 | */ | | 3726 | */ |
3735 | KASSERT(tlbctx != NULL); | | 3727 | KASSERT(tlbctx != NULL); |
3736 | pmap_ptpage_free(pmap, l2pte, tlbctx); | | 3728 | pmap_ptpage_free(pmap, l2pte, tlbctx); |
3737 | | | 3729 | |
3738 | /* | | 3730 | /* |
3739 | * We've freed a level 3 table, so we must invalidate | | 3731 | * We've freed a level 3 table, so we must invalidate |
3740 | * any now-stale TLB entries for the corresponding VPT | | 3732 | * any now-stale TLB entries for the corresponding VPT |
3741 | * VA range. Easiest way to guarantee this is to hit | | 3733 | * VA range. Easiest way to guarantee this is to hit |
3742 | * all of the user TLB entries. | | 3734 | * all of the user TLB entries. |
3743 | */ | | 3735 | */ |
3744 | pmap_tlb_shootdown_all_user(pmap, PG_V, tlbctx); | | 3736 | pmap_tlb_shootdown_all_user(pmap, PG_V, tlbctx); |
3745 | | | 3737 | |
3746 | /* | | 3738 | /* |
3747 | * We've freed a level 3 table, so delete the reference | | 3739 | * We've freed a level 3 table, so delete the reference |
3748 | * on the level 2 table. | | 3740 | * on the level 2 table. |
3749 | */ | | 3741 | */ |
3750 | pmap_l2pt_delref(pmap, l1pte, l2pte, tlbctx); | | 3742 | pmap_l2pt_delref(pmap, l1pte, l2pte, tlbctx); |
3751 | } | | 3743 | } |
3752 | } | | 3744 | } |
3753 | | | 3745 | |
3754 | /* | | 3746 | /* |
3755 | * pmap_l2pt_delref: | | 3747 | * pmap_l2pt_delref: |
3756 | * | | 3748 | * |
3757 | * Delete a reference on a level 2 PT page. If the reference drops | | 3749 | * Delete a reference on a level 2 PT page. If the reference drops |
3758 | * to zero, free it. | | 3750 | * to zero, free it. |
3759 | * | | 3751 | * |
3760 | * Note: the pmap must already be locked. | | 3752 | * Note: the pmap must already be locked. |
3761 | */ | | 3753 | */ |
3762 | static void | | 3754 | static void |
3763 | pmap_l2pt_delref(pmap_t pmap, pt_entry_t *l1pte, pt_entry_t *l2pte, | | 3755 | pmap_l2pt_delref(pmap_t pmap, pt_entry_t *l1pte, pt_entry_t *l2pte, |
3764 | struct pmap_tlb_context * const tlbctx) | | 3756 | struct pmap_tlb_context * const tlbctx) |
3765 | { | | 3757 | { |
3766 | | | 3758 | |
3767 | #ifdef DIAGNOSTIC | | 3759 | #ifdef DIAGNOSTIC |
3768 | if (pmap == pmap_kernel()) | | 3760 | if (pmap == pmap_kernel()) |
3769 | panic("pmap_l2pt_delref: kernel pmap"); | | 3761 | panic("pmap_l2pt_delref: kernel pmap"); |
3770 | #endif | | 3762 | #endif |
3771 | | | 3763 | |
3772 | if (pmap_physpage_delref(l2pte) == 0) { | | 3764 | if (pmap_physpage_delref(l2pte) == 0) { |
3773 | /* | | 3765 | /* |
3774 | * No more mappings in this segment; we can free the | | 3766 | * No more mappings in this segment; we can free the |
3775 | * level 2 table. | | 3767 | * level 2 table. |
3776 | */ | | 3768 | */ |
3777 | #ifdef DEBUG | | 3769 | #ifdef DEBUG |
3778 | if (pmapdebug & PDB_PTPAGE) | | 3770 | if (pmapdebug & PDB_PTPAGE) |
3779 | printf("pmap_l2pt_delref: freeing level 2 table at " | | 3771 | printf("pmap_l2pt_delref: freeing level 2 table at " |
3780 | "0x%lx\n", pmap_pte_pa(l1pte)); | | 3772 | "0x%lx\n", pmap_pte_pa(l1pte)); |
3781 | #endif | | 3773 | #endif |
3782 | /* | | 3774 | /* |
3783 | * You can pass NULL if you know the last reference won't | | 3775 | * You can pass NULL if you know the last reference won't
3784 | * be dropped. | | 3776 | * be dropped. |
3785 | */ | | 3777 | */ |
3786 | KASSERT(tlbctx != NULL); | | 3778 | KASSERT(tlbctx != NULL); |
3787 | pmap_ptpage_free(pmap, l1pte, tlbctx); | | 3779 | pmap_ptpage_free(pmap, l1pte, tlbctx); |
3788 | | | 3780 | |
3789 | /* | | 3781 | /* |
3790 | * We've freed a level 2 table, so we must invalidate | | 3782 | * We've freed a level 2 table, so we must invalidate |
3791 | * any now-stale TLB entries for the corresponding VPT | | 3783 | * any now-stale TLB entries for the corresponding VPT |
3792 | * VA range. Easiest way to guarantee this is to hit | | 3784 | * VA range. Easiest way to guarantee this is to hit |
3793 | * all of the user TLB entries. | | 3785 | * all of the user TLB entries. |
3794 | */ | | 3786 | */ |
3795 | pmap_tlb_shootdown_all_user(pmap, PG_V, tlbctx); | | 3787 | pmap_tlb_shootdown_all_user(pmap, PG_V, tlbctx); |
3796 | | | 3788 | |
3797 | /* | | 3789 | /* |
3798 | * We've freed a level 2 table, so delete the reference | | 3790 | * We've freed a level 2 table, so delete the reference |
3799 | * on the level 1 table. | | 3791 | * on the level 1 table. |
3800 | */ | | 3792 | */ |
3801 | pmap_l1pt_delref(pmap, l1pte); | | 3793 | pmap_l1pt_delref(pmap, l1pte); |
3802 | } | | 3794 | } |
3803 | } | | 3795 | } |
3804 | | | 3796 | |
3805 | /* | | 3797 | /* |
3806 | * pmap_l1pt_delref: | | 3798 | * pmap_l1pt_delref: |
3807 | * | | 3799 | * |
3808 | * Delete a reference on a level 1 PT page. | | 3800 | * Delete a reference on a level 1 PT page. |
3809 | */ | | 3801 | */ |
3810 | static void | | 3802 | static void |
3811 | pmap_l1pt_delref(pmap_t pmap, pt_entry_t *l1pte) | | 3803 | pmap_l1pt_delref(pmap_t pmap, pt_entry_t *l1pte) |
3812 | { | | 3804 | { |
3813 | | | 3805 | |
3814 | KASSERT(pmap != pmap_kernel()); | | 3806 | KASSERT(pmap != pmap_kernel()); |
3815 | | | 3807 | |
3816 | (void)pmap_physpage_delref(l1pte); | | 3808 | (void)pmap_physpage_delref(l1pte); |
3817 | } | | 3809 | } |
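| | | | /*
| | | |  * All three delref routines above lean on the same primitive:
| | | |  * pmap_physpage_delref() maps the PTE pointer back to the vm_page
| | | |  * of the table that contains it, decrements that page's mapping
| | | |  * refcount, and returns the new count (0 meaning the last
| | | |  * reference is gone).  An illustrative sketch, not the real body;
| | | |  * the pmap is locked, so a plain decrement suffices:
| | | |  *
| | | |  *	static int
| | | |  *	pmap_physpage_delref(void *kva)
| | | |  *	{
| | | |  *		const paddr_t pa =
| | | |  *		    ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva));
| | | |  *		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
| | | |  *
| | | |  *		return --VM_PAGE_TO_MD(pg)->pvh_refcnt;
| | | |  *	}
| | | |  */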
3818 | | | 3810 | |
3819 | /******************** Address Space Number management ********************/ | | 3811 | /******************** Address Space Number management ********************/ |
3820 | | | 3812 | |
3821 | /* | | 3813 | /* |
3822 | * pmap_asn_alloc: | | 3814 | * pmap_asn_alloc: |
3823 | * | | 3815 | * |
3824 | * Allocate and assign an ASN to the specified pmap. | | 3816 | * Allocate and assign an ASN to the specified pmap. |
3825 | * | | 3817 | * |
3826 | * Note: the pmap must already be locked. This may be called from | | 3818 | * Note: the pmap must already be locked. This may be called from |
3827 | * an interprocessor interrupt, and in that case, the sender of | | 3819 | * an interprocessor interrupt, and in that case, the sender of |
3828 | * the IPI has the pmap lock. | | 3820 | * the IPI has the pmap lock. |
3829 | */ | | 3821 | */ |
3830 | static u_int | | 3822 | static u_int |
3831 | pmap_asn_alloc(pmap_t const pmap, struct cpu_info * const ci) | | 3823 | pmap_asn_alloc(pmap_t const pmap, struct cpu_info * const ci) |
3832 | { | | 3824 | { |
3833 | | | 3825 | |
3834 | #ifdef DEBUG | | 3826 | #ifdef DEBUG |
3835 | if (pmapdebug & (PDB_FOLLOW|PDB_ASN)) | | 3827 | if (pmapdebug & (PDB_FOLLOW|PDB_ASN)) |
3836 | printf("pmap_asn_alloc(%p)\n", pmap); | | 3828 | printf("pmap_asn_alloc(%p)\n", pmap); |
3837 | #endif | | 3829 | #endif |
3838 | | | 3830 | |
3839 | KASSERT(pmap != pmap_kernel()); | | 3831 | KASSERT(pmap != pmap_kernel()); |
3840 | KASSERT(pmap->pm_percpu[ci->ci_cpuid].pmc_lev1map != kernel_lev1map); | | 3832 | KASSERT(pmap->pm_percpu[ci->ci_cpuid].pmc_lev1map != kernel_lev1map); |
3841 | KASSERT(kpreempt_disabled()); | | 3833 | KASSERT(kpreempt_disabled()); |
3842 | | | 3834 | |
3843 | /* No work to do if the CPU does not implement ASNs. */ | | 3835 | /* No work to do if the CPU does not implement ASNs. */
3844 | if (pmap_max_asn == 0) | | 3836 | if (pmap_max_asn == 0) |
3845 | return 0; | | 3837 | return 0; |
3846 | | | 3838 | |
3847 | struct pmap_percpu * const pmc = &pmap->pm_percpu[ci->ci_cpuid]; | | 3839 | struct pmap_percpu * const pmc = &pmap->pm_percpu[ci->ci_cpuid]; |
3848 | | | 3840 | |
3849 | /* | | 3841 | /* |
3850 | * Hopefully, we can continue using the one we have... | | 3842 | * Hopefully, we can continue using the one we have... |
3851 | * | | 3843 | * |
3852 | * N.B. the generation check will fail the first time | | 3844 | * N.B. the generation check will fail the first time |
3853 | * any pmap is activated on a given CPU, because we start | | 3845 | * any pmap is activated on a given CPU, because we start |
3854 | * the generation counter at 1, but initialize pmaps with | | 3846 | * the generation counter at 1, but initialize pmaps with |
3855 | * 0; this forces the first ASN allocation to occur. | | 3847 | * 0; this forces the first ASN allocation to occur. |
3856 | */ | | 3848 | */ |
3857 | if (pmc->pmc_asngen == ci->ci_asn_gen) { | | 3849 | if (pmc->pmc_asngen == ci->ci_asn_gen) { |
3858 | #ifdef DEBUG | | 3850 | #ifdef DEBUG |
3859 | if (pmapdebug & PDB_ASN) | | 3851 | if (pmapdebug & PDB_ASN) |
3860 | printf("pmap_asn_alloc: same generation, keeping %u\n", | | 3852 | printf("pmap_asn_alloc: same generation, keeping %u\n", |
3861 | pmc->pmc_asn); | | 3853 | pmc->pmc_asn); |
3862 | #endif | | 3854 | #endif |
3863 | TLB_COUNT(asn_reuse); | | 3855 | TLB_COUNT(asn_reuse); |
3864 | return pmc->pmc_asn; | | 3856 | return pmc->pmc_asn; |
3865 | } | | 3857 | } |
3866 | | | 3858 | |
3867 | /* | | 3859 | /* |
3868 | * Need to assign a new ASN. Grab the next one, incrementing | | 3860 | * Need to assign a new ASN. Grab the next one, incrementing |
3869 | * the generation number if we have to. | | 3861 | * the generation number if we have to. |
3870 | */ | | 3862 | */ |
3871 | if (ci->ci_next_asn > pmap_max_asn) { | | 3863 | if (ci->ci_next_asn > pmap_max_asn) { |
3872 | /* | | 3864 | /* |
3873 | * Invalidate all non-PG_ASM TLB entries and the | | 3865 | * Invalidate all non-PG_ASM TLB entries and the |
3874 | * I-cache, and bump the generation number. | | 3866 | * I-cache, and bump the generation number. |
3875 | */ | | 3867 | */ |
3876 | ALPHA_TBIAP(); | | 3868 | ALPHA_TBIAP(); |
3877 | alpha_pal_imb(); | | 3869 | alpha_pal_imb(); |
3878 | | | 3870 | |
3879 | ci->ci_next_asn = PMAP_ASN_FIRST_USER; | | 3871 | ci->ci_next_asn = PMAP_ASN_FIRST_USER; |
3880 | ci->ci_asn_gen++; | | 3872 | ci->ci_asn_gen++; |
3881 | TLB_COUNT(asn_newgen); | | 3873 | TLB_COUNT(asn_newgen); |
3882 | | | 3874 | |
3883 | /* | | 3875 | /* |
3884 | * Make sure the generation number doesn't wrap. We could | | 3876 | * Make sure the generation number doesn't wrap. We could |
3885 | * handle this scenario by traversing all of the pmaps, | | 3877 | * handle this scenario by traversing all of the pmaps, |
3886 | * and invalidating the generation number on those which | | 3878 | * and invalidating the generation number on those which |
3887 | * are not currently in use by this processor. | | 3879 | * are not currently in use by this processor. |
3888 | * | | 3880 | * |
3889 | * However... considering that we're using an unsigned 64-bit | | 3881 | * However... considering that we're using an unsigned 64-bit
3890 | * integer for generation numbers, we won't wrap for | | 3882 | * integer for generation numbers, we won't wrap for
3891 | * approximately 75 billion years on a 128-ASN CPU | | 3883 | * approximately 75 billion years on a 128-ASN CPU
3892 | * (assuming 1000 switch operations per second). | | 3884 | * (assuming 1000 switch operations per second).
3893 | * | | 3885 | * |
3894 | * So, we don't bother. | | 3886 | * So, we don't bother. |
3895 | */ | | 3887 | */ |
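| | | | /*
| | | |  * (Checking that figure: with 128 ASNs a generation covers 127
| | | |  * user allocations, so 1000 switches/sec bumps the generation
| | | |  * about 7.9 times/sec; 2^64 / 7.9 ~= 2.3e18 seconds, which is
| | | |  * roughly 74 billion years.)
| | | |  */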
3896 | KASSERT(ci->ci_asn_gen != PMAP_ASNGEN_INVALID); | | 3888 | KASSERT(ci->ci_asn_gen != PMAP_ASNGEN_INVALID); |
3897 | #ifdef DEBUG | | 3889 | #ifdef DEBUG |
3898 | if (pmapdebug & PDB_ASN) | | 3890 | if (pmapdebug & PDB_ASN) |
3899 | printf("pmap_asn_alloc: generation bumped to %lu\n", | | 3891 | printf("pmap_asn_alloc: generation bumped to %lu\n", |
3900 | ci->ci_asn_gen); | | 3892 | ci->ci_asn_gen);
3901 | #endif | | 3893 | #endif |
3902 | } | | 3894 | } |
3903 | | | 3895 | |
3904 | /* | | 3896 | /* |
3905 | * Assign the new ASN and validate the generation number. | | 3897 | * Assign the new ASN and validate the generation number. |
3906 | */ | | 3898 | */ |
3907 | pmc->pmc_asn = ci->ci_next_asn++; | | 3899 | pmc->pmc_asn = ci->ci_next_asn++; |
3908 | pmc->pmc_asngen = ci->ci_asn_gen; | | 3900 | pmc->pmc_asngen = ci->ci_asn_gen; |
3909 | TLB_COUNT(asn_assign); | | 3901 | TLB_COUNT(asn_assign); |
3910 | | | 3902 | |
3911 | /* | | 3903 | /* |
3912 | * We have a new ASN, so we can skip any pending I-stream sync | | 3904 | * We have a new ASN, so we can skip any pending I-stream sync |
3913 | * on the way back out to user space. | | 3905 | * on the way back out to user space. |
3914 | */ | | 3906 | */ |
3915 | pmc->pmc_needisync = 0; | | 3907 | pmc->pmc_needisync = 0; |
3916 | | | 3908 | |
3917 | #ifdef DEBUG | | 3909 | #ifdef DEBUG |
3918 | if (pmapdebug & PDB_ASN) | | 3910 | if (pmapdebug & PDB_ASN) |
3919 | printf("pmap_asn_alloc: assigning %u to pmap %p\n", | | 3911 | printf("pmap_asn_alloc: assigning %u to pmap %p\n", |
3920 | pmc->pmc_asn, pmap); | | 3912 | pmc->pmc_asn, pmap); |
3921 | #endif | | 3913 | #endif |
3922 | return pmc->pmc_asn; | | 3914 | return pmc->pmc_asn; |
3923 | } | | 3915 | } |
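| | | | /*
| | | |  * In compressed form, the allocation policy above (the concrete
| | | |  * values are illustrative, e.g. pmap_max_asn == 127 and
| | | |  * PMAP_ASN_FIRST_USER == 1):
| | | |  *
| | | |  *	same generation?	-> reuse pmc->pmc_asn
| | | |  *	next ASN available?	-> pmc->pmc_asn = ci->ci_next_asn++
| | | |  *	ASNs exhausted?		-> TBIAP + IMB, restart numbering
| | | |  *				   at PMAP_ASN_FIRST_USER in a new
| | | |  *				   generation
| | | |  *
| | | |  * so each generation hands out ASNs 1..127 before a full user-TLB
| | | |  * invalidate is needed.
| | | |  */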