| @@ -1,1107 +1,1107 @@ | | | @@ -1,1107 +1,1107 @@ |
1 | /* $NetBSD: pmap.c,v 1.114 2016/12/22 14:47:59 cherry Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.114.20.1 2020/08/26 18:06:54 martin Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jeremy Cooper. | | 8 | * by Jeremy Cooper. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* | | 32 | /* |
33 | * XXX These comments aren't quite accurate. Need to change. | | 33 | * XXX These comments aren't quite accurate. Need to change. |
34 | * The sun3x uses the MC68851 Memory Management Unit, which is built | | 34 | * The sun3x uses the MC68851 Memory Management Unit, which is built |
35 | * into the CPU. The 68851 maps virtual to physical addresses using | | 35 | * into the CPU. The 68851 maps virtual to physical addresses using |
36 | * a multi-level table lookup, which is stored in the very memory that | | 36 | * a multi-level table lookup, which is stored in the very memory that |
37 | * it maps. The number of levels of lookup is configurable from one | | 37 | * it maps. The number of levels of lookup is configurable from one |
38 | * to four. In this implementation, we use three, named 'A' through 'C'. | | 38 | * to four. In this implementation, we use three, named 'A' through 'C'. |
39 | * | | 39 | * |
40 | * The MMU translates virtual addresses into physical addresses by | | 40 | * The MMU translates virtual addresses into physical addresses by |
41 | * traversing these tables in a process called a 'table walk'. The most | | 41 | * traversing these tables in a process called a 'table walk'. The most |
42 | * significant 7 bits of the Virtual Address ('VA') being translated are | | 42 | * significant 7 bits of the Virtual Address ('VA') being translated are |
43 | * used as an index into the level A table, whose base in physical memory | | 43 | * used as an index into the level A table, whose base in physical memory |
44 | * is stored in a special MMU register, the 'CPU Root Pointer' or CRP. The | | 44 | * is stored in a special MMU register, the 'CPU Root Pointer' or CRP. The |
45 | * address found at that index in the A table is used as the base | | 45 | * address found at that index in the A table is used as the base |
46 | * address for the next table, the B table. The next six bits of the VA are | | 46 | * address for the next table, the B table. The next six bits of the VA are |
47 | * used as an index into the B table, which in turn gives the base address | | 47 | * used as an index into the B table, which in turn gives the base address |
48 | * of the third and final C table. | | 48 | * of the third and final C table. |
49 | * | | 49 | * |
50 | * The next six bits of the VA are used as an index into the C table to | | 50 | * The next six bits of the VA are used as an index into the C table to |
51 | * locate a Page Table Entry (PTE). The PTE is a physical address in memory | | 51 | * locate a Page Table Entry (PTE). The PTE is a physical address in memory |
52 | * to which the remaining 13 bits of the VA are added, producing the | | 52 | * to which the remaining 13 bits of the VA are added, producing the |
53 | * mapped physical address. | | 53 | * mapped physical address. |
54 | * | | 54 | * |
55 | * To map the entire memory space in this manner would require 2114296 bytes | | 55 | * To map the entire memory space in this manner would require 2114296 bytes |
56 | * of page tables per process - quite expensive. Instead we will | | 56 | * of page tables per process - quite expensive. Instead we will |
57 | * allocate a fixed but considerably smaller space for the page tables at | | 57 | * allocate a fixed but considerably smaller space for the page tables at |
58 | * the time the VM system is initialized. When the pmap code is asked by | | 58 | * the time the VM system is initialized. When the pmap code is asked by |
59 | * the kernel to map a VA to a PA, it allocates tables as needed from this | | 59 | * the kernel to map a VA to a PA, it allocates tables as needed from this |
60 | * pool. When there are no more tables in the pool, tables are stolen | | 60 | * pool. When there are no more tables in the pool, tables are stolen |
61 | * from the oldest mapped entries in the tree. This is only possible | | 61 | * from the oldest mapped entries in the tree. This is only possible |
62 | * because all memory mappings are stored in the kernel memory map | | 62 | * because all memory mappings are stored in the kernel memory map |
63 | * structures, independent of the pmap structures. A VA which references | | 63 | * structures, independent of the pmap structures. A VA which references |
64 | * one of these invalidated maps will cause a page fault. The kernel | | 64 | * one of these invalidated maps will cause a page fault. The kernel |
65 | * will determine that the page fault was caused by a task using a valid | | 65 | * will determine that the page fault was caused by a task using a valid |
66 | * VA, but for some reason (which does not concern it), that address was | | 66 | * VA, but for some reason (which does not concern it), that address was |
67 | * not mapped. It will ask the pmap code to re-map the entry and then | | 67 | * not mapped. It will ask the pmap code to re-map the entry and then |
68 | * it will resume executing the faulting task. | | 68 | * it will resume executing the faulting task. |
69 | * | | 69 | * |
70 | * In this manner the most efficient use of the page table space is | | 70 | * In this manner the most efficient use of the page table space is |
71 | * achieved. Tasks which do not execute often will have their tables | | 71 | * achieved. Tasks which do not execute often will have their tables |
72 | * stolen and reused by tasks which execute more frequently. The best | | 72 | * stolen and reused by tasks which execute more frequently. The best |
73 | * size for the page table pool will probably be determined by | | 73 | * size for the page table pool will probably be determined by |
74 | * experimentation. | | 74 | * experimentation. |
75 | * | | 75 | * |
76 | * You read all of the comments so far. Good for you. | | 76 | * You read all of the comments so far. Good for you. |
77 | * Now go play! | | 77 | * Now go play! |
78 | */ | | 78 | */ |
79 | | | 79 | |
80 | /*** A Note About the 68851 Address Translation Cache | | 80 | /*** A Note About the 68851 Address Translation Cache |
81 | * The MC68851 has a 64 entry cache, called the Address Translation Cache | | 81 | * The MC68851 has a 64 entry cache, called the Address Translation Cache |
82 | * or 'ATC'. This cache stores the most recently used page descriptors | | 82 | * or 'ATC'. This cache stores the most recently used page descriptors |
83 | * accessed by the MMU when it does translations. Using a marker called a | | 83 | * accessed by the MMU when it does translations. Using a marker called a |
84 | * 'task alias' the MMU can store the descriptors from 8 different table | | 84 | * 'task alias' the MMU can store the descriptors from 8 different table |
85 | * spaces concurrently. The task alias is associated with the base | | 85 | * spaces concurrently. The task alias is associated with the base |
86 | * address of the level A table of that address space. When an address | | 86 | * address of the level A table of that address space. When an address |
87 | * space is currently active (the CRP currently points to its A table) | | 87 | * space is currently active (the CRP currently points to its A table) |
88 | * the only cached descriptors that will be obeyed are ones which have a | | 88 | * the only cached descriptors that will be obeyed are ones which have a |
89 | * matching task alias of the current space associated with them. | | 89 | * matching task alias of the current space associated with them. |
90 | * | | 90 | * |
91 | * Since the cache is always consulted before any table lookups are done, | | 91 | * Since the cache is always consulted before any table lookups are done, |
92 | * it is important that it accurately reflect the state of the MMU tables. | | 92 | * it is important that it accurately reflect the state of the MMU tables. |
93 | * Whenever a change has been made to a table that has been loaded into | | 93 | * Whenever a change has been made to a table that has been loaded into |
94 | * the MMU, the code must be sure to flush any cached entries that are | | 94 | * the MMU, the code must be sure to flush any cached entries that are |
95 | * affected by the change. These instances are documented in the code at | | 95 | * affected by the change. These instances are documented in the code at |
96 | * various points. | | 96 | * various points. |
97 | */ | | 97 | */ |
98 | /*** A Note About the Note About the 68851 Address Translation Cache | | 98 | /*** A Note About the Note About the 68851 Address Translation Cache |
99 | * 4 months into this code I discovered that the sun3x does not have | | 99 | * 4 months into this code I discovered that the sun3x does not have |
100 | * a MC68851 chip. Instead, it has a version of this MMU that is part of the | | 100 | * a MC68851 chip. Instead, it has a version of this MMU that is part of the |
101 | * 68030 CPU. | | 101 | * 68030 CPU. |
102 | * Although it behaves very similarly to the 68851, it only has 1 task | | 102 | * Although it behaves very similarly to the 68851, it only has 1 task |
103 | * alias and a 22 entry cache. So sadly (or happily), the first paragraph | | 103 | * alias and a 22 entry cache. So sadly (or happily), the first paragraph |
104 | * of the previous note does not apply to the sun3x pmap. | | 104 | * of the previous note does not apply to the sun3x pmap. |
105 | */ | | 105 | */ |
106 | | | 106 | |
107 | #include <sys/cdefs.h> | | 107 | #include <sys/cdefs.h> |
108 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.114 2016/12/22 14:47:59 cherry Exp $"); | | 108 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.114.20.1 2020/08/26 18:06:54 martin Exp $"); |
109 | | | 109 | |
110 | #include "opt_ddb.h" | | 110 | #include "opt_ddb.h" |
111 | #include "opt_pmap_debug.h" | | 111 | #include "opt_pmap_debug.h" |
112 | | | 112 | |
113 | #include <sys/param.h> | | 113 | #include <sys/param.h> |
114 | #include <sys/systm.h> | | 114 | #include <sys/systm.h> |
115 | #include <sys/proc.h> | | 115 | #include <sys/proc.h> |
116 | #include <sys/malloc.h> | | 116 | #include <sys/malloc.h> |
117 | #include <sys/pool.h> | | 117 | #include <sys/pool.h> |
118 | #include <sys/queue.h> | | 118 | #include <sys/queue.h> |
119 | #include <sys/kcore.h> | | 119 | #include <sys/kcore.h> |
120 | #include <sys/atomic.h> | | 120 | #include <sys/atomic.h> |
121 | | | 121 | |
122 | #include <uvm/uvm.h> | | 122 | #include <uvm/uvm.h> |
123 | | | 123 | |
124 | #include <machine/cpu.h> | | 124 | #include <machine/cpu.h> |
125 | #include <machine/kcore.h> | | 125 | #include <machine/kcore.h> |
126 | #include <machine/mon.h> | | 126 | #include <machine/mon.h> |
127 | #include <machine/pmap.h> | | 127 | #include <machine/pmap.h> |
128 | #include <machine/pte.h> | | 128 | #include <machine/pte.h> |
129 | #include <machine/vmparam.h> | | 129 | #include <machine/vmparam.h> |
130 | #include <m68k/cacheops.h> | | 130 | #include <m68k/cacheops.h> |
131 | | | 131 | |
132 | #include <sun3/sun3/cache.h> | | 132 | #include <sun3/sun3/cache.h> |
133 | #include <sun3/sun3/machdep.h> | | 133 | #include <sun3/sun3/machdep.h> |
134 | | | 134 | |
135 | #include "pmap_pvt.h" | | 135 | #include "pmap_pvt.h" |
136 | | | 136 | |
137 | /* XXX - What headers declare these? */ | | 137 | /* XXX - What headers declare these? */ |
138 | extern struct pcb *curpcb; | | 138 | extern struct pcb *curpcb; |
139 | | | 139 | |
140 | /* Defined in locore.s */ | | 140 | /* Defined in locore.s */ |
141 | extern char kernel_text[]; | | 141 | extern char kernel_text[]; |
142 | | | 142 | |
143 | /* Defined by the linker */ | | 143 | /* Defined by the linker */ |
144 | extern char etext[], edata[], end[]; | | 144 | extern char etext[], edata[], end[]; |
145 | extern char *esym; /* DDB */ | | 145 | extern char *esym; /* DDB */ |
146 | | | 146 | |
147 | /*************************** DEBUGGING DEFINITIONS *********************** | | 147 | /*************************** DEBUGGING DEFINITIONS *********************** |
148 | * Macros, preprocessor defines and variables used in debugging can make * | | 148 | * Macros, preprocessor defines and variables used in debugging can make * |
149 | * code hard to read. Anything used exclusively for debugging purposes * | | 149 | * code hard to read. Anything used exclusively for debugging purposes * |
150 | * is defined here to avoid having such mess scattered around the file. * | | 150 | * is defined here to avoid having such mess scattered around the file. * |
151 | *************************************************************************/ | | 151 | *************************************************************************/ |
152 | #ifdef PMAP_DEBUG | | 152 | #ifdef PMAP_DEBUG |
153 | /* | | 153 | /* |
154 | * To aid the debugging process, macros should be expanded into smaller steps | | 154 | * To aid the debugging process, macros should be expanded into smaller steps |
155 | * that accomplish the same goal, yet provide convenient places for placing | | 155 | * that accomplish the same goal, yet provide convenient places for placing |
156 | * breakpoints. When this code is compiled with PMAP_DEBUG mode defined, the | | 156 | * breakpoints. When this code is compiled with PMAP_DEBUG mode defined, the |
157 | * 'INLINE' keyword is defined to an empty string. This way, any function | | 157 | * 'INLINE' keyword is defined to an empty string. This way, any function |
158 | * defined to be a 'static INLINE' will become 'outlined' and compiled as | | 158 | * defined to be a 'static INLINE' will become 'outlined' and compiled as |
159 | * a separate function, which is much easier to debug. | | 159 | * a separate function, which is much easier to debug. |
160 | */ | | 160 | */ |
161 | #define INLINE /* nothing */ | | 161 | #define INLINE /* nothing */ |
162 | | | 162 | |
163 | /* | | 163 | /* |
164 | * It is sometimes convenient to watch the activity of a particular table | | 164 | * It is sometimes convenient to watch the activity of a particular table |
165 | * in the system. The following variables are used for that purpose. | | 165 | * in the system. The following variables are used for that purpose. |
166 | */ | | 166 | */ |
167 | a_tmgr_t *pmap_watch_atbl = 0; | | 167 | a_tmgr_t *pmap_watch_atbl = 0; |
168 | b_tmgr_t *pmap_watch_btbl = 0; | | 168 | b_tmgr_t *pmap_watch_btbl = 0; |
169 | c_tmgr_t *pmap_watch_ctbl = 0; | | 169 | c_tmgr_t *pmap_watch_ctbl = 0; |
170 | | | 170 | |
171 | int pmap_debug = 0; | | 171 | int pmap_debug = 0; |
172 | #define DPRINT(args) if (pmap_debug) printf args | | 172 | #define DPRINT(args) if (pmap_debug) printf args |
173 | | | 173 | |
174 | #else /********** Stuff below is defined if NOT debugging **************/ | | 174 | #else /********** Stuff below is defined if NOT debugging **************/ |
175 | | | 175 | |
176 | #define INLINE inline | | 176 | #define INLINE inline |
177 | #define DPRINT(args) /* nada */ | | 177 | #define DPRINT(args) /* nada */ |
178 | | | 178 | |
179 | #endif /* PMAP_DEBUG */ | | 179 | #endif /* PMAP_DEBUG */ |
180 | /*********************** END OF DEBUGGING DEFINITIONS ********************/ | | 180 | /*********************** END OF DEBUGGING DEFINITIONS ********************/ |
181 | | | 181 | |
182 | /*** Management Structure - Memory Layout | | 182 | /*** Management Structure - Memory Layout |
183 | * For every MMU table in the sun3x pmap system there must be a way to | | 183 | * For every MMU table in the sun3x pmap system there must be a way to |
184 | * manage it; we must know which process is using it, what other tables | | 184 | * manage it; we must know which process is using it, what other tables |
185 | * depend on it, and whether or not it contains any locked pages. This | | 185 | * depend on it, and whether or not it contains any locked pages. This |
186 | * is solved by the creation of 'table management' or 'tmgr' | | 186 | * is solved by the creation of 'table management' or 'tmgr' |
187 | * structures. One for each MMU table in the system. | | 187 | * structures. One for each MMU table in the system. |
188 | * | | 188 | * |
189 | * MAP OF MEMORY USED BY THE PMAP SYSTEM | | 189 | * MAP OF MEMORY USED BY THE PMAP SYSTEM |
190 | * | | 190 | * |
191 | * towards lower memory | | 191 | * towards lower memory |
192 | * kernAbase -> +-------------------------------------------------------+ | | 192 | * kernAbase -> +-------------------------------------------------------+ |
193 | * | Kernel MMU A level table | | | 193 | * | Kernel MMU A level table | |
194 | * kernBbase -> +-------------------------------------------------------+ | | 194 | * kernBbase -> +-------------------------------------------------------+ |
195 | * | Kernel MMU B level tables | | | 195 | * | Kernel MMU B level tables | |
196 | * kernCbase -> +-------------------------------------------------------+ | | 196 | * kernCbase -> +-------------------------------------------------------+ |
197 | * | | | | 197 | * | | |
198 | * | Kernel MMU C level tables | | | 198 | * | Kernel MMU C level tables | |
199 | * | | | | 199 | * | | |
200 | * mmuCbase -> +-------------------------------------------------------+ | | 200 | * mmuCbase -> +-------------------------------------------------------+ |
201 | * | User MMU C level tables | | | 201 | * | User MMU C level tables | |
202 | * mmuAbase -> +-------------------------------------------------------+ | | 202 | * mmuAbase -> +-------------------------------------------------------+ |
203 | * | | | | 203 | * | | |
204 | * | User MMU A level tables | | | 204 | * | User MMU A level tables | |
205 | * | | | | 205 | * | | |
206 | * mmuBbase -> +-------------------------------------------------------+ | | 206 | * mmuBbase -> +-------------------------------------------------------+ |
207 | * | User MMU B level tables | | | 207 | * | User MMU B level tables | |
208 | * tmgrAbase -> +-------------------------------------------------------+ | | 208 | * tmgrAbase -> +-------------------------------------------------------+ |
209 | * | TMGR A level table structures | | | 209 | * | TMGR A level table structures | |
210 | * tmgrBbase -> +-------------------------------------------------------+ | | 210 | * tmgrBbase -> +-------------------------------------------------------+ |
211 | * | TMGR B level table structures | | | 211 | * | TMGR B level table structures | |
212 | * tmgrCbase -> +-------------------------------------------------------+ | | 212 | * tmgrCbase -> +-------------------------------------------------------+ |
213 | * | TMGR C level table structures | | | 213 | * | TMGR C level table structures | |
214 | * pvbase -> +-------------------------------------------------------+ | | 214 | * pvbase -> +-------------------------------------------------------+ |
215 | * | Physical to Virtual mapping table (list heads) | | | 215 | * | Physical to Virtual mapping table (list heads) | |
216 | * pvebase -> +-------------------------------------------------------+ | | 216 | * pvebase -> +-------------------------------------------------------+ |
217 | * | Physical to Virtual mapping table (list elements) | | | 217 | * | Physical to Virtual mapping table (list elements) | |
218 | * | | | | 218 | * | | |
219 | * +-------------------------------------------------------+ | | 219 | * +-------------------------------------------------------+ |
220 | * towards higher memory | | 220 | * towards higher memory |
221 | * | | 221 | * |
222 | * For every A table in the MMU A area, there will be a corresponding | | 222 | * For every A table in the MMU A area, there will be a corresponding |
223 | * a_tmgr structure in the TMGR A area. The same will be true for | | 223 | * a_tmgr structure in the TMGR A area. The same will be true for |
224 | * the B and C tables. This arrangement will make it easy to find the | | 224 | * the B and C tables. This arrangement will make it easy to find the |
225 | * controlling tmgr structure for any table in the system by use of | | 225 | * controlling tmgr structure for any table in the system by use of |
226 | * (relatively) simple macros. | | 226 | * (relatively) simple macros. |
227 | */ | | 227 | */ |
228 | | | 228 | |
229 | /* | | 229 | /* |
230 | * Global variables for storing the base addresses for the areas | | 230 | * Global variables for storing the base addresses for the areas |
231 | * labeled above. | | 231 | * labeled above. |
232 | */ | | 232 | */ |
233 | static vaddr_t kernAphys; | | 233 | static vaddr_t kernAphys; |
234 | static mmu_long_dte_t *kernAbase; | | 234 | static mmu_long_dte_t *kernAbase; |
235 | static mmu_short_dte_t *kernBbase; | | 235 | static mmu_short_dte_t *kernBbase; |
236 | static mmu_short_pte_t *kernCbase; | | 236 | static mmu_short_pte_t *kernCbase; |
237 | static mmu_short_pte_t *mmuCbase; | | 237 | static mmu_short_pte_t *mmuCbase; |
238 | static mmu_short_dte_t *mmuBbase; | | 238 | static mmu_short_dte_t *mmuBbase; |
239 | static mmu_long_dte_t *mmuAbase; | | 239 | static mmu_long_dte_t *mmuAbase; |
240 | static a_tmgr_t *Atmgrbase; | | 240 | static a_tmgr_t *Atmgrbase; |
241 | static b_tmgr_t *Btmgrbase; | | 241 | static b_tmgr_t *Btmgrbase; |
242 | static c_tmgr_t *Ctmgrbase; | | 242 | static c_tmgr_t *Ctmgrbase; |
243 | static pv_t *pvbase; | | 243 | static pv_t *pvbase; |
244 | static pv_elem_t *pvebase; | | 244 | static pv_elem_t *pvebase; |
245 | static struct pmap kernel_pmap; | | 245 | static struct pmap kernel_pmap; |
246 | struct pmap *const kernel_pmap_ptr = &kernel_pmap; | | 246 | struct pmap *const kernel_pmap_ptr = &kernel_pmap; |
247 | | | 247 | |
248 | /* | | 248 | /* |
249 | * This holds the CRP currently loaded into the MMU. | | 249 | * This holds the CRP currently loaded into the MMU. |
250 | */ | | 250 | */ |
251 | struct mmu_rootptr kernel_crp; | | 251 | struct mmu_rootptr kernel_crp; |
252 | | | 252 | |
253 | /* | | 253 | /* |
254 | * Just all around global variables. | | 254 | * Just all around global variables. |
255 | */ | | 255 | */ |
256 | static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool; | | 256 | static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool; |
257 | static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool; | | 257 | static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool; |
258 | static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool; | | 258 | static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool; |
259 | | | 259 | |
260 | | | 260 | |
261 | /* | | 261 | /* |
262 | * Flags used to mark the safety/availability of certain operations or | | 262 | * Flags used to mark the safety/availability of certain operations or |
263 | * resources. | | 263 | * resources. |
264 | */ | | 264 | */ |
265 | /* Safe to use pmap_bootstrap_alloc(). */ | | 265 | /* Safe to use pmap_bootstrap_alloc(). */ |
266 | static bool bootstrap_alloc_enabled = false; | | 266 | static bool bootstrap_alloc_enabled = false; |
267 | /* Temporary virtual pages are in use */ | | 267 | /* Temporary virtual pages are in use */ |
268 | int tmp_vpages_inuse; | | 268 | int tmp_vpages_inuse; |
269 | | | 269 | |
270 | /* | | 270 | /* |
271 | * XXX: For now, retain the traditional variables that were | | 271 | * XXX: For now, retain the traditional variables that were |
272 | * used in the old pmap/vm interface (without NONCONTIG). | | 272 | * used in the old pmap/vm interface (without NONCONTIG). |
273 | */ | | 273 | */ |
274 | /* Kernel virtual address space available: */ | | 274 | /* Kernel virtual address space available: */ |
275 | vaddr_t virtual_avail, virtual_end; | | 275 | vaddr_t virtual_avail, virtual_end; |
276 | /* Physical address space available: */ | | 276 | /* Physical address space available: */ |
277 | paddr_t avail_start, avail_end; | | 277 | paddr_t avail_start, avail_end; |
278 | | | 278 | |
279 | /* This keeps track of the end of the contiguously mapped range. */ | | 279 | /* This keeps track of the end of the contiguously mapped range. */ |
280 | vaddr_t virtual_contig_end; | | 280 | vaddr_t virtual_contig_end; |
281 | | | 281 | |
282 | /* Physical address used by pmap_next_page() */ | | 282 | /* Physical address used by pmap_next_page() */ |
283 | paddr_t avail_next; | | 283 | paddr_t avail_next; |
284 | | | 284 | |
285 | /* These are used by pmap_copy_page(), etc. */ | | 285 | /* These are used by pmap_copy_page(), etc. */ |
286 | vaddr_t tmp_vpages[2]; | | 286 | vaddr_t tmp_vpages[2]; |
287 | | | 287 | |
288 | /* memory pool for pmap structures */ | | 288 | /* memory pool for pmap structures */ |
289 | struct pool pmap_pmap_pool; | | 289 | struct pool pmap_pmap_pool; |
290 | | | 290 | |
291 | /* | | 291 | /* |
292 | * The 3/80 is the only member of the sun3x family that has non-contiguous | | 292 | * The 3/80 is the only member of the sun3x family that has non-contiguous |
293 | * physical memory. Memory is divided into 4 banks which are physically | | 293 | * physical memory. Memory is divided into 4 banks which are physically |
294 | * locatable on the system board. Although the size of these banks varies | | 294 | * locatable on the system board. Although the size of these banks varies |
295 | * with the size of memory they contain, their base addresses are | | 295 | * with the size of memory they contain, their base addresses are |
296 | * permanently fixed. The following structure, which describes these | | 296 | * permanently fixed. The following structure, which describes these |
297 | * banks, is initialized by pmap_bootstrap() after it reads from a similar | | 297 | * banks, is initialized by pmap_bootstrap() after it reads from a similar |
298 | * structure provided by the ROM Monitor. | | 298 | * structure provided by the ROM Monitor. |
299 | * | | 299 | * |
300 | * For the other machines in the sun3x architecture which do have contiguous | | 300 | * For the other machines in the sun3x architecture which do have contiguous |
301 | * RAM, this list will have only one entry, which will describe the entire | | 301 | * RAM, this list will have only one entry, which will describe the entire |
302 | * range of available memory. | | 302 | * range of available memory. |
303 | */ | | 303 | */ |
304 | struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS]; | | 304 | struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS]; |
305 | u_int total_phys_mem; | | 305 | u_int total_phys_mem; |
306 | | | 306 | |
307 | /*************************************************************************/ | | 307 | /*************************************************************************/ |
308 | | | 308 | |
309 | /* | | 309 | /* |
310 | * XXX - Should "tune" these based on statistics. | | 310 | * XXX - Should "tune" these based on statistics. |
311 | * | | 311 | * |
312 | * My first guess about the relative numbers of these needed is | | 312 | * My first guess about the relative numbers of these needed is |
313 | * based on the fact that a "typical" process will have several | | 313 | * based on the fact that a "typical" process will have several |
314 | * pages mapped at low virtual addresses (text, data, bss), then | | 314 | * pages mapped at low virtual addresses (text, data, bss), then |
315 | * some mapped shared libraries, and then some stack pages mapped | | 315 | * some mapped shared libraries, and then some stack pages mapped |
316 | * near the high end of the VA space. Each process can use only | | 316 | * near the high end of the VA space. Each process can use only |
317 | * one A table, and most will use only two B tables (maybe three) | | 317 | * one A table, and most will use only two B tables (maybe three) |
318 | * and probably about four C tables. Therefore, the first guess | | 318 | * and probably about four C tables. Therefore, the first guess |
319 | * at the relative numbers of these needed is 1:2:4 -gwr | | 319 | * at the relative numbers of these needed is 1:2:4 -gwr |
320 | * | | 320 | * |
321 | * The number of C tables needed is closely related to the amount | | 321 | * The number of C tables needed is closely related to the amount |
322 | * of physical memory available plus a certain amount attributable | | 322 | * of physical memory available plus a certain amount attributable |
323 | * to the use of double mappings. With a few simulation statistics | | 323 | * to the use of double mappings. With a few simulation statistics |
324 | * we can find a reasonably good estimation of this unknown value. | | 324 | * we can find a reasonably good estimation of this unknown value. |
325 | * Armed with that and the above ratios, we have a good idea of what | | 325 | * Armed with that and the above ratios, we have a good idea of what |
326 | * is needed at each level. -j | | 326 | * is needed at each level. -j |
327 | * | | 327 | * |
 * Note: It is not physical memory size, but the total mapped
329 | * virtual space required by the combined working sets of all the | | 329 | * virtual space required by the combined working sets of all the |
330 | * currently _runnable_ processes. (Sleeping ones don't count.) | | 330 | * currently _runnable_ processes. (Sleeping ones don't count.) |
331 | * The amount of physical memory should be irrelevant. -gwr | | 331 | * The amount of physical memory should be irrelevant. -gwr |
332 | */ | | 332 | */ |
333 | #ifdef FIXED_NTABLES | | 333 | #ifdef FIXED_NTABLES |
334 | #define NUM_A_TABLES 16 | | 334 | #define NUM_A_TABLES 16 |
335 | #define NUM_B_TABLES 32 | | 335 | #define NUM_B_TABLES 32 |
336 | #define NUM_C_TABLES 64 | | 336 | #define NUM_C_TABLES 64 |
337 | #else | | 337 | #else |
338 | unsigned int NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES; | | 338 | unsigned int NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES; |
339 | #endif /* FIXED_NTABLES */ | | 339 | #endif /* FIXED_NTABLES */ |
340 | | | 340 | |
341 | /* | | 341 | /* |
342 | * This determines our total virtual mapping capacity. | | 342 | * This determines our total virtual mapping capacity. |
343 | * Yes, it is a FIXED value so we can pre-allocate. | | 343 | * Yes, it is a FIXED value so we can pre-allocate. |
344 | */ | | 344 | */ |
345 | #define NUM_USER_PTES (NUM_C_TABLES * MMU_C_TBL_SIZE) | | 345 | #define NUM_USER_PTES (NUM_C_TABLES * MMU_C_TBL_SIZE) |
346 | | | 346 | |
347 | /* | | 347 | /* |
348 | * The size of the Kernel Virtual Address Space (KVAS) | | 348 | * The size of the Kernel Virtual Address Space (KVAS) |
349 | * for purposes of MMU table allocation is -KERNBASE | | 349 | * for purposes of MMU table allocation is -KERNBASE |
350 | * (length from KERNBASE to 0xFFFFffff) | | 350 | * (length from KERNBASE to 0xFFFFffff) |
351 | */ | | 351 | */ |
352 | #define KVAS_SIZE (-KERNBASE3X) | | 352 | #define KVAS_SIZE (-KERNBASE3X) |
353 | | | 353 | |
354 | /* Numbers of kernel MMU tables to support KVAS_SIZE. */ | | 354 | /* Numbers of kernel MMU tables to support KVAS_SIZE. */ |
355 | #define KERN_B_TABLES (KVAS_SIZE >> MMU_TIA_SHIFT) | | 355 | #define KERN_B_TABLES (KVAS_SIZE >> MMU_TIA_SHIFT) |
356 | #define KERN_C_TABLES (KVAS_SIZE >> MMU_TIB_SHIFT) | | 356 | #define KERN_C_TABLES (KVAS_SIZE >> MMU_TIB_SHIFT) |
357 | #define NUM_KERN_PTES (KVAS_SIZE >> MMU_TIC_SHIFT) | | 357 | #define NUM_KERN_PTES (KVAS_SIZE >> MMU_TIC_SHIFT) |
358 | | | 358 | |
/*************************** MISCELLANEOUS MACROS ************************/
360 | void *pmap_bootstrap_alloc(int); | | 360 | void *pmap_bootstrap_alloc(int); |
361 | | | 361 | |
362 | static INLINE void *mmu_ptov(paddr_t); | | 362 | static INLINE void *mmu_ptov(paddr_t); |
363 | static INLINE paddr_t mmu_vtop(void *); | | 363 | static INLINE paddr_t mmu_vtop(void *); |
364 | | | 364 | |
365 | #if 0 | | 365 | #if 0 |
366 | static INLINE a_tmgr_t *mmuA2tmgr(mmu_long_dte_t *); | | 366 | static INLINE a_tmgr_t *mmuA2tmgr(mmu_long_dte_t *); |
367 | #endif /* 0 */ | | 367 | #endif /* 0 */ |
368 | static INLINE b_tmgr_t *mmuB2tmgr(mmu_short_dte_t *); | | 368 | static INLINE b_tmgr_t *mmuB2tmgr(mmu_short_dte_t *); |
369 | static INLINE c_tmgr_t *mmuC2tmgr(mmu_short_pte_t *); | | 369 | static INLINE c_tmgr_t *mmuC2tmgr(mmu_short_pte_t *); |
370 | | | 370 | |
371 | static INLINE pv_t *pa2pv(paddr_t); | | 371 | static INLINE pv_t *pa2pv(paddr_t); |
372 | static INLINE int pteidx(mmu_short_pte_t *); | | 372 | static INLINE int pteidx(mmu_short_pte_t *); |
373 | static INLINE pmap_t current_pmap(void); | | 373 | static INLINE pmap_t current_pmap(void); |
374 | | | 374 | |
375 | /* | | 375 | /* |
376 | * We can always convert between virtual and physical addresses | | 376 | * We can always convert between virtual and physical addresses |
377 | * for anything in the range [KERNBASE ... avail_start] because | | 377 | * for anything in the range [KERNBASE ... avail_start] because |
378 | * that range is GUARANTEED to be mapped linearly. | | 378 | * that range is GUARANTEED to be mapped linearly. |
379 | * We rely heavily upon this feature! | | 379 | * We rely heavily upon this feature! |
380 | */ | | 380 | */ |
381 | static INLINE void * | | 381 | static INLINE void * |
382 | mmu_ptov(paddr_t pa) | | 382 | mmu_ptov(paddr_t pa) |
383 | { | | 383 | { |
384 | vaddr_t va; | | 384 | vaddr_t va; |
385 | | | 385 | |
386 | va = (pa + KERNBASE3X); | | 386 | va = (pa + KERNBASE3X); |
387 | #ifdef PMAP_DEBUG | | 387 | #ifdef PMAP_DEBUG |
388 | if ((va < KERNBASE3X) || (va >= virtual_contig_end)) | | 388 | if ((va < KERNBASE3X) || (va >= virtual_contig_end)) |
389 | panic("mmu_ptov"); | | 389 | panic("mmu_ptov"); |
390 | #endif | | 390 | #endif |
391 | return (void *)va; | | 391 | return (void *)va; |
392 | } | | 392 | } |
393 | | | 393 | |
394 | static INLINE paddr_t | | 394 | static INLINE paddr_t |
395 | mmu_vtop(void *vva) | | 395 | mmu_vtop(void *vva) |
396 | { | | 396 | { |
397 | vaddr_t va; | | 397 | vaddr_t va; |
398 | | | 398 | |
399 | va = (vaddr_t)vva; | | 399 | va = (vaddr_t)vva; |
400 | #ifdef PMAP_DEBUG | | 400 | #ifdef PMAP_DEBUG |
401 | if ((va < KERNBASE3X) || (va >= virtual_contig_end)) | | 401 | if ((va < KERNBASE3X) || (va >= virtual_contig_end)) |
402 | panic("mmu_vtop"); | | 402 | panic("mmu_vtop"); |
403 | #endif | | 403 | #endif |
404 | return va - KERNBASE3X; | | 404 | return va - KERNBASE3X; |
405 | } | | 405 | } |
406 | | | 406 | |
407 | /* | | 407 | /* |
408 | * These macros map MMU tables to their corresponding manager structures. | | 408 | * These macros map MMU tables to their corresponding manager structures. |
409 | * They are needed quite often because many of the pointers in the pmap | | 409 | * They are needed quite often because many of the pointers in the pmap |
410 | * system reference MMU tables and not the structures that control them. | | 410 | * system reference MMU tables and not the structures that control them. |
411 | * There needs to be a way to find one when given the other and these | | 411 | * There needs to be a way to find one when given the other and these |
412 | * macros do so by taking advantage of the memory layout described above. | | 412 | * macros do so by taking advantage of the memory layout described above. |
413 | * Here's a quick step through the first macro, mmuA2tmgr(): | | 413 | * Here's a quick step through the first macro, mmuA2tmgr(): |
414 | * | | 414 | * |
415 | * 1) find the offset of the given MMU A table from the base of its table | | 415 | * 1) find the offset of the given MMU A table from the base of its table |
416 | * pool (table - mmuAbase). | | 416 | * pool (table - mmuAbase). |
417 | * 2) convert this offset into a table index by dividing it by the | | 417 | * 2) convert this offset into a table index by dividing it by the |
418 | * size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE) | | 418 | * size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE) |
419 | * 3) use this index to select the corresponding 'A' table manager | | 419 | * 3) use this index to select the corresponding 'A' table manager |
420 | * structure from the 'A' table manager pool (Atmgrbase[index]). | | 420 | * structure from the 'A' table manager pool (Atmgrbase[index]). |
421 | */ | | 421 | */ |
422 | /* This function is not currently used. */ | | 422 | /* This function is not currently used. */ |
423 | #if 0 | | 423 | #if 0 |
/* mmuA2tmgr			INTERNAL_X
 **
 * Given a pointer into the MMU 'A' table pool, return the address of
 * the a_tmgr_t structure that manages that table.
 * Note: currently compiled out (#if 0) — kept for reference.
 */
static INLINE a_tmgr_t *
mmuA2tmgr(mmu_long_dte_t *mmuAtbl)
{
	int idx;

	/* Which table is this in? */
	/* Pointer offset from the pool base, in whole A tables. */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef PMAP_DEBUG
	/* Sanity: the pointer must lie within the A-table pool. */
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return &Atmgrbase[idx];
}
437 | #endif /* 0 */ | | 437 | #endif /* 0 */ |
438 | | | 438 | |
439 | static INLINE b_tmgr_t * | | 439 | static INLINE b_tmgr_t * |
440 | mmuB2tmgr(mmu_short_dte_t *mmuBtbl) | | 440 | mmuB2tmgr(mmu_short_dte_t *mmuBtbl) |
441 | { | | 441 | { |
442 | int idx; | | 442 | int idx; |
443 | | | 443 | |
444 | /* Which table is this in? */ | | 444 | /* Which table is this in? */ |
445 | idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE; | | 445 | idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE; |
446 | #ifdef PMAP_DEBUG | | 446 | #ifdef PMAP_DEBUG |
447 | if ((idx < 0) || (idx >= NUM_B_TABLES)) | | 447 | if ((idx < 0) || (idx >= NUM_B_TABLES)) |
448 | panic("mmuB2tmgr"); | | 448 | panic("mmuB2tmgr"); |
449 | #endif | | 449 | #endif |
450 | return &Btmgrbase[idx]; | | 450 | return &Btmgrbase[idx]; |
451 | } | | 451 | } |
452 | | | 452 | |
453 | /* mmuC2tmgr INTERNAL | | 453 | /* mmuC2tmgr INTERNAL |
454 | ** | | 454 | ** |
455 | * Given a pte known to belong to a C table, return the address of | | 455 | * Given a pte known to belong to a C table, return the address of |
456 | * that table's management structure. | | 456 | * that table's management structure. |
457 | */ | | 457 | */ |
458 | static INLINE c_tmgr_t * | | 458 | static INLINE c_tmgr_t * |
459 | mmuC2tmgr(mmu_short_pte_t *mmuCtbl) | | 459 | mmuC2tmgr(mmu_short_pte_t *mmuCtbl) |
460 | { | | 460 | { |
461 | int idx; | | 461 | int idx; |
462 | | | 462 | |
463 | /* Which table is this in? */ | | 463 | /* Which table is this in? */ |
464 | idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE; | | 464 | idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE; |
465 | #ifdef PMAP_DEBUG | | 465 | #ifdef PMAP_DEBUG |
466 | if ((idx < 0) || (idx >= NUM_C_TABLES)) | | 466 | if ((idx < 0) || (idx >= NUM_C_TABLES)) |
467 | panic("mmuC2tmgr"); | | 467 | panic("mmuC2tmgr"); |
468 | #endif | | 468 | #endif |
469 | return &Ctmgrbase[idx]; | | 469 | return &Ctmgrbase[idx]; |
470 | } | | 470 | } |
471 | | | 471 | |
472 | /* This is now a function call below. | | 472 | /* This is now a function call below. |
473 | * #define pa2pv(pa) \ | | 473 | * #define pa2pv(pa) \ |
474 | * (&pvbase[(unsigned long)\ | | 474 | * (&pvbase[(unsigned long)\ |
475 | * m68k_btop(pa)\ | | 475 | * m68k_btop(pa)\ |
476 | * ]) | | 476 | * ]) |
477 | */ | | 477 | */ |
478 | | | 478 | |
479 | /* pa2pv INTERNAL | | 479 | /* pa2pv INTERNAL |
480 | ** | | 480 | ** |
481 | * Return the pv_list_head element which manages the given physical | | 481 | * Return the pv_list_head element which manages the given physical |
482 | * address. | | 482 | * address. |
483 | */ | | 483 | */ |
484 | static INLINE pv_t * | | 484 | static INLINE pv_t * |
485 | pa2pv(paddr_t pa) | | 485 | pa2pv(paddr_t pa) |
486 | { | | 486 | { |
487 | struct pmap_physmem_struct *bank; | | 487 | struct pmap_physmem_struct *bank; |
488 | int idx; | | 488 | int idx; |
489 | | | 489 | |
490 | bank = &avail_mem[0]; | | 490 | bank = &avail_mem[0]; |
491 | while (pa >= bank->pmem_end) | | 491 | while (pa >= bank->pmem_end) |
492 | bank = bank->pmem_next; | | 492 | bank = bank->pmem_next; |
493 | | | 493 | |
494 | pa -= bank->pmem_start; | | 494 | pa -= bank->pmem_start; |
495 | idx = bank->pmem_pvbase + m68k_btop(pa); | | 495 | idx = bank->pmem_pvbase + m68k_btop(pa); |
496 | #ifdef PMAP_DEBUG | | 496 | #ifdef PMAP_DEBUG |
497 | if ((idx < 0) || (idx >= physmem)) | | 497 | if ((idx < 0) || (idx >= physmem)) |
498 | panic("pa2pv"); | | 498 | panic("pa2pv"); |
499 | #endif | | 499 | #endif |
500 | return &pvbase[idx]; | | 500 | return &pvbase[idx]; |
501 | } | | 501 | } |
502 | | | 502 | |
503 | /* pteidx INTERNAL | | 503 | /* pteidx INTERNAL |
504 | ** | | 504 | ** |
505 | * Return the index of the given PTE within the entire fixed table of | | 505 | * Return the index of the given PTE within the entire fixed table of |
506 | * PTEs. | | 506 | * PTEs. |
507 | */ | | 507 | */ |
508 | static INLINE int | | 508 | static INLINE int |
509 | pteidx(mmu_short_pte_t *pte) | | 509 | pteidx(mmu_short_pte_t *pte) |
510 | { | | 510 | { |
511 | | | 511 | |
512 | return pte - kernCbase; | | 512 | return pte - kernCbase; |
513 | } | | 513 | } |
514 | | | 514 | |
515 | /* | | 515 | /* |
516 | * This just offers a place to put some debugging checks, | | 516 | * This just offers a place to put some debugging checks, |
517 | * and reduces the number of places "curlwp" appears... | | 517 | * and reduces the number of places "curlwp" appears... |
518 | */ | | 518 | */ |
519 | static INLINE pmap_t | | 519 | static INLINE pmap_t |
520 | current_pmap(void) | | 520 | current_pmap(void) |
521 | { | | 521 | { |
522 | struct vmspace *vm; | | 522 | struct vmspace *vm; |
523 | struct vm_map *map; | | 523 | struct vm_map *map; |
524 | pmap_t pmap; | | 524 | pmap_t pmap; |
525 | | | 525 | |
526 | vm = curproc->p_vmspace; | | 526 | vm = curproc->p_vmspace; |
527 | map = &vm->vm_map; | | 527 | map = &vm->vm_map; |
528 | pmap = vm_map_pmap(map); | | 528 | pmap = vm_map_pmap(map); |
529 | | | 529 | |
530 | return pmap; | | 530 | return pmap; |
531 | } | | 531 | } |
532 | | | 532 | |
533 | | | 533 | |
534 | /*************************** FUNCTION DEFINITIONS ************************ | | 534 | /*************************** FUNCTION DEFINITIONS ************************ |
535 | * These appear here merely for the compiler to enforce type checking on * | | 535 | * These appear here merely for the compiler to enforce type checking on * |
536 | * all function calls. * | | 536 | * all function calls. * |
537 | *************************************************************************/ | | 537 | *************************************************************************/ |
538 | | | 538 | |
539 | /* | | 539 | /* |
540 | * Internal functions | | 540 | * Internal functions |
541 | */ | | 541 | */ |
542 | a_tmgr_t *get_a_table(void); | | 542 | a_tmgr_t *get_a_table(void); |
543 | b_tmgr_t *get_b_table(void); | | 543 | b_tmgr_t *get_b_table(void); |
544 | c_tmgr_t *get_c_table(void); | | 544 | c_tmgr_t *get_c_table(void); |
545 | int free_a_table(a_tmgr_t *, bool); | | 545 | int free_a_table(a_tmgr_t *, bool); |
546 | int free_b_table(b_tmgr_t *, bool); | | 546 | int free_b_table(b_tmgr_t *, bool); |
547 | int free_c_table(c_tmgr_t *, bool); | | 547 | int free_c_table(c_tmgr_t *, bool); |
548 | | | 548 | |
549 | void pmap_bootstrap_aalign(int); | | 549 | void pmap_bootstrap_aalign(int); |
550 | void pmap_alloc_usermmu(void); | | 550 | void pmap_alloc_usermmu(void); |
551 | void pmap_alloc_usertmgr(void); | | 551 | void pmap_alloc_usertmgr(void); |
552 | void pmap_alloc_pv(void); | | 552 | void pmap_alloc_pv(void); |
553 | void pmap_init_a_tables(void); | | 553 | void pmap_init_a_tables(void); |
554 | void pmap_init_b_tables(void); | | 554 | void pmap_init_b_tables(void); |
555 | void pmap_init_c_tables(void); | | 555 | void pmap_init_c_tables(void); |
556 | void pmap_init_pv(void); | | 556 | void pmap_init_pv(void); |
557 | void pmap_clear_pv(paddr_t, int); | | 557 | void pmap_clear_pv(paddr_t, int); |
558 | static INLINE bool is_managed(paddr_t); | | 558 | static INLINE bool is_managed(paddr_t); |
559 | | | 559 | |
560 | bool pmap_remove_a(a_tmgr_t *, vaddr_t, vaddr_t); | | 560 | bool pmap_remove_a(a_tmgr_t *, vaddr_t, vaddr_t); |
561 | bool pmap_remove_b(b_tmgr_t *, vaddr_t, vaddr_t); | | 561 | bool pmap_remove_b(b_tmgr_t *, vaddr_t, vaddr_t); |
562 | bool pmap_remove_c(c_tmgr_t *, vaddr_t, vaddr_t); | | 562 | bool pmap_remove_c(c_tmgr_t *, vaddr_t, vaddr_t); |
563 | void pmap_remove_pte(mmu_short_pte_t *); | | 563 | void pmap_remove_pte(mmu_short_pte_t *); |
564 | | | 564 | |
565 | void pmap_enter_kernel(vaddr_t, paddr_t, vm_prot_t); | | 565 | void pmap_enter_kernel(vaddr_t, paddr_t, vm_prot_t); |
566 | static INLINE void pmap_remove_kernel(vaddr_t, vaddr_t); | | 566 | static INLINE void pmap_remove_kernel(vaddr_t, vaddr_t); |
567 | static INLINE void pmap_protect_kernel(vaddr_t, vaddr_t, vm_prot_t); | | 567 | static INLINE void pmap_protect_kernel(vaddr_t, vaddr_t, vm_prot_t); |
568 | static INLINE bool pmap_extract_kernel(vaddr_t, paddr_t *); | | 568 | static INLINE bool pmap_extract_kernel(vaddr_t, paddr_t *); |
569 | vaddr_t pmap_get_pteinfo(u_int, pmap_t *, c_tmgr_t **); | | 569 | vaddr_t pmap_get_pteinfo(u_int, pmap_t *, c_tmgr_t **); |
570 | static INLINE int pmap_dereference(pmap_t); | | 570 | static INLINE int pmap_dereference(pmap_t); |
571 | | | 571 | |
572 | bool pmap_stroll(pmap_t, vaddr_t, a_tmgr_t **, b_tmgr_t **, c_tmgr_t **, | | 572 | bool pmap_stroll(pmap_t, vaddr_t, a_tmgr_t **, b_tmgr_t **, c_tmgr_t **, |
573 | mmu_short_pte_t **, int *, int *, int *); | | 573 | mmu_short_pte_t **, int *, int *, int *); |
574 | void pmap_bootstrap_copyprom(void); | | 574 | void pmap_bootstrap_copyprom(void); |
575 | void pmap_takeover_mmu(void); | | 575 | void pmap_takeover_mmu(void); |
576 | void pmap_bootstrap_setprom(void); | | 576 | void pmap_bootstrap_setprom(void); |
577 | static void pmap_page_upload(void); | | 577 | static void pmap_page_upload(void); |
578 | | | 578 | |
579 | #ifdef PMAP_DEBUG | | 579 | #ifdef PMAP_DEBUG |
580 | /* Debugging function definitions */ | | 580 | /* Debugging function definitions */ |
581 | void pv_list(paddr_t, int); | | 581 | void pv_list(paddr_t, int); |
582 | #endif /* PMAP_DEBUG */ | | 582 | #endif /* PMAP_DEBUG */ |
583 | | | 583 | |
584 | /** Interface functions | | 584 | /** Interface functions |
585 | ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG | | 585 | ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG |
586 | ** defined. | | 586 | ** defined. |
587 | ** The new UVM doesn't require them so now INTERNAL. | | 587 | ** The new UVM doesn't require them so now INTERNAL. |
588 | **/ | | 588 | **/ |
589 | static INLINE void pmap_pinit(pmap_t); | | 589 | static INLINE void pmap_pinit(pmap_t); |
590 | static INLINE void pmap_release(pmap_t); | | 590 | static INLINE void pmap_release(pmap_t); |
591 | | | 591 | |
592 | /********************************** CODE ******************************** | | 592 | /********************************** CODE ******************************** |
593 | * Functions that are called from other parts of the kernel are labeled * | | 593 | * Functions that are called from other parts of the kernel are labeled * |
594 | * as 'INTERFACE' functions. Functions that are only called from * | | 594 | * as 'INTERFACE' functions. Functions that are only called from * |
595 | * within the pmap module are labeled as 'INTERNAL' functions. * | | 595 | * within the pmap module are labeled as 'INTERNAL' functions. * |
596 | * Functions that are internal, but are not (currently) used at all are * | | 596 | * Functions that are internal, but are not (currently) used at all are * |
597 | * labeled 'INTERNAL_X'. * | | 597 | * labeled 'INTERNAL_X'. * |
598 | ************************************************************************/ | | 598 | ************************************************************************/ |
599 | | | 599 | |
600 | /* pmap_bootstrap INTERNAL | | 600 | /* pmap_bootstrap INTERNAL |
601 | ** | | 601 | ** |
602 | * Initializes the pmap system. Called at boot time from | | 602 | * Initializes the pmap system. Called at boot time from |
603 | * locore2.c:_vm_init() | | 603 | * locore2.c:_vm_init() |
604 | * | | 604 | * |
605 | * Reminder: having a pmap_bootstrap_alloc() and also having the VM | | 605 | * Reminder: having a pmap_bootstrap_alloc() and also having the VM |
606 | * system implement pmap_steal_memory() is redundant. | | 606 | * system implement pmap_steal_memory() is redundant. |
607 | * Don't release this code without removing one or the other! | | 607 | * Don't release this code without removing one or the other! |
608 | */ | | 608 | */ |
609 | void | | 609 | void |
610 | pmap_bootstrap(vaddr_t nextva) | | 610 | pmap_bootstrap(vaddr_t nextva) |
611 | { | | 611 | { |
612 | struct physmemory *membank; | | 612 | struct physmemory *membank; |
613 | struct pmap_physmem_struct *pmap_membank; | | 613 | struct pmap_physmem_struct *pmap_membank; |
614 | vaddr_t va, eva; | | 614 | vaddr_t va, eva; |
615 | paddr_t pa; | | 615 | paddr_t pa; |
616 | int b, c, i, j; /* running table counts */ | | 616 | int b, c, i, j; /* running table counts */ |
617 | int size, resvmem; | | 617 | int size, resvmem; |
618 | | | 618 | |
619 | /* | | 619 | /* |
620 | * This function is called by __bootstrap after it has | | 620 | * This function is called by __bootstrap after it has |
621 | * determined the type of machine and made the appropriate | | 621 | * determined the type of machine and made the appropriate |
622 | * patches to the ROM vectors (XXX- I don't quite know what I meant | | 622 | * patches to the ROM vectors (XXX- I don't quite know what I meant |
623 | * by that.) It allocates and sets up enough of the pmap system | | 623 | * by that.) It allocates and sets up enough of the pmap system |
624 | * to manage the kernel's address space. | | 624 | * to manage the kernel's address space. |
625 | */ | | 625 | */ |
626 | | | 626 | |
627 | /* | | 627 | /* |
628 | * Determine the range of kernel virtual and physical | | 628 | * Determine the range of kernel virtual and physical |
629 | * space available. Note that we ABSOLUTELY DEPEND on | | 629 | * space available. Note that we ABSOLUTELY DEPEND on |
630 | * the fact that the first bank of memory (4MB) is | | 630 | * the fact that the first bank of memory (4MB) is |
631 | * mapped linearly to KERNBASE (which we guaranteed in | | 631 | * mapped linearly to KERNBASE (which we guaranteed in |
632 | * the first instructions of locore.s). | | 632 | * the first instructions of locore.s). |
633 | * That is plenty for our bootstrap work. | | 633 | * That is plenty for our bootstrap work. |
634 | */ | | 634 | */ |
635 | virtual_avail = m68k_round_page(nextva); | | 635 | virtual_avail = m68k_round_page(nextva); |
636 | virtual_contig_end = KERNBASE3X + 0x400000; /* +4MB */ | | 636 | virtual_contig_end = KERNBASE3X + 0x400000; /* +4MB */ |
637 | virtual_end = VM_MAX_KERNEL_ADDRESS; | | 637 | virtual_end = VM_MAX_KERNEL_ADDRESS; |
638 | /* Don't need avail_start til later. */ | | 638 | /* Don't need avail_start til later. */ |
639 | | | 639 | |
640 | /* We may now call pmap_bootstrap_alloc(). */ | | 640 | /* We may now call pmap_bootstrap_alloc(). */ |
641 | bootstrap_alloc_enabled = true; | | 641 | bootstrap_alloc_enabled = true; |
642 | | | 642 | |
643 | /* | | 643 | /* |
644 | * This is a somewhat unwrapped loop to deal with | | 644 | * This is a somewhat unwrapped loop to deal with |
645 | * copying the PROM's 'phsymem' banks into the pmap's | | 645 | * copying the PROM's 'phsymem' banks into the pmap's |
646 | * banks. The following is always assumed: | | 646 | * banks. The following is always assumed: |
647 | * 1. There is always at least one bank of memory. | | 647 | * 1. There is always at least one bank of memory. |
648 | * 2. There is always a last bank of memory, and its | | 648 | * 2. There is always a last bank of memory, and its |
649 | * pmem_next member must be set to NULL. | | 649 | * pmem_next member must be set to NULL. |
650 | */ | | 650 | */ |
651 | membank = romVectorPtr->v_physmemory; | | 651 | membank = romVectorPtr->v_physmemory; |
652 | pmap_membank = avail_mem; | | 652 | pmap_membank = avail_mem; |
653 | total_phys_mem = 0; | | 653 | total_phys_mem = 0; |
654 | | | 654 | |
655 | for (;;) { /* break on !membank */ | | 655 | for (;;) { /* break on !membank */ |
656 | pmap_membank->pmem_start = membank->address; | | 656 | pmap_membank->pmem_start = membank->address; |
657 | pmap_membank->pmem_end = membank->address + membank->size; | | 657 | pmap_membank->pmem_end = membank->address + membank->size; |
658 | total_phys_mem += membank->size; | | 658 | total_phys_mem += membank->size; |
659 | membank = membank->next; | | 659 | membank = membank->next; |
660 | if (!membank) | | 660 | if (!membank) |
661 | break; | | 661 | break; |
662 | /* This silly syntax arises because pmap_membank | | 662 | /* This silly syntax arises because pmap_membank |
663 | * is really a pre-allocated array, but it is put into | | 663 | * is really a pre-allocated array, but it is put into |
664 | * use as a linked list. | | 664 | * use as a linked list. |
665 | */ | | 665 | */ |
666 | pmap_membank->pmem_next = pmap_membank + 1; | | 666 | pmap_membank->pmem_next = pmap_membank + 1; |
667 | pmap_membank = pmap_membank->pmem_next; | | 667 | pmap_membank = pmap_membank->pmem_next; |
668 | } | | 668 | } |
669 | /* This is the last element. */ | | 669 | /* This is the last element. */ |
670 | pmap_membank->pmem_next = NULL; | | 670 | pmap_membank->pmem_next = NULL; |
671 | | | 671 | |
672 | /* | | 672 | /* |
673 | * Note: total_phys_mem, physmem represent | | 673 | * Note: total_phys_mem, physmem represent |
674 | * actual physical memory, including that | | 674 | * actual physical memory, including that |
675 | * reserved for the PROM monitor. | | 675 | * reserved for the PROM monitor. |
676 | */ | | 676 | */ |
677 | physmem = btoc(total_phys_mem); | | 677 | physmem = btoc(total_phys_mem); |
678 | | | 678 | |
679 | /* | | 679 | /* |
680 | * Avail_end is set to the first byte of physical memory | | 680 | * Avail_end is set to the first byte of physical memory |
681 | * after the end of the last bank. We use this only to | | 681 | * after the end of the last bank. We use this only to |
682 | * determine if a physical address is "managed" memory. | | 682 | * determine if a physical address is "managed" memory. |
683 | * This address range should be reduced to prevent the | | 683 | * This address range should be reduced to prevent the |
684 | * physical pages needed by the PROM monitor from being used | | 684 | * physical pages needed by the PROM monitor from being used |
685 | * in the VM system. | | 685 | * in the VM system. |
686 | */ | | 686 | */ |
687 | resvmem = total_phys_mem - *(romVectorPtr->memoryAvail); | | 687 | resvmem = total_phys_mem - *(romVectorPtr->memoryAvail); |
688 | resvmem = m68k_round_page(resvmem); | | 688 | resvmem = m68k_round_page(resvmem); |
689 | avail_end = pmap_membank->pmem_end - resvmem; | | 689 | avail_end = pmap_membank->pmem_end - resvmem; |
690 | | | 690 | |
691 | /* | | 691 | /* |
692 | * First allocate enough kernel MMU tables to map all | | 692 | * First allocate enough kernel MMU tables to map all |
693 | * of kernel virtual space from KERNBASE to 0xFFFFFFFF. | | 693 | * of kernel virtual space from KERNBASE to 0xFFFFFFFF. |
694 | * Note: All must be aligned on 256 byte boundaries. | | 694 | * Note: All must be aligned on 256 byte boundaries. |
695 | * Start with the level-A table (one of those). | | 695 | * Start with the level-A table (one of those). |
696 | */ | | 696 | */ |
697 | size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE; | | 697 | size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE; |
698 | kernAbase = pmap_bootstrap_alloc(size); | | 698 | kernAbase = pmap_bootstrap_alloc(size); |
699 | memset(kernAbase, 0, size); | | 699 | memset(kernAbase, 0, size); |
700 | | | 700 | |
701 | /* Now the level-B kernel tables... */ | | 701 | /* Now the level-B kernel tables... */ |
702 | size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES; | | 702 | size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES; |
703 | kernBbase = pmap_bootstrap_alloc(size); | | 703 | kernBbase = pmap_bootstrap_alloc(size); |
704 | memset(kernBbase, 0, size); | | 704 | memset(kernBbase, 0, size); |
705 | | | 705 | |
706 | /* Now the level-C kernel tables... */ | | 706 | /* Now the level-C kernel tables... */ |
707 | size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES; | | 707 | size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES; |
708 | kernCbase = pmap_bootstrap_alloc(size); | | 708 | kernCbase = pmap_bootstrap_alloc(size); |
709 | memset(kernCbase, 0, size); | | 709 | memset(kernCbase, 0, size); |
710 | /* | | 710 | /* |
711 | * Note: In order for the PV system to work correctly, the kernel | | 711 | * Note: In order for the PV system to work correctly, the kernel |
712 | * and user-level C tables must be allocated contiguously. | | 712 | * and user-level C tables must be allocated contiguously. |
713 | * Nothing should be allocated between here and the allocation of | | 713 | * Nothing should be allocated between here and the allocation of |
714 | * mmuCbase below. XXX: Should do this as one allocation, and | | 714 | * mmuCbase below. XXX: Should do this as one allocation, and |
715 | * then compute a pointer for mmuCbase instead of this... | | 715 | * then compute a pointer for mmuCbase instead of this... |
716 | * | | 716 | * |
717 | * Allocate user MMU tables. | | 717 | * Allocate user MMU tables. |
718 | * These must be contiguous with the preceding. | | 718 | * These must be contiguous with the preceding. |
719 | */ | | 719 | */ |
720 | | | 720 | |
721 | #ifndef FIXED_NTABLES | | 721 | #ifndef FIXED_NTABLES |
722 | /* | | 722 | /* |
723 | * The number of user-level C tables that should be allocated is | | 723 | * The number of user-level C tables that should be allocated is |
724 | * related to the size of physical memory. In general, there should | | 724 | * related to the size of physical memory. In general, there should |
725 | * be enough tables to map four times the amount of available RAM. | | 725 | * be enough tables to map four times the amount of available RAM. |
726 | * The extra amount is needed because some table space is wasted by | | 726 | * The extra amount is needed because some table space is wasted by |
727 | * fragmentation. | | 727 | * fragmentation. |
728 | */ | | 728 | */ |
729 | NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE); | | 729 | NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE); |
730 | NUM_B_TABLES = NUM_C_TABLES / 2; | | 730 | NUM_B_TABLES = NUM_C_TABLES / 2; |
731 | NUM_A_TABLES = NUM_B_TABLES / 2; | | 731 | NUM_A_TABLES = NUM_B_TABLES / 2; |
732 | #endif /* !FIXED_NTABLES */ | | 732 | #endif /* !FIXED_NTABLES */ |
733 | | | 733 | |
734 | size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES; | | 734 | size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES; |
735 | mmuCbase = pmap_bootstrap_alloc(size); | | 735 | mmuCbase = pmap_bootstrap_alloc(size); |
736 | | | 736 | |
737 | size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES; | | 737 | size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES; |
738 | mmuBbase = pmap_bootstrap_alloc(size); | | 738 | mmuBbase = pmap_bootstrap_alloc(size); |
739 | | | 739 | |
740 | size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES; | | 740 | size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES; |
741 | mmuAbase = pmap_bootstrap_alloc(size); | | 741 | mmuAbase = pmap_bootstrap_alloc(size); |
742 | | | 742 | |
743 | /* | | 743 | /* |
744 | * Fill in the never-changing part of the kernel tables. | | 744 | * Fill in the never-changing part of the kernel tables. |
745 | * For simplicity, the kernel's mappings will be editable as a | | 745 | * For simplicity, the kernel's mappings will be editable as a |
746 | * flat array of page table entries at kernCbase. The | | 746 | * flat array of page table entries at kernCbase. The |
747 | * higher level 'A' and 'B' tables must be initialized to point | | 747 | * higher level 'A' and 'B' tables must be initialized to point |
748 | * to this lower one. | | 748 | * to this lower one. |
749 | */ | | 749 | */ |
750 | b = c = 0; | | 750 | b = c = 0; |
751 | | | 751 | |
752 | /* | | 752 | /* |
753 | * Invalidate all mappings below KERNBASE in the A table. | | 753 | * Invalidate all mappings below KERNBASE in the A table. |
754 | * This area has already been zeroed out, but it is good | | 754 | * This area has already been zeroed out, but it is good |
755 | * practice to explicitly show that we are interpreting | | 755 | * practice to explicitly show that we are interpreting |
756 | * it as a list of A table descriptors. | | 756 | * it as a list of A table descriptors. |
757 | */ | | 757 | */ |
758 | for (i = 0; i < MMU_TIA(KERNBASE3X); i++) { | | 758 | for (i = 0; i < MMU_TIA(KERNBASE3X); i++) { |
759 | kernAbase[i].addr.raw = 0; | | 759 | kernAbase[i].addr.raw = 0; |
760 | } | | 760 | } |
761 | | | 761 | |
762 | /* | | 762 | /* |
763 | * Set up the kernel A and B tables so that they will reference the | | 763 | * Set up the kernel A and B tables so that they will reference the |
764 | * correct spots in the contiguous table of PTEs allocated for the | | 764 | * correct spots in the contiguous table of PTEs allocated for the |
765 | * kernel's virtual memory space. | | 765 | * kernel's virtual memory space. |
766 | */ | | 766 | */ |
767 | for (i = MMU_TIA(KERNBASE3X); i < MMU_A_TBL_SIZE; i++) { | | 767 | for (i = MMU_TIA(KERNBASE3X); i < MMU_A_TBL_SIZE; i++) { |
768 | kernAbase[i].attr.raw = | | 768 | kernAbase[i].attr.raw = |
769 | MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT; | | 769 | MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT; |
770 | kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]); | | 770 | kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]); |
771 | | | 771 | |
772 | for (j = 0; j < MMU_B_TBL_SIZE; j++) { | | 772 | for (j = 0; j < MMU_B_TBL_SIZE; j++) { |
773 | kernBbase[b + j].attr.raw = | | 773 | kernBbase[b + j].attr.raw = |
774 | mmu_vtop(&kernCbase[c]) | MMU_DT_SHORT; | | 774 | mmu_vtop(&kernCbase[c]) | MMU_DT_SHORT; |
775 | c += MMU_C_TBL_SIZE; | | 775 | c += MMU_C_TBL_SIZE; |
776 | } | | 776 | } |
777 | b += MMU_B_TBL_SIZE; | | 777 | b += MMU_B_TBL_SIZE; |
778 | } | | 778 | } |
779 | | | 779 | |
780 | pmap_alloc_usermmu(); /* Allocate user MMU tables. */ | | 780 | pmap_alloc_usermmu(); /* Allocate user MMU tables. */ |
781 | pmap_alloc_usertmgr(); /* Allocate user MMU table managers.*/ | | 781 | pmap_alloc_usertmgr(); /* Allocate user MMU table managers.*/ |
782 | pmap_alloc_pv(); /* Allocate physical->virtual map. */ | | 782 | pmap_alloc_pv(); /* Allocate physical->virtual map. */ |
783 | | | 783 | |
784 | /* | | 784 | /* |
785 | * We are now done with pmap_bootstrap_alloc(). Round up | | 785 | * We are now done with pmap_bootstrap_alloc(). Round up |
786 | * `virtual_avail' to the nearest page, and set the flag | | 786 | * `virtual_avail' to the nearest page, and set the flag |
787 | * to prevent use of pmap_bootstrap_alloc() hereafter. | | 787 | * to prevent use of pmap_bootstrap_alloc() hereafter. |
788 | */ | | 788 | */ |
789 | pmap_bootstrap_aalign(PAGE_SIZE); | | 789 | pmap_bootstrap_aalign(PAGE_SIZE); |
790 | bootstrap_alloc_enabled = false; | | 790 | bootstrap_alloc_enabled = false; |
791 | | | 791 | |
792 | /* | | 792 | /* |
793 | * Now that we are done with pmap_bootstrap_alloc(), we | | 793 | * Now that we are done with pmap_bootstrap_alloc(), we |
794 | * must save the virtual and physical addresses of the | | 794 | * must save the virtual and physical addresses of the |
795 | * end of the linearly mapped range, which are stored in | | 795 | * end of the linearly mapped range, which are stored in |
796 | * virtual_contig_end and avail_start, respectively. | | 796 | * virtual_contig_end and avail_start, respectively. |
797 | * These variables will never change after this point. | | 797 | * These variables will never change after this point. |
798 | */ | | 798 | */ |
799 | virtual_contig_end = virtual_avail; | | 799 | virtual_contig_end = virtual_avail; |
800 | avail_start = virtual_avail - KERNBASE3X; | | 800 | avail_start = virtual_avail - KERNBASE3X; |
801 | | | 801 | |
802 | /* | | 802 | /* |
803 | * `avail_next' is a running pointer used by pmap_next_page() to | | 803 | * `avail_next' is a running pointer used by pmap_next_page() to |
804 | * keep track of the next available physical page to be handed | | 804 | * keep track of the next available physical page to be handed |
805 | * to the VM system during its initialization, in which it | | 805 | * to the VM system during its initialization, in which it |
806 | * asks for physical pages, one at a time. | | 806 | * asks for physical pages, one at a time. |
807 | */ | | 807 | */ |
808 | avail_next = avail_start; | | 808 | avail_next = avail_start; |
809 | | | 809 | |
810 | /* | | 810 | /* |
811 | * Now allocate some virtual addresses, but not the physical pages | | 811 | * Now allocate some virtual addresses, but not the physical pages |
812 | * behind them. Note that virtual_avail is already page-aligned. | | 812 | * behind them. Note that virtual_avail is already page-aligned. |
813 | * | | 813 | * |
814 | * tmp_vpages[] is an array of two virtual pages used for temporary | | 814 | * tmp_vpages[] is an array of two virtual pages used for temporary |
815 | * kernel mappings in the pmap module to facilitate various physical | | 815 | * kernel mappings in the pmap module to facilitate various physical |
 816 |  * address-oriented operations. | | 816 |  * address-oriented operations. |
817 | */ | | 817 | */ |
818 | tmp_vpages[0] = virtual_avail; | | 818 | tmp_vpages[0] = virtual_avail; |
819 | virtual_avail += PAGE_SIZE; | | 819 | virtual_avail += PAGE_SIZE; |
820 | tmp_vpages[1] = virtual_avail; | | 820 | tmp_vpages[1] = virtual_avail; |
821 | virtual_avail += PAGE_SIZE; | | 821 | virtual_avail += PAGE_SIZE; |
822 | | | 822 | |
823 | /** Initialize the PV system **/ | | 823 | /** Initialize the PV system **/ |
824 | pmap_init_pv(); | | 824 | pmap_init_pv(); |
825 | | | 825 | |
826 | /* | | 826 | /* |
827 | * Fill in the kernel_pmap structure and kernel_crp. | | 827 | * Fill in the kernel_pmap structure and kernel_crp. |
828 | */ | | 828 | */ |
829 | kernAphys = mmu_vtop(kernAbase); | | 829 | kernAphys = mmu_vtop(kernAbase); |
830 | kernel_pmap.pm_a_tmgr = NULL; | | 830 | kernel_pmap.pm_a_tmgr = NULL; |
831 | kernel_pmap.pm_a_phys = kernAphys; | | 831 | kernel_pmap.pm_a_phys = kernAphys; |
832 | kernel_pmap.pm_refcount = 1; /* always in use */ | | 832 | kernel_pmap.pm_refcount = 1; /* always in use */ |
833 | | | 833 | |
834 | kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG; | | 834 | kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG; |
835 | kernel_crp.rp_addr = kernAphys; | | 835 | kernel_crp.rp_addr = kernAphys; |
836 | | | 836 | |
837 | /* | | 837 | /* |
838 | * Now pmap_enter_kernel() may be used safely and will be | | 838 | * Now pmap_enter_kernel() may be used safely and will be |
839 | * the main interface used hereafter to modify the kernel's | | 839 | * the main interface used hereafter to modify the kernel's |
840 | * virtual address space. Note that since we are still running | | 840 | * virtual address space. Note that since we are still running |
841 | * under the PROM's address table, none of these table modifications | | 841 | * under the PROM's address table, none of these table modifications |
842 | * actually take effect until pmap_takeover_mmu() is called. | | 842 | * actually take effect until pmap_takeover_mmu() is called. |
843 | * | | 843 | * |
844 | * Note: Our tables do NOT have the PROM linear mappings! | | 844 | * Note: Our tables do NOT have the PROM linear mappings! |
845 | * Only the mappings created here exist in our tables, so | | 845 | * Only the mappings created here exist in our tables, so |
846 | * remember to map anything we expect to use. | | 846 | * remember to map anything we expect to use. |
847 | */ | | 847 | */ |
848 | va = (vaddr_t)KERNBASE3X; | | 848 | va = (vaddr_t)KERNBASE3X; |
849 | pa = 0; | | 849 | pa = 0; |
850 | | | 850 | |
851 | /* | | 851 | /* |
852 | * The first page of the kernel virtual address space is the msgbuf | | 852 | * The first page of the kernel virtual address space is the msgbuf |
853 | * page. The page attributes (data, non-cached) are set here, while | | 853 | * page. The page attributes (data, non-cached) are set here, while |
854 | * the address is assigned to this global pointer in cpu_startup(). | | 854 | * the address is assigned to this global pointer in cpu_startup(). |
855 | * It is non-cached, mostly due to paranoia. | | 855 | * It is non-cached, mostly due to paranoia. |
856 | */ | | 856 | */ |
857 | pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL); | | 857 | pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL); |
858 | va += PAGE_SIZE; | | 858 | va += PAGE_SIZE; |
859 | pa += PAGE_SIZE; | | 859 | pa += PAGE_SIZE; |
860 | | | 860 | |
861 | /* Next page is used as the temporary stack. */ | | 861 | /* Next page is used as the temporary stack. */ |
862 | pmap_enter_kernel(va, pa, VM_PROT_ALL); | | 862 | pmap_enter_kernel(va, pa, VM_PROT_ALL); |
863 | va += PAGE_SIZE; | | 863 | va += PAGE_SIZE; |
864 | pa += PAGE_SIZE; | | 864 | pa += PAGE_SIZE; |
865 | | | 865 | |
866 | /* | | 866 | /* |
867 | * Map all of the kernel's text segment as read-only and cacheable. | | 867 | * Map all of the kernel's text segment as read-only and cacheable. |
868 | * (Cacheable is implied by default). Unfortunately, the last bytes | | 868 | * (Cacheable is implied by default). Unfortunately, the last bytes |
869 | * of kernel text and the first bytes of kernel data will often be | | 869 | * of kernel text and the first bytes of kernel data will often be |
870 | * sharing the same page. Therefore, the last page of kernel text | | 870 | * sharing the same page. Therefore, the last page of kernel text |
871 | * has to be mapped as read/write, to accommodate the data. | | 871 | * has to be mapped as read/write, to accommodate the data. |
872 | */ | | 872 | */ |
873 | eva = m68k_trunc_page((vaddr_t)etext); | | 873 | eva = m68k_trunc_page((vaddr_t)etext); |
874 | for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE) | | 874 | for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE) |
875 | pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE); | | 875 | pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE); |
876 | | | 876 | |
877 | /* | | 877 | /* |
878 | * Map all of the kernel's data as read/write and cacheable. | | 878 | * Map all of the kernel's data as read/write and cacheable. |
879 | * This includes: data, BSS, symbols, and everything in the | | 879 | * This includes: data, BSS, symbols, and everything in the |
880 | * contiguous memory used by pmap_bootstrap_alloc() | | 880 | * contiguous memory used by pmap_bootstrap_alloc() |
881 | */ | | 881 | */ |
882 | for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE) | | 882 | for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE) |
883 | pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE); | | 883 | pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE); |
884 | | | 884 | |
885 | /* | | 885 | /* |
886 | * At this point we are almost ready to take over the MMU. But first | | 886 | * At this point we are almost ready to take over the MMU. But first |
887 | * we must save the PROM's address space in our map, as we call its | | 887 | * we must save the PROM's address space in our map, as we call its |
888 | * routines and make references to its data later in the kernel. | | 888 | * routines and make references to its data later in the kernel. |
889 | */ | | 889 | */ |
890 | pmap_bootstrap_copyprom(); | | 890 | pmap_bootstrap_copyprom(); |
891 | pmap_takeover_mmu(); | | 891 | pmap_takeover_mmu(); |
892 | pmap_bootstrap_setprom(); | | 892 | pmap_bootstrap_setprom(); |
893 | | | 893 | |
894 | /* Notify the VM system of our page size. */ | | 894 | /* Notify the VM system of our page size. */ |
895 | uvmexp.pagesize = PAGE_SIZE; | | 895 | uvmexp.pagesize = PAGE_SIZE; |
896 | uvm_md_init(); | | 896 | uvm_md_init(); |
897 | | | 897 | |
898 | pmap_page_upload(); | | 898 | pmap_page_upload(); |
899 | } | | 899 | } |
900 | | | 900 | |
901 | | | 901 | |
/* pmap_alloc_usermmu			INTERNAL
**
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 *
 * Currently a no-op stub: the user MMU table arrays (mmuAbase,
 * mmuBbase, mmuCbase) are allocated directly in pmap_bootstrap(),
 * but the call site is kept so the allocation step remains visible
 * in the bootstrap sequence.
 */
void
pmap_alloc_usermmu(void)
{

	/* XXX: Moved into caller. */
}
913 | | | 913 | |
914 | /* pmap_alloc_pv INTERNAL | | 914 | /* pmap_alloc_pv INTERNAL |
915 | ** | | 915 | ** |
916 | * Called from pmap_bootstrap() to allocate the physical | | 916 | * Called from pmap_bootstrap() to allocate the physical |
917 | * to virtual mapping list. Each physical page of memory | | 917 | * to virtual mapping list. Each physical page of memory |
918 | * in the system has a corresponding element in this list. | | 918 | * in the system has a corresponding element in this list. |
919 | */ | | 919 | */ |
920 | void | | 920 | void |
921 | pmap_alloc_pv(void) | | 921 | pmap_alloc_pv(void) |
922 | { | | 922 | { |
923 | int i; | | 923 | int i; |
924 | unsigned int total_mem; | | 924 | unsigned int total_mem; |
925 | | | 925 | |
926 | /* | | 926 | /* |
927 | * Allocate a pv_head structure for every page of physical | | 927 | * Allocate a pv_head structure for every page of physical |
928 | * memory that will be managed by the system. Since memory on | | 928 | * memory that will be managed by the system. Since memory on |
929 | * the 3/80 is non-contiguous, we cannot arrive at a total page | | 929 | * the 3/80 is non-contiguous, we cannot arrive at a total page |
930 | * count by subtraction of the lowest available address from the | | 930 | * count by subtraction of the lowest available address from the |
931 | * highest, but rather we have to step through each memory | | 931 | * highest, but rather we have to step through each memory |
932 | * bank and add the number of pages in each to the total. | | 932 | * bank and add the number of pages in each to the total. |
933 | * | | 933 | * |
934 | * At this time we also initialize the offset of each bank's | | 934 | * At this time we also initialize the offset of each bank's |
935 | * starting pv_head within the pv_head list so that the physical | | 935 | * starting pv_head within the pv_head list so that the physical |
936 | * memory state routines (pmap_is_referenced(), | | 936 | * memory state routines (pmap_is_referenced(), |
937 | * pmap_is_modified(), et al.) can quickly find coresponding | | 937 | * pmap_is_modified(), et al.) can quickly find coresponding |
938 | * pv_heads in spite of the non-contiguity. | | 938 | * pv_heads in spite of the non-contiguity. |
939 | */ | | 939 | */ |
940 | total_mem = 0; | | 940 | total_mem = 0; |
941 | for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) { | | 941 | for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) { |
942 | avail_mem[i].pmem_pvbase = m68k_btop(total_mem); | | 942 | avail_mem[i].pmem_pvbase = m68k_btop(total_mem); |
943 | total_mem += avail_mem[i].pmem_end - avail_mem[i].pmem_start; | | 943 | total_mem += avail_mem[i].pmem_end - avail_mem[i].pmem_start; |
944 | if (avail_mem[i].pmem_next == NULL) | | 944 | if (avail_mem[i].pmem_next == NULL) |
945 | break; | | 945 | break; |
946 | } | | 946 | } |
947 | pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) * | | 947 | pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) * |
948 | m68k_btop(total_phys_mem)); | | 948 | m68k_btop(total_phys_mem)); |
949 | } | | 949 | } |
950 | | | 950 | |
951 | /* pmap_alloc_usertmgr INTERNAL | | 951 | /* pmap_alloc_usertmgr INTERNAL |
952 | ** | | 952 | ** |
953 | * Called from pmap_bootstrap() to allocate the structures which | | 953 | * Called from pmap_bootstrap() to allocate the structures which |
954 | * facilitate management of user MMU tables. Each user MMU table | | 954 | * facilitate management of user MMU tables. Each user MMU table |
955 | * in the system has one such structure associated with it. | | 955 | * in the system has one such structure associated with it. |
956 | */ | | 956 | */ |
957 | void | | 957 | void |
958 | pmap_alloc_usertmgr(void) | | 958 | pmap_alloc_usertmgr(void) |
959 | { | | 959 | { |
960 | /* Allocate user MMU table managers */ | | 960 | /* Allocate user MMU table managers */ |
961 | /* It would be a lot simpler to just make these BSS, but */ | | 961 | /* It would be a lot simpler to just make these BSS, but */ |
962 | /* we may want to change their size at boot time... -j */ | | 962 | /* we may want to change their size at boot time... -j */ |
963 | Atmgrbase = | | 963 | Atmgrbase = |
964 | (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t) * NUM_A_TABLES); | | 964 | (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t) * NUM_A_TABLES); |
965 | Btmgrbase = | | 965 | Btmgrbase = |
966 | (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t) * NUM_B_TABLES); | | 966 | (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t) * NUM_B_TABLES); |
967 | Ctmgrbase = | | 967 | Ctmgrbase = |
968 | (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t) * NUM_C_TABLES); | | 968 | (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t) * NUM_C_TABLES); |
969 | | | 969 | |
970 | /* | | 970 | /* |
971 | * Allocate PV list elements for the physical to virtual | | 971 | * Allocate PV list elements for the physical to virtual |
972 | * mapping system. | | 972 | * mapping system. |
973 | */ | | 973 | */ |
974 | pvebase = (pv_elem_t *)pmap_bootstrap_alloc(sizeof(pv_elem_t) * | | 974 | pvebase = (pv_elem_t *)pmap_bootstrap_alloc(sizeof(pv_elem_t) * |
975 | (NUM_USER_PTES + NUM_KERN_PTES)); | | 975 | (NUM_USER_PTES + NUM_KERN_PTES)); |
976 | } | | 976 | } |
977 | | | 977 | |
/* pmap_bootstrap_copyprom()			INTERNAL
**
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 *
 * Two ranges are copied from the PROM's page tables into the
 * kernel's flat C-table (kernCbase): the monitor/KDB region and
 * the last page of the monitor DVMA region.
 */
void
pmap_bootstrap_copyprom(void)
{
	struct sunromvec *romp;
	int *mon_ctbl;		/* PROM page table being copied from */
	mmu_short_pte_t *kpte;	/* destination PTEs within kernCbase */
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND.
	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE, so index i in
	 * mon_ctbl corresponds to the same page offset in kernCbase
	 * once we bias by (SUN3X_MON_KDB_BASE - KERNBASE3X) pages.
	 */
	mon_ctbl = *romp->monptaddr;
	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE3X);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

	for (i = 0; i < len; i++) {
		/* Raw PROM PTE words are adopted verbatim. */
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * Actually, we only want the last page, which the
	 * PROM has set up for use by the "ie" driver.
	 * (The i82586 needs its SCP there.)
	 * If we copy all the mappings, pmap_enter_kernel
	 * may complain about finding valid PTEs that are
	 * not recorded in our PV lists...
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE3X);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
	/*
	 * Starting at (len - 1) makes this loop run exactly once,
	 * copying only the final page of the DVMA range -- this is
	 * intentional, not an off-by-one (see comment above).
	 */
	for (i = (len - 1); i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}
1024 | | | 1024 | |
/* pmap_takeover_mmu			INTERNAL
**
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 *
 * Loading the kernel root pointer (kernel_crp) into the MMU makes
 * all table modifications made earlier in pmap_bootstrap() take
 * effect: from this point on, translation goes through the
 * kernel's own A/B/C tables rather than the PROM's.
 */
void
pmap_takeover_mmu(void)
{

	loadcrp(&kernel_crp);
}
1037 | | | 1037 | |
1038 | /* pmap_bootstrap_setprom() INTERNAL | | 1038 | /* pmap_bootstrap_setprom() INTERNAL |
1039 | ** | | 1039 | ** |
1040 | * Set the PROM mappings so it can see kernel space. | | 1040 | * Set the PROM mappings so it can see kernel space. |
1041 | * Note that physical addresses are used here, which | | 1041 | * Note that physical addresses are used here, which |
1042 | * we can get away with because this runs with the | | 1042 | * we can get away with because this runs with the |
1043 | * low 1GB set for transparent translation. | | 1043 | * low 1GB set for transparent translation. |
1044 | */ | | 1044 | */ |
1045 | void | | 1045 | void |
1046 | pmap_bootstrap_setprom(void) | | 1046 | pmap_bootstrap_setprom(void) |
1047 | { | | 1047 | { |
1048 | mmu_long_dte_t *mon_dte; | | 1048 | mmu_long_dte_t *mon_dte; |
1049 | extern struct mmu_rootptr mon_crp; | | 1049 | extern struct mmu_rootptr mon_crp; |
1050 | int i; | | 1050 | int i; |
1051 | | | 1051 | |
1052 | mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr; | | 1052 | mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr; |
1053 | for (i = MMU_TIA(KERNBASE3X); i < MMU_TIA(KERN_END3X); i++) { | | 1053 | for (i = MMU_TIA(KERNBASE3X); i < MMU_TIA(KERN_END3X); i++) { |
1054 | mon_dte[i].attr.raw = kernAbase[i].attr.raw; | | 1054 | mon_dte[i].attr.raw = kernAbase[i].attr.raw; |
1055 | mon_dte[i].addr.raw = kernAbase[i].addr.raw; | | 1055 | mon_dte[i].addr.raw = kernAbase[i].addr.raw; |
1056 | } | | 1056 | } |
1057 | } | | 1057 | } |
1058 | | | 1058 | |
1059 | | | 1059 | |
/* pmap_init			INTERFACE
**
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init(void)
{

	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();

	/** Initialize the pmap pools **/
	/* pmap structures are allocated from this pool by pmap_create(). */
	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
}
1090 | | | 1090 | |
1091 | /* pmap_init_a_tables() INTERNAL | | 1091 | /* pmap_init_a_tables() INTERNAL |
1092 | ** | | 1092 | ** |
1093 | * Initializes all A managers, their MMU A tables, and inserts | | 1093 | * Initializes all A managers, their MMU A tables, and inserts |
1094 | * them into the A manager pool for use by the system. | | 1094 | * them into the A manager pool for use by the system. |
1095 | */ | | 1095 | */ |
1096 | void | | 1096 | void |
1097 | pmap_init_a_tables(void) | | 1097 | pmap_init_a_tables(void) |
1098 | { | | 1098 | { |
1099 | int i; | | 1099 | int i; |
1100 | a_tmgr_t *a_tbl; | | 1100 | a_tmgr_t *a_tbl; |
1101 | | | 1101 | |
1102 | for (i = 0; i < NUM_A_TABLES; i++) { | | 1102 | for (i = 0; i < NUM_A_TABLES; i++) { |
1103 | /* Select the next available A manager from the pool */ | | 1103 | /* Select the next available A manager from the pool */ |
1104 | a_tbl = &Atmgrbase[i]; | | 1104 | a_tbl = &Atmgrbase[i]; |
1105 | | | 1105 | |
1106 | /* | | 1106 | /* |
1107 | * Clear its parent entry. Set its wired and valid | | 1107 | * Clear its parent entry. Set its wired and valid |
| @@ -1149,2006 +1149,2014 @@ pmap_init_b_tables(void) | | | @@ -1149,2006 +1149,2014 @@ pmap_init_b_tables(void) |
1149 | b_tbl->bt_wcnt = 0; /* wired entry count, */ | | 1149 | b_tbl->bt_wcnt = 0; /* wired entry count, */ |
1150 | b_tbl->bt_ecnt = 0; /* valid entry count. */ | | 1150 | b_tbl->bt_ecnt = 0; /* valid entry count. */ |
1151 | | | 1151 | |
1152 | /* Assign it the next available MMU B table from the pool */ | | 1152 | /* Assign it the next available MMU B table from the pool */ |
1153 | b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE]; | | 1153 | b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE]; |
1154 | | | 1154 | |
1155 | /* Invalidate every descriptor in the table */ | | 1155 | /* Invalidate every descriptor in the table */ |
1156 | for (j = 0; j < MMU_B_TBL_SIZE; j++) | | 1156 | for (j = 0; j < MMU_B_TBL_SIZE; j++) |
1157 | b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID; | | 1157 | b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID; |
1158 | | | 1158 | |
1159 | /* Insert the manager into the B pool */ | | 1159 | /* Insert the manager into the B pool */ |
1160 | TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link); | | 1160 | TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link); |
1161 | } | | 1161 | } |
1162 | } | | 1162 | } |
1163 | | | 1163 | |
1164 | /* pmap_init_c_tables() INTERNAL | | 1164 | /* pmap_init_c_tables() INTERNAL |
1165 | ** | | 1165 | ** |
1166 | * Initializes all C table managers, their MMU C tables, and | | 1166 | * Initializes all C table managers, their MMU C tables, and |
1167 | * inserts them into the C manager pool for use by the system. | | 1167 | * inserts them into the C manager pool for use by the system. |
1168 | */ | | 1168 | */ |
1169 | void | | 1169 | void |
1170 | pmap_init_c_tables(void) | | 1170 | pmap_init_c_tables(void) |
1171 | { | | 1171 | { |
1172 | int i, j; | | 1172 | int i, j; |
1173 | c_tmgr_t *c_tbl; | | 1173 | c_tmgr_t *c_tbl; |
1174 | | | 1174 | |
1175 | for (i = 0; i < NUM_C_TABLES; i++) { | | 1175 | for (i = 0; i < NUM_C_TABLES; i++) { |
1176 | /* Select the next available C manager from the pool */ | | 1176 | /* Select the next available C manager from the pool */ |
1177 | c_tbl = &Ctmgrbase[i]; | | 1177 | c_tbl = &Ctmgrbase[i]; |
1178 | | | 1178 | |
1179 | c_tbl->ct_parent = NULL; /* clear its parent, */ | | 1179 | c_tbl->ct_parent = NULL; /* clear its parent, */ |
1180 | c_tbl->ct_pidx = 0; /* parent index, */ | | 1180 | c_tbl->ct_pidx = 0; /* parent index, */ |
1181 | c_tbl->ct_wcnt = 0; /* wired entry count, */ | | 1181 | c_tbl->ct_wcnt = 0; /* wired entry count, */ |
1182 | c_tbl->ct_ecnt = 0; /* valid entry count, */ | | 1182 | c_tbl->ct_ecnt = 0; /* valid entry count, */ |
1183 | c_tbl->ct_pmap = NULL; /* parent pmap, */ | | 1183 | c_tbl->ct_pmap = NULL; /* parent pmap, */ |
1184 | c_tbl->ct_va = 0; /* base of managed range */ | | 1184 | c_tbl->ct_va = 0; /* base of managed range */ |
1185 | | | 1185 | |
1186 | /* Assign it the next available MMU C table from the pool */ | | 1186 | /* Assign it the next available MMU C table from the pool */ |
1187 | c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE]; | | 1187 | c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE]; |
1188 | | | 1188 | |
1189 | for (j = 0; j < MMU_C_TBL_SIZE; j++) | | 1189 | for (j = 0; j < MMU_C_TBL_SIZE; j++) |
1190 | c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID; | | 1190 | c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID; |
1191 | | | 1191 | |
1192 | TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link); | | 1192 | TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link); |
1193 | } | | 1193 | } |
1194 | } | | 1194 | } |
1195 | | | 1195 | |
1196 | /* pmap_init_pv() INTERNAL | | 1196 | /* pmap_init_pv() INTERNAL |
1197 | ** | | 1197 | ** |
1198 | * Initializes the Physical to Virtual mapping system. | | 1198 | * Initializes the Physical to Virtual mapping system. |
1199 | */ | | 1199 | */ |
1200 | void | | 1200 | void |
1201 | pmap_init_pv(void) | | 1201 | pmap_init_pv(void) |
1202 | { | | 1202 | { |
1203 | int i; | | 1203 | int i; |
1204 | | | 1204 | |
1205 | /* Initialize every PV head. */ | | 1205 | /* Initialize every PV head. */ |
1206 | for (i = 0; i < m68k_btop(total_phys_mem); i++) { | | 1206 | for (i = 0; i < m68k_btop(total_phys_mem); i++) { |
1207 | pvbase[i].pv_idx = PVE_EOL; /* Indicate no mappings */ | | 1207 | pvbase[i].pv_idx = PVE_EOL; /* Indicate no mappings */ |
1208 | pvbase[i].pv_flags = 0; /* Zero out page flags */ | | 1208 | pvbase[i].pv_flags = 0; /* Zero out page flags */ |
1209 | } | | 1209 | } |
1210 | } | | 1210 | } |
1211 | | | 1211 | |
1212 | /* is_managed INTERNAL | | 1212 | /* is_managed INTERNAL |
1213 | ** | | 1213 | ** |
1214 | * Determine if the given physical address is managed by the PV system. | | 1214 | * Determine if the given physical address is managed by the PV system. |
1215 | * Note that this logic assumes that no one will ask for the status of | | 1215 | * Note that this logic assumes that no one will ask for the status of |
1216 | * addresses which lie in-between the memory banks on the 3/80. If they | | 1216 | * addresses which lie in-between the memory banks on the 3/80. If they |
1217 | * do so, it will falsely report that it is managed. | | 1217 | * do so, it will falsely report that it is managed. |
1218 | * | | 1218 | * |
1219 | * Note: A "managed" address is one that was reported to the VM system as | | 1219 | * Note: A "managed" address is one that was reported to the VM system as |
1220 | * a "usable page" during system startup. As such, the VM system expects the | | 1220 | * a "usable page" during system startup. As such, the VM system expects the |
 * pmap module to keep an accurate track of the usage of those pages.
1222 | * Any page not given to the VM system at startup does not exist (as far as | | 1222 | * Any page not given to the VM system at startup does not exist (as far as |
1223 | * the VM system is concerned) and is therefore "unmanaged." Examples are | | 1223 | * the VM system is concerned) and is therefore "unmanaged." Examples are |
1224 | * those pages which belong to the ROM monitor and the memory allocated before | | 1224 | * those pages which belong to the ROM monitor and the memory allocated before |
1225 | * the VM system was started. | | 1225 | * the VM system was started. |
1226 | */ | | 1226 | */ |
1227 | static INLINE bool | | 1227 | static INLINE bool |
1228 | is_managed(paddr_t pa) | | 1228 | is_managed(paddr_t pa) |
1229 | { | | 1229 | { |
1230 | if (pa >= avail_start && pa < avail_end) | | 1230 | if (pa >= avail_start && pa < avail_end) |
1231 | return true; | | 1231 | return true; |
1232 | else | | 1232 | else |
1233 | return false; | | 1233 | return false; |
1234 | } | | 1234 | } |
1235 | | | 1235 | |
1236 | /* get_a_table INTERNAL | | 1236 | /* get_a_table INTERNAL |
1237 | ** | | 1237 | ** |
1238 | * Retrieve and return a level A table for use in a user map. | | 1238 | * Retrieve and return a level A table for use in a user map. |
1239 | */ | | 1239 | */ |
1240 | a_tmgr_t * | | 1240 | a_tmgr_t * |
1241 | get_a_table(void) | | 1241 | get_a_table(void) |
1242 | { | | 1242 | { |
1243 | a_tmgr_t *tbl; | | 1243 | a_tmgr_t *tbl; |
1244 | pmap_t pmap; | | 1244 | pmap_t pmap; |
1245 | | | 1245 | |
1246 | /* Get the top A table in the pool */ | | 1246 | /* Get the top A table in the pool */ |
1247 | tbl = TAILQ_FIRST(&a_pool); | | 1247 | tbl = TAILQ_FIRST(&a_pool); |
1248 | if (tbl == NULL) { | | 1248 | if (tbl == NULL) { |
1249 | /* | | 1249 | /* |
1250 | * XXX - Instead of panicking here and in other get_x_table | | 1250 | * XXX - Instead of panicking here and in other get_x_table |
1251 | * functions, we do have the option of sleeping on the head of | | 1251 | * functions, we do have the option of sleeping on the head of |
1252 | * the table pool. Any function which updates the table pool | | 1252 | * the table pool. Any function which updates the table pool |
1253 | * would then issue a wakeup() on the head, thus waking up any | | 1253 | * would then issue a wakeup() on the head, thus waking up any |
1254 | * processes waiting for a table. | | 1254 | * processes waiting for a table. |
1255 | * | | 1255 | * |
1256 | * Actually, the place to sleep would be when some process | | 1256 | * Actually, the place to sleep would be when some process |
1257 | * asks for a "wired" mapping that would run us short of | | 1257 | * asks for a "wired" mapping that would run us short of |
1258 | * mapping resources. This design DEPENDS on always having | | 1258 | * mapping resources. This design DEPENDS on always having |
1259 | * some mapping resources in the pool for stealing, so we | | 1259 | * some mapping resources in the pool for stealing, so we |
1260 | * must make sure we NEVER let the pool become empty. -gwr | | 1260 | * must make sure we NEVER let the pool become empty. -gwr |
1261 | */ | | 1261 | */ |
1262 | panic("get_a_table: out of A tables."); | | 1262 | panic("get_a_table: out of A tables."); |
1263 | } | | 1263 | } |
1264 | | | 1264 | |
1265 | TAILQ_REMOVE(&a_pool, tbl, at_link); | | 1265 | TAILQ_REMOVE(&a_pool, tbl, at_link); |
1266 | /* | | 1266 | /* |
1267 | * If the table has a non-null parent pointer then it is in use. | | 1267 | * If the table has a non-null parent pointer then it is in use. |
1268 | * Forcibly abduct it from its parent and clear its entries. | | 1268 | * Forcibly abduct it from its parent and clear its entries. |
1269 | * No re-entrancy worries here. This table would not be in the | | 1269 | * No re-entrancy worries here. This table would not be in the |
1270 | * table pool unless it was available for use. | | 1270 | * table pool unless it was available for use. |
1271 | * | | 1271 | * |
1272 | * Note that the second argument to free_a_table() is false. This | | 1272 | * Note that the second argument to free_a_table() is false. This |
1273 | * indicates that the table should not be relinked into the A table | | 1273 | * indicates that the table should not be relinked into the A table |
1274 | * pool. That is a job for the function that called us. | | 1274 | * pool. That is a job for the function that called us. |
1275 | */ | | 1275 | */ |
1276 | if (tbl->at_parent) { | | 1276 | if (tbl->at_parent) { |
1277 | KASSERT(tbl->at_wcnt == 0); | | 1277 | KASSERT(tbl->at_wcnt == 0); |
1278 | pmap = tbl->at_parent; | | 1278 | pmap = tbl->at_parent; |
1279 | free_a_table(tbl, false); | | 1279 | free_a_table(tbl, false); |
1280 | pmap->pm_a_tmgr = NULL; | | 1280 | pmap->pm_a_tmgr = NULL; |
1281 | pmap->pm_a_phys = kernAphys; | | 1281 | pmap->pm_a_phys = kernAphys; |
1282 | } | | 1282 | } |
1283 | return tbl; | | 1283 | return tbl; |
1284 | } | | 1284 | } |
1285 | | | 1285 | |
1286 | /* get_b_table INTERNAL | | 1286 | /* get_b_table INTERNAL |
1287 | ** | | 1287 | ** |
1288 | * Return a level B table for use. | | 1288 | * Return a level B table for use. |
1289 | */ | | 1289 | */ |
1290 | b_tmgr_t * | | 1290 | b_tmgr_t * |
1291 | get_b_table(void) | | 1291 | get_b_table(void) |
1292 | { | | 1292 | { |
1293 | b_tmgr_t *tbl; | | 1293 | b_tmgr_t *tbl; |
1294 | | | 1294 | |
1295 | /* See 'get_a_table' for comments. */ | | 1295 | /* See 'get_a_table' for comments. */ |
1296 | tbl = TAILQ_FIRST(&b_pool); | | 1296 | tbl = TAILQ_FIRST(&b_pool); |
1297 | if (tbl == NULL) | | 1297 | if (tbl == NULL) |
1298 | panic("get_b_table: out of B tables."); | | 1298 | panic("get_b_table: out of B tables."); |
1299 | TAILQ_REMOVE(&b_pool, tbl, bt_link); | | 1299 | TAILQ_REMOVE(&b_pool, tbl, bt_link); |
1300 | if (tbl->bt_parent) { | | 1300 | if (tbl->bt_parent) { |
1301 | KASSERT(tbl->bt_wcnt == 0); | | 1301 | KASSERT(tbl->bt_wcnt == 0); |
1302 | tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID; | | 1302 | tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID; |
1303 | tbl->bt_parent->at_ecnt--; | | 1303 | tbl->bt_parent->at_ecnt--; |
1304 | free_b_table(tbl, false); | | 1304 | free_b_table(tbl, false); |
1305 | } | | 1305 | } |
1306 | return tbl; | | 1306 | return tbl; |
1307 | } | | 1307 | } |
1308 | | | 1308 | |
1309 | /* get_c_table INTERNAL | | 1309 | /* get_c_table INTERNAL |
1310 | ** | | 1310 | ** |
1311 | * Return a level C table for use. | | 1311 | * Return a level C table for use. |
1312 | */ | | 1312 | */ |
1313 | c_tmgr_t * | | 1313 | c_tmgr_t * |
1314 | get_c_table(void) | | 1314 | get_c_table(void) |
1315 | { | | 1315 | { |
1316 | c_tmgr_t *tbl; | | 1316 | c_tmgr_t *tbl; |
1317 | | | 1317 | |
1318 | /* See 'get_a_table' for comments */ | | 1318 | /* See 'get_a_table' for comments */ |
1319 | tbl = TAILQ_FIRST(&c_pool); | | 1319 | tbl = TAILQ_FIRST(&c_pool); |
1320 | if (tbl == NULL) | | 1320 | if (tbl == NULL) |
1321 | panic("get_c_table: out of C tables."); | | 1321 | panic("get_c_table: out of C tables."); |
1322 | TAILQ_REMOVE(&c_pool, tbl, ct_link); | | 1322 | TAILQ_REMOVE(&c_pool, tbl, ct_link); |
1323 | if (tbl->ct_parent) { | | 1323 | if (tbl->ct_parent) { |
1324 | KASSERT(tbl->ct_wcnt == 0); | | 1324 | KASSERT(tbl->ct_wcnt == 0); |
1325 | tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID; | | 1325 | tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID; |
1326 | tbl->ct_parent->bt_ecnt--; | | 1326 | tbl->ct_parent->bt_ecnt--; |
1327 | free_c_table(tbl, false); | | 1327 | free_c_table(tbl, false); |
1328 | } | | 1328 | } |
1329 | return tbl; | | 1329 | return tbl; |
1330 | } | | 1330 | } |
1331 | | | 1331 | |
1332 | /* | | 1332 | /* |
1333 | * The following 'free_table' and 'steal_table' functions are called to | | 1333 | * The following 'free_table' and 'steal_table' functions are called to |
1334 | * detach tables from their current obligations (parents and children) and | | 1334 | * detach tables from their current obligations (parents and children) and |
1335 | * prepare them for reuse in another mapping. | | 1335 | * prepare them for reuse in another mapping. |
1336 | * | | 1336 | * |
1337 | * Free_table is used when the calling function will handle the fate | | 1337 | * Free_table is used when the calling function will handle the fate |
1338 | * of the parent table, such as returning it to the free pool when it has | | 1338 | * of the parent table, such as returning it to the free pool when it has |
1339 | * no valid entries. Functions that do not want to handle this should | | 1339 | * no valid entries. Functions that do not want to handle this should |
1340 | * call steal_table, in which the parent table's descriptors and entry | | 1340 | * call steal_table, in which the parent table's descriptors and entry |
1341 | * count are automatically modified when this table is removed. | | 1341 | * count are automatically modified when this table is removed. |
1342 | */ | | 1342 | */ |
1343 | | | 1343 | |
1344 | /* free_a_table INTERNAL | | 1344 | /* free_a_table INTERNAL |
1345 | ** | | 1345 | ** |
1346 | * Unmaps the given A table and all child tables from their current | | 1346 | * Unmaps the given A table and all child tables from their current |
1347 | * mappings. Returns the number of pages that were invalidated. | | 1347 | * mappings. Returns the number of pages that were invalidated. |
1348 | * If 'relink' is true, the function will return the table to the head | | 1348 | * If 'relink' is true, the function will return the table to the head |
1349 | * of the available table pool. | | 1349 | * of the available table pool. |
1350 | * | | 1350 | * |
1351 | * Cache note: The MC68851 will automatically flush all | | 1351 | * Cache note: The MC68851 will automatically flush all |
1352 | * descriptors derived from a given A table from its | | 1352 | * descriptors derived from a given A table from its |
1353 | * Automatic Translation Cache (ATC) if we issue a | | 1353 | * Automatic Translation Cache (ATC) if we issue a |
1354 | * 'PFLUSHR' instruction with the base address of the | | 1354 | * 'PFLUSHR' instruction with the base address of the |
1355 | * table. This function should do, and does so. | | 1355 | * table. This function should do, and does so. |
1356 | * Note note: We are using an MC68030 - there is no | | 1356 | * Note note: We are using an MC68030 - there is no |
1357 | * PFLUSHR. | | 1357 | * PFLUSHR. |
1358 | */ | | 1358 | */ |
1359 | int | | 1359 | int |
1360 | free_a_table(a_tmgr_t *a_tbl, bool relink) | | 1360 | free_a_table(a_tmgr_t *a_tbl, bool relink) |
1361 | { | | 1361 | { |
1362 | int i, removed_cnt; | | 1362 | int i, removed_cnt; |
1363 | mmu_long_dte_t *dte; | | 1363 | mmu_long_dte_t *dte; |
1364 | mmu_short_dte_t *dtbl; | | 1364 | mmu_short_dte_t *dtbl; |
1365 | b_tmgr_t *b_tbl; | | 1365 | b_tmgr_t *b_tbl; |
1366 | uint8_t at_wired, bt_wired; | | 1366 | uint8_t at_wired, bt_wired; |
1367 | | | 1367 | |
1368 | /* | | 1368 | /* |
1369 | * Flush the ATC cache of all cached descriptors derived | | 1369 | * Flush the ATC cache of all cached descriptors derived |
1370 | * from this table. | | 1370 | * from this table. |
1371 | * Sun3x does not use 68851's cached table feature | | 1371 | * Sun3x does not use 68851's cached table feature |
1372 | * flush_atc_crp(mmu_vtop(a_tbl->dte)); | | 1372 | * flush_atc_crp(mmu_vtop(a_tbl->dte)); |
1373 | */ | | 1373 | */ |
1374 | | | 1374 | |
1375 | /* | | 1375 | /* |
1376 | * Remove any pending cache flushes that were designated | | 1376 | * Remove any pending cache flushes that were designated |
1377 | * for the pmap this A table belongs to. | | 1377 | * for the pmap this A table belongs to. |
1378 | * a_tbl->parent->atc_flushq[0] = 0; | | 1378 | * a_tbl->parent->atc_flushq[0] = 0; |
1379 | * Not implemented in sun3x. | | 1379 | * Not implemented in sun3x. |
1380 | */ | | 1380 | */ |
1381 | | | 1381 | |
1382 | /* | | 1382 | /* |
1383 | * All A tables in the system should retain a map for the | | 1383 | * All A tables in the system should retain a map for the |
1384 | * kernel. If the table contains any valid descriptors | | 1384 | * kernel. If the table contains any valid descriptors |
1385 | * (other than those for the kernel area), invalidate them all, | | 1385 | * (other than those for the kernel area), invalidate them all, |
1386 | * stopping short of the kernel's entries. | | 1386 | * stopping short of the kernel's entries. |
1387 | */ | | 1387 | */ |
1388 | removed_cnt = 0; | | 1388 | removed_cnt = 0; |
1389 | at_wired = a_tbl->at_wcnt; | | 1389 | at_wired = a_tbl->at_wcnt; |
1390 | if (a_tbl->at_ecnt) { | | 1390 | if (a_tbl->at_ecnt) { |
1391 | dte = a_tbl->at_dtbl; | | 1391 | dte = a_tbl->at_dtbl; |
1392 | for (i = 0; i < MMU_TIA(KERNBASE3X); i++) { | | 1392 | for (i = 0; i < MMU_TIA(KERNBASE3X); i++) { |
1393 | /* | | 1393 | /* |
1394 | * If a table entry points to a valid B table, free | | 1394 | * If a table entry points to a valid B table, free |
1395 | * it and its children. | | 1395 | * it and its children. |
1396 | */ | | 1396 | */ |
1397 | if (MMU_VALID_DT(dte[i])) { | | 1397 | if (MMU_VALID_DT(dte[i])) { |
1398 | /* | | 1398 | /* |
1399 | * The following block does several things, | | 1399 | * The following block does several things, |
1400 | * from innermost expression to the | | 1400 | * from innermost expression to the |
1401 | * outermost: | | 1401 | * outermost: |
1402 | * 1) It extracts the base (cc 1996) | | 1402 | * 1) It extracts the base (cc 1996) |
1403 | * address of the B table pointed | | 1403 | * address of the B table pointed |
1404 | * to in the A table entry dte[i]. | | 1404 | * to in the A table entry dte[i]. |
1405 | * 2) It converts this base address into | | 1405 | * 2) It converts this base address into |
1406 | * the virtual address it can be | | 1406 | * the virtual address it can be |
1407 | * accessed with. (all MMU tables point | | 1407 | * accessed with. (all MMU tables point |
1408 | * to physical addresses.) | | 1408 | * to physical addresses.) |
1409 | * 3) It finds the corresponding manager | | 1409 | * 3) It finds the corresponding manager |
1410 | * structure which manages this MMU table. | | 1410 | * structure which manages this MMU table. |
1411 | * 4) It frees the manager structure. | | 1411 | * 4) It frees the manager structure. |
1412 | * (This frees the MMU table and all | | 1412 | * (This frees the MMU table and all |
1413 | * child tables. See 'free_b_table' for | | 1413 | * child tables. See 'free_b_table' for |
1414 | * details.) | | 1414 | * details.) |
1415 | */ | | 1415 | */ |
1416 | dtbl = mmu_ptov(dte[i].addr.raw); | | 1416 | dtbl = mmu_ptov(dte[i].addr.raw); |
1417 | b_tbl = mmuB2tmgr(dtbl); | | 1417 | b_tbl = mmuB2tmgr(dtbl); |
1418 | bt_wired = b_tbl->bt_wcnt; | | 1418 | bt_wired = b_tbl->bt_wcnt; |
1419 | removed_cnt += free_b_table(b_tbl, true); | | 1419 | removed_cnt += free_b_table(b_tbl, true); |
1420 | if (bt_wired) | | 1420 | if (bt_wired) |
1421 | a_tbl->at_wcnt--; | | 1421 | a_tbl->at_wcnt--; |
1422 | dte[i].attr.raw = MMU_DT_INVALID; | | 1422 | dte[i].attr.raw = MMU_DT_INVALID; |
1423 | } | | 1423 | } |
1424 | } | | 1424 | } |
1425 | a_tbl->at_ecnt = 0; | | 1425 | a_tbl->at_ecnt = 0; |
1426 | } | | 1426 | } |
1427 | KASSERT(a_tbl->at_wcnt == 0); | | 1427 | KASSERT(a_tbl->at_wcnt == 0); |
1428 | | | 1428 | |
1429 | if (relink) { | | 1429 | if (relink) { |
1430 | a_tbl->at_parent = NULL; | | 1430 | a_tbl->at_parent = NULL; |
1431 | if (!at_wired) | | 1431 | if (!at_wired) |
1432 | TAILQ_REMOVE(&a_pool, a_tbl, at_link); | | 1432 | TAILQ_REMOVE(&a_pool, a_tbl, at_link); |
1433 | TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link); | | 1433 | TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link); |
1434 | } | | 1434 | } |
1435 | return removed_cnt; | | 1435 | return removed_cnt; |
1436 | } | | 1436 | } |
1437 | | | 1437 | |
1438 | /* free_b_table INTERNAL | | 1438 | /* free_b_table INTERNAL |
1439 | ** | | 1439 | ** |
1440 | * Unmaps the given B table and all its children from their current | | 1440 | * Unmaps the given B table and all its children from their current |
1441 | * mappings. Returns the number of pages that were invalidated. | | 1441 | * mappings. Returns the number of pages that were invalidated. |
1442 | * (For comments, see 'free_a_table()'). | | 1442 | * (For comments, see 'free_a_table()'). |
1443 | */ | | 1443 | */ |
1444 | int | | 1444 | int |
1445 | free_b_table(b_tmgr_t *b_tbl, bool relink) | | 1445 | free_b_table(b_tmgr_t *b_tbl, bool relink) |
1446 | { | | 1446 | { |
1447 | int i, removed_cnt; | | 1447 | int i, removed_cnt; |
1448 | mmu_short_dte_t *dte; | | 1448 | mmu_short_dte_t *dte; |
1449 | mmu_short_pte_t *dtbl; | | 1449 | mmu_short_pte_t *dtbl; |
1450 | c_tmgr_t *c_tbl; | | 1450 | c_tmgr_t *c_tbl; |
1451 | uint8_t bt_wired, ct_wired; | | 1451 | uint8_t bt_wired, ct_wired; |
1452 | | | 1452 | |
1453 | removed_cnt = 0; | | 1453 | removed_cnt = 0; |
1454 | bt_wired = b_tbl->bt_wcnt; | | 1454 | bt_wired = b_tbl->bt_wcnt; |
1455 | if (b_tbl->bt_ecnt) { | | 1455 | if (b_tbl->bt_ecnt) { |
1456 | dte = b_tbl->bt_dtbl; | | 1456 | dte = b_tbl->bt_dtbl; |
1457 | for (i = 0; i < MMU_B_TBL_SIZE; i++) { | | 1457 | for (i = 0; i < MMU_B_TBL_SIZE; i++) { |
1458 | if (MMU_VALID_DT(dte[i])) { | | 1458 | if (MMU_VALID_DT(dte[i])) { |
1459 | dtbl = mmu_ptov(MMU_DTE_PA(dte[i])); | | 1459 | dtbl = mmu_ptov(MMU_DTE_PA(dte[i])); |
1460 | c_tbl = mmuC2tmgr(dtbl); | | 1460 | c_tbl = mmuC2tmgr(dtbl); |
1461 | ct_wired = c_tbl->ct_wcnt; | | 1461 | ct_wired = c_tbl->ct_wcnt; |
1462 | removed_cnt += free_c_table(c_tbl, true); | | 1462 | removed_cnt += free_c_table(c_tbl, true); |
1463 | if (ct_wired) | | 1463 | if (ct_wired) |
1464 | b_tbl->bt_wcnt--; | | 1464 | b_tbl->bt_wcnt--; |
1465 | dte[i].attr.raw = MMU_DT_INVALID; | | 1465 | dte[i].attr.raw = MMU_DT_INVALID; |
1466 | } | | 1466 | } |
1467 | } | | 1467 | } |
1468 | b_tbl->bt_ecnt = 0; | | 1468 | b_tbl->bt_ecnt = 0; |
1469 | } | | 1469 | } |
1470 | KASSERT(b_tbl->bt_wcnt == 0); | | 1470 | KASSERT(b_tbl->bt_wcnt == 0); |
1471 | | | 1471 | |
1472 | if (relink) { | | 1472 | if (relink) { |
1473 | b_tbl->bt_parent = NULL; | | 1473 | b_tbl->bt_parent = NULL; |
1474 | if (!bt_wired) | | 1474 | if (!bt_wired) |
1475 | TAILQ_REMOVE(&b_pool, b_tbl, bt_link); | | 1475 | TAILQ_REMOVE(&b_pool, b_tbl, bt_link); |
1476 | TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link); | | 1476 | TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link); |
1477 | } | | 1477 | } |
1478 | return removed_cnt; | | 1478 | return removed_cnt; |
1479 | } | | 1479 | } |
1480 | | | 1480 | |
1481 | /* free_c_table INTERNAL | | 1481 | /* free_c_table INTERNAL |
1482 | ** | | 1482 | ** |
1483 | * Unmaps the given C table from use and returns it to the pool for | | 1483 | * Unmaps the given C table from use and returns it to the pool for |
1484 | * re-use. Returns the number of pages that were invalidated. | | 1484 | * re-use. Returns the number of pages that were invalidated. |
1485 | * | | 1485 | * |
1486 | * This function preserves any physical page modification information | | 1486 | * This function preserves any physical page modification information |
1487 | * contained in the page descriptors within the C table by calling | | 1487 | * contained in the page descriptors within the C table by calling |
1488 | * 'pmap_remove_pte().' | | 1488 | * 'pmap_remove_pte().' |
1489 | */ | | 1489 | */ |
1490 | int | | 1490 | int |
1491 | free_c_table(c_tmgr_t *c_tbl, bool relink) | | 1491 | free_c_table(c_tmgr_t *c_tbl, bool relink) |
1492 | { | | 1492 | { |
1493 | mmu_short_pte_t *c_pte; | | 1493 | mmu_short_pte_t *c_pte; |
1494 | int i, removed_cnt; | | 1494 | int i, removed_cnt; |
1495 | uint8_t ct_wired; | | 1495 | uint8_t ct_wired; |
1496 | | | 1496 | |
1497 | removed_cnt = 0; | | 1497 | removed_cnt = 0; |
1498 | ct_wired = c_tbl->ct_wcnt; | | 1498 | ct_wired = c_tbl->ct_wcnt; |
1499 | if (c_tbl->ct_ecnt) { | | 1499 | if (c_tbl->ct_ecnt) { |
1500 | for (i = 0; i < MMU_C_TBL_SIZE; i++) { | | 1500 | for (i = 0; i < MMU_C_TBL_SIZE; i++) { |
1501 | c_pte = &c_tbl->ct_dtbl[i]; | | 1501 | c_pte = &c_tbl->ct_dtbl[i]; |
1502 | if (MMU_VALID_DT(*c_pte)) { | | 1502 | if (MMU_VALID_DT(*c_pte)) { |
1503 | if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) | | 1503 | if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) |
1504 | c_tbl->ct_wcnt--; | | 1504 | c_tbl->ct_wcnt--; |
1505 | pmap_remove_pte(c_pte); | | 1505 | pmap_remove_pte(c_pte); |
1506 | removed_cnt++; | | 1506 | removed_cnt++; |
1507 | } | | 1507 | } |
1508 | } | | 1508 | } |
1509 | c_tbl->ct_ecnt = 0; | | 1509 | c_tbl->ct_ecnt = 0; |
1510 | } | | 1510 | } |
1511 | KASSERT(c_tbl->ct_wcnt == 0); | | 1511 | KASSERT(c_tbl->ct_wcnt == 0); |
1512 | | | 1512 | |
1513 | if (relink) { | | 1513 | if (relink) { |
1514 | c_tbl->ct_parent = NULL; | | 1514 | c_tbl->ct_parent = NULL; |
1515 | if (!ct_wired) | | 1515 | if (!ct_wired) |
1516 | TAILQ_REMOVE(&c_pool, c_tbl, ct_link); | | 1516 | TAILQ_REMOVE(&c_pool, c_tbl, ct_link); |
1517 | TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link); | | 1517 | TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link); |
1518 | } | | 1518 | } |
1519 | return removed_cnt; | | 1519 | return removed_cnt; |
1520 | } | | 1520 | } |
1521 | | | 1521 | |
1522 | | | 1522 | |
1523 | /* pmap_remove_pte INTERNAL | | 1523 | /* pmap_remove_pte INTERNAL |
1524 | ** | | 1524 | ** |
1525 | * Unmap the given pte and preserve any page modification | | 1525 | * Unmap the given pte and preserve any page modification |
 * information by transferring it to the pv head of the
1527 | * physical page it maps to. This function does not update | | 1527 | * physical page it maps to. This function does not update |
1528 | * any reference counts because it is assumed that the calling | | 1528 | * any reference counts because it is assumed that the calling |
1529 | * function will do so. | | 1529 | * function will do so. |
1530 | */ | | 1530 | */ |
1531 | void | | 1531 | void |
1532 | pmap_remove_pte(mmu_short_pte_t *pte) | | 1532 | pmap_remove_pte(mmu_short_pte_t *pte) |
1533 | { | | 1533 | { |
1534 | u_short pv_idx, targ_idx; | | 1534 | u_short pv_idx, targ_idx; |
1535 | paddr_t pa; | | 1535 | paddr_t pa; |
1536 | pv_t *pv; | | 1536 | pv_t *pv; |
1537 | | | 1537 | |
1538 | pa = MMU_PTE_PA(*pte); | | 1538 | pa = MMU_PTE_PA(*pte); |
1539 | if (is_managed(pa)) { | | 1539 | if (is_managed(pa)) { |
1540 | pv = pa2pv(pa); | | 1540 | pv = pa2pv(pa); |
1541 | targ_idx = pteidx(pte); /* Index of PTE being removed */ | | 1541 | targ_idx = pteidx(pte); /* Index of PTE being removed */ |
1542 | | | 1542 | |
1543 | /* | | 1543 | /* |
1544 | * If the PTE being removed is the first (or only) PTE in | | 1544 | * If the PTE being removed is the first (or only) PTE in |
1545 | * the list of PTEs currently mapped to this page, remove the | | 1545 | * the list of PTEs currently mapped to this page, remove the |
1546 | * PTE by changing the index found on the PV head. Otherwise | | 1546 | * PTE by changing the index found on the PV head. Otherwise |
1547 | * a linear search through the list will have to be executed | | 1547 | * a linear search through the list will have to be executed |
1548 | * in order to find the PVE which points to the PTE being | | 1548 | * in order to find the PVE which points to the PTE being |
1549 | * removed, so that it may be modified to point to its new | | 1549 | * removed, so that it may be modified to point to its new |
1550 | * neighbor. | | 1550 | * neighbor. |
1551 | */ | | 1551 | */ |
1552 | | | 1552 | |
1553 | pv_idx = pv->pv_idx; /* Index of first PTE in PV list */ | | 1553 | pv_idx = pv->pv_idx; /* Index of first PTE in PV list */ |
1554 | if (pv_idx == targ_idx) { | | 1554 | if (pv_idx == targ_idx) { |
1555 | pv->pv_idx = pvebase[targ_idx].pve_next; | | 1555 | pv->pv_idx = pvebase[targ_idx].pve_next; |
1556 | } else { | | 1556 | } else { |
1557 | | | 1557 | |
1558 | /* | | 1558 | /* |
1559 | * Find the PV element pointing to the target | | 1559 | * Find the PV element pointing to the target |
1560 | * element. Note: may have pv_idx==PVE_EOL | | 1560 | * element. Note: may have pv_idx==PVE_EOL |
1561 | */ | | 1561 | */ |
1562 | | | 1562 | |
1563 | for (;;) { | | 1563 | for (;;) { |
1564 | if (pv_idx == PVE_EOL) { | | 1564 | if (pv_idx == PVE_EOL) { |
1565 | goto pv_not_found; | | 1565 | goto pv_not_found; |
1566 | } | | 1566 | } |
1567 | if (pvebase[pv_idx].pve_next == targ_idx) | | 1567 | if (pvebase[pv_idx].pve_next == targ_idx) |
1568 | break; | | 1568 | break; |
1569 | pv_idx = pvebase[pv_idx].pve_next; | | 1569 | pv_idx = pvebase[pv_idx].pve_next; |
1570 | } | | 1570 | } |
1571 | | | 1571 | |
1572 | /* | | 1572 | /* |
1573 | * At this point, pv_idx is the index of the PV | | 1573 | * At this point, pv_idx is the index of the PV |
1574 | * element just before the target element in the list. | | 1574 | * element just before the target element in the list. |
1575 | * Unlink the target. | | 1575 | * Unlink the target. |
1576 | */ | | 1576 | */ |
1577 | | | 1577 | |
1578 | pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next; | | 1578 | pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next; |
1579 | } | | 1579 | } |
1580 | | | 1580 | |
1581 | /* | | 1581 | /* |
1582 | * Save the mod/ref bits of the pte by simply | | 1582 | * Save the mod/ref bits of the pte by simply |
1583 | * ORing the entire pte onto the pv_flags member | | 1583 | * ORing the entire pte onto the pv_flags member |
1584 | * of the pv structure. | | 1584 | * of the pv structure. |
1585 | * There is no need to use a separate bit pattern | | 1585 | * There is no need to use a separate bit pattern |
1586 | * for usage information on the pv head than that | | 1586 | * for usage information on the pv head than that |
1587 | * which is used on the MMU ptes. | | 1587 | * which is used on the MMU ptes. |
1588 | */ | | 1588 | */ |
1589 | | | 1589 | |
1590 | pv_not_found: | | 1590 | pv_not_found: |
1591 | pv->pv_flags |= (u_short) pte->attr.raw; | | 1591 | pv->pv_flags |= (u_short) pte->attr.raw; |
1592 | } | | 1592 | } |
1593 | pte->attr.raw = MMU_DT_INVALID; | | 1593 | pte->attr.raw = MMU_DT_INVALID; |
1594 | } | | 1594 | } |
1595 | | | 1595 | |
1596 | /* pmap_stroll INTERNAL | | 1596 | /* pmap_stroll INTERNAL |
1597 | ** | | 1597 | ** |
1598 | * Retrieve the addresses of all table managers involved in the mapping of | | 1598 | * Retrieve the addresses of all table managers involved in the mapping of |
1599 | * the given virtual address. If the table walk completed successfully, | | 1599 | * the given virtual address. If the table walk completed successfully, |
1600 | * return true. If it was only partially successful, return false. | | 1600 | * return true. If it was only partially successful, return false. |
1601 | * The table walk performed by this function is important to many other | | 1601 | * The table walk performed by this function is important to many other |
1602 | * functions in this module. | | 1602 | * functions in this module. |
1603 | * | | 1603 | * |
1604 | * Note: This function ought to be easier to read. | | 1604 | * Note: This function ought to be easier to read. |
1605 | */ | | 1605 | */ |
1606 | bool | | 1606 | bool |
1607 | pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl, | | 1607 | pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl, |
1608 | c_tmgr_t **c_tbl, mmu_short_pte_t **pte, int *a_idx, int *b_idx, | | 1608 | c_tmgr_t **c_tbl, mmu_short_pte_t **pte, int *a_idx, int *b_idx, |
1609 | int *pte_idx) | | 1609 | int *pte_idx) |
1610 | { | | 1610 | { |
1611 | mmu_long_dte_t *a_dte; /* A: long descriptor table */ | | 1611 | mmu_long_dte_t *a_dte; /* A: long descriptor table */ |
1612 | mmu_short_dte_t *b_dte; /* B: short descriptor table */ | | 1612 | mmu_short_dte_t *b_dte; /* B: short descriptor table */ |
1613 | | | 1613 | |
1614 | if (pmap == pmap_kernel()) | | 1614 | if (pmap == pmap_kernel()) |
1615 | return false; | | 1615 | return false; |
1616 | | | 1616 | |
1617 | /* Does the given pmap have its own A table? */ | | 1617 | /* Does the given pmap have its own A table? */ |
1618 | *a_tbl = pmap->pm_a_tmgr; | | 1618 | *a_tbl = pmap->pm_a_tmgr; |
1619 | if (*a_tbl == NULL) | | 1619 | if (*a_tbl == NULL) |
1620 | return false; /* No. Return unknown. */ | | 1620 | return false; /* No. Return unknown. */ |
1621 | /* Does the A table have a valid B table | | 1621 | /* Does the A table have a valid B table |
1622 | * under the corresponding table entry? | | 1622 | * under the corresponding table entry? |
1623 | */ | | 1623 | */ |
1624 | *a_idx = MMU_TIA(va); | | 1624 | *a_idx = MMU_TIA(va); |
1625 | a_dte = &((*a_tbl)->at_dtbl[*a_idx]); | | 1625 | a_dte = &((*a_tbl)->at_dtbl[*a_idx]); |
1626 | if (!MMU_VALID_DT(*a_dte)) | | 1626 | if (!MMU_VALID_DT(*a_dte)) |
1627 | return false; /* No. Return unknown. */ | | 1627 | return false; /* No. Return unknown. */ |
1628 | /* Yes. Extract B table from the A table. */ | | 1628 | /* Yes. Extract B table from the A table. */ |
1629 | *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw)); | | 1629 | *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw)); |
1630 | /* | | 1630 | /* |
1631 | * Does the B table have a valid C table | | 1631 | * Does the B table have a valid C table |
1632 | * under the corresponding table entry? | | 1632 | * under the corresponding table entry? |
1633 | */ | | 1633 | */ |
1634 | *b_idx = MMU_TIB(va); | | 1634 | *b_idx = MMU_TIB(va); |
1635 | b_dte = &((*b_tbl)->bt_dtbl[*b_idx]); | | 1635 | b_dte = &((*b_tbl)->bt_dtbl[*b_idx]); |
1636 | if (!MMU_VALID_DT(*b_dte)) | | 1636 | if (!MMU_VALID_DT(*b_dte)) |
1637 | return false; /* No. Return unknown. */ | | 1637 | return false; /* No. Return unknown. */ |
1638 | /* Yes. Extract C table from the B table. */ | | 1638 | /* Yes. Extract C table from the B table. */ |
1639 | *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte))); | | 1639 | *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte))); |
1640 | *pte_idx = MMU_TIC(va); | | 1640 | *pte_idx = MMU_TIC(va); |
1641 | *pte = &((*c_tbl)->ct_dtbl[*pte_idx]); | | 1641 | *pte = &((*c_tbl)->ct_dtbl[*pte_idx]); |
1642 | | | 1642 | |
1643 | return true; | | 1643 | return true; |
1644 | } | | 1644 | } |
1645 | | | 1645 | |
/* pmap_enter			INTERFACE
 **
 * Called by the kernel to map a virtual address
 * to a physical address in the given process map.
 *
 * pmap	 - target address space (kernel pmap is delegated to
 *	   pmap_enter_kernel(), which edits the static kernel PTE
 *	   array directly).
 * va	 - virtual address to map.
 * pa	 - physical address; its low-order bits may carry sun3x
 *	   specific mapping flags (see NOTE1 below).
 * prot	 - requested protection; absence of VM_PROT_WRITE makes
 *	   the mapping read-only.
 * flags - UVM flags, including PMAP_WIRED and access-type bits
 *	   (VM_PROT_*) used to pre-set used/modified on the PTE.
 *
 * Returns 0.  (No failure paths are taken here: table allocation
 * is served from pre-allocated pools via get_[abc]_table().)
 *
 * Note: this function should apply an exclusive lock
 * on the pmap system for its duration.  (it certainly
 * would save my hair!!)
 * This function ought to be easier to read.
 */
int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	bool insert, managed;	/* Marks the need for PV insertion.  */
	u_short nidx;		/* PV list index                     */
	int mapflags;		/* Flags for the mapping (see NOTE1) */
	u_int a_idx, b_idx, pte_idx;	/* table indices             */
	a_tmgr_t *a_tbl;	/* A: long descriptor table manager  */
	b_tmgr_t *b_tbl;	/* B: short descriptor table manager */
	c_tmgr_t *c_tbl;	/* C: short page table manager       */
	mmu_long_dte_t *a_dte;	/* A: long descriptor table          */
	mmu_short_dte_t *b_dte;	/* B: short descriptor table         */
	mmu_short_pte_t *c_pte;	/* C: short page descriptor table    */
	pv_t *pv;		/* pv list head                      */
	bool wired;		/* is the mapping to be wired?       */
	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end         */

	if (pmap == pmap_kernel()) {
		pmap_enter_kernel(va, pa, prot);
		return 0;
	}

	/*
	 * Determine if the mapping should be wired.
	 */
	wired = ((flags & PMAP_WIRED) != 0);

	/*
	 * NOTE1:
	 *
	 * On November 13, 1999, someone changed the pmap_enter() API such
	 * that it now accepts a 'flags' argument.  This new argument
	 * contains bit-flags for the architecture-independent (UVM) system to
	 * use in signalling certain mapping requirements to the architecture-
	 * dependent (pmap) system.  The argument it replaces, 'wired', is now
	 * one of the flags within it.
	 *
	 * In addition to flags signaled by the architecture-independent
	 * system, parts of the architecture-dependent section of the sun3x
	 * kernel pass their own flags in the lower, unused bits of the
	 * physical address supplied to this function.  These flags are
	 * extracted and stored in the temporary variable 'mapflags'.
	 *
	 * Extract sun3x specific flags from the physical address.
	 */
	mapflags = (pa & ~MMU_PAGE_MASK);
	pa &= MMU_PAGE_MASK;

	/*
	 * Determine if the physical address being mapped is on-board RAM.
	 * Any other area of the address space is likely to belong to a
	 * device and hence it would be disastrous to cache its contents.
	 */
	if ((managed = is_managed(pa)) == false)
		mapflags |= PMAP_NC;

	/*
	 * For user mappings we walk along the MMU tables of the given
	 * pmap, reaching a PTE which describes the virtual page being
	 * mapped or changed.  If any level of the walk ends in an invalid
	 * entry, a table must be allocated and the entry must be updated
	 * to point to it.
	 * There is a bit of confusion as to whether this code must be
	 * re-entrant.  For now we will assume it is.  To support
	 * re-entrancy we must unlink tables from the table pool before
	 * we assume we may use them.  Tables are re-linked into the pool
	 * when we are finished with them at the end of the function.
	 * But I don't feel like doing that until we have proof that this
	 * needs to be re-entrant.
	 * 'llevel' records which tables need to be relinked.
	 */
	llevel = NONE;

	/*
	 * Step 1 - Retrieve the A table from the pmap.  If it has no
	 * A table, allocate a new one from the available pool.
	 */

	a_tbl = pmap->pm_a_tmgr;
	if (a_tbl == NULL) {
		/*
		 * This pmap does not currently have an A table.  Allocate
		 * a new one.
		 */
		a_tbl = get_a_table();
		a_tbl->at_parent = pmap;

		/*
		 * Assign this new A table to the pmap, and calculate its
		 * physical address so that loadcrp() can be used to make
		 * the table active.
		 */
		pmap->pm_a_tmgr = a_tbl;
		pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);

		/*
		 * If the process receiving a new A table is the current
		 * process, we are responsible for setting the MMU so that
		 * it becomes the current address space.  This only adds
		 * new mappings, so no need to flush anything.
		 */
		if (pmap == current_pmap()) {
			kernel_crp.rp_addr = pmap->pm_a_phys;
			loadcrp(&kernel_crp);
		}

		if (!wired)
			llevel = NEWA;
	} else {
		/*
		 * Use the A table already allocated for this pmap.
		 * Unlink it from the A table pool if necessary.
		 * (Wired tables are kept off the pool so they cannot
		 * be reclaimed while they map wired entries.)
		 */
		if (wired && !a_tbl->at_wcnt)
			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
	}

	/*
	 * Step 2 - Walk into the B table.  If there is no valid B table,
	 * allocate one.
	 */

	a_idx = MMU_TIA(va);		/* Calculate the TIA of the VA. */
	a_dte = &a_tbl->at_dtbl[a_idx];	/* Retrieve descriptor from table */
	if (MMU_VALID_DT(*a_dte)) {	/* Is the descriptor valid? */
		/* The descriptor is valid.  Use the B table it points to. */
		/*************************************
		 *               a_idx               *
		 *                 v                 *
		 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
		 *          | | | | | | | | | | | |  *
		 *          +-+-+-+-+-+-+-+-+-+-+-+- *
		 *                 |                 *
		 *                 \- b_tbl -> +-+-  *
		 *                             | |   *
		 *                             +-+-  *
		 *************************************/
		b_dte = mmu_ptov(a_dte->addr.raw);
		b_tbl = mmuB2tmgr(b_dte);

		/*
		 * If the requested mapping must be wired, but this table
		 * being used to map it is not, the table must be removed
		 * from the available pool and its wired entry count
		 * incremented.
		 */
		if (wired && !b_tbl->bt_wcnt) {
			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
			a_tbl->at_wcnt++;
		}
	} else {
		/* The descriptor is invalid.  Allocate a new B table. */
		b_tbl = get_b_table();

		/* Point the parent A table descriptor to this new B table. */
		a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
		a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
		a_tbl->at_ecnt++;	/* Update parent's valid entry count */

		/* Create the necessary back references to the parent table */
		b_tbl->bt_parent = a_tbl;
		b_tbl->bt_pidx = a_idx;

		/*
		 * If this table is to be wired, make sure the parent A table
		 * wired count is updated to reflect that it has another wired
		 * entry.
		 */
		if (wired)
			a_tbl->at_wcnt++;
		else if (llevel == NONE)
			llevel = NEWB;
	}

	/*
	 * Step 3 - Walk into the C table, if there is no valid C table,
	 * allocate one.
	 */

	b_idx = MMU_TIB(va);		/* Calculate the TIB of the VA */
	b_dte = &b_tbl->bt_dtbl[b_idx];	/* Retrieve descriptor from table */
	if (MMU_VALID_DT(*b_dte)) {	/* Is the descriptor valid? */
		/* The descriptor is valid.  Use the C table it points to. */
		/**************************************
		 *                 c_idx              *
		 * |                 v                *
		 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
		 *             | | | | | | | | | | |  *
		 *             +-+-+-+-+-+-+-+-+-+-+- *
		 *                  |                 *
		 *                  \- c_tbl -> +-+-- *
		 *                              | | | *
		 *                              +-+-- *
		 **************************************/
		c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
		c_tbl = mmuC2tmgr(c_pte);

		/* If mapping is wired and table is not */
		if (wired && !c_tbl->ct_wcnt) {
			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
			b_tbl->bt_wcnt++;
		}
	} else {
		/* The descriptor is invalid.  Allocate a new C table. */
		c_tbl = get_c_table();

		/* Point the parent B table descriptor to this new C table. */
		b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
		b_dte->attr.raw |= MMU_DT_SHORT;
		b_tbl->bt_ecnt++;	/* Update parent's valid entry count */

		/* Create the necessary back references to the parent table */
		c_tbl->ct_parent = b_tbl;
		c_tbl->ct_pidx = b_idx;
		/*
		 * Store the pmap and base virtual managed address for faster
		 * retrieval in the PV functions.
		 */
		c_tbl->ct_pmap = pmap;
		c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));

		/*
		 * If this table is to be wired, make sure the parent B table
		 * wired count is updated to reflect that it has another wired
		 * entry.
		 */
		if (wired)
			b_tbl->bt_wcnt++;
		else if (llevel == NONE)
			llevel = NEWC;
	}

	/*
	 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
	 * slot of the C table, describing the PA to which the VA is mapped.
	 */

	pte_idx = MMU_TIC(va);
	c_pte = &c_tbl->ct_dtbl[pte_idx];
	if (MMU_VALID_DT(*c_pte)) {	/* Is the entry currently valid? */
		/*
		 * The PTE is currently valid.  This particular call
		 * is just a synonym for one (or more) of the following
		 * operations:
		 *     change protection of a page
		 *     change wiring status of a page
		 *     remove the mapping of a page
		 */

		/* First check if this is a wiring operation. */
		if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) {
			/*
			 * The existing mapping is wired, so adjust wired
			 * entry count here. If new mapping is still wired,
			 * wired entry count will be incremented again later.
			 */
			c_tbl->ct_wcnt--;
			if (!wired) {
				/*
				 * The mapping of this PTE is being changed
				 * from wired to unwired.
				 * Adjust wired entry counts in each table and
				 * set llevel flag to put unwired tables back
				 * into the active pool.
				 */
				if (c_tbl->ct_wcnt == 0) {
					llevel = NEWC;
					if (--b_tbl->bt_wcnt == 0) {
						llevel = NEWB;
						if (--a_tbl->at_wcnt == 0) {
							llevel = NEWA;
						}
					}
				}
			}
		}

		/* Is the new address the same as the old? */
		if (MMU_PTE_PA(*c_pte) == pa) {
			/*
			 * Yes, mark that it does not need to be reinserted
			 * into the PV list.
			 */
			insert = false;

			/*
			 * Clear all but the modified, referenced and wired
			 * bits on the PTE.
			 */
			c_pte->attr.raw &= (MMU_SHORT_PTE_M
			    | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
		} else {
			/* No, remove the old entry */
			pmap_remove_pte(c_pte);
			insert = true;
		}

		/*
		 * TLB flush is only necessary if modifying current map.
		 * However, in pmap_enter(), the pmap almost always IS
		 * the current pmap, so don't even bother to check.
		 */
		TBIS(va);
	} else {
		/*
		 * The PTE is invalid.  Increment the valid entry count in
		 * the C table manager to reflect the addition of a new entry.
		 */
		c_tbl->ct_ecnt++;

		/* XXX - temporarily make sure the PTE is cleared. */
		c_pte->attr.raw = 0;

		/* It will also need to be inserted into the PV list. */
		insert = true;
	}

	/*
	 * If page is changing from unwired to wired status, set an unused bit
	 * within the PTE to indicate that it is wired.  Also increment the
	 * wired entry count in the C table manager.
	 */
	if (wired) {
		c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
		c_tbl->ct_wcnt++;
	}

	/*
	 * Map the page, being careful to preserve modify/reference/wired
	 * bits.  At this point it is assumed that the PTE either has no bits
	 * set, or if there are set bits, they are only modified, reference or
	 * wired bits.  If not, the following statement will cause erratic
	 * behavior.
	 */
#ifdef	PMAP_DEBUG
	if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
	    MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
		printf("pmap_enter: junk left in PTE at %p\n", c_pte);
		Debugger();
	}
#endif
	c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);

	/*
	 * If the mapping should be read-only, set the write protect
	 * bit in the PTE.
	 */
	if (!(prot & VM_PROT_WRITE))
		c_pte->attr.raw |= MMU_SHORT_PTE_WP;

	/*
	 * Mark the PTE as used and/or modified as specified by the flags arg.
	 */
	if (flags & VM_PROT_ALL) {
		c_pte->attr.raw |= MMU_SHORT_PTE_USED;
		if (flags & VM_PROT_WRITE) {
			c_pte->attr.raw |= MMU_SHORT_PTE_M;
		}
	}

	/*
	 * If the mapping should be cache inhibited (indicated by the flag
	 * bits found on the lower order of the physical address.)
	 * mark the PTE as a cache inhibited page.
	 */
	if (mapflags & PMAP_NC)
		c_pte->attr.raw |= MMU_SHORT_PTE_CI;

	/*
	 * If the physical address being mapped is managed by the PV
	 * system then link the pte into the list of pages mapped to that
	 * address.
	 */
	if (insert && managed) {
		pv = pa2pv(pa);
		nidx = pteidx(c_pte);

		pvebase[nidx].pve_next = pv->pv_idx;
		pv->pv_idx = nidx;
	}

	/* Move any allocated or unwired tables back into the active pool. */

	switch (llevel) {
	case NEWA:
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
		/* FALLTHROUGH */
	case NEWB:
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
		/* FALLTHROUGH */
	case NEWC:
		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
		/* FALLTHROUGH */
	default:
		break;
	}

	return 0;
}
2055 | | | 2055 | |
2056 | /* pmap_enter_kernel INTERNAL | | 2056 | /* pmap_enter_kernel INTERNAL |
2057 | ** | | 2057 | ** |
2058 | * Map the given virtual address to the given physical address within the | | 2058 | * Map the given virtual address to the given physical address within the |
2059 | * kernel address space. This function exists because the kernel map does | | 2059 | * kernel address space. This function exists because the kernel map does |
2060 | * not do dynamic table allocation. It consists of a contiguous array of ptes | | 2060 | * not do dynamic table allocation. It consists of a contiguous array of ptes |
2061 | * and can be edited directly without the need to walk through any tables. | | 2061 | * and can be edited directly without the need to walk through any tables. |
2062 | * | | 2062 | * |
2063 | * XXX: "Danger, Will Robinson!" | | 2063 | * XXX: "Danger, Will Robinson!" |
2064 | * Note that the kernel should never take a fault on any page | | 2064 | * Note that the kernel should never take a fault on any page |
2065 | * between [ KERNBASE .. virtual_avail ] and this is checked in | | 2065 | * between [ KERNBASE .. virtual_avail ] and this is checked in |
2066 | * trap.c for kernel-mode MMU faults. This means that mappings | | 2066 | * trap.c for kernel-mode MMU faults. This means that mappings |
 * created in that range must be implicitly wired. -gwr
2068 | */ | | 2068 | */ |
2069 | void | | 2069 | void |
2070 | pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot) | | 2070 | pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot) |
2071 | { | | 2071 | { |
2072 | bool was_valid, insert; | | 2072 | bool was_valid, insert; |
2073 | u_short pte_idx; | | 2073 | u_short pte_idx; |
2074 | int flags; | | 2074 | int flags; |
2075 | mmu_short_pte_t *pte; | | 2075 | mmu_short_pte_t *pte; |
2076 | pv_t *pv; | | 2076 | pv_t *pv; |
2077 | paddr_t old_pa; | | 2077 | paddr_t old_pa; |
2078 | | | 2078 | |
2079 | flags = (pa & ~MMU_PAGE_MASK); | | 2079 | flags = (pa & ~MMU_PAGE_MASK); |
2080 | pa &= MMU_PAGE_MASK; | | 2080 | pa &= MMU_PAGE_MASK; |
2081 | | | 2081 | |
2082 | if (is_managed(pa)) | | 2082 | if (is_managed(pa)) |
2083 | insert = true; | | 2083 | insert = true; |
2084 | else | | 2084 | else |
2085 | insert = false; | | 2085 | insert = false; |
2086 | | | 2086 | |
2087 | /* | | 2087 | /* |
2088 | * Calculate the index of the PTE being modified. | | 2088 | * Calculate the index of the PTE being modified. |
2089 | */ | | 2089 | */ |
2090 | pte_idx = (u_long)m68k_btop(va - KERNBASE3X); | | 2090 | pte_idx = (u_long)m68k_btop(va - KERNBASE3X); |
2091 | | | 2091 | |
2092 | /* This array is traditionally named "Sysmap" */ | | 2092 | /* This array is traditionally named "Sysmap" */ |
2093 | pte = &kernCbase[pte_idx]; | | 2093 | pte = &kernCbase[pte_idx]; |
2094 | | | 2094 | |
2095 | if (MMU_VALID_DT(*pte)) { | | 2095 | if (MMU_VALID_DT(*pte)) { |
2096 | was_valid = true; | | 2096 | was_valid = true; |
2097 | /* | | 2097 | /* |
2098 | * If the PTE already maps a different | | 2098 | * If the PTE already maps a different |
2099 | * physical address, unmap and pv_unlink. | | 2099 | * physical address, unmap and pv_unlink. |
2100 | */ | | 2100 | */ |
2101 | old_pa = MMU_PTE_PA(*pte); | | 2101 | old_pa = MMU_PTE_PA(*pte); |
2102 | if (pa != old_pa) | | 2102 | if (pa != old_pa) |
2103 | pmap_remove_pte(pte); | | 2103 | pmap_remove_pte(pte); |
2104 | else { | | 2104 | else { |
2105 | /* | | 2105 | /* |
2106 | * Old PA and new PA are the same. No need to | | 2106 | * Old PA and new PA are the same. No need to |
2107 | * relink the mapping within the PV list. | | 2107 | * relink the mapping within the PV list. |
2108 | */ | | 2108 | */ |
2109 | insert = false; | | 2109 | insert = false; |
2110 | | | 2110 | |
2111 | /* | | 2111 | /* |
2112 | * Save any mod/ref bits on the PTE. | | 2112 | * Save any mod/ref bits on the PTE. |
2113 | */ | | 2113 | */ |
2114 | pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M); | | 2114 | pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M); |
2115 | } | | 2115 | } |
2116 | } else { | | 2116 | } else { |
2117 | pte->attr.raw = MMU_DT_INVALID; | | 2117 | pte->attr.raw = MMU_DT_INVALID; |
2118 | was_valid = false; | | 2118 | was_valid = false; |
2119 | } | | 2119 | } |
2120 | | | 2120 | |
2121 | /* | | 2121 | /* |
2122 | * Map the page. Being careful to preserve modified/referenced bits | | 2122 | * Map the page. Being careful to preserve modified/referenced bits |
2123 | * on the PTE. | | 2123 | * on the PTE. |
2124 | */ | | 2124 | */ |
2125 | pte->attr.raw |= (pa | MMU_DT_PAGE); | | 2125 | pte->attr.raw |= (pa | MMU_DT_PAGE); |
2126 | | | 2126 | |
2127 | if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */ | | 2127 | if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */ |
2128 | pte->attr.raw |= MMU_SHORT_PTE_WP; | | 2128 | pte->attr.raw |= MMU_SHORT_PTE_WP; |
2129 | if (flags & PMAP_NC) | | 2129 | if (flags & PMAP_NC) |
2130 | pte->attr.raw |= MMU_SHORT_PTE_CI; | | 2130 | pte->attr.raw |= MMU_SHORT_PTE_CI; |
2131 | if (was_valid) | | 2131 | if (was_valid) |
2132 | TBIS(va); | | 2132 | TBIS(va); |
2133 | | | 2133 | |
2134 | /* | | 2134 | /* |
2135 | * Insert the PTE into the PV system, if need be. | | 2135 | * Insert the PTE into the PV system, if need be. |
2136 | */ | | 2136 | */ |
2137 | if (insert) { | | 2137 | if (insert) { |
2138 | pv = pa2pv(pa); | | 2138 | pv = pa2pv(pa); |
2139 | pvebase[pte_idx].pve_next = pv->pv_idx; | | 2139 | pvebase[pte_idx].pve_next = pv->pv_idx; |
2140 | pv->pv_idx = pte_idx; | | 2140 | pv->pv_idx = pte_idx; |
2141 | } | | 2141 | } |
2142 | } | | 2142 | } |
2143 | | | 2143 | |
2144 | void | | 2144 | void |
2145 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) | | 2145 | pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) |
2146 | { | | 2146 | { |
2147 | mmu_short_pte_t *pte; | | 2147 | mmu_short_pte_t *pte; |
| | | 2148 | u_int mapflags; |
| | | 2149 | |
| | | 2150 | /* XXX: MD PMAP_NC should be replaced by MI PMAP_NOCACHE in flags. */ |
| | | 2151 | mapflags = (pa & ~MMU_PAGE_MASK); |
| | | 2152 | if ((mapflags & PMAP_NC) != 0) |
| | | 2153 | flags |= PMAP_NOCACHE; |
2148 | | | 2154 | |
2149 | /* This array is traditionally named "Sysmap" */ | | 2155 | /* This array is traditionally named "Sysmap" */ |
2150 | pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE3X)]; | | 2156 | pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE3X)]; |
2151 | | | 2157 | |
2152 | KASSERT(!MMU_VALID_DT(*pte)); | | 2158 | KASSERT(!MMU_VALID_DT(*pte)); |
2153 | pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK); | | 2159 | pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK); |
2154 | if (!(prot & VM_PROT_WRITE)) | | 2160 | if (!(prot & VM_PROT_WRITE)) |
2155 | pte->attr.raw |= MMU_SHORT_PTE_WP; | | 2161 | pte->attr.raw |= MMU_SHORT_PTE_WP; |
| | | 2162 | if ((flags & PMAP_NOCACHE) != 0) |
| | | 2163 | pte->attr.raw |= MMU_SHORT_PTE_CI; |
2156 | } | | 2164 | } |
2157 | | | 2165 | |
2158 | void | | 2166 | void |
2159 | pmap_kremove(vaddr_t va, vsize_t len) | | 2167 | pmap_kremove(vaddr_t va, vsize_t len) |
2160 | { | | 2168 | { |
2161 | int idx, eidx; | | 2169 | int idx, eidx; |
2162 | | | 2170 | |
2163 | #ifdef PMAP_DEBUG | | 2171 | #ifdef PMAP_DEBUG |
2164 | if ((va & PGOFSET) || (len & PGOFSET)) | | 2172 | if ((va & PGOFSET) || (len & PGOFSET)) |
2165 | panic("pmap_kremove: alignment"); | | 2173 | panic("pmap_kremove: alignment"); |
2166 | #endif | | 2174 | #endif |
2167 | | | 2175 | |
2168 | idx = m68k_btop(va - KERNBASE3X); | | 2176 | idx = m68k_btop(va - KERNBASE3X); |
2169 | eidx = m68k_btop(va + len - KERNBASE3X); | | 2177 | eidx = m68k_btop(va + len - KERNBASE3X); |
2170 | | | 2178 | |
2171 | while (idx < eidx) { | | 2179 | while (idx < eidx) { |
2172 | kernCbase[idx++].attr.raw = MMU_DT_INVALID; | | 2180 | kernCbase[idx++].attr.raw = MMU_DT_INVALID; |
2173 | TBIS(va); | | 2181 | TBIS(va); |
2174 | va += PAGE_SIZE; | | 2182 | va += PAGE_SIZE; |
2175 | } | | 2183 | } |
2176 | } | | 2184 | } |
2177 | | | 2185 | |
2178 | /* pmap_map INTERNAL | | 2186 | /* pmap_map INTERNAL |
2179 | ** | | 2187 | ** |
2180 | * Map a contiguous range of physical memory into a contiguous range of | | 2188 | * Map a contiguous range of physical memory into a contiguous range of |
2181 | * the kernel virtual address space. | | 2189 | * the kernel virtual address space. |
2182 | * | | 2190 | * |
2183 | * Used for device mappings and early mapping of the kernel text/data/bss. | | 2191 | * Used for device mappings and early mapping of the kernel text/data/bss. |
2184 | * Returns the first virtual address beyond the end of the range. | | 2192 | * Returns the first virtual address beyond the end of the range. |
2185 | */ | | 2193 | */ |
2186 | vaddr_t | | 2194 | vaddr_t |
2187 | pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot) | | 2195 | pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot) |
2188 | { | | 2196 | { |
2189 | int sz; | | 2197 | int sz; |
2190 | | | 2198 | |
2191 | sz = endpa - pa; | | 2199 | sz = endpa - pa; |
2192 | do { | | 2200 | do { |
2193 | pmap_enter_kernel(va, pa, prot); | | 2201 | pmap_enter_kernel(va, pa, prot); |
2194 | va += PAGE_SIZE; | | 2202 | va += PAGE_SIZE; |
2195 | pa += PAGE_SIZE; | | 2203 | pa += PAGE_SIZE; |
2196 | sz -= PAGE_SIZE; | | 2204 | sz -= PAGE_SIZE; |
2197 | } while (sz > 0); | | 2205 | } while (sz > 0); |
2198 | pmap_update(pmap_kernel()); | | 2206 | pmap_update(pmap_kernel()); |
2199 | return va; | | 2207 | return va; |
2200 | } | | 2208 | } |
2201 | | | 2209 | |
2202 | /* pmap_protect_kernel INTERNAL | | 2210 | /* pmap_protect_kernel INTERNAL |
2203 | ** | | 2211 | ** |
2204 | * Apply the given protection code to a kernel address range. | | 2212 | * Apply the given protection code to a kernel address range. |
2205 | */ | | 2213 | */ |
2206 | static INLINE void | | 2214 | static INLINE void |
2207 | pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot) | | 2215 | pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot) |
2208 | { | | 2216 | { |
2209 | vaddr_t va; | | 2217 | vaddr_t va; |
2210 | mmu_short_pte_t *pte; | | 2218 | mmu_short_pte_t *pte; |
2211 | | | 2219 | |
2212 | pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE3X)]; | | 2220 | pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE3X)]; |
2213 | for (va = startva; va < endva; va += PAGE_SIZE, pte++) { | | 2221 | for (va = startva; va < endva; va += PAGE_SIZE, pte++) { |
2214 | if (MMU_VALID_DT(*pte)) { | | 2222 | if (MMU_VALID_DT(*pte)) { |
2215 | switch (prot) { | | 2223 | switch (prot) { |
2216 | case VM_PROT_ALL: | | 2224 | case VM_PROT_ALL: |
2217 | break; | | 2225 | break; |
2218 | case VM_PROT_EXECUTE: | | 2226 | case VM_PROT_EXECUTE: |
2219 | case VM_PROT_READ: | | 2227 | case VM_PROT_READ: |
2220 | case VM_PROT_READ|VM_PROT_EXECUTE: | | 2228 | case VM_PROT_READ|VM_PROT_EXECUTE: |
2221 | pte->attr.raw |= MMU_SHORT_PTE_WP; | | 2229 | pte->attr.raw |= MMU_SHORT_PTE_WP; |
2222 | break; | | 2230 | break; |
2223 | case VM_PROT_NONE: | | 2231 | case VM_PROT_NONE: |
2224 | /* this is an alias for 'pmap_remove_kernel' */ | | 2232 | /* this is an alias for 'pmap_remove_kernel' */ |
2225 | pmap_remove_pte(pte); | | 2233 | pmap_remove_pte(pte); |
2226 | break; | | 2234 | break; |
2227 | default: | | 2235 | default: |
2228 | break; | | 2236 | break; |
2229 | } | | 2237 | } |
2230 | /* | | 2238 | /* |
2231 | * since this is the kernel, immediately flush any cached | | 2239 | * since this is the kernel, immediately flush any cached |
2232 | * descriptors for this address. | | 2240 | * descriptors for this address. |
2233 | */ | | 2241 | */ |
2234 | TBIS(va); | | 2242 | TBIS(va); |
2235 | } | | 2243 | } |
2236 | } | | 2244 | } |
2237 | } | | 2245 | } |
2238 | | | 2246 | |
2239 | /* pmap_protect INTERFACE | | 2247 | /* pmap_protect INTERFACE |
2240 | ** | | 2248 | ** |
2241 | * Apply the given protection to the given virtual address range within | | 2249 | * Apply the given protection to the given virtual address range within |
2242 | * the given map. | | 2250 | * the given map. |
2243 | * | | 2251 | * |
2244 | * It is ok for the protection applied to be stronger than what is | | 2252 | * It is ok for the protection applied to be stronger than what is |
2245 | * specified. We use this to our advantage when the given map has no | | 2253 | * specified. We use this to our advantage when the given map has no |
2246 | * mapping for the virtual address. By skipping a page when this | | 2254 | * mapping for the virtual address. By skipping a page when this |
2247 | * is discovered, we are effectively applying a protection of VM_PROT_NONE, | | 2255 | * is discovered, we are effectively applying a protection of VM_PROT_NONE, |
2248 | * and therefore do not need to map the page just to apply a protection | | 2256 | * and therefore do not need to map the page just to apply a protection |
2249 | * code. Only pmap_enter() needs to create new mappings if they do not exist. | | 2257 | * code. Only pmap_enter() needs to create new mappings if they do not exist. |
2250 | * | | 2258 | * |
2251 | * XXX - This function could be speeded up by using pmap_stroll() for initial | | 2259 | * XXX - This function could be speeded up by using pmap_stroll() for initial |
2252 | * setup, and then manual scrolling in the for() loop. | | 2260 | * setup, and then manual scrolling in the for() loop. |
2253 | */ | | 2261 | */ |
2254 | void | | 2262 | void |
2255 | pmap_protect(pmap_t pmap, vaddr_t startva, vaddr_t endva, vm_prot_t prot) | | 2263 | pmap_protect(pmap_t pmap, vaddr_t startva, vaddr_t endva, vm_prot_t prot) |
2256 | { | | 2264 | { |
2257 | bool iscurpmap; | | 2265 | bool iscurpmap; |
2258 | int a_idx, b_idx, c_idx; | | 2266 | int a_idx, b_idx, c_idx; |
2259 | a_tmgr_t *a_tbl; | | 2267 | a_tmgr_t *a_tbl; |
2260 | b_tmgr_t *b_tbl; | | 2268 | b_tmgr_t *b_tbl; |
2261 | c_tmgr_t *c_tbl; | | 2269 | c_tmgr_t *c_tbl; |
2262 | mmu_short_pte_t *pte; | | 2270 | mmu_short_pte_t *pte; |
2263 | | | 2271 | |
2264 | if (pmap == pmap_kernel()) { | | 2272 | if (pmap == pmap_kernel()) { |
2265 | pmap_protect_kernel(startva, endva, prot); | | 2273 | pmap_protect_kernel(startva, endva, prot); |
2266 | return; | | 2274 | return; |
2267 | } | | 2275 | } |
2268 | | | 2276 | |
2269 | /* | | 2277 | /* |
2270 | * In this particular pmap implementation, there are only three | | 2278 | * In this particular pmap implementation, there are only three |
2271 | * types of memory protection: 'all' (read/write/execute), | | 2279 | * types of memory protection: 'all' (read/write/execute), |
2272 | * 'read-only' (read/execute) and 'none' (no mapping.) | | 2280 | * 'read-only' (read/execute) and 'none' (no mapping.) |
2273 | * It is not possible for us to treat 'executable' as a separate | | 2281 | * It is not possible for us to treat 'executable' as a separate |
2274 | * protection type. Therefore, protection requests that seek to | | 2282 | * protection type. Therefore, protection requests that seek to |
2275 | * remove execute permission while retaining read or write, and those | | 2283 | * remove execute permission while retaining read or write, and those |
2276 | * that make little sense (write-only for example) are ignored. | | 2284 | * that make little sense (write-only for example) are ignored. |
2277 | */ | | 2285 | */ |
2278 | switch (prot) { | | 2286 | switch (prot) { |
2279 | case VM_PROT_NONE: | | 2287 | case VM_PROT_NONE: |
2280 | /* | | 2288 | /* |
2281 | * A request to apply the protection code of | | 2289 | * A request to apply the protection code of |
2282 | * 'VM_PROT_NONE' is a synonym for pmap_remove(). | | 2290 | * 'VM_PROT_NONE' is a synonym for pmap_remove(). |
2283 | */ | | 2291 | */ |
2284 | pmap_remove(pmap, startva, endva); | | 2292 | pmap_remove(pmap, startva, endva); |
2285 | return; | | 2293 | return; |
2286 | case VM_PROT_EXECUTE: | | 2294 | case VM_PROT_EXECUTE: |
2287 | case VM_PROT_READ: | | 2295 | case VM_PROT_READ: |
2288 | case VM_PROT_READ|VM_PROT_EXECUTE: | | 2296 | case VM_PROT_READ|VM_PROT_EXECUTE: |
2289 | /* continue */ | | 2297 | /* continue */ |
2290 | break; | | 2298 | break; |
2291 | case VM_PROT_WRITE: | | 2299 | case VM_PROT_WRITE: |
2292 | case VM_PROT_WRITE|VM_PROT_READ: | | 2300 | case VM_PROT_WRITE|VM_PROT_READ: |
2293 | case VM_PROT_WRITE|VM_PROT_EXECUTE: | | 2301 | case VM_PROT_WRITE|VM_PROT_EXECUTE: |
2294 | case VM_PROT_ALL: | | 2302 | case VM_PROT_ALL: |
2295 | /* None of these should happen in a sane system. */ | | 2303 | /* None of these should happen in a sane system. */ |
2296 | return; | | 2304 | return; |
2297 | } | | 2305 | } |
2298 | | | 2306 | |
2299 | /* | | 2307 | /* |
2300 | * If the pmap has no A table, it has no mappings and therefore | | 2308 | * If the pmap has no A table, it has no mappings and therefore |
2301 | * there is nothing to protect. | | 2309 | * there is nothing to protect. |
2302 | */ | | 2310 | */ |
2303 | if ((a_tbl = pmap->pm_a_tmgr) == NULL) | | 2311 | if ((a_tbl = pmap->pm_a_tmgr) == NULL) |
2304 | return; | | 2312 | return; |
2305 | | | 2313 | |
2306 | a_idx = MMU_TIA(startva); | | 2314 | a_idx = MMU_TIA(startva); |
2307 | b_idx = MMU_TIB(startva); | | 2315 | b_idx = MMU_TIB(startva); |
2308 | c_idx = MMU_TIC(startva); | | 2316 | c_idx = MMU_TIC(startva); |
2309 | b_tbl = NULL; | | 2317 | b_tbl = NULL; |
2310 | c_tbl = NULL; | | 2318 | c_tbl = NULL; |
2311 | | | 2319 | |
2312 | iscurpmap = (pmap == current_pmap()); | | 2320 | iscurpmap = (pmap == current_pmap()); |
2313 | while (startva < endva) { | | 2321 | while (startva < endva) { |
2314 | if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) { | | 2322 | if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) { |
2315 | if (b_tbl == NULL) { | | 2323 | if (b_tbl == NULL) { |
2316 | b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw; | | 2324 | b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw; |
2317 | b_tbl = mmu_ptov((vaddr_t)b_tbl); | | 2325 | b_tbl = mmu_ptov((vaddr_t)b_tbl); |
2318 | b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl); | | 2326 | b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl); |
2319 | } | | 2327 | } |
2320 | if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) { | | 2328 | if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) { |
2321 | if (c_tbl == NULL) { | | 2329 | if (c_tbl == NULL) { |
2322 | c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]); | | 2330 | c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]); |
2323 | c_tbl = mmu_ptov((vaddr_t)c_tbl); | | 2331 | c_tbl = mmu_ptov((vaddr_t)c_tbl); |
2324 | c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl); | | 2332 | c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl); |
2325 | } | | 2333 | } |
2326 | if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) { | | 2334 | if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) { |
2327 | pte = &c_tbl->ct_dtbl[c_idx]; | | 2335 | pte = &c_tbl->ct_dtbl[c_idx]; |
2328 | /* make the mapping read-only */ | | 2336 | /* make the mapping read-only */ |
2329 | pte->attr.raw |= MMU_SHORT_PTE_WP; | | 2337 | pte->attr.raw |= MMU_SHORT_PTE_WP; |
2330 | /* | | 2338 | /* |
2331 | * If we just modified the current address space, | | 2339 | * If we just modified the current address space, |
2332 | * flush any translations for the modified page from | | 2340 | * flush any translations for the modified page from |
2333 | * the translation cache and any data from it in the | | 2341 | * the translation cache and any data from it in the |
2334 | * data cache. | | 2342 | * data cache. |
2335 | */ | | 2343 | */ |
2336 | if (iscurpmap) | | 2344 | if (iscurpmap) |
2337 | TBIS(startva); | | 2345 | TBIS(startva); |
2338 | } | | 2346 | } |
2339 | startva += PAGE_SIZE; | | 2347 | startva += PAGE_SIZE; |
2340 | | | 2348 | |
2341 | if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */ | | 2349 | if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */ |
2342 | c_tbl = NULL; | | 2350 | c_tbl = NULL; |
2343 | c_idx = 0; | | 2351 | c_idx = 0; |
2344 | if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */ | | 2352 | if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */ |
2345 | b_tbl = NULL; | | 2353 | b_tbl = NULL; |
2346 | b_idx = 0; | | 2354 | b_idx = 0; |
2347 | } | | 2355 | } |
2348 | } | | 2356 | } |
2349 | } else { /* C table wasn't valid */ | | 2357 | } else { /* C table wasn't valid */ |
2350 | c_tbl = NULL; | | 2358 | c_tbl = NULL; |
2351 | c_idx = 0; | | 2359 | c_idx = 0; |
2352 | startva += MMU_TIB_RANGE; | | 2360 | startva += MMU_TIB_RANGE; |
2353 | if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */ | | 2361 | if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */ |
2354 | b_tbl = NULL; | | 2362 | b_tbl = NULL; |
2355 | b_idx = 0; | | 2363 | b_idx = 0; |
2356 | } | | 2364 | } |
2357 | } /* C table */ | | 2365 | } /* C table */ |
2358 | } else { /* B table wasn't valid */ | | 2366 | } else { /* B table wasn't valid */ |
2359 | b_tbl = NULL; | | 2367 | b_tbl = NULL; |
2360 | b_idx = 0; | | 2368 | b_idx = 0; |
2361 | startva += MMU_TIA_RANGE; | | 2369 | startva += MMU_TIA_RANGE; |
2362 | a_idx++; | | 2370 | a_idx++; |
2363 | } /* B table */ | | 2371 | } /* B table */ |
2364 | } | | 2372 | } |
2365 | } | | 2373 | } |
2366 | | | 2374 | |
2367 | /* pmap_unwire INTERFACE | | 2375 | /* pmap_unwire INTERFACE |
2368 | ** | | 2376 | ** |
2369 | * Clear the wired attribute of the specified page. | | 2377 | * Clear the wired attribute of the specified page. |
2370 | * | | 2378 | * |
2371 | * This function is called from vm_fault.c to unwire | | 2379 | * This function is called from vm_fault.c to unwire |
2372 | * a mapping. | | 2380 | * a mapping. |
2373 | */ | | 2381 | */ |
2374 | void | | 2382 | void |
2375 | pmap_unwire(pmap_t pmap, vaddr_t va) | | 2383 | pmap_unwire(pmap_t pmap, vaddr_t va) |
2376 | { | | 2384 | { |
2377 | int a_idx, b_idx, c_idx; | | 2385 | int a_idx, b_idx, c_idx; |
2378 | a_tmgr_t *a_tbl; | | 2386 | a_tmgr_t *a_tbl; |
2379 | b_tmgr_t *b_tbl; | | 2387 | b_tmgr_t *b_tbl; |
2380 | c_tmgr_t *c_tbl; | | 2388 | c_tmgr_t *c_tbl; |
2381 | mmu_short_pte_t *pte; | | 2389 | mmu_short_pte_t *pte; |
2382 | | | 2390 | |
2383 | /* Kernel mappings always remain wired. */ | | 2391 | /* Kernel mappings always remain wired. */ |
2384 | if (pmap == pmap_kernel()) | | 2392 | if (pmap == pmap_kernel()) |
2385 | return; | | 2393 | return; |
2386 | | | 2394 | |
2387 | /* | | 2395 | /* |
2388 | * Walk through the tables. If the walk terminates without | | 2396 | * Walk through the tables. If the walk terminates without |
2389 | * a valid PTE then the address wasn't wired in the first place. | | 2397 | * a valid PTE then the address wasn't wired in the first place. |
2390 | * Return immediately. | | 2398 | * Return immediately. |
2391 | */ | | 2399 | */ |
2392 | if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx, | | 2400 | if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx, |
2393 | &b_idx, &c_idx) == false) | | 2401 | &b_idx, &c_idx) == false) |
2394 | return; | | 2402 | return; |
2395 | | | 2403 | |
2396 | | | 2404 | |
2397 | /* Is the PTE wired? If not, return. */ | | 2405 | /* Is the PTE wired? If not, return. */ |
2398 | if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED)) | | 2406 | if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED)) |
2399 | return; | | 2407 | return; |
2400 | | | 2408 | |
2401 | /* Remove the wiring bit. */ | | 2409 | /* Remove the wiring bit. */ |
2402 | pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED); | | 2410 | pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED); |
2403 | | | 2411 | |
2404 | /* | | 2412 | /* |
2405 | * Decrement the wired entry count in the C table. | | 2413 | * Decrement the wired entry count in the C table. |
2406 | * If it reaches zero the following things happen: | | 2414 | * If it reaches zero the following things happen: |
2407 | * 1. The table no longer has any wired entries and is considered | | 2415 | * 1. The table no longer has any wired entries and is considered |
2408 | * unwired. | | 2416 | * unwired. |
2409 | * 2. It is placed on the available queue. | | 2417 | * 2. It is placed on the available queue. |
2410 | * 3. The parent table's wired entry count is decremented. | | 2418 | * 3. The parent table's wired entry count is decremented. |
2411 | * 4. If it reaches zero, this process repeats at step 1 and | | 2419 | * 4. If it reaches zero, this process repeats at step 1 and |
2412 | * stops at after reaching the A table. | | 2420 | * stops at after reaching the A table. |
2413 | */ | | 2421 | */ |
2414 | if (--c_tbl->ct_wcnt == 0) { | | 2422 | if (--c_tbl->ct_wcnt == 0) { |
2415 | TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link); | | 2423 | TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link); |
2416 | if (--b_tbl->bt_wcnt == 0) { | | 2424 | if (--b_tbl->bt_wcnt == 0) { |
2417 | TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link); | | 2425 | TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link); |
2418 | if (--a_tbl->at_wcnt == 0) { | | 2426 | if (--a_tbl->at_wcnt == 0) { |
2419 | TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link); | | 2427 | TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link); |
2420 | } | | 2428 | } |
2421 | } | | 2429 | } |
2422 | } | | 2430 | } |
2423 | } | | 2431 | } |
2424 | | | 2432 | |
2425 | /* pmap_copy INTERFACE | | 2433 | /* pmap_copy INTERFACE |
2426 | ** | | 2434 | ** |
2427 | * Copy the mappings of a range of addresses in one pmap, into | | 2435 | * Copy the mappings of a range of addresses in one pmap, into |
2428 | * the destination address of another. | | 2436 | * the destination address of another. |
2429 | * | | 2437 | * |
2430 | * This routine is advisory. Should we one day decide that MMU tables | | 2438 | * This routine is advisory. Should we one day decide that MMU tables |
2431 | * may be shared by more than one pmap, this function should be used to | | 2439 | * may be shared by more than one pmap, this function should be used to |
2432 | * link them together. Until that day however, we do nothing. | | 2440 | * link them together. Until that day however, we do nothing. |
2433 | */ | | 2441 | */ |
2434 | void | | 2442 | void |
2435 | pmap_copy(pmap_t pmap_a, pmap_t pmap_b, vaddr_t dst, vsize_t len, vaddr_t src) | | 2443 | pmap_copy(pmap_t pmap_a, pmap_t pmap_b, vaddr_t dst, vsize_t len, vaddr_t src) |
2436 | { | | 2444 | { |
2437 | | | 2445 | |
2438 | /* not implemented. */ | | 2446 | /* not implemented. */ |
2439 | } | | 2447 | } |
2440 | | | 2448 | |
2441 | /* pmap_copy_page INTERFACE | | 2449 | /* pmap_copy_page INTERFACE |
2442 | ** | | 2450 | ** |
2443 | * Copy the contents of one physical page into another. | | 2451 | * Copy the contents of one physical page into another. |
2444 | * | | 2452 | * |
2445 | * This function makes use of two virtual pages allocated in pmap_bootstrap() | | 2453 | * This function makes use of two virtual pages allocated in pmap_bootstrap() |
2446 | * to map the two specified physical pages into the kernel address space. | | 2454 | * to map the two specified physical pages into the kernel address space. |
2447 | * | | 2455 | * |
2448 | * Note: We could use the transparent translation registers to make the | | 2456 | * Note: We could use the transparent translation registers to make the |
2449 | * mappings. If we do so, be sure to disable interrupts before using them. | | 2457 | * mappings. If we do so, be sure to disable interrupts before using them. |
2450 | */ | | 2458 | */ |
2451 | void | | 2459 | void |
2452 | pmap_copy_page(paddr_t srcpa, paddr_t dstpa) | | 2460 | pmap_copy_page(paddr_t srcpa, paddr_t dstpa) |
2453 | { | | 2461 | { |
2454 | vaddr_t srcva, dstva; | | 2462 | vaddr_t srcva, dstva; |
2455 | int s; | | 2463 | int s; |
2456 | | | 2464 | |
2457 | srcva = tmp_vpages[0]; | | 2465 | srcva = tmp_vpages[0]; |
2458 | dstva = tmp_vpages[1]; | | 2466 | dstva = tmp_vpages[1]; |
2459 | | | 2467 | |
2460 | s = splvm(); | | 2468 | s = splvm(); |
2461 | #ifdef DIAGNOSTIC | | 2469 | #ifdef DIAGNOSTIC |
2462 | if (tmp_vpages_inuse++) | | 2470 | if (tmp_vpages_inuse++) |
2463 | panic("pmap_copy_page: temporary vpages are in use."); | | 2471 | panic("pmap_copy_page: temporary vpages are in use."); |
2464 | #endif | | 2472 | #endif |
2465 | | | 2473 | |
2466 | /* Map pages as non-cacheable to avoid cache pollution? */ | | 2474 | /* Map pages as non-cacheable to avoid cache pollution? */ |
2467 | pmap_kenter_pa(srcva, srcpa, VM_PROT_READ, 0); | | 2475 | pmap_kenter_pa(srcva, srcpa, VM_PROT_READ, 0); |
2468 | pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE, 0); | | 2476 | pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE, 0); |
2469 | | | 2477 | |
2470 | /* Hand-optimized version of memcpy(dst, src, PAGE_SIZE) */ | | 2478 | /* Hand-optimized version of memcpy(dst, src, PAGE_SIZE) */ |
2471 | copypage((char *)srcva, (char *)dstva); | | 2479 | copypage((char *)srcva, (char *)dstva); |
2472 | | | 2480 | |
2473 | pmap_kremove(srcva, PAGE_SIZE); | | 2481 | pmap_kremove(srcva, PAGE_SIZE); |
2474 | pmap_kremove(dstva, PAGE_SIZE); | | 2482 | pmap_kremove(dstva, PAGE_SIZE); |
2475 | | | 2483 | |
2476 | #ifdef DIAGNOSTIC | | 2484 | #ifdef DIAGNOSTIC |
2477 | --tmp_vpages_inuse; | | 2485 | --tmp_vpages_inuse; |
2478 | #endif | | 2486 | #endif |
2479 | splx(s); | | 2487 | splx(s); |
2480 | } | | 2488 | } |
2481 | | | 2489 | |
2482 | /* pmap_zero_page INTERFACE | | 2490 | /* pmap_zero_page INTERFACE |
2483 | ** | | 2491 | ** |
2484 | * Zero the contents of the specified physical page. | | 2492 | * Zero the contents of the specified physical page. |
2485 | * | | 2493 | * |
2486 | * Uses one of the virtual pages allocated in pmap_bootstrap() | | 2494 | * Uses one of the virtual pages allocated in pmap_bootstrap() |
2487 | * to map the specified page into the kernel address space. | | 2495 | * to map the specified page into the kernel address space. |
2488 | */ | | 2496 | */ |
2489 | void | | 2497 | void |
2490 | pmap_zero_page(paddr_t dstpa) | | 2498 | pmap_zero_page(paddr_t dstpa) |
2491 | { | | 2499 | { |
2492 | vaddr_t dstva; | | 2500 | vaddr_t dstva; |
2493 | int s; | | 2501 | int s; |
2494 | | | 2502 | |
2495 | dstva = tmp_vpages[1]; | | 2503 | dstva = tmp_vpages[1]; |
2496 | s = splvm(); | | 2504 | s = splvm(); |
2497 | #ifdef DIAGNOSTIC | | 2505 | #ifdef DIAGNOSTIC |
2498 | if (tmp_vpages_inuse++) | | 2506 | if (tmp_vpages_inuse++) |
2499 | panic("pmap_zero_page: temporary vpages are in use."); | | 2507 | panic("pmap_zero_page: temporary vpages are in use."); |
2500 | #endif | | 2508 | #endif |
2501 | | | 2509 | |
2502 | /* The comments in pmap_copy_page() above apply here also. */ | | 2510 | /* The comments in pmap_copy_page() above apply here also. */ |
2503 | pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE, 0); | | 2511 | pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE, 0); |
2504 | | | 2512 | |
2505 | /* Hand-optimized version of memset(ptr, 0, PAGE_SIZE) */ | | 2513 | /* Hand-optimized version of memset(ptr, 0, PAGE_SIZE) */ |
2506 | zeropage((char *)dstva); | | 2514 | zeropage((char *)dstva); |
2507 | | | 2515 | |
2508 | pmap_kremove(dstva, PAGE_SIZE); | | 2516 | pmap_kremove(dstva, PAGE_SIZE); |
2509 | #ifdef DIAGNOSTIC | | 2517 | #ifdef DIAGNOSTIC |
2510 | --tmp_vpages_inuse; | | 2518 | --tmp_vpages_inuse; |
2511 | #endif | | 2519 | #endif |
2512 | splx(s); | | 2520 | splx(s); |
2513 | } | | 2521 | } |
2514 | | | 2522 | |
2515 | /* pmap_pinit INTERNAL | | 2523 | /* pmap_pinit INTERNAL |
2516 | ** | | 2524 | ** |
2517 | * Initialize a pmap structure. | | 2525 | * Initialize a pmap structure. |
2518 | */ | | 2526 | */ |
2519 | static INLINE void | | 2527 | static INLINE void |
2520 | pmap_pinit(pmap_t pmap) | | 2528 | pmap_pinit(pmap_t pmap) |
2521 | { | | 2529 | { |
2522 | | | 2530 | |
2523 | memset(pmap, 0, sizeof(struct pmap)); | | 2531 | memset(pmap, 0, sizeof(struct pmap)); |
2524 | pmap->pm_a_tmgr = NULL; | | 2532 | pmap->pm_a_tmgr = NULL; |
2525 | pmap->pm_a_phys = kernAphys; | | 2533 | pmap->pm_a_phys = kernAphys; |
2526 | pmap->pm_refcount = 1; | | 2534 | pmap->pm_refcount = 1; |
2527 | } | | 2535 | } |
2528 | | | 2536 | |
2529 | /* pmap_create INTERFACE | | 2537 | /* pmap_create INTERFACE |
2530 | ** | | 2538 | ** |
2531 | * Create and return a pmap structure. | | 2539 | * Create and return a pmap structure. |
2532 | */ | | 2540 | */ |
2533 | pmap_t | | 2541 | pmap_t |
2534 | pmap_create(void) | | 2542 | pmap_create(void) |
2535 | { | | 2543 | { |
2536 | pmap_t pmap; | | 2544 | pmap_t pmap; |
2537 | | | 2545 | |
2538 | pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); | | 2546 | pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); |
2539 | pmap_pinit(pmap); | | 2547 | pmap_pinit(pmap); |
2540 | return pmap; | | 2548 | return pmap; |
2541 | } | | 2549 | } |
2542 | | | 2550 | |
2543 | /* pmap_release INTERNAL | | 2551 | /* pmap_release INTERNAL |
2544 | ** | | 2552 | ** |
2545 | * Release any resources held by the given pmap. | | 2553 | * Release any resources held by the given pmap. |
2546 | * | | 2554 | * |
2547 | * This is the reverse analog to pmap_pinit. It does not | | 2555 | * This is the reverse analog to pmap_pinit. It does not |
2548 | * necessarily mean for the pmap structure to be deallocated, | | 2556 | * necessarily mean for the pmap structure to be deallocated, |
2549 | * as in pmap_destroy. | | 2557 | * as in pmap_destroy. |
2550 | */ | | 2558 | */ |
2551 | static INLINE void | | 2559 | static INLINE void |
2552 | pmap_release(pmap_t pmap) | | 2560 | pmap_release(pmap_t pmap) |
2553 | { | | 2561 | { |
2554 | | | 2562 | |
2555 | /* | | 2563 | /* |
2556 | * As long as the pmap contains no mappings, | | 2564 | * As long as the pmap contains no mappings, |
2557 | * which always should be the case whenever | | 2565 | * which always should be the case whenever |
2558 | * this function is called, there really should | | 2566 | * this function is called, there really should |
2559 | * be nothing to do. | | 2567 | * be nothing to do. |
2560 | */ | | 2568 | */ |
2561 | #ifdef PMAP_DEBUG | | 2569 | #ifdef PMAP_DEBUG |
2562 | if (pmap == pmap_kernel()) | | 2570 | if (pmap == pmap_kernel()) |
2563 | panic("pmap_release: kernel pmap"); | | 2571 | panic("pmap_release: kernel pmap"); |
2564 | #endif | | 2572 | #endif |
2565 | /* | | 2573 | /* |
2566 | * XXX - If this pmap has an A table, give it back. | | 2574 | * XXX - If this pmap has an A table, give it back. |
2567 | * The pmap SHOULD be empty by now, and pmap_remove | | 2575 | * The pmap SHOULD be empty by now, and pmap_remove |
2568 | * should have already given back the A table... | | 2576 | * should have already given back the A table... |
2569 | * However, I see: pmap->pm_a_tmgr->at_ecnt == 1 | | 2577 | * However, I see: pmap->pm_a_tmgr->at_ecnt == 1 |
2570 | * at this point, which means some mapping was not | | 2578 | * at this point, which means some mapping was not |
2571 | * removed when it should have been. -gwr | | 2579 | * removed when it should have been. -gwr |
2572 | */ | | 2580 | */ |
2573 | if (pmap->pm_a_tmgr != NULL) { | | 2581 | if (pmap->pm_a_tmgr != NULL) { |
2574 | /* First make sure we are not using it! */ | | 2582 | /* First make sure we are not using it! */ |
2575 | if (kernel_crp.rp_addr == pmap->pm_a_phys) { | | 2583 | if (kernel_crp.rp_addr == pmap->pm_a_phys) { |
2576 | kernel_crp.rp_addr = kernAphys; | | 2584 | kernel_crp.rp_addr = kernAphys; |
2577 | loadcrp(&kernel_crp); | | 2585 | loadcrp(&kernel_crp); |
2578 | } | | 2586 | } |
2579 | #ifdef PMAP_DEBUG /* XXX - todo! */ | | 2587 | #ifdef PMAP_DEBUG /* XXX - todo! */ |
2580 | /* XXX - Now complain... */ | | 2588 | /* XXX - Now complain... */ |
2581 | printf("pmap_release: still have table\n"); | | 2589 | printf("pmap_release: still have table\n"); |
2582 | Debugger(); | | 2590 | Debugger(); |
2583 | #endif | | 2591 | #endif |
2584 | free_a_table(pmap->pm_a_tmgr, true); | | 2592 | free_a_table(pmap->pm_a_tmgr, true); |
2585 | pmap->pm_a_tmgr = NULL; | | 2593 | pmap->pm_a_tmgr = NULL; |
2586 | pmap->pm_a_phys = kernAphys; | | 2594 | pmap->pm_a_phys = kernAphys; |
2587 | } | | 2595 | } |
2588 | } | | 2596 | } |
2589 | | | 2597 | |
2590 | /* pmap_reference INTERFACE | | 2598 | /* pmap_reference INTERFACE |
2591 | ** | | 2599 | ** |
2592 | * Increment the reference count of a pmap. | | 2600 | * Increment the reference count of a pmap. |
2593 | */ | | 2601 | */ |
2594 | void | | 2602 | void |
2595 | pmap_reference(pmap_t pmap) | | 2603 | pmap_reference(pmap_t pmap) |
2596 | { | | 2604 | { |
2597 | | | 2605 | |
2598 | atomic_inc_uint(&pmap->pm_refcount); | | 2606 | atomic_inc_uint(&pmap->pm_refcount); |
2599 | } | | 2607 | } |
2600 | | | 2608 | |
2601 | /* pmap_dereference INTERNAL | | 2609 | /* pmap_dereference INTERNAL |
2602 | ** | | 2610 | ** |
2603 | * Decrease the reference count on the given pmap | | 2611 | * Decrease the reference count on the given pmap |
2604 | * by one and return the current count. | | 2612 | * by one and return the current count. |
2605 | */ | | 2613 | */ |
2606 | static INLINE int | | 2614 | static INLINE int |
2607 | pmap_dereference(pmap_t pmap) | | 2615 | pmap_dereference(pmap_t pmap) |
2608 | { | | 2616 | { |
2609 | int rtn; | | 2617 | int rtn; |
2610 | | | 2618 | |
2611 | rtn = atomic_dec_uint_nv(&pmap->pm_refcount); | | 2619 | rtn = atomic_dec_uint_nv(&pmap->pm_refcount); |
2612 | | | 2620 | |
2613 | return rtn; | | 2621 | return rtn; |
2614 | } | | 2622 | } |
2615 | | | 2623 | |
2616 | /* pmap_destroy INTERFACE | | 2624 | /* pmap_destroy INTERFACE |
2617 | ** | | 2625 | ** |
2618 | * Decrement a pmap's reference count and delete | | 2626 | * Decrement a pmap's reference count and delete |
2619 | * the pmap if it becomes zero. Will be called | | 2627 | * the pmap if it becomes zero. Will be called |
2620 | * only after all mappings have been removed. | | 2628 | * only after all mappings have been removed. |
2621 | */ | | 2629 | */ |
2622 | void | | 2630 | void |
2623 | pmap_destroy(pmap_t pmap) | | 2631 | pmap_destroy(pmap_t pmap) |
2624 | { | | 2632 | { |
2625 | | | 2633 | |
2626 | if (pmap_dereference(pmap) == 0) { | | 2634 | if (pmap_dereference(pmap) == 0) { |
2627 | pmap_release(pmap); | | 2635 | pmap_release(pmap); |
2628 | pool_put(&pmap_pmap_pool, pmap); | | 2636 | pool_put(&pmap_pmap_pool, pmap); |
2629 | } | | 2637 | } |
2630 | } | | 2638 | } |
2631 | | | 2639 | |
2632 | /* pmap_is_referenced INTERFACE | | 2640 | /* pmap_is_referenced INTERFACE |
2633 | ** | | 2641 | ** |
2634 | * Determine if the given physical page has been | | 2642 | * Determine if the given physical page has been |
2635 | * referenced (read from [or written to.]) | | 2643 | * referenced (read from [or written to.]) |
2636 | */ | | 2644 | */ |
2637 | bool | | 2645 | bool |
2638 | pmap_is_referenced(struct vm_page *pg) | | 2646 | pmap_is_referenced(struct vm_page *pg) |
2639 | { | | 2647 | { |
2640 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 2648 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
2641 | pv_t *pv; | | 2649 | pv_t *pv; |
2642 | int idx; | | 2650 | int idx; |
2643 | | | 2651 | |
2644 | /* | | 2652 | /* |
2645 | * Check the flags on the pv head. If they are set, | | 2653 | * Check the flags on the pv head. If they are set, |
2646 | * return immediately. Otherwise a search must be done. | | 2654 | * return immediately. Otherwise a search must be done. |
2647 | */ | | 2655 | */ |
2648 | | | 2656 | |
2649 | pv = pa2pv(pa); | | 2657 | pv = pa2pv(pa); |
2650 | if (pv->pv_flags & PV_FLAGS_USED) | | 2658 | if (pv->pv_flags & PV_FLAGS_USED) |
2651 | return true; | | 2659 | return true; |
2652 | | | 2660 | |
2653 | /* | | 2661 | /* |
2654 | * Search through all pv elements pointing | | 2662 | * Search through all pv elements pointing |
2655 | * to this page and query their reference bits | | 2663 | * to this page and query their reference bits |
2656 | */ | | 2664 | */ |
2657 | | | 2665 | |
2658 | for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) { | | 2666 | for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) { |
2659 | if (MMU_PTE_USED(kernCbase[idx])) { | | 2667 | if (MMU_PTE_USED(kernCbase[idx])) { |
2660 | return true; | | 2668 | return true; |
2661 | } | | 2669 | } |
2662 | } | | 2670 | } |
2663 | return false; | | 2671 | return false; |
2664 | } | | 2672 | } |
2665 | | | 2673 | |
2666 | /* pmap_is_modified INTERFACE | | 2674 | /* pmap_is_modified INTERFACE |
2667 | ** | | 2675 | ** |
2668 | * Determine if the given physical page has been | | 2676 | * Determine if the given physical page has been |
2669 | * modified (written to.) | | 2677 | * modified (written to.) |
2670 | */ | | 2678 | */ |
2671 | bool | | 2679 | bool |
2672 | pmap_is_modified(struct vm_page *pg) | | 2680 | pmap_is_modified(struct vm_page *pg) |
2673 | { | | 2681 | { |
2674 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 2682 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
2675 | pv_t *pv; | | 2683 | pv_t *pv; |
2676 | int idx; | | 2684 | int idx; |
2677 | | | 2685 | |
2678 | /* see comments in pmap_is_referenced() */ | | 2686 | /* see comments in pmap_is_referenced() */ |
2679 | pv = pa2pv(pa); | | 2687 | pv = pa2pv(pa); |
2680 | if (pv->pv_flags & PV_FLAGS_MDFY) | | 2688 | if (pv->pv_flags & PV_FLAGS_MDFY) |
2681 | return true; | | 2689 | return true; |
2682 | | | 2690 | |
2683 | for (idx = pv->pv_idx; | | 2691 | for (idx = pv->pv_idx; |
2684 | idx != PVE_EOL; | | 2692 | idx != PVE_EOL; |
2685 | idx = pvebase[idx].pve_next) { | | 2693 | idx = pvebase[idx].pve_next) { |
2686 | | | 2694 | |
2687 | if (MMU_PTE_MODIFIED(kernCbase[idx])) { | | 2695 | if (MMU_PTE_MODIFIED(kernCbase[idx])) { |
2688 | return true; | | 2696 | return true; |
2689 | } | | 2697 | } |
2690 | } | | 2698 | } |
2691 | | | 2699 | |
2692 | return false; | | 2700 | return false; |
2693 | } | | 2701 | } |
2694 | | | 2702 | |
/* pmap_page_protect INTERFACE
**
 * Applies the given protection to all mappings to the given
 * physical page.
 *
 * prot semantics, per the switch below:
 *   VM_PROT_ALL        - no restriction; nothing to do.
 *   read and/or exec   - write-protect every mapping.
 *   VM_PROT_NONE       - remove every mapping, preserving the
 *                        page's accumulated mod/ref bits in the
 *                        pv head flags.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	pv_t *pv;
	int idx;
	vaddr_t va;
	struct mmu_short_pte_struct *pte;
	c_tmgr_t *c_tbl;
	pmap_t pmap, curpmap;

	curpmap = current_pmap();
	pv = pa2pv(pa);

	/*
	 * Walk the pv list of every PTE currently mapping this page.
	 * Note the list links (pve_next) are not cleared per-entry;
	 * for VM_PROT_NONE the whole list is truncated at the end.
	 */
	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
		pte = &kernCbase[idx];
		switch (prot) {
		case VM_PROT_ALL:
			/* do nothing */
			break;
		case VM_PROT_EXECUTE:
		case VM_PROT_READ:
		case VM_PROT_READ|VM_PROT_EXECUTE:
			/*
			 * Determine the virtual address mapped by
			 * the PTE and flush ATC entries if necessary.
			 * Only the current pmap and the kernel pmap
			 * can have stale entries in the ATC.
			 */
			va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
			pte->attr.raw |= MMU_SHORT_PTE_WP;
			if (pmap == curpmap || pmap == pmap_kernel())
				TBIS(va);
			break;
		case VM_PROT_NONE:
			/*
			 * Save the mod/ref bits before the PTE is
			 * destroyed; pmap_is_modified/referenced and
			 * the clear routines read them from pv_flags.
			 */
			pv->pv_flags |= pte->attr.raw;
			/* Invalidate the PTE. */
			pte->attr.raw = MMU_DT_INVALID;

			/*
			 * Update table counts. And flush ATC entries
			 * if necessary.
			 */
			va = pmap_get_pteinfo(idx, &pmap, &c_tbl);

			/*
			 * If the PTE belongs to the kernel map,
			 * be sure to flush the page it maps.
			 */
			if (pmap == pmap_kernel()) {
				TBIS(va);
			} else {
				/*
				 * The PTE belongs to a user map.
				 * update the entry count in the C
				 * table to which it belongs and flush
				 * the ATC if the mapping belongs to
				 * the current pmap.
				 */
				c_tbl->ct_ecnt--;
				if (pmap == curpmap)
					TBIS(va);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * If the protection code indicates that all mappings to the page
	 * be removed, truncate the PV list to zero entries.
	 */
	if (prot == VM_PROT_NONE)
		pv->pv_idx = PVE_EOL;
}
2775 | | | 2783 | |
2776 | /* pmap_get_pteinfo INTERNAL | | 2784 | /* pmap_get_pteinfo INTERNAL |
2777 | ** | | 2785 | ** |
2778 | * Called internally to find the pmap and virtual address within that | | 2786 | * Called internally to find the pmap and virtual address within that |
2779 | * map to which the pte at the given index maps. Also includes the PTE's C | | 2787 | * map to which the pte at the given index maps. Also includes the PTE's C |
2780 | * table manager. | | 2788 | * table manager. |
2781 | * | | 2789 | * |
2782 | * Returns the pmap in the argument provided, and the virtual address | | 2790 | * Returns the pmap in the argument provided, and the virtual address |
2783 | * by return value. | | 2791 | * by return value. |
2784 | */ | | 2792 | */ |
2785 | vaddr_t | | 2793 | vaddr_t |
2786 | pmap_get_pteinfo(u_int idx, pmap_t *pmap, c_tmgr_t **tbl) | | 2794 | pmap_get_pteinfo(u_int idx, pmap_t *pmap, c_tmgr_t **tbl) |
2787 | { | | 2795 | { |
2788 | vaddr_t va = 0; | | 2796 | vaddr_t va = 0; |
2789 | | | 2797 | |
2790 | /* | | 2798 | /* |
2791 | * Determine if the PTE is a kernel PTE or a user PTE. | | 2799 | * Determine if the PTE is a kernel PTE or a user PTE. |
2792 | */ | | 2800 | */ |
2793 | if (idx >= NUM_KERN_PTES) { | | 2801 | if (idx >= NUM_KERN_PTES) { |
2794 | /* | | 2802 | /* |
2795 | * The PTE belongs to a user mapping. | | 2803 | * The PTE belongs to a user mapping. |
2796 | */ | | 2804 | */ |
2797 | /* XXX: Would like an inline for this to validate idx... */ | | 2805 | /* XXX: Would like an inline for this to validate idx... */ |
2798 | *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE]; | | 2806 | *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE]; |
2799 | | | 2807 | |
2800 | *pmap = (*tbl)->ct_pmap; | | 2808 | *pmap = (*tbl)->ct_pmap; |
2801 | /* | | 2809 | /* |
2802 | * To find the va to which the PTE maps, we first take | | 2810 | * To find the va to which the PTE maps, we first take |
2803 | * the table's base virtual address mapping which is stored | | 2811 | * the table's base virtual address mapping which is stored |
2804 | * in ct_va. We then increment this address by a page for | | 2812 | * in ct_va. We then increment this address by a page for |
2805 | * every slot skipped until we reach the PTE. | | 2813 | * every slot skipped until we reach the PTE. |
2806 | */ | | 2814 | */ |
2807 | va = (*tbl)->ct_va; | | 2815 | va = (*tbl)->ct_va; |
2808 | va += m68k_ptob(idx % MMU_C_TBL_SIZE); | | 2816 | va += m68k_ptob(idx % MMU_C_TBL_SIZE); |
2809 | } else { | | 2817 | } else { |
2810 | /* | | 2818 | /* |
2811 | * The PTE belongs to the kernel map. | | 2819 | * The PTE belongs to the kernel map. |
2812 | */ | | 2820 | */ |
2813 | *pmap = pmap_kernel(); | | 2821 | *pmap = pmap_kernel(); |
2814 | | | 2822 | |
2815 | va = m68k_ptob(idx); | | 2823 | va = m68k_ptob(idx); |
2816 | va += KERNBASE3X; | | 2824 | va += KERNBASE3X; |
2817 | } | | 2825 | } |
2818 | | | 2826 | |
2819 | return va; | | 2827 | return va; |
2820 | } | | 2828 | } |
2821 | | | 2829 | |
2822 | /* pmap_clear_modify INTERFACE | | 2830 | /* pmap_clear_modify INTERFACE |
2823 | ** | | 2831 | ** |
2824 | * Clear the modification bit on the page at the specified | | 2832 | * Clear the modification bit on the page at the specified |
2825 | * physical address. | | 2833 | * physical address. |
2826 | * | | 2834 | * |
2827 | */ | | 2835 | */ |
2828 | bool | | 2836 | bool |
2829 | pmap_clear_modify(struct vm_page *pg) | | 2837 | pmap_clear_modify(struct vm_page *pg) |
2830 | { | | 2838 | { |
2831 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 2839 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
2832 | bool rv; | | 2840 | bool rv; |
2833 | | | 2841 | |
2834 | rv = pmap_is_modified(pg); | | 2842 | rv = pmap_is_modified(pg); |
2835 | pmap_clear_pv(pa, PV_FLAGS_MDFY); | | 2843 | pmap_clear_pv(pa, PV_FLAGS_MDFY); |
2836 | return rv; | | 2844 | return rv; |
2837 | } | | 2845 | } |
2838 | | | 2846 | |
2839 | /* pmap_clear_reference INTERFACE | | 2847 | /* pmap_clear_reference INTERFACE |
2840 | ** | | 2848 | ** |
2841 | * Clear the referenced bit on the page at the specified | | 2849 | * Clear the referenced bit on the page at the specified |
2842 | * physical address. | | 2850 | * physical address. |
2843 | */ | | 2851 | */ |
2844 | bool | | 2852 | bool |
2845 | pmap_clear_reference(struct vm_page *pg) | | 2853 | pmap_clear_reference(struct vm_page *pg) |
2846 | { | | 2854 | { |
2847 | paddr_t pa = VM_PAGE_TO_PHYS(pg); | | 2855 | paddr_t pa = VM_PAGE_TO_PHYS(pg); |
2848 | bool rv; | | 2856 | bool rv; |
2849 | | | 2857 | |
2850 | rv = pmap_is_referenced(pg); | | 2858 | rv = pmap_is_referenced(pg); |
2851 | pmap_clear_pv(pa, PV_FLAGS_USED); | | 2859 | pmap_clear_pv(pa, PV_FLAGS_USED); |
2852 | return rv; | | 2860 | return rv; |
2853 | } | | 2861 | } |
2854 | | | 2862 | |
2855 | /* pmap_clear_pv INTERNAL | | 2863 | /* pmap_clear_pv INTERNAL |
2856 | ** | | 2864 | ** |
2857 | * Clears the specified flag from the specified physical address. | | 2865 | * Clears the specified flag from the specified physical address. |
2858 | * (Used by pmap_clear_modify() and pmap_clear_reference().) | | 2866 | * (Used by pmap_clear_modify() and pmap_clear_reference().) |
2859 | * | | 2867 | * |
2860 | * Flag is one of: | | 2868 | * Flag is one of: |
2861 | * PV_FLAGS_MDFY - Page modified bit. | | 2869 | * PV_FLAGS_MDFY - Page modified bit. |
2862 | * PV_FLAGS_USED - Page used (referenced) bit. | | 2870 | * PV_FLAGS_USED - Page used (referenced) bit. |
2863 | * | | 2871 | * |
2864 | * This routine must not only clear the flag on the pv list | | 2872 | * This routine must not only clear the flag on the pv list |
2865 | * head. It must also clear the bit on every pte in the pv | | 2873 | * head. It must also clear the bit on every pte in the pv |
2866 | * list associated with the address. | | 2874 | * list associated with the address. |
2867 | */ | | 2875 | */ |
2868 | void | | 2876 | void |
2869 | pmap_clear_pv(paddr_t pa, int flag) | | 2877 | pmap_clear_pv(paddr_t pa, int flag) |
2870 | { | | 2878 | { |
2871 | pv_t *pv; | | 2879 | pv_t *pv; |
2872 | int idx; | | 2880 | int idx; |
2873 | vaddr_t va; | | 2881 | vaddr_t va; |
2874 | pmap_t pmap; | | 2882 | pmap_t pmap; |
2875 | mmu_short_pte_t *pte; | | 2883 | mmu_short_pte_t *pte; |
2876 | c_tmgr_t *c_tbl; | | 2884 | c_tmgr_t *c_tbl; |
2877 | | | 2885 | |
2878 | pv = pa2pv(pa); | | 2886 | pv = pa2pv(pa); |
2879 | pv->pv_flags &= ~(flag); | | 2887 | pv->pv_flags &= ~(flag); |
2880 | for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) { | | 2888 | for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) { |
2881 | pte = &kernCbase[idx]; | | 2889 | pte = &kernCbase[idx]; |
2882 | pte->attr.raw &= ~(flag); | | 2890 | pte->attr.raw &= ~(flag); |
2883 | | | 2891 | |
2884 | /* | | 2892 | /* |
2885 | * The MC68030 MMU will not set the modified or | | 2893 | * The MC68030 MMU will not set the modified or |
2886 | * referenced bits on any MMU tables for which it has | | 2894 | * referenced bits on any MMU tables for which it has |
2887 | * a cached descriptor with its modify bit set. To insure | | 2895 | * a cached descriptor with its modify bit set. To insure |
2888 | * that it will modify these bits on the PTE during the next | | 2896 | * that it will modify these bits on the PTE during the next |
2889 | * time it is written to or read from, we must flush it from | | 2897 | * time it is written to or read from, we must flush it from |
2890 | * the ATC. | | 2898 | * the ATC. |
2891 | * | | 2899 | * |
2892 | * Ordinarily it is only necessary to flush the descriptor | | 2900 | * Ordinarily it is only necessary to flush the descriptor |
2893 | * if it is used in the current address space. But since I | | 2901 | * if it is used in the current address space. But since I |
2894 | * am not sure that there will always be a notion of | | 2902 | * am not sure that there will always be a notion of |
2895 | * 'the current address space' when this function is called, | | 2903 | * 'the current address space' when this function is called, |
2896 | * I will skip the test and always flush the address. It | | 2904 | * I will skip the test and always flush the address. It |
2897 | * does no harm. | | 2905 | * does no harm. |
2898 | */ | | 2906 | */ |
2899 | | | 2907 | |
2900 | va = pmap_get_pteinfo(idx, &pmap, &c_tbl); | | 2908 | va = pmap_get_pteinfo(idx, &pmap, &c_tbl); |
2901 | TBIS(va); | | 2909 | TBIS(va); |
2902 | } | | 2910 | } |
2903 | } | | 2911 | } |
2904 | | | 2912 | |
2905 | /* pmap_extract_kernel INTERNAL | | 2913 | /* pmap_extract_kernel INTERNAL |
2906 | ** | | 2914 | ** |
2907 | * Extract a translation from the kernel address space. | | 2915 | * Extract a translation from the kernel address space. |
2908 | */ | | 2916 | */ |
2909 | static INLINE bool | | 2917 | static INLINE bool |
2910 | pmap_extract_kernel(vaddr_t va, paddr_t *pap) | | 2918 | pmap_extract_kernel(vaddr_t va, paddr_t *pap) |
2911 | { | | 2919 | { |
2912 | mmu_short_pte_t *pte; | | 2920 | mmu_short_pte_t *pte; |
2913 | | | 2921 | |
2914 | pte = &kernCbase[(u_int)m68k_btop(va - KERNBASE3X)]; | | 2922 | pte = &kernCbase[(u_int)m68k_btop(va - KERNBASE3X)]; |
2915 | if (!MMU_VALID_DT(*pte)) | | 2923 | if (!MMU_VALID_DT(*pte)) |
2916 | return false; | | 2924 | return false; |
2917 | if (pap != NULL) | | 2925 | if (pap != NULL) |
2918 | *pap = MMU_PTE_PA(*pte); | | 2926 | *pap = MMU_PTE_PA(*pte); |
2919 | return true; | | 2927 | return true; |
2920 | } | | 2928 | } |
2921 | | | 2929 | |
2922 | /* pmap_extract INTERFACE | | 2930 | /* pmap_extract INTERFACE |
2923 | ** | | 2931 | ** |
2924 | * Return the physical address mapped by the virtual address | | 2932 | * Return the physical address mapped by the virtual address |
2925 | * in the specified pmap. | | 2933 | * in the specified pmap. |
2926 | * | | 2934 | * |
2927 | * Note: this function should also apply an exclusive lock | | 2935 | * Note: this function should also apply an exclusive lock |
2928 | * on the pmap system during its duration. | | 2936 | * on the pmap system during its duration. |
2929 | */ | | 2937 | */ |
2930 | bool | | 2938 | bool |
2931 | pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) | | 2939 | pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) |
2932 | { | | 2940 | { |
2933 | int a_idx, b_idx, pte_idx; | | 2941 | int a_idx, b_idx, pte_idx; |
2934 | a_tmgr_t *a_tbl; | | 2942 | a_tmgr_t *a_tbl; |
2935 | b_tmgr_t *b_tbl; | | 2943 | b_tmgr_t *b_tbl; |
2936 | c_tmgr_t *c_tbl; | | 2944 | c_tmgr_t *c_tbl; |
2937 | mmu_short_pte_t *c_pte; | | 2945 | mmu_short_pte_t *c_pte; |
2938 | | | 2946 | |
2939 | if (pmap == pmap_kernel()) | | 2947 | if (pmap == pmap_kernel()) |
2940 | return pmap_extract_kernel(va, pap); | | 2948 | return pmap_extract_kernel(va, pap); |
2941 | | | 2949 | |
2942 | if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, | | 2950 | if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, |
2943 | &c_pte, &a_idx, &b_idx, &pte_idx) == false) | | 2951 | &c_pte, &a_idx, &b_idx, &pte_idx) == false) |
2944 | return false; | | 2952 | return false; |
2945 | | | 2953 | |
2946 | if (!MMU_VALID_DT(*c_pte)) | | 2954 | if (!MMU_VALID_DT(*c_pte)) |
2947 | return false; | | 2955 | return false; |
2948 | | | 2956 | |
2949 | if (pap != NULL) | | 2957 | if (pap != NULL) |
2950 | *pap = MMU_PTE_PA(*c_pte); | | 2958 | *pap = MMU_PTE_PA(*c_pte); |
2951 | return true; | | 2959 | return true; |
2952 | } | | 2960 | } |
2953 | | | 2961 | |
2954 | /* pmap_remove_kernel INTERNAL | | 2962 | /* pmap_remove_kernel INTERNAL |
2955 | ** | | 2963 | ** |
2956 | * Remove the mapping of a range of virtual addresses from the kernel map. | | 2964 | * Remove the mapping of a range of virtual addresses from the kernel map. |
2957 | * The arguments are already page-aligned. | | 2965 | * The arguments are already page-aligned. |
2958 | */ | | 2966 | */ |
2959 | static INLINE void | | 2967 | static INLINE void |
2960 | pmap_remove_kernel(vaddr_t sva, vaddr_t eva) | | 2968 | pmap_remove_kernel(vaddr_t sva, vaddr_t eva) |
2961 | { | | 2969 | { |
2962 | int idx, eidx; | | 2970 | int idx, eidx; |
2963 | | | 2971 | |
2964 | #ifdef PMAP_DEBUG | | 2972 | #ifdef PMAP_DEBUG |
2965 | if ((sva & PGOFSET) || (eva & PGOFSET)) | | 2973 | if ((sva & PGOFSET) || (eva & PGOFSET)) |
2966 | panic("pmap_remove_kernel: alignment"); | | 2974 | panic("pmap_remove_kernel: alignment"); |
2967 | #endif | | 2975 | #endif |
2968 | | | 2976 | |
2969 | idx = m68k_btop(sva - KERNBASE3X); | | 2977 | idx = m68k_btop(sva - KERNBASE3X); |
2970 | eidx = m68k_btop(eva - KERNBASE3X); | | 2978 | eidx = m68k_btop(eva - KERNBASE3X); |
2971 | | | 2979 | |
2972 | while (idx < eidx) { | | 2980 | while (idx < eidx) { |
2973 | pmap_remove_pte(&kernCbase[idx++]); | | 2981 | pmap_remove_pte(&kernCbase[idx++]); |
2974 | TBIS(sva); | | 2982 | TBIS(sva); |
2975 | sva += PAGE_SIZE; | | 2983 | sva += PAGE_SIZE; |
2976 | } | | 2984 | } |
2977 | } | | 2985 | } |
2978 | | | 2986 | |
2979 | /* pmap_remove INTERFACE | | 2987 | /* pmap_remove INTERFACE |
2980 | ** | | 2988 | ** |
2981 | * Remove the mapping of a range of virtual addresses from the given pmap. | | 2989 | * Remove the mapping of a range of virtual addresses from the given pmap. |
2982 | * | | 2990 | * |
2983 | */ | | 2991 | */ |
2984 | void | | 2992 | void |
2985 | pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) | | 2993 | pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) |
2986 | { | | 2994 | { |
2987 | | | 2995 | |
2988 | if (pmap == pmap_kernel()) { | | 2996 | if (pmap == pmap_kernel()) { |
2989 | pmap_remove_kernel(sva, eva); | | 2997 | pmap_remove_kernel(sva, eva); |
2990 | return; | | 2998 | return; |
2991 | } | | 2999 | } |
2992 | | | 3000 | |
2993 | /* | | 3001 | /* |
2994 | * If the pmap doesn't have an A table of its own, it has no mappings | | 3002 | * If the pmap doesn't have an A table of its own, it has no mappings |
2995 | * that can be removed. | | 3003 | * that can be removed. |
2996 | */ | | 3004 | */ |
2997 | if (pmap->pm_a_tmgr == NULL) | | 3005 | if (pmap->pm_a_tmgr == NULL) |
2998 | return; | | 3006 | return; |
2999 | | | 3007 | |
3000 | /* | | 3008 | /* |
3001 | * Remove the specified range from the pmap. If the function | | 3009 | * Remove the specified range from the pmap. If the function |
3002 | * returns true, the operation removed all the valid mappings | | 3010 | * returns true, the operation removed all the valid mappings |
3003 | * in the pmap and freed its A table. If this happened to the | | 3011 | * in the pmap and freed its A table. If this happened to the |
3004 | * currently loaded pmap, the MMU root pointer must be reloaded | | 3012 | * currently loaded pmap, the MMU root pointer must be reloaded |
3005 | * with the default 'kernel' map. | | 3013 | * with the default 'kernel' map. |
3006 | */ | | 3014 | */ |
3007 | if (pmap_remove_a(pmap->pm_a_tmgr, sva, eva)) { | | 3015 | if (pmap_remove_a(pmap->pm_a_tmgr, sva, eva)) { |
3008 | if (kernel_crp.rp_addr == pmap->pm_a_phys) { | | 3016 | if (kernel_crp.rp_addr == pmap->pm_a_phys) { |
3009 | kernel_crp.rp_addr = kernAphys; | | 3017 | kernel_crp.rp_addr = kernAphys; |
3010 | loadcrp(&kernel_crp); | | 3018 | loadcrp(&kernel_crp); |
3011 | /* will do TLB flush below */ | | 3019 | /* will do TLB flush below */ |
3012 | } | | 3020 | } |
3013 | pmap->pm_a_tmgr = NULL; | | 3021 | pmap->pm_a_tmgr = NULL; |
3014 | pmap->pm_a_phys = kernAphys; | | 3022 | pmap->pm_a_phys = kernAphys; |
3015 | } | | 3023 | } |
3016 | | | 3024 | |
3017 | /* | | 3025 | /* |
3018 | * If we just modified the current address space, | | 3026 | * If we just modified the current address space, |
3019 | * make sure to flush the MMU cache. | | 3027 | * make sure to flush the MMU cache. |
3020 | * | | 3028 | * |
3021 | * XXX - this could be an unecessarily large flush. | | 3029 | * XXX - this could be an unecessarily large flush. |
3022 | * XXX - Could decide, based on the size of the VA range | | 3030 | * XXX - Could decide, based on the size of the VA range |
3023 | * to be removed, whether to flush "by pages" or "all". | | 3031 | * to be removed, whether to flush "by pages" or "all". |
3024 | */ | | 3032 | */ |
3025 | if (pmap == current_pmap()) | | 3033 | if (pmap == current_pmap()) |
3026 | TBIAU(); | | 3034 | TBIAU(); |
3027 | } | | 3035 | } |
3028 | | | 3036 | |
3029 | /* pmap_remove_a INTERNAL | | 3037 | /* pmap_remove_a INTERNAL |
3030 | ** | | 3038 | ** |
3031 | * This is function number one in a set of three that removes a range | | 3039 | * This is function number one in a set of three that removes a range |
3032 | * of memory in the most efficient manner by removing the highest possible | | 3040 | * of memory in the most efficient manner by removing the highest possible |
3033 | * tables from the memory space. This particular function attempts to remove | | 3041 | * tables from the memory space. This particular function attempts to remove |
3034 | * as many B tables as it can, delegating the remaining fragmented ranges to | | 3042 | * as many B tables as it can, delegating the remaining fragmented ranges to |
3035 | * pmap_remove_b(). | | 3043 | * pmap_remove_b(). |
3036 | * | | 3044 | * |
3037 | * If the removal operation results in an empty A table, the function returns | | 3045 | * If the removal operation results in an empty A table, the function returns |
3038 | * true. | | 3046 | * true. |
3039 | * | | 3047 | * |
3040 | * It's ugly but will do for now. | | 3048 | * It's ugly but will do for now. |
3041 | */ | | 3049 | */ |
3042 | bool | | 3050 | bool |
3043 | pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva) | | 3051 | pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva) |
3044 | { | | 3052 | { |
3045 | bool empty; | | 3053 | bool empty; |
3046 | int idx; | | 3054 | int idx; |
3047 | vaddr_t nstart, nend; | | 3055 | vaddr_t nstart, nend; |
3048 | b_tmgr_t *b_tbl; | | 3056 | b_tmgr_t *b_tbl; |
3049 | mmu_long_dte_t *a_dte; | | 3057 | mmu_long_dte_t *a_dte; |
3050 | mmu_short_dte_t *b_dte; | | 3058 | mmu_short_dte_t *b_dte; |
3051 | uint8_t at_wired, bt_wired; | | 3059 | uint8_t at_wired, bt_wired; |
3052 | | | 3060 | |
3053 | /* | | 3061 | /* |
3054 | * The following code works with what I call a 'granularity | | 3062 | * The following code works with what I call a 'granularity |
3055 | * reduction algorithim'. A range of addresses will always have | | 3063 | * reduction algorithim'. A range of addresses will always have |
3056 | * the following properties, which are classified according to | | 3064 | * the following properties, which are classified according to |
3057 | * how the range relates to the size of the current granularity | | 3065 | * how the range relates to the size of the current granularity |
3058 | * - an A table entry: | | 3066 | * - an A table entry: |
3059 | * | | 3067 | * |
3060 | * 1 2 3 4 | | 3068 | * 1 2 3 4 |
3061 | * -+---+---+---+---+---+---+---+- | | 3069 | * -+---+---+---+---+---+---+---+- |
3062 | * -+---+---+---+---+---+---+---+- | | 3070 | * -+---+---+---+---+---+---+---+- |
3063 | * | | 3071 | * |
3064 | * A range will always start on a granularity boundary, illustrated | | 3072 | * A range will always start on a granularity boundary, illustrated |
3065 | * by '+' signs in the table above, or it will start at some point | | 3073 | * by '+' signs in the table above, or it will start at some point |
3066 | * inbetween a granularity boundary, as illustrated by point 1. | | 3074 | * inbetween a granularity boundary, as illustrated by point 1. |
3067 | * The first step in removing a range of addresses is to remove the | | 3075 | * The first step in removing a range of addresses is to remove the |
3068 | * range between 1 and 2, the nearest granularity boundary. This | | 3076 | * range between 1 and 2, the nearest granularity boundary. This |
3069 | * job is handled by the section of code governed by the | | 3077 | * job is handled by the section of code governed by the |
3070 | * 'if (start < nstart)' statement. | | 3078 | * 'if (start < nstart)' statement. |
3071 | * | | 3079 | * |
3072 | * A range will always encompass zero or more intergral granules, | | 3080 | * A range will always encompass zero or more intergral granules, |
3073 | * illustrated by points 2 and 3. Integral granules are easy to | | 3081 | * illustrated by points 2 and 3. Integral granules are easy to |
3074 | * remove. The removal of these granules is the second step, and | | 3082 | * remove. The removal of these granules is the second step, and |
3075 | * is handled by the code block 'if (nstart < nend)'. | | 3083 | * is handled by the code block 'if (nstart < nend)'. |
3076 | * | | 3084 | * |
3077 | * Lastly, a range will always end on a granularity boundary, | | 3085 | * Lastly, a range will always end on a granularity boundary, |
3078 | * ill. by point 3, or it will fall just beyond one, ill. by point | | 3086 | * ill. by point 3, or it will fall just beyond one, ill. by point |
3079 | * 4. The last step involves removing this range and is handled by | | 3087 | * 4. The last step involves removing this range and is handled by |
3080 | * the code block 'if (nend < end)'. | | 3088 | * the code block 'if (nend < end)'. |
3081 | */ | | 3089 | */ |
3082 | nstart = MMU_ROUND_UP_A(sva); | | 3090 | nstart = MMU_ROUND_UP_A(sva); |
3083 | nend = MMU_ROUND_A(eva); | | 3091 | nend = MMU_ROUND_A(eva); |
3084 | | | 3092 | |
3085 | at_wired = a_tbl->at_wcnt; | | 3093 | at_wired = a_tbl->at_wcnt; |
3086 | | | 3094 | |
3087 | if (sva < nstart) { | | 3095 | if (sva < nstart) { |
3088 | /* | | 3096 | /* |
3089 | * This block is executed if the range starts between | | 3097 | * This block is executed if the range starts between |
3090 | * granularity boundaries. | | 3098 | * granularity boundaries.
3091 | * | | 3099 | * |
3092 | * First find the DTE which is responsible for mapping | | 3100 | * First find the DTE which is responsible for mapping |
3093 | * the start of the range. | | 3101 | * the start of the range. |
3094 | */ | | 3102 | */ |
3095 | idx = MMU_TIA(sva); | | 3103 | idx = MMU_TIA(sva); |
3096 | a_dte = &a_tbl->at_dtbl[idx]; | | 3104 | a_dte = &a_tbl->at_dtbl[idx]; |
3097 | | | 3105 | |
3098 | /* | | 3106 | /* |
3099 | * If the DTE is valid then delegate the removal of the sub | | 3107 | * If the DTE is valid then delegate the removal of the sub |
3100 | * range to pmap_remove_b(), which can remove addresses at | | 3108 | * range to pmap_remove_b(), which can remove addresses at |
3101 | * a finer granularity. | | 3109 | * a finer granularity. |
3102 | */ | | 3110 | */ |
3103 | if (MMU_VALID_DT(*a_dte)) { | | 3111 | if (MMU_VALID_DT(*a_dte)) { |
3104 | b_dte = mmu_ptov(a_dte->addr.raw); | | 3112 | b_dte = mmu_ptov(a_dte->addr.raw); |
3105 | b_tbl = mmuB2tmgr(b_dte); | | 3113 | b_tbl = mmuB2tmgr(b_dte); |
3106 | bt_wired = b_tbl->bt_wcnt; | | 3114 | bt_wired = b_tbl->bt_wcnt; |
3107 | | | 3115 | |
3108 | /* | | 3116 | /* |
3109 | * The sub range to be removed starts at the start | | 3117 | * The sub range to be removed starts at the start |
3110 | * of the full range we were asked to remove, and ends | | 3118 | * of the full range we were asked to remove, and ends |
3111 | * at the lesser of: | | 3119 | * at the lesser of:
3112 | * 1. The end of the full range, -or- | | 3120 | * 1. The end of the full range, -or-
3113 | * 2. The start of the range, rounded up to the | | 3121 | * 2. The start of the range, rounded up to the
3114 | * nearest granularity boundary. | | 3122 | * nearest granularity boundary.
3115 | */ | | 3123 | */ |
3116 | if (eva < nstart) | | 3124 | if (eva < nstart) |
3117 | empty = pmap_remove_b(b_tbl, sva, eva); | | 3125 | empty = pmap_remove_b(b_tbl, sva, eva); |
3118 | else | | 3126 | else |
3119 | empty = pmap_remove_b(b_tbl, sva, nstart); | | 3127 | empty = pmap_remove_b(b_tbl, sva, nstart); |
3120 | | | 3128 | |
3121 | /* | | 3129 | /* |
3122 | * If the child table no longer has wired entries, | | 3130 | * If the child table no longer has wired entries, |
3123 | * decrement wired entry count. | | 3131 | * decrement wired entry count. |
3124 | */ | | 3132 | */ |
3125 | if (bt_wired && b_tbl->bt_wcnt == 0) | | 3133 | if (bt_wired && b_tbl->bt_wcnt == 0) |
3126 | a_tbl->at_wcnt--; | | 3134 | a_tbl->at_wcnt--; |
3127 | | | 3135 | |
3128 | /* | | 3136 | /* |
3129 | * If the removal resulted in an empty B table, | | 3137 | * If the removal resulted in an empty B table, |
3130 | * invalidate the DTE that points to it and decrement | | 3138 | * invalidate the DTE that points to it and decrement |
3131 | * the valid entry count of the A table. | | 3139 | * the valid entry count of the A table. |
3132 | */ | | 3140 | */ |
3133 | if (empty) { | | 3141 | if (empty) { |
3134 | a_dte->attr.raw = MMU_DT_INVALID; | | 3142 | a_dte->attr.raw = MMU_DT_INVALID; |
3135 | a_tbl->at_ecnt--; | | 3143 | a_tbl->at_ecnt--; |
3136 | } | | 3144 | } |
3137 | } | | 3145 | } |
3138 | /* | | 3146 | /* |
3139 | * If the DTE is invalid, the address range is already non- | | 3147 | * If the DTE is invalid, the address range is already non- |
3140 | * existent and can simply be skipped. | | 3148 | * existent and can simply be skipped. |
3141 | */ | | 3149 | */ |
3142 | } | | 3150 | } |
3143 | if (nstart < nend) { | | 3151 | if (nstart < nend) { |
3144 | /* | | 3152 | /* |
3145 | * This block is executed if the range spans a whole number | | 3153 | * This block is executed if the range spans a whole number |
3146 | * multiple of granules (A table entries.) | | 3154 | * multiple of granules (A table entries.) |
3147 | * | | 3155 | * |
3148 | * First find the DTE which is responsible for mapping | | 3156 | * First find the DTE which is responsible for mapping |
3149 | * the start of the first granule involved. | | 3157 | * the start of the first granule involved. |
3150 | */ | | 3158 | */ |
3151 | idx = MMU_TIA(nstart); | | 3159 | idx = MMU_TIA(nstart); |
3152 | a_dte = &a_tbl->at_dtbl[idx]; | | 3160 | a_dte = &a_tbl->at_dtbl[idx]; |
3153 | | | 3161 | |
3154 | /* | | 3162 | /* |