@@ -1,1058 +1,1058 @@
-/*	$NetBSD: pmap.c,v 1.328 2009/05/18 02:28:35 mrg Exp $ */
+/*	$NetBSD: pmap.c,v 1.329 2009/05/27 02:19:50 mrg Exp $ */
 
 /*
  * Copyright (c) 1996
  *	The President and Fellows of Harvard College. All rights reserved.
  * Copyright (c) 1992, 1993
  *	The Regents of the University of California. All rights reserved.
  *
  * This software was developed by the Computer Systems Engineering group
  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
  * contributed to Berkeley.
  *
  * All advertising materials mentioning features or use of this software
  * must display the following acknowledgement:
  *	This product includes software developed by Harvard University.
  *	This product includes software developed by the University of
  *	California, Lawrence Berkeley Laboratory.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by Aaron Brown and
  *	Harvard University.
  *	This product includes software developed by the University of
  *	California, Berkeley and its contributors.
  * 4. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)pmap.c	8.4 (Berkeley) 2/5/94
  *
  */
 
 /*
  * SPARC physical map management code.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.328 2009/05/18 02:28:35 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.329 2009/05/27 02:19:50 mrg Exp $");
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
 #include "opt_sparc_arch.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/device.h>
 #include <sys/proc.h>
 #include <sys/queue.h>
 #include <sys/pool.h>
 #include <sys/exec.h>
 #include <sys/core.h>
 #include <sys/kcore.h>
 #include <sys/kernel.h>
 #include <sys/atomic.h>
 
 #include <uvm/uvm.h>
 
 #include <machine/autoconf.h>
 #include <machine/bsd_openprom.h>
 #include <machine/oldmon.h>
 #include <machine/cpu.h>
 #include <machine/ctlreg.h>
 #include <machine/kcore.h>
 
 #include <sparc/sparc/asm.h>
 #include <sparc/sparc/cache.h>
 #include <sparc/sparc/vaddrs.h>
 #include <sparc/sparc/cpuvar.h>
 
 /*
  * The SPARCstation offers us the following challenges:
  *
  *   1. A virtual address cache. This is, strictly speaking, not
  *	part of the architecture, but the code below assumes one.
  *	This is a write-through cache on the 4c and a write-back cache
  *	on others.
  *
  *   2. (4/4c only) An MMU that acts like a cache. There is not enough
  *	space in the MMU to map everything all the time. Instead, we need
  *	to load MMU with the `working set' of translations for each
  *	process. The sun4m does not act like a cache; tables are maintained
  *	in physical memory.
  *
  *   3. Segmented virtual and physical spaces. The upper 12 bits of
  *	a virtual address (the virtual segment) index a segment table,
  *	giving a physical segment. The physical segment selects a
  *	`Page Map Entry Group' (PMEG) and the virtual page number---the
  *	next 5 or 6 bits of the virtual address---select the particular
  *	`Page Map Entry' for the page. We call the latter a PTE and
  *	call each Page Map Entry Group a pmeg (for want of a better name).
  *	Note that the sun4m has an unsegmented 36-bit physical space.
  *
  *	Since there are no valid bits in the segment table, the only way
  *	to have an invalid segment is to make one full pmeg of invalid PTEs.
  *	We use the last one (since the ROM does as well) (sun4/4c only)
  *
  *   4. Discontiguous physical pages. The Mach VM expects physical pages
  *	to be in one sequential lump.
  *
  *   5. The MMU is always on: it is not possible to disable it. This is
  *	mainly a startup hassle.
  */
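Point 3 above amounts to a two-level decode of the virtual address. The following is an illustrative sketch only, assuming a 4KB page offset, a 6-bit page number and a 12-bit segment number as described above; SKETCH_VSEG/SKETCH_VPG and the segmap/pmeg_ptes arrays are hypothetical stand-ins for the hardware maps (the real macros live in the sparc headers):

    /* Sketch: decomposing a sun4c-style virtual address (assumed widths). */
    #define SKETCH_VSEG(va)	(((va) >> 18) & 0xfff)	/* upper 12 bits: virtual segment */
    #define SKETCH_VPG(va)	(((va) >> 12) & 0x3f)	/* next 6 bits: page within PMEG */

    pmeg = segmap[SKETCH_VSEG(va)];		/* segment table yields a PMEG */
    pte = pmeg_ptes[pmeg][SKETCH_VPG(va)];	/* PMEG + page number yields the PTE */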
 
 struct pmap_stats {
 	int	ps_unlink_pvfirst;	/* # of pv_unlinks on head */
 	int	ps_unlink_pvsearch;	/* # of pv_unlink searches */
 	int	ps_changeprots;		/* # of calls to changeprot */
 	int	ps_enter_firstpv;	/* pv heads entered */
 	int	ps_enter_secondpv;	/* pv nonheads entered */
 	int	ps_useless_changewire;	/* useless wiring changes */
 	int	ps_npg_prot_all;	/* # of active pages protected */
 	int	ps_npg_prot_actual;	/* # pages actually affected */
 	int	ps_npmeg_free;		/* # of free pmegs */
 	int	ps_npmeg_locked;	/* # of pmegs on locked list */
 	int	ps_npmeg_lru;		/* # of pmegs on lru list */
 } pmap_stats;
 
 #if defined(SUN4) || defined(SUN4C)
 struct evcnt mmu_stolenpmegs_evcnt =
 	EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"mmu","stln pmgs");
 EVCNT_ATTACH_STATIC(mmu_stolenpmegs_evcnt);
 
 struct evcnt mmu_pagein_evcnt =
 	EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"mmu","pagein");
 EVCNT_ATTACH_STATIC(mmu_pagein_evcnt);
 #endif /* SUN4 || SUN4C */
 
 #ifdef DEBUG
 #define	PDB_CREATE	0x0001
 #define	PDB_DESTROY	0x0002
 #define	PDB_REMOVE	0x0004
 #define	PDB_CHANGEPROT	0x0008
 #define	PDB_ENTER	0x0010
 #define	PDB_FOLLOW	0x0020
 
 #define	PDB_MMU_ALLOC	0x0100
 #define	PDB_MMU_STEAL	0x0200
 #define	PDB_CTX_ALLOC	0x0400
 #define	PDB_CTX_STEAL	0x0800
 #define	PDB_MMUREG_ALLOC	0x1000
 #define	PDB_MMUREG_STEAL	0x2000
 #define	PDB_CACHESTUFF	0x4000
 #define	PDB_SWITCHMAP	0x8000
 #define	PDB_SANITYCHK	0x10000
 int	pmapdebug = 0;
 #endif
 
 /*
  * Bounds on managed physical addresses. Used by (MD) users
  * of uvm_pglistalloc() to provide search hints.
  */
 paddr_t	vm_first_phys = (paddr_t)-1;
 paddr_t	vm_last_phys = 0;
 psize_t	vm_num_phys;
 
 #define	PMAP_LOCK()	KERNEL_LOCK(1, NULL)
 #define	PMAP_UNLOCK()	KERNEL_UNLOCK_ONE(NULL)
 
 /*
  * Flags in pvlist.pv_flags. Note that PV_MOD must be 1 and PV_REF must be 2
  * since they must line up with the bits in the hardware PTEs (see pte.h).
  * SUN4M bits are at a slightly different location in the PTE.
  *
  * Note: the REF, MOD and ANC flag bits occur only in the head of a pvlist.
  * The NC bit is meaningful in each individual pv entry and reflects the
  * requested non-cacheability at the time the entry was made through
  * pv_link() or when subsequently altered by kvm_uncache() (but the latter
  * does not happen in kernels as of the time of this writing (March 2001)).
  */
 #define PV_MOD		1	/* page modified */
 #define PV_REF		2	/* page referenced */
 #define PV_NC		4	/* page cannot be cached */
 #define PV_REF4M	1	/* page referenced (SRMMU) */
 #define PV_MOD4M	2	/* page modified (SRMMU) */
 #define PV_ANC		0x10	/* page has incongruent aliases */
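Because PV_REF4M/PV_MOD4M (and the 4/4c PV_MOD/PV_REF) occupy the low bits in the same order as the corresponding hardware bits, a pvlist head can be synchronized from a PTE with a single shift and mask. A minimal sketch for the SRMMU case, under the assumption that the R and M bits sit at PTE bits 5 and 6 (pte.h is the authoritative source for the positions and shift names):

    /* Sketch: fold SRMMU R (assumed bit 5) and M (assumed bit 6) into pv_flags. */
    pv_flags |= (pte >> 5) & (PV_REF4M | PV_MOD4M);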
 
 static struct pool pv_pool;
 
 /*
  * pvhead(pte): find a VM page given a PTE entry.
  */
 #if defined(SUN4) || defined(SUN4C)
 static struct vm_page *
 pvhead4_4c(u_int pte)
 {
 	paddr_t pa = (pte & PG_PFNUM) << PGSHIFT;
 
 	return (PHYS_TO_VM_PAGE(pa));
 }
 #endif
 
 #if defined(SUN4M) || defined(SUN4D)
 static struct vm_page *
 pvhead4m(u_int pte)
 {
 	paddr_t pa = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
 
 	return (PHYS_TO_VM_PAGE(pa));
 }
 #endif
 
 /*
  * Each virtual segment within each pmap is either valid or invalid.
  * It is valid if pm_npte[VA_VSEG(va)] is not 0. This does not mean
  * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
  * does not point to the invalid PMEG.
  *
  * In the older SPARC architectures (sun4/sun4c), page tables are cached in
  * the MMU. The following discussion applies to these architectures:
  *
  * If a virtual segment is valid and loaded, the correct PTEs appear
  * in the MMU only. If it is valid and unloaded, the correct PTEs appear
  * in the pm_pte[VA_VSEG(va)] only. However, some effort is made to keep
  * the software copies consistent enough with the MMU so that libkvm can
  * do user address translations. In particular, pv_changepte() and
  * pmap_enu() maintain consistency, while less critical changes are
  * not maintained. pm_pte[VA_VSEG(va)] always points to space for those
  * PTEs.
  *
  * Each PMEG in the MMU is either free or contains PTEs corresponding to
  * some pmap and virtual segment. If it contains some PTEs, it also contains
  * reference and modify bits that belong in the pv_table. If we need
  * to steal a PMEG from some process (if we need one and none are free)
  * we must copy the ref and mod bits, and update pm_segmap in the other
  * pmap to show that its virtual segment is no longer in the MMU.
  *
  * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
  * tied down permanently, leaving `about' 100 to be spread among
  * running processes. These are managed as an LRU cache. Before
  * calling the VM paging code for a user page fault, the fault handler
  * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
  * MMU. mmu_load will check the validity of the segment and tell whether
  * it did something.
  *
  * Since I hate the name PMEG I call this data structure an `mmu entry'.
  * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
  * or locked. The locked list is only used for kernel mappings that need
  * to be wired down.
  *
  *
  * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three
  * levels of page tables are maintained in physical memory. We use the same
  * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap,
  * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also
  * build a parallel set of physical tables that can be used by the MMU.
  * (XXX: This seems redundant, but is it necessary for the unified kernel?)
  *
  * If a virtual segment is valid, its entries will be in both parallel lists.
  * If it is not valid, then its entry in the kernel tables will be zero, and
  * its entry in the MMU tables will either be nonexistent or zero as well.
  *
  * The Reference MMU generally uses a Translation Look-aside Buffer (TLB)
  * to cache the result of recently executed page table walks. When
  * manipulating page tables, we need to ensure consistency of the
  * in-memory and TLB copies of the page table entries. This is handled
  * by flushing (and invalidating) a TLB entry when appropriate before
  * altering an in-memory page table entry.
  */
 struct mmuentry {
 	CIRCLEQ_ENTRY(mmuentry)	me_list;	/* usage list link */
 	TAILQ_ENTRY(mmuentry)	me_pmchain;	/* pmap owner link */
 	struct	pmap *me_pmap;		/* pmap, if in use */
 	u_short	me_vreg;		/* associated virtual region/segment */
 	u_short	me_vseg;		/* associated virtual region/segment */
 	u_short	me_cookie;		/* hardware SMEG/PMEG number */
 #ifdef DIAGNOSTIC
 	int *me_statp;/*XXX*/
 #endif
 };
 struct mmuentry *mmusegments;	/* allocated in pmap_bootstrap */
 struct mmuentry *mmuregions;	/* allocated in pmap_bootstrap */
 
 CIRCLEQ_HEAD(mmuq, mmuentry);
 struct mmuq segm_freelist, segm_lru, segm_locked;
 struct mmuq region_freelist, region_lru, region_locked;
 /*
  * We use a circular queue, since that allows us to remove an element
  * from a list without knowing the list header.
  */
 #define CIRCLEQ_REMOVE_NOH(elm, field) do {				\
 	(elm)->field.cqe_next->field.cqe_prev = (elm)->field.cqe_prev;	\
 	(elm)->field.cqe_prev->field.cqe_next = (elm)->field.cqe_next;	\
 } while (/*CONSTCOND*/0)
 
 #define MMUQ_INIT(head)			CIRCLEQ_INIT(head)
 #define MMUQ_REMOVE(elm,field)		CIRCLEQ_REMOVE_NOH(elm,field)
 #define MMUQ_INSERT_TAIL(head,elm,field)	CIRCLEQ_INSERT_TAIL(head,elm,field)
 #define MMUQ_EMPTY(head)		CIRCLEQ_EMPTY(head)
 #define MMUQ_FIRST(head)		CIRCLEQ_FIRST(head)
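Together these macros let the pmap move an mmuentry between usage lists without tracking which list currently holds it. A minimal sketch of stealing the least-recently-used segment PMEG and wiring it down for a kernel mapping (the ref/mod copy-back and pm_segmap update described in the comment above are omitted):

    struct mmuentry *me;

    me = MMUQ_FIRST(&segm_lru);		/* victim: oldest entry on the LRU list */
    MMUQ_REMOVE(me, me_list);		/* removal works without the list header */
    MMUQ_INSERT_TAIL(&segm_locked, me, me_list);	/* now wired down */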
 
 
 int	seginval;		/* [4/4c] the invalid segment number */
 int	reginval;		/* [4/3mmu] the invalid region number */
 
 static kmutex_t	demap_lock;
 
 /*
  * (sun4/4c)
  * A context is simply a small number that dictates which set of 4096
  * segment map entries the MMU uses. The Sun 4c has eight (SS1,IPC) or
  * sixteen (SS2,IPX) such sets. These are allotted in an `almost MRU' fashion.
  * (sun4m)
  * A context is simply a small number that indexes the context table, the
  * root-level page table mapping 4G areas. Each entry in this table points
  * to a 1st-level region table. A SPARC reference MMU will usually use 16
  * such contexts, but some offer as many as 64k contexts; the theoretical
  * maximum is 2^32 - 1, but this would create overlarge context tables.
  *
  * Each context is either free or attached to a pmap.
  *
  * Since the virtual address cache is tagged by context, when we steal
  * a context we have to flush (that part of) the cache.
  */
 union ctxinfo {
 	union	ctxinfo *c_nextfree;	/* free list (if free) */
 	struct	pmap *c_pmap;		/* pmap (if busy) */
 };
 
 static kmutex_t	ctx_lock;		/* lock for below */
 union	ctxinfo *ctxinfo;		/* allocated in pmap_bootstrap */
 union	ctxinfo *ctx_freelist;		/* context free list */
 int	ctx_kick;			/* allocation rover when none free */
 int	ctx_kickdir;			/* ctx_kick roves both directions */
 int	ncontext;			/* sizeof ctx_freelist */
 
 void	ctx_alloc(struct pmap *);
 void	ctx_free(struct pmap *);
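The common case of ctx_alloc() is a pop from the free list under ctx_lock; only when the list is empty does the rover (ctx_kick/ctx_kickdir) pick a victim and the cache get flushed. A sketch of that fast path for a pmap `pm` (locking shown with plain mutex_enter for brevity; the steal path is omitted):

    union ctxinfo *c;

    mutex_enter(&ctx_lock);
    if ((c = ctx_freelist) != NULL) {
    	ctx_freelist = c->c_nextfree;	/* unlink from the free list */
    	c->c_pmap = pm;			/* context is now attached to pm */
    }
    mutex_exit(&ctx_lock);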
 
 void *	vmmap;			/* one reserved MI vpage for /dev/mem */
 /*void *	vdumppages;	-* 32KB worth of reserved dump pages */
 
 smeg_t		tregion;	/* [4/3mmu] Region for temporary mappings */
 
 static struct pmap	kernel_pmap_store;	/* the kernel's pmap */
 struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; /* pmap_kernel() */
 struct regmap	kernel_regmap_store[NKREG];	/* the kernel's regmap */
 struct segmap	kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */
 
 #if defined(SUN4M) || defined(SUN4D)
 u_int	*kernel_regtable_store;		/* 1k of storage to map the kernel */
 u_int	*kernel_segtable_store;		/* 2k of storage to map the kernel */
 u_int	*kernel_pagtable_store;		/* 128k of storage to map the kernel */
 
 /*
  * Memory pools and back-end supplier for SRMMU page tables.
  * Share a pool between the level 2 and level 3 page tables,
  * since these are equal in size.
  */
 static struct pool L1_pool;
 static struct pool L23_pool;
 
 static void *pgt_page_alloc(struct pool *, int);
 static void  pgt_page_free(struct pool *, void *);
 
 static struct pool_allocator pgt_page_allocator = {
 	pgt_page_alloc, pgt_page_free, 0,
 };
 
 #endif /* SUN4M || SUN4D */
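Sharing one pool works because SRMMU level 2 and level 3 tables are the same size (64 four-byte entries each). A sketch of how the shared pool might be initialized against the back-end allocator above; the SRMMU_L2SIZE-based sizes and the wchan string are assumptions, not a copy of the real pmap_bootstrap call:

    /* Sketch: one naturally-aligned pool backs both L2 and L3 tables. */
    pool_init(&L23_pool, SRMMU_L2SIZE * sizeof(u_int),
        SRMMU_L2SIZE * sizeof(u_int), 0, 0, "L2/L3 pagetables",
        &pgt_page_allocator, IPL_NONE);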
 
 #if defined(SUN4) || defined(SUN4C)
 /*
  * Memory pool for user and kernel PTE tables.
  */
 static struct pool pte_pool;
 #endif
 
 struct	memarr *pmemarr;	/* physical memory regions */
 int	npmemarr;		/* number of entries in pmemarr */
 
 static paddr_t	avail_start;	/* first available physical page, other
 				   than the `etext gap' defined below */
 static vaddr_t	etext_gap_start;/* start of gap between text & data */
 static vaddr_t	etext_gap_end;	/* end of gap between text & data */
 static vaddr_t	virtual_avail;	/* first free kernel virtual address */
 static vaddr_t	virtual_end;	/* last free kernel virtual address */
 
 static void pmap_page_upload(void);
 
 int mmu_has_hole;
 
 vaddr_t prom_vstart;	/* For /dev/kmem */
 vaddr_t prom_vend;
 
 /*
  * Memory pool for pmap structures.
  */
 static struct pool_cache pmap_cache;
 static int	pmap_pmap_pool_ctor(void *, void *, int);
 static void	pmap_pmap_pool_dtor(void *, void *);
 static struct pool segmap_pool;
 
 #if defined(SUN4)
 /*
  * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a
  * partly invalid value. getsegmap returns a 16 bit value on the sun4,
  * but only the first 8 or so bits are valid (the rest are *supposed* to
  * be zero). On the 4/110 the bits that are supposed to be zero are
  * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero.
  * On a 4/300 getsegmap(KERNBASE) == 0x0000, but
  * on a 4/100 getsegmap(KERNBASE) == 0xff00
  *
  * This confuses mmu_reservemon() and causes it to not reserve the PROM's
  * pmegs. Then the PROM's pmegs get used during autoconfig and everything
  * falls apart!  (not very fun to debug, BTW.)
  *
  * solution: mask the invalid bits in the getsegmap macro.
  */
 
 static u_int segfixmask = 0xffffffff; /* all bits valid to start */
 #else
 #define segfixmask 0xffffffff	/* It's in getsegmap's scope */
 #endif
 
 /*
  * pseudo-functions for mnemonic value
  */
 #define	getsegmap(va)		(CPU_ISSUN4C \
 				    ? lduba(va, ASI_SEGMAP) \
 				    : (lduha(va, ASI_SEGMAP) & segfixmask))
 #define	setsegmap(va, pmeg)	(CPU_ISSUN4C \
 				    ? stba(va, ASI_SEGMAP, pmeg) \
 				    : stha(va, ASI_SEGMAP, pmeg))
 
 /* 3-level sun4 MMU only: */
 #define	getregmap(va)		((unsigned)lduha((va)+2, ASI_REGMAP) >> 8)
 #define	setregmap(va, smeg)	stha((va)+2, ASI_REGMAP, (smeg << 8))
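Given these accessors, detaching a virtual segment from the sun4/4c MMU reduces to pointing its entry at the invalid PMEG chosen at boot (seginval, declared earlier). A one-line sketch:

    /* Sketch: unload va's segment by mapping it to the invalid PMEG. */
    setsegmap(va, seginval);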
 
 
 #if defined(SUN4M) || defined(SUN4D)
 #if 0
 #if VM_PROT_READ != 1 || VM_PROT_WRITE != 2 || VM_PROT_EXECUTE != 4
 #error fix protection code translation table
 #endif
 #endif
 /*
  * Translation table for kernel vs. PTE protection bits.
  */
 const u_int protection_codes[2][8] = {
 	/* kernel */
 	{
 	PPROT_N_RX,	/* VM_PROT_NONE    | VM_PROT_NONE  | VM_PROT_NONE */
 	PPROT_N_RX,	/* VM_PROT_NONE    | VM_PROT_NONE  | VM_PROT_READ */
 	PPROT_N_RWX,	/* VM_PROT_NONE    | VM_PROT_WRITE | VM_PROT_NONE */
 	PPROT_N_RWX,	/* VM_PROT_NONE    | VM_PROT_WRITE | VM_PROT_READ */
 	PPROT_N_RX,	/* VM_PROT_EXECUTE | VM_PROT_NONE  | VM_PROT_NONE */
 	PPROT_N_RX,	/* VM_PROT_EXECUTE | VM_PROT_NONE  | VM_PROT_READ */
 	PPROT_N_RWX,	/* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_NONE */
 	PPROT_N_RWX,	/* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_READ */
 	},
 
 	/* user */
 	{
 	PPROT_N_RX,	/* VM_PROT_NONE    | VM_PROT_NONE  | VM_PROT_NONE */
 	PPROT_R_R,	/* VM_PROT_NONE    | VM_PROT_NONE  | VM_PROT_READ */
 	PPROT_RW_RW,	/* VM_PROT_NONE    | VM_PROT_WRITE | VM_PROT_NONE */
 	PPROT_RW_RW,	/* VM_PROT_NONE    | VM_PROT_WRITE | VM_PROT_READ */
 	PPROT_X_X,	/* VM_PROT_EXECUTE | VM_PROT_NONE  | VM_PROT_NONE */
 	PPROT_RX_RX,	/* VM_PROT_EXECUTE | VM_PROT_NONE  | VM_PROT_READ */
 	PPROT_RWX_RWX,	/* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_NONE */
 	PPROT_RWX_RWX,	/* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_READ */
 	}
 };
 #define pte_kprot4m(prot) (protection_codes[0][(prot)])
 #define pte_uprot4m(prot) (protection_codes[1][(prot)])
 #define pte_prot4m(pm, prot) \
 	(protection_codes[(pm) == pmap_kernel() ? 0 : 1][(prot)])
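An enter routine can then build the protection field of an SRMMU PTE with a single table lookup. A sketch for a pmap `pm` (SRMMU_TEPTE marks the entry type as a PTE; the page number and cacheability fields are omitted here):

    /* Sketch: user read/execute mapping -> PPROT_RX_RX via the table above. */
    int pteproto = SRMMU_TEPTE | pte_prot4m(pm, VM_PROT_READ | VM_PROT_EXECUTE);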
 
 void		setpte4m(vaddr_t va, int pte);
 void		setpgt4m(int *ptep, int pte);
 void		setpgt4m_va(vaddr_t, int *, int, int, int, u_int);
 int		updatepte4m(vaddr_t, int *, int, int, int, u_int);
 #endif /* SUN4M || SUN4D */
 
 #if defined(MULTIPROCESSOR)
 #define PMAP_SET_CPUSET(pmap, cpi)	\
 	(pmap->pm_cpuset |= (1 << (cpi)->ci_cpuid))
 #define PMAP_CLR_CPUSET(pmap, cpi)	\
 	(pmap->pm_cpuset &= ~(1 << (cpi)->ci_cpuid))
 #define PMAP_CPUSET(pmap)		(pmap->pm_cpuset)
 #else
 #define PMAP_SET_CPUSET(pmap, cpi)	/* nothing */
 #define PMAP_CLR_CPUSET(pmap, cpi)	/* nothing */
 #define PMAP_CPUSET(pmap)		1	/* XXX: 1 or 0? */
 #endif /* MULTIPROCESSOR */
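pm_cpuset thus records which CPUs may hold translations for a pmap, letting TLB shootdowns target only those CPUs. A sketch of the intended pairing with the SMP flush routines declared further below (va, ctx and pm stand for the usual arguments of such a call site):

    /* Sketch: demap one page only on the CPUs that have run this pmap. */
    smp_tlb_flush_page(va, ctx, PMAP_CPUSET(pm));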
507 | | | 507 | |
508 | | | 508 | |
509 | /* Function pointer messiness for supporting multiple sparc architectures | | 509 | /* Function pointer messiness for supporting multiple sparc architectures |
510 | * within a single kernel: notice that there are two versions of many of the | | 510 | * within a single kernel: notice that there are two versions of many of the |
511 | * functions within this file/module, one for the sun4/sun4c and the other | | 511 | * functions within this file/module, one for the sun4/sun4c and the other |
512 | * for the sun4m. For performance reasons (since things like pte bits don't | | 512 | * for the sun4m. For performance reasons (since things like pte bits don't |
513 | * map nicely between the two architectures), there are separate functions | | 513 | * map nicely between the two architectures), there are separate functions |
514 | * rather than unified functions which test the cputyp variable. If only | | 514 | * rather than unified functions which test the cputyp variable. If only |
515 | * one architecture is being used, then the non-suffixed function calls | | 515 | * one architecture is being used, then the non-suffixed function calls |
516 | * are macro-translated into the appropriate xxx4_4c or xxx4m call. If | | 516 | * are macro-translated into the appropriate xxx4_4c or xxx4m call. If |
517 | * multiple architectures are defined, the calls translate to (*xxx_p), | | 517 | * multiple architectures are defined, the calls translate to (*xxx_p), |
518 | * i.e. they indirect through function pointers initialized as appropriate | | 518 | * i.e. they indirect through function pointers initialized as appropriate |
519 | * to the run-time architecture in pmap_bootstrap. See also pmap.h. | | 519 | * to the run-time architecture in pmap_bootstrap. See also pmap.h. |
520 | */ | | 520 | */ |
521 | | | 521 | |
522 | #if defined(SUN4M) || defined(SUN4D) | | 522 | #if defined(SUN4M) || defined(SUN4D) |
523 | static void mmu_setup4m_L1(int, struct pmap *); | | 523 | static void mmu_setup4m_L1(int, struct pmap *); |
524 | static void mmu_setup4m_L2(int, struct regmap *); | | 524 | static void mmu_setup4m_L2(int, struct regmap *); |
525 | static void mmu_setup4m_L3(int, struct segmap *); | | 525 | static void mmu_setup4m_L3(int, struct segmap *); |
526 | /*static*/ void mmu_reservemon4m(struct pmap *); | | 526 | /*static*/ void mmu_reservemon4m(struct pmap *); |
527 | | | 527 | |
528 | /*static*/ void pmap_changeprot4m(pmap_t, vaddr_t, vm_prot_t, int); | | 528 | /*static*/ void pmap_changeprot4m(pmap_t, vaddr_t, vm_prot_t, int); |
529 | /*static*/ void pmap_rmk4m(struct pmap *, vaddr_t, vaddr_t, int, int); | | 529 | /*static*/ void pmap_rmk4m(struct pmap *, vaddr_t, vaddr_t, int, int); |
530 | /*static*/ void pmap_rmu4m(struct pmap *, vaddr_t, vaddr_t, int, int); | | 530 | /*static*/ void pmap_rmu4m(struct pmap *, vaddr_t, vaddr_t, int, int); |
531 | /*static*/ int pmap_enk4m(struct pmap *, vaddr_t, vm_prot_t, | | 531 | /*static*/ int pmap_enk4m(struct pmap *, vaddr_t, vm_prot_t, |
532 | int, struct vm_page *, int); | | 532 | int, struct vm_page *, int); |
533 | /*static*/ int pmap_enu4m(struct pmap *, vaddr_t, vm_prot_t, | | 533 | /*static*/ int pmap_enu4m(struct pmap *, vaddr_t, vm_prot_t, |
534 | int, struct vm_page *, int); | | 534 | int, struct vm_page *, int); |
535 | /*static*/ void pv_changepte4m(struct vm_page *, int, int); | | 535 | /*static*/ void pv_changepte4m(struct vm_page *, int, int); |
536 | /*static*/ int pv_syncflags4m(struct vm_page *); | | 536 | /*static*/ int pv_syncflags4m(struct vm_page *); |
537 | /*static*/ int pv_link4m(struct vm_page *, struct pmap *, vaddr_t, u_int *); | | 537 | /*static*/ int pv_link4m(struct vm_page *, struct pmap *, vaddr_t, u_int *); |
538 | /*static*/ void pv_unlink4m(struct vm_page *, struct pmap *, vaddr_t); | | 538 | /*static*/ void pv_unlink4m(struct vm_page *, struct pmap *, vaddr_t); |
539 | #endif | | 539 | #endif |
540 | | | 540 | |
541 | #if defined(SUN4) || defined(SUN4C) | | 541 | #if defined(SUN4) || defined(SUN4C) |
542 | /*static*/ void mmu_reservemon4_4c(int *, int *); | | 542 | /*static*/ void mmu_reservemon4_4c(int *, int *); |
543 | /*static*/ void pmap_changeprot4_4c(pmap_t, vaddr_t, vm_prot_t, int); | | 543 | /*static*/ void pmap_changeprot4_4c(pmap_t, vaddr_t, vm_prot_t, int); |
544 | /*static*/ void pmap_rmk4_4c(struct pmap *, vaddr_t, vaddr_t, int, int); | | 544 | /*static*/ void pmap_rmk4_4c(struct pmap *, vaddr_t, vaddr_t, int, int); |
545 | /*static*/ void pmap_rmu4_4c(struct pmap *, vaddr_t, vaddr_t, int, int); | | 545 | /*static*/ void pmap_rmu4_4c(struct pmap *, vaddr_t, vaddr_t, int, int); |
546 | /*static*/ int pmap_enk4_4c(struct pmap *, vaddr_t, vm_prot_t, | | 546 | /*static*/ int pmap_enk4_4c(struct pmap *, vaddr_t, vm_prot_t, |
547 | int, struct vm_page *, int); | | 547 | int, struct vm_page *, int); |
548 | /*static*/ int pmap_enu4_4c(struct pmap *, vaddr_t, vm_prot_t, | | 548 | /*static*/ int pmap_enu4_4c(struct pmap *, vaddr_t, vm_prot_t, |
549 | int, struct vm_page *, int); | | 549 | int, struct vm_page *, int); |
550 | /*static*/ void pv_changepte4_4c(struct vm_page *, int, int); | | 550 | /*static*/ void pv_changepte4_4c(struct vm_page *, int, int); |
551 | /*static*/ int pv_syncflags4_4c(struct vm_page *); | | 551 | /*static*/ int pv_syncflags4_4c(struct vm_page *); |
552 | /*static*/ int pv_link4_4c(struct vm_page *, struct pmap *, vaddr_t, u_int *); | | 552 | /*static*/ int pv_link4_4c(struct vm_page *, struct pmap *, vaddr_t, u_int *); |
553 | /*static*/ void pv_unlink4_4c(struct vm_page *, struct pmap *, vaddr_t); | | 553 | /*static*/ void pv_unlink4_4c(struct vm_page *, struct pmap *, vaddr_t); |
554 | #endif | | 554 | #endif |
555 | | | 555 | |
556 | #if !(defined(SUN4M) || defined(SUN4D)) && (defined(SUN4) || defined(SUN4C)) | | 556 | #if !(defined(SUN4M) || defined(SUN4D)) && (defined(SUN4) || defined(SUN4C)) |
557 | #define pmap_rmk pmap_rmk4_4c | | 557 | #define pmap_rmk pmap_rmk4_4c |
558 | #define pmap_rmu pmap_rmu4_4c | | 558 | #define pmap_rmu pmap_rmu4_4c |
559 | | | 559 | |
560 | #elif (defined(SUN4M) || defined(SUN4D)) && !(defined(SUN4) || defined(SUN4C)) | | 560 | #elif (defined(SUN4M) || defined(SUN4D)) && !(defined(SUN4) || defined(SUN4C)) |
561 | #define pmap_rmk pmap_rmk4m | | 561 | #define pmap_rmk pmap_rmk4m |
562 | #define pmap_rmu pmap_rmu4m | | 562 | #define pmap_rmu pmap_rmu4m |
563 | | | 563 | |
564 | #else /* must use function pointers */ | | 564 | #else /* must use function pointers */ |
565 | | | 565 | |
566 | /* function pointer declarations */ | | 566 | /* function pointer declarations */ |
567 | /* from pmap.h: */ | | 567 | /* from pmap.h: */ |
568 | bool (*pmap_clear_modify_p)(struct vm_page *); | | 568 | bool (*pmap_clear_modify_p)(struct vm_page *); |
569 | bool (*pmap_clear_reference_p)(struct vm_page *); | | 569 | bool (*pmap_clear_reference_p)(struct vm_page *); |
570 | int (*pmap_enter_p)(pmap_t, vaddr_t, paddr_t, vm_prot_t, int); | | 570 | int (*pmap_enter_p)(pmap_t, vaddr_t, paddr_t, vm_prot_t, int); |
571 | bool (*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *); | | 571 | bool (*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *); |
572 | bool (*pmap_is_modified_p)(struct vm_page *); | | 572 | bool (*pmap_is_modified_p)(struct vm_page *); |
573 | bool (*pmap_is_referenced_p)(struct vm_page *); | | 573 | bool (*pmap_is_referenced_p)(struct vm_page *); |
574 | void (*pmap_kenter_pa_p)(vaddr_t, paddr_t, vm_prot_t); | | 574 | void (*pmap_kenter_pa_p)(vaddr_t, paddr_t, vm_prot_t); |
575 | void (*pmap_kremove_p)(vaddr_t, vsize_t); | | 575 | void (*pmap_kremove_p)(vaddr_t, vsize_t); |
576 | void (*pmap_kprotect_p)(vaddr_t, vsize_t, vm_prot_t); | | 576 | void (*pmap_kprotect_p)(vaddr_t, vsize_t, vm_prot_t); |
577 | void (*pmap_page_protect_p)(struct vm_page *, vm_prot_t); | | 577 | void (*pmap_page_protect_p)(struct vm_page *, vm_prot_t); |
578 | void (*pmap_protect_p)(pmap_t, vaddr_t, vaddr_t, vm_prot_t); | | 578 | void (*pmap_protect_p)(pmap_t, vaddr_t, vaddr_t, vm_prot_t); |
579 | /* local: */ | | 579 | /* local: */ |
580 | void (*pmap_rmk_p)(struct pmap *, vaddr_t, vaddr_t, int, int); | | 580 | void (*pmap_rmk_p)(struct pmap *, vaddr_t, vaddr_t, int, int); |
581 | void (*pmap_rmu_p)(struct pmap *, vaddr_t, vaddr_t, int, int); | | 581 | void (*pmap_rmu_p)(struct pmap *, vaddr_t, vaddr_t, int, int); |
582 | | | 582 | |
583 | #define pmap_rmk (*pmap_rmk_p) | | 583 | #define pmap_rmk (*pmap_rmk_p) |
584 | #define pmap_rmu (*pmap_rmu_p) | | 584 | #define pmap_rmu (*pmap_rmu_p) |
585 | | | 585 | |
586 | #endif | | 586 | #endif |
587 | | | 587 | |
588 | /* --------------------------------------------------------------*/ | | 588 | /* --------------------------------------------------------------*/ |
589 | | | 589 | |
590 | /* | | 590 | /* |
591 | * Next we have some sun4m/4d-specific routines which have no 4/4c | | 591 | * Next we have some sun4m/4d-specific routines which have no 4/4c |
592 | * counterparts, or which are 4/4c macros. | | 592 | * counterparts, or which are 4/4c macros. |
593 | */ | | 593 | */ |
594 | | | 594 | |
595 | #if defined(SUN4M) || defined(SUN4D) | | 595 | #if defined(SUN4M) || defined(SUN4D) |
596 | /* | | 596 | /* |
597 | * SP versions of the tlb flush operations. | | 597 | * SP versions of the tlb flush operations. |
598 | * | | 598 | * |
599 | * Turn off traps to prevent register window overflows | | 599 | * Turn off traps to prevent register window overflows |
600 | * from writing user windows to the wrong stack. | | 600 | * from writing user windows to the wrong stack. |
601 | */ | | 601 | */ |
602 | static void | | 602 | static void |
603 | sp_tlb_flush(int va, int ctx, int lvl) | | 603 | sp_tlb_flush(int va, int ctx, int lvl) |
604 | { | | 604 | { |
605 | | | 605 | |
606 | /* Traps off */ | | 606 | /* Traps off */ |
607 | __asm("rd %psr, %o3"); | | 607 | __asm("rd %psr, %o3"); |
608 | __asm("wr %%o3, %0, %%psr" :: "n" (PSR_ET)); | | 608 | __asm("wr %%o3, %0, %%psr" :: "n" (PSR_ET)); |
609 | | | 609 | |
610 | /* Save context */ | | 610 | /* Save context */ |
611 | __asm("mov %0, %%o4" :: "n"(SRMMU_CXR)); | | 611 | __asm("mov %0, %%o4" :: "n"(SRMMU_CXR)); |
612 | __asm("lda [%%o4]%0, %%o5" :: "n"(ASI_SRMMU)); | | 612 | __asm("lda [%%o4]%0, %%o5" :: "n"(ASI_SRMMU)); |
613 | | | 613 | |
614 | /* Set new context and flush type bits */ | | 614 | /* Set new context and flush type bits */ |
615 | __asm("andn %o0, 0xfff, %o0"); | | 615 | __asm("andn %o0, 0xfff, %o0"); |
616 | __asm("sta %%o1, [%%o4]%0" :: "n"(ASI_SRMMU)); | | 616 | __asm("sta %%o1, [%%o4]%0" :: "n"(ASI_SRMMU)); |
617 | __asm("or %o0, %o2, %o0"); | | 617 | __asm("or %o0, %o2, %o0"); |
618 | | | 618 | |
619 | /* Do the TLB flush */ | | 619 | /* Do the TLB flush */ |
620 | __asm("sta %%g0, [%%o0]%0" :: "n"(ASI_SRMMUFP)); | | 620 | __asm("sta %%g0, [%%o0]%0" :: "n"(ASI_SRMMUFP)); |
621 | | | 621 | |
622 | /* Restore context */ | | 622 | /* Restore context */ |
623 | __asm("sta %%o5, [%%o4]%0" :: "n"(ASI_SRMMU)); | | 623 | __asm("sta %%o5, [%%o4]%0" :: "n"(ASI_SRMMU)); |
624 | | | 624 | |
625 | /* and turn traps on again */ | | 625 | /* and turn traps on again */ |
626 | __asm("wr %o3, 0, %psr"); | | 626 | __asm("wr %o3, 0, %psr"); |
627 | __asm("nop"); | | 627 | __asm("nop"); |
628 | __asm("nop"); | | 628 | __asm("nop"); |
629 | __asm("nop"); | | 629 | __asm("nop"); |
630 | } | | 630 | } |
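
In effect, the asm above composes an SRMMU demap address from the page-aligned VA plus the flush-type bits in the low bits, with the context register temporarily switched to ctx. When the current context is the one being flushed, no switch is needed; a sketch using the sta() helper that appears just below:

	/* Demap one page in the *current* context (sketch only). */
	sta((va & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0);
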
631 | | | 631 | |
632 | static inline void | | 632 | static inline void |
633 | sp_tlb_flush_all(void) | | 633 | sp_tlb_flush_all(void) |
634 | { | | 634 | { |
635 | | | 635 | |
636 | sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0); | | 636 | sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0); |
637 | } | | 637 | } |
638 | | | 638 | |
639 | #if defined(MULTIPROCESSOR) | | 639 | #if defined(MULTIPROCESSOR) |
640 | /* | | 640 | /* |
641 | * The SMP versions of the tlb flush routines. We only need to | | 641 | * The SMP versions of the tlb flush routines. We only need to |
642 | * do a cross call for these on sun4m (Mbus) systems. sun4d systems | | 642 | * do a cross call for these on sun4m (Mbus) systems. sun4d systems |
643 | * have an Xbus which broadcasts TLB demaps in hardware. | | 643 | * have an Xbus which broadcasts TLB demaps in hardware. |
644 | */ | | 644 | */ |
645 | | | 645 | |
646 | static inline void smp_tlb_flush_page (int va, int ctx, u_int cpuset); | | 646 | static inline void smp_tlb_flush_page (int va, int ctx, u_int cpuset); |
647 | static inline void smp_tlb_flush_segment (int va, int ctx, u_int cpuset); | | 647 | static inline void smp_tlb_flush_segment (int va, int ctx, u_int cpuset); |
648 | static inline void smp_tlb_flush_region (int va, int ctx, u_int cpuset); | | 648 | static inline void smp_tlb_flush_region (int va, int ctx, u_int cpuset); |
649 | static inline void smp_tlb_flush_context (int ctx, u_int cpuset); | | 649 | static inline void smp_tlb_flush_context (int ctx, u_int cpuset); |
650 | static inline void smp_tlb_flush_all (void); | | 650 | static inline void smp_tlb_flush_all (void); |
651 | | | 651 | |
652 | /* From locore: */ | | 652 | /* From locore: */ |
653 | extern void ft_tlb_flush(int va, int ctx, int lvl); | | 653 | extern void ft_tlb_flush(int va, int ctx, int lvl); |
654 | | | 654 | |
655 | static inline void | | 655 | static inline void |
656 | smp_tlb_flush_page(int va, int ctx, u_int cpuset) | | 656 | smp_tlb_flush_page(int va, int ctx, u_int cpuset) |
657 | { | | 657 | { |
658 | | | 658 | |
659 | if (CPU_ISSUN4D) { | | 659 | if (CPU_ISSUN4D) { |
660 | sp_tlb_flush(va, ctx, ASI_SRMMUFP_L3); | | 660 | sp_tlb_flush(va, ctx, ASI_SRMMUFP_L3); |
661 | } else | | 661 | } else |
662 | FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L3, cpuset); | | 662 | FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L3, cpuset); |
663 | } | | 663 | } |
664 | | | 664 | |
665 | static inline void | | 665 | static inline void |
666 | smp_tlb_flush_segment(int va, int ctx, u_int cpuset) | | 666 | smp_tlb_flush_segment(int va, int ctx, u_int cpuset) |
667 | { | | 667 | { |
668 | | | 668 | |
669 | if (CPU_ISSUN4D) { | | 669 | if (CPU_ISSUN4D) { |
670 | sp_tlb_flush(va, ctx, ASI_SRMMUFP_L2); | | 670 | sp_tlb_flush(va, ctx, ASI_SRMMUFP_L2); |
671 | } else | | 671 | } else |
672 | FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L2, cpuset); | | 672 | FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L2, cpuset); |
673 | } | | 673 | } |
674 | | | 674 | |
675 | static inline void | | 675 | static inline void |
676 | smp_tlb_flush_region(int va, int ctx, u_int cpuset) | | 676 | smp_tlb_flush_region(int va, int ctx, u_int cpuset) |
677 | { | | 677 | { |
678 | | | 678 | |
679 | if (CPU_ISSUN4D) { | | 679 | if (CPU_ISSUN4D) { |
680 | sp_tlb_flush(va, ctx, ASI_SRMMUFP_L1); | | 680 | sp_tlb_flush(va, ctx, ASI_SRMMUFP_L1); |
681 | } else | | 681 | } else |
682 | FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L1, cpuset); | | 682 | FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L1, cpuset); |
683 | } | | 683 | } |
684 | | | 684 | |
685 | static inline void | | 685 | static inline void |
686 | smp_tlb_flush_context(int ctx, u_int cpuset) | | 686 | smp_tlb_flush_context(int ctx, u_int cpuset) |
687 | { | | 687 | { |
688 | | | 688 | |
689 | if (CPU_ISSUN4D) { | | 689 | if (CPU_ISSUN4D) { |
690 | sp_tlb_flush(0, ctx, ASI_SRMMUFP_L0); | | 690 | sp_tlb_flush(0, ctx, ASI_SRMMUFP_L0);
691 | } else | | 691 | } else |
692 | FXCALL3(sp_tlb_flush, ft_tlb_flush, 0, ctx, ASI_SRMMUFP_L0, cpuset); | | 692 | FXCALL3(sp_tlb_flush, ft_tlb_flush, 0, ctx, ASI_SRMMUFP_L0, cpuset); |
693 | } | | 693 | } |
694 | | | 694 | |
695 | static inline void | | 695 | static inline void |
696 | smp_tlb_flush_all(void) | | 696 | smp_tlb_flush_all(void) |
697 | { | | 697 | { |
698 | | | 698 | |
699 | if (CPU_ISSUN4D) { | | 699 | if (CPU_ISSUN4D) { |
700 | sp_tlb_flush_all(); | | 700 | sp_tlb_flush_all(); |
701 | } else | | 701 | } else |
702 | XCALL0(sp_tlb_flush_all, CPUSET_ALL); | | 702 | XCALL0(sp_tlb_flush_all, CPUSET_ALL); |
703 | } | | 703 | } |
704 | #endif /* MULTIPROCESSOR */ | | 704 | #endif /* MULTIPROCESSOR */ |
705 | | | 705 | |
706 | #if defined(MULTIPROCESSOR) | | 706 | #if defined(MULTIPROCESSOR) |
707 | #define tlb_flush_page(va,ctx,s) smp_tlb_flush_page(va,ctx,s) | | 707 | #define tlb_flush_page(va,ctx,s) smp_tlb_flush_page(va,ctx,s) |
708 | #define tlb_flush_segment(va,ctx,s) smp_tlb_flush_segment(va,ctx,s) | | 708 | #define tlb_flush_segment(va,ctx,s) smp_tlb_flush_segment(va,ctx,s) |
709 | #define tlb_flush_region(va,ctx,s) smp_tlb_flush_region(va,ctx,s) | | 709 | #define tlb_flush_region(va,ctx,s) smp_tlb_flush_region(va,ctx,s) |
710 | #define tlb_flush_context(ctx,s) smp_tlb_flush_context(ctx,s) | | 710 | #define tlb_flush_context(ctx,s) smp_tlb_flush_context(ctx,s) |
711 | #define tlb_flush_all() smp_tlb_flush_all() | | 711 | #define tlb_flush_all() smp_tlb_flush_all() |
712 | #else | | 712 | #else |
713 | #define tlb_flush_page(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L3) | | 713 | #define tlb_flush_page(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L3) |
714 | #define tlb_flush_segment(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L2) | | 714 | #define tlb_flush_segment(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L2) |
715 | #define tlb_flush_region(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L1) | | 715 | #define tlb_flush_region(va,ctx,s) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L1) |
716 | #define tlb_flush_context(ctx,s) sp_tlb_flush(0,ctx,ASI_SRMMUFP_L0) | | 716 | #define tlb_flush_context(ctx,s) sp_tlb_flush(0,ctx,ASI_SRMMUFP_L0)
717 | #define tlb_flush_all() sp_tlb_flush_all() | | 717 | #define tlb_flush_all() sp_tlb_flush_all() |
718 | #endif /* MULTIPROCESSOR */ | | 718 | #endif /* MULTIPROCESSOR */ |
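
Either way, call sites use a single spelling. A representative use, as it appears elsewhere in this file (PMAP_CPUSET() yields the set of CPUs on which the pmap is active; the uniprocessor expansion simply ignores that argument):

	/* Becomes a cross call on MP sun4m, a local demap otherwise. */
	tlb_flush_page(va, pm->pm_ctxnum, PMAP_CPUSET(pm));
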
719 | | | 719 | |
720 | static u_int VA2PA(void *); | | 720 | static u_int VA2PA(void *); |
721 | static u_long srmmu_bypass_read(u_long); | | 721 | static u_long srmmu_bypass_read(u_long); |
722 | | | 722 | |
723 | /* | | 723 | /* |
724 | * VA2PA(addr) -- converts a virtual address to a physical address using | | 724 | * VA2PA(addr) -- converts a virtual address to a physical address using |
725 | * the MMU's currently-installed page tables. As a side effect, the address | | 725 | * the MMU's currently-installed page tables. As a side effect, the address |
726 | * translation used may cause the associated pte to be encached. The correct | | 726 | * translation used may cause the associated pte to be encached. The correct |
727 | * context for VA must be set before this is called. | | 727 | * context for VA must be set before this is called. |
728 | * | | 728 | * |
729 | * This routine should work with any level of mapping, as it is used | | 729 | * This routine should work with any level of mapping, as it is used |
730 | * during bootup to interact with the ROM's initial L1 mapping of the kernel. | | 730 | * during bootup to interact with the ROM's initial L1 mapping of the kernel. |
731 | */ | | 731 | */ |
732 | static u_int | | 732 | static u_int |
733 | VA2PA(void *addr) | | 733 | VA2PA(void *addr) |
734 | { | | 734 | { |
735 | u_int pte; | | 735 | u_int pte; |
736 | | | 736 | |
737 | /* | | 737 | /* |
738 | * We'll use that handy SRMMU flush/probe. | | 738 | * We'll use that handy SRMMU flush/probe. |
739 | * Try each level in turn until we find a valid pte. Otherwise panic. | | 739 | * Try each level in turn until we find a valid pte. Otherwise panic. |
740 | */ | | 740 | */ |
741 | | | 741 | |
742 | pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP); | | 742 | pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP); |
743 | /* Unlock fault status; required on Hypersparc modules */ | | 743 | /* Unlock fault status; required on Hypersparc modules */ |
744 | (void)lda(SRMMU_SFSR, ASI_SRMMU); | | 744 | (void)lda(SRMMU_SFSR, ASI_SRMMU); |
745 | if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) | | 745 | if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) |
746 | return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | | | 746 | return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | |
747 | ((u_int)addr & 0xfff)); | | 747 | ((u_int)addr & 0xfff)); |
748 | | | 748 | |
749 | /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */ | | 749 | /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */ |
750 | tlb_flush_all_real(); | | 750 | tlb_flush_all_real(); |
751 | | | 751 | |
752 | pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP); | | 752 | pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP); |
753 | if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) | | 753 | if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) |
754 | return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | | | 754 | return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | |
755 | ((u_int)addr & 0x3ffff)); | | 755 | ((u_int)addr & 0x3ffff)); |
756 | pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP); | | 756 | pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP); |
757 | if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) | | 757 | if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) |
758 | return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | | | 758 | return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | |
759 | ((u_int)addr & 0xffffff)); | | 759 | ((u_int)addr & 0xffffff)); |
760 | pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP); | | 760 | pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP); |
761 | if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) | | 761 | if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) |
762 | return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | | | 762 | return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | |
763 | ((u_int)addr & 0xffffffff)); | | 763 | ((u_int)addr & 0xffffffff)); |
764 | | | 764 | |
765 | #ifdef DIAGNOSTIC | | 765 | #ifdef DIAGNOSTIC |
766 | panic("VA2PA: Asked to translate unmapped VA %p", addr); | | 766 | panic("VA2PA: Asked to translate unmapped VA %p", addr); |
767 | #else | | 767 | #else |
768 | return (0); | | 768 | return (0); |
769 | #endif | | 769 | #endif |
770 | } | | 770 | } |
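
The offset mask widens at each level because progressively more of the VA passes through untranslated: 12 bits within a 4KB page, 18 within a 256KB segment, 24 within a 16MB region. A worked example under those (standard SRMMU) sizes:

	/*
	 * Sketch: suppose only the L1 (region) probe hits, and the PPN
	 * field names physical region base 0x04000000.  Then for
	 * addr == 0xf0123456:
	 *	pa = 0x04000000 | (0xf0123456 & 0xffffff) = 0x04123456
	 */
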
771 | | | 771 | |
772 | /* | | 772 | /* |
773 | * Atomically update a PTE entry, coping with hardware updating the | | 773 | * Atomically update a PTE entry, coping with hardware updating the |
774 | * PTE at the same time we are. This is the procedure that is | | 774 | * PTE at the same time we are. This is the procedure that is |
775 | * recommended in the SuperSPARC user's manual. | | 775 | * recommended in the SuperSPARC user's manual. |
776 | */ | | 776 | */ |
777 | int | | 777 | int |
778 | updatepte4m(vaddr_t va, int *pte, int bic, int bis, int ctx, u_int cpuset) | | 778 | updatepte4m(vaddr_t va, int *pte, int bic, int bis, int ctx, u_int cpuset) |
779 | { | | 779 | { |
780 | int oldval, swapval; | | 780 | int oldval, swapval; |
781 | volatile int *vpte = (volatile int *)pte; | | 781 | volatile int *vpte = (volatile int *)pte; |
782 | | | 782 | |
783 | /* | | 783 | /* |
784 | * Only one of these PTE updates can be in progress in the | | 784 | * Only one of these PTE updates can be in progress in the
785 | * system at any one time; demap_lock serializes them. | | 785 | * system at any one time; demap_lock serializes them.
786 | */ | | 786 | */ |
787 | mutex_spin_enter(&demap_lock); | | 787 | mutex_spin_enter(&demap_lock); |
788 | | | 788 | |
789 | /* | | 789 | /* |
790 | * The idea is to loop swapping zero into the pte, flushing | | 790 | * The idea is to loop swapping zero into the pte, flushing |
791 | * it, and repeating until it stays zero. At this point, | | 791 | * it, and repeating until it stays zero. At this point, |
792 | * there should be no more hardware accesses to this PTE | | 792 | * there should be no more hardware accesses to this PTE |
793 | * so we can modify it without losing any mod/ref info. | | 793 | * so we can modify it without losing any mod/ref info. |
794 | */ | | 794 | */ |
795 | oldval = 0; | | 795 | oldval = 0; |
796 | do { | | 796 | do { |
797 | swapval = 0; | | 797 | swapval = 0; |
798 | swap(vpte, swapval); | | 798 | swap(vpte, swapval); |
799 | tlb_flush_page(va, ctx, cpuset); | | 799 | tlb_flush_page(va, ctx, cpuset); |
800 | oldval |= swapval; | | 800 | oldval |= swapval; |
801 | } while (__predict_false(*vpte != 0)); | | 801 | } while (__predict_false(*vpte != 0)); |
802 | | | 802 | |
803 | swapval = (oldval & ~bic) | bis; | | 803 | swapval = (oldval & ~bic) | bis; |
804 | swap(vpte, swapval); | | 804 | swap(vpte, swapval); |
805 | | | 805 | |
806 | mutex_spin_exit(&demap_lock); | | 806 | mutex_spin_exit(&demap_lock); |
807 | | | 807 | |
808 | return (oldval); | | 808 | return (oldval); |
809 | } | | 809 | } |
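
A representative call, matching the one in pv_syncflags4m() further down: atomically clear the hardware referenced/modified bits while folding their old values into the software flags:

	/* Returns the old PTE value; MR4M() extracts its R/M bits. */
	flags |= MR4M(updatepte4m(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
	    SRMMU_PG_M | SRMMU_PG_R, 0, pm->pm_ctxnum, PMAP_CPUSET(pm)));
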
810 | | | 810 | |
811 | inline void | | 811 | inline void |
812 | setpgt4m(int *ptep, int pte) | | 812 | setpgt4m(int *ptep, int pte) |
813 | { | | 813 | { |
814 | | | 814 | |
815 | swap(ptep, pte); | | 815 | swap(ptep, pte); |
816 | } | | 816 | } |
817 | | | 817 | |
818 | inline void | | 818 | inline void |
819 | setpgt4m_va(vaddr_t va, int *ptep, int pte, int pageflush, int ctx, | | 819 | setpgt4m_va(vaddr_t va, int *ptep, int pte, int pageflush, int ctx, |
820 | u_int cpuset) | | 820 | u_int cpuset) |
821 | { | | 821 | { |
822 | | | 822 | |
823 | #if defined(MULTIPROCESSOR) | | 823 | #if defined(MULTIPROCESSOR) |
824 | updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0, cpuset); | | 824 | updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0, cpuset); |
825 | #else | | 825 | #else |
826 | if (__predict_true(pageflush)) | | 826 | if (__predict_true(pageflush)) |
827 | tlb_flush_page(va, ctx, 0); | | 827 | tlb_flush_page(va, ctx, 0); |
828 | setpgt4m(ptep, pte); | | 828 | setpgt4m(ptep, pte); |
829 | #endif /* MULTIPROCESSOR */ | | 829 | #endif /* MULTIPROCESSOR */ |
830 | } | | 830 | } |
831 | | | 831 | |
832 | /* Set the page table entry for va to pte. */ | | 832 | /* Set the page table entry for va to pte. */ |
833 | void | | 833 | void |
834 | setpte4m(vaddr_t va, int pte) | | 834 | setpte4m(vaddr_t va, int pte) |
835 | { | | 835 | { |
836 | struct pmap *pm; | | 836 | struct pmap *pm; |
837 | struct regmap *rp; | | 837 | struct regmap *rp; |
838 | struct segmap *sp; | | 838 | struct segmap *sp; |
839 | | | 839 | |
840 | #ifdef DEBUG | | 840 | #ifdef DEBUG |
841 | if (getcontext4m() != 0) | | 841 | if (getcontext4m() != 0) |
842 | panic("setpte4m: user context"); | | 842 | panic("setpte4m: user context"); |
843 | #endif | | 843 | #endif |
844 | | | 844 | |
845 | pm = pmap_kernel(); | | 845 | pm = pmap_kernel(); |
846 | rp = &pm->pm_regmap[VA_VREG(va)]; | | 846 | rp = &pm->pm_regmap[VA_VREG(va)]; |
847 | sp = &rp->rg_segmap[VA_VSEG(va)]; | | 847 | sp = &rp->rg_segmap[VA_VSEG(va)]; |
848 | | | 848 | |
849 | tlb_flush_page(va, 0, CPUSET_ALL); | | 849 | tlb_flush_page(va, 0, CPUSET_ALL); |
850 | setpgt4m(sp->sg_pte + VA_SUN4M_VPG(va), pte); | | 850 | setpgt4m(sp->sg_pte + VA_SUN4M_VPG(va), pte); |
851 | } | | 851 | } |
852 | | | 852 | |
853 | /* | | 853 | /* |
854 | * Page table pool back-end. | | 854 | * Page table pool back-end. |
855 | */ | | 855 | */ |
856 | void * | | 856 | void * |
857 | pgt_page_alloc(struct pool *pp, int flags) | | 857 | pgt_page_alloc(struct pool *pp, int flags) |
858 | { | | 858 | { |
859 | int cacheit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0; | | 859 | int cacheit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0; |
860 | struct vm_page *pg; | | 860 | struct vm_page *pg; |
861 | vaddr_t va; | | 861 | vaddr_t va; |
862 | paddr_t pa; | | 862 | paddr_t pa; |
863 | | | 863 | |
864 | /* Allocate a page of physical memory */ | | 864 | /* Allocate a page of physical memory */ |
865 | if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL) | | 865 | if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL) |
866 | return (NULL); | | 866 | return (NULL); |
867 | | | 867 | |
868 | /* Allocate virtual memory */ | | 868 | /* Allocate virtual memory */ |
869 | va = uvm_km_alloc(kmem_map, PAGE_SIZE, 0, UVM_KMF_VAONLY | | | 869 | va = uvm_km_alloc(kmem_map, PAGE_SIZE, 0, UVM_KMF_VAONLY | |
870 | ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)); | | 870 | ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)); |
871 | if (va == 0) { | | 871 | if (va == 0) { |
872 | uvm_pagefree(pg); | | 872 | uvm_pagefree(pg); |
873 | return (NULL); | | 873 | return (NULL); |
874 | } | | 874 | } |
875 | | | 875 | |
876 | /* | | 876 | /* |
877 | * On systems with a physical data cache we need to flush this page | | 877 | * On systems with a physical data cache we need to flush this page |
878 | * from the cache if the pagetables cannot be cached. | | 878 | * from the cache if the pagetables cannot be cached. |
879 | * On systems with a virtually indexed data cache, we only need | | 879 | * On systems with a virtually indexed data cache, we only need |
880 | * to map it non-cacheable, since the page is not currently mapped. | | 880 | * to map it non-cacheable, since the page is not currently mapped. |
881 | */ | | 881 | */ |
882 | pa = VM_PAGE_TO_PHYS(pg); | | 882 | pa = VM_PAGE_TO_PHYS(pg); |
883 | if (cacheit == 0) | | 883 | if (cacheit == 0) |
884 | pcache_flush_page(pa, 1); | | 884 | pcache_flush_page(pa, 1); |
885 | | | 885 | |
886 | /* Map the page */ | | 886 | /* Map the page */ |
887 | pmap_kenter_pa(va, pa | (cacheit ? 0 : PMAP_NC), | | 887 | pmap_kenter_pa(va, pa | (cacheit ? 0 : PMAP_NC), |
888 | VM_PROT_READ | VM_PROT_WRITE); | | 888 | VM_PROT_READ | VM_PROT_WRITE); |
889 | pmap_update(pmap_kernel()); | | 889 | pmap_update(pmap_kernel()); |
890 | | | 890 | |
891 | return ((void *)va); | | 891 | return ((void *)va); |
892 | } | | 892 | } |
893 | | | 893 | |
894 | void | | 894 | void |
895 | pgt_page_free(struct pool *pp, void *v) | | 895 | pgt_page_free(struct pool *pp, void *v) |
896 | { | | 896 | { |
897 | vaddr_t va; | | 897 | vaddr_t va; |
898 | paddr_t pa; | | 898 | paddr_t pa; |
899 | bool rv; | | 899 | bool rv; |
900 | | | 900 | |
901 | va = (vaddr_t)v; | | 901 | va = (vaddr_t)v; |
902 | rv = pmap_extract(pmap_kernel(), va, &pa); | | 902 | rv = pmap_extract(pmap_kernel(), va, &pa); |
903 | KASSERT(rv); | | 903 | KASSERT(rv); |
904 | uvm_pagefree(PHYS_TO_VM_PAGE(pa)); | | 904 | uvm_pagefree(PHYS_TO_VM_PAGE(pa)); |
905 | pmap_kremove(va, PAGE_SIZE); | | 905 | pmap_kremove(va, PAGE_SIZE); |
906 | uvm_km_free(kmem_map, va, PAGE_SIZE, UVM_KMF_VAONLY); | | 906 | uvm_km_free(kmem_map, va, PAGE_SIZE, UVM_KMF_VAONLY); |
907 | } | | 907 | } |
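
These two routines are back-end hooks for a pool(9) allocator. A sketch of the glue, assuming the standard struct pool_allocator layout (the actual declaration lives elsewhere in this file):

	static struct pool_allocator pgt_page_allocator = {
		pgt_page_alloc, pgt_page_free, 0,
	};
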
908 | #endif /* SUN4M || SUN4D */ | | 908 | #endif /* SUN4M || SUN4D */ |
909 | | | 909 | |
910 | /*----------------------------------------------------------------*/ | | 910 | /*----------------------------------------------------------------*/ |
911 | | | 911 | |
912 | /* | | 912 | /* |
913 | * The following three macros are to be used in sun4/sun4c code only. | | 913 | * The following three macros are to be used in sun4/sun4c code only. |
914 | */ | | 914 | */ |
915 | #if defined(SUN4_MMU3L) | | 915 | #if defined(SUN4_MMU3L) |
916 | #define CTX_USABLE(pm,rp) ( \ | | 916 | #define CTX_USABLE(pm,rp) ( \ |
917 | ((pm)->pm_ctx != NULL && \ | | 917 | ((pm)->pm_ctx != NULL && \ |
918 | (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \ | | 918 | (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \ |
919 | ) | | 919 | ) |
920 | #else | | 920 | #else |
921 | #define CTX_USABLE(pm,rp) ((pm)->pm_ctx != NULL) | | 921 | #define CTX_USABLE(pm,rp) ((pm)->pm_ctx != NULL)
922 | #endif | | 922 | #endif |
923 | | | 923 | |
924 | #define GAP_WIDEN(pm,vr) do if (CPU_HAS_SUNMMU) { \ | | 924 | #define GAP_WIDEN(pm,vr) do if (CPU_HAS_SUNMMU) { \ |
925 | if (vr + 1 == pm->pm_gap_start) \ | | 925 | if (vr + 1 == pm->pm_gap_start) \ |
926 | pm->pm_gap_start = vr; \ | | 926 | pm->pm_gap_start = vr; \ |
927 | if (vr == pm->pm_gap_end) \ | | 927 | if (vr == pm->pm_gap_end) \ |
928 | pm->pm_gap_end = vr + 1; \ | | 928 | pm->pm_gap_end = vr + 1; \ |
929 | } while (0) | | 929 | } while (0) |
930 | | | 930 | |
931 | #define GAP_SHRINK(pm,vr) do if (CPU_HAS_SUNMMU) { \ | | 931 | #define GAP_SHRINK(pm,vr) do if (CPU_HAS_SUNMMU) { \ |
932 | int x; \ | | 932 | int x; \ |
933 | x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \ | | 933 | x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \ |
934 | if (vr > x) { \ | | 934 | if (vr > x) { \ |
935 | if (vr < pm->pm_gap_end) \ | | 935 | if (vr < pm->pm_gap_end) \ |
936 | pm->pm_gap_end = vr; \ | | 936 | pm->pm_gap_end = vr; \ |
937 | } else { \ | | 937 | } else { \ |
938 | if (vr >= pm->pm_gap_start && x != pm->pm_gap_start) \ | | 938 | if (vr >= pm->pm_gap_start && x != pm->pm_gap_start) \ |
939 | pm->pm_gap_start = vr + 1; \ | | 939 | pm->pm_gap_start = vr + 1; \ |
940 | } \ | | 940 | } \ |
941 | } while (0) | | 941 | } while (0) |
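
The gap is the pmap's known run of empty regions, maintained so region scans can skip it (the macros are no-ops unless CPU_HAS_SUNMMU). A worked sequence on a hypothetical pmap whose regions 10..19 start out empty:

	pm->pm_gap_start = 10; pm->pm_gap_end = 20;	/* gap: [10, 20) */
	GAP_WIDEN(pm, 9);	/* region 9 emptied, abuts gap: [9, 20)  */
	GAP_WIDEN(pm, 20);	/* region 20 emptied too:       [9, 21)  */
	GAP_SHRINK(pm, 12);	/* mapping added at vr 12, below the
				 * midpoint 15: gap shrinks to [13, 21) */
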
942 | | | 942 | |
943 | | | 943 | |
944 | static void get_phys_mem(void **); | | 944 | static void get_phys_mem(void **); |
945 | #if 0 /* not used */ | | 945 | #if 0 /* not used */ |
946 | void kvm_iocache(char *, int); | | 946 | void kvm_iocache(char *, int); |
947 | #endif | | 947 | #endif |
948 | | | 948 | |
949 | #ifdef DEBUG | | 949 | #ifdef DEBUG |
950 | void pm_check(char *, struct pmap *); | | 950 | void pm_check(char *, struct pmap *); |
951 | void pm_check_k(char *, struct pmap *); | | 951 | void pm_check_k(char *, struct pmap *); |
952 | void pm_check_u(char *, struct pmap *); | | 952 | void pm_check_u(char *, struct pmap *); |
953 | #endif | | 953 | #endif |
954 | | | 954 | |
955 | /* | | 955 | /* |
956 | * During the PMAP bootstrap, we can use a simple translation to map a | | 956 | * During the PMAP bootstrap, we can use a simple translation to map a |
957 | * kernel virtual address to a physical memory address (this is arranged | | 957 | * kernel virtual address to a physical memory address (this is arranged
958 | * in locore). Usually, KERNBASE maps to physical address 0. This is always | | 958 | * in locore). Usually, KERNBASE maps to physical address 0. This is always |
959 | * the case on sun4 and sun4c machines. On sun4m machines -- if no memory is | | 959 | * the case on sun4 and sun4c machines. On sun4m machines -- if no memory is |
960 | * installed in the bank corresponding to physical address 0 -- the PROM may | | 960 | * installed in the bank corresponding to physical address 0 -- the PROM may |
961 | * elect to load us at some other address, presumably at the start of | | 961 | * elect to load us at some other address, presumably at the start of |
962 | * the first memory bank that is available. We set up the variable | | 962 | * the first memory bank that is available. We set up the variable
963 | * `va2pa_offset' to hold KERNBASE minus its physical load address. | | 963 | * `va2pa_offset' to hold KERNBASE minus its physical load address.
964 | */ | | 964 | */ |
965 | | | 965 | |
966 | static u_long va2pa_offset; | | 966 | static u_long va2pa_offset; |
967 | #define PMAP_BOOTSTRAP_VA2PA(v) ((paddr_t)((u_long)(v) - va2pa_offset)) | | 967 | #define PMAP_BOOTSTRAP_VA2PA(v) ((paddr_t)((u_long)(v) - va2pa_offset)) |
968 | #define PMAP_BOOTSTRAP_PA2VA(p) ((vaddr_t)((u_long)(p) + va2pa_offset)) | | 968 | #define PMAP_BOOTSTRAP_PA2VA(p) ((vaddr_t)((u_long)(p) + va2pa_offset)) |
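
A worked example with hypothetical addresses:

	/*
	 * With KERNBASE == 0xf0000000 loaded at physical 0, va2pa_offset
	 * is 0xf0000000 and PMAP_BOOTSTRAP_VA2PA(0xf0004000) == 0x4000.
	 * Had the PROM loaded us at physical 0x02000000 instead,
	 * va2pa_offset would be 0xee000000.
	 */
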
969 | | | 969 | |
970 | /* | | 970 | /* |
971 | * Grab physical memory list. | | 971 | * Grab physical memory list. |
972 | * While here, compute `physmem'. | | 972 | * While here, compute `physmem'. |
973 | */ | | 973 | */ |
974 | void | | 974 | void |
975 | get_phys_mem(void **top) | | 975 | get_phys_mem(void **top) |
976 | { | | 976 | { |
977 | struct memarr *mp; | | 977 | struct memarr *mp; |
978 | char *p; | | 978 | char *p; |
979 | int i; | | 979 | int i; |
980 | | | 980 | |
981 | /* Load the memory descriptor array at the current kernel top */ | | 981 | /* Load the memory descriptor array at the current kernel top */ |
982 | p = (void *)ALIGN(*top); | | 982 | p = (void *)ALIGN(*top); |
983 | pmemarr = (struct memarr *)p; | | 983 | pmemarr = (struct memarr *)p; |
984 | npmemarr = prom_makememarr(pmemarr, 1000, MEMARR_AVAILPHYS); | | 984 | npmemarr = prom_makememarr(pmemarr, 1000, MEMARR_AVAILPHYS); |
985 | | | 985 | |
986 | /* Update kernel top */ | | 986 | /* Update kernel top */ |
987 | p += npmemarr * sizeof(struct memarr); | | 987 | p += npmemarr * sizeof(struct memarr); |
988 | *top = p; | | 988 | *top = p; |
989 | | | 989 | |
990 | for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++) | | 990 | for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++) |
991 | physmem += btoc(mp->len); | | 991 | physmem += btoc(mp->len); |
992 | } | | 992 | } |
993 | | | 993 | |
994 | | | 994 | |
995 | /* | | 995 | /* |
996 | * Support functions for vm_page_bootstrap(). | | 996 | * Support functions for vm_page_bootstrap(). |
997 | */ | | 997 | */ |
998 | | | 998 | |
999 | /* | | 999 | /* |
1000 | * How much virtual space does this kernel have? | | 1000 | * How much virtual space does this kernel have? |
1001 | * (After mapping kernel text, data, etc.) | | 1001 | * (After mapping kernel text, data, etc.) |
1002 | */ | | 1002 | */ |
1003 | void | | 1003 | void |
1004 | pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end) | | 1004 | pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end) |
1005 | { | | 1005 | { |
1006 | | | 1006 | |
1007 | *v_start = virtual_avail; | | 1007 | *v_start = virtual_avail; |
1008 | *v_end = virtual_end; | | 1008 | *v_end = virtual_end; |
1009 | } | | 1009 | } |
1010 | | | 1010 | |
1011 | #ifdef PMAP_GROWKERNEL | | 1011 | #ifdef PMAP_GROWKERNEL |
1012 | vaddr_t | | 1012 | vaddr_t |
1013 | pmap_growkernel(vaddr_t eva) | | 1013 | pmap_growkernel(vaddr_t eva) |
1014 | { | | 1014 | { |
1015 | struct regmap *rp; | | 1015 | struct regmap *rp; |
1016 | struct segmap *sp; | | 1016 | struct segmap *sp; |
1017 | int vr, evr, M, N, i; | | 1017 | int vr, evr, M, N, i; |
1018 | struct vm_page *pg; | | 1018 | struct vm_page *pg; |
1019 | vaddr_t va; | | 1019 | vaddr_t va; |
1020 | | | 1020 | |
1021 | if (eva <= virtual_end) | | 1021 | if (eva <= virtual_end) |
1022 | return (virtual_end); | | 1022 | return (virtual_end); |
1023 | | | 1023 | |
1024 | /* For now, only implemented for sun4/sun4c */ | | 1024 | /* For now, only implemented for sun4/sun4c */ |
1025 | KASSERT(CPU_HAS_SUNMMU); | | 1025 | KASSERT(CPU_HAS_SUNMMU); |
1026 | | | 1026 | |
1027 | /* | | 1027 | /* |
1028 | * Map in the next region(s) | | 1028 | * Map in the next region(s) |
1029 | */ | | 1029 | */ |
1030 | | | 1030 | |
1031 | /* Get current end-of-kernel */ | | 1031 | /* Get current end-of-kernel */ |
1032 | vr = virtual_end >> RGSHIFT; | | 1032 | vr = virtual_end >> RGSHIFT; |
1033 | evr = (eva + NBPRG - 1) >> RGSHIFT; | | 1033 | evr = (eva + NBPRG - 1) >> RGSHIFT; |
1034 | eva = evr << RGSHIFT; | | 1034 | eva = evr << RGSHIFT; |
1035 | | | 1035 | |
1036 | if (eva > VM_MAX_KERNEL_ADDRESS) | | 1036 | if (eva > VM_MAX_KERNEL_ADDRESS) |
1037 | panic("growkernel: grown too large: %lx", eva); | | 1037 | panic("growkernel: grown too large: %lx", eva); |
1038 | | | 1038 | |
1039 | /* | | 1039 | /* |
1040 | * Divide a region into N blocks of M segments, where the PTEs of | | 1040 | * Divide a region into N blocks of M segments, where the PTEs of
1041 | * each block of segments fit in exactly one page. | | 1041 | * each block of segments fit in exactly one page.
1042 | * N should come out to 1 for 8K pages and to 4 for 4K pages. | | 1042 | * N should come out to 1 for 8K pages and to 4 for 4K pages. |
1043 | */ | | 1043 | */ |
1044 | M = NBPG / (NPTESG * sizeof(int)); | | 1044 | M = NBPG / (NPTESG * sizeof(int)); |
1045 | N = (NBPRG/NBPSG) / M; | | 1045 | N = (NBPRG/NBPSG) / M; |
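	/*
	 * Worked example, assuming the usual 256KB segments and 16MB
	 * regions: with 4K pages NPTESG is 64, so one page holds the
	 * PTEs of M = 4096/(64*4) = 16 segments, and the region's 64
	 * segments need N = 64/16 = 4 blocks; with 8K pages M = 64
	 * and N = 1.
	 */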
1046 | | | 1046 | |
1047 | while (vr < evr) { | | 1047 | while (vr < evr) { |
1048 | rp = &pmap_kernel()->pm_regmap[vr]; | | 1048 | rp = &pmap_kernel()->pm_regmap[vr]; |
1049 | for (i = 0; i < N; i++) { | | 1049 | for (i = 0; i < N; i++) { |
1050 | sp = &rp->rg_segmap[i * M]; | | 1050 | sp = &rp->rg_segmap[i * M]; |
1051 | va = (vaddr_t)sp->sg_pte; | | 1051 | va = (vaddr_t)sp->sg_pte; |
1052 | pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); | | 1052 | pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); |
1053 | if (pg == NULL) | | 1053 | if (pg == NULL) |
1054 | panic("growkernel: out of memory"); | | 1054 | panic("growkernel: out of memory"); |
1055 | pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), | | 1055 | pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), |
1056 | VM_PROT_READ | VM_PROT_WRITE); | | 1056 | VM_PROT_READ | VM_PROT_WRITE); |
1057 | } | | 1057 | } |
1058 | } | | 1058 | } |
| @@ -2506,2316 +2506,2326 @@ pv_link4_4c(struct vm_page *pg, struct p | | | @@ -2506,2316 +2506,2326 @@ pv_link4_4c(struct vm_page *pg, struct p |
2506 | pv0->pv_pmap = pm; | | 2506 | pv0->pv_pmap = pm; |
2507 | pv0->pv_va = va; | | 2507 | pv0->pv_va = va; |
2508 | pv0->pv_flags |= nc; | | 2508 | pv0->pv_flags |= nc; |
2509 | return (0); | | 2509 | return (0); |
2510 | } | | 2510 | } |
2511 | | | 2511 | |
2512 | /* | | 2512 | /* |
2513 | * Allocate the new PV entry now, and, if that fails, bail out | | 2513 | * Allocate the new PV entry now, and, if that fails, bail out |
2514 | * before changing the cacheable state of the existing mappings. | | 2514 | * before changing the cacheable state of the existing mappings. |
2515 | */ | | 2515 | */ |
2516 | npv = pool_get(&pv_pool, PR_NOWAIT); | | 2516 | npv = pool_get(&pv_pool, PR_NOWAIT); |
2517 | if (npv == NULL) | | 2517 | if (npv == NULL) |
2518 | return (ENOMEM); | | 2518 | return (ENOMEM); |
2519 | | | 2519 | |
2520 | pmap_stats.ps_enter_secondpv++; | | 2520 | pmap_stats.ps_enter_secondpv++; |
2521 | | | 2521 | |
2522 | /* | | 2522 | /* |
2523 | * Before entering the new mapping, see if | | 2523 | * Before entering the new mapping, see if |
2524 | * it will cause old mappings to become aliased | | 2524 | * it will cause old mappings to become aliased |
2525 | * and thus need to be `discached'. | | 2525 | * and thus need to be `discached'. |
2526 | */ | | 2526 | */ |
2527 | if (pv0->pv_flags & PV_ANC) { | | 2527 | if (pv0->pv_flags & PV_ANC) { |
2528 | /* already uncached, just stay that way */ | | 2528 | /* already uncached, just stay that way */ |
2529 | *pteprotop |= PG_NC; | | 2529 | *pteprotop |= PG_NC; |
2530 | goto link_npv; | | 2530 | goto link_npv; |
2531 | } | | 2531 | } |
2532 | | | 2532 | |
2533 | for (pv = pv0; pv != NULL; pv = pv->pv_next) { | | 2533 | for (pv = pv0; pv != NULL; pv = pv->pv_next) { |
2534 | if ((pv->pv_flags & PV_NC) != 0) { | | 2534 | if ((pv->pv_flags & PV_NC) != 0) { |
2535 | *pteprotop |= PG_NC; | | 2535 | *pteprotop |= PG_NC; |
2536 | #ifdef DEBUG | | 2536 | #ifdef DEBUG |
2537 | /* Check currently illegal condition */ | | 2537 | /* Check currently illegal condition */ |
2538 | if (nc == 0) | | 2538 | if (nc == 0) |
2539 | printf("pv_link: proc %s, va=0x%lx: " | | 2539 | printf("pv_link: proc %s, va=0x%lx: " |
2540 | "unexpected uncached mapping at 0x%lx\n", | | 2540 | "unexpected uncached mapping at 0x%lx\n", |
2541 | curproc ? curproc->p_comm : "--", | | 2541 | curproc ? curproc->p_comm : "--", |
2542 | va, pv->pv_va); | | 2542 | va, pv->pv_va); |
2543 | #endif | | 2543 | #endif |
2544 | } | | 2544 | } |
2545 | if (BADALIAS(va, pv->pv_va)) { | | 2545 | if (BADALIAS(va, pv->pv_va)) { |
2546 | #ifdef DEBUG | | 2546 | #ifdef DEBUG |
2547 | if (pmapdebug & PDB_CACHESTUFF) | | 2547 | if (pmapdebug & PDB_CACHESTUFF) |
2548 | printf( | | 2548 | printf( |
2549 | "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n", | | 2549 | "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n", |
2550 | curproc ? curproc->p_comm : "--", | | 2550 | curproc ? curproc->p_comm : "--", |
2551 | va, pv->pv_va, pg); | | 2551 | va, pv->pv_va, pg); |
2552 | #endif | | 2552 | #endif |
2553 | /* Mark list head `uncached due to aliases' */ | | 2553 | /* Mark list head `uncached due to aliases' */ |
2554 | pv0->pv_flags |= PV_ANC; | | 2554 | pv0->pv_flags |= PV_ANC; |
2555 | pv_changepte4_4c(pg, PG_NC, 0); | | 2555 | pv_changepte4_4c(pg, PG_NC, 0); |
2556 | *pteprotop |= PG_NC; | | 2556 | *pteprotop |= PG_NC; |
2557 | break; | | 2557 | break; |
2558 | } | | 2558 | } |
2559 | } | | 2559 | } |
2560 | | | 2560 | |
2561 | link_npv: | | 2561 | link_npv: |
2562 | npv->pv_next = pv0->pv_next; | | 2562 | npv->pv_next = pv0->pv_next; |
2563 | npv->pv_pmap = pm; | | 2563 | npv->pv_pmap = pm; |
2564 | npv->pv_va = va; | | 2564 | npv->pv_va = va; |
2565 | npv->pv_flags = nc; | | 2565 | npv->pv_flags = nc; |
2566 | pv0->pv_next = npv; | | 2566 | pv0->pv_next = npv; |
2567 | return (0); | | 2567 | return (0); |
2568 | } | | 2568 | } |
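
BADALIAS() (defined earlier in this file) is the aliasing test both pv_link flavors lean on: two virtual mappings of one page conflict when they disagree in the VA bits that index the virtually-addressed cache. Conceptually it behaves like the sketch below; the name cache_alias_bits is illustrative, not the real definition:

	/* Sketch only, not the real macro. */
	#define BADALIAS_SKETCH(a, b) \
		((((a) ^ (b)) & cache_alias_bits) != 0)
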
2569 | | | 2569 | |
2570 | #endif /* SUN4 || SUN4C */ | | 2570 | #endif /* SUN4 || SUN4C */ |
2571 | | | 2571 | |
2572 | #if defined(SUN4M) || defined(SUN4D) /* SRMMU versions of above */ | | 2572 | #if defined(SUN4M) || defined(SUN4D) /* SRMMU versions of above */ |
2573 | /* | | 2573 | /* |
2574 | * Walk the given pv list, and for each PTE, set or clear some bits | | 2574 | * Walk the given pv list, and for each PTE, set or clear some bits |
2575 | * (e.g., PG_W or PG_NC). | | 2575 | * (e.g., PG_W or PG_NC). |
2576 | * | | 2576 | * |
2577 | * This routine flushes the cache for any page whose PTE changes, | | 2577 | * This routine flushes the cache for any page whose PTE changes, |
2578 | * as long as the process has a context; this is overly conservative. | | 2578 | * as long as the process has a context; this is overly conservative. |
2579 | * It also copies ref and mod bits to the pvlist, on the theory that | | 2579 | * It also copies ref and mod bits to the pvlist, on the theory that |
2580 | * this might save work later. (XXX should test this theory) | | 2580 | * this might save work later. (XXX should test this theory) |
2581 | * | | 2581 | * |
2582 | * Called with PV lock and pmap main lock held. | | 2582 | * Called with PV lock and pmap main lock held. |
2583 | */ | | 2583 | */ |
2584 | void | | 2584 | void |
2585 | pv_changepte4m(struct vm_page *pg, int bis, int bic) | | 2585 | pv_changepte4m(struct vm_page *pg, int bis, int bic) |
2586 | { | | 2586 | { |
2587 | struct pvlist *pv; | | 2587 | struct pvlist *pv; |
2588 | struct pmap *pm; | | 2588 | struct pmap *pm; |
2589 | vaddr_t va; | | 2589 | vaddr_t va; |
2590 | struct regmap *rp; | | 2590 | struct regmap *rp; |
2591 | struct segmap *sp; | | 2591 | struct segmap *sp; |
2592 | | | 2592 | |
2593 | pv = VM_MDPAGE_PVHEAD(pg); | | 2593 | pv = VM_MDPAGE_PVHEAD(pg); |
2594 | if (pv->pv_pmap == NULL) | | 2594 | if (pv->pv_pmap == NULL) |
2595 | return; | | 2595 | return; |
2596 | | | 2596 | |
2597 | for (; pv != NULL; pv = pv->pv_next) { | | 2597 | for (; pv != NULL; pv = pv->pv_next) { |
2598 | int tpte; | | 2598 | int tpte; |
2599 | pm = pv->pv_pmap; | | 2599 | pm = pv->pv_pmap; |
2600 | /* XXXSMP: should lock pm */ | | 2600 | /* XXXSMP: should lock pm */ |
2601 | va = pv->pv_va; | | 2601 | va = pv->pv_va; |
2602 | rp = &pm->pm_regmap[VA_VREG(va)]; | | 2602 | rp = &pm->pm_regmap[VA_VREG(va)]; |
2603 | sp = &rp->rg_segmap[VA_VSEG(va)]; | | 2603 | sp = &rp->rg_segmap[VA_VSEG(va)]; |
2604 | | | 2604 | |
2605 | if (pm->pm_ctx) { | | 2605 | if (pm->pm_ctx) { |
2606 | /* | | 2606 | /* |
2607 | * XXX: always flush cache; conservative, but | | 2607 | * XXX: always flush cache; conservative, but |
2608 | * needed to invalidate cache tag protection | | 2608 | * needed to invalidate cache tag protection |
2609 | * bits and when disabling caching. | | 2609 | * bits and when disabling caching. |
2610 | */ | | 2610 | */ |
2611 | cache_flush_page(va, pm->pm_ctxnum); | | 2611 | cache_flush_page(va, pm->pm_ctxnum); |
2612 | } | | 2612 | } |
2613 | | | 2613 | |
2614 | tpte = sp->sg_pte[VA_SUN4M_VPG(va)]; | | 2614 | tpte = sp->sg_pte[VA_SUN4M_VPG(va)]; |
2615 | KASSERT((tpte & SRMMU_TETYPE) == SRMMU_TEPTE); | | 2615 | KASSERT((tpte & SRMMU_TETYPE) == SRMMU_TEPTE); |
2616 | VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4M(updatepte4m(va, | | 2616 | VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4M(updatepte4m(va, |
2617 | &sp->sg_pte[VA_SUN4M_VPG(va)], bic, bis, pm->pm_ctxnum, | | 2617 | &sp->sg_pte[VA_SUN4M_VPG(va)], bic, bis, pm->pm_ctxnum, |
2618 | PMAP_CPUSET(pm))); | | 2618 | PMAP_CPUSET(pm))); |
2619 | } | | 2619 | } |
2620 | } | | 2620 | } |
2621 | | | 2621 | |
2622 | /* | | 2622 | /* |
2623 | * Sync ref and mod bits in pvlist. If page has been ref'd or modified, | | 2623 | * Sync ref and mod bits in pvlist. If page has been ref'd or modified, |
2624 | * update ref/mod bits in pvlist, and clear the hardware bits. | | 2624 | * update ref/mod bits in pvlist, and clear the hardware bits. |
2625 | * | | 2625 | * |
2626 | * Return the new flags. | | 2626 | * Return the new flags. |
2627 | */ | | 2627 | */ |
2628 | int | | 2628 | int |
2629 | pv_syncflags4m(struct vm_page *pg) | | 2629 | pv_syncflags4m(struct vm_page *pg) |
2630 | { | | 2630 | { |
2631 | struct pvlist *pv; | | 2631 | struct pvlist *pv; |
2632 | struct pmap *pm; | | 2632 | struct pmap *pm; |
2633 | int va, flags; | | 2633 | int va, flags; |
2634 | int s; | | 2634 | int s; |
2635 | struct regmap *rp; | | 2635 | struct regmap *rp; |
2636 | struct segmap *sp; | | 2636 | struct segmap *sp; |
2637 | int tpte; | | 2637 | int tpte; |
2638 | | | 2638 | |
2639 | s = splvm(); | | 2639 | s = splvm(); |
2640 | PMAP_LOCK(); | | 2640 | PMAP_LOCK(); |
2641 | pv = VM_MDPAGE_PVHEAD(pg); | | 2641 | pv = VM_MDPAGE_PVHEAD(pg); |
2642 | if (pv->pv_pmap == NULL) { | | 2642 | if (pv->pv_pmap == NULL) { |
2643 | /* Page not mapped; pv_flags is already up to date */ | | 2643 | /* Page not mapped; pv_flags is already up to date */ |
2644 | flags = 0; | | 2644 | flags = 0; |
2645 | goto out; | | 2645 | goto out; |
2646 | } | | 2646 | } |
2647 | | | 2647 | |
2648 | flags = pv->pv_flags; | | 2648 | flags = pv->pv_flags; |
2649 | for (; pv != NULL; pv = pv->pv_next) { | | 2649 | for (; pv != NULL; pv = pv->pv_next) { |
2650 | pm = pv->pv_pmap; | | 2650 | pm = pv->pv_pmap; |
2651 | va = pv->pv_va; | | 2651 | va = pv->pv_va; |
2652 | rp = &pm->pm_regmap[VA_VREG(va)]; | | 2652 | rp = &pm->pm_regmap[VA_VREG(va)]; |
2653 | sp = &rp->rg_segmap[VA_VSEG(va)]; | | 2653 | sp = &rp->rg_segmap[VA_VSEG(va)]; |
2654 | | | 2654 | |
2655 | tpte = sp->sg_pte[VA_SUN4M_VPG(va)]; | | 2655 | tpte = sp->sg_pte[VA_SUN4M_VPG(va)]; |
2656 | if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && | | 2656 | if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && |
2657 | (tpte & (SRMMU_PG_R|SRMMU_PG_M)) != 0) { | | 2657 | (tpte & (SRMMU_PG_R|SRMMU_PG_M)) != 0) { |
2658 | /* | | 2658 | /* |
2659 | * Flush cache if modified to make sure the PTE | | 2659 | * Flush cache if modified to make sure the PTE |
2660 | * M bit will be set again on the next write access. | | 2660 | * M bit will be set again on the next write access. |
2661 | */ | | 2661 | */ |
2662 | if (pm->pm_ctx && (tpte & SRMMU_PG_M) == SRMMU_PG_M) | | 2662 | if (pm->pm_ctx && (tpte & SRMMU_PG_M) == SRMMU_PG_M) |
2663 | cache_flush_page(va, pm->pm_ctxnum); | | 2663 | cache_flush_page(va, pm->pm_ctxnum); |
2664 | | | 2664 | |
2665 | flags |= MR4M(updatepte4m(va, | | 2665 | flags |= MR4M(updatepte4m(va, |
2666 | &sp->sg_pte[VA_SUN4M_VPG(va)], | | 2666 | &sp->sg_pte[VA_SUN4M_VPG(va)], |
2667 | SRMMU_PG_M | SRMMU_PG_R, | | 2667 | SRMMU_PG_M | SRMMU_PG_R, |
2668 | 0, pm->pm_ctxnum, PMAP_CPUSET(pm))); | | 2668 | 0, pm->pm_ctxnum, PMAP_CPUSET(pm))); |
2669 | } | | 2669 | } |
2670 | } | | 2670 | } |
2671 | | | 2671 | |
2672 | VM_MDPAGE_PVHEAD(pg)->pv_flags = flags; | | 2672 | VM_MDPAGE_PVHEAD(pg)->pv_flags = flags; |
2673 | out: | | 2673 | out: |
2674 | PMAP_UNLOCK(); | | 2674 | PMAP_UNLOCK(); |
2675 | splx(s); | | 2675 | splx(s); |
2676 | return (flags); | | 2676 | return (flags); |
2677 | } | | 2677 | } |
2678 | | | 2678 | |
2679 | /* | | 2679 | /* |
2680 | * Should be called with pmap already locked. | | 2680 | * Should be called with pmap already locked. |
2681 | */ | | 2681 | */ |
2682 | void | | 2682 | void |
2683 | pv_unlink4m(struct vm_page *pg, struct pmap *pm, vaddr_t va) | | 2683 | pv_unlink4m(struct vm_page *pg, struct pmap *pm, vaddr_t va) |
2684 | { | | 2684 | { |
2685 | struct pvlist *pv0, *npv; | | 2685 | struct pvlist *pv0, *npv; |
2686 | | | 2686 | |
2687 | pv0 = VM_MDPAGE_PVHEAD(pg); | | 2687 | pv0 = VM_MDPAGE_PVHEAD(pg); |
2688 | | | 2688 | |
2689 | npv = pv0->pv_next; | | 2689 | npv = pv0->pv_next; |
2690 | /* | | 2690 | /* |
2691 | * First entry is special (sigh). | | 2691 | * First entry is special (sigh). |
2692 | */ | | 2692 | */ |
2693 | if (pv0->pv_pmap == pm && pv0->pv_va == va) { | | 2693 | if (pv0->pv_pmap == pm && pv0->pv_va == va) { |
2694 | pmap_stats.ps_unlink_pvfirst++; | | 2694 | pmap_stats.ps_unlink_pvfirst++; |
2695 | if (npv != NULL) { | | 2695 | if (npv != NULL) { |
2696 | /* | | 2696 | /* |
2697 | * Shift next entry into the head. | | 2697 | * Shift next entry into the head. |
2698 | * Make sure to retain the REF, MOD and ANC flags | | 2698 | * Make sure to retain the REF, MOD and ANC flags |
2699 | * on the list head. | | 2699 | * on the list head. |
2700 | */ | | 2700 | */ |
2701 | pv0->pv_next = npv->pv_next; | | 2701 | pv0->pv_next = npv->pv_next; |
2702 | pv0->pv_pmap = npv->pv_pmap; | | 2702 | pv0->pv_pmap = npv->pv_pmap; |
2703 | pv0->pv_va = npv->pv_va; | | 2703 | pv0->pv_va = npv->pv_va; |
2704 | pv0->pv_flags &= ~PV_NC; | | 2704 | pv0->pv_flags &= ~PV_NC; |
2705 | pv0->pv_flags |= (npv->pv_flags & PV_NC); | | 2705 | pv0->pv_flags |= (npv->pv_flags & PV_NC); |
2706 | pool_put(&pv_pool, npv); | | 2706 | pool_put(&pv_pool, npv); |
2707 | } else { | | 2707 | } else { |
2708 | /* | | 2708 | /* |
2709 | * No mappings left; we need to maintain | | 2709 | * No mappings left; we need to maintain |
2710 | * the REF and MOD flags, since pmap_is_modified() | | 2710 | * the REF and MOD flags, since pmap_is_modified() |
2711 | * can still be called for this page. | | 2711 | * can still be called for this page. |
2712 | */ | | 2712 | */ |
2713 | pv0->pv_pmap = NULL; | | 2713 | pv0->pv_pmap = NULL; |
2714 | pv0->pv_flags &= ~(PV_NC|PV_ANC); | | 2714 | pv0->pv_flags &= ~(PV_NC|PV_ANC); |
2715 | return; | | 2715 | return; |
2716 | } | | 2716 | } |
2717 | } else { | | 2717 | } else { |
2718 | struct pvlist *prev; | | 2718 | struct pvlist *prev; |
2719 | | | 2719 | |
2720 | pmap_stats.ps_unlink_pvsearch++; | | 2720 | pmap_stats.ps_unlink_pvsearch++; |
2721 | for (prev = pv0;; prev = npv, npv = npv->pv_next) { | | 2721 | for (prev = pv0;; prev = npv, npv = npv->pv_next) { |
2722 | if (npv == NULL) { | | 2722 | if (npv == NULL) { |
2723 | panic("pv_unlink: pm %p is missing on pg %p", | | 2723 | panic("pv_unlink: pm %p is missing on pg %p", |
2724 | pm, pg); | | 2724 | pm, pg); |
2725 | return; | | 2725 | return; |
2726 | } | | 2726 | } |
2727 | if (npv->pv_pmap == pm && npv->pv_va == va) | | 2727 | if (npv->pv_pmap == pm && npv->pv_va == va) |
2728 | break; | | 2728 | break; |
2729 | } | | 2729 | } |
2730 | prev->pv_next = npv->pv_next; | | 2730 | prev->pv_next = npv->pv_next; |
2731 | pool_put(&pv_pool, npv); | | 2731 | pool_put(&pv_pool, npv); |
2732 | } | | 2732 | } |
2733 | | | 2733 | |
2734 | if ((pv0->pv_flags & (PV_NC|PV_ANC)) == PV_ANC) { | | 2734 | if ((pv0->pv_flags & (PV_NC|PV_ANC)) == PV_ANC) { |
2735 | | | 2735 | |
2736 | /* | | 2736 | /* |
2737 | * Not cached: check whether we can fix that now. | | 2737 | * Not cached: check whether we can fix that now. |
2738 | */ | | 2738 | */ |
2739 | va = pv0->pv_va; | | 2739 | va = pv0->pv_va; |
2740 | for (npv = pv0->pv_next; npv != NULL; npv = npv->pv_next) | | 2740 | for (npv = pv0->pv_next; npv != NULL; npv = npv->pv_next) |
2741 | if (BADALIAS(va, npv->pv_va) || | | 2741 | if (BADALIAS(va, npv->pv_va) || |
2742 | (npv->pv_flags & PV_NC) != 0) | | 2742 | (npv->pv_flags & PV_NC) != 0) |
2743 | return; | | 2743 | return; |
2744 | #ifdef DEBUG | | 2744 | #ifdef DEBUG |
2745 | if (pmapdebug & PDB_CACHESTUFF) | | 2745 | if (pmapdebug & PDB_CACHESTUFF) |
2746 | printf( | | 2746 | printf( |
2747 | "pv_unlink: alias ok: proc %s, va 0x%lx, pg %p\n", | | 2747 | "pv_unlink: alias ok: proc %s, va 0x%lx, pg %p\n", |
2748 | curproc ? curproc->p_comm : "--", | | 2748 | curproc ? curproc->p_comm : "--", |
2749 | va, pg); | | 2749 | va, pg); |
2750 | #endif | | 2750 | #endif |
2751 | pv0->pv_flags &= ~PV_ANC; | | 2751 | pv0->pv_flags &= ~PV_ANC; |
2752 | pv_changepte4m(pg, SRMMU_PG_C, 0); | | 2752 | pv_changepte4m(pg, SRMMU_PG_C, 0); |
2753 | } | | 2753 | } |
2754 | } | | 2754 | } |
2755 | | | 2755 | |
2756 | /* | | 2756 | /* |
2757 | * pv_link is the inverse of pv_unlink, and is used in pmap_enter. | | 2757 | * pv_link is the inverse of pv_unlink, and is used in pmap_enter. |
2758 | * May turn off the cacheable bit in the pte prototype for the new mapping. | | 2758 | * May turn off the cacheable bit in the pte prototype for the new mapping. |
2759 | * Called with pm locked. | | 2759 | * Called with pm locked. |
2760 | */ | | 2760 | */ |
2761 | /*static*/ int | | 2761 | /*static*/ int |
2762 | pv_link4m(struct vm_page *pg, struct pmap *pm, vaddr_t va, | | 2762 | pv_link4m(struct vm_page *pg, struct pmap *pm, vaddr_t va, |
2763 | unsigned int *pteprotop) | | 2763 | unsigned int *pteprotop) |
2764 | { | | 2764 | { |
2765 | struct pvlist *pv0, *pv, *npv; | | 2765 | struct pvlist *pv0, *pv, *npv; |
2766 | int nc = (*pteprotop & SRMMU_PG_C) == 0 ? PV_NC : 0; | | 2766 | int nc = (*pteprotop & SRMMU_PG_C) == 0 ? PV_NC : 0; |
2767 | int error = 0; | | 2767 | int error = 0; |
2768 | | | 2768 | |
2769 | pv0 = VM_MDPAGE_PVHEAD(pg); | | 2769 | pv0 = VM_MDPAGE_PVHEAD(pg); |
2770 | | | 2770 | |
2771 | if (pv0->pv_pmap == NULL) { | | 2771 | if (pv0->pv_pmap == NULL) { |
2772 | /* no pvlist entries yet */ | | 2772 | /* no pvlist entries yet */ |
2773 | pmap_stats.ps_enter_firstpv++; | | 2773 | pmap_stats.ps_enter_firstpv++; |
2774 | pv0->pv_next = NULL; | | 2774 | pv0->pv_next = NULL; |
2775 | pv0->pv_pmap = pm; | | 2775 | pv0->pv_pmap = pm; |
2776 | pv0->pv_va = va; | | 2776 | pv0->pv_va = va; |
2777 | pv0->pv_flags |= nc; | | 2777 | pv0->pv_flags |= nc; |
2778 | goto out; | | 2778 | goto out; |
2779 | } | | 2779 | } |
2780 | | | 2780 | |
2781 | /* | | 2781 | /* |
2782 | * Allocate the new PV entry now, and, if that fails, bail out | | 2782 | * Allocate the new PV entry now, and, if that fails, bail out |
2783 | * before changing the cacheable state of the existing mappings. | | 2783 | * before changing the cacheable state of the existing mappings. |
2784 | */ | | 2784 | */ |
2785 | npv = pool_get(&pv_pool, PR_NOWAIT); | | 2785 | npv = pool_get(&pv_pool, PR_NOWAIT); |
2786 | if (npv == NULL) { | | 2786 | if (npv == NULL) { |
2787 | error = ENOMEM; | | 2787 | error = ENOMEM; |
2788 | goto out; | | 2788 | goto out; |
2789 | } | | 2789 | } |
2790 | | | 2790 | |
2791 | pmap_stats.ps_enter_secondpv++; | | 2791 | pmap_stats.ps_enter_secondpv++; |
2792 | | | 2792 | |
2793 | /* | | 2793 | /* |
2794 | * See if the new mapping will cause old mappings to | | 2794 | * See if the new mapping will cause old mappings to |
2795 | * become aliased and thus need to be `discached'. | | 2795 | * become aliased and thus need to be `discached'. |
2796 | */ | | 2796 | */ |
2797 | if ((pv0->pv_flags & PV_ANC) != 0) { | | 2797 | if ((pv0->pv_flags & PV_ANC) != 0) { |
2798 | /* already uncached, just stay that way */ | | 2798 | /* already uncached, just stay that way */ |
2799 | *pteprotop &= ~SRMMU_PG_C; | | 2799 | *pteprotop &= ~SRMMU_PG_C; |
2800 | goto link_npv; | | 2800 | goto link_npv; |
2801 | } | | 2801 | } |
2802 | | | 2802 | |
2803 | for (pv = pv0; pv != NULL; pv = pv->pv_next) { | | 2803 | for (pv = pv0; pv != NULL; pv = pv->pv_next) { |
2804 | if ((pv->pv_flags & PV_NC) != 0) { | | 2804 | if ((pv->pv_flags & PV_NC) != 0) { |
2805 | *pteprotop &= ~SRMMU_PG_C; | | 2805 | *pteprotop &= ~SRMMU_PG_C; |
2806 | #ifdef DEBUG | | 2806 | #ifdef DEBUG |
2807 | /* Check currently illegal condition */ | | 2807 | /* Check currently illegal condition */ |
2808 | if (nc == 0) | | 2808 | if (nc == 0) |
2809 | printf("pv_link: proc %s, va=0x%lx: " | | 2809 | printf("pv_link: proc %s, va=0x%lx: " |
2810 | "unexpected uncached mapping at 0x%lx\n", | | 2810 | "unexpected uncached mapping at 0x%lx\n", |
2811 | curproc ? curproc->p_comm : "--", | | 2811 | curproc ? curproc->p_comm : "--", |
2812 | va, pv->pv_va); | | 2812 | va, pv->pv_va); |
2813 | #endif | | 2813 | #endif |
2814 | } | | 2814 | } |
2815 | if (BADALIAS(va, pv->pv_va)) { | | 2815 | if (BADALIAS(va, pv->pv_va)) { |
2816 | #ifdef DEBUG | | 2816 | #ifdef DEBUG |
2817 | if (pmapdebug & PDB_CACHESTUFF) | | 2817 | if (pmapdebug & PDB_CACHESTUFF) |
2818 | printf( | | 2818 | printf( |
2819 | "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n", | | 2819 | "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n", |
2820 | curproc ? curproc->p_comm : "--", | | 2820 | curproc ? curproc->p_comm : "--", |
2821 | va, pv->pv_va, pg); | | 2821 | va, pv->pv_va, pg); |
2822 | #endif | | 2822 | #endif |
2823 | /* Mark list head `uncached due to aliases' */ | | 2823 | /* Mark list head `uncached due to aliases' */ |
2824 | pv0->pv_flags |= PV_ANC; | | 2824 | pv0->pv_flags |= PV_ANC; |
2825 | pv_changepte4m(pg, 0, SRMMU_PG_C); | | 2825 | pv_changepte4m(pg, 0, SRMMU_PG_C); |
2826 | *pteprotop &= ~SRMMU_PG_C; | | 2826 | *pteprotop &= ~SRMMU_PG_C; |
2827 | break; | | 2827 | break; |
2828 | } | | 2828 | } |
2829 | } | | 2829 | } |
2830 | | | 2830 | |
2831 | link_npv: | | 2831 | link_npv: |
2832 | /* Now link in the new PV entry */ | | 2832 | /* Now link in the new PV entry */ |
2833 | npv->pv_next = pv0->pv_next; | | 2833 | npv->pv_next = pv0->pv_next; |
2834 | npv->pv_pmap = pm; | | 2834 | npv->pv_pmap = pm; |
2835 | npv->pv_va = va; | | 2835 | npv->pv_va = va; |
2836 | npv->pv_flags = nc; | | 2836 | npv->pv_flags = nc; |
2837 | pv0->pv_next = npv; | | 2837 | pv0->pv_next = npv; |
2838 | | | 2838 | |
2839 | out: | | 2839 | out: |
2840 | return (error); | | 2840 | return (error); |
2841 | } | | 2841 | } |
2842 | #endif | | 2842 | #endif |
2843 | | | 2843 | |
2844 | /* | | 2844 | /* |
2845 | * Uncache all entries on behalf of kvm_uncache(). In addition to | | 2845 | * Uncache all entries on behalf of kvm_uncache(). In addition to |
2846 | * removing the cache bit from the PTE, we are also setting PV_NC | | 2846 | * removing the cache bit from the PTE, we are also setting PV_NC |
2847 | * in each entry to stop pv_unlink() from re-caching (i.e. when a | | 2847 | * in each entry to stop pv_unlink() from re-caching (i.e. when a |
2848 | * bad alias is going away). | | 2848 | * bad alias is going away).
2849 | */ | | 2849 | */ |
2850 | static void | | 2850 | static void |
2851 | pv_uncache(struct vm_page *pg) | | 2851 | pv_uncache(struct vm_page *pg) |
2852 | { | | 2852 | { |
2853 | struct pvlist *pv; | | 2853 | struct pvlist *pv; |
2854 | int s; | | 2854 | int s; |
2855 | | | 2855 | |
2856 | s = splvm(); | | 2856 | s = splvm(); |
2857 | PMAP_LOCK(); | | 2857 | PMAP_LOCK(); |
2858 | | | 2858 | |
2859 | for (pv = VM_MDPAGE_PVHEAD(pg); pv != NULL; pv = pv->pv_next) | | 2859 | for (pv = VM_MDPAGE_PVHEAD(pg); pv != NULL; pv = pv->pv_next) |
2860 | pv->pv_flags |= PV_NC; | | 2860 | pv->pv_flags |= PV_NC; |
2861 | | | 2861 | |
2862 | #if defined(SUN4M) || defined(SUN4D) | | 2862 | #if defined(SUN4M) || defined(SUN4D) |
2863 | if (CPU_HAS_SRMMU) | | 2863 | if (CPU_HAS_SRMMU) |
2864 | pv_changepte4m(pg, 0, SRMMU_PG_C); | | 2864 | pv_changepte4m(pg, 0, SRMMU_PG_C); |
2865 | #endif | | 2865 | #endif |
2866 | #if defined(SUN4) || defined(SUN4C) | | 2866 | #if defined(SUN4) || defined(SUN4C) |
2867 | if (CPU_HAS_SUNMMU) | | 2867 | if (CPU_HAS_SUNMMU) |
2868 | pv_changepte4_4c(pg, PG_NC, 0); | | 2868 | pv_changepte4_4c(pg, PG_NC, 0); |
2869 | #endif | | 2869 | #endif |
2870 | PMAP_UNLOCK(); | | 2870 | PMAP_UNLOCK(); |
2871 | splx(s); | | 2871 | splx(s); |
2872 | } | | 2872 | } |
2873 | | | 2873 | |
2874 | /* | | 2874 | /* |
2875 | * Walk the given list and flush the cache for each (MI) page that is | | 2875 | * Walk the given list and flush the cache for each (MI) page that is |
2876 | * potentially in the cache. Called only if vactype != VAC_NONE. | | 2876 | * potentially in the cache. Called only if vactype != VAC_NONE. |
2877 | */ | | 2877 | */ |
2878 | #if defined(SUN4) || defined(SUN4C) | | 2878 | #if defined(SUN4) || defined(SUN4C) |
2879 | static void | | 2879 | static void |
2880 | pv_flushcache4_4c(struct vm_page *pg) | | 2880 | pv_flushcache4_4c(struct vm_page *pg) |
2881 | { | | 2881 | { |
2882 | struct pvlist *pv; | | 2882 | struct pvlist *pv; |
2883 | struct pmap *pm; | | 2883 | struct pmap *pm; |
2884 | int s, ctx; | | 2884 | int s, ctx; |
2885 | | | 2885 | |
2886 | pv = VM_MDPAGE_PVHEAD(pg); | | 2886 | pv = VM_MDPAGE_PVHEAD(pg); |
2887 | | | 2887 | |
2888 | write_user_windows(); /* paranoia? */ | | 2888 | write_user_windows(); /* paranoia? */ |
2889 | s = splvm(); /* XXX extreme paranoia */ | | 2889 | s = splvm(); /* XXX extreme paranoia */ |
2890 | if ((pm = pv->pv_pmap) != NULL) { | | 2890 | if ((pm = pv->pv_pmap) != NULL) { |
2891 | ctx = getcontext4(); | | 2891 | ctx = getcontext4(); |
2892 | for (;;) { | | 2892 | for (;;) { |
2893 | if (pm->pm_ctx) { | | 2893 | if (pm->pm_ctx) { |
2894 | setcontext4(pm->pm_ctxnum); | | 2894 | setcontext4(pm->pm_ctxnum); |
2895 | cache_flush_page(pv->pv_va, pm->pm_ctxnum); | | 2895 | cache_flush_page(pv->pv_va, pm->pm_ctxnum); |
2896 | } | | 2896 | } |
2897 | pv = pv->pv_next; | | 2897 | pv = pv->pv_next; |
2898 | if (pv == NULL) | | 2898 | if (pv == NULL) |
2899 | break; | | 2899 | break; |
2900 | pm = pv->pv_pmap; | | 2900 | pm = pv->pv_pmap; |
2901 | } | | 2901 | } |
2902 | setcontext4(ctx); | | 2902 | setcontext4(ctx); |
2903 | } | | 2903 | } |
2904 | splx(s); | | 2904 | splx(s); |
2905 | } | | 2905 | } |
2906 | #endif /* SUN4 || SUN4C */ | | 2906 | #endif /* SUN4 || SUN4C */ |
2907 | | | 2907 | |
2908 | #if defined(SUN4M) || defined(SUN4D) | | 2908 | #if defined(SUN4M) || defined(SUN4D) |
2909 | static void | | 2909 | static void |
2910 | pv_flushcache4m(struct vm_page *pg) | | 2910 | pv_flushcache4m(struct vm_page *pg) |
2911 | { | | 2911 | { |
2912 | struct pvlist *pv; | | 2912 | struct pvlist *pv; |
2913 | struct pmap *pm; | | 2913 | struct pmap *pm; |
2914 | int s; | | 2914 | int s; |
2915 | | | 2915 | |
2916 | pv = VM_MDPAGE_PVHEAD(pg); | | 2916 | pv = VM_MDPAGE_PVHEAD(pg); |
2917 | | | 2917 | |
2918 | s = splvm(); /* XXX extreme paranoia */ | | 2918 | s = splvm(); /* XXX extreme paranoia */ |
2919 | if ((pm = pv->pv_pmap) != NULL) { | | 2919 | if ((pm = pv->pv_pmap) != NULL) { |
2920 | for (;;) { | | 2920 | for (;;) { |
2921 | if (pm->pm_ctx) { | | 2921 | if (pm->pm_ctx) { |
2922 | cache_flush_page(pv->pv_va, pm->pm_ctxnum); | | 2922 | cache_flush_page(pv->pv_va, pm->pm_ctxnum); |
2923 | } | | 2923 | } |
2924 | pv = pv->pv_next; | | 2924 | pv = pv->pv_next; |
2925 | if (pv == NULL) | | 2925 | if (pv == NULL) |
2926 | break; | | 2926 | break; |
2927 | pm = pv->pv_pmap; | | 2927 | pm = pv->pv_pmap; |
2928 | } | | 2928 | } |
2929 | } | | 2929 | } |
2930 | splx(s); | | 2930 | splx(s); |
2931 | } | | 2931 | } |
2932 | #endif /* SUN4M || SUN4D */ | | 2932 | #endif /* SUN4M || SUN4D */ |
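/*
 * [editor's sketch] The two flush variants above share one pv-list
 * walk; only the flush step differs.  The Sun MMU (4/4c) version must
 * setcontext4() into each owning pmap's context before flushing, while
 * the SRMMU (4m/4d) version hands the context number straight to
 * cache_flush_page().  A minimal sketch of the common pattern,
 * assuming a flush callback (pv_walk is not a function in this file):
 */
static inline void
pv_walk(struct pvlist *pv, void (*flush)(vaddr_t, int))
{
	for (; pv != NULL; pv = pv->pv_next) {
		struct pmap *pm = pv->pv_pmap;

		if (pm != NULL && pm->pm_ctx)	/* has a hardware context */
			(*flush)(pv->pv_va, pm->pm_ctxnum);
	}
}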
2933 | | | 2933 | |
2934 | /*----------------------------------------------------------------*/ | | 2934 | /*----------------------------------------------------------------*/ |
2935 | | | 2935 | |
2936 | /* | | 2936 | /* |
2937 | * At last, pmap code. | | 2937 | * At last, pmap code. |
2938 | */ | | 2938 | */ |
2939 | | | 2939 | |
2940 | #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D)) | | 2940 | #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D)) |
2941 | int nptesg; | | 2941 | int nptesg; |
2942 | #endif | | 2942 | #endif |
2943 | | | 2943 | |
2944 | #if defined(SUN4M) || defined(SUN4D) | | 2944 | #if defined(SUN4M) || defined(SUN4D) |
2945 | static void pmap_bootstrap4m(void *); | | 2945 | static void pmap_bootstrap4m(void *); |
2946 | #endif | | 2946 | #endif |
2947 | #if defined(SUN4) || defined(SUN4C) | | 2947 | #if defined(SUN4) || defined(SUN4C) |
2948 | static void pmap_bootstrap4_4c(void *, int, int, int); | | 2948 | static void pmap_bootstrap4_4c(void *, int, int, int); |
2949 | #endif | | 2949 | #endif |
2950 | | | 2950 | |
2951 | /* | | 2951 | /* |
2952 | * Bootstrap the system enough to run with VM enabled. | | 2952 | * Bootstrap the system enough to run with VM enabled. |
2953 | * | | 2953 | * |
2954 | * nsegment is the number of mmu segment entries (``PMEGs''); | | 2954 | * nsegment is the number of mmu segment entries (``PMEGs''); |
2955 | * nregion is the number of mmu region entries (``SMEGs''); | | 2955 | * nregion is the number of mmu region entries (``SMEGs''); |
2956 | * nctx is the number of contexts. | | 2956 | * nctx is the number of contexts. |
2957 | */ | | 2957 | */ |
2958 | void | | 2958 | void |
2959 | pmap_bootstrap(int nctx, int nregion, int nsegment) | | 2959 | pmap_bootstrap(int nctx, int nregion, int nsegment) |
2960 | { | | 2960 | { |
2961 | void *p; | | 2961 | void *p; |
2962 | extern char etext[], kernel_data_start[]; | | 2962 | extern char etext[], kernel_data_start[]; |
2963 | extern char *kernel_top; | | 2963 | extern char *kernel_top; |
2964 | | | 2964 | |
2965 | uvmexp.pagesize = NBPG; | | 2965 | uvmexp.pagesize = NBPG; |
2966 | uvm_setpagesize(); | | 2966 | uvm_setpagesize(); |
2967 | | | 2967 | |
2968 | #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D)) | | 2968 | #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D)) |
2969 | /* In this case NPTESG is a variable */ | | 2969 | /* In this case NPTESG is a variable */ |
2970 | nptesg = (NBPSG >> pgshift); | | 2970 | nptesg = (NBPSG >> pgshift); |
2971 | #endif | | 2971 | #endif |
2972 | | | 2972 | |
2973 | /* | | 2973 | /* |
2974 | * Grab physical memory list. | | 2974 | * Grab physical memory list. |
2975 | */ | | 2975 | */ |
2976 | p = kernel_top; | | 2976 | p = kernel_top; |
2977 | get_phys_mem(&p); | | 2977 | get_phys_mem(&p); |
2978 | | | 2978 | |
2979 | /* | | 2979 | /* |
2980 | * The data segment in sparc ELF images is aligned to a 64KB | | 2980 | * The data segment in sparc ELF images is aligned to a 64KB |
2981 | * (the maximum page size defined by the ELF/sparc ABI) boundary. | | 2981 | * (the maximum page size defined by the ELF/sparc ABI) boundary. |
2982 | * This results in an unused portion of physical memory between | | 2982 | * This results in an unused portion of physical memory between |
2983 | * the text/rodata and the data segment. We pick up that gap | | 2983 | * the text/rodata and the data segment. We pick up that gap |
2984 | * here to remove it from the kernel map and give it to the | | 2984 | * here to remove it from the kernel map and give it to the |
2985 | * VM manager later. | | 2985 | * VM manager later. |
2986 | */ | | 2986 | */ |
2987 | etext_gap_start = (vaddr_t)(etext + NBPG - 1) & ~PGOFSET; | | 2987 | etext_gap_start = (vaddr_t)(etext + NBPG - 1) & ~PGOFSET; |
2988 | etext_gap_end = (vaddr_t)kernel_data_start & ~PGOFSET; | | 2988 | etext_gap_end = (vaddr_t)kernel_data_start & ~PGOFSET; |
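/*
 * [editor's note] A worked example of the gap arithmetic, with the
 * illustrative values NBPG = 0x1000 and etext = 0xf02a1234:
 *
 *	etext_gap_start = (0xf02a1234 + 0xfff) & ~0xfff = 0xf02a2000
 *
 * i.e. etext rounded up to a page boundary, while etext_gap_end is
 * kernel_data_start rounded down.  Every page in [start, end) is
 * unmapped below and later handed to the VM manager.
 */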
2989 | | | 2989 | |
2990 | if (CPU_HAS_SRMMU) { | | 2990 | if (CPU_HAS_SRMMU) { |
2991 | #if defined(SUN4M) || defined(SUN4D) | | 2991 | #if defined(SUN4M) || defined(SUN4D) |
2992 | pmap_bootstrap4m(p); | | 2992 | pmap_bootstrap4m(p); |
2993 | #endif | | 2993 | #endif |
2994 | } else if (CPU_HAS_SUNMMU) { | | 2994 | } else if (CPU_HAS_SUNMMU) { |
2995 | #if defined(SUN4) || defined(SUN4C) | | 2995 | #if defined(SUN4) || defined(SUN4C) |
2996 | pmap_bootstrap4_4c(p, nctx, nregion, nsegment); | | 2996 | pmap_bootstrap4_4c(p, nctx, nregion, nsegment); |
2997 | #endif | | 2997 | #endif |
2998 | } | | 2998 | } |
2999 | | | 2999 | |
3000 | pmap_page_upload(); | | 3000 | pmap_page_upload(); |
3001 | mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM); | | 3001 | mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM); |
3002 | mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED); | | 3002 | mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED); |
3003 | } | | 3003 | } |
3004 | | | 3004 | |
3005 | #if defined(SUN4) || defined(SUN4C) | | 3005 | #if defined(SUN4) || defined(SUN4C) |
3006 | void | | 3006 | void |
3007 | pmap_bootstrap4_4c(void *top, int nctx, int nregion, int nsegment) | | 3007 | pmap_bootstrap4_4c(void *top, int nctx, int nregion, int nsegment) |
3008 | { | | 3008 | { |
3009 | union ctxinfo *ci; | | 3009 | union ctxinfo *ci; |
3010 | struct mmuentry *mmuseg; | | 3010 | struct mmuentry *mmuseg; |
3011 | #if defined(SUN4_MMU3L) | | 3011 | #if defined(SUN4_MMU3L) |
3012 | struct mmuentry *mmureg; | | 3012 | struct mmuentry *mmureg; |
3013 | #endif | | 3013 | #endif |
3014 | struct regmap *rp; | | 3014 | struct regmap *rp; |
3015 | struct segmap *sp; | | 3015 | struct segmap *sp; |
3016 | int i, j; | | 3016 | int i, j; |
3017 | int npte, zseg, vr, vs; | | 3017 | int npte, zseg, vr, vs; |
3018 | int startscookie, scookie; | | 3018 | int startscookie, scookie; |
3019 | #if defined(SUN4_MMU3L) | | 3019 | #if defined(SUN4_MMU3L) |
3020 | int startrcookie = 0, rcookie = 0; | | 3020 | int startrcookie = 0, rcookie = 0; |
3021 | #endif | | 3021 | #endif |
3022 | int *kptes; | | 3022 | int *kptes; |
3023 | int lastpage; | | 3023 | int lastpage; |
3024 | vaddr_t va; | | 3024 | vaddr_t va; |
3025 | vaddr_t p; | | 3025 | vaddr_t p; |
3026 | extern char kernel_text[]; | | 3026 | extern char kernel_text[]; |
3027 | | | 3027 | |
3028 | /* | | 3028 | /* |
3029 | * Compute `va2pa_offset'. | | 3029 | * Compute `va2pa_offset'. |
3030 | * Use `kernel_text' to probe the MMU translation since | | 3030 | * Use `kernel_text' to probe the MMU translation since |
3031 | * the pages at KERNBASE might not be mapped. | | 3031 | * the pages at KERNBASE might not be mapped. |
3032 | */ | | 3032 | */ |
3033 | va2pa_offset = (vaddr_t)kernel_text - | | 3033 | va2pa_offset = (vaddr_t)kernel_text - |
3034 | ((getpte4(kernel_text) & PG_PFNUM) << PGSHIFT); | | 3034 | ((getpte4(kernel_text) & PG_PFNUM) << PGSHIFT); |
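/*
 * [editor's note] getpte4() returns the hardware PTE for a va, so
 * (pte & PG_PFNUM) << PGSHIFT is the physical address of kernel_text.
 * The offset thus captures the linear boot-time VA->PA relation, and
 * PMAP_BOOTSTRAP_VA2PA() used below presumably reduces to something
 * like (sketch, not the file's actual definition):
 *
 *	#define PMAP_BOOTSTRAP_VA2PA(va) ((paddr_t)(va) - va2pa_offset)
 */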
3035 | | | 3035 | |
3036 | ncontext = nctx; | | 3036 | ncontext = nctx; |
3037 | | | 3037 | |
3038 | switch (cputyp) { | | 3038 | switch (cputyp) { |
3039 | case CPU_SUN4C: | | 3039 | case CPU_SUN4C: |
3040 | mmu_has_hole = 1; | | 3040 | mmu_has_hole = 1; |
3041 | break; | | 3041 | break; |
3042 | case CPU_SUN4: | | 3042 | case CPU_SUN4: |
3043 | if (cpuinfo.cpu_type != CPUTYP_4_400) { | | 3043 | if (cpuinfo.cpu_type != CPUTYP_4_400) { |
3044 | mmu_has_hole = 1; | | 3044 | mmu_has_hole = 1; |
3045 | break; | | 3045 | break; |
3046 | } | | 3046 | } |
3047 | } | | 3047 | } |
3048 | | | 3048 | |
3049 | #if defined(SUN4) | | 3049 | #if defined(SUN4) |
3050 | /* | | 3050 | /* |
3051 | * set up the segfixmask to mask off invalid bits | | 3051 | * set up the segfixmask to mask off invalid bits |
3052 | */ | | 3052 | */ |
3053 | segfixmask = nsegment - 1; /* assume nsegment is a power of 2 */ | | 3053 | segfixmask = nsegment - 1; /* assume nsegment is a power of 2 */ |
3054 | #ifdef DIAGNOSTIC | | 3054 | #ifdef DIAGNOSTIC |
3055 | if (((nsegment & segfixmask) | (nsegment & ~segfixmask)) != nsegment) { | | 3055 | if (((nsegment & segfixmask) | (nsegment & ~segfixmask)) != nsegment) { |
3056 | printf("pmap_bootstrap: unsuitable number of segments (%d)\n", | | 3056 | printf("pmap_bootstrap: unsuitable number of segments (%d)\n", |
3057 | nsegment); | | 3057 | nsegment); |
3058 | callrom(); | | 3058 | callrom(); |
3059 | } | | 3059 | } |
3060 | #endif | | 3060 | #endif |
3061 | #endif | | 3061 | #endif |
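/*
 * [editor's note] The DIAGNOSTIC test above is tautological:
 * (n & m) | (n & ~m) == n holds for any n and m, so the check can
 * never fire.  The conventional power-of-2 test would be:
 *
 *	if (nsegment == 0 || (nsegment & (nsegment - 1)) != 0) {
 *		printf("pmap_bootstrap: nsegment %d not a power of 2\n",
 *		    nsegment);
 *		callrom();
 *	}
 */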
3062 | | | 3062 | |
3063 | #if defined(SUN4M) || defined(SUN4D) /* We're in a dual-arch kernel. | | 3063 | #if defined(SUN4M) || defined(SUN4D) /* We're in a dual-arch kernel. |
3064 | Set up 4/4c fn. ptrs */ | | 3064 | Set up 4/4c fn. ptrs */ |
3065 | pmap_clear_modify_p = pmap_clear_modify4_4c; | | 3065 | pmap_clear_modify_p = pmap_clear_modify4_4c; |
3066 | pmap_clear_reference_p = pmap_clear_reference4_4c; | | 3066 | pmap_clear_reference_p = pmap_clear_reference4_4c; |
3067 | pmap_enter_p = pmap_enter4_4c; | | 3067 | pmap_enter_p = pmap_enter4_4c; |
3068 | pmap_extract_p = pmap_extract4_4c; | | 3068 | pmap_extract_p = pmap_extract4_4c; |
3069 | pmap_is_modified_p = pmap_is_modified4_4c; | | 3069 | pmap_is_modified_p = pmap_is_modified4_4c; |
3070 | pmap_is_referenced_p = pmap_is_referenced4_4c; | | 3070 | pmap_is_referenced_p = pmap_is_referenced4_4c; |
3071 | pmap_kenter_pa_p = pmap_kenter_pa4_4c; | | 3071 | pmap_kenter_pa_p = pmap_kenter_pa4_4c; |
3072 | pmap_kremove_p = pmap_kremove4_4c; | | 3072 | pmap_kremove_p = pmap_kremove4_4c; |
3073 | pmap_kprotect_p = pmap_kprotect4_4c; | | 3073 | pmap_kprotect_p = pmap_kprotect4_4c; |
3074 | pmap_page_protect_p = pmap_page_protect4_4c; | | 3074 | pmap_page_protect_p = pmap_page_protect4_4c; |
3075 | pmap_protect_p = pmap_protect4_4c; | | 3075 | pmap_protect_p = pmap_protect4_4c; |
3076 | pmap_rmk_p = pmap_rmk4_4c; | | 3076 | pmap_rmk_p = pmap_rmk4_4c; |
3077 | pmap_rmu_p = pmap_rmu4_4c; | | 3077 | pmap_rmu_p = pmap_rmu4_4c; |
3078 | #endif /* defined SUN4M || defined SUN4D */ | | 3078 | #endif /* defined SUN4M || defined SUN4D */ |
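/*
 * [editor's sketch] In a dual-architecture kernel every pmap entry
 * point is reached through one of these pointers, so the MMU flavour
 * is selected once here instead of being tested on every call.  The
 * dispatch presumably amounts to (names assumed for illustration):
 *
 *	extern void (*pmap_kremove_p)(vaddr_t, vsize_t);
 *	#define pmap_kremove(va, len)	(*pmap_kremove_p)((va), (len))
 *
 * A single-architecture kernel can bind the 4_4c (or 4m) functions
 * directly and skip the indirection.
 */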
3079 | | | 3079 | |
3080 | p = (vaddr_t)top; | | 3080 | p = (vaddr_t)top; |
3081 | | | 3081 | |
3082 | /* | | 3082 | /* |
3083 | * Last segment is the `invalid' one (one PMEG of pte's with !pg_v). | | 3083 | * Last segment is the `invalid' one (one PMEG of pte's with !pg_v). |
3084 | * It will never be used for anything else. | | 3084 | * It will never be used for anything else. |
3085 | */ | | 3085 | */ |
3086 | seginval = --nsegment; | | 3086 | seginval = --nsegment; |
3087 | | | 3087 | |
3088 | #if defined(SUN4_MMU3L) | | 3088 | #if defined(SUN4_MMU3L) |
3089 | if (HASSUN4_MMU3L) | | 3089 | if (HASSUN4_MMU3L) |
3090 | reginval = --nregion; | | 3090 | reginval = --nregion; |
3091 | #endif | | 3091 | #endif |
3092 | | | 3092 | |
3093 | /* | | 3093 | /* |
3094 | * Allocate and initialise mmu entries and context structures. | | 3094 | * Allocate and initialise mmu entries and context structures. |
3095 | */ | | 3095 | */ |
3096 | #if defined(SUN4_MMU3L) | | 3096 | #if defined(SUN4_MMU3L) |
3097 | mmuregions = mmureg = (struct mmuentry *)p; | | 3097 | mmuregions = mmureg = (struct mmuentry *)p; |
3098 | p += nregion * sizeof(struct mmuentry); | | 3098 | p += nregion * sizeof(struct mmuentry); |
3099 | memset(mmuregions, 0, nregion * sizeof(struct mmuentry)); | | 3099 | memset(mmuregions, 0, nregion * sizeof(struct mmuentry)); |
3100 | #endif | | 3100 | #endif |
3101 | mmusegments = mmuseg = (struct mmuentry *)p; | | 3101 | mmusegments = mmuseg = (struct mmuentry *)p; |
3102 | p += nsegment * sizeof(struct mmuentry); | | 3102 | p += nsegment * sizeof(struct mmuentry); |
3103 | memset(mmusegments, 0, nsegment * sizeof(struct mmuentry)); | | 3103 | memset(mmusegments, 0, nsegment * sizeof(struct mmuentry)); |
3104 | | | 3104 | |
3105 | pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p; | | 3105 | pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p; |
3106 | p += nctx * sizeof *ci; | | 3106 | p += nctx * sizeof *ci; |
3107 | | | 3107 | |
3108 | /* Initialize MMU resource queues */ | | 3108 | /* Initialize MMU resource queues */ |
3109 | #if defined(SUN4_MMU3L) | | 3109 | #if defined(SUN4_MMU3L) |
3110 | MMUQ_INIT(®ion_freelist); | | 3110 | MMUQ_INIT(®ion_freelist); |
3111 | MMUQ_INIT(®ion_lru); | | 3111 | MMUQ_INIT(®ion_lru); |
3112 | MMUQ_INIT(®ion_locked); | | 3112 | MMUQ_INIT(®ion_locked); |
3113 | #endif | | 3113 | #endif |
3114 | MMUQ_INIT(&segm_freelist); | | 3114 | MMUQ_INIT(&segm_freelist); |
3115 | MMUQ_INIT(&segm_lru); | | 3115 | MMUQ_INIT(&segm_lru); |
3116 | MMUQ_INIT(&segm_locked); | | 3116 | MMUQ_INIT(&segm_locked); |
3117 | | | 3117 | |
3118 | | | 3118 | |
3119 | /* | | 3119 | /* |
3120 | * Initialize the kernel pmap. | | 3120 | * Initialize the kernel pmap. |
3121 | */ | | 3121 | */ |
3122 | /* kernel_pmap_store.pm_ctxnum = 0; */ | | 3122 | /* kernel_pmap_store.pm_ctxnum = 0; */ |
3123 | kernel_pmap_store.pm_refcount = 1; | | 3123 | kernel_pmap_store.pm_refcount = 1; |
3124 | #if defined(SUN4_MMU3L) | | 3124 | #if defined(SUN4_MMU3L) |
3125 | TAILQ_INIT(&kernel_pmap_store.pm_reglist); | | 3125 | TAILQ_INIT(&kernel_pmap_store.pm_reglist); |
3126 | #endif | | 3126 | #endif |
3127 | TAILQ_INIT(&kernel_pmap_store.pm_seglist); | | 3127 | TAILQ_INIT(&kernel_pmap_store.pm_seglist); |
3128 | | | 3128 | |
3129 | /* | | 3129 | /* |
3130 | * Allocate memory for kernel PTEs | | 3130 | * Allocate memory for kernel PTEs |
3131 | * XXX Consider allocating memory for only a few regions | | 3131 | * XXX Consider allocating memory for only a few regions |
3132 | * and use growkernel() to allocate more as needed. | | 3132 | * and use growkernel() to allocate more as needed. |
3133 | */ | | 3133 | */ |
3134 | kptes = (int *)p; | | 3134 | kptes = (int *)p; |
3135 | p += NKREG * NSEGRG * NPTESG * sizeof(int); | | 3135 | p += NKREG * NSEGRG * NPTESG * sizeof(int); |
3136 | memset(kptes, 0, NKREG * NSEGRG * NPTESG * sizeof(int)); | | 3136 | memset(kptes, 0, NKREG * NSEGRG * NPTESG * sizeof(int)); |
3137 | | | 3137 | |
3138 | /* | | 3138 | /* |
3139 | * Set up pm_regmap for kernel to point NUREG *below* the beginning | | 3139 | * Set up pm_regmap for kernel to point NUREG *below* the beginning |
3140 | * of kernel regmap storage. Since the kernel only uses regions | | 3140 | * of kernel regmap storage. Since the kernel only uses regions |
3141 | * above NUREG, we save storage space and can index kernel and | | 3141 | * above NUREG, we save storage space and can index kernel and |
3142 | * user regions in the same way. | | 3142 | * user regions in the same way. |
3143 | */ | | 3143 | */ |
3144 | kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG]; | | 3144 | kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG]; |
3145 | for (i = NKREG; --i >= 0;) { | | 3145 | for (i = NKREG; --i >= 0;) { |
3146 | #if defined(SUN4_MMU3L) | | 3146 | #if defined(SUN4_MMU3L) |
3147 | kernel_regmap_store[i].rg_smeg = reginval; | | 3147 | kernel_regmap_store[i].rg_smeg = reginval; |
3148 | #endif | | 3148 | #endif |
3149 | kernel_regmap_store[i].rg_segmap = | | 3149 | kernel_regmap_store[i].rg_segmap = |
3150 | &kernel_segmap_store[i * NSEGRG]; | | 3150 | &kernel_segmap_store[i * NSEGRG]; |
3151 | for (j = NSEGRG; --j >= 0;) { | | 3151 | for (j = NSEGRG; --j >= 0;) { |
3152 | sp = &kernel_segmap_store[i * NSEGRG + j]; | | 3152 | sp = &kernel_segmap_store[i * NSEGRG + j]; |
3153 | sp->sg_pmeg = seginval; | | 3153 | sp->sg_pmeg = seginval; |
3154 | sp->sg_pte = &kptes[(i * NSEGRG + j) * NPTESG]; | | 3154 | sp->sg_pte = &kptes[(i * NSEGRG + j) * NPTESG]; |
3155 | } | | 3155 | } |
3156 | } | | 3156 | } |
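/*
 * [editor's note] Worked example of the -NUREG bias: for a kernel
 * virtual address VA_VREG(va) >= NUREG, so
 *
 *	pm_regmap[VA_VREG(va)] == kernel_regmap_store[VA_VREG(va) - NUREG]
 *
 * lands on a valid index in 0..NKREG-1.  Only NKREG entries are ever
 * allocated, yet kernel and user pmaps are indexed identically as
 * pm->pm_regmap[VA_VREG(va)].
 */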
3157 | | | 3157 | |
3158 | /* | | 3158 | /* |
3159 | * Preserve the monitor ROM's reserved VM region, so that | | 3159 | * Preserve the monitor ROM's reserved VM region, so that |
3160 | * we can use L1-A or the monitor's debugger. As a side | | 3160 | * we can use L1-A or the monitor's debugger. As a side |
3161 | * effect we map the ROM's reserved VM into all contexts | | 3161 | * effect we map the ROM's reserved VM into all contexts |
3162 | * (otherwise L1-A crashes the machine!). | | 3162 | * (otherwise L1-A crashes the machine!). |
3163 | */ | | 3163 | */ |
3164 | | | 3164 | |
3165 | mmu_reservemon4_4c(&nregion, &nsegment); | | 3165 | mmu_reservemon4_4c(&nregion, &nsegment); |
3166 | | | 3166 | |
3167 | #if defined(SUN4_MMU3L) | | 3167 | #if defined(SUN4_MMU3L) |
3168 | /* Reserve one region for temporary mappings */ | | 3168 | /* Reserve one region for temporary mappings */ |
3169 | if (HASSUN4_MMU3L) | | 3169 | if (HASSUN4_MMU3L) |
3170 | tregion = --nregion; | | 3170 | tregion = --nregion; |
3171 | #endif | | 3171 | #endif |
3172 | | | 3172 | |
3173 | /* | | 3173 | /* |
3174 | * Set up the `constants' for the call to vm_init() | | 3174 | * Set up the `constants' for the call to vm_init() |
3175 | * in main(). All pages beginning at p (rounded up to | | 3175 | * in main(). All pages beginning at p (rounded up to |
3176 | * the next whole page) and continuing through the number | | 3176 | * the next whole page) and continuing through the number |
3177 | * of available pages are free, but they start at a higher | | 3177 | * of available pages are free, but they start at a higher |
3178 | * virtual address. This gives us two mappable MD pages | | 3178 | * virtual address. This gives us two mappable MD pages |
3179 | * for pmap_zero_page and pmap_copy_page, and one MI page | | 3179 | * for pmap_zero_page and pmap_copy_page, and one MI page |
3180 | * for /dev/mem, all with no associated physical memory. | | 3180 | * for /dev/mem, all with no associated physical memory. |
3181 | */ | | 3181 | */ |
3182 | p = (p + NBPG - 1) & ~PGOFSET; | | 3182 | p = (p + NBPG - 1) & ~PGOFSET; |
3183 | | | 3183 | |
3184 | avail_start = PMAP_BOOTSTRAP_VA2PA(p); | | 3184 | avail_start = PMAP_BOOTSTRAP_VA2PA(p); |
3185 | | | 3185 | |
3186 | i = p; | | 3186 | i = p; |
3187 | cpuinfo.vpage[0] = (void *)p, p += NBPG; | | 3187 | cpuinfo.vpage[0] = (void *)p, p += NBPG; |
3188 | cpuinfo.vpage[1] = (void *)p, p += NBPG; | | 3188 | cpuinfo.vpage[1] = (void *)p, p += NBPG; |
3189 | vmmap = (void *)p, p += NBPG; | | 3189 | vmmap = (void *)p, p += NBPG; |
3190 | p = (vaddr_t)reserve_dumppages((void *)p); | | 3190 | p = (vaddr_t)reserve_dumppages((void *)p); |
3191 | | | 3191 | |
3192 | virtual_avail = p; | | 3192 | virtual_avail = p; |
3193 | virtual_end = VM_MAX_KERNEL_ADDRESS; | | 3193 | virtual_end = VM_MAX_KERNEL_ADDRESS; |
3194 | | | 3194 | |
3195 | p = i; /* retract to first free phys */ | | 3195 | p = i; /* retract to first free phys */ |
3196 | | | 3196 | |
3197 | | | 3197 | |
3198 | /* | | 3198 | /* |
3199 | * All contexts are free except the kernel's. | | 3199 | * All contexts are free except the kernel's. |
3200 | * | | 3200 | * |
3201 | * XXX sun4c could use context 0 for users? | | 3201 | * XXX sun4c could use context 0 for users? |
3202 | */ | | 3202 | */ |
3203 | ci->c_pmap = pmap_kernel(); | | 3203 | ci->c_pmap = pmap_kernel(); |
3204 | ctx_freelist = ci + 1; | | 3204 | ctx_freelist = ci + 1; |
3205 | for (i = 1; i < ncontext; i++) { | | 3205 | for (i = 1; i < ncontext; i++) { |
3206 | ci++; | | 3206 | ci++; |
3207 | ci->c_nextfree = ci + 1; | | 3207 | ci->c_nextfree = ci + 1; |
3208 | } | | 3208 | } |
3209 | ci->c_nextfree = NULL; | | 3209 | ci->c_nextfree = NULL; |
3210 | ctx_kick = 0; | | 3210 | ctx_kick = 0; |
3211 | ctx_kickdir = -1; | | 3211 | ctx_kickdir = -1; |
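/*
 * [editor's sketch] Context allocation later reduces to a pop from
 * this singly-linked freelist, roughly:
 *
 *	ci = ctx_freelist;
 *	if (ci != NULL)
 *		ctx_freelist = ci->c_nextfree;
 *	else
 *		steal one, scanning from ctx_kick in ctx_kickdir order
 *
 * ctx_kick and ctx_kickdir seed that eviction scan once the free
 * list runs dry; the exact stealing policy lives elsewhere in pmap.c.
 */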
3212 | | | 3212 | |
3213 | /* | | 3213 | /* |
3214 | * Init mmu entries that map the kernel physical addresses. | | 3214 | * Init mmu entries that map the kernel physical addresses. |
3215 | * | | 3215 | * |
3216 | * All the other MMU entries are free. | | 3216 | * All the other MMU entries are free. |
3217 | * | | 3217 | * |
3218 | * THIS ASSUMES THE KERNEL IS MAPPED BY A CONTIGUOUS RANGE OF | | 3218 | * THIS ASSUMES THE KERNEL IS MAPPED BY A CONTIGUOUS RANGE OF |
3219 | * MMU SEGMENTS/REGIONS DURING THE BOOT PROCESS | | 3219 | * MMU SEGMENTS/REGIONS DURING THE BOOT PROCESS |
3220 | */ | | 3220 | */ |
3221 | | | 3221 | |
3222 | /* Compute the number of segments used by the kernel */ | | 3222 | /* Compute the number of segments used by the kernel */ |
3223 | zseg = (((p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT; | | 3223 | zseg = (((p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT; |
3224 | lastpage = VA_VPG(p); | | 3224 | lastpage = VA_VPG(p); |
3225 | if (lastpage == 0) | | 3225 | if (lastpage == 0) |
3226 | /* | | 3226 | /* |
3227 | * If the page bits in p are 0, we filled the last segment | | 3227 | * If the page bits in p are 0, we filled the last segment |
3228 | * exactly; if not, it is the last page filled in the | | 3228 | * exactly; if not, it is the last page filled in the |
3229 | * last segment. | | 3229 | * last segment. |
3230 | */ | | 3230 | */ |
3231 | lastpage = NPTESG; | | 3231 | lastpage = NPTESG; |
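/*
 * [editor's note] Example with illustrative sizes NBPSG = 256KB and
 * NBPG = 4KB (NPTESG = 64): if p ends 2 pages into its last segment,
 * VA_VPG(p) == 2, so lastpage = 2 valid ptes in that segment; if p is
 * exactly segment-aligned, VA_VPG(p) == 0 and lastpage is forced to
 * the full NPTESG, since the previous segment was filled completely.
 */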
3232 | | | 3232 | |
3233 | p = KERNBASE; /* first va */ | | 3233 | p = KERNBASE; /* first va */ |
3234 | vs = VA_VSEG(KERNBASE); /* first virtual segment */ | | 3234 | vs = VA_VSEG(KERNBASE); /* first virtual segment */ |
3235 | vr = VA_VREG(KERNBASE); /* first virtual region */ | | 3235 | vr = VA_VREG(KERNBASE); /* first virtual region */ |
3236 | rp = &pmap_kernel()->pm_regmap[vr]; | | 3236 | rp = &pmap_kernel()->pm_regmap[vr]; |
3237 | | | 3237 | |
3238 | /* Get region/segment where kernel addresses start */ | | 3238 | /* Get region/segment where kernel addresses start */ |
3239 | #if defined(SUN4_MMU3L) | | 3239 | #if defined(SUN4_MMU3L) |
3240 | if (HASSUN4_MMU3L) | | 3240 | if (HASSUN4_MMU3L) |
3241 | startrcookie = rcookie = getregmap(p); | | 3241 | startrcookie = rcookie = getregmap(p); |
3242 | mmureg = &mmuregions[rcookie]; | | 3242 | mmureg = &mmuregions[rcookie]; |
3243 | #endif | | 3243 | #endif |
3244 | | | 3244 | |
3245 | startscookie = scookie = getsegmap(p); | | 3245 | startscookie = scookie = getsegmap(p); |
3246 | mmuseg = &mmusegments[scookie]; | | 3246 | mmuseg = &mmusegments[scookie]; |
3247 | zseg += scookie; /* First free segment */ | | 3247 | zseg += scookie; /* First free segment */ |
3248 | | | 3248 | |
3249 | for (;;) { | | 3249 | for (;;) { |
3250 | | | 3250 | |
3251 | /* | | 3251 | /* |
3252 | * Distribute each kernel region/segment into all contexts. | | 3252 | * Distribute each kernel region/segment into all contexts. |
3253 | * This is done through the monitor ROM, rather than | | 3253 | * This is done through the monitor ROM, rather than |
3254 | * directly here: if we do a setcontext we will fault, | | 3254 | * directly here: if we do a setcontext we will fault, |
3255 | * as we are not (yet) mapped in any other context. | | 3255 | * as we are not (yet) mapped in any other context. |
3256 | */ | | 3256 | */ |
3257 | | | 3257 | |
3258 | if ((vs % NSEGRG) == 0) { | | 3258 | if ((vs % NSEGRG) == 0) { |
3259 | /* Entering a new region */ | | 3259 | /* Entering a new region */ |
3260 | if (VA_VREG(p) > vr) { | | 3260 | if (VA_VREG(p) > vr) { |
3261 | #ifdef DEBUG | | 3261 | #ifdef DEBUG |
3262 | printf("note: giant kernel!\n"); | | 3262 | printf("note: giant kernel!\n"); |
3263 | #endif | | 3263 | #endif |
3264 | vr++, rp++; | | 3264 | vr++, rp++; |
3265 | } | | 3265 | } |
3266 | #if defined(SUN4_MMU3L) | | 3266 | #if defined(SUN4_MMU3L) |
3267 | if (HASSUN4_MMU3L) { | | 3267 | if (HASSUN4_MMU3L) { |
3268 | for (i = 1; i < nctx; i++) | | 3268 | for (i = 1; i < nctx; i++) |
3269 | prom_setcontext(i, (void *)p, rcookie); | | 3269 | prom_setcontext(i, (void *)p, rcookie); |
3270 | | | 3270 | |
3271 | MMUQ_INSERT_TAIL(®ion_locked, | | 3271 | MMUQ_INSERT_TAIL(®ion_locked, |
3272 | mmureg, me_list); | | 3272 | mmureg, me_list); |
3273 | TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist, | | 3273 | TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist, |
3274 | mmureg, me_pmchain); | | 3274 | mmureg, me_pmchain); |
3275 | #ifdef DIAGNOSTIC | | 3275 | #ifdef DIAGNOSTIC |
3276 | mmuseg->me_statp = NULL; | | 3276 | mmuseg->me_statp = NULL; |
3277 | #endif | | 3277 | #endif |
3278 | mmureg->me_cookie = rcookie; | | 3278 | mmureg->me_cookie = rcookie; |
3279 | mmureg->me_pmap = pmap_kernel(); | | 3279 | mmureg->me_pmap = pmap_kernel(); |
3280 | mmureg->me_vreg = vr; | | 3280 | mmureg->me_vreg = vr; |
3281 | rp->rg_smeg = rcookie; | | 3281 | rp->rg_smeg = rcookie; |
3282 | mmureg++; | | 3282 | mmureg++; |
3283 | rcookie++; | | 3283 | rcookie++; |
3284 | } | | 3284 | } |
3285 | #endif /* SUN4_MMU3L */ | | 3285 | #endif /* SUN4_MMU3L */ |
3286 | } | | 3286 | } |
3287 | | | 3287 | |
3288 | #if defined(SUN4_MMU3L) | | 3288 | #if defined(SUN4_MMU3L) |
3289 | if (!HASSUN4_MMU3L) | | 3289 | if (!HASSUN4_MMU3L) |
3290 | #endif | | 3290 | #endif |
3291 | for (i = 1; i < nctx; i++) | | 3291 | for (i = 1; i < nctx; i++) |
3292 | prom_setcontext(i, (void *)p, scookie); | | 3292 | prom_setcontext(i, (void *)p, scookie); |
3293 | | | 3293 | |
3294 | /* set up the mmu entry */ | | 3294 | /* set up the mmu entry */ |
3295 | MMUQ_INSERT_TAIL(&segm_locked, mmuseg, me_list); | | 3295 | MMUQ_INSERT_TAIL(&segm_locked, mmuseg, me_list); |
3296 | #ifdef DIAGNOSTIC | | 3296 | #ifdef DIAGNOSTIC |
3297 | mmuseg->me_statp = &pmap_stats.ps_npmeg_locked; | | 3297 | mmuseg->me_statp = &pmap_stats.ps_npmeg_locked; |
3298 | #endif | | 3298 | #endif |
3299 | TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain); | | 3299 | TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain); |
3300 | pmap_stats.ps_npmeg_locked++; | | 3300 | pmap_stats.ps_npmeg_locked++; |
3301 | mmuseg->me_cookie = scookie; | | 3301 | mmuseg->me_cookie = scookie; |
3302 | mmuseg->me_pmap = pmap_kernel(); | | 3302 | mmuseg->me_pmap = pmap_kernel(); |
3303 | mmuseg->me_vreg = vr; | | 3303 | mmuseg->me_vreg = vr; |
3304 | mmuseg->me_vseg = vs % NSEGRG; | | 3304 | mmuseg->me_vseg = vs % NSEGRG; |
3305 | sp = &rp->rg_segmap[vs % NSEGRG]; | | 3305 | sp = &rp->rg_segmap[vs % NSEGRG]; |
3306 | sp->sg_pmeg = scookie; | | 3306 | sp->sg_pmeg = scookie; |
3307 | npte = ++scookie < zseg ? NPTESG : lastpage; | | 3307 | npte = ++scookie < zseg ? NPTESG : lastpage; |
3308 | sp->sg_npte = npte; | | 3308 | sp->sg_npte = npte; |
3309 | sp->sg_nwired = npte; | | 3309 | sp->sg_nwired = npte; |
3310 | pmap_kernel()->pm_stats.resident_count += npte; | | 3310 | pmap_kernel()->pm_stats.resident_count += npte; |
3311 | rp->rg_nsegmap += 1; | | 3311 | rp->rg_nsegmap += 1; |
3312 | for (i = 0; i < npte; i++) | | 3312 | for (i = 0; i < npte; i++) |
3313 | sp->sg_pte[i] = getpte4(p + i * NBPG) | PG_WIRED; | | 3313 | sp->sg_pte[i] = getpte4(p + i * NBPG) | PG_WIRED; |
3314 | mmuseg++; | | 3314 | mmuseg++; |
3315 | vs++; | | 3315 | vs++; |
3316 | if (scookie < zseg) { | | 3316 | if (scookie < zseg) { |
3317 | p += NBPSG; | | 3317 | p += NBPSG; |
3318 | continue; | | 3318 | continue; |
3319 | } | | 3319 | } |
3320 | | | 3320 | |
3321 | /* | | 3321 | /* |
3322 | * Unmap the pages, if any, that are not part of | | 3322 | * Unmap the pages, if any, that are not part of |
3323 | * the final segment. | | 3323 | * the final segment. |
3324 | */ | | 3324 | */ |
3325 | for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG) | | 3325 | for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG) |
3326 | setpte4(p, 0); | | 3326 | setpte4(p, 0); |
3327 | | | 3327 | |
3328 | #if defined(SUN4_MMU3L) | | 3328 | #if defined(SUN4_MMU3L) |
3329 | if (HASSUN4_MMU3L) { | | 3329 | if (HASSUN4_MMU3L) { |
3330 | /* | | 3330 | /* |
3331 | * Unmap the segments, if any, that are not part of | | 3331 | * Unmap the segments, if any, that are not part of |
3332 | * the final region. | | 3332 | * the final region. |
3333 | */ | | 3333 | */ |
3334 | for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG) | | 3334 | for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG) |
3335 | setsegmap(p, seginval); | | 3335 | setsegmap(p, seginval); |
3336 | | | 3336 | |
3337 | /* | | 3337 | /* |
3338 | * Unmap any kernel regions that we aren't using. | | 3338 | * Unmap any kernel regions that we aren't using. |
3339 | */ | | 3339 | */ |
3340 | for (i = 0; i < nctx; i++) { | | 3340 | for (i = 0; i < nctx; i++) { |
3341 | setcontext4(i); | | 3341 | setcontext4(i); |
3342 | for (va = p; | | 3342 | for (va = p; |
3343 | va < (OPENPROM_STARTVADDR & ~(NBPRG - 1)); | | 3343 | va < (OPENPROM_STARTVADDR & ~(NBPRG - 1)); |
3344 | va += NBPRG) | | 3344 | va += NBPRG) |
3345 | setregmap(va, reginval); | | 3345 | setregmap(va, reginval); |
3346 | } | | 3346 | } |
3347 | | | 3347 | |
3348 | } else | | 3348 | } else |
3349 | #endif | | 3349 | #endif |
3350 | { | | 3350 | { |
3351 | /* | | 3351 | /* |
3352 | * Unmap any kernel segments that we aren't using. | | 3352 | * Unmap any kernel segments that we aren't using. |
3353 | */ | | 3353 | */ |
3354 | for (i = 0; i < nctx; i++) { | | 3354 | for (i = 0; i < nctx; i++) { |
3355 | setcontext4(i); | | 3355 | setcontext4(i); |
3356 | for (va = p; | | 3356 | for (va = p; |
3357 | va < (OPENPROM_STARTVADDR & ~(NBPSG - 1)); | | 3357 | va < (OPENPROM_STARTVADDR & ~(NBPSG - 1)); |
3358 | va += NBPSG) | | 3358 | va += NBPSG) |
3359 | setsegmap(va, seginval); | | 3359 | setsegmap(va, seginval); |
3360 | } | | 3360 | } |
3361 | } | | 3361 | } |
3362 | break; | | 3362 | break; |
3363 | } | | 3363 | } |
3364 | | | 3364 | |
3365 | #if defined(SUN4_MMU3L) | | 3365 | #if defined(SUN4_MMU3L) |
3366 | if (HASSUN4_MMU3L) | | 3366 | if (HASSUN4_MMU3L) |
3367 | for (rcookie = 0; rcookie < nregion; rcookie++) { | | 3367 | for (rcookie = 0; rcookie < nregion; rcookie++) { |
3368 | if (rcookie == startrcookie) | | 3368 | if (rcookie == startrcookie) |
3369 | /* Kernel must fit in one region! */ | | 3369 | /* Kernel must fit in one region! */ |
3370 | rcookie++; | | 3370 | rcookie++; |
3371 | mmureg = &mmuregions[rcookie]; | | 3371 | mmureg = &mmuregions[rcookie]; |
3372 | mmureg->me_cookie = rcookie; | | 3372 | mmureg->me_cookie = rcookie; |
3373 | MMUQ_INSERT_TAIL(®ion_freelist, mmureg, me_list); | | 3373 | MMUQ_INSERT_TAIL(®ion_freelist, mmureg, me_list); |
3374 | #ifdef DIAGNOSTIC | | 3374 | #ifdef DIAGNOSTIC |
3375 | mmuseg->me_statp = NULL; | | 3375 | mmuseg->me_statp = NULL; |
3376 | #endif | | 3376 | #endif |
3377 | } | | 3377 | } |
3378 | #endif /* SUN4_MMU3L */ | | 3378 | #endif /* SUN4_MMU3L */ |
3379 | | | 3379 | |
3380 | for (scookie = 0; scookie < nsegment; scookie++) { | | 3380 | for (scookie = 0; scookie < nsegment; scookie++) { |
3381 | if (scookie == startscookie) | | 3381 | if (scookie == startscookie) |
3382 | /* Skip static kernel image */ | | 3382 | /* Skip static kernel image */ |
3383 | scookie = zseg; | | 3383 | scookie = zseg; |
3384 | mmuseg = &mmusegments[scookie]; | | 3384 | mmuseg = &mmusegments[scookie]; |
3385 | mmuseg->me_cookie = scookie; | | 3385 | mmuseg->me_cookie = scookie; |
3386 | MMUQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list); | | 3386 | MMUQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list); |
3387 | pmap_stats.ps_npmeg_free++; | | 3387 | pmap_stats.ps_npmeg_free++; |
3388 | #ifdef DIAGNOSTIC | | 3388 | #ifdef DIAGNOSTIC |
3389 | mmuseg->me_statp = NULL; | | 3389 | mmuseg->me_statp = NULL; |
3390 | #endif | | 3390 | #endif |
3391 | } | | 3391 | } |
3392 | | | 3392 | |
3393 | /* Erase all spurious user-space segmaps */ | | 3393 | /* Erase all spurious user-space segmaps */ |
3394 | for (i = 1; i < ncontext; i++) { | | 3394 | for (i = 1; i < ncontext; i++) { |
3395 | setcontext4(i); | | 3395 | setcontext4(i); |
3396 | if (HASSUN4_MMU3L) | | 3396 | if (HASSUN4_MMU3L) |
3397 | for (p = 0, j = NUREG; --j >= 0; p += NBPRG) | | 3397 | for (p = 0, j = NUREG; --j >= 0; p += NBPRG) |
3398 | setregmap(p, reginval); | | 3398 | setregmap(p, reginval); |
3399 | else | | 3399 | else |
3400 | for (p = 0, vr = 0; vr < NUREG; vr++) { | | 3400 | for (p = 0, vr = 0; vr < NUREG; vr++) { |
3401 | if (VA_INHOLE(p)) { | | 3401 | if (VA_INHOLE(p)) { |
3402 | p = MMU_HOLE_END; | | 3402 | p = MMU_HOLE_END; |
3403 | vr = VA_VREG(p); | | 3403 | vr = VA_VREG(p); |
3404 | } | | 3404 | } |
3405 | for (j = NSEGRG; --j >= 0; p += NBPSG) | | 3405 | for (j = NSEGRG; --j >= 0; p += NBPSG) |
3406 | setsegmap(p, seginval); | | 3406 | setsegmap(p, seginval); |
3407 | } | | 3407 | } |
3408 | } | | 3408 | } |
3409 | setcontext4(0); | | 3409 | setcontext4(0); |
3410 | | | 3410 | |
3411 | /* | | 3411 | /* |
3412 | * write protect & encache kernel text; | | 3412 | * write protect & encache kernel text; |
3413 | * set red zone at kernel base; | | 3413 | * set red zone at kernel base; |
3414 | * enable cache on message buffer and cpuinfo. | | 3414 | * enable cache on message buffer and cpuinfo. |
3415 | */ | | 3415 | */ |
3416 | { | | 3416 | { |
3417 | extern char etext[]; | | 3417 | extern char etext[]; |
3418 | | | 3418 | |
3419 | /* Enable cache on message buffer and cpuinfo */ | | 3419 | /* Enable cache on message buffer and cpuinfo */ |
3420 | for (p = KERNBASE; p < (vaddr_t)trapbase; p += NBPG) | | 3420 | for (p = KERNBASE; p < (vaddr_t)trapbase; p += NBPG) |
3421 | setpte4(p, getpte4(p) & ~PG_NC); | | 3421 | setpte4(p, getpte4(p) & ~PG_NC); |
3422 | | | 3422 | |
3423 | /* Enable cache and write-protect kernel text */ | | 3423 | /* Enable cache and write-protect kernel text */ |
3424 | for (p = (vaddr_t)trapbase; p < (vaddr_t)etext; p += NBPG) | | 3424 | for (p = (vaddr_t)trapbase; p < (vaddr_t)etext; p += NBPG) |
3425 | setpte4(p, getpte4(p) & ~(PG_NC|PG_W)); | | 3425 | setpte4(p, getpte4(p) & ~(PG_NC|PG_W)); |
3426 | | | 3426 | |
3427 | /* | | 3427 | /* |
3428 | * Unmap the `etext gap'; it'll be made available | | 3428 | * Unmap the `etext gap'; it'll be made available |
3429 | * to the VM manager. | | 3429 | * to the VM manager. |
3430 | */ | | 3430 | */ |
3431 | for (p = etext_gap_start; p < etext_gap_end; p += NBPG) { | | 3431 | for (p = etext_gap_start; p < etext_gap_end; p += NBPG) { |
3432 | rp = &pmap_kernel()->pm_regmap[VA_VREG(p)]; | | 3432 | rp = &pmap_kernel()->pm_regmap[VA_VREG(p)]; |
3433 | sp = &rp->rg_segmap[VA_VSEG(p)]; | | 3433 | sp = &rp->rg_segmap[VA_VSEG(p)]; |
3434 | sp->sg_nwired--; | | 3434 | sp->sg_nwired--; |
3435 | sp->sg_npte--; | | 3435 | sp->sg_npte--; |
3436 | pmap_kernel()->pm_stats.resident_count--; | | 3436 | pmap_kernel()->pm_stats.resident_count--; |
3437 | sp->sg_pte[VA_VPG(p)] = 0; | | 3437 | sp->sg_pte[VA_VPG(p)] = 0; |
3438 | setpte4(p, 0); | | 3438 | setpte4(p, 0); |
3439 | } | | 3439 | } |
3440 | | | 3440 | |
3441 | /* Enable cache on data & bss */ | | 3441 | /* Enable cache on data & bss */ |
3442 | for (p = etext_gap_end; p < virtual_avail; p += NBPG) | | 3442 | for (p = etext_gap_end; p < virtual_avail; p += NBPG) |
3443 | setpte4(p, getpte4(p) & ~PG_NC); | | 3443 | setpte4(p, getpte4(p) & ~PG_NC); |
3444 | | | 3444 | |
3445 | } | | 3445 | } |
3446 | } | | 3446 | } |
3447 | #endif | | 3447 | #endif |
3448 | | | 3448 | |
3449 | #if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_bootstrap */ | | 3449 | #if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_bootstrap */ |
3450 | /* | | 3450 | /* |
3451 | * Bootstrap the system enough to run with VM enabled on a sun4m machine. | | 3451 | * Bootstrap the system enough to run with VM enabled on a sun4m machine. |
3452 | * | | 3452 | * |
3453 | * Switches from ROM to kernel page tables, and sets up initial mappings. | | 3453 | * Switches from ROM to kernel page tables, and sets up initial mappings. |
3454 | */ | | 3454 | */ |
3455 | static void | | 3455 | static void |
3456 | pmap_bootstrap4m(void *top) | | 3456 | pmap_bootstrap4m(void *top) |
3457 | { | | 3457 | { |
3458 | int i, j; | | 3458 | int i, j; |
3459 | vaddr_t p, q; | | 3459 | vaddr_t p, q; |
3460 | union ctxinfo *ci; | | 3460 | union ctxinfo *ci; |
3461 | int reg, seg; | | 3461 | int reg, seg; |
3462 | unsigned int ctxtblsize; | | 3462 | unsigned int ctxtblsize; |
3463 | vaddr_t pagetables_start, pagetables_end; | | 3463 | vaddr_t pagetables_start, pagetables_end; |
3464 | paddr_t pagetables_start_pa; | | 3464 | paddr_t pagetables_start_pa; |
3465 | extern char etext[]; | | 3465 | extern char etext[]; |
3466 | extern char kernel_text[]; | | 3466 | extern char kernel_text[]; |
3467 | vaddr_t va; | | 3467 | vaddr_t va; |
3468 | #ifdef MULTIPROCESSOR | | 3468 | #ifdef MULTIPROCESSOR |
3469 | vsize_t off; | | 3469 | vsize_t off; |
3470 | size_t cpuinfo_len; | | 3470 | size_t cpuinfo_len; |
3471 | uint8_t *cpuinfo_data; | | 3471 | uint8_t *cpuinfo_data; |
3472 | #endif | | 3472 | #endif |
3473 | | | 3473 | |
3474 | /* | | 3474 | /* |
3475 | * Compute `va2pa_offset'. | | 3475 | * Compute `va2pa_offset'. |
3476 | * Use `kernel_text' to probe the MMU translation since | | 3476 | * Use `kernel_text' to probe the MMU translation since |
3477 | * the pages at KERNBASE might not be mapped. | | 3477 | * the pages at KERNBASE might not be mapped. |
3478 | */ | | 3478 | */ |
3479 | va2pa_offset = (vaddr_t)kernel_text - VA2PA(kernel_text); | | 3479 | va2pa_offset = (vaddr_t)kernel_text - VA2PA(kernel_text); |
3480 | | | 3480 | |
3481 | ncontext = cpuinfo.mmu_ncontext; | | 3481 | ncontext = cpuinfo.mmu_ncontext; |
3482 | | | 3482 | |
3483 | #if defined(SUN4) || defined(SUN4C) /* set up SRMMU fn. ptrs for dual-arch | | 3483 | #if defined(SUN4) || defined(SUN4C) /* set up SRMMU fn. ptrs for dual-arch |
3484 | kernel */ | | 3484 | kernel */ |
3485 | pmap_clear_modify_p = pmap_clear_modify4m; | | 3485 | pmap_clear_modify_p = pmap_clear_modify4m; |
3486 | pmap_clear_reference_p = pmap_clear_reference4m; | | 3486 | pmap_clear_reference_p = pmap_clear_reference4m; |
3487 | pmap_enter_p = pmap_enter4m; | | 3487 | pmap_enter_p = pmap_enter4m; |
3488 | pmap_extract_p = pmap_extract4m; | | 3488 | pmap_extract_p = pmap_extract4m; |
3489 | pmap_is_modified_p = pmap_is_modified4m; | | 3489 | pmap_is_modified_p = pmap_is_modified4m; |
3490 | pmap_is_referenced_p = pmap_is_referenced4m; | | 3490 | pmap_is_referenced_p = pmap_is_referenced4m; |
3491 | pmap_kenter_pa_p = pmap_kenter_pa4m; | | 3491 | pmap_kenter_pa_p = pmap_kenter_pa4m; |
3492 | pmap_kremove_p = pmap_kremove4m; | | 3492 | pmap_kremove_p = pmap_kremove4m; |
3493 | pmap_kprotect_p = pmap_kprotect4m; | | 3493 | pmap_kprotect_p = pmap_kprotect4m; |
3494 | pmap_page_protect_p = pmap_page_protect4m; | | 3494 | pmap_page_protect_p = pmap_page_protect4m; |
3495 | pmap_protect_p = pmap_protect4m; | | 3495 | pmap_protect_p = pmap_protect4m; |
3496 | pmap_rmk_p = pmap_rmk4m; | | 3496 | pmap_rmk_p = pmap_rmk4m; |
3497 | pmap_rmu_p = pmap_rmu4m; | | 3497 | pmap_rmu_p = pmap_rmu4m; |
3498 | #endif /* defined SUN4/SUN4C */ | | 3498 | #endif /* defined SUN4/SUN4C */ |
3499 | | | 3499 | |
3500 | /* | | 3500 | /* |
3501 | * p points to top of kernel mem | | 3501 | * p points to top of kernel mem |
3502 | */ | | 3502 | */ |
3503 | p = (vaddr_t)top; | | 3503 | p = (vaddr_t)top; |
3504 | | | 3504 | |
| | | 3505 | #if defined(MULTIPROCESSOR) |
| | | 3506 | /* |
| | | 3507 | * allocate the rest of the cpu_info{} area. note we waste the |
| | | 3508 | * first one to get a VA space. |
| | | 3509 | */ |
| | | 3510 | cpuinfo_len = ((sizeof(struct cpu_info) + NBPG - 1) & ~PGOFSET); |
| | | 3511 | if (sparc_ncpus > 1) { |
| | | 3512 | p = (p + NBPG - 1) & ~PGOFSET; |
| | | 3513 | cpuinfo_data = (uint8_t *)p; |
| | | 3514 | p += (cpuinfo_len * sparc_ncpus); |
| | | 3515 | |
| | | 3516 | /* XXX we waste the first one */ |
| | | 3517 | memset(cpuinfo_data + cpuinfo_len, 0, cpuinfo_len * (sparc_ncpus - 1)); |
| | | 3518 | } else |
| | | 3519 | cpuinfo_data = (uint8_t *)CPUINFO_VA; |
| | | 3520 | #endif |
| | | 3521 | |
3505 | /* | | 3522 | /* |
3506 | * Initialize the kernel pmap. | | 3523 | * Initialize the kernel pmap. |
3507 | */ | | 3524 | */ |
3508 | /* kernel_pmap_store.pm_ctxnum = 0; */ | | 3525 | /* kernel_pmap_store.pm_ctxnum = 0; */ |
3509 | kernel_pmap_store.pm_refcount = 1; | | 3526 | kernel_pmap_store.pm_refcount = 1; |
3510 | | | 3527 | |
3511 | /* | | 3528 | /* |
3512 | * Set up pm_regmap for kernel to point NUREG *below* the beginning | | 3529 | * Set up pm_regmap for kernel to point NUREG *below* the beginning |
3513 | * of kernel regmap storage. Since the kernel only uses regions | | 3530 | * of kernel regmap storage. Since the kernel only uses regions |
3514 | * above NUREG, we save storage space and can index kernel and | | 3531 | * above NUREG, we save storage space and can index kernel and |
3515 | * user regions in the same way. | | 3532 | * user regions in the same way. |
3516 | */ | | 3533 | */ |
3517 | kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG]; | | 3534 | kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG]; |
3518 | memset(kernel_regmap_store, 0, NKREG * sizeof(struct regmap)); | | 3535 | memset(kernel_regmap_store, 0, NKREG * sizeof(struct regmap)); |
3519 | memset(kernel_segmap_store, 0, NKREG * NSEGRG * sizeof(struct segmap)); | | 3536 | memset(kernel_segmap_store, 0, NKREG * NSEGRG * sizeof(struct segmap)); |
3520 | for (i = NKREG; --i >= 0;) { | | 3537 | for (i = NKREG; --i >= 0;) { |
3521 | kernel_regmap_store[i].rg_segmap = | | 3538 | kernel_regmap_store[i].rg_segmap = |
3522 | &kernel_segmap_store[i * NSEGRG]; | | 3539 | &kernel_segmap_store[i * NSEGRG]; |
3523 | kernel_regmap_store[i].rg_seg_ptps = NULL; | | 3540 | kernel_regmap_store[i].rg_seg_ptps = NULL; |
3524 | for (j = NSEGRG; --j >= 0;) | | 3541 | for (j = NSEGRG; --j >= 0;) |
3525 | kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL; | | 3542 | kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL; |
3526 | } | | 3543 | } |
3527 | | | 3544 | |
3528 | /* Allocate kernel region pointer tables */ | | 3545 | /* Allocate kernel region pointer tables */ |
3529 | pmap_kernel()->pm_reg_ptps = (int **)(q = p); | | 3546 | pmap_kernel()->pm_reg_ptps = (int **)(q = p); |
3530 | p += sparc_ncpus * sizeof(int **); | | 3547 | p += sparc_ncpus * sizeof(int **); |
3531 | memset((void *)q, 0, (u_int)p - (u_int)q); | | 3548 | memset((void *)q, 0, (u_int)p - (u_int)q); |
3532 | | | 3549 | |
3533 | pmap_kernel()->pm_reg_ptps_pa = (int *)(q = p); | | 3550 | pmap_kernel()->pm_reg_ptps_pa = (int *)(q = p); |
3534 | p += sparc_ncpus * sizeof(int *); | | 3551 | p += sparc_ncpus * sizeof(int *); |
3535 | memset((void *)q, 0, (u_int)p - (u_int)q); | | 3552 | memset((void *)q, 0, (u_int)p - (u_int)q); |
3536 | | | 3553 | |
3537 | /* Allocate context administration */ | | 3554 | /* Allocate context administration */ |
3538 | pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p; | | 3555 | pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p; |
3539 | p += ncontext * sizeof *ci; | | 3556 | p += ncontext * sizeof *ci; |
3540 | memset((void *)ci, 0, (u_int)p - (u_int)ci); | | 3557 | memset((void *)ci, 0, (u_int)p - (u_int)ci); |
3541 | | | 3558 | |
3542 | #if defined(MULTIPROCESSOR) | | | |
3543 | /* | | | |
3544 | * allocate the rest of the cpu_info{} area. note we waste the | | | |
3545 | * first one to get a VA space. | | | |
3546 | */ | | | |
3547 | p = (p + NBPG - 1) & ~PGOFSET; | | | |
3548 | cpuinfo_data = (uint8_t *)p; | | | |
3549 | cpuinfo_len = ((sizeof(struct cpu_info) + NBPG - 1) & ~PGOFSET); | | | |
3550 | p += (cpuinfo_len * sparc_ncpus); | | | |
3551 | prom_printf("extra cpus: %p, p: %p, gap start: %p, gap end: %p\n", | | | |
3552 | cpuinfo_data, p, etext_gap_start, etext_gap_end); | | | |
3553 | | | | |
3554 | /* XXX we waste the first one */ | | | |
3555 | memset(cpuinfo_data + cpuinfo_len, 0, cpuinfo_len * (sparc_ncpus - 1)); | | | |
3556 | #endif | | | |
3557 | | | | |
3558 | /* | | 3559 | /* |
3559 | * Set up the `constants' for the call to vm_init() | | 3560 | * Set up the `constants' for the call to vm_init() |
3560 | * in main(). All pages beginning at p (rounded up to | | 3561 | * in main(). All pages beginning at p (rounded up to |
3561 | * the next whole page) and continuing through the number | | 3562 | * the next whole page) and continuing through the number |
3562 | * of available pages are free. | | 3563 | * of available pages are free. |
3563 | */ | | 3564 | */ |
3564 | p = (p + NBPG - 1) & ~PGOFSET; | | 3565 | p = (p + NBPG - 1) & ~PGOFSET; |
3565 | | | 3566 | |
3566 | /* | | 3567 | /* |
3567 | * Reserve memory for MMU pagetables. Some of these have severe | | 3568 | * Reserve memory for MMU pagetables. Some of these have severe |
3568 | * alignment restrictions. We allocate in a sequence that | | 3569 | * alignment restrictions. We allocate in a sequence that |
3569 | * minimizes alignment gaps. | | 3570 | * minimizes alignment gaps. |
3570 | */ | | 3571 | */ |
3571 | | | 3572 | |
3572 | pagetables_start = p; | | 3573 | pagetables_start = p; |
3573 | pagetables_start_pa = PMAP_BOOTSTRAP_VA2PA(p); | | 3574 | pagetables_start_pa = PMAP_BOOTSTRAP_VA2PA(p); |
3574 | | | 3575 | |
3575 | /* | | 3576 | /* |
3576 | * Allocate context table. | | 3577 | * Allocate context table. |
3577 | * To keep supersparc happy, minimum alignment is on a 4K boundary. | | 3578 | * To keep supersparc happy, minimum alignment is on a 4K boundary. |
3578 | */ | | 3579 | */ |
3579 | ctxtblsize = max(ncontext,1024) * sizeof(int); | | 3580 | ctxtblsize = max(ncontext,1024) * sizeof(int); |
3580 | cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize); | | 3581 | cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize); |
3581 | cpuinfo.ctx_tbl_pa = PMAP_BOOTSTRAP_VA2PA(cpuinfo.ctx_tbl); | | 3582 | cpuinfo.ctx_tbl_pa = PMAP_BOOTSTRAP_VA2PA(cpuinfo.ctx_tbl); |
3582 | p = (u_int)cpuinfo.ctx_tbl + ctxtblsize; | | 3583 | p = (u_int)cpuinfo.ctx_tbl + ctxtblsize; |
3583 | | | 3584 | |
3584 | #if defined(MULTIPROCESSOR) | | 3585 | #if defined(MULTIPROCESSOR) |
3585 | /* | | 3586 | /* |
3586 | * Make sure all smp_tlb_flush*() routines for kernel pmap are | | 3587 | * Make sure all smp_tlb_flush*() routines for kernel pmap are |
3587 | * broadcast to all CPUs. | | 3588 | * broadcast to all CPUs. |
3588 | */ | | 3589 | */ |
3589 | pmap_kernel()->pm_cpuset = CPUSET_ALL; | | 3590 | pmap_kernel()->pm_cpuset = CPUSET_ALL; |
3590 | #endif | | 3591 | #endif |
3591 | | | 3592 | |
3592 | /* | | 3593 | /* |
3593 | * Reserve memory for segment and page tables needed to map the entire | | 3594 | * Reserve memory for segment and page tables needed to map the entire |
3594 | * kernel. This takes (2K + NKREG * 16K) of space, but unfortunately | | 3595 | * kernel. This takes (2K + NKREG * 16K) of space, but unfortunately |
3595 | * is necessary since pmap_enter() *must* be able to enter a kernel | | 3596 | * is necessary since pmap_enter() *must* be able to enter a kernel |
3596 | * mapping without delay. | | 3597 | * mapping without delay. |
3597 | */ | | 3598 | */ |
3598 | p = (vaddr_t) roundup(p, SRMMU_L1SIZE * sizeof(u_int)); | | 3599 | p = (vaddr_t) roundup(p, SRMMU_L1SIZE * sizeof(u_int)); |
3599 | qzero((void *)p, SRMMU_L1SIZE * sizeof(u_int)); | | 3600 | qzero((void *)p, SRMMU_L1SIZE * sizeof(u_int)); |
3600 | kernel_regtable_store = (u_int *)p; | | 3601 | kernel_regtable_store = (u_int *)p; |
3601 | p += SRMMU_L1SIZE * sizeof(u_int); | | 3602 | p += SRMMU_L1SIZE * sizeof(u_int); |
3602 | | | 3603 | |
3603 | p = (vaddr_t) roundup(p, SRMMU_L2SIZE * sizeof(u_int)); | | 3604 | p = (vaddr_t) roundup(p, SRMMU_L2SIZE * sizeof(u_int)); |
3604 | qzero((void *)p, (SRMMU_L2SIZE * sizeof(u_int)) * NKREG); | | 3605 | qzero((void *)p, (SRMMU_L2SIZE * sizeof(u_int)) * NKREG); |
3605 | kernel_segtable_store = (u_int *)p; | | 3606 | kernel_segtable_store = (u_int *)p; |
3606 | p += (SRMMU_L2SIZE * sizeof(u_int)) * NKREG; | | 3607 | p += (SRMMU_L2SIZE * sizeof(u_int)) * NKREG; |
3607 | | | 3608 | |
3608 | p = (vaddr_t) roundup(p, SRMMU_L3SIZE * sizeof(u_int)); | | 3609 | p = (vaddr_t) roundup(p, SRMMU_L3SIZE * sizeof(u_int)); |
3609 | /* zero it: all will be SRMMU_TEINVALID */ | | 3610 | /* zero it: all will be SRMMU_TEINVALID */ |
3610 | qzero((void *)p, ((SRMMU_L3SIZE * sizeof(u_int)) * NSEGRG) * NKREG); | | 3611 | qzero((void *)p, ((SRMMU_L3SIZE * sizeof(u_int)) * NSEGRG) * NKREG); |
3611 | kernel_pagtable_store = (u_int *)p; | | 3612 | kernel_pagtable_store = (u_int *)p; |
3612 | p += ((SRMMU_L3SIZE * sizeof(u_int)) * NSEGRG) * NKREG; | | 3613 | p += ((SRMMU_L3SIZE * sizeof(u_int)) * NSEGRG) * NKREG; |
3613 | | | 3614 | |
3614 | /* Round to next page and mark end of pre-wired kernel space */ | | 3615 | /* Round to next page and mark end of pre-wired kernel space */ |
3615 | p = (p + NBPG - 1) & ~PGOFSET; | | 3616 | p = (p + NBPG - 1) & ~PGOFSET; |
3616 | pagetables_end = p; | | 3617 | pagetables_end = p; |
3617 | | | 3618 | |
3618 | avail_start = PMAP_BOOTSTRAP_VA2PA(p); | | 3619 | avail_start = PMAP_BOOTSTRAP_VA2PA(p); |
3619 | | | 3620 | |
3620 | /* | | 3621 | /* |
3621 | * Now wire the region and segment tables of the kernel map. | | 3622 | * Now wire the region and segment tables of the kernel map. |
3622 | */ | | 3623 | */ |
3623 | pmap_kernel()->pm_reg_ptps[0] = (int *) kernel_regtable_store; | | 3624 | pmap_kernel()->pm_reg_ptps[0] = (int *) kernel_regtable_store; |
3624 | pmap_kernel()->pm_reg_ptps_pa[0] = | | 3625 | pmap_kernel()->pm_reg_ptps_pa[0] = |
3625 | PMAP_BOOTSTRAP_VA2PA(kernel_regtable_store); | | 3626 | PMAP_BOOTSTRAP_VA2PA(kernel_regtable_store); |
3626 | | | 3627 | |
3627 | /* Install L1 table in context 0 */ | | 3628 | /* Install L1 table in context 0 */ |
3628 | setpgt4m(&cpuinfo.ctx_tbl[0], | | 3629 | setpgt4m(&cpuinfo.ctx_tbl[0], |
3629 | (pmap_kernel()->pm_reg_ptps_pa[0] >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); | | 3630 | (pmap_kernel()->pm_reg_ptps_pa[0] >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); |
3630 | | | 3631 | |
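/*
 * [editor's sketch] An SRMMU table descriptor packs a physical table
 * pointer with a 2-bit entry type in the low bits.  The install above
 * therefore builds, in outline:
 *
 *	ptd = (table_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
 *
 * where SRMMU_TEPTD marks "pointer to next-level table" as opposed to
 * SRMMU_TEPTE, the terminating page-table entry used further below.
 */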
3631 | for (reg = 0; reg < NKREG; reg++) { | | 3632 | for (reg = 0; reg < NKREG; reg++) { |
3632 | struct regmap *rp; | | 3633 | struct regmap *rp; |
3633 | void *kphyssegtbl; | | 3634 | void *kphyssegtbl; |
3634 | | | 3635 | |
3635 | /* | | 3636 | /* |
3636 | * Entering new region; install & build segtbl | | 3637 | * Entering new region; install & build segtbl |
3637 | */ | | 3638 | */ |
3638 | | | 3639 | |
3639 | rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(KERNBASE)]; | | 3640 | rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(KERNBASE)]; |
3640 | | | 3641 | |
3641 | kphyssegtbl = (void *) | | 3642 | kphyssegtbl = (void *) |
3642 | &kernel_segtable_store[reg * SRMMU_L2SIZE]; | | 3643 | &kernel_segtable_store[reg * SRMMU_L2SIZE]; |
3643 | | | 3644 | |
3644 | setpgt4m(&pmap_kernel()->pm_reg_ptps[0][reg + VA_VREG(KERNBASE)], | | 3645 | setpgt4m(&pmap_kernel()->pm_reg_ptps[0][reg + VA_VREG(KERNBASE)], |
3645 | (PMAP_BOOTSTRAP_VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | | | 3646 | (PMAP_BOOTSTRAP_VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | |
3646 | SRMMU_TEPTD); | | 3647 | SRMMU_TEPTD); |
3647 | | | 3648 | |
3648 | rp->rg_seg_ptps = (int *)kphyssegtbl; | | 3649 | rp->rg_seg_ptps = (int *)kphyssegtbl; |
3649 | | | 3650 | |
3650 | for (seg = 0; seg < NSEGRG; seg++) { | | 3651 | for (seg = 0; seg < NSEGRG; seg++) { |
3651 | struct segmap *sp; | | 3652 | struct segmap *sp; |
3652 | void *kphyspagtbl; | | 3653 | void *kphyspagtbl; |
3653 | | | 3654 | |
3654 | rp->rg_nsegmap++; | | 3655 | rp->rg_nsegmap++; |
3655 | | | 3656 | |
3656 | sp = &rp->rg_segmap[seg]; | | 3657 | sp = &rp->rg_segmap[seg]; |
3657 | kphyspagtbl = (void *) | | 3658 | kphyspagtbl = (void *) |
3658 | &kernel_pagtable_store | | 3659 | &kernel_pagtable_store |
3659 | [((reg * NSEGRG) + seg) * SRMMU_L3SIZE]; | | 3660 | [((reg * NSEGRG) + seg) * SRMMU_L3SIZE]; |
3660 | | | 3661 | |
3661 | setpgt4m(&rp->rg_seg_ptps[seg], | | 3662 | setpgt4m(&rp->rg_seg_ptps[seg], |
3662 | (PMAP_BOOTSTRAP_VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) | | | 3663 | (PMAP_BOOTSTRAP_VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) | |
3663 | SRMMU_TEPTD); | | 3664 | SRMMU_TEPTD); |
3664 | sp->sg_pte = (int *) kphyspagtbl; | | 3665 | sp->sg_pte = (int *) kphyspagtbl; |
3665 | } | | 3666 | } |
3666 | } | | 3667 | } |
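/*
 * [editor's sketch] With the loops above complete, a kernel va
 * resolves through three pre-wired levels; conceptually the SRMMU
 * hardware walk is:
 *
 *	regtbl = ctx_tbl[ctx];                 per-context L1 pointer
 *	segtbl = regtbl[VA_VREG(va)];          region -> segment table
 *	pagtbl = segtbl[VA_VSEG(va)];          segment -> page table
 *	pte    = pagtbl[VA_SUN4M_VPG(va)];     terminating PTE
 *
 * Pre-allocating every level is what lets pmap_enter() install a
 * kernel mapping without allocating page-table memory on demand.
 */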
3667 | | | 3668 | |
3668 | /* | | 3669 | /* |
3669 | * Preserve the monitor ROM's reserved VM region, so that | | 3670 | * Preserve the monitor ROM's reserved VM region, so that |
3670 | * we can use L1-A or the monitor's debugger. | | 3671 | * we can use L1-A or the monitor's debugger. |
3671 | */ | | 3672 | */ |
3672 | mmu_reservemon4m(&kernel_pmap_store); | | 3673 | mmu_reservemon4m(&kernel_pmap_store); |
3673 | | | 3674 | |
3674 | /* | | 3675 | /* |
3675 | * Reserve virtual address space for two mappable MD pages | | 3676 | * Reserve virtual address space for two mappable MD pages |
3676 | * for pmap_zero_page and pmap_copy_page, one MI page | | 3677 | * for pmap_zero_page and pmap_copy_page, one MI page |
3677 | * for /dev/mem, and some more for dumpsys(). | | 3678 | * for /dev/mem, and some more for dumpsys(). |
3678 | */ | | 3679 | */ |
3679 | q = p; | | 3680 | q = p; |
3680 | cpuinfo.vpage[0] = (void *)p, p += NBPG; | | 3681 | cpuinfo.vpage[0] = (void *)p, p += NBPG; |
3681 | cpuinfo.vpage[1] = (void *)p, p += NBPG; | | 3682 | cpuinfo.vpage[1] = (void *)p, p += NBPG; |
3682 | vmmap = (void *)p, p += NBPG; | | 3683 | vmmap = (void *)p, p += NBPG; |
3683 | p = (vaddr_t)reserve_dumppages((void *)p); | | 3684 | p = (vaddr_t)reserve_dumppages((void *)p); |
3684 | | | 3685 | |
3685 | /* Find PTE locations of vpage[] to optimize zero_fill() et al. */ | | 3686 | /* Find PTE locations of vpage[] to optimize zero_fill() et al. */ |
3686 | for (i = 0; i < 2; i++) { | | 3687 | for (i = 0; i < 2; i++) { |
3687 | struct regmap *rp; | | 3688 | struct regmap *rp; |
3688 | struct segmap *sp; | | 3689 | struct segmap *sp; |
3689 | rp = &pmap_kernel()->pm_regmap[VA_VREG(cpuinfo.vpage[i])]; | | 3690 | rp = &pmap_kernel()->pm_regmap[VA_VREG(cpuinfo.vpage[i])]; |
3690 | sp = &rp->rg_segmap[VA_VSEG(cpuinfo.vpage[i])]; | | 3691 | sp = &rp->rg_segmap[VA_VSEG(cpuinfo.vpage[i])]; |
3691 | cpuinfo.vpage_pte[i] = | | 3692 | cpuinfo.vpage_pte[i] = |
3692 | &sp->sg_pte[VA_SUN4M_VPG(cpuinfo.vpage[i])]; | | 3693 | &sp->sg_pte[VA_SUN4M_VPG(cpuinfo.vpage[i])]; |
3693 | } | | 3694 | } |
3694 | | | 3695 | |
3695 | #if !(defined(PROM_AT_F0) || defined(MSIIEP)) | | 3696 | #if !(defined(PROM_AT_F0) || defined(MSIIEP)) |
3696 | virtual_avail = p; | | 3697 | virtual_avail = p; |
3697 | #elif defined(MSIIEP) | | 3698 | #elif defined(MSIIEP) |
3698 | virtual_avail = (vaddr_t)0xf0800000; /* Krups */ | | 3699 | virtual_avail = (vaddr_t)0xf0800000; /* Krups */ |
3699 | #else | | 3700 | #else |
3700 | virtual_avail = (vaddr_t)0xf0080000; /* Mr.Coffee/OFW */ | | 3701 | virtual_avail = (vaddr_t)0xf0080000; /* Mr.Coffee/OFW */ |
3701 | #endif | | 3702 | #endif |
3702 | virtual_end = VM_MAX_KERNEL_ADDRESS; | | 3703 | virtual_end = VM_MAX_KERNEL_ADDRESS; |
3703 | | | 3704 | |
3704 | p = q; /* retract to first free phys */ | | 3705 | p = q; /* retract to first free phys */ |
3705 | | | 3706 | |
3706 | /* | | 3707 | /* |
3707 | * Set up the ctxinfo structures (freelist of contexts) | | 3708 | * Set up the ctxinfo structures (freelist of contexts) |
3708 | */ | | 3709 | */ |
3709 | ci->c_pmap = pmap_kernel(); | | 3710 | ci->c_pmap = pmap_kernel(); |
3710 | ctx_freelist = ci + 1; | | 3711 | ctx_freelist = ci + 1; |
3711 | for (i = 1; i < ncontext; i++) { | | 3712 | for (i = 1; i < ncontext; i++) { |
3712 | ci++; | | 3713 | ci++; |
3713 | ci->c_nextfree = ci + 1; | | 3714 | ci->c_nextfree = ci + 1; |
3714 | } | | 3715 | } |
3715 | ci->c_nextfree = NULL; | | 3716 | ci->c_nextfree = NULL; |
3716 | ctx_kick = 0; | | 3717 | ctx_kick = 0; |
3717 | ctx_kickdir = -1; | | 3718 | ctx_kickdir = -1; |
3718 | | | 3719 | |
3719 | /* | | 3720 | /* |
3720 | * Now map the kernel into our new set of page tables, then | | 3721 | * Now map the kernel into our new set of page tables, then |
3721 | * (finally) switch over to our running page tables. | | 3722 | * (finally) switch over to our running page tables. |
3722 | * We map from KERNBASE to p into context 0's page tables (and | | 3723 | * We map from KERNBASE to p into context 0's page tables (and |
3723 | * the kernel pmap). | | 3724 | * the kernel pmap). |
3724 | */ | | 3725 | */ |
3725 | #ifdef DEBUG /* Sanity checks */ | | 3726 | #ifdef DEBUG /* Sanity checks */ |
3726 | if (p % NBPG != 0) | | 3727 | if (p % NBPG != 0) |
3727 | panic("pmap_bootstrap4m: p misaligned?!?"); | | 3728 | panic("pmap_bootstrap4m: p misaligned?!?"); |
3728 | if (KERNBASE % NBPRG != 0) | | 3729 | if (KERNBASE % NBPRG != 0) |
3729 | panic("pmap_bootstrap4m: KERNBASE not region-aligned"); | | 3730 | panic("pmap_bootstrap4m: KERNBASE not region-aligned"); |
3730 | #endif | | 3731 | #endif |
3731 | | | 3732 | |
3732 | for (q = KERNBASE; q < p; q += NBPG) { | | 3733 | for (q = KERNBASE; q < p; q += NBPG) { |
3733 | struct regmap *rp; | | 3734 | struct regmap *rp; |
3734 | struct segmap *sp; | | 3735 | struct segmap *sp; |
3735 | int pte, *ptep; | | 3736 | int pte, *ptep; |
3736 | | | 3737 | |
3737 | /* | | 3738 | /* |
3738 | * Now install entry for current page. | | 3739 | * Now install entry for current page. |
3739 | */ | | 3740 | */ |
3740 | rp = &pmap_kernel()->pm_regmap[VA_VREG(q)]; | | 3741 | rp = &pmap_kernel()->pm_regmap[VA_VREG(q)]; |
3741 | sp = &rp->rg_segmap[VA_VSEG(q)]; | | 3742 | sp = &rp->rg_segmap[VA_VSEG(q)]; |
3742 | ptep = &sp->sg_pte[VA_VPG(q)]; | | 3743 | ptep = &sp->sg_pte[VA_VPG(q)]; |
3743 | | | 3744 | |
3744 | /* | | 3745 | /* |
3745 | * Unmap the `etext gap'; it'll be made available | | 3746 | * Unmap the `etext gap'; it'll be made available |
3746 | * to the VM manager. | | 3747 | * to the VM manager. |
3747 | */ | | 3748 | */ |
3748 | if (q >= etext_gap_start && q < etext_gap_end) { | | 3749 | if (q >= etext_gap_start && q < etext_gap_end) { |
3749 | setpgt4m(ptep, 0); | | 3750 | setpgt4m(ptep, 0); |
3750 | continue; | | 3751 | continue; |
3751 | } | | 3752 | } |
3752 | | | 3753 | |
3753 | pte = PMAP_BOOTSTRAP_VA2PA(q) >> SRMMU_PPNPASHIFT; | | 3754 | pte = PMAP_BOOTSTRAP_VA2PA(q) >> SRMMU_PPNPASHIFT; |
3754 | pte |= PPROT_N_RX | SRMMU_TEPTE; | | 3755 | pte |= PPROT_N_RX | SRMMU_TEPTE; |
3755 | | | 3756 | |
3756 | /* Deal with the cacheable bit for pagetable memory */ | | 3757 | /* Deal with the cacheable bit for pagetable memory */ |
3757 | if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 || | | 3758 | if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 || |
3758 | q < pagetables_start || q >= pagetables_end) | | 3759 | q < pagetables_start || q >= pagetables_end) |
3759 | pte |= SRMMU_PG_C; | | 3760 | pte |= SRMMU_PG_C; |
3760 | | | 3761 | |
3761 | /* write-protect kernel text; all other pages get PPROT_WRITE */ | | 3762 | /* write-protect kernel text; all other pages get PPROT_WRITE */
3762 | if (q < (vaddr_t)trapbase || q >= (vaddr_t)etext) | | 3763 | if (q < (vaddr_t)trapbase || q >= (vaddr_t)etext) |
3763 | pte |= PPROT_WRITE; | | 3764 | pte |= PPROT_WRITE; |
3764 | | | 3765 | |
3765 | setpgt4m(ptep, pte); | | 3766 | setpgt4m(ptep, pte); |
3766 | pmap_kernel()->pm_stats.resident_count++; | | 3767 | pmap_kernel()->pm_stats.resident_count++; |
3767 | } | | 3768 | } |
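/*
 * A sketch of the level-3 PTE encoding the loop above constructs
 * (field layout quoted from the SPARC V8 reference MMU, not derived
 * from this file):
 *
 *   31            8 7 6 5 4   2 1 0
 *  +---------------+-+-+-+-----+---+
 *  |      PPN      |C|M|R| ACC | ET|
 *  +---------------+-+-+-+-----+---+
 *
 * pa >> SRMMU_PPNPASHIFT drops the page offset and leaves the
 * physical page number in PPN; SRMMU_TEPTE sets ET to "PTE",
 * PPROT_N_RX selects a read/execute ACC code, SRMMU_PG_C sets C,
 * and PPROT_WRITE widens ACC to allow writes.
 */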
3768 | | | 3769 | |
3769 | if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) { | | 3770 | if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) { |
3770 | /* | | 3771 | /* |
3771 | * The page tables have been set up. Since we're still | | 3772 | * The page tables have been set up. Since we're still
3772 | * running on the PROM's memory map, the memory we | | 3773 | * running on the PROM's memory map, the memory we |
3773 | * allocated for our page tables might still be cached. | | 3774 | * allocated for our page tables might still be cached. |
3774 | * Flush it now, and don't touch it again until we | | 3775 | * Flush it now, and don't touch it again until we |
3775 | * switch to our own tables (will be done immediately below). | | 3776 | * switch to our own tables (will be done immediately below). |
3776 | */ | | 3777 | */ |
3777 | int size = pagetables_end - pagetables_start; | | 3778 | int size = pagetables_end - pagetables_start; |
3778 | if (CACHEINFO.c_vactype != VAC_NONE) { | | 3779 | if (CACHEINFO.c_vactype != VAC_NONE) { |
3779 | va = (vaddr_t)pagetables_start; | | 3780 | va = (vaddr_t)pagetables_start; |
3780 | while (size > 0) { | | 3781 | while (size > 0) { |
3781 | cache_flush_page(va, 0); | | 3782 | cache_flush_page(va, 0); |
3782 | va += NBPG; | | 3783 | va += NBPG; |
3783 | size -= NBPG; | | 3784 | size -= NBPG; |
3784 | } | | 3785 | } |
3785 | } else if (cpuinfo.pcache_flush_page != NULL) { | | 3786 | } else if (cpuinfo.pcache_flush_page != NULL) { |
3786 | paddr_t pa = pagetables_start_pa; | | 3787 | paddr_t pa = pagetables_start_pa; |
3787 | while (size > 0) { | | 3788 | while (size > 0) { |
3788 | pcache_flush_page(pa, 0); | | 3789 | pcache_flush_page(pa, 0); |
3789 | pa += NBPG; | | 3790 | pa += NBPG; |
3790 | size -= NBPG; | | 3791 | size -= NBPG; |
3791 | } | | 3792 | } |
3792 | } | | 3793 | } |
3793 | } | | 3794 | } |
3794 | | | 3795 | |
3795 | /* | | 3796 | /* |
3796 | * Now switch to kernel pagetables (finally!) | | 3797 | * Now switch to kernel pagetables (finally!) |
3797 | */ | | 3798 | */ |
3798 | mmu_install_tables(&cpuinfo); | | 3799 | mmu_install_tables(&cpuinfo); |
3799 | | | 3800 | |
3800 | #ifdef MULTIPROCESSOR | | 3801 | #ifdef MULTIPROCESSOR |
3801 | /* | | 3802 | /* |
| | | 3803 | * Initialise any cpu-specific data now. |
| | | 3804 | */ |
| | | 3805 | cpu_init_system(); |
| | | 3806 | |
| | | 3807 | /* |
3802 | * Remap cpu0 from CPUINFO_VA to its new, correct location, wasting the | | 3808 | * Remap cpu0 from CPUINFO_VA to its new, correct location, wasting the
3803 | * backing pages we allocated above XXX. | | 3809 | * backing page we allocated above XXX. |
3804 | */ | | 3810 | */ |
3805 | for (off = 0, va = (vaddr_t)cpuinfo_data; | | 3811 | for (off = 0, va = (vaddr_t)cpuinfo_data; |
3806 | off < sizeof(struct cpu_info); | | 3812 | sparc_ncpus > 1 && off < sizeof(struct cpu_info); |
3807 | va += NBPG, off += NBPG) { | | 3813 | va += NBPG, off += NBPG) { |
3808 | paddr_t pa = PMAP_BOOTSTRAP_VA2PA(CPUINFO_VA + off); | | 3814 | paddr_t pa = PMAP_BOOTSTRAP_VA2PA(CPUINFO_VA + off); |
3809 | prom_printf("going to pmap_kenter_pa(va=%p, pa=%p)\n", va, pa); | | 3815 | prom_printf("going to pmap_kenter_pa(va=%p, pa=%p)\n", va, pa); |
3810 | pmap_kremove(va, NBPG); | | 3816 | pmap_kremove(va, NBPG); |
3811 | pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE); | | 3817 | pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE); |
| | | 3818 | cache_flush_page(va, 0); |
| | | 3819 | cache_flush_page(CPUINFO_VA, 0); |
3812 | } | | 3820 | } |
3813 | | | 3821 | |
3814 | /* | | 3822 | /* |
3815 | * Set up the cpus[] array and the ci_self links. | | 3823 | * Set up the cpus[] array and the ci_self links.
3816 | */ | | 3824 | */ |
3817 | prom_printf("setting cpus self reference\n"); | | 3825 | prom_printf("setting cpus self reference\n"); |
3818 | for (i = 0; i < sparc_ncpus; i++) { | | 3826 | for (i = 0; i < sparc_ncpus; i++) { |
3819 | cpus[i] = (struct cpu_info *)(cpuinfo_data + (cpuinfo_len * i)); | | 3827 | cpus[i] = (struct cpu_info *)(cpuinfo_data + (cpuinfo_len * i)); |
3820 | cpus[i]->ci_self = cpus[i]; | | 3828 | cpus[i]->ci_self = cpus[i]; |
3821 | prom_printf("set cpu%d ci_self address: %p\n", i, cpus[i]); | | 3829 | prom_printf("set cpu%d ci_self address: %p\n", i, cpus[i]); |
3822 | } | | 3830 | } |
| | | 3831 | #else |
| | | 3832 | cpus[0] = (struct cpu_info *)CPUINFO_VA; |
3823 | #endif | | 3833 | #endif |
3824 | | | 3834 | |
3825 | pmap_update(pmap_kernel()); | | 3835 | pmap_update(pmap_kernel()); |
3826 | prom_printf("pmap_bootstrap4m done\n"); | | 3836 | prom_printf("pmap_bootstrap4m done\n"); |
3827 | } | | 3837 | } |
3828 | | | 3838 | |
3829 | static u_long prom_ctxreg; | | 3839 | static u_long prom_ctxreg; |
3830 | | | 3840 | |
3831 | void | | 3841 | void |
3832 | mmu_install_tables(struct cpu_info *sc) | | 3842 | mmu_install_tables(struct cpu_info *sc) |
3833 | { | | 3843 | { |
3834 | | | 3844 | |
3835 | #ifdef DEBUG | | 3845 | #ifdef DEBUG |
3836 | prom_printf("pmap_bootstrap: installing kernel page tables..."); | | 3846 | prom_printf("pmap_bootstrap: installing kernel page tables..."); |
3837 | #endif | | 3847 | #endif |
3838 | setcontext4m(0); /* paranoia? %%%: Make 0x3 a define! below */ | | 3848 | setcontext4m(0); /* paranoia? %%%: Make 0x3 a define! below */ |
3839 | | | 3849 | |
3840 | /* Enable MMU tablewalk caching, flush TLB */ | | 3850 | /* Enable MMU tablewalk caching, flush TLB */ |
3841 | if (sc->mmu_enable != 0) | | 3851 | if (sc->mmu_enable != 0) |
3842 | sc->mmu_enable(); | | 3852 | sc->mmu_enable(); |
3843 | | | 3853 | |
3844 | tlb_flush_all_real(); | | 3854 | tlb_flush_all_real(); |
3845 | prom_ctxreg = lda(SRMMU_CXTPTR, ASI_SRMMU); | | 3855 | prom_ctxreg = lda(SRMMU_CXTPTR, ASI_SRMMU); |
3846 | | | 3856 | |
3847 | sta(SRMMU_CXTPTR, ASI_SRMMU, | | 3857 | sta(SRMMU_CXTPTR, ASI_SRMMU, |
3848 | (sc->ctx_tbl_pa >> SRMMU_PPNPASHIFT) & ~0x3); | | 3858 | (sc->ctx_tbl_pa >> SRMMU_PPNPASHIFT) & ~0x3); |
3849 | | | 3859 | |
3850 | tlb_flush_all_real(); | | 3860 | tlb_flush_all_real(); |
3851 | | | 3861 | |
3852 | #ifdef DEBUG | | 3862 | #ifdef DEBUG |
3853 | prom_printf("done.\n"); | | 3863 | prom_printf("done.\n"); |
3854 | #endif | | 3864 | #endif |
3855 | } | | 3865 | } |
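/*
 * A note on the CXTPTR write above (encoding per the SPARC V8
 * reference MMU; treat the arithmetic as a sketch): the context
 * table pointer register holds PA[35:6] of the context table in
 * its bits [31:2], so the physical address is shifted down by
 * SRMMU_PPNPASHIFT and the two low bits are cleared with ~0x3.
 */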
3856 | | | 3866 | |
3857 | void srmmu_restore_prom_ctx(void); | | 3867 | void srmmu_restore_prom_ctx(void); |
3858 | | | 3868 | |
3859 | void | | 3869 | void |
3860 | srmmu_restore_prom_ctx(void) | | 3870 | srmmu_restore_prom_ctx(void) |
3861 | { | | 3871 | { |
3862 | | | 3872 | |
3863 | tlb_flush_all(); | | 3873 | tlb_flush_all(); |
3864 | sta(SRMMU_CXTPTR, ASI_SRMMU, prom_ctxreg); | | 3874 | sta(SRMMU_CXTPTR, ASI_SRMMU, prom_ctxreg); |
3865 | tlb_flush_all(); | | 3875 | tlb_flush_all(); |
3866 | } | | 3876 | } |
3867 | #endif /* SUN4M || SUN4D */ | | 3877 | #endif /* SUN4M || SUN4D */ |
3868 | | | 3878 | |
3869 | #if defined(MULTIPROCESSOR) | | 3879 | #if defined(MULTIPROCESSOR) |
3870 | /* | | 3880 | /* |
3871 | * Allocate per-CPU page tables. One region, one segment and one page | | 3881 | * Allocate per-CPU page tables. One region, one segment and one page
3872 | * table are needed to map CPUINFO_VA to different physical addresses on | | 3882 | * table are needed to map CPUINFO_VA to different physical addresses on
3873 | * each CPU. Since the kernel region and segment tables are all | | 3883 | * each CPU. Since the kernel region and segment tables are all |
3874 | * pre-wired (in bootstrap() above) and we also assume that the | | 3884 | * pre-wired (in bootstrap() above) and we also assume that the |
3875 | * first segment (256K) of kernel space is fully populated with | | 3885 | * first segment (256K) of kernel space is fully populated with |
3876 | * pages from the start, these per-CPU tables will never need | | 3886 | * pages from the start, these per-CPU tables will never need |
3877 | * to be updated when mapping kernel virtual memory. | | 3887 | * to be updated when mapping kernel virtual memory. |
3878 | * | | 3888 | * |
3879 | * Note: this routine is called in the context of the boot CPU | | 3889 | * Note: this routine is called in the context of the boot CPU |
3880 | * during autoconfig. | | 3890 | * during autoconfig. |
3881 | */ | | 3891 | */ |
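/*
 * For orientation, a sketch of the 3-level address split assumed
 * here (the usual sun4m geometry; entry counts are quoted from the
 * SRMMU spec, not derived from this file):
 *
 *   vr  = VA_VREG(va)  -- 256 regions  of 16 MB  (L1)
 *   vs  = VA_VSEG(va)  -- 64  segments of 256 KB per region (L2)
 *   vpg = VA_VPG(va)   -- 64  pages    of 4 KB   per segment (L3)
 */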
3882 | void | | 3892 | void |
3883 | pmap_alloc_cpu(struct cpu_info *sc) | | 3893 | pmap_alloc_cpu(struct cpu_info *sc) |
3884 | { | | 3894 | { |
3885 | #if defined(SUN4M) || defined(SUN4D) /* Only implemented for SUN4M/D */ | | 3895 | #if defined(SUN4M) || defined(SUN4D) /* Only implemented for SUN4M/D */ |
3886 | vaddr_t va; | | 3896 | vaddr_t va; |
3887 | paddr_t pa; | | 3897 | paddr_t pa; |
3888 | paddr_t alignment; | | 3898 | paddr_t alignment; |
3889 | u_int *ctxtable, *regtable, *segtable, *pagtable; | | 3899 | u_int *ctxtable, *regtable, *segtable, *pagtable; |
3890 | u_int *ctxtable_pa, *regtable_pa, *segtable_pa, *pagtable_pa; | | 3900 | u_int *ctxtable_pa, *regtable_pa, *segtable_pa, *pagtable_pa; |
3891 | psize_t ctxsize, size; | | 3901 | psize_t ctxsize, size; |
3892 | int vr, vs, vpg; | | 3902 | int vr, vs, vpg; |
3893 | struct regmap *rp; | | 3903 | struct regmap *rp; |
3894 | struct segmap *sp; | | 3904 | struct segmap *sp; |
3895 | struct pglist mlist; | | 3905 | struct pglist mlist; |
3896 | int cachebit; | | 3906 | int cachebit; |
3897 | int pagesz = NBPG; | | 3907 | int pagesz = NBPG; |
3898 | int i; | | 3908 | int i; |
3899 | | | 3909 | |
3900 | cachebit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0; | | 3910 | cachebit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0; |
3901 | | | 3911 | |
3902 | /* | | 3912 | /* |
3903 | * Allocate properly aligned and physically contiguous memory | | 3913 | * Allocate properly aligned and physically contiguous memory
3904 | * for the PTE tables. | | 3914 | * for the PTE tables. |
3905 | */ | | 3915 | */ |
3906 | ctxsize = (sc->mmu_ncontext * sizeof(int) + pagesz - 1) & -pagesz; | | 3916 | ctxsize = (sc->mmu_ncontext * sizeof(int) + pagesz - 1) & -pagesz; |
3907 | alignment = ctxsize; | | 3917 | alignment = ctxsize; |
3908 | | | 3918 | |
3909 | /* The region, segment and page table we need fit in one page */ | | 3919 | /* The region, segment and page table we need fit in one page */ |
3910 | size = ctxsize + pagesz; | | 3920 | size = ctxsize + pagesz; |
3911 | | | 3921 | |
3912 | if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys, | | 3922 | if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys, |
3913 | alignment, 0, &mlist, 1, 0) != 0) | | 3923 | alignment, 0, &mlist, 1, 0) != 0) |
3914 | panic("pmap_alloc_cpu: no memory"); | | 3924 | panic("pmap_alloc_cpu: no memory"); |
3915 | | | 3925 | |
3916 | pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist)); | | 3926 | pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist)); |
3917 | | | 3927 | |
3918 | /* Allocate virtual memory */ | | 3928 | /* Allocate virtual memory */ |
3919 | va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY); | | 3929 | va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY); |
3920 | if (va == 0) | | 3930 | if (va == 0) |
3921 | panic("pmap_alloc_cpu: no memory"); | | 3931 | panic("pmap_alloc_cpu: no memory"); |
3922 | | | 3932 | |
3923 | /* | | 3933 | /* |
3924 | * Layout the page tables in our chunk of memory | | 3934 | * Layout the page tables in our chunk of memory |
3925 | */ | | 3935 | */ |
3926 | ctxtable = (u_int *)va; | | 3936 | ctxtable = (u_int *)va; |
3927 | regtable = (u_int *)(va + ctxsize); | | 3937 | regtable = (u_int *)(va + ctxsize); |
3928 | segtable = regtable + SRMMU_L1SIZE; | | 3938 | segtable = regtable + SRMMU_L1SIZE; |
3929 | pagtable = segtable + SRMMU_L2SIZE; | | 3939 | pagtable = segtable + SRMMU_L2SIZE; |
3930 | | | 3940 | |
3931 | ctxtable_pa = (u_int *)pa; | | 3941 | ctxtable_pa = (u_int *)pa; |
3932 | regtable_pa = (u_int *)(pa + ctxsize); | | 3942 | regtable_pa = (u_int *)(pa + ctxsize); |
3933 | segtable_pa = regtable_pa + SRMMU_L1SIZE; | | 3943 | segtable_pa = regtable_pa + SRMMU_L1SIZE; |
3934 | pagtable_pa = segtable_pa + SRMMU_L2SIZE; | | 3944 | pagtable_pa = segtable_pa + SRMMU_L2SIZE; |
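/*
 * Layout of the chunk just carved up (ctxsize is page-rounded, so
 * the three translation tables share the single extra page):
 *
 *   [ context table : ctxsize bytes      ]
 *   [ region table  : SRMMU_L1SIZE ints  ]
 *   [ segment table : SRMMU_L2SIZE ints  ]
 *   [ page table    : SRMMU_L3SIZE ints  ]
 */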
3935 | | | 3945 | |
3936 | /* Map the pages */ | | 3946 | /* Map the pages */ |
3937 | while (size != 0) { | | 3947 | while (size != 0) { |
3938 | pmap_kenter_pa(va, pa | (cachebit ? 0 : PMAP_NC), | | 3948 | pmap_kenter_pa(va, pa | (cachebit ? 0 : PMAP_NC), |
3939 | VM_PROT_READ | VM_PROT_WRITE); | | 3949 | VM_PROT_READ | VM_PROT_WRITE); |
3940 | va += pagesz; | | 3950 | va += pagesz; |
3941 | pa += pagesz; | | 3951 | pa += pagesz; |
3942 | size -= pagesz; | | 3952 | size -= pagesz; |
3943 | } | | 3953 | } |
3944 | pmap_update(pmap_kernel()); | | 3954 | pmap_update(pmap_kernel()); |
3945 | | | 3955 | |
3946 | /* | | 3956 | /* |
3947 | * Store the region table pointer (and its corresponding physical | | 3957 | * Store the region table pointer (and its corresponding physical |
3948 | * address) in the CPU's slot in the kernel pmap region table | | 3958 | * address) in the CPU's slot in the kernel pmap region table |
3949 | * pointer table. | | 3959 | * pointer table. |
3950 | */ | | 3960 | */ |
3951 | pmap_kernel()->pm_reg_ptps[sc->ci_cpuid] = regtable; | | 3961 | pmap_kernel()->pm_reg_ptps[sc->ci_cpuid] = regtable; |
3952 | pmap_kernel()->pm_reg_ptps_pa[sc->ci_cpuid] = (paddr_t)regtable_pa; | | 3962 | pmap_kernel()->pm_reg_ptps_pa[sc->ci_cpuid] = (paddr_t)regtable_pa; |
3953 | | | 3963 | |
3954 | vr = VA_VREG(CPUINFO_VA); | | 3964 | vr = VA_VREG(CPUINFO_VA); |
3955 | vs = VA_VSEG(CPUINFO_VA); | | 3965 | vs = VA_VSEG(CPUINFO_VA); |
3956 | vpg = VA_VPG(CPUINFO_VA); | | 3966 | vpg = VA_VPG(CPUINFO_VA); |
3957 | rp = &pmap_kernel()->pm_regmap[vr]; | | 3967 | rp = &pmap_kernel()->pm_regmap[vr]; |
3958 | sp = &rp->rg_segmap[vs]; | | 3968 | sp = &rp->rg_segmap[vs]; |
3959 | | | 3969 | |
3960 | /* | | 3970 | /* |
3961 | * Copy page tables from CPU #0, then modify entry for CPUINFO_VA | | 3971 | * Copy page tables from CPU #0, then modify entry for CPUINFO_VA |
3962 | * so that it points at the per-CPU pages. | | 3972 | * so that it points at the per-CPU pages. |
3963 | */ | | 3973 | */ |
3964 | qcopy(pmap_kernel()->pm_reg_ptps[0], regtable, | | 3974 | qcopy(pmap_kernel()->pm_reg_ptps[0], regtable, |
3965 | SRMMU_L1SIZE * sizeof(int)); | | 3975 | SRMMU_L1SIZE * sizeof(int)); |
3966 | qcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int)); | | 3976 | qcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int)); |
3967 | qcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int)); | | 3977 | qcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int)); |
3968 | | | 3978 | |
3969 | setpgt4m(&ctxtable[0], | | 3979 | setpgt4m(&ctxtable[0], |
3970 | ((u_long)regtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); | | 3980 | ((u_long)regtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); |
3971 | setpgt4m(®table[vr], | | 3981 | setpgt4m(®table[vr], |
3972 | ((u_long)segtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); | | 3982 | ((u_long)segtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); |
3973 | setpgt4m(&segtable[vs], | | 3983 | setpgt4m(&segtable[vs], |
3974 | ((u_long)pagtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); | | 3984 | ((u_long)pagtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD); |
3975 | setpgt4m(&pagtable[vpg], | | 3985 | setpgt4m(&pagtable[vpg], |
3976 | (VA2PA((void *)sc) >> SRMMU_PPNPASHIFT) | | | 3986 | (VA2PA((void *)sc) >> SRMMU_PPNPASHIFT) | |
3977 | (SRMMU_TEPTE | PPROT_N_RWX | SRMMU_PG_C)); | | 3987 | (SRMMU_TEPTE | PPROT_N_RWX | SRMMU_PG_C)); |
3978 | | | 3988 | |
3979 | /* Install this CPU's context table */ | | 3989 | /* Install this CPU's context table */ |
3980 | sc->ctx_tbl = ctxtable; | | 3990 | sc->ctx_tbl = ctxtable; |
3981 | sc->ctx_tbl_pa = (paddr_t)ctxtable_pa; | | 3991 | sc->ctx_tbl_pa = (paddr_t)ctxtable_pa; |
3982 | | | 3992 | |
3983 | /* Pre-compute this CPU's vpage[] PTEs */ | | 3993 | /* Pre-compute this CPU's vpage[] PTEs */ |
3984 | for (i = 0; i < 2; i++) { | | 3994 | for (i = 0; i < 2; i++) { |
3985 | rp = &pmap_kernel()->pm_regmap[VA_VREG(sc->vpage[i])]; | | 3995 | rp = &pmap_kernel()->pm_regmap[VA_VREG(sc->vpage[i])]; |
3986 | sp = &rp->rg_segmap[VA_VSEG(sc->vpage[i])]; | | 3996 | sp = &rp->rg_segmap[VA_VSEG(sc->vpage[i])]; |
3987 | sc->vpage_pte[i] = &sp->sg_pte[VA_SUN4M_VPG(sc->vpage[i])]; | | 3997 | sc->vpage_pte[i] = &sp->sg_pte[VA_SUN4M_VPG(sc->vpage[i])]; |
3988 | } | | 3998 | } |
3989 | #endif /* SUN4M || SUN4D */ | | 3999 | #endif /* SUN4M || SUN4D */ |
3990 | } | | 4000 | } |
3991 | #endif /* MULTIPROCESSOR */ | | 4001 | #endif /* MULTIPROCESSOR */ |
3992 | | | 4002 | |
3993 | | | 4003 | |
3994 | void | | 4004 | void |
3995 | pmap_init(void) | | 4005 | pmap_init(void) |
3996 | { | | 4006 | { |
3997 | u_int sz; | | 4007 | u_int sz; |
3998 | | | 4008 | |
3999 | if (PAGE_SIZE != NBPG) | | 4009 | if (PAGE_SIZE != NBPG) |
4000 | panic("pmap_init: PAGE_SIZE!=NBPG"); | | 4010 | panic("pmap_init: PAGE_SIZE!=NBPG"); |
4001 | | | 4011 | |
4002 | vm_num_phys = vm_last_phys - vm_first_phys; | | 4012 | vm_num_phys = vm_last_phys - vm_first_phys; |
4003 | | | 4013 | |
4005 | /* Set up a pool for additional pvlist structures */ | | 4015 | /* Set up a pool for additional pvlist structures */
4005 | pool_init(&pv_pool, sizeof(struct pvlist), 0, 0, 0, "pvtable", NULL, | | 4015 | pool_init(&pv_pool, sizeof(struct pvlist), 0, 0, 0, "pvtable", NULL, |
4006 | IPL_NONE); | | 4016 | IPL_NONE); |
4007 | | | 4017 | |
4008 | /* | | 4018 | /* |
4009 | * Set up a pool for pmap structures. | | 4019 | * Set up a pool for pmap structures.
4010 | * The pool size includes space for an array of per-CPU | | 4020 | * The pool size includes space for an array of per-CPU |
4011 | * region table pointers & physical addresses | | 4021 | * region table pointers & physical addresses |
4012 | */ | | 4022 | */ |
4013 | sz = ALIGN(sizeof(struct pmap)) + | | 4023 | sz = ALIGN(sizeof(struct pmap)) + |
4014 | ALIGN(NUREG * sizeof(struct regmap)) + | | 4024 | ALIGN(NUREG * sizeof(struct regmap)) + |
4015 | sparc_ncpus * sizeof(int *) + /* pm_reg_ptps */ | | 4025 | sparc_ncpus * sizeof(int *) + /* pm_reg_ptps */ |
4016 | sparc_ncpus * sizeof(int); /* pm_reg_ptps_pa */ | | 4026 | sparc_ncpus * sizeof(int); /* pm_reg_ptps_pa */ |
4017 | pool_cache_bootstrap(&pmap_cache, sz, 0, 0, 0, "pmappl", NULL, | | 4027 | pool_cache_bootstrap(&pmap_cache, sz, 0, 0, 0, "pmappl", NULL, |
4018 | IPL_NONE, pmap_pmap_pool_ctor, pmap_pmap_pool_dtor, NULL); | | 4028 | IPL_NONE, pmap_pmap_pool_ctor, pmap_pmap_pool_dtor, NULL); |
4019 | | | 4029 | |
4020 | sz = NSEGRG * sizeof (struct segmap); | | 4030 | sz = NSEGRG * sizeof (struct segmap); |
4021 | pool_init(&segmap_pool, sz, 0, 0, 0, "segmap", NULL, IPL_NONE); | | 4031 | pool_init(&segmap_pool, sz, 0, 0, 0, "segmap", NULL, IPL_NONE); |
4022 | | | 4032 | |
4023 | #if defined(SUN4M) || defined(SUN4D) | | 4033 | #if defined(SUN4M) || defined(SUN4D) |
4024 | if (CPU_HAS_SRMMU) { | | 4034 | if (CPU_HAS_SRMMU) { |
4025 | /* | | 4035 | /* |
4026 | * The SRMMU only ever needs chunks in one of two sizes: | | 4036 | * The SRMMU only ever needs chunks in one of two sizes: |
4027 | * 1024 bytes (for region level tables) and 256 bytes (for segment | | 4037 | * 1024 bytes (for region level tables) and 256 bytes (for segment
4028 | * and page level tables). | | 4038 | * and page level tables). |
4029 | */ | | 4039 | */ |
4030 | sz = SRMMU_L1SIZE * sizeof(int); | | 4040 | sz = SRMMU_L1SIZE * sizeof(int); |
4031 | pool_init(&L1_pool, sz, sz, 0, 0, "L1 pagetable", | | 4041 | pool_init(&L1_pool, sz, sz, 0, 0, "L1 pagetable", |
4032 | &pgt_page_allocator, IPL_NONE); | | 4042 | &pgt_page_allocator, IPL_NONE); |
4033 | | | 4043 | |
4034 | sz = SRMMU_L2SIZE * sizeof(int); | | 4044 | sz = SRMMU_L2SIZE * sizeof(int); |
4035 | pool_init(&L23_pool, sz, sz, 0, 0, "L2/L3 pagetable", | | 4045 | pool_init(&L23_pool, sz, sz, 0, 0, "L2/L3 pagetable", |
4036 | &pgt_page_allocator, IPL_NONE); | | 4046 | &pgt_page_allocator, IPL_NONE); |
4037 | } | | 4047 | } |
4038 | #endif /* SUN4M || SUN4D */ | | 4048 | #endif /* SUN4M || SUN4D */ |
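/*
 * The two sizes above follow from the table geometry (standard
 * SRMMU values, stated here as an assumption): a region table has
 * SRMMU_L1SIZE = 256 four-byte entries = 1024 bytes, while segment
 * and page tables have 64 four-byte entries = 256 bytes each.
 */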
4039 | #if defined(SUN4) || defined(SUN4C) | | 4049 | #if defined(SUN4) || defined(SUN4C) |
4040 | if (CPU_HAS_SUNMMU) { | | 4050 | if (CPU_HAS_SUNMMU) { |
4041 | sz = NPTESG * sizeof(int); | | 4051 | sz = NPTESG * sizeof(int); |
4042 | pool_init(&pte_pool, sz, 0, 0, 0, "ptemap", NULL, | | 4052 | pool_init(&pte_pool, sz, 0, 0, 0, "ptemap", NULL, |
4043 | IPL_NONE); | | 4053 | IPL_NONE); |
4044 | } | | 4054 | } |
4045 | #endif /* SUN4 || SUN4C */ | | 4055 | #endif /* SUN4 || SUN4C */ |
4046 | } | | 4056 | } |
4047 | | | 4057 | |
4048 | | | 4058 | |
4049 | /* | | 4059 | /* |
4050 | * Map physical addresses into kernel VM. | | 4060 | * Map physical addresses into kernel VM. |
4051 | */ | | 4061 | */ |
4052 | vaddr_t | | 4062 | vaddr_t |
4053 | pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot) | | 4063 | pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot) |
4054 | { | | 4064 | { |
4055 | int pgsize = PAGE_SIZE; | | 4065 | int pgsize = PAGE_SIZE; |
4056 | | | 4066 | |
4057 | while (pa < endpa) { | | 4067 | while (pa < endpa) { |
4058 | pmap_kenter_pa(va, pa, prot); | | 4068 | pmap_kenter_pa(va, pa, prot); |
4059 | va += pgsize; | | 4069 | va += pgsize; |
4060 | pa += pgsize; | | 4070 | pa += pgsize; |
4061 | } | | 4071 | } |
4062 | pmap_update(pmap_kernel()); | | 4072 | pmap_update(pmap_kernel()); |
4063 | return (va); | | 4073 | return (va); |
4064 | } | | 4074 | } |
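/*
 * Hypothetical use from attach code (illustrative only; the names
 * regs_pa/regs_len are invented): map a device's register window
 * and obtain the next free VA:
 *
 *	va = pmap_map(va, regs_pa, regs_pa + regs_len,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 */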
4065 | | | 4075 | |
4066 | #ifdef DEBUG | | 4076 | #ifdef DEBUG |
4067 | /* | | 4077 | /* |
4068 | * Check a pmap for spuriously lingering mappings | | 4078 | * Check a pmap for spuriously lingering mappings |
4069 | */ | | 4079 | */ |
4070 | static inline void | | 4080 | static inline void |
4071 | pmap_quiet_check(struct pmap *pm) | | 4081 | pmap_quiet_check(struct pmap *pm) |
4072 | { | | 4082 | { |
4073 | int vs, vr; | | 4083 | int vs, vr; |
4074 | | | 4084 | |
4075 | if (CPU_HAS_SUNMMU) { | | 4085 | if (CPU_HAS_SUNMMU) { |
4076 | #if defined(SUN4_MMU3L) | | 4086 | #if defined(SUN4_MMU3L) |
4077 | if (TAILQ_FIRST(&pm->pm_reglist)) | | 4087 | if (TAILQ_FIRST(&pm->pm_reglist)) |
4078 | panic("pmap_destroy: region list not empty"); | | 4088 | panic("pmap_destroy: region list not empty"); |
4079 | #endif | | 4089 | #endif |
4080 | if (TAILQ_FIRST(&pm->pm_seglist)) | | 4090 | if (TAILQ_FIRST(&pm->pm_seglist)) |
4081 | panic("pmap_destroy: segment list not empty"); | | 4091 | panic("pmap_destroy: segment list not empty"); |
4082 | } | | 4092 | } |
4083 | | | 4093 | |
4084 | for (vr = 0; vr < NUREG; vr++) { | | 4094 | for (vr = 0; vr < NUREG; vr++) { |
4085 | struct regmap *rp = &pm->pm_regmap[vr]; | | 4095 | struct regmap *rp = &pm->pm_regmap[vr]; |
4086 | | | 4096 | |
4087 | if (HASSUN4_MMU3L) { | | 4097 | if (HASSUN4_MMU3L) { |
4088 | if (rp->rg_smeg != reginval) | | 4098 | if (rp->rg_smeg != reginval) |
4089 | printf("pmap_chk: spurious smeg in " | | 4099 | printf("pmap_chk: spurious smeg in " |
4090 | "user region %d\n", vr); | | 4100 | "user region %d\n", vr); |
4091 | } | | 4101 | } |
4092 | if (CPU_HAS_SRMMU) { | | 4102 | if (CPU_HAS_SRMMU) { |
4093 | int n; | | 4103 | int n; |
4094 | #if defined(MULTIPROCESSOR) | | 4104 | #if defined(MULTIPROCESSOR) |
4095 | for (n = 0; n < sparc_ncpus; n++) | | 4105 | for (n = 0; n < sparc_ncpus; n++) |
4096 | #else | | 4106 | #else |
4097 | n = 0; | | 4107 | n = 0; |
4098 | #endif | | 4108 | #endif |
4099 | { | | 4109 | { |
4100 | if (pm->pm_reg_ptps[n][vr] != SRMMU_TEINVALID) | | 4110 | if (pm->pm_reg_ptps[n][vr] != SRMMU_TEINVALID) |
4101 | printf("pmap_chk: spurious PTP in user " | | 4111 | printf("pmap_chk: spurious PTP in user " |
4102 | "region %d on CPU %d\n", vr, n); | | 4112 | "region %d on CPU %d\n", vr, n); |
4103 | } | | 4113 | } |
4104 | } | | 4114 | } |
4105 | if (rp->rg_nsegmap != 0) | | 4115 | if (rp->rg_nsegmap != 0) |
4106 | printf("pmap_chk: %d segments remain in " | | 4116 | printf("pmap_chk: %d segments remain in " |
4107 | "region %d\n", rp->rg_nsegmap, vr); | | 4117 | "region %d\n", rp->rg_nsegmap, vr); |
4108 | if (rp->rg_segmap != NULL) { | | 4118 | if (rp->rg_segmap != NULL) { |
4109 | printf("pmap_chk: segments still " | | 4119 | printf("pmap_chk: segments still " |
4110 | "allocated in region %d\n", vr); | | 4120 | "allocated in region %d\n", vr); |
4111 | for (vs = 0; vs < NSEGRG; vs++) { | | 4121 | for (vs = 0; vs < NSEGRG; vs++) { |
4112 | struct segmap *sp = &rp->rg_segmap[vs]; | | 4122 | struct segmap *sp = &rp->rg_segmap[vs]; |
4113 | if (sp->sg_npte != 0) | | 4123 | if (sp->sg_npte != 0) |
4114 | printf("pmap_chk: %d ptes " | | 4124 | printf("pmap_chk: %d ptes " |
4115 | "remain in segment %d\n", | | 4125 | "remain in segment %d\n", |
4116 | sp->sg_npte, vs); | | 4126 | sp->sg_npte, vs); |
4117 | if (sp->sg_pte != NULL) { | | 4127 | if (sp->sg_pte != NULL) { |
4118 | printf("pmap_chk: ptes still " | | 4128 | printf("pmap_chk: ptes still " |
4119 | "allocated in segment %d\n", vs); | | 4129 | "allocated in segment %d\n", vs); |
4120 | } | | 4130 | } |
4121 | if (CPU_HAS_SUNMMU) { | | 4131 | if (CPU_HAS_SUNMMU) { |
4122 | if (sp->sg_pmeg != seginval) | | 4132 | if (sp->sg_pmeg != seginval) |
4123 | printf("pmap_chk: pm %p(%d,%d) " | | 4133 | printf("pmap_chk: pm %p(%d,%d) " |
4124 | "spurious soft pmeg %d\n", | | 4134 | "spurious soft pmeg %d\n", |
4125 | pm, vr, vs, sp->sg_pmeg); | | 4135 | pm, vr, vs, sp->sg_pmeg); |
4126 | } | | 4136 | } |
4127 | } | | 4137 | } |
4128 | } | | 4138 | } |
4129 | | | 4139 | |
4130 | /* Check for spurious pmeg entries in the MMU */ | | 4140 | /* Check for spurious pmeg entries in the MMU */ |
4131 | if (pm->pm_ctx == NULL) | | 4141 | if (pm->pm_ctx == NULL) |
4132 | continue; | | 4142 | continue; |
4133 | if (CPU_HAS_SUNMMU) { | | 4143 | if (CPU_HAS_SUNMMU) { |
4134 | int ctx; | | 4144 | int ctx; |
4135 | if (mmu_has_hole && (vr >= 32 && vr < (256 - 32))) | | 4145 | if (mmu_has_hole && (vr >= 32 && vr < (256 - 32)))
4136 | continue; | | 4146 | continue; |
4137 | ctx = getcontext4(); | | 4147 | ctx = getcontext4(); |
4138 | setcontext4(pm->pm_ctxnum); | | 4148 | setcontext4(pm->pm_ctxnum); |
4139 | for (vs = 0; vs < NSEGRG; vs++) { | | 4149 | for (vs = 0; vs < NSEGRG; vs++) { |
4140 | vaddr_t va = VSTOVA(vr,vs); | | 4150 | vaddr_t va = VSTOVA(vr,vs); |
4141 | int pmeg = getsegmap(va); | | 4151 | int pmeg = getsegmap(va); |
4142 | if (pmeg != seginval) | | 4152 | if (pmeg != seginval) |
4143 | printf("pmap_chk: pm %p(%d,%d:%x): " | | 4153 | printf("pmap_chk: pm %p(%d,%d:%x): " |
4144 | "spurious pmeg %d\n", | | 4154 | "spurious pmeg %d\n", |
4145 | pm, vr, vs, (u_int)va, pmeg); | | 4155 | pm, vr, vs, (u_int)va, pmeg); |
4146 | } | | 4156 | } |
4147 | setcontext4(ctx); | | 4157 | setcontext4(ctx); |
4148 | } | | 4158 | } |
4149 | } | | 4159 | } |
4150 | if (pm->pm_stats.resident_count) { | | 4160 | if (pm->pm_stats.resident_count) { |
4151 | printf("pmap_chk: res count %ld\n", | | 4161 | printf("pmap_chk: res count %ld\n", |
4152 | pm->pm_stats.resident_count); | | 4162 | pm->pm_stats.resident_count); |
4153 | } | | 4163 | } |
4154 | if (pm->pm_stats.wired_count) { | | 4164 | if (pm->pm_stats.wired_count) { |
4155 | printf("pmap_chk: wired count %ld\n", | | 4165 | printf("pmap_chk: wired count %ld\n", |
4156 | pm->pm_stats.wired_count); | | 4166 | pm->pm_stats.wired_count); |
4157 | } | | 4167 | } |
4158 | } | | 4168 | } |
4159 | #endif /* DEBUG */ | | 4169 | #endif /* DEBUG */ |
4160 | | | 4170 | |
4161 | int | | 4171 | int |
4162 | pmap_pmap_pool_ctor(void *arg, void *object, int flags) | | 4172 | pmap_pmap_pool_ctor(void *arg, void *object, int flags) |
4163 | { | | 4173 | { |
4164 | struct pmap *pm = object; | | 4174 | struct pmap *pm = object; |
4165 | u_long addr; | | 4175 | u_long addr; |
4166 | | | 4176 | |
4167 | memset(pm, 0, sizeof *pm); | | 4177 | memset(pm, 0, sizeof *pm); |
4168 | | | 4178 | |
4169 | /* | | 4179 | /* |
4170 | * `pmap_pool' entries include space for the per-CPU | | 4180 | * `pmap_pool' entries include space for the per-CPU |
4171 | * region table pointer arrays. | | 4181 | * region table pointer arrays. |
4172 | */ | | 4182 | */ |
4173 | addr = (u_long)pm + ALIGN(sizeof(struct pmap)); | | 4183 | addr = (u_long)pm + ALIGN(sizeof(struct pmap)); |
4174 | pm->pm_regmap = (void *)addr; | | 4184 | pm->pm_regmap = (void *)addr; |
4175 | addr += ALIGN(NUREG * sizeof(struct regmap)); | | 4185 | addr += ALIGN(NUREG * sizeof(struct regmap)); |
4176 | pm->pm_reg_ptps = (int **)addr; | | 4186 | pm->pm_reg_ptps = (int **)addr; |
4177 | addr += sparc_ncpus * sizeof(int *); | | 4187 | addr += sparc_ncpus * sizeof(int *); |
4178 | pm->pm_reg_ptps_pa = (int *)addr; | | 4188 | pm->pm_reg_ptps_pa = (int *)addr; |
4179 | | | 4189 | |
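/*
 * Resulting layout of one pmap_cache entry (matches the size
 * computed in pmap_init()):
 *
 *   pm ------------------> struct pmap
 *   pm->pm_regmap -------> NUREG struct regmap
 *   pm->pm_reg_ptps -----> sparc_ncpus int * (per-CPU L1 table VAs)
 *   pm->pm_reg_ptps_pa --> sparc_ncpus int   (their physical addrs)
 */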
4180 | qzero((void *)pm->pm_regmap, NUREG * sizeof(struct regmap)); | | 4190 | qzero((void *)pm->pm_regmap, NUREG * sizeof(struct regmap)); |
4181 | | | 4191 | |
4182 | /* pm->pm_ctx = NULL; // already done */ | | 4192 | /* pm->pm_ctx = NULL; // already done */ |
4183 | | | 4193 | |
4184 | if (CPU_HAS_SUNMMU) { | | 4194 | if (CPU_HAS_SUNMMU) { |
4185 | TAILQ_INIT(&pm->pm_seglist); | | 4195 | TAILQ_INIT(&pm->pm_seglist); |
4186 | #if defined(SUN4_MMU3L) | | 4196 | #if defined(SUN4_MMU3L) |
4187 | TAILQ_INIT(&pm->pm_reglist); | | 4197 | TAILQ_INIT(&pm->pm_reglist); |
4188 | if (HASSUN4_MMU3L) { | | 4198 | if (HASSUN4_MMU3L) { |
4189 | int i; | | 4199 | int i; |
4190 | for (i = NUREG; --i >= 0;) | | 4200 | for (i = NUREG; --i >= 0;) |
4191 | pm->pm_regmap[i].rg_smeg = reginval; | | 4201 | pm->pm_regmap[i].rg_smeg = reginval; |
4192 | } | | 4202 | } |
4193 | #endif | | 4203 | #endif |
4194 | } | | 4204 | } |
4195 | #if defined(SUN4M) || defined(SUN4D) | | 4205 | #if defined(SUN4M) || defined(SUN4D) |
4196 | else { | | 4206 | else { |
4197 | int i, n; | | 4207 | int i, n; |
4198 | | | 4208 | |
4199 | /* | | 4209 | /* |
4200 | * We must allocate and initialize hardware-readable (MMU) | | 4210 | * We must allocate and initialize hardware-readable (MMU) |
4201 | * pagetables. We must also map the kernel regions into this | | 4211 | * pagetables. We must also map the kernel regions into this |
4202 | * pmap's pagetables, so that we can access the kernel from | | 4212 | * pmap's pagetables, so that we can access the kernel from |
4203 | * this user context. | | 4213 | * this user context. |
4204 | */ | | 4214 | */ |
4205 | #if defined(MULTIPROCESSOR) | | 4215 | #if defined(MULTIPROCESSOR) |
4206 | for (n = 0; n < sparc_ncpus; n++) | | 4216 | for (n = 0; n < sparc_ncpus; n++) |
4207 | #else | | 4217 | #else |
4208 | n = 0; | | 4218 | n = 0; |
4209 | #endif | | 4219 | #endif |
4210 | { | | 4220 | { |
4211 | int *upt, *kpt; | | 4221 | int *upt, *kpt; |
4212 | | | 4222 | |
4213 | upt = pool_get(&L1_pool, flags); | | 4223 | upt = pool_get(&L1_pool, flags); |
4214 | pm->pm_reg_ptps[n] = upt; | | 4224 | pm->pm_reg_ptps[n] = upt; |
4215 | pm->pm_reg_ptps_pa[n] = VA2PA((char *)upt); | | 4225 | pm->pm_reg_ptps_pa[n] = VA2PA((char *)upt); |
4216 | | | 4226 | |
4217 | /* Invalidate user space regions */ | | 4227 | /* Invalidate user space regions */ |
4218 | for (i = 0; i < NUREG; i++) | | 4228 | for (i = 0; i < NUREG; i++) |
4219 | setpgt4m(upt++, SRMMU_TEINVALID); | | 4229 | setpgt4m(upt++, SRMMU_TEINVALID); |
4220 | | | 4230 | |
4221 | /* Copy kernel regions */ | | 4231 | /* Copy kernel regions */ |
4222 | kpt = &pmap_kernel()->pm_reg_ptps[n][VA_VREG(KERNBASE)]; | | 4232 | kpt = &pmap_kernel()->pm_reg_ptps[n][VA_VREG(KERNBASE)]; |
4223 | for (i = 0; i < NKREG; i++) | | 4233 | for (i = 0; i < NKREG; i++) |
4224 | setpgt4m(upt++, kpt[i]); | | 4234 | setpgt4m(upt++, kpt[i]); |
4225 | } | | 4235 | } |
4226 | } | | 4236 | } |
4227 | #endif /* SUN4M || SUN4D */ | | 4237 | #endif /* SUN4M || SUN4D */ |
4228 | | | 4238 | |
4229 | /* XXX - a peculiar place to do this, but we can't do it in pmap_init | | 4239 | /* XXX - a peculiar place to do this, but we can't do it in pmap_init |
4230 | * and here at least it's off the beaten code track. | | 4240 | * and here at least it's off the beaten code track. |
4231 | */ | | 4241 | */ |
4232 | {static int x; if (x == 0) pool_setlowat(&pv_pool, 512), x = 1; } | | 4242 | {static int x; if (x == 0) pool_setlowat(&pv_pool, 512), x = 1; } |
4233 | | | 4243 | |
4234 | return (0); | | 4244 | return (0); |
4235 | } | | 4245 | } |
4236 | | | 4246 | |
4237 | void | | 4247 | void |
4238 | pmap_pmap_pool_dtor(void *arg, void *object) | | 4248 | pmap_pmap_pool_dtor(void *arg, void *object) |
4239 | { | | 4249 | { |
4240 | struct pmap *pm = object; | | 4250 | struct pmap *pm = object; |
4241 | union ctxinfo *c; | | 4251 | union ctxinfo *c; |
4242 | int s = splvm(); /* paranoia */ | | 4252 | int s = splvm(); /* paranoia */ |
4243 | | | 4253 | |
4244 | #ifdef DEBUG | | 4254 | #ifdef DEBUG |
4245 | if (pmapdebug & PDB_DESTROY) | | 4255 | if (pmapdebug & PDB_DESTROY) |
4246 | printf("pmap_pmap_pool_dtor(%p)\n", pm); | | 4256 | printf("pmap_pmap_pool_dtor(%p)\n", pm); |
4247 | #endif | | 4257 | #endif |
4248 | | | 4258 | |
4249 | if ((c = pm->pm_ctx) != NULL) { | | 4259 | if ((c = pm->pm_ctx) != NULL) { |
4250 | ctx_free(pm); | | 4260 | ctx_free(pm); |
4251 | } | | 4261 | } |
4252 | | | 4262 | |
4253 | #if defined(SUN4M) || defined(SUN4D) | | 4263 | #if defined(SUN4M) || defined(SUN4D) |
4254 | if (CPU_HAS_SRMMU) { | | 4264 | if (CPU_HAS_SRMMU) { |
4255 | int n; | | 4265 | int n; |
4256 | | | 4266 | |
4257 | #if defined(MULTIPROCESSOR) | | 4267 | #if defined(MULTIPROCESSOR) |
4258 | for (n = 0; n < sparc_ncpus; n++) | | 4268 | for (n = 0; n < sparc_ncpus; n++) |
4259 | #else | | 4269 | #else |
4260 | n = 0; | | 4270 | n = 0; |
4261 | #endif | | 4271 | #endif |
4262 | { | | 4272 | { |
4263 | int *pt = pm->pm_reg_ptps[n]; | | 4273 | int *pt = pm->pm_reg_ptps[n]; |
4264 | pm->pm_reg_ptps[n] = NULL; | | 4274 | pm->pm_reg_ptps[n] = NULL; |
4265 | pm->pm_reg_ptps_pa[n] = 0; | | 4275 | pm->pm_reg_ptps_pa[n] = 0; |
4266 | pool_put(&L1_pool, pt); | | 4276 | pool_put(&L1_pool, pt); |
4267 | } | | 4277 | } |
4268 | } | | 4278 | } |
4269 | #endif /* SUN4M || SUN4D */ | | 4279 | #endif /* SUN4M || SUN4D */ |
4270 | splx(s); | | 4280 | splx(s); |
4271 | } | | 4281 | } |
4272 | | | 4282 | |
4273 | /* | | 4283 | /* |
4274 | * Create and return a physical map. | | 4284 | * Create and return a physical map. |
4275 | */ | | 4285 | */ |
4276 | struct pmap * | | 4286 | struct pmap * |
4277 | pmap_create(void) | | 4287 | pmap_create(void) |
4278 | { | | 4288 | { |
4279 | struct pmap *pm; | | 4289 | struct pmap *pm; |
4280 | | | 4290 | |
4281 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); | | 4291 | pm = pool_cache_get(&pmap_cache, PR_WAITOK); |
4282 | | | 4292 | |
4283 | /* | | 4293 | /* |
4284 | * Reset fields that are not preserved in the pmap cache pool. | | 4294 | * Reset fields that are not preserved in the pmap cache pool. |
4285 | */ | | 4295 | */ |
4286 | pm->pm_refcount = 1; | | 4296 | pm->pm_refcount = 1; |
4287 | #if defined(MULTIPROCESSOR) | | 4297 | #if defined(MULTIPROCESSOR) |
4288 | /* reset active CPU set */ | | 4298 | /* reset active CPU set */ |
4289 | pm->pm_cpuset = 0; | | 4299 | pm->pm_cpuset = 0; |
4290 | #endif | | 4300 | #endif |
4291 | if (CPU_HAS_SUNMMU) { | | 4301 | if (CPU_HAS_SUNMMU) { |
4292 | /* reset the region gap */ | | 4302 | /* reset the region gap */ |
4293 | pm->pm_gap_start = 0; | | 4303 | pm->pm_gap_start = 0; |
4294 | pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS); | | 4304 | pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS); |
4295 | } | | 4305 | } |
4296 | | | 4306 | |
4297 | #ifdef DEBUG | | 4307 | #ifdef DEBUG |
4298 | if (pmapdebug & PDB_CREATE) | | 4308 | if (pmapdebug & PDB_CREATE) |
4299 | printf("pmap_create[%d]: created %p\n", cpu_number(), pm); | | 4309 | printf("pmap_create[%d]: created %p\n", cpu_number(), pm); |
4300 | pmap_quiet_check(pm); | | 4310 | pmap_quiet_check(pm); |
4301 | #endif | | 4311 | #endif |
4302 | return (pm); | | 4312 | return (pm); |
4303 | } | | 4313 | } |
4304 | | | 4314 | |
4305 | /* | | 4315 | /* |
4306 | * Retire the given pmap from service. | | 4316 | * Retire the given pmap from service. |
4307 | * Should only be called if the map contains no valid mappings. | | 4317 | * Should only be called if the map contains no valid mappings. |
4308 | */ | | 4318 | */ |
4309 | void | | 4319 | void |
4310 | pmap_destroy(struct pmap *pm) | | 4320 | pmap_destroy(struct pmap *pm) |
4311 | { | | 4321 | { |
4312 | | | 4322 | |
4313 | #ifdef DEBUG | | 4323 | #ifdef DEBUG |
4314 | if (pmapdebug & PDB_DESTROY) | | 4324 | if (pmapdebug & PDB_DESTROY) |
4315 | printf("pmap_destroy[%d](%p)\n", cpu_number(), pm); | | 4325 | printf("pmap_destroy[%d](%p)\n", cpu_number(), pm); |
4316 | #endif | | 4326 | #endif |
4317 | if (atomic_dec_uint_nv(&pm->pm_refcount) == 0) { | | 4327 | if (atomic_dec_uint_nv(&pm->pm_refcount) == 0) { |
4318 | #ifdef DEBUG | | 4328 | #ifdef DEBUG |
4319 | pmap_quiet_check(pm); | | 4329 | pmap_quiet_check(pm); |
4320 | #endif | | 4330 | #endif |
4321 | pool_cache_put(&pmap_cache, pm); | | 4331 | pool_cache_put(&pmap_cache, pm); |
4322 | } | | 4332 | } |
4323 | } | | 4333 | } |
4324 | | | 4334 | |
4325 | /* | | 4335 | /* |
4326 | * Add a reference to the given pmap. | | 4336 | * Add a reference to the given pmap. |
4327 | */ | | 4337 | */ |
4328 | void | | 4338 | void |
4329 | pmap_reference(struct pmap *pm) | | 4339 | pmap_reference(struct pmap *pm) |
4330 | { | | 4340 | { |
4331 | | | 4341 | |
4332 | atomic_inc_uint(&pm->pm_refcount); | | 4342 | atomic_inc_uint(&pm->pm_refcount); |
4333 | } | | 4343 | } |
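/*
 * Lifecycle sketch: pmap_create() hands out a pmap with
 * pm_refcount == 1; pmap_reference() bumps it; the pmap_destroy()
 * that drops the count to zero returns the object to pmap_cache
 * still constructed, so its hardware page tables are only torn
 * down if the pool later reclaims it via pmap_pmap_pool_dtor().
 */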
4334 | | | 4344 | |
4335 | #if defined(SUN4) || defined(SUN4C) | | 4345 | #if defined(SUN4) || defined(SUN4C) |
4336 | /* | | 4346 | /* |
4337 | * helper to deallocate level 2 & 3 page tables. | | 4347 | * helper to deallocate level 2 & 3 page tables. |
4338 | */ | | 4348 | */ |
4339 | static void | | 4349 | static void |
4340 | pgt_lvl23_remove4_4c(struct pmap *pm, struct regmap *rp, struct segmap *sp, | | 4350 | pgt_lvl23_remove4_4c(struct pmap *pm, struct regmap *rp, struct segmap *sp, |
4341 | int vr, int vs) | | 4351 | int vr, int vs) |
4342 | { | | 4352 | { |
4343 | vaddr_t va, tva; | | 4353 | vaddr_t va, tva; |
4344 | int i, pmeg; | | 4354 | int i, pmeg; |
4345 | | | 4355 | |
4346 | va = VSTOVA(vr,vs); | | 4356 | va = VSTOVA(vr,vs); |
4347 | if ((pmeg = sp->sg_pmeg) != seginval) { | | 4357 | if ((pmeg = sp->sg_pmeg) != seginval) { |
4348 | if (CTX_USABLE(pm,rp)) { | | 4358 | if (CTX_USABLE(pm,rp)) { |
4349 | setcontext4(pm->pm_ctxnum); | | 4359 | setcontext4(pm->pm_ctxnum); |
4350 | setsegmap(va, seginval); | | 4360 | setsegmap(va, seginval); |
4351 | } else { | | 4361 | } else { |
4352 | /* no context, use context 0 */ | | 4362 | /* no context, use context 0 */ |
4353 | setcontext4(0); | | 4363 | setcontext4(0); |
4354 | if (HASSUN4_MMU3L && rp->rg_smeg != reginval) { | | 4364 | if (HASSUN4_MMU3L && rp->rg_smeg != reginval) { |
4355 | setregmap(0, rp->rg_smeg); | | 4365 | setregmap(0, rp->rg_smeg); |
4356 | tva = vs << SGSHIFT; | | 4366 | tva = vs << SGSHIFT; |
4357 | setsegmap(tva, seginval); | | 4367 | setsegmap(tva, seginval); |
4358 | } | | 4368 | } |
4359 | } | | 4369 | } |
4360 | if (!HASSUN4_MMU3L) { | | 4370 | if (!HASSUN4_MMU3L) { |
4361 | if (pm == pmap_kernel()) { | | 4371 | if (pm == pmap_kernel()) { |
4362 | /* Unmap segment from all contexts */ | | 4372 | /* Unmap segment from all contexts */ |
4363 | for (i = ncontext; --i >= 0;) { | | 4373 | for (i = ncontext; --i >= 0;) { |
4364 | setcontext4(i); | | 4374 | setcontext4(i); |
4365 | setsegmap(va, seginval); | | 4375 | setsegmap(va, seginval); |
4366 | } | | 4376 | } |
4367 | } | | 4377 | } |
4368 | } | | 4378 | } |
4369 | me_free(pm, pmeg); | | 4379 | me_free(pm, pmeg); |
4370 | sp->sg_pmeg = seginval; | | 4380 | sp->sg_pmeg = seginval; |
4371 | } | | 4381 | } |
4372 | /* Free software tables for non-kernel maps */ | | 4382 | /* Free software tables for non-kernel maps */ |
4373 | if (pm != pmap_kernel()) { | | 4383 | if (pm != pmap_kernel()) { |
4374 | pool_put(&pte_pool, sp->sg_pte); | | 4384 | pool_put(&pte_pool, sp->sg_pte); |
4375 | sp->sg_pte = NULL; | | 4385 | sp->sg_pte = NULL; |
4376 | } | | 4386 | } |
4377 | | | 4387 | |
4378 | if (rp->rg_nsegmap <= 0) | | 4388 | if (rp->rg_nsegmap <= 0) |
4379 | panic("pgt_rm: pm %p: nsegmap = %d\n", pm, rp->rg_nsegmap); | | 4389 | panic("pgt_rm: pm %p: nsegmap = %d\n", pm, rp->rg_nsegmap); |
4380 | | | 4390 | |
4381 | if (--rp->rg_nsegmap == 0) { | | 4391 | if (--rp->rg_nsegmap == 0) { |
4382 | #if defined(SUN4_MMU3L) | | 4392 | #if defined(SUN4_MMU3L) |
4383 | if (HASSUN4_MMU3L) { | | 4393 | if (HASSUN4_MMU3L) { |
4384 | if (rp->rg_smeg != reginval) { | | 4394 | if (rp->rg_smeg != reginval) { |
4385 | if (pm == pmap_kernel()) { | | 4395 | if (pm == pmap_kernel()) { |
4386 | /* Unmap from all contexts */ | | 4396 | /* Unmap from all contexts */ |
4387 | for (i = ncontext; --i >= 0;) { | | 4397 | for (i = ncontext; --i >= 0;) { |
4388 | setcontext4(i); | | 4398 | setcontext4(i); |
4389 | setregmap(va, reginval); | | 4399 | setregmap(va, reginval); |
4390 | } | | 4400 | } |
4391 | } else if (pm->pm_ctx) { | | 4401 | } else if (pm->pm_ctx) { |
4392 | setcontext4(pm->pm_ctxnum); | | 4402 | setcontext4(pm->pm_ctxnum); |
4393 | setregmap(va, reginval); | | 4403 | setregmap(va, reginval); |
4394 | } | | 4404 | } |
4395 | | | 4405 | |
4396 | /* Release MMU resource */ | | 4406 | /* Release MMU resource */ |
4397 | region_free(pm, rp->rg_smeg); | | 4407 | region_free(pm, rp->rg_smeg); |
4398 | rp->rg_smeg = reginval; | | 4408 | rp->rg_smeg = reginval; |
4399 | } | | 4409 | } |
4400 | } | | 4410 | } |
4401 | #endif /* SUN4_MMU3L */ | | 4411 | #endif /* SUN4_MMU3L */ |
4402 | /* Free software tables for non-kernel maps */ | | 4412 | /* Free software tables for non-kernel maps */ |
4403 | if (pm != pmap_kernel()) { | | 4413 | if (pm != pmap_kernel()) { |
4404 | GAP_WIDEN(pm,vr); | | 4414 | GAP_WIDEN(pm,vr); |
4405 | pool_put(&segmap_pool, rp->rg_segmap); | | 4415 | pool_put(&segmap_pool, rp->rg_segmap); |
4406 | rp->rg_segmap = NULL; | | 4416 | rp->rg_segmap = NULL; |
4407 | } | | 4417 | } |
4408 | } | | 4418 | } |
4409 | } | | 4419 | } |
4410 | #endif /* SUN4 || SUN4C */ | | 4420 | #endif /* SUN4 || SUN4C */ |
4411 | | | 4421 | |
4412 | #if defined(SUN4M) || defined(SUN4D) | | 4422 | #if defined(SUN4M) || defined(SUN4D) |
4413 | /* | | 4423 | /* |
4414 | * SRMMU helper to deallocate level 2 & 3 page tables. | | 4424 | * SRMMU helper to deallocate level 2 & 3 page tables. |
4415 | */ | | 4425 | */ |
4416 | static void | | 4426 | static void |
4417 | pgt_lvl23_remove4m(struct pmap *pm, struct regmap *rp, struct segmap *sp, | | 4427 | pgt_lvl23_remove4m(struct pmap *pm, struct regmap *rp, struct segmap *sp, |
4418 | int vr, int vs) | | 4428 | int vr, int vs) |
4419 | { | | 4429 | { |
4420 | | | 4430 | |
4421 | /* Invalidate level 2 PTP entry */ | | 4431 | /* Invalidate level 2 PTP entry */ |
4422 | if (pm->pm_ctx) | | 4432 | if (pm->pm_ctx) |
4423 | tlb_flush_segment(VSTOVA(vr,vs), pm->pm_ctxnum, | | 4433 | tlb_flush_segment(VSTOVA(vr,vs), pm->pm_ctxnum, |
4424 | PMAP_CPUSET(pm)); | | 4434 | PMAP_CPUSET(pm)); |
4425 | setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID); | | 4435 | setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID); |
4426 | pool_put(&L23_pool, sp->sg_pte); | | 4436 | pool_put(&L23_pool, sp->sg_pte); |
4427 | sp->sg_pte = NULL; | | 4437 | sp->sg_pte = NULL; |
4428 | | | 4438 | |
4429 | /* If region is now empty, remove level 2 pagetable as well */ | | 4439 | /* If region is now empty, remove level 2 pagetable as well */ |
4430 | if (--rp->rg_nsegmap == 0) { | | 4440 | if (--rp->rg_nsegmap == 0) { |
4431 | int n = 0; | | 4441 | int n = 0; |
4432 | if (pm->pm_ctx) | | 4442 | if (pm->pm_ctx) |
4433 | tlb_flush_region(VRTOVA(vr), pm->pm_ctxnum, | | 4443 | tlb_flush_region(VRTOVA(vr), pm->pm_ctxnum, |
4434 | PMAP_CPUSET(pm)); | | 4444 | PMAP_CPUSET(pm)); |
4435 | #ifdef MULTIPROCESSOR | | 4445 | #ifdef MULTIPROCESSOR |
4436 | /* Invalidate level 1 PTP entries on all CPUs */ | | 4446 | /* Invalidate level 1 PTP entries on all CPUs */ |
4437 | for (; n < sparc_ncpus; n++) { | | 4447 | for (; n < sparc_ncpus; n++) { |
4438 | if ((cpus[n]->flags & CPUFLG_HATCHED) == 0) | | 4448 | if ((cpus[n]->flags & CPUFLG_HATCHED) == 0) |
4439 | continue; | | 4449 | continue; |
4440 | #endif | | 4450 | #endif |
4441 | setpgt4m(&pm->pm_reg_ptps[n][vr], SRMMU_TEINVALID); | | 4451 | setpgt4m(&pm->pm_reg_ptps[n][vr], SRMMU_TEINVALID); |
4442 | #ifdef MULTIPROCESSOR | | 4452 | #ifdef MULTIPROCESSOR |
4443 | } | | 4453 | } |
4444 | #endif | | 4454 | #endif |
4445 | | | 4455 | |
4446 | pool_put(&segmap_pool, rp->rg_segmap); | | 4456 | pool_put(&segmap_pool, rp->rg_segmap); |
4447 | rp->rg_segmap = NULL; | | 4457 | rp->rg_segmap = NULL; |
4448 | pool_put(&L23_pool, rp->rg_seg_ptps); | | 4458 | pool_put(&L23_pool, rp->rg_seg_ptps); |
4449 | } | | 4459 | } |
4450 | } | | 4460 | } |
4451 | #endif /* SUN4M || SUN4D */ | | 4461 | #endif /* SUN4M || SUN4D */ |
4452 | | | 4462 | |
4453 | void | | 4463 | void |
4454 | pmap_remove_all(struct pmap *pm) | | 4464 | pmap_remove_all(struct pmap *pm) |
4455 | { | | 4465 | { |
4456 | if (pm->pm_ctx == NULL) | | 4466 | if (pm->pm_ctx == NULL) |
4457 | return; | | 4467 | return; |
4458 | | | 4468 | |
4459 | #if defined(SUN4) || defined(SUN4C) | | 4469 | #if defined(SUN4) || defined(SUN4C) |
4460 | if (CPU_HAS_SUNMMU) { | | 4470 | if (CPU_HAS_SUNMMU) { |
4461 | int ctx = getcontext4(); | | 4471 | int ctx = getcontext4(); |
4462 | setcontext4(pm->pm_ctxnum); | | 4472 | setcontext4(pm->pm_ctxnum); |
4463 | cache_flush_context(pm->pm_ctxnum); | | 4473 | cache_flush_context(pm->pm_ctxnum); |
4464 | setcontext4(ctx); | | 4474 | setcontext4(ctx); |
4465 | } | | 4475 | } |
4466 | #endif | | 4476 | #endif |
4467 | | | 4477 | |
4468 | #if defined(SUN4M) || defined(SUN4D) | | 4478 | #if defined(SUN4M) || defined(SUN4D) |
4469 | if (CPU_HAS_SRMMU) { | | 4479 | if (CPU_HAS_SRMMU) { |
4470 | cache_flush_context(pm->pm_ctxnum); | | 4480 | cache_flush_context(pm->pm_ctxnum); |
4471 | } | | 4481 | } |
4472 | #endif | | 4482 | #endif |
4473 | | | 4483 | |
4474 | pm->pm_flags |= PMAP_USERCACHECLEAN; | | 4484 | pm->pm_flags |= PMAP_USERCACHECLEAN; |
4475 | } | | 4485 | } |
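/*
 * Note (inferred from the flag's name and the flush logic in the
 * pmap_rm* routines below, not stated in this file):
 * PMAP_USERCACHECLEAN records that this context has just been
 * flushed wholesale, so subsequent removals can presumably skip
 * their per-page cache flushes.
 */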
4476 | | | 4486 | |
4477 | /* | | 4487 | /* |
4478 | * Remove the given range of mapping entries. | | 4488 | * Remove the given range of mapping entries. |
4479 | * The starting and ending addresses are already rounded to pages. | | 4489 | * The starting and ending addresses are already rounded to pages. |
4480 | * Sheer lunacy: pmap_remove is often asked to remove nonexistent | | 4490 | * Sheer lunacy: pmap_remove is often asked to remove nonexistent |
4481 | * mappings. | | 4491 | * mappings. |
4482 | */ | | 4492 | */ |
4483 | void | | 4493 | void |
4484 | pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva) | | 4494 | pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva) |
4485 | { | | 4495 | { |
4486 | vaddr_t nva; | | 4496 | vaddr_t nva; |
4487 | int vr, vs, s, ctx; | | 4497 | int vr, vs, s, ctx; |
4488 | void (*rm)(struct pmap *, vaddr_t, vaddr_t, int, int); | | 4498 | void (*rm)(struct pmap *, vaddr_t, vaddr_t, int, int); |
4489 | | | 4499 | |
4490 | #ifdef DEBUG | | 4500 | #ifdef DEBUG |
4491 | if (pmapdebug & PDB_REMOVE) | | 4501 | if (pmapdebug & PDB_REMOVE) |
4492 | printf("pmap_remove[%d](%p, 0x%lx, 0x%lx)\n", | | 4502 | printf("pmap_remove[%d](%p, 0x%lx, 0x%lx)\n", |
4493 | cpu_number(), pm, va, endva); | | 4503 | cpu_number(), pm, va, endva); |
4494 | #endif | | 4504 | #endif |
4495 | | | 4505 | |
4496 | if (!CPU_HAS_SRMMU) | | 4506 | if (!CPU_HAS_SRMMU) |
4497 | write_user_windows(); | | 4507 | write_user_windows(); |
4498 | | | 4508 | |
4499 | if (pm == pmap_kernel()) { | | 4509 | if (pm == pmap_kernel()) { |
4500 | /* | | 4510 | /* |
4501 | * Removing from kernel address space. | | 4511 | * Removing from kernel address space. |
4502 | */ | | 4512 | */ |
4503 | rm = pmap_rmk; | | 4513 | rm = pmap_rmk; |
4504 | } else { | | 4514 | } else { |
4505 | /* | | 4515 | /* |
4506 | * Removing from user address space. | | 4516 | * Removing from user address space. |
4507 | */ | | 4517 | */ |
4508 | rm = pmap_rmu; | | 4518 | rm = pmap_rmu; |
4509 | } | | 4519 | } |
4510 | | | 4520 | |
4511 | ctx = getcontext(); | | 4521 | ctx = getcontext(); |
4512 | s = splvm(); /* XXX conservative */ | | 4522 | s = splvm(); /* XXX conservative */ |
4513 | PMAP_LOCK(); | | 4523 | PMAP_LOCK(); |
4514 | for (; va < endva; va = nva) { | | 4524 | for (; va < endva; va = nva) { |
4515 | /* do one virtual segment at a time */ | | 4525 | /* do one virtual segment at a time */ |
4516 | vr = VA_VREG(va); | | 4526 | vr = VA_VREG(va); |
4517 | vs = VA_VSEG(va); | | 4527 | vs = VA_VSEG(va); |
4518 | nva = VSTOVA(vr, vs + 1); | | 4528 | nva = VSTOVA(vr, vs + 1); |
4519 | if (nva == 0 || nva > endva) | | 4529 | if (nva == 0 || nva > endva) |
4520 | nva = endva; | | 4530 | nva = endva; |
4521 | if (pm->pm_regmap[vr].rg_nsegmap != 0) | | 4531 | if (pm->pm_regmap[vr].rg_nsegmap != 0) |
4522 | (*rm)(pm, va, nva, vr, vs); | | 4532 | (*rm)(pm, va, nva, vr, vs); |
4523 | } | | 4533 | } |
4524 | PMAP_UNLOCK(); | | 4534 | PMAP_UNLOCK(); |
4525 | splx(s); | | 4535 | splx(s); |
4526 | setcontext(ctx); | | 4536 | setcontext(ctx); |
4527 | } | | 4537 | } |
4528 | | | 4538 | |
4529 | /* | | 4539 | /* |
4530 | * It is the same amount of work to cache_flush_page 16 pages | | 4540 | * It is the same amount of work to cache_flush_page 16 pages |
4531 | * as to cache_flush_segment 1 segment, assuming a 64K cache size | | 4541 | * as to cache_flush_segment 1 segment, assuming a 64K cache size |
4532 | * and a 4K page size or a 128K cache size and 8K page size. | | 4542 | * and a 4K page size or a 128K cache size and 8K page size. |
4533 | */ | | 4543 | */ |
4534 | #define PMAP_SFL_THRESHOLD 16 /* if > magic, use cache_flush_segment */ | | 4544 | #define PMAP_SFL_THRESHOLD 16 /* if > magic, use cache_flush_segment */ |
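/*
 * Worked form of the comment above: a 64 KB virtually-addressed
 * cache holds 64K/4K = 16 pages' worth of lines, so flushing one
 * whole segment costs about the same as 16 individual 4 KB page
 * flushes (similarly 128K/8K = 16) -- hence the threshold of 16.
 */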
4535 | | | 4545 | |
4536 | /* | | 4546 | /* |
4537 | * Remove a range contained within a single segment. | | 4547 | * Remove a range contained within a single segment. |
4538 | * These are egregiously complicated routines. | | 4548 | * These are egregiously complicated routines. |
4539 | */ | | 4549 | */ |
4540 | | | 4550 | |
4541 | #if defined(SUN4) || defined(SUN4C) | | 4551 | #if defined(SUN4) || defined(SUN4C) |
4542 | | | 4552 | |
4543 | /* remove from kernel */ | | 4553 | /* remove from kernel */ |
4544 | /*static*/ void | | 4554 | /*static*/ void |
4545 | pmap_rmk4_4c(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs) | | 4555 | pmap_rmk4_4c(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs) |
4546 | { | | 4556 | { |
4547 | int pte, mmupte, *ptep, perpage, npg; | | 4557 | int pte, mmupte, *ptep, perpage, npg; |
4548 | struct vm_page *pg; | | 4558 | struct vm_page *pg; |
4549 | int nleft, pmeg, inmmu; | | 4559 | int nleft, pmeg, inmmu; |
4550 | struct regmap *rp; | | 4560 | struct regmap *rp; |
4551 | struct segmap *sp; | | 4561 | struct segmap *sp; |
4552 | | | 4562 | |
4553 | rp = &pm->pm_regmap[vr]; | | 4563 | rp = &pm->pm_regmap[vr]; |
4554 | sp = &rp->rg_segmap[vs]; | | 4564 | sp = &rp->rg_segmap[vs]; |
4555 | | | 4565 | |
4556 | if (rp->rg_nsegmap == 0) | | 4566 | if (rp->rg_nsegmap == 0) |
4557 | return; | | 4567 | return; |
4558 | if ((nleft = sp->sg_npte) == 0) | | 4568 | if ((nleft = sp->sg_npte) == 0) |
4559 | return; | | 4569 | return; |
4560 | pmeg = sp->sg_pmeg; | | 4570 | pmeg = sp->sg_pmeg; |
4561 | inmmu = pmeg != seginval; | | 4571 | inmmu = pmeg != seginval; |
4562 | ptep = &sp->sg_pte[VA_VPG(va)]; | | 4572 | ptep = &sp->sg_pte[VA_VPG(va)]; |
4563 | | | 4573 | |
4564 | /* decide how to flush cache */ | | 4574 | /* decide how to flush cache */ |
4565 | npg = (endva - va) >> PGSHIFT; | | 4575 | npg = (endva - va) >> PGSHIFT; |
4566 | if (!inmmu) { | | 4576 | if (!inmmu) { |
4567 | perpage = 0; | | 4577 | perpage = 0; |
4568 | } else if (npg > PMAP_SFL_THRESHOLD) { | | 4578 | } else if (npg > PMAP_SFL_THRESHOLD) { |
4569 | /* flush the whole segment */ | | 4579 | /* flush the whole segment */ |
4570 | perpage = 0; | | 4580 | perpage = 0; |
4571 | cache_flush_segment(vr, vs, 0); | | 4581 | cache_flush_segment(vr, vs, 0); |
4572 | } else { | | 4582 | } else { |
4573 | /* flush each page individually; some never need flushing */ | | 4583 | /* flush each page individually; some never need flushing */ |
4574 | perpage = (CACHEINFO.c_vactype != VAC_NONE); | | 4584 | perpage = (CACHEINFO.c_vactype != VAC_NONE); |
4575 | } | | 4585 | } |
4576 | | | 4586 | |
4577 | for (; va < endva; va += NBPG, ptep++) { | | 4587 | for (; va < endva; va += NBPG, ptep++) { |
4578 | pte = *ptep; | | 4588 | pte = *ptep; |
4579 | mmupte = inmmu ? getpte4(va) : 0; | | 4589 | mmupte = inmmu ? getpte4(va) : 0; |
4580 | if ((pte & PG_V) == 0) { | | 4590 | if ((pte & PG_V) == 0) { |
4581 | #ifdef DIAGNOSTIC | | 4591 | #ifdef DIAGNOSTIC |
4582 | if (inmmu && (mmupte & PG_V) != 0) | | 4592 | if (inmmu && (mmupte & PG_V) != 0) |
4583 | printf("rmk: inconsistent ptes va=%lx\n", va); | | 4593 | printf("rmk: inconsistent ptes va=%lx\n", va); |
#endif
			continue;
		}
		if ((pte & PG_TYPE) == PG_OBMEM) {
			/* if cacheable, flush page as needed */
			if (perpage && (mmupte & PG_NC) == 0)
				cache_flush_page(va, 0);
			if ((pg = pvhead4_4c(pte)) != NULL) {
				if (inmmu)
					VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(mmupte);
				pv_unlink4_4c(pg, pm, va);
			}
		}
		nleft--;
#ifdef DIAGNOSTIC
		if (nleft < 0)
			panic("pmap_rmk: too many PTEs in segment; "
			    "va 0x%lx; endva 0x%lx", va, endva);
#endif
		if (pte & PG_WIRED) {
			sp->sg_nwired--;
			pm->pm_stats.wired_count--;
		}

		if (inmmu)
			setpte4(va, 0);
		*ptep = 0;
		pm->pm_stats.resident_count--;
	}

#ifdef DIAGNOSTIC
	if (sp->sg_nwired > nleft || sp->sg_nwired < 0)
		panic("pmap_rmk: pm %p, va %lx: nleft=%d, nwired=%d",
		    pm, va, nleft, sp->sg_nwired);
#endif
	if ((sp->sg_npte = nleft) == 0)
		pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
	else if (sp->sg_nwired == 0) {
		if (sp->sg_pmeg != seginval)
			mmu_pmeg_unlock(sp->sg_pmeg);
	}
}

#endif /* SUN4 || SUN4C */
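
/*
 * Editorial sketch (not part of pmap.c): the flush-policy decision that
 * pmap_rmk4_4c makes above, isolated into a standalone helper so the
 * heuristic is easier to see.  The threshold value and the flush hook
 * are assumptions for illustration; the real PMAP_SFL_THRESHOLD and
 * cache_flush_segment() are machine-dependent.
 */
#define SKETCH_SFL_THRESHOLD	16	/* assumed; real value is MD */

/*
 * Returns nonzero if the removal loop should flush page-by-page.
 * When many pages are going away at once, a single whole-segment flush
 * is cheaper, so it is issued here and per-page flushing is disabled.
 */
static int
sketch_choose_flush(int inmmu, int npg, int vac_none,
    void (*flush_segment)(int, int), int vr, int vs)
{
	if (!inmmu)
		return 0;		/* nothing in the MMU: no cache aliases */
	if (npg > SKETCH_SFL_THRESHOLD) {
		flush_segment(vr, vs);	/* flush the whole segment once */
		return 0;
	}
	return !vac_none;	/* per-page only on virtually-addressed caches */
}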

#if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_rmk */

/* remove from kernel (4m) */
/* pm is already locked */
/*static*/ void
pmap_rmk4m(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
{
	int tpte, perpage, npg;
	struct vm_page *pg;
	struct regmap *rp;
	struct segmap *sp;

	rp = &pm->pm_regmap[vr];
	sp = &rp->rg_segmap[vs];
	if (rp->rg_nsegmap == 0)
		return;

	/* decide how to flush cache */
	npg = (endva - va) >> PGSHIFT;
	if (npg > PMAP_SFL_THRESHOLD) {
		/* flush the whole segment */
		perpage = 0;
		if (CACHEINFO.c_vactype != VAC_NONE)
			cache_flush_segment(vr, vs, 0);
	} else {
		/* flush each page individually; some never need flushing */
		perpage = (CACHEINFO.c_vactype != VAC_NONE);
	}
	while (va < endva) {
		tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
		if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
#ifdef DEBUG
			if ((pmapdebug & PDB_SANITYCHK) &&
			    (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
				panic("pmap_rmk: Spurious kTLB entry for 0x%lx",
				    va);
#endif
			va += NBPG;
			continue;
		}
		if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
			/* if cacheable, flush page as needed */
			if (perpage && (tpte & SRMMU_PG_C))
				cache_flush_page(va, 0);
			if ((pg = pvhead4m(tpte)) != NULL) {
				VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4M(tpte);
				pv_unlink4m(pg, pm, va);
			}
		}
		setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
		    SRMMU_TEINVALID, 1, 0, CPUSET_ALL);
		pm->pm_stats.resident_count--;
		va += NBPG;
	}
}
#endif /* SUN4M || SUN4D */
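
/*
 * Editorial sketch (not part of pmap.c): the entry-type test used by the
 * SRMMU loop above.  The SPARC Reference MMU keeps a 2-bit ET field in
 * the low bits of every table entry: 0 = invalid, 1 = page table
 * descriptor (PTD), 2 = page table entry (PTE).  SRMMU_TETYPE masks that
 * field and SRMMU_TEPTE is the PTE encoding, so pmap_rmk4m skips anything
 * that is not a live mapping.  The SKETCH_* names are stand-ins.
 */
#define SKETCH_ET_MASK		0x3	/* plays the role of SRMMU_TETYPE */
#define SKETCH_ET_INVALID	0	/* SRMMU_TEINVALID */
#define SKETCH_ET_PTD		1	/* SRMMU_TEPTD */
#define SKETCH_ET_PTE		2	/* SRMMU_TEPTE */

/* Nonzero iff the software PTE describes a live SRMMU mapping. */
static int
sketch_is_live_pte(int tpte)
{
	return (tpte & SKETCH_ET_MASK) == SKETCH_ET_PTE;
}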

#if defined(SUN4) || defined(SUN4C)

/* remove from user */
/*static*/ void
pmap_rmu4_4c(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
{
	int *ptep, pteva, pte, perpage, npg;
	struct vm_page *pg;
	int nleft, pmeg, inmmu;
	struct regmap *rp;
	struct segmap *sp;

	rp = &pm->pm_regmap[vr];
	if (rp->rg_nsegmap == 0)
		return;
	sp = &rp->rg_segmap[vs];
	if ((nleft = sp->sg_npte) == 0)
		return;
	pmeg = sp->sg_pmeg;
	inmmu = pmeg != seginval;

	/*
	 * If the PTEs are in the MMU, invalidate them in hardware,
	 * update the ref & mod bits, and flush the cache if required.
	 */
	if (!inmmu) {
		perpage = 0;
		pteva = 0;
	} else if (CTX_USABLE(pm, rp)) {
		/* process has a context, must flush cache */
		npg = (endva - va) >> PGSHIFT;
		setcontext4(pm->pm_ctxnum);
		if ((pm->pm_flags & PMAP_USERCACHECLEAN) != 0)
			perpage = 0;
		else if (npg > PMAP_SFL_THRESHOLD) {
			perpage = 0;	/* flush the whole segment */
			cache_flush_segment(vr, vs, pm->pm_ctxnum);
		} else
			perpage = (CACHEINFO.c_vactype != VAC_NONE);
		pteva = va;
	} else {
		/* no context, use context 0; cache flush unnecessary */
		setcontext4(0);
		if (HASSUN4_MMU3L)
			setregmap(0, tregion);
		/* XXX use per-CPU pteva? */
		setsegmap(0, pmeg);
		pteva = VA_VPG(va) << PGSHIFT;
		perpage = 0;
	}

	ptep = sp->sg_pte + VA_VPG(va);
	for (; va < endva; ptep++, pteva += NBPG, va += NBPG) {
		int mmupte;

		pte = *ptep;
		mmupte = inmmu ? getpte4(pteva) : 0;

		if ((pte & PG_V) == 0) {
#ifdef DIAGNOSTIC
			if (inmmu && (mmupte & PG_V) != 0)
				printf("pmap_rmu: pte=%x, mmupte=%x\n",
				    pte, getpte4(pteva));
#endif
			continue;
		}
		if ((pte & PG_TYPE) == PG_OBMEM) {
			/* if cacheable, flush page as needed */
			if (perpage && (mmupte & PG_NC) == 0)
				cache_flush_page(va, pm->pm_ctxnum);
			if ((pg = pvhead4_4c(pte)) != NULL) {
				if (inmmu)
					VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(mmupte);
				pv_unlink4_4c(pg, pm, va);
			}
		}
		nleft--;
#ifdef DIAGNOSTIC
		if (nleft < 0)
			panic("pmap_rmu: too many PTEs in segment; "
			    "va 0x%lx; endva 0x%lx", va, endva);
#endif
		if (inmmu)
			setpte4(pteva, 0);

		if (pte & PG_WIRED) {
			sp->sg_nwired--;
			pm->pm_stats.wired_count--;
		}
		*ptep = 0;
		pm->pm_stats.resident_count--;
	}

#ifdef DIAGNOSTIC
	if (sp->sg_nwired > nleft || sp->sg_nwired < 0)
		panic("pmap_rmu: pm %p, va %lx: nleft=%d, nwired=%d",
		    pm, va, nleft, sp->sg_nwired);
#endif
	if ((sp->sg_npte = nleft) == 0)
		pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
	else if (sp->sg_nwired == 0) {
		if (sp->sg_pmeg != seginval)
			mmu_pmeg_unlock(sp->sg_pmeg);
	}
}

#endif /* SUN4 || SUN4C */
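
/*
 * Editorial sketch (not part of pmap.c): the "borrowed segment" address
 * computation in pmap_rmu4_4c above.  When the pmap owns no hardware
 * context, the PMEG is temporarily installed at virtual segment 0 of
 * context 0, so a page's hardware PTE becomes reachable through
 * getpte4()/setpte4() at an address built from the page-within-segment
 * index alone.  The shift and pages-per-segment values below are
 * assumptions for illustration; the real PGSHIFT, NBPG, and VA_VPG()
 * are machine-dependent.
 */
#define SKETCH_PGSHIFT	13		/* assumed page shift (8K pages) */
#define SKETCH_NPTESG	32		/* assumed pages per segment */

/* Address at which va's PTE is reachable via the borrowed segment 0. */
static unsigned long
sketch_borrowed_pteva(unsigned long va)
{
	/* page index within the segment, mirroring VA_VPG(va) */
	unsigned long vpg = (va >> SKETCH_PGSHIFT) & (SKETCH_NPTESG - 1);

	/* mirrors pteva = VA_VPG(va) << PGSHIFT in the function above */
	return vpg << SKETCH_PGSHIFT;
}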

#if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_rmu */
/* remove from user */
/* Note: pm is already locked */
/*static*/ void
pmap_rmu4m(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
{
	int *pte0, perpage, npg;
	struct vm_page *pg;
	int nleft;
	struct regmap *rp;
	struct segmap *sp;

	rp = &pm->pm_regmap[vr];
	if (rp->rg_nsegmap == 0)
		return;
	sp = &rp->rg_segmap[vs];
	if ((nleft = sp->sg_npte) == 0)
		return;
	pte0 = sp->sg_pte;

	/*
	 * Invalidate PTE in MMU pagetables. Flush cache if necessary.
	 */
	if (pm->pm_ctx && (pm->pm_flags & PMAP_USERCACHECLEAN) == 0) {
		/* process has a context, must flush cache */
		if (CACHEINFO.c_vactype != VAC_NONE) {
			npg = (endva - va) >> PGSHIFT;
			if (npg > PMAP_SFL_THRESHOLD) {
				perpage = 0;	/* flush the whole segment */