| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: pmap_segtab.c,v 1.4 2011/06/23 08:11:56 matt Exp $ */ | | 1 | /* $NetBSD: pmap_segtab.c,v 1.4.8.1 2012/07/05 18:50:52 riz Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
9 | * NASA Ames Research Center and by Chris G. Demetriou. | | 9 | * NASA Ames Research Center and by Chris G. Demetriou. |
10 | * | | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | | 11 | * Redistribution and use in source and binary forms, with or without |
12 | * modification, are permitted provided that the following conditions | | 12 | * modification, are permitted provided that the following conditions |
13 | * are met: | | 13 | * are met: |
14 | * 1. Redistributions of source code must retain the above copyright | | 14 | * 1. Redistributions of source code must retain the above copyright |
| @@ -57,27 +57,27 @@ | | | @@ -57,27 +57,27 @@ |
57 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 57 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
58 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 58 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
59 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 59 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 60 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 61 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 62 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
63 | * SUCH DAMAGE. | | 63 | * SUCH DAMAGE. |
64 | * | | 64 | * |
65 | * @(#)pmap.c 8.4 (Berkeley) 1/26/94 | | 65 | * @(#)pmap.c 8.4 (Berkeley) 1/26/94 |
66 | */ | | 66 | */ |
67 | | | 67 | |
68 | #include <sys/cdefs.h> | | 68 | #include <sys/cdefs.h> |
69 | | | 69 | |
70 | __KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.4 2011/06/23 08:11:56 matt Exp $"); | | 70 | __KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.4.8.1 2012/07/05 18:50:52 riz Exp $"); |
71 | | | 71 | |
72 | /* | | 72 | /* |
73 | * Manages physical address maps. | | 73 | * Manages physical address maps. |
74 | * | | 74 | * |
75 | * In addition to hardware address maps, this | | 75 | * In addition to hardware address maps, this |
76 | * module is called upon to provide software-use-only | | 76 | * module is called upon to provide software-use-only |
77 | * maps which may or may not be stored in the same | | 77 | * maps which may or may not be stored in the same |
78 | * form as hardware maps. These pseudo-maps are | | 78 | * form as hardware maps. These pseudo-maps are |
79 | * used to store intermediate results from copy | | 79 | * used to store intermediate results from copy |
80 | * operations to and from address spaces. | | 80 | * operations to and from address spaces. |
81 | * | | 81 | * |
82 | * Since the information managed by this module is | | 82 | * Since the information managed by this module is |
83 | * also stored by the logical address mapping module, | | 83 | * also stored by the logical address mapping module, |
| @@ -100,37 +100,39 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_segtab. | | | @@ -100,37 +100,39 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_segtab. |
100 | #include "opt_multiprocessor.h" | | 100 | #include "opt_multiprocessor.h" |
101 | | | 101 | |
102 | #include <sys/param.h> | | 102 | #include <sys/param.h> |
103 | #include <sys/systm.h> | | 103 | #include <sys/systm.h> |
104 | #include <sys/proc.h> | | 104 | #include <sys/proc.h> |
105 | #include <sys/mutex.h> | | 105 | #include <sys/mutex.h> |
106 | #include <sys/atomic.h> | | 106 | #include <sys/atomic.h> |
107 | | | 107 | |
108 | #include <uvm/uvm.h> | | 108 | #include <uvm/uvm.h> |
109 | | | 109 | |
110 | CTASSERT(NBPG >= sizeof(struct pmap_segtab)); | | 110 | CTASSERT(NBPG >= sizeof(struct pmap_segtab)); |
111 | | | 111 | |
112 | struct pmap_segtab_info { | | 112 | struct pmap_segtab_info { |
113 | struct pmap_segtab * volatile free_segtab; /* free list kept locally */ | | 113 | struct pmap_segtab *free_segtab; /* free list kept locally */ |
114 | #ifdef DEBUG | | 114 | #ifdef DEBUG |
115 | uint32_t nget_segtab; | | 115 | uint32_t nget_segtab; |
116 | uint32_t nput_segtab; | | 116 | uint32_t nput_segtab; |
117 | uint32_t npage_segtab; | | 117 | uint32_t npage_segtab; |
118 | #define SEGTAB_ADD(n, v) (pmap_segtab_info.n ## _segtab += (v)) | | 118 | #define SEGTAB_ADD(n, v) (pmap_segtab_info.n ## _segtab += (v)) |
119 | #else | | 119 | #else |
120 | #define SEGTAB_ADD(n, v) ((void) 0) | | 120 | #define SEGTAB_ADD(n, v) ((void) 0) |
121 | #endif | | 121 | #endif |
122 | } pmap_segtab_info; | | 122 | } pmap_segtab_info; |
123 | | | 123 | |
| | | 124 | kmutex_t pmap_segtab_lock __cacheline_aligned; |
| | | 125 | |
124 | static inline struct vm_page * | | 126 | static inline struct vm_page * |
125 | pmap_pte_pagealloc(void) | | 127 | pmap_pte_pagealloc(void) |
126 | { | | 128 | { |
127 | return pmap_md_alloc_poolpage(UVM_PGA_ZERO|UVM_PGA_USERESERVE); | | 129 | return pmap_md_alloc_poolpage(UVM_PGA_ZERO|UVM_PGA_USERESERVE); |
128 | } | | 130 | } |
129 | | | 131 | |
130 | static inline pt_entry_t * | | 132 | static inline pt_entry_t * |
131 | pmap_segmap(struct pmap *pmap, vaddr_t va) | | 133 | pmap_segmap(struct pmap *pmap, vaddr_t va) |
132 | { | | 134 | { |
133 | struct pmap_segtab *stp = pmap->pm_segtab; | | 135 | struct pmap_segtab *stp = pmap->pm_segtab; |
134 | KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va)); | | 136 | KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va)); |
135 | return stp->seg_tab[va >> SEGSHIFT]; | | 137 | return stp->seg_tab[va >> SEGSHIFT]; |
136 | } | | 138 | } |
| @@ -152,71 +154,65 @@ pmap_pte_lookup(pmap_t pmap, vaddr_t va) | | | @@ -152,71 +154,65 @@ pmap_pte_lookup(pmap_t pmap, vaddr_t va) |
152 | * is zero, the map is an actual physical | | 154 | * is zero, the map is an actual physical |
153 | * map, and may be referenced by the | | 155 | * map, and may be referenced by the |
154 | * hardware. | | 156 | * hardware. |
155 | * | | 157 | * |
156 | * If the size specified is non-zero, | | 158 | * If the size specified is non-zero, |
157 | * the map will be used in software only, and | | 159 | * the map will be used in software only, and |
158 | * is bounded by that size. | | 160 | * is bounded by that size. |
159 | */ | | 161 | */ |
160 | void | | 162 | void |
161 | pmap_segtab_alloc(pmap_t pmap) | | 163 | pmap_segtab_alloc(pmap_t pmap) |
162 | { | | 164 | { |
163 | struct pmap_segtab *stp; | | 165 | struct pmap_segtab *stp; |
164 | again: | | 166 | again: |
165 | stp = NULL; | | 167 | mutex_spin_enter(&pmap_segtab_lock); |
166 | while (__predict_true(pmap_segtab_info.free_segtab != NULL)) { | | 168 | if (__predict_true((stp = pmap_segtab_info.free_segtab) != NULL)) { |
167 | struct pmap_segtab *next_stp; | | 169 | pmap_segtab_info.free_segtab = |
168 | stp = pmap_segtab_info.free_segtab; | | 170 | (struct pmap_segtab *)stp->seg_tab[0]; |
169 | next_stp = (struct pmap_segtab *)stp->seg_tab[0]; | | 171 | stp->seg_tab[0] = NULL; |
170 | if (stp == atomic_cas_ptr(&pmap_segtab_info.free_segtab, stp, next_stp)) { | | 172 | SEGTAB_ADD(nget, 1); |
171 | SEGTAB_ADD(nget, 1); | | | |
172 | break; | | | |
173 | } | | | |
174 | } | | 173 | } |
| | | 174 | mutex_spin_exit(&pmap_segtab_lock); |
175 | | | 175 | |
176 | if (__predict_true(stp != NULL)) { | | 176 | if (__predict_false(stp == NULL)) { |
177 | stp->seg_tab[0] = NULL; | | | |
178 | } else { | | | |
179 | struct vm_page * const stp_pg = pmap_pte_pagealloc(); | | 177 | struct vm_page * const stp_pg = pmap_pte_pagealloc(); |
180 | | | 178 | |
181 | if (__predict_false(stp_pg == NULL)) { | | 179 | if (__predict_false(stp_pg == NULL)) { |
182 | /* | | 180 | /* |
183 | * XXX What else can we do? Could we deadlock here? | | 181 | * XXX What else can we do? Could we deadlock here? |
184 | */ | | 182 | */ |
185 | uvm_wait("pmap_create"); | | 183 | uvm_wait("pmap_create"); |
186 | goto again; | | 184 | goto again; |
187 | } | | 185 | } |
188 | SEGTAB_ADD(npage, 1); | | 186 | SEGTAB_ADD(npage, 1); |
189 | const paddr_t stp_pa = VM_PAGE_TO_PHYS(stp_pg); | | 187 | const paddr_t stp_pa = VM_PAGE_TO_PHYS(stp_pg); |
190 | | | 188 | |
191 | stp = (struct pmap_segtab *)POOL_PHYSTOV(stp_pa); | | 189 | stp = (struct pmap_segtab *)POOL_PHYSTOV(stp_pa); |
192 | const size_t n = NBPG / sizeof(struct pmap_segtab); | | 190 | const size_t n = NBPG / sizeof(struct pmap_segtab); |
193 | if (n > 1) { | | 191 | if (n > 1) { |
194 | /* | | 192 | /* |
195 | * link all the segtabs in this page together | | 193 | * link all the segtabs in this page together |
196 | */ | | 194 | */ |
197 | for (size_t i = 1; i < n - 1; i++) { | | 195 | for (size_t i = 1; i < n - 1; i++) { |
198 | stp[i].seg_tab[0] = (void *)&stp[i+1]; | | 196 | stp[i].seg_tab[0] = (void *)&stp[i+1]; |
199 | } | | 197 | } |
200 | /* | | 198 | /* |
201 | * Now link the new segtabs into the free segtab list. | | 199 | * Now link the new segtabs into the free segtab list. |
202 | */ | | 200 | */ |
203 | for (;;) { | | 201 | mutex_spin_enter(&pmap_segtab_lock); |
204 | void *tmp = pmap_segtab_info.free_segtab; | | 202 | stp[n-1].seg_tab[0] = (void *)pmap_segtab_info.free_segtab; |
205 | stp[n-1].seg_tab[0] = tmp; | | 203 | pmap_segtab_info.free_segtab = stp + 1; |
206 | if (tmp == atomic_cas_ptr(&pmap_segtab_info.free_segtab, tmp, stp+1)) | | | |
207 | break; | | | |
208 | } | | | |
209 | SEGTAB_ADD(nput, n - 1); | | 204 | SEGTAB_ADD(nput, n - 1); |
| | | 205 | mutex_spin_exit(&pmap_segtab_lock); |
210 | } | | 206 | } |
211 | } | | 207 | } |
212 | | | 208 | |
213 | #ifdef PARANOIADIAG | | 209 | #ifdef PARANOIADIAG |
214 | for (i = 0; i < PMAP_SEGTABSIZE; i++) { | | 210 | for (i = 0; i < PMAP_SEGTABSIZE; i++) { |
215 | if (stp->seg_tab[i] != 0) | | 211 | if (stp->seg_tab[i] != 0) |
216 | panic("pmap_create: pm_segtab.seg_tab[%zu] != 0"); | | 212 | panic("pmap_create: pm_segtab.seg_tab[%zu] != 0"); |
217 | } | | 213 | } |
218 | #endif | | 214 | #endif |
219 | | | 215 | |
220 | pmap->pm_segtab = stp; | | 216 | pmap->pm_segtab = stp; |
221 | } | | 217 | } |
222 | | | 218 | |
| @@ -245,34 +241,31 @@ pmap_segtab_free(pmap_t pmap) | | | @@ -245,34 +241,31 @@ pmap_segtab_free(pmap_t pmap) |
245 | panic("pmap_destroy: segmap not empty"); | | 241 | panic("pmap_destroy: segmap not empty"); |
246 | } | | 242 | } |
247 | #endif | | 243 | #endif |
248 | | | 244 | |
249 | pa = POOL_VTOPHYS(pte); | | 245 | pa = POOL_VTOPHYS(pte); |
250 | uvm_pagefree(PHYS_TO_VM_PAGE(pa)); | | 246 | uvm_pagefree(PHYS_TO_VM_PAGE(pa)); |
251 | | | 247 | |
252 | stp->seg_tab[i] = NULL; | | 248 | stp->seg_tab[i] = NULL; |
253 | } | | 249 | } |
254 | | | 250 | |
255 | /* | | 251 | /* |
256 | * Insert the segtab into the segtab freelist. | | 252 | * Insert the segtab into the segtab freelist. |
257 | */ | | 253 | */ |
258 | for (;;) { | | 254 | mutex_spin_enter(&pmap_segtab_lock); |
259 | void *tmp = pmap_segtab_info.free_segtab; | | 255 | stp->seg_tab[0] = (void *) pmap_segtab_info.free_segtab; |
260 | stp->seg_tab[0] = tmp; | | 256 | pmap_segtab_info.free_segtab = stp; |
261 | if (tmp == atomic_cas_ptr(&pmap_segtab_info.free_segtab, tmp, stp)) { | | 257 | SEGTAB_ADD(nput, 1); |
262 | SEGTAB_ADD(nput, 1); | | 258 | mutex_spin_exit(&pmap_segtab_lock); |
263 | break; | | | |
264 | } | | | |
265 | } | | | |
266 | } | | 259 | } |
267 | | | 260 | |
268 | /* | | 261 | /* |
269 | * Make a new pmap (vmspace) active for the given process. | | 262 | * Make a new pmap (vmspace) active for the given process. |
270 | */ | | 263 | */ |
271 | void | | 264 | void |
272 | pmap_segtab_activate(struct pmap *pm, struct lwp *l) | | 265 | pmap_segtab_activate(struct pmap *pm, struct lwp *l) |
273 | { | | 266 | { |
274 | if (l == curlwp) { | | 267 | if (l == curlwp) { |
275 | if (pm == pmap_kernel()) { | | 268 | if (pm == pmap_kernel()) { |
276 | l->l_cpu->ci_pmap_user_segtab = (void*)0xdeadbabe; | | 269 | l->l_cpu->ci_pmap_user_segtab = (void*)0xdeadbabe; |
277 | } else { | | 270 | } else { |
278 | KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap); | | 271 | KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap); |