| @@ -1,367 +1,367 @@ | | | @@ -1,367 +1,367 @@ |
1 | /* $NetBSD: uvm_emap.c,v 1.3 2009/07/19 15:17:29 rmind Exp $ */ | | 1 | /* $NetBSD: uvm_emap.c,v 1.4 2009/07/20 03:51:42 kiyohara Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2009 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2009 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Mindaugas Rasiukevicius and Andrew Doran. | | 8 | * by Mindaugas Rasiukevicius and Andrew Doran. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* | | 32 | /* |
33 | * UVM ephemeral mapping interface. | | 33 | * UVM ephemeral mapping interface. |
34 | * | | 34 | * |
35 | * Generic (more expensive) stubs are implemented for architectures which | | 35 | * Generic (more expensive) stubs are implemented for architectures which |
36 | * do not support emap in their pmap(9) implementation. | | 36 | * do not support emap in their pmap(9) implementation. |
37 | * | | 37 | * |
38 | * Note that uvm_emap_update() is called from the lower pmap(9) layer, | | 38 | * Note that uvm_emap_update() is called from the lower pmap(9) layer, |
39 | * while the other functions call into pmap(9). The typical update pattern in a pmap is: | | 39 | * while the other functions call into pmap(9). The typical update pattern in a pmap is: |
40 | * | | 40 | * |
41 | * u_int gen = uvm_emap_gen_return(); | | 41 | * u_int gen = uvm_emap_gen_return(); |
42 | * tlbflush(); | | 42 | * tlbflush(); |
43 | * uvm_emap_update(gen); | | 43 | * uvm_emap_update(gen); |
44 | * | | 44 | * |
45 | * It is also used from IPI context, therefore the functions must be safe. | | 45 | * It is also used from IPI context, therefore the functions must be safe. |
46 | */ | | 46 | */ |
47 | | | 47 | |
48 | #include <sys/cdefs.h> | | 48 | #include <sys/cdefs.h> |
49 | __KERNEL_RCSID(0, "$NetBSD: uvm_emap.c,v 1.3 2009/07/19 15:17:29 rmind Exp $"); | | 49 | __KERNEL_RCSID(0, "$NetBSD: uvm_emap.c,v 1.4 2009/07/20 03:51:42 kiyohara Exp $"); |
50 | | | 50 | |
51 | #include <sys/param.h> | | 51 | #include <sys/param.h> |
52 | #include <sys/kernel.h> | | 52 | #include <sys/kernel.h> |
53 | | | 53 | |
54 | #include <sys/atomic.h> | | 54 | #include <sys/atomic.h> |
55 | #include <sys/lwp.h> | | 55 | #include <sys/lwp.h> |
56 | #include <sys/vmem.h> | | 56 | #include <sys/vmem.h> |
57 | #include <sys/types.h> | | 57 | #include <sys/types.h> |
58 | | | 58 | |
59 | #include <uvm/uvm.h> | | 59 | #include <uvm/uvm.h> |
60 | #include <uvm/uvm_extern.h> | | 60 | #include <uvm/uvm_extern.h> |
61 | | | 61 | |
62 | /* XXX: Arbitrary. */ | | 62 | /* XXX: Arbitrary. */ |
63 | #ifdef _LP64 | | 63 | #ifdef _LP64 |
64 | #define UVM_EMAP_SIZE (128 * 1024 * 1024) /* 128 MB */ | | 64 | #define UVM_EMAP_SIZE (128 * 1024 * 1024) /* 128 MB */ |
65 | #else | | 65 | #else |
66 | #define UVM_EMAP_SIZE (32 * 1024 * 1024) /* 32 MB */ | | 66 | #define UVM_EMAP_SIZE (32 * 1024 * 1024) /* 32 MB */ |
67 | #endif | | 67 | #endif |
68 | | | 68 | |
69 | static u_int _uvm_emap_gen[COHERENCY_UNIT - sizeof(u_int)] | | 69 | static u_int _uvm_emap_gen[COHERENCY_UNIT - sizeof(u_int)] |
70 | __aligned(COHERENCY_UNIT); /* own cache line, avoids false sharing */ | | 70 | __aligned(COHERENCY_UNIT); /* own cache line, avoids false sharing */ |
71 | | | 71 | |
72 | #define uvm_emap_gen (_uvm_emap_gen[0]) | | 72 | #define uvm_emap_gen (_uvm_emap_gen[0]) |
73 | | | 73 | |
74 | static u_int uvm_emap_size = UVM_EMAP_SIZE; | | 74 | u_int uvm_emap_size = UVM_EMAP_SIZE; |
75 | static vaddr_t uvm_emap_va; | | 75 | static vaddr_t uvm_emap_va; |
76 | static vmem_t * uvm_emap_vmem; | | 76 | static vmem_t * uvm_emap_vmem; |
77 | | | 77 | |
78 | /* | | 78 | /* |
79 | * uvm_emap_sysinit: initialize the emap subsystem. | | 79 | * uvm_emap_sysinit: initialize the emap subsystem. |
80 | */ | | 80 | */ |
81 | void | | 81 | void |
82 | uvm_emap_sysinit(void) | | 82 | uvm_emap_sysinit(void) |
83 | { | | 83 | { |
84 | struct uvm_cpu *ucpu; | | 84 | struct uvm_cpu *ucpu; |
85 | size_t qmax; | | 85 | size_t qmax; |
86 | u_int i; | | 86 | u_int i; |
87 | | | 87 | |
88 | uvm_emap_size = roundup(uvm_emap_size, PAGE_SIZE); | | 88 | uvm_emap_size = roundup(uvm_emap_size, PAGE_SIZE); |
89 | qmax = 16 * PAGE_SIZE; | | 89 | qmax = 16 * PAGE_SIZE; |
90 | | | 90 | |
91 | uvm_emap_va = uvm_km_alloc(kernel_map, uvm_emap_size, 0, | | 91 | uvm_emap_va = uvm_km_alloc(kernel_map, uvm_emap_size, 0, |
92 | UVM_KMF_VAONLY | UVM_KMF_WAITVA); | | 92 | UVM_KMF_VAONLY | UVM_KMF_WAITVA); |
93 | if (uvm_emap_va == 0) { | | 93 | if (uvm_emap_va == 0) { |
94 | panic("uvm_emap_init: KVA allocation failed"); | | 94 | panic("uvm_emap_init: KVA allocation failed"); |
95 | } | | 95 | } |
96 | | | 96 | |
97 | uvm_emap_vmem = vmem_create("emap", uvm_emap_va, uvm_emap_size, | | 97 | uvm_emap_vmem = vmem_create("emap", uvm_emap_va, uvm_emap_size, |
98 | PAGE_SIZE, NULL, NULL, NULL, qmax, VM_SLEEP, IPL_NONE); | | 98 | PAGE_SIZE, NULL, NULL, NULL, qmax, VM_SLEEP, IPL_NONE); |
99 | if (uvm_emap_vmem == NULL) { | | 99 | if (uvm_emap_vmem == NULL) { |
100 | panic("uvm_emap_init: vmem creation failed"); | | 100 | panic("uvm_emap_init: vmem creation failed"); |
101 | } | | 101 | } |
102 | | | 102 | |
103 | /* Initial generation value is 1. */ | | 103 | /* Initial generation value is 1. */ |
104 | uvm_emap_gen = 1; | | 104 | uvm_emap_gen = 1; |
105 | for (i = 0; i < MAXCPUS; i++) { | | 105 | for (i = 0; i < MAXCPUS; i++) { |
106 | ucpu = &uvm.cpus[i]; | | 106 | ucpu = &uvm.cpus[i]; |
107 | ucpu->emap_gen = 1; | | 107 | ucpu->emap_gen = 1; |
108 | } | | 108 | } |
109 | } | | 109 | } |
110 | | | 110 | |
111 | /* | | 111 | /* |
112 | * uvm_emap_alloc: allocate a window. | | 112 | * uvm_emap_alloc: allocate a window. |
113 | */ | | 113 | */ |
114 | vaddr_t | | 114 | vaddr_t |
115 | uvm_emap_alloc(vsize_t size, bool waitok) | | 115 | uvm_emap_alloc(vsize_t size, bool waitok) |
116 | { | | 116 | { |
117 | | | 117 | |
118 | KASSERT(size > 0); | | 118 | KASSERT(size > 0); |
119 | KASSERT(round_page(size) == size); | | 119 | KASSERT(round_page(size) == size); |
120 | | | 120 | |
121 | return vmem_alloc(uvm_emap_vmem, size, | | 121 | return vmem_alloc(uvm_emap_vmem, size, |
122 | VM_INSTANTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP)); | | 122 | VM_INSTANTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP)); |
123 | } | | 123 | } |
124 | | | 124 | |
125 | /* | | 125 | /* |
126 | * uvm_emap_free: free a window. | | 126 | * uvm_emap_free: free a window. |
127 | */ | | 127 | */ |
128 | void | | 128 | void |
129 | uvm_emap_free(vaddr_t va, size_t size) | | 129 | uvm_emap_free(vaddr_t va, size_t size) |
130 | { | | 130 | { |
131 | | | 131 | |
132 | KASSERT(va >= uvm_emap_va); | | 132 | KASSERT(va >= uvm_emap_va); |
133 | KASSERT(size <= uvm_emap_size); | | 133 | KASSERT(size <= uvm_emap_size); |
134 | KASSERT(va + size <= uvm_emap_va + uvm_emap_size); | | 134 | KASSERT(va + size <= uvm_emap_va + uvm_emap_size); |
135 | | | 135 | |
136 | vmem_free(uvm_emap_vmem, va, size); | | 136 | vmem_free(uvm_emap_vmem, va, size); |
137 | } | | 137 | } |
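Taken together with the generation primitives below, the allocator above defines a simple window lifecycle: allocate a window, enter the pages, produce a generation, consume it before touching the mapping, then tear the window down. The following is a minimal sketch of a hypothetical consumer under __HAVE_PMAP_EMAP; the function name emap_read_example and its caller-supplied page array are illustrative assumptions, not part of this file.

/*
 * Hypothetical consumer sketch: map npages read-only through an
 * ephemeral window, read through it, and tear the window down.
 */
static int
emap_read_example(struct vm_page **pgs, u_int npages)
{
	vaddr_t va;
	vsize_t len = (vsize_t)npages << PAGE_SHIFT;
	u_int gen;

	va = uvm_emap_alloc(len, true);		/* may sleep for KVA */
	if (va == 0)
		return ENOMEM;

	uvm_emap_enter(va, pgs, npages);	/* enter mappings, no TLB flush */
	gen = uvm_emap_produce();		/* publish a new generation */
	uvm_emap_consume(gen);			/* sync current CPU and LWP */

	/* ... read the data through (const void *)va ... */

	uvm_emap_remove(va, len);		/* remove the mappings */
	uvm_emap_free(va, len);			/* release the window */
	return 0;
}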
138 | | | 138 | |
139 | #ifdef __HAVE_PMAP_EMAP | | 139 | #ifdef __HAVE_PMAP_EMAP |
140 | | | 140 | |
141 | /* | | 141 | /* |
142 | * uvm_emap_enter: enter a new mapping, without TLB flush. | | 142 | * uvm_emap_enter: enter a new mapping, without TLB flush. |
143 | */ | | 143 | */ |
144 | void | | 144 | void |
145 | uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages) | | 145 | uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages) |
146 | { | | 146 | { |
147 | paddr_t pa; | | 147 | paddr_t pa; |
148 | u_int n; | | 148 | u_int n; |
149 | | | 149 | |
150 | for (n = 0; n < npages; n++, va += PAGE_SIZE) { | | 150 | for (n = 0; n < npages; n++, va += PAGE_SIZE) { |
151 | pa = VM_PAGE_TO_PHYS(pgs[n]); | | 151 | pa = VM_PAGE_TO_PHYS(pgs[n]); |
152 | pmap_emap_enter(va, pa, VM_PROT_READ); | | 152 | pmap_emap_enter(va, pa, VM_PROT_READ); |
153 | } | | 153 | } |
154 | } | | 154 | } |
155 | | | 155 | |
156 | /* | | 156 | /* |
157 | * uvm_emap_remove: remove a mapping. | | 157 | * uvm_emap_remove: remove a mapping. |
158 | */ | | 158 | */ |
159 | void | | 159 | void |
160 | uvm_emap_remove(vaddr_t sva, vsize_t len) | | 160 | uvm_emap_remove(vaddr_t sva, vsize_t len) |
161 | { | | 161 | { |
162 | | | 162 | |
163 | pmap_emap_remove(sva, len); | | 163 | pmap_emap_remove(sva, len); |
164 | } | | 164 | } |
165 | | | 165 | |
166 | /* | | 166 | /* |
167 | * uvm_emap_gen_return: get the global generation number. | | 167 | * uvm_emap_gen_return: get the global generation number. |
168 | * | | 168 | * |
169 | * => can be called from IPI handler, therefore function must be safe. | | 169 | * => can be called from IPI handler, therefore function must be safe. |
170 | */ | | 170 | */ |
171 | u_int | | 171 | u_int |
172 | uvm_emap_gen_return(void) | | 172 | uvm_emap_gen_return(void) |
173 | { | | 173 | { |
174 | u_int gen; | | 174 | u_int gen; |
175 | | | 175 | |
176 | gen = uvm_emap_gen; | | 176 | gen = uvm_emap_gen; |
177 | if (__predict_false(gen == UVM_EMAP_INACTIVE)) { | | 177 | if (__predict_false(gen == UVM_EMAP_INACTIVE)) { |
178 | /* | | 178 | /* |
179 | * Instead of looping, just increment it on our side. | | 179 | * Instead of looping, just increment it on our side. |
180 | * Another thread could race and increment it again, | | 180 | * Another thread could race and increment it again, |
181 | * but without any negative effect. | | 181 | * but without any negative effect. |
182 | */ | | 182 | */ |
183 | gen = atomic_inc_uint_nv(&uvm_emap_gen); | | 183 | gen = atomic_inc_uint_nv(&uvm_emap_gen); |
184 | } | | 184 | } |
185 | KASSERT(gen != UVM_EMAP_INACTIVE); | | 185 | KASSERT(gen != UVM_EMAP_INACTIVE); |
186 | return gen; | | 186 | return gen; |
187 | } | | 187 | } |
188 | | | 188 | |
189 | /* | | 189 | /* |
190 | * uvm_emap_switch: if the CPU is 'behind' the LWP in emap visibility, | | 190 | * uvm_emap_switch: if the CPU is 'behind' the LWP in emap visibility, |
191 | * perform a TLB flush and thus update the local view. The main purpose | | 191 | * perform a TLB flush and thus update the local view. The main purpose |
192 | * is to handle kernel preemption while an emap is in use. | | 192 | * is to handle kernel preemption while an emap is in use. |
193 | * | | 193 | * |
194 | * => called from mi_switch(), when the LWP resumes after blocking or preemption. | | 194 | * => called from mi_switch(), when the LWP resumes after blocking or preemption. |
195 | */ | | 195 | */ |
196 | void | | 196 | void |
197 | uvm_emap_switch(lwp_t *l) | | 197 | uvm_emap_switch(lwp_t *l) |
198 | { | | 198 | { |
199 | struct uvm_cpu *ucpu; | | 199 | struct uvm_cpu *ucpu; |
200 | u_int curgen, gen; | | 200 | u_int curgen, gen; |
201 | | | 201 | |
202 | KASSERT(kpreempt_disabled()); | | 202 | KASSERT(kpreempt_disabled()); |
203 | | | 203 | |
204 | /* If LWP did not use emap, then nothing to do. */ | | 204 | /* If LWP did not use emap, then nothing to do. */ |
205 | if (__predict_true(l->l_emap_gen == UVM_EMAP_INACTIVE)) { | | 205 | if (__predict_true(l->l_emap_gen == UVM_EMAP_INACTIVE)) { |
206 | return; | | 206 | return; |
207 | } | | 207 | } |
208 | | | 208 | |
209 | /* | | 209 | /* |
210 | * No need to synchronise if the generation number of the current CPU | | 210 | * No need to synchronise if the generation number of the current CPU |
211 | * is not older than the number of this LWP. | | 211 | * is not older than the number of this LWP. |
212 | * | | 212 | * |
213 | * This test assumes two's complement arithmetic and allows | | 213 | * This test assumes two's complement arithmetic and allows |
214 | * ~2B missed updates before it will produce bad results. | | 214 | * ~2B missed updates before it will produce bad results. |
215 | */ | | 215 | */ |
216 | ucpu = curcpu()->ci_data.cpu_uvm; | | 216 | ucpu = curcpu()->ci_data.cpu_uvm; |
217 | curgen = ucpu->emap_gen; | | 217 | curgen = ucpu->emap_gen; |
218 | gen = l->l_emap_gen; | | 218 | gen = l->l_emap_gen; |
219 | if (__predict_true((signed int)(curgen - gen) >= 0)) { | | 219 | if (__predict_true((signed int)(curgen - gen) >= 0)) { |
220 | return; | | 220 | return; |
221 | } | | 221 | } |
222 | | | 222 | |
223 | /* | | 223 | /* |
224 | * See comments in uvm_emap_consume() about memory | | 224 | * See comments in uvm_emap_consume() about memory |
225 | * barriers and race conditions. | | 225 | * barriers and race conditions. |
226 | */ | | 226 | */ |
227 | curgen = uvm_emap_gen_return(); | | 227 | curgen = uvm_emap_gen_return(); |
228 | pmap_emap_sync(false); | | 228 | pmap_emap_sync(false); |
229 | ucpu->emap_gen = curgen; | | 229 | ucpu->emap_gen = curgen; |
230 | } | | 230 | } |
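The (signed int)(curgen - gen) >= 0 test above is the usual serial-number comparison: reducing "curgen is at least as new as gen" to the sign of a two's-complement difference keeps the test correct across counter wraparound. A small standalone illustration (not kernel code, names are hypothetical):

/* 'a is not older than b' under modular (wraparound) arithmetic. */
static inline bool
gen_not_older(u_int a, u_int b)
{

	return (int)(a - b) >= 0;
}

/*
 * Example just after wraparound: a = 1, b = UINT_MAX - 2.
 * a - b == 4 (mod 2^32), so gen_not_older(a, b) is true and 'a'
 * is correctly treated as newer, even though a < b as plain
 * unsigned values.
 */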
231 | | | 231 | |
232 | /* | | 232 | /* |
233 | * uvm_emap_consume: update the current CPU and LWP to the given generation | | 233 | * uvm_emap_consume: update the current CPU and LWP to the given generation |
234 | * of the emap. In the case of LWP migration to a different CPU after | | 234 | * of the emap. In the case of LWP migration to a different CPU after |
235 | * blocking or preemption, uvm_emap_switch() will synchronise. | | 235 | * blocking or preemption, uvm_emap_switch() will synchronise. |
236 | * | | 236 | * |
237 | * => may be called from both interrupt and thread context. | | 237 | * => may be called from both interrupt and thread context. |
238 | */ | | 238 | */ |
239 | void | | 239 | void |
240 | uvm_emap_consume(u_int gen) | | 240 | uvm_emap_consume(u_int gen) |
241 | { | | 241 | { |
242 | struct cpu_info *ci; | | 242 | struct cpu_info *ci; |
243 | struct uvm_cpu *ucpu; | | 243 | struct uvm_cpu *ucpu; |
244 | lwp_t *l = curlwp; | | 244 | lwp_t *l = curlwp; |
245 | u_int curgen; | | 245 | u_int curgen; |
246 | | | 246 | |
247 | if (gen == UVM_EMAP_INACTIVE) { | | 247 | if (gen == UVM_EMAP_INACTIVE) { |
248 | return; | | 248 | return; |
249 | } | | 249 | } |
250 | | | 250 | |
251 | /* | | 251 | /* |
252 | * No need to synchronise if the generation number of the current CPU | | 252 | * No need to synchronise if the generation number of the current CPU |
253 | * is not older than the number of this LWP. | | 253 | * is not older than the number of this LWP. |
254 | * | | 254 | * |
255 | * This test assumes two's complement arithmetic and allows | | 255 | * This test assumes two's complement arithmetic and allows |
256 | * ~2B missed updates before it will produce bad results. | | 256 | * ~2B missed updates before it will produce bad results. |
257 | */ | | 257 | */ |
258 | KPREEMPT_DISABLE(l); | | 258 | KPREEMPT_DISABLE(l); |
259 | ci = l->l_cpu; | | 259 | ci = l->l_cpu; |
260 | ucpu = ci->ci_data.cpu_uvm; | | 260 | ucpu = ci->ci_data.cpu_uvm; |
261 | if (__predict_true((signed int)(ucpu->emap_gen - gen) >= 0)) { | | 261 | if (__predict_true((signed int)(ucpu->emap_gen - gen) >= 0)) { |
262 | l->l_emap_gen = ucpu->emap_gen; | | 262 | l->l_emap_gen = ucpu->emap_gen; |
263 | KPREEMPT_ENABLE(l); | | 263 | KPREEMPT_ENABLE(l); |
264 | return; | | 264 | return; |
265 | } | | 265 | } |
266 | | | 266 | |
267 | /* | | 267 | /* |
268 | * Record the current generation _before_ issuing the TLB flush. | | 268 | * Record the current generation _before_ issuing the TLB flush. |
269 | * No need for a memory barrier before, as reading a stale value | | 269 | * No need for a memory barrier before, as reading a stale value |
270 | * for uvm_emap_gen is not a problem. | | 270 | * for uvm_emap_gen is not a problem. |
271 | * | | 271 | * |
272 | * pmap_emap_sync() must implicitly perform a full memory barrier, | | 272 | * pmap_emap_sync() must implicitly perform a full memory barrier, |
273 | * which prevents us from fetching a value from after the TLB flush | | 273 | * which prevents us from fetching a value from after the TLB flush |
274 | * has occurred (which would be bad). | | 274 | * has occurred (which would be bad). |
275 | * | | 275 | * |
276 | * We can race with an interrupt on the current CPU updating the | | 276 | * We can race with an interrupt on the current CPU updating the |
277 | * counter to a newer value. This could cause us to set a stale | | 277 | * counter to a newer value. This could cause us to set a stale |
278 | * value into ucpu->emap_gen, overwriting a newer update from the | | 278 | * value into ucpu->emap_gen, overwriting a newer update from the |
279 | * interrupt. However, it does not matter since: | | 279 | * interrupt. However, it does not matter since: |
280 | * (1) Interrupts always run to completion or block. | | 280 | * (1) Interrupts always run to completion or block. |
281 | * (2) Interrupts will only ever install a newer value and, | | 281 | * (2) Interrupts will only ever install a newer value and, |
282 | * (3) We will roll the value forward later. | | 282 | * (3) We will roll the value forward later. |
283 | */ | | 283 | */ |
284 | curgen = uvm_emap_gen_return(); | | 284 | curgen = uvm_emap_gen_return(); |
285 | pmap_emap_sync(true); | | 285 | pmap_emap_sync(true); |
286 | ucpu->emap_gen = curgen; | | 286 | ucpu->emap_gen = curgen; |
287 | l->l_emap_gen = curgen; | | 287 | l->l_emap_gen = curgen; |
288 | KASSERT((signed int)(curgen - gen) >= 0); | | 288 | KASSERT((signed int)(curgen - gen) >= 0); |
289 | KPREEMPT_ENABLE(l); | | 289 | KPREEMPT_ENABLE(l); |
290 | } | | 290 | } |
291 | | | 291 | |
292 | /* | | 292 | /* |
293 | * uvm_emap_produce: increment emap generation counter. | | 293 | * uvm_emap_produce: increment emap generation counter. |
294 | * | | 294 | * |
295 | * => pmap updates must be globally visible. | | 295 | * => pmap updates must be globally visible. |
296 | * => caller must have already entered mappings. | | 296 | * => caller must have already entered mappings. |
297 | * => may be called from both interrupt and thread context. | | 297 | * => may be called from both interrupt and thread context. |
298 | */ | | 298 | */ |
299 | u_int | | 299 | u_int |
300 | uvm_emap_produce(void) | | 300 | uvm_emap_produce(void) |
301 | { | | 301 | { |
302 | u_int gen; | | 302 | u_int gen; |
303 | again: | | 303 | again: |
304 | gen = atomic_inc_uint_nv(&uvm_emap_gen); | | 304 | gen = atomic_inc_uint_nv(&uvm_emap_gen); |
305 | if (__predict_false(gen == UVM_EMAP_INACTIVE)) { | | 305 | if (__predict_false(gen == UVM_EMAP_INACTIVE)) { |
306 | goto again; | | 306 | goto again; |
307 | } | | 307 | } |
308 | return gen; | | 308 | return gen; |
309 | } | | 309 | } |
310 | | | 310 | |
311 | /* | | 311 | /* |
312 | * uvm_emap_update: update the current CPU's view of the emap generation. | | 312 | * uvm_emap_update: update the current CPU's view of the emap generation. |
313 | * | | 313 | * |
314 | * The function is called by MD code (e.g. pmap) to take advantage of TLB | | 314 | * The function is called by MD code (e.g. pmap) to take advantage of TLB |
315 | * flushes initiated for other reasons, which sync the emap as a side effect. | | 315 | * flushes initiated for other reasons, which sync the emap as a side effect. |
316 | * Note that the generation number must be taken before the actual TLB flush, | | 316 | * Note that the generation number must be taken before the actual TLB flush, |
317 | * to avoid a race with a newly produced number. | | 317 | * to avoid a race with a newly produced number. |
318 | * | | 318 | * |
319 | * => can be called from IPI handler, therefore function must be safe. | | 319 | * => can be called from IPI handler, therefore function must be safe. |
320 | * => should be called _after_ TLB flush. | | 320 | * => should be called _after_ TLB flush. |
321 | * => emap generation number should be taken _before_ TLB flush. | | 321 | * => emap generation number should be taken _before_ TLB flush. |
322 | * => must be called with preemption disabled. | | 322 | * => must be called with preemption disabled. |
323 | */ | | 323 | */ |
324 | void | | 324 | void |
325 | uvm_emap_update(u_int gen) | | 325 | uvm_emap_update(u_int gen) |
326 | { | | 326 | { |
327 | struct uvm_cpu *ucpu; | | 327 | struct uvm_cpu *ucpu; |
328 | | | 328 | |
329 | /* | | 329 | /* |
330 | * See comments in uvm_emap_consume() about memory barriers and | | 330 | * See comments in uvm_emap_consume() about memory barriers and |
331 | * race conditions. The store is atomic as long as emap_gen is word-sized. | | 331 | * race conditions. The store is atomic as long as emap_gen is word-sized. |
332 | */ | | 332 | */ |
333 | CTASSERT(sizeof(ucpu->emap_gen) == sizeof(int)); | | 333 | CTASSERT(sizeof(ucpu->emap_gen) == sizeof(int)); |
334 | /* XXX: KASSERT(kpreempt_disabled()); */ | | 334 | /* XXX: KASSERT(kpreempt_disabled()); */ |
335 | | | 335 | |
336 | ucpu = curcpu()->ci_data.cpu_uvm; | | 336 | ucpu = curcpu()->ci_data.cpu_uvm; |
337 | ucpu->emap_gen = gen; | | 337 | ucpu->emap_gen = gen; |
338 | } | | 338 | } |
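Putting the ordering rules above together, the MD hook point would look roughly like the sketch below; md_tlbflush_all() stands in for whatever TLB flush the pmap already performs and is not a real function.

/*
 * Sketch of the pmap-side pattern (hypothetical MD code): take the
 * generation before the flush, flush, then publish it afterwards,
 * all with preemption disabled.
 */
static void
md_flush_and_update_emap(void)
{
	u_int gen;

	kpreempt_disable();
	gen = uvm_emap_gen_return();	/* taken _before_ the flush */
	md_tlbflush_all();		/* the pmap's existing TLB flush */
	uvm_emap_update(gen);		/* published _after_ the flush */
	kpreempt_enable();
}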
339 | | | 339 | |
340 | #else | | 340 | #else |
341 | | | 341 | |
342 | /* | | 342 | /* |
343 | * Stubs for architectures which do not support emap. | | 343 | * Stubs for architectures which do not support emap. |
344 | */ | | 344 | */ |
345 | | | 345 | |
346 | void | | 346 | void |
347 | uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages) | | 347 | uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages) |
348 | { | | 348 | { |
349 | paddr_t pa; | | 349 | paddr_t pa; |
350 | u_int n; | | 350 | u_int n; |
351 | | | 351 | |
352 | for (n = 0; n < npages; n++, va += PAGE_SIZE) { | | 352 | for (n = 0; n < npages; n++, va += PAGE_SIZE) { |
353 | pa = VM_PAGE_TO_PHYS(pgs[n]); | | 353 | pa = VM_PAGE_TO_PHYS(pgs[n]); |
354 | pmap_kenter_pa(va, pa, VM_PROT_READ); | | 354 | pmap_kenter_pa(va, pa, VM_PROT_READ); |
355 | } | | 355 | } |
356 | pmap_update(pmap_kernel()); | | 356 | pmap_update(pmap_kernel()); |
357 | } | | 357 | } |
358 | | | 358 | |
359 | void | | 359 | void |
360 | uvm_emap_remove(vaddr_t sva, vsize_t len) | | 360 | uvm_emap_remove(vaddr_t sva, vsize_t len) |
361 | { | | 361 | { |
362 | | | 362 | |
363 | pmap_kremove(sva, len); | | 363 | pmap_kremove(sva, len); |
364 | pmap_update(pmap_kernel()); | | 364 | pmap_update(pmap_kernel()); |
365 | } | | 365 | } |
366 | | | 366 | |
367 | #endif | | 367 | #endif |