Mon Jul 20 03:51:42 2009 UTC
Globalize uvm_emap_size.  It is used to calculate the size of the kernel page table.
  http://mail-index.netbsd.org/current-users/2009/07/13/msg009983.html


(kiyohara)
diff -r1.3 -r1.4 src/sys/uvm/uvm_emap.c
diff -r1.21 -r1.22 src/sys/uvm/uvm_param.h
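
Since uvm_emap_size is now declared in <uvm/uvm_param.h> (see the uvm_param.h
hunk below), machine-dependent code can account for the emap window when
sizing its kernel page tables.  A minimal sketch of such a use follows; the
helper name pmap_emap_kva() is hypothetical and not part of this commit:

	extern u_int uvm_emap_size;	/* size of emap, <uvm/uvm_param.h> */

	/*
	 * Hypothetical MD helper: how much kernel virtual address space
	 * (and therefore page-table coverage) the emap window will need,
	 * rounded up to a whole number of pages.
	 */
	static vsize_t
	pmap_emap_kva(void)
	{

		return round_page((vsize_t)uvm_emap_size);
	}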

cvs diff -r1.3 -r1.4 src/sys/uvm/Attic/uvm_emap.c

--- src/sys/uvm/Attic/uvm_emap.c 2009/07/19 15:17:29 1.3
+++ src/sys/uvm/Attic/uvm_emap.c 2009/07/20 03:51:42 1.4
@@ -1,367 +1,367 @@ @@ -1,367 +1,367 @@
1/* $NetBSD: uvm_emap.c,v 1.3 2009/07/19 15:17:29 rmind Exp $ */ 1/* $NetBSD: uvm_emap.c,v 1.4 2009/07/20 03:51:42 kiyohara Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Mindaugas Rasiukevicius and Andrew Doran. 8 * by Mindaugas Rasiukevicius and Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * UVM ephemeral mapping interface. 33 * UVM ephemeral mapping interface.
34 * 34 *
35 * Generic (more expensive) stubs are implemented for architectures which 35 * Generic (more expensive) stubs are implemented for architectures which
36 * do not support pmap. 36 * do not support pmap.
37 * 37 *
38 * Note that uvm_emap_update() is called from lower pmap(9) layer, while 38 * Note that uvm_emap_update() is called from lower pmap(9) layer, while
39 * other functions call to pmap(9). Typical pattern of update in pmap: 39 * other functions call to pmap(9). Typical pattern of update in pmap:
40 * 40 *
41 * u_int gen = uvm_emap_gen_return(); 41 * u_int gen = uvm_emap_gen_return();
42 * tlbflush(); 42 * tlbflush();
43 * uvm_emap_update(); 43 * uvm_emap_update();
44 * 44 *
45 * It is also used from IPI context, therefore functions must safe. 45 * It is also used from IPI context, therefore functions must safe.
46 */ 46 */
47 47
48#include <sys/cdefs.h> 48#include <sys/cdefs.h>
49__KERNEL_RCSID(0, "$NetBSD: uvm_emap.c,v 1.3 2009/07/19 15:17:29 rmind Exp $"); 49__KERNEL_RCSID(0, "$NetBSD: uvm_emap.c,v 1.4 2009/07/20 03:51:42 kiyohara Exp $");
50 50
51#include <sys/param.h> 51#include <sys/param.h>
52#include <sys/kernel.h> 52#include <sys/kernel.h>
53 53
54#include <sys/atomic.h> 54#include <sys/atomic.h>
55#include <sys/lwp.h> 55#include <sys/lwp.h>
56#include <sys/vmem.h> 56#include <sys/vmem.h>
57#include <sys/types.h> 57#include <sys/types.h>
58 58
59#include <uvm/uvm.h> 59#include <uvm/uvm.h>
60#include <uvm/uvm_extern.h> 60#include <uvm/uvm_extern.h>
61 61
62/* XXX: Arbitrary. */ 62/* XXX: Arbitrary. */
63#ifdef _LP64 63#ifdef _LP64
64#define UVM_EMAP_SIZE (128 * 1024 * 1024) /* 128 MB */ 64#define UVM_EMAP_SIZE (128 * 1024 * 1024) /* 128 MB */
65#else 65#else
66#define UVM_EMAP_SIZE (32 * 1024 * 1024) /* 32 MB */ 66#define UVM_EMAP_SIZE (32 * 1024 * 1024) /* 32 MB */
67#endif 67#endif
68 68
69static u_int _uvm_emap_gen[COHERENCY_UNIT - sizeof(u_int)] 69static u_int _uvm_emap_gen[COHERENCY_UNIT - sizeof(u_int)]
70 __aligned(COHERENCY_UNIT); 70 __aligned(COHERENCY_UNIT);
71 71
72#define uvm_emap_gen (_uvm_emap_gen[0]) 72#define uvm_emap_gen (_uvm_emap_gen[0])
73 73
74static u_int uvm_emap_size = UVM_EMAP_SIZE; 74u_int uvm_emap_size = UVM_EMAP_SIZE;
75static vaddr_t uvm_emap_va; 75static vaddr_t uvm_emap_va;
76static vmem_t * uvm_emap_vmem; 76static vmem_t * uvm_emap_vmem;
77 77
78/* 78/*
79 * uvm_emap_init: initialize subsystem. 79 * uvm_emap_init: initialize subsystem.
80 */ 80 */
81void 81void
82uvm_emap_sysinit(void) 82uvm_emap_sysinit(void)
83{ 83{
84 struct uvm_cpu *ucpu; 84 struct uvm_cpu *ucpu;
85 size_t qmax; 85 size_t qmax;
86 u_int i; 86 u_int i;
87 87
88 uvm_emap_size = roundup(uvm_emap_size, PAGE_SIZE); 88 uvm_emap_size = roundup(uvm_emap_size, PAGE_SIZE);
89 qmax = 16 * PAGE_SIZE; 89 qmax = 16 * PAGE_SIZE;
90 90
91 uvm_emap_va = uvm_km_alloc(kernel_map, uvm_emap_size, 0, 91 uvm_emap_va = uvm_km_alloc(kernel_map, uvm_emap_size, 0,
92 UVM_KMF_VAONLY | UVM_KMF_WAITVA); 92 UVM_KMF_VAONLY | UVM_KMF_WAITVA);
93 if (uvm_emap_va == 0) { 93 if (uvm_emap_va == 0) {
94 panic("uvm_emap_init: KVA allocation failed"); 94 panic("uvm_emap_init: KVA allocation failed");
95 } 95 }
96 96
97 uvm_emap_vmem = vmem_create("emap", uvm_emap_va, uvm_emap_size, 97 uvm_emap_vmem = vmem_create("emap", uvm_emap_va, uvm_emap_size,
98 PAGE_SIZE, NULL, NULL, NULL, qmax, VM_SLEEP, IPL_NONE); 98 PAGE_SIZE, NULL, NULL, NULL, qmax, VM_SLEEP, IPL_NONE);
99 if (uvm_emap_vmem == NULL) { 99 if (uvm_emap_vmem == NULL) {
100 panic("uvm_emap_init: vmem creation failed"); 100 panic("uvm_emap_init: vmem creation failed");
101 } 101 }
102 102
103 /* Initial generation value is 1. */ 103 /* Initial generation value is 1. */
104 uvm_emap_gen = 1; 104 uvm_emap_gen = 1;
105 for (i = 0; i < MAXCPUS; i++) { 105 for (i = 0; i < MAXCPUS; i++) {
106 ucpu = &uvm.cpus[i]; 106 ucpu = &uvm.cpus[i];
107 ucpu->emap_gen = 1; 107 ucpu->emap_gen = 1;
108 } 108 }
109} 109}
110 110
111/* 111/*
112 * uvm_emap_alloc: allocate a window. 112 * uvm_emap_alloc: allocate a window.
113 */ 113 */
114vaddr_t 114vaddr_t
115uvm_emap_alloc(vsize_t size, bool waitok) 115uvm_emap_alloc(vsize_t size, bool waitok)
116{ 116{
117 117
118 KASSERT(size > 0); 118 KASSERT(size > 0);
119 KASSERT(round_page(size) == size); 119 KASSERT(round_page(size) == size);
120 120
121 return vmem_alloc(uvm_emap_vmem, size, 121 return vmem_alloc(uvm_emap_vmem, size,
122 VM_INSTANTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP)); 122 VM_INSTANTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP));
123} 123}
124 124
125/* 125/*
126 * uvm_emap_free: free a window. 126 * uvm_emap_free: free a window.
127 */ 127 */
128void 128void
129uvm_emap_free(vaddr_t va, size_t size) 129uvm_emap_free(vaddr_t va, size_t size)
130{ 130{
131 131
132 KASSERT(va >= uvm_emap_va); 132 KASSERT(va >= uvm_emap_va);
133 KASSERT(size <= uvm_emap_size); 133 KASSERT(size <= uvm_emap_size);
134 KASSERT(va + size <= uvm_emap_va + uvm_emap_size); 134 KASSERT(va + size <= uvm_emap_va + uvm_emap_size);
135 135
136 vmem_free(uvm_emap_vmem, va, size); 136 vmem_free(uvm_emap_vmem, va, size);
137} 137}
138 138
139#ifdef __HAVE_PMAP_EMAP 139#ifdef __HAVE_PMAP_EMAP
140 140
141/* 141/*
142 * uvm_emap_enter: enter a new mapping, without TLB flush. 142 * uvm_emap_enter: enter a new mapping, without TLB flush.
143 */ 143 */
144void 144void
145uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages) 145uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages)
146{ 146{
147 paddr_t pa; 147 paddr_t pa;
148 u_int n; 148 u_int n;
149 149
150 for (n = 0; n < npages; n++, va += PAGE_SIZE) { 150 for (n = 0; n < npages; n++, va += PAGE_SIZE) {
151 pa = VM_PAGE_TO_PHYS(pgs[n]); 151 pa = VM_PAGE_TO_PHYS(pgs[n]);
152 pmap_emap_enter(va, pa, VM_PROT_READ); 152 pmap_emap_enter(va, pa, VM_PROT_READ);
153 } 153 }
154} 154}
155 155
156/* 156/*
157 * uvm_emap_remove: remove a mapping. 157 * uvm_emap_remove: remove a mapping.
158 */ 158 */
159void 159void
160uvm_emap_remove(vaddr_t sva, vsize_t len) 160uvm_emap_remove(vaddr_t sva, vsize_t len)
161{ 161{
162 162
163 pmap_emap_remove(sva, len); 163 pmap_emap_remove(sva, len);
164} 164}
165 165
166/* 166/*
167 * uvm_emap_gen_return: get the global generation number. 167 * uvm_emap_gen_return: get the global generation number.
168 * 168 *
169 * => can be called from IPI handler, therefore function must be safe. 169 * => can be called from IPI handler, therefore function must be safe.
170 */ 170 */
171u_int 171u_int
172uvm_emap_gen_return(void) 172uvm_emap_gen_return(void)
173{ 173{
174 u_int gen; 174 u_int gen;
175 175
176 gen = uvm_emap_gen; 176 gen = uvm_emap_gen;
177 if (__predict_false(gen == UVM_EMAP_INACTIVE)) { 177 if (__predict_false(gen == UVM_EMAP_INACTIVE)) {
178 /* 178 /*
179 * Instead of looping, just increase in our side. 179 * Instead of looping, just increase in our side.
180 * Other thread could race and increase it again, 180 * Other thread could race and increase it again,
181 * but without any negative effect. 181 * but without any negative effect.
182 */ 182 */
183 gen = atomic_inc_uint_nv(&uvm_emap_gen); 183 gen = atomic_inc_uint_nv(&uvm_emap_gen);
184 } 184 }
185 KASSERT(gen != UVM_EMAP_INACTIVE); 185 KASSERT(gen != UVM_EMAP_INACTIVE);
186 return gen; 186 return gen;
187} 187}
188 188
189/* 189/*
190 * uvm_emap_switch: if the CPU is 'behind' the LWP in emap visibility, 190 * uvm_emap_switch: if the CPU is 'behind' the LWP in emap visibility,
191 * perform TLB flush and thus update the local view. Main purpose is 191 * perform TLB flush and thus update the local view. Main purpose is
192 * to handle kernel preemption, while emap is in use. 192 * to handle kernel preemption, while emap is in use.
193 * 193 *
194 * => called from mi_switch(), when LWP returns after block or preempt. 194 * => called from mi_switch(), when LWP returns after block or preempt.
195 */ 195 */
196void 196void
197uvm_emap_switch(lwp_t *l) 197uvm_emap_switch(lwp_t *l)
198{ 198{
199 struct uvm_cpu *ucpu; 199 struct uvm_cpu *ucpu;
200 u_int curgen, gen; 200 u_int curgen, gen;
201 201
202 KASSERT(kpreempt_disabled()); 202 KASSERT(kpreempt_disabled());
203 203
204 /* If LWP did not use emap, then nothing to do. */ 204 /* If LWP did not use emap, then nothing to do. */
205 if (__predict_true(l->l_emap_gen == UVM_EMAP_INACTIVE)) { 205 if (__predict_true(l->l_emap_gen == UVM_EMAP_INACTIVE)) {
206 return; 206 return;
207 } 207 }
208 208
209 /* 209 /*
210 * No need to synchronise if generation number of current CPU is 210 * No need to synchronise if generation number of current CPU is
211 * newer than the number of this LWP. 211 * newer than the number of this LWP.
212 * 212 *
213 * This test assumes two's complement arithmetic and allows 213 * This test assumes two's complement arithmetic and allows
214 * ~2B missed updates before it will produce bad results. 214 * ~2B missed updates before it will produce bad results.
215 */ 215 */
216 ucpu = curcpu()->ci_data.cpu_uvm; 216 ucpu = curcpu()->ci_data.cpu_uvm;
217 curgen = ucpu->emap_gen; 217 curgen = ucpu->emap_gen;
218 gen = l->l_emap_gen; 218 gen = l->l_emap_gen;
219 if (__predict_true((signed int)(curgen - gen) >= 0)) { 219 if (__predict_true((signed int)(curgen - gen) >= 0)) {
220 return; 220 return;
221 } 221 }
222 222
223 /* 223 /*
224 * See comments in uvm_emap_consume() about memory 224 * See comments in uvm_emap_consume() about memory
225 * barriers and race conditions. 225 * barriers and race conditions.
226 */ 226 */
227 curgen = uvm_emap_gen_return(); 227 curgen = uvm_emap_gen_return();
228 pmap_emap_sync(false); 228 pmap_emap_sync(false);
229 ucpu->emap_gen = curgen; 229 ucpu->emap_gen = curgen;
230} 230}
231 231
232/* 232/*
233 * uvm_emap_consume: update the current CPU and LWP to the given generation 233 * uvm_emap_consume: update the current CPU and LWP to the given generation
234 * of the emap. In a case of LWP migration to a different CPU after block 234 * of the emap. In a case of LWP migration to a different CPU after block
235 * or preempt, uvm_emap_switch() will synchronise. 235 * or preempt, uvm_emap_switch() will synchronise.
236 * 236 *
237 * => may be called from both interrupt and thread context. 237 * => may be called from both interrupt and thread context.
238 */ 238 */
239void 239void
240uvm_emap_consume(u_int gen) 240uvm_emap_consume(u_int gen)
241{ 241{
242 struct cpu_info *ci; 242 struct cpu_info *ci;
243 struct uvm_cpu *ucpu; 243 struct uvm_cpu *ucpu;
244 lwp_t *l = curlwp; 244 lwp_t *l = curlwp;
245 u_int curgen; 245 u_int curgen;
246 246
247 if (gen == UVM_EMAP_INACTIVE) { 247 if (gen == UVM_EMAP_INACTIVE) {
248 return; 248 return;
249 } 249 }
250 250
251 /* 251 /*
252 * No need to synchronise if generation number of current CPU is 252 * No need to synchronise if generation number of current CPU is
253 * newer than the number of this LWP. 253 * newer than the number of this LWP.
254 * 254 *
255 * This test assumes two's complement arithmetic and allows 255 * This test assumes two's complement arithmetic and allows
256 * ~2B missed updates before it will produce bad results. 256 * ~2B missed updates before it will produce bad results.
257 */ 257 */
258 KPREEMPT_DISABLE(l); 258 KPREEMPT_DISABLE(l);
259 ci = l->l_cpu; 259 ci = l->l_cpu;
260 ucpu = ci->ci_data.cpu_uvm; 260 ucpu = ci->ci_data.cpu_uvm;
261 if (__predict_true((signed int)(ucpu->emap_gen - gen) >= 0)) { 261 if (__predict_true((signed int)(ucpu->emap_gen - gen) >= 0)) {
262 l->l_emap_gen = ucpu->emap_gen; 262 l->l_emap_gen = ucpu->emap_gen;
263 KPREEMPT_ENABLE(l); 263 KPREEMPT_ENABLE(l);
264 return; 264 return;
265 } 265 }
266 266
267 /* 267 /*
268 * Record the current generation _before_ issuing the TLB flush. 268 * Record the current generation _before_ issuing the TLB flush.
269 * No need for a memory barrier before, as reading a stale value 269 * No need for a memory barrier before, as reading a stale value
270 * for uvm_emap_gen is not a problem. 270 * for uvm_emap_gen is not a problem.
271 * 271 *
272 * pmap_emap_sync() must implicitly perform a full memory barrier, 272 * pmap_emap_sync() must implicitly perform a full memory barrier,
273 * which prevents us from fetching a value from after the TLB flush 273 * which prevents us from fetching a value from after the TLB flush
274 * has occurred (which would be bad). 274 * has occurred (which would be bad).
275 * 275 *
276 * We can race with an interrupt on the current CPU updating the 276 * We can race with an interrupt on the current CPU updating the
277 * counter to a newer value. This could cause us to set a stale 277 * counter to a newer value. This could cause us to set a stale
278 * value into ucpu->emap_gen, overwriting a newer update from the 278 * value into ucpu->emap_gen, overwriting a newer update from the
279 * interrupt. However, it does not matter since: 279 * interrupt. However, it does not matter since:
280 * (1) Interrupts always run to completion or block. 280 * (1) Interrupts always run to completion or block.
281 * (2) Interrupts will only ever install a newer value and, 281 * (2) Interrupts will only ever install a newer value and,
282 * (3) We will roll the value forward later. 282 * (3) We will roll the value forward later.
283 */ 283 */
284 curgen = uvm_emap_gen_return(); 284 curgen = uvm_emap_gen_return();
285 pmap_emap_sync(true); 285 pmap_emap_sync(true);
286 ucpu->emap_gen = curgen; 286 ucpu->emap_gen = curgen;
287 l->l_emap_gen = curgen; 287 l->l_emap_gen = curgen;
288 KASSERT((signed int)(curgen - gen) >= 0); 288 KASSERT((signed int)(curgen - gen) >= 0);
289 KPREEMPT_ENABLE(l); 289 KPREEMPT_ENABLE(l);
290} 290}
291 291
292/* 292/*
293 * uvm_emap_produce: increment emap generation counter. 293 * uvm_emap_produce: increment emap generation counter.
294 * 294 *
295 * => pmap updates must be globally visible. 295 * => pmap updates must be globally visible.
296 * => caller must have already entered mappings. 296 * => caller must have already entered mappings.
297 * => may be called from both interrupt and thread context. 297 * => may be called from both interrupt and thread context.
298 */ 298 */
299u_int 299u_int
300uvm_emap_produce(void) 300uvm_emap_produce(void)
301{ 301{
302 u_int gen; 302 u_int gen;
303again: 303again:
304 gen = atomic_inc_uint_nv(&uvm_emap_gen); 304 gen = atomic_inc_uint_nv(&uvm_emap_gen);
305 if (__predict_false(gen == UVM_EMAP_INACTIVE)) { 305 if (__predict_false(gen == UVM_EMAP_INACTIVE)) {
306 goto again; 306 goto again;
307 } 307 }
308 return gen; 308 return gen;
309} 309}
310 310
311/* 311/*
312 * uvm_emap_update: update global emap generation number for current CPU. 312 * uvm_emap_update: update global emap generation number for current CPU.
313 * 313 *
314 * Function is called by MD code (eg. pmap) to take advantage of TLB flushes 314 * Function is called by MD code (eg. pmap) to take advantage of TLB flushes
315 * initiated for other reasons, that sync the emap as a side effect. Note 315 * initiated for other reasons, that sync the emap as a side effect. Note
316 * update should be performed before the actual TLB flush, to avoid race 316 * update should be performed before the actual TLB flush, to avoid race
317 * with newly generated number. 317 * with newly generated number.
318 * 318 *
319 * => can be called from IPI handler, therefore function must be safe. 319 * => can be called from IPI handler, therefore function must be safe.
320 * => should be called _after_ TLB flush. 320 * => should be called _after_ TLB flush.
321 * => emap generation number should be taken _before_ TLB flush. 321 * => emap generation number should be taken _before_ TLB flush.
322 * => must be called with preemption disabled. 322 * => must be called with preemption disabled.
323 */ 323 */
324void 324void
325uvm_emap_update(u_int gen) 325uvm_emap_update(u_int gen)
326{ 326{
327 struct uvm_cpu *ucpu; 327 struct uvm_cpu *ucpu;
328 328
329 /* 329 /*
330 * See comments in uvm_emap_consume() about memory barriers and 330 * See comments in uvm_emap_consume() about memory barriers and
331 * race conditions. Store is atomic if emap_gen size is word. 331 * race conditions. Store is atomic if emap_gen size is word.
332 */ 332 */
333 CTASSERT(sizeof(ucpu->emap_gen) == sizeof(int)); 333 CTASSERT(sizeof(ucpu->emap_gen) == sizeof(int));
334 /* XXX: KASSERT(kpreempt_disabled()); */ 334 /* XXX: KASSERT(kpreempt_disabled()); */
335 335
336 ucpu = curcpu()->ci_data.cpu_uvm; 336 ucpu = curcpu()->ci_data.cpu_uvm;
337 ucpu->emap_gen = gen; 337 ucpu->emap_gen = gen;
338} 338}
339 339
340#else 340#else
341 341
342/* 342/*
343 * Stubs for architectures which do not support emap. 343 * Stubs for architectures which do not support emap.
344 */ 344 */
345 345
346void 346void
347uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages) 347uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages)
348{ 348{
349 paddr_t pa; 349 paddr_t pa;
350 u_int n; 350 u_int n;
351 351
352 for (n = 0; n < npages; n++, va += PAGE_SIZE) { 352 for (n = 0; n < npages; n++, va += PAGE_SIZE) {
353 pa = VM_PAGE_TO_PHYS(pgs[n]); 353 pa = VM_PAGE_TO_PHYS(pgs[n]);
354 pmap_kenter_pa(va, pa, VM_PROT_READ); 354 pmap_kenter_pa(va, pa, VM_PROT_READ);
355 } 355 }
356 pmap_update(pmap_kernel()); 356 pmap_update(pmap_kernel());
357} 357}
358 358
359void 359void
360uvm_emap_remove(vaddr_t sva, vsize_t len) 360uvm_emap_remove(vaddr_t sva, vsize_t len)
361{ 361{
362 362
363 pmap_kremove(sva, len); 363 pmap_kremove(sva, len);
364 pmap_update(pmap_kernel()); 364 pmap_update(pmap_kernel());
365} 365}
366 366
367#endif 367#endif

cvs diff -r1.21 -r1.22 src/sys/uvm/uvm_param.h

--- src/sys/uvm/uvm_param.h 2006/08/04 22:42:36 1.21
+++ src/sys/uvm/uvm_param.h 2009/07/20 03:51:42 1.22
@@ -1,259 +1,260 @@ @@ -1,259 +1,260 @@
1/* $NetBSD: uvm_param.h,v 1.21 2006/08/04 22:42:36 he Exp $ */ 1/* $NetBSD: uvm_param.h,v 1.22 2009/07/20 03:51:42 kiyohara Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1991, 1993 4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * The Mach Operating System project at Carnegie-Mellon University. 8 * The Mach Operating System project at Carnegie-Mellon University.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors 18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software 19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission. 20 * without specific prior written permission.
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE. 32 * SUCH DAMAGE.
33 * 33 *
34 * @(#)vm_param.h 8.2 (Berkeley) 1/9/95 34 * @(#)vm_param.h 8.2 (Berkeley) 1/9/95
35 * 35 *
36 * 36 *
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * All rights reserved. 38 * All rights reserved.
39 * 39 *
40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41 * 41 *
42 * Permission to use, copy, modify and distribute this software and 42 * Permission to use, copy, modify and distribute this software and
43 * its documentation is hereby granted, provided that both the copyright 43 * its documentation is hereby granted, provided that both the copyright
44 * notice and this permission notice appear in all copies of the 44 * notice and this permission notice appear in all copies of the
45 * software, derivative works or modified versions, and any portions 45 * software, derivative works or modified versions, and any portions
46 * thereof, and that both notices appear in supporting documentation. 46 * thereof, and that both notices appear in supporting documentation.
47 * 47 *
48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51 * 51 *
52 * Carnegie Mellon requests users of this software to return to 52 * Carnegie Mellon requests users of this software to return to
53 * 53 *
54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
55 * School of Computer Science 55 * School of Computer Science
56 * Carnegie Mellon University 56 * Carnegie Mellon University
57 * Pittsburgh PA 15213-3890 57 * Pittsburgh PA 15213-3890
58 * 58 *
59 * any improvements or extensions that they make and grant Carnegie the 59 * any improvements or extensions that they make and grant Carnegie the
60 * rights to redistribute these changes. 60 * rights to redistribute these changes.
61 */ 61 */
62 62
63/* 63/*
64 * Machine independent virtual memory parameters. 64 * Machine independent virtual memory parameters.
65 */ 65 */
66 66
67#ifndef _VM_PARAM_ 67#ifndef _VM_PARAM_
68#define _VM_PARAM_ 68#define _VM_PARAM_
69 69
70#ifdef _KERNEL_OPT 70#ifdef _KERNEL_OPT
71#include "opt_uvm.h" 71#include "opt_uvm.h"
72#endif 72#endif
73#ifdef _KERNEL 73#ifdef _KERNEL
74#include <sys/types.h> 74#include <sys/types.h>
75#include <sys/lock.h> 75#include <sys/lock.h>
76#include <machine/vmparam.h> 76#include <machine/vmparam.h>
77#include <sys/resourcevar.h> 77#include <sys/resourcevar.h>
78#endif 78#endif
79 79
80#if defined(_KERNEL) 80#if defined(_KERNEL)
81 81
82#if defined(PAGE_SIZE) 82#if defined(PAGE_SIZE)
83 83
84/* 84/*
85 * If PAGE_SIZE is defined at this stage, it must be a constant. 85 * If PAGE_SIZE is defined at this stage, it must be a constant.
86 */ 86 */
87 87
88#if PAGE_SIZE == 0 88#if PAGE_SIZE == 0
89#error Invalid PAGE_SIZE definition 89#error Invalid PAGE_SIZE definition
90#endif 90#endif
91 91
92/* 92/*
93 * If the platform does not need to support a variable PAGE_SIZE, 93 * If the platform does not need to support a variable PAGE_SIZE,
94 * then provide default values for MIN_PAGE_SIZE and MAX_PAGE_SIZE. 94 * then provide default values for MIN_PAGE_SIZE and MAX_PAGE_SIZE.
95 */ 95 */
96 96
97#if !defined(MIN_PAGE_SIZE) 97#if !defined(MIN_PAGE_SIZE)
98#define MIN_PAGE_SIZE PAGE_SIZE 98#define MIN_PAGE_SIZE PAGE_SIZE
99#endif /* ! MIN_PAGE_SIZE */ 99#endif /* ! MIN_PAGE_SIZE */
100 100
101#if !defined(MAX_PAGE_SIZE) 101#if !defined(MAX_PAGE_SIZE)
102#define MAX_PAGE_SIZE PAGE_SIZE 102#define MAX_PAGE_SIZE PAGE_SIZE
103#endif /* ! MAX_PAGE_SIZE */ 103#endif /* ! MAX_PAGE_SIZE */
104 104
105#else /* ! PAGE_SIZE */ 105#else /* ! PAGE_SIZE */
106 106
107/* 107/*
108 * PAGE_SIZE is not a constant; MIN_PAGE_SIZE and MAX_PAGE_SIZE must 108 * PAGE_SIZE is not a constant; MIN_PAGE_SIZE and MAX_PAGE_SIZE must
109 * be defined. 109 * be defined.
110 */ 110 */
111 111
112#if !defined(MIN_PAGE_SIZE) 112#if !defined(MIN_PAGE_SIZE)
113#error MIN_PAGE_SIZE not defined 113#error MIN_PAGE_SIZE not defined
114#endif 114#endif
115 115
116#if !defined(MAX_PAGE_SIZE) 116#if !defined(MAX_PAGE_SIZE)
117#error MAX_PAGE_SIZE not defined 117#error MAX_PAGE_SIZE not defined
118#endif 118#endif
119 119
120#endif /* PAGE_SIZE */ 120#endif /* PAGE_SIZE */
121 121
122/* 122/*
123 * MIN_PAGE_SIZE and MAX_PAGE_SIZE must be constants. 123 * MIN_PAGE_SIZE and MAX_PAGE_SIZE must be constants.
124 */ 124 */
125 125
126#if MIN_PAGE_SIZE == 0 126#if MIN_PAGE_SIZE == 0
127#error Invalid MIN_PAGE_SIZE definition 127#error Invalid MIN_PAGE_SIZE definition
128#endif 128#endif
129 129
130#if MAX_PAGE_SIZE == 0 130#if MAX_PAGE_SIZE == 0
131#error Invalid MAX_PAGE_SIZE definition 131#error Invalid MAX_PAGE_SIZE definition
132#endif 132#endif
133 133
134/* 134/*
135 * If MIN_PAGE_SIZE and MAX_PAGE_SIZE are not equal, then we must use 135 * If MIN_PAGE_SIZE and MAX_PAGE_SIZE are not equal, then we must use
136 * non-constant PAGE_SIZE, et al for LKMs. 136 * non-constant PAGE_SIZE, et al for LKMs.
137 */ 137 */
138#if (MIN_PAGE_SIZE != MAX_PAGE_SIZE) && defined(_LKM) 138#if (MIN_PAGE_SIZE != MAX_PAGE_SIZE) && defined(_LKM)
139#undef PAGE_SIZE 139#undef PAGE_SIZE
140#undef PAGE_MASK 140#undef PAGE_MASK
141#undef PAGE_SHIFT 141#undef PAGE_SHIFT
142#endif 142#endif
143 143
144/* 144/*
145 * Now provide PAGE_SIZE, PAGE_MASK, and PAGE_SHIFT if we do not 145 * Now provide PAGE_SIZE, PAGE_MASK, and PAGE_SHIFT if we do not
146 * have ones that are compile-time constants. 146 * have ones that are compile-time constants.
147 */ 147 */
148#if !defined(PAGE_SIZE) 148#if !defined(PAGE_SIZE)
149#define PAGE_SIZE uvmexp.pagesize /* size of page */ 149#define PAGE_SIZE uvmexp.pagesize /* size of page */
150#define PAGE_MASK uvmexp.pagemask /* size of page - 1 */ 150#define PAGE_MASK uvmexp.pagemask /* size of page - 1 */
151#define PAGE_SHIFT uvmexp.pageshift /* bits to shift for pages */ 151#define PAGE_SHIFT uvmexp.pageshift /* bits to shift for pages */
152#endif /* PAGE_SIZE */ 152#endif /* PAGE_SIZE */
153 153
154#endif /* _KERNEL */ 154#endif /* _KERNEL */
155 155
156/* 156/*
157 * CTL_VM identifiers 157 * CTL_VM identifiers
158 */ 158 */
159#define VM_METER 1 /* struct vmmeter */ 159#define VM_METER 1 /* struct vmmeter */
160#define VM_LOADAVG 2 /* struct loadavg */ 160#define VM_LOADAVG 2 /* struct loadavg */
161#define VM_UVMEXP 3 /* struct uvmexp */ 161#define VM_UVMEXP 3 /* struct uvmexp */
162#define VM_NKMEMPAGES 4 /* kmem_map pages */ 162#define VM_NKMEMPAGES 4 /* kmem_map pages */
163#define VM_UVMEXP2 5 /* struct uvmexp_sysctl */ 163#define VM_UVMEXP2 5 /* struct uvmexp_sysctl */
164#define VM_ANONMIN 6 164#define VM_ANONMIN 6
165#define VM_EXECMIN 7 165#define VM_EXECMIN 7
166#define VM_FILEMIN 8 166#define VM_FILEMIN 8
167#define VM_MAXSLP 9 167#define VM_MAXSLP 9
168#define VM_USPACE 10 168#define VM_USPACE 10
169#define VM_ANONMAX 11 169#define VM_ANONMAX 11
170#define VM_EXECMAX 12 170#define VM_EXECMAX 12
171#define VM_FILEMAX 13 171#define VM_FILEMAX 13
172 172
173#define VM_MAXID 14 /* number of valid vm ids */ 173#define VM_MAXID 14 /* number of valid vm ids */
174 174
175#define CTL_VM_NAMES { \ 175#define CTL_VM_NAMES { \
176 { 0, 0 }, \ 176 { 0, 0 }, \
177 { "vmmeter", CTLTYPE_STRUCT }, \ 177 { "vmmeter", CTLTYPE_STRUCT }, \
178 { "loadavg", CTLTYPE_STRUCT }, \ 178 { "loadavg", CTLTYPE_STRUCT }, \
179 { "uvmexp", CTLTYPE_STRUCT }, \ 179 { "uvmexp", CTLTYPE_STRUCT }, \
180 { "nkmempages", CTLTYPE_INT }, \ 180 { "nkmempages", CTLTYPE_INT }, \
181 { "uvmexp2", CTLTYPE_STRUCT }, \ 181 { "uvmexp2", CTLTYPE_STRUCT }, \
182 { "anonmin", CTLTYPE_INT }, \ 182 { "anonmin", CTLTYPE_INT }, \
183 { "execmin", CTLTYPE_INT }, \ 183 { "execmin", CTLTYPE_INT }, \
184 { "filemin", CTLTYPE_INT }, \ 184 { "filemin", CTLTYPE_INT }, \
185 { "maxslp", CTLTYPE_INT }, \ 185 { "maxslp", CTLTYPE_INT }, \
186 { "uspace", CTLTYPE_INT }, \ 186 { "uspace", CTLTYPE_INT }, \
187 { "anonmax", CTLTYPE_INT }, \ 187 { "anonmax", CTLTYPE_INT }, \
188 { "execmax", CTLTYPE_INT }, \ 188 { "execmax", CTLTYPE_INT }, \
189 { "filemax", CTLTYPE_INT }, \ 189 { "filemax", CTLTYPE_INT }, \
190} 190}
191 191
192#ifndef ASSEMBLER 192#ifndef ASSEMBLER
193/* 193/*
194 * Convert addresses to pages and vice versa. 194 * Convert addresses to pages and vice versa.
195 * No rounding is used. 195 * No rounding is used.
196 */ 196 */
197#ifdef _KERNEL 197#ifdef _KERNEL
198#define atop(x) (((paddr_t)(x)) >> PAGE_SHIFT) 198#define atop(x) (((paddr_t)(x)) >> PAGE_SHIFT)
199#define ptoa(x) ((vaddr_t)((vaddr_t)(x) << PAGE_SHIFT)) 199#define ptoa(x) ((vaddr_t)((vaddr_t)(x) << PAGE_SHIFT))
200 200
201/* 201/*
202 * Round off or truncate to the nearest page. These will work 202 * Round off or truncate to the nearest page. These will work
203 * for either addresses or counts (i.e., 1 byte rounds to 1 page). 203 * for either addresses or counts (i.e., 1 byte rounds to 1 page).
204 */ 204 */
205#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK) 205#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
206#define trunc_page(x) ((x) & ~PAGE_MASK) 206#define trunc_page(x) ((x) & ~PAGE_MASK)
207 207
208/* 208/*
209 * Set up the default mapping address (VM_DEFAULT_ADDRESS) according to: 209 * Set up the default mapping address (VM_DEFAULT_ADDRESS) according to:
210 * 210 *
211 * USE_TOPDOWN_VM: a kernel option to enable on a per-kernel basis 211 * USE_TOPDOWN_VM: a kernel option to enable on a per-kernel basis
212 * which only be used on ports that define... 212 * which only be used on ports that define...
213 * __HAVE_TOPDOWN_VM: a per-port option to offer the topdown option 213 * __HAVE_TOPDOWN_VM: a per-port option to offer the topdown option
214 * 214 *
215 * __USE_TOPDOWN_VM: a per-port option to unconditionally use it 215 * __USE_TOPDOWN_VM: a per-port option to unconditionally use it
216 * 216 *
217 * if __USE_TOPDOWN_VM is defined, the port can specify a default vm 217 * if __USE_TOPDOWN_VM is defined, the port can specify a default vm
218 * address, or we will use the topdown default from below. If it is 218 * address, or we will use the topdown default from below. If it is
219 * NOT defined, then the port can offer topdown as an option, but it 219 * NOT defined, then the port can offer topdown as an option, but it
220 * MUST define the VM_DEFAULT_ADDRESS macro itself. 220 * MUST define the VM_DEFAULT_ADDRESS macro itself.
221 */ 221 */
222#if defined(USE_TOPDOWN_VM) || defined(__USE_TOPDOWN_VM) 222#if defined(USE_TOPDOWN_VM) || defined(__USE_TOPDOWN_VM)
223# if !defined(__HAVE_TOPDOWN_VM) && !defined(__USE_TOPDOWN_VM) 223# if !defined(__HAVE_TOPDOWN_VM) && !defined(__USE_TOPDOWN_VM)
224# error "Top down memory allocation not enabled for this system" 224# error "Top down memory allocation not enabled for this system"
225# else /* !__HAVE_TOPDOWN_VM && !__USE_TOPDOWN_VM */ 225# else /* !__HAVE_TOPDOWN_VM && !__USE_TOPDOWN_VM */
226# define __USING_TOPDOWN_VM 226# define __USING_TOPDOWN_VM
227# if !defined(VM_DEFAULT_ADDRESS) 227# if !defined(VM_DEFAULT_ADDRESS)
228# if !defined(__USE_TOPDOWN_VM) 228# if !defined(__USE_TOPDOWN_VM)
229# error "Top down memory allocation not configured for this system" 229# error "Top down memory allocation not configured for this system"
230# else /* !__USE_TOPDOWN_VM */ 230# else /* !__USE_TOPDOWN_VM */
231# define VM_DEFAULT_ADDRESS(da, sz) \ 231# define VM_DEFAULT_ADDRESS(da, sz) \
232 trunc_page(VM_MAXUSER_ADDRESS - MAXSSIZ - (sz)) 232 trunc_page(VM_MAXUSER_ADDRESS - MAXSSIZ - (sz))
233# endif /* !__USE_TOPDOWN_VM */ 233# endif /* !__USE_TOPDOWN_VM */
234# endif /* !VM_DEFAULT_ADDRESS */ 234# endif /* !VM_DEFAULT_ADDRESS */
235# endif /* !__HAVE_TOPDOWN_VM && !__USE_TOPDOWN_VM */ 235# endif /* !__HAVE_TOPDOWN_VM && !__USE_TOPDOWN_VM */
236#endif /* USE_TOPDOWN_VM || __USE_TOPDOWN_VM */ 236#endif /* USE_TOPDOWN_VM || __USE_TOPDOWN_VM */
237 237
238#if !defined(__USING_TOPDOWN_VM) 238#if !defined(__USING_TOPDOWN_VM)
239# if defined(VM_DEFAULT_ADDRESS) 239# if defined(VM_DEFAULT_ADDRESS)
240# error "Default vm address should not be defined here" 240# error "Default vm address should not be defined here"
241# else /* VM_DEFAULT_ADDRESS */ 241# else /* VM_DEFAULT_ADDRESS */
242# define VM_DEFAULT_ADDRESS(da, sz) round_page((vaddr_t)(da) + (vsize_t)maxdmap) 242# define VM_DEFAULT_ADDRESS(da, sz) round_page((vaddr_t)(da) + (vsize_t)maxdmap)
243# endif /* VM_DEFAULT_ADDRESS */ 243# endif /* VM_DEFAULT_ADDRESS */
244#endif /* !__USING_TOPDOWN_VM */ 244#endif /* !__USING_TOPDOWN_VM */
245 245
246extern int ubc_nwins; /* number of UBC mapping windows */ 246extern int ubc_nwins; /* number of UBC mapping windows */
247extern int ubc_winshift; /* shift for a UBC mapping window */ 247extern int ubc_winshift; /* shift for a UBC mapping window */
 248extern u_int uvm_emap_size; /* size of emap */
248 249
249#else 250#else
250/* out-of-kernel versions of round_page and trunc_page */ 251/* out-of-kernel versions of round_page and trunc_page */
251#define round_page(x) \ 252#define round_page(x) \
252 ((((vaddr_t)(x) + (vm_page_size - 1)) / vm_page_size) * \ 253 ((((vaddr_t)(x) + (vm_page_size - 1)) / vm_page_size) * \
253 vm_page_size) 254 vm_page_size)
254#define trunc_page(x) \ 255#define trunc_page(x) \
255 ((((vaddr_t)(x)) / vm_page_size) * vm_page_size) 256 ((((vaddr_t)(x)) / vm_page_size) * vm_page_size)
256 257
257#endif /* _KERNEL */ 258#endif /* _KERNEL */
258#endif /* ASSEMBLER */ 259#endif /* ASSEMBLER */
259#endif /* _VM_PARAM_ */ 260#endif /* _VM_PARAM_ */