Sat Jan 11 17:14:00 2014 UTC
Comment about missing stackframe member initialization (Richard Hansen)

I haven't studied the code, but I'm concerned that not initializing
sf->sf_edi could potentially leak a few bytes of information to a new
userspace process.


(christos)
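
For illustration only, and not part of this commit: if the missing initialization does turn out to matter, a minimal defensive sketch for the i386 branch of cpu_lwp_fork() might simply zero the member before the other switchframe fields are set up. This assumes struct switchframe carries an sf_edi slot, as the new XXX comment implies; whether that slot can actually reach user space would still need to be confirmed against the i386 cpu_switchto()/lwp_trampoline() path.

	/*
	 * Hypothetical sketch, not the committed change: clear sf_edi so
	 * the child's switchframe never carries stale kernel-stack bytes.
	 * Assumes struct switchframe has an sf_edi member, as the XXX
	 * comment added in this revision suggests.
	 */
	sf->sf_edi = 0;
	sf->sf_esi = (int)func;
	sf->sf_ebx = (int)arg;
	sf->sf_eip = (int)lwp_trampoline;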
diff -r1.18 -r1.19 src/sys/arch/x86/x86/vm_machdep.c

cvs diff -r1.18 -r1.19 src/sys/arch/x86/x86/vm_machdep.c (unified diff)

--- src/sys/arch/x86/x86/vm_machdep.c 2013/12/01 01:05:16 1.18
+++ src/sys/arch/x86/x86/vm_machdep.c 2014/01/11 17:14:00 1.19
@@ -1,403 +1,408 @@
-/*	$NetBSD: vm_machdep.c,v 1.18 2013/12/01 01:05:16 christos Exp $	*/
+/*	$NetBSD: vm_machdep.c,v 1.19 2014/01/11 17:14:00 christos Exp $	*/
 
 /*-
  * Copyright (c) 1982, 1986 The Regents of the University of California.
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * the Systems Programming Group of the University of Utah Computer
  * Science Department, and William Jolitz.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
  */
 
 /*-
  * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
  * Copyright (c) 1989, 1990 William Jolitz
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * the Systems Programming Group of the University of Utah Computer
  * Science Department, and William Jolitz.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by the University of
  *	California, Berkeley and its contributors.
  * 4. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
  */
 
 /*
  *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.18 2013/12/01 01:05:16 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.19 2014/01/11 17:14:00 christos Exp $");
 
 #include "opt_mtrr.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/vnode.h>
 #include <sys/buf.h>
 #include <sys/core.h>
 #include <sys/exec.h>
 #include <sys/ptrace.h>
 
 #include <uvm/uvm.h>
 
 #include <machine/cpu.h>
 #include <machine/gdt.h>
 #include <machine/reg.h>
 #include <machine/specialreg.h>
 #ifdef MTRR
 #include <machine/mtrr.h>
 #endif
 
 #ifdef __x86_64__
 #include <machine/fpu.h>
 #else
 #include "npx.h"
 #if NNPX > 0
 #define fpusave_lwp(x, y)	npxsave_lwp(x, y)
 #else
 #define fpusave_lwp(x, y)
 #endif
 #endif
 
 void
 cpu_proc_fork(struct proc *p1, struct proc *p2)
 {
 
 	p2->p_md.md_flags = p1->p_md.md_flags;
 }
 
 /*
  * cpu_lwp_fork: finish a new LWP (l2) operation.
  *
  * First LWP (l1) is the process being forked.  If it is &lwp0, then we
  * are creating a kthread, where return path and argument are specified
  * with `func' and `arg'.
  *
  * If an alternate user-level stack is requested (with non-zero values
  * in both the stack and stacksize arguments), then set up the user stack
  * pointer accordingly.
  */
 void
 cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
     void (*func)(void *), void *arg)
 {
 	struct pcb *pcb1, *pcb2;
 	struct trapframe *tf;
 	struct switchframe *sf;
 	vaddr_t uv;
 
 	pcb1 = lwp_getpcb(l1);
 	pcb2 = lwp_getpcb(l2);
 
 	/*
 	 * If parent LWP was using FPU, then we have to save the FPU h/w
 	 * state to PCB so that we can copy it.
 	 */
 	if (pcb1->pcb_fpcpu != NULL) {
 		fpusave_lwp(l1, true);
 	}
 
 	/*
 	 * Sync the PCB before we copy it.
 	 */
 	if (l1 == curlwp) {
 		KASSERT(pcb1 == curpcb);
 		savectx(pcb1);
 	} else {
 		KASSERT(l1 == &lwp0);
 	}
 
 	/* Copy the PCB from parent. */
 	memcpy(pcb2, pcb1, sizeof(struct pcb));
 
 #if defined(XEN)
 	pcb2->pcb_iopl = SEL_KPL;
 #endif
 
 	/*
 	 * Set the kernel stack address (from the address to uarea) and
 	 * trapframe address for child.
 	 *
 	 * Rig kernel stack so that it would start out in lwp_trampoline()
 	 * and call child_return() with l2 as an argument. This causes the
 	 * newly-created child process to go directly to user level with a
 	 * parent return value of 0 from fork(), while the parent process
 	 * returns normally.
 	 */
 	uv = uvm_lwp_getuarea(l2);
 
 #ifdef __x86_64__
 	pcb2->pcb_rsp0 = (uv + KSTACK_SIZE - 16) & ~0xf;
 	tf = (struct trapframe *)pcb2->pcb_rsp0 - 1;
 #else
 	pcb2->pcb_esp0 = (uv + KSTACK_SIZE - 16);
 	tf = (struct trapframe *)pcb2->pcb_esp0 - 1;
 
 	pcb2->pcb_iomap = NULL;
 #endif
 	l2->l_md.md_regs = tf;
 
 	/*
 	 * Copy the trapframe from parent, so that return to userspace
 	 * will be to right address, with correct registers.
 	 */
 	memcpy(tf, l1->l_md.md_regs, sizeof(struct trapframe));
 
 	/* Child LWP might get aston() before returning to userspace. */
 	tf->tf_trapno = T_ASTFLT;
 
 #if 0 /* DIAGNOSTIC */
 	/* Set a red zone in the kernel stack after the uarea. */
 	pmap_kremove(uv, PAGE_SIZE);
 	pmap_update(pmap_kernel());
 #endif
 
 	/* If specified, set a different user stack for a child. */
 	if (stack != NULL) {
 #ifdef __x86_64__
 		tf->tf_rsp = (uint64_t)stack + stacksize;
 #else
 		tf->tf_esp = (uint32_t)stack + stacksize;
 #endif
 	}
 
 	l2->l_md.md_flags = l1->l_md.md_flags;
 	l2->l_md.md_astpending = 0;
 
 	sf = (struct switchframe *)tf - 1;
 
 #ifdef __x86_64__
 	sf->sf_r12 = (uint64_t)func;
 	sf->sf_r13 = (uint64_t)arg;
 	sf->sf_rip = (uint64_t)lwp_trampoline;
 	pcb2->pcb_rsp = (uint64_t)sf;
 	pcb2->pcb_rbp = (uint64_t)l2;
 #else
+	/*
+	 * XXX Is there a reason sf->sf_edi isn't initialized here?
+	 * Could this leak potentially sensitive information to new
+	 * userspace processes?
+	 */
 	sf->sf_esi = (int)func;
 	sf->sf_ebx = (int)arg;
 	sf->sf_eip = (int)lwp_trampoline;
 	pcb2->pcb_esp = (int)sf;
 	pcb2->pcb_ebp = (int)l2;
 #endif
 }
 
 /*
  * cpu_lwp_free is called from exit() to let machine-dependent
  * code free machine-dependent resources.  Note that this routine
  * must not block.
  */
 void
 cpu_lwp_free(struct lwp *l, int proc)
 {
 	struct pcb *pcb = lwp_getpcb(l);
 
 	/* If we were using the FPU, forget about it. */
 	if (pcb->pcb_fpcpu != NULL) {
 		fpusave_lwp(l, false);
 	}
 
 #ifdef MTRR
 	if (proc && l->l_proc->p_md.md_flags & MDP_USEDMTRR)
 		mtrr_clean(l->l_proc);
 #endif
 }
 
 /*
  * cpu_lwp_free2 is called when an LWP is being reaped.
  * This routine may block.
  */
 void
 cpu_lwp_free2(struct lwp *l)
 {
 
 	KASSERT(l->l_md.md_gc_ptp == NULL);
 	KASSERT(l->l_md.md_gc_pmap == NULL);
 }
 
 /*
  * Convert kernel VA to physical address
  */
 paddr_t
 kvtop(void *addr)
 {
 	paddr_t pa;
 	bool ret;
 
 	ret = pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa);
 	KASSERT(ret == true);
 	return pa;
 }
 
 /*
  * Map a user I/O request into kernel virtual address space.
  * Note: the pages are already locked by uvm_vslock(), so we
  * do not need to pass an access_type to pmap_enter().
  */
 int
 vmapbuf(struct buf *bp, vsize_t len)
 {
 	vaddr_t faddr, taddr, off;
 	paddr_t fpa;
 
 	KASSERT((bp->b_flags & B_PHYS) != 0);
 
 	bp->b_saveaddr = bp->b_data;
 	faddr = trunc_page((vaddr_t)bp->b_data);
 	off = (vaddr_t)bp->b_data - faddr;
 	len = round_page(off + len);
 	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
 	bp->b_data = (void *)(taddr + off);
 	/*
 	 * The region is locked, so we expect that pmap_pte() will return
 	 * non-NULL.
 	 * XXX: unwise to expect this in a multithreaded environment.
 	 * anything can happen to a pmap between the time we lock a
 	 * region, release the pmap lock, and then relock it for
 	 * the pmap_extract().
 	 *
 	 * no need to flush TLB since we expect nothing to be mapped
 	 * where we we just allocated (TLB will be flushed when our
 	 * mapping is removed).
 	 */
 	while (len) {
 		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
 		    faddr, &fpa);
 		pmap_kenter_pa(taddr, fpa, VM_PROT_READ|VM_PROT_WRITE, 0);
 		faddr += PAGE_SIZE;
 		taddr += PAGE_SIZE;
 		len -= PAGE_SIZE;
 	}
 	pmap_update(pmap_kernel());
 
 	return 0;
 }
 
 /*
  * Unmap a previously-mapped user I/O request.
  */
 void
 vunmapbuf(struct buf *bp, vsize_t len)
 {
 	vaddr_t addr, off;
 
 	KASSERT((bp->b_flags & B_PHYS) != 0);
 
 	addr = trunc_page((vaddr_t)bp->b_data);
 	off = (vaddr_t)bp->b_data - addr;
 	len = round_page(off + len);
 	pmap_kremove(addr, len);
 	pmap_update(pmap_kernel());
 	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
 	bp->b_data = bp->b_saveaddr;
 	bp->b_saveaddr = 0;
 }
 
 #ifdef __HAVE_CPU_UAREA_ROUTINES
 void *
 cpu_uarea_alloc(bool system)
 {
 	struct pglist pglist;
 	int error;
 
 	/*
 	 * Allocate a new physically contiguous uarea which can be
 	 * direct-mapped.
 	 */
 	error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
 	if (error) {
 		return NULL;
 	}
 
 	/*
 	 * Get the physical address from the first page.
 	 */
 	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
 	KASSERT(pg != NULL);
 	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
 
 	/*
 	 * We need to return a direct-mapped VA for the pa.
 	 */
 
 	return (void *)PMAP_MAP_POOLPAGE(pa);
 }
 
 /*
  * Return true if we freed it, false if we didn't.
  */
 bool
 cpu_uarea_free(void *vva)
 {
 	vaddr_t va = (vaddr_t) vva;
 
 	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) {
 		return false;
 	}
 
 	/*
 	 * Since the pages are physically contiguous, the vm_page structures
 	 * will be as well.
 	 */
 	struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va));
 	KASSERT(pg != NULL);
 	for (size_t i = 0; i < UPAGES; i++, pg++) {
 		uvm_pagefree(pg);
 	}
 	return true;
 }
 #endif /* __HAVE_CPU_UAREA_ROUTINES */