Thu Dec 1 23:59:14 2011 UTC (matt)

Add code to deal with direct mapped uareas.
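Background: on MIPS the uarea (the per-LWP pcb plus kernel stack) can live in a direct-mapped kernel segment (KSEG0, XKPHYS on _LP64 kernels, or the optional KSEGX window), where no TLB entry is needed, so the PTEs normally cached in md_upte[] for cpu_switchto() can be skipped. A minimal sketch of the address test this commit repeats in cpu_lwp_fork(), cpu_uarea_remap() and cpu_swapin(); the helper function itself is hypothetical, the macros are the ones used in the diff below:

static inline bool
uarea_is_direct_mapped(vaddr_t va)
{
	if (MIPS_KSEG0_P(va))		/* 32-bit cached direct map */
		return true;
#ifdef _LP64
	if (MIPS_XKPHYS_P(va))		/* 64-bit direct-mapped segment */
		return true;
#endif
#ifdef ENABLE_MIPS_KSEGX
	/* optional extra direct-mapped window */
	if (VM_KSEGX_ADDRESS <= va && va < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE)
		return true;
#endif
	return false;
}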
cvs diff -r1.121.6.1.2.22 -r1.121.6.1.2.23 src/sys/arch/mips/mips/vm_machdep.c

--- src/sys/arch/mips/mips/vm_machdep.c	2011/11/29 07:48:31	1.121.6.1.2.22
+++ src/sys/arch/mips/mips/vm_machdep.c	2011/12/01 23:59:14	1.121.6.1.2.23
@@ -1,483 +1,512 @@
 /* vm_machdep.c,v 1.121.6.1.2.19 2011/04/29 08:26:31 matt Exp */
 
 /*
  * Copyright (c) 1988 University of Utah.
  * Copyright (c) 1992, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * the Systems Programming Group of the University of Utah Computer
  * Science Department and Ralph Campbell.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * from: Utah Hdr: vm_machdep.c 1.21 91/04/06
  *
  *	@(#)vm_machdep.c	8.3 (Berkeley)	1/4/94
  */
 
 #include <sys/cdefs.h>
 __KERNEL_RCSID(0, "vm_machdep.c,v 1.121.6.1.2.19 2011/04/29 08:26:31 matt Exp");
 
 #include "opt_ddb.h"
 #include "opt_coredump.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/buf.h>
 #include <sys/cpu.h>
 #include <sys/vnode.h>
 #include <sys/core.h>
 #include <sys/exec.h>
 #include <sys/sa.h>
 #include <sys/savar.h>
 
 #include <uvm/uvm.h>
 
 #include <mips/cache.h>
 #include <mips/pcb.h>
 #include <mips/regnum.h>
 #include <mips/locore.h>
 #include <mips/pte.h>
 #include <mips/psl.h>
 
 paddr_t kvtophys(vaddr_t);	/* XXX */
 
 /*
  * cpu_lwp_fork: Finish a fork operation, with lwp l2 nearly set up.
  * Copy and update the pcb and trapframe, making the child ready to run.
  *
  * First LWP (l1) is the lwp being forked.  If it is &lwp0, then we are
  * creating a kthread, where return path and argument are specified
  * with `func' and `arg'.
  *
  * Rig the child's kernel stack so that it will start out in lwp_trampoline()
  * and call child_return() with l2 as an argument.  This causes the
  * newly-created child process to go directly to user level with an apparent
  * return value of 0 from fork(), while the parent process returns normally.
  *
  * If an alternate user-level stack is requested (with non-zero values
  * in both the stack and stacksize arguments), then set up the user stack
  * pointer accordingly.
  */
 void
 cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
     void (*func)(void *), void *arg)
 {
 	struct pcb * const pcb1 = lwp_getpcb(l1);
 	struct pcb * const pcb2 = lwp_getpcb(l2);
 	struct trapframe *tf;
 
 	KASSERT(l1 == curlwp || l1 == &lwp0);
 
 	l2->l_md.md_ss_addr = 0;
 	l2->l_md.md_ss_instr = 0;
 	l2->l_md.md_astpending = 0;
 
 #ifndef NOFPU
 	/* If parent LWP was using FPU, then save the FPU h/w state. */
 	fpu_save_lwp(l1);
 #endif
 
 	/* Copy the PCB from parent. */
 	*pcb2 = *pcb1;
 
 	/*
 	 * Copy the trapframe from parent, so that return to userspace
 	 * will be to right address, with correct registers.
 	 */
 	vaddr_t ua2 = (vaddr_t)l2->l_addr;
 	tf = (struct trapframe *)(ua2 + USPACE) - 1;
 	*tf = *l1->l_md.md_utf;
 
 	/* If specified, set a different user stack for a child. */
 	if (stack != NULL)
 		tf->tf_regs[_R_SP] = (intptr_t)stack + stacksize;
 
 	l2->l_md.md_utf = tf;
 	l2->l_md.md_flags = l1->l_md.md_flags & MDP_FPUSED;
 
 	bool direct_mapped_p = MIPS_KSEG0_P(ua2);
+#ifdef ENABLE_MIPS_KSEGX
+	if (!direct_mapped_p)
+		direct_mapped_p = VM_KSEGX_ADDRESS <= ua2
+		    && ua2 < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE;
+#endif
 #ifdef _LP64
 	direct_mapped_p = direct_mapped_p || MIPS_XKPHYS_P(ua2);
 #endif
 	if (!direct_mapped_p) {
 		pt_entry_t * const pte = kvtopte(ua2);
 		const uint32_t x = (MIPS_HAS_R4K_MMU) ?
 		    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) : MIPS1_PG_G;
 
 		for (u_int i = 0; i < UPAGES; i++) {
 			l2->l_md.md_upte[i] = pte[i].pt_entry &~ x;
 		}
 	}
 
 	cpu_setfunc(l2, func, arg);
 }
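/*
 * Sketch (not part of this commit): with a hypothetical
 * uarea_is_direct_mapped() helper like the one outlined above the diff,
 * the block just added to cpu_lwp_fork() could equivalently be written as
 * below; the committed code keeps the open-coded direct_mapped_p test.
 */
	if (!uarea_is_direct_mapped(ua2)) {
		/* Only TLB-mapped uareas need their PTEs cached in
		 * md_upte[] for cpu_switchto(); direct-mapped ones don't. */
		pt_entry_t * const pte = kvtopte(ua2);
		const uint32_t x = (MIPS_HAS_R4K_MMU) ?
		    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) : MIPS1_PG_G;

		for (u_int i = 0; i < UPAGES; i++)
			l2->l_md.md_upte[i] = pte[i].pt_entry &~ x;
	}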
 
 void
 cpu_setfunc(struct lwp *l, void (*func)(void *), void *arg)
 {
 	struct pcb * const pcb = lwp_getpcb(l);
 	struct trapframe * const tf = l->l_md.md_utf;
 
 	KASSERT(tf == (struct trapframe *)((char *)l->l_addr + USPACE) - 1);
 
 	/*
 	 * Rig kernel stack so that it would start out in lwp_trampoline()
 	 * and call child_return() with l as an argument.  This causes the
 	 * newly-created child process to go directly to user level with a
 	 * parent return value of 0 from fork(), while the parent process
 	 * returns normally.
 	 */
 
 	pcb->pcb_context.val[_L_S0] = (intptr_t)func;			/* S0 */
 	pcb->pcb_context.val[_L_S1] = (intptr_t)arg;			/* S1 */
 	pcb->pcb_context.val[MIPS_CURLWP_LABEL] = (intptr_t)l;		/* T8 */
 	pcb->pcb_context.val[_L_SP] = (intptr_t)tf;			/* SP */
 	pcb->pcb_context.val[_L_RA] =
 	    mips_locore_jumpvec.ljv_lwp_trampoline;			/* RA */
 #ifdef _LP64
 	KASSERT(pcb->pcb_context.val[_L_SR] & MIPS_SR_KX);
 #endif
 	KASSERT(pcb->pcb_context.val[_L_SR] & MIPS_SR_INT_IE);
 }
 
 /*
  * Routine to copy MD stuff from proc to proc on a fork.
  * For mips, this is the ABI and "32 bit process on a 64 bit kernel" flag.
  */
 void
 cpu_proc_fork(struct proc *p1, struct proc *p2)
 {
 	p2->p_md.md_abi = p1->p_md.md_abi;
 }
 
 static struct evcnt uarea_remapped =
 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "uarea", "remapped");
 static struct evcnt uarea_reallocated =
 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "uarea", "reallocated");
 EVCNT_ATTACH_STATIC(uarea_remapped);
 EVCNT_ATTACH_STATIC(uarea_reallocated);
 
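/*
 * Usage note (not part of the commit): the two event counters attached
 * above appear in the output of "vmstat -e" as "uarea remapped" and
 * "uarea reallocated", which gives a quick way to see how often uareas
 * had to be copied to new, direct-mappable pages at runtime.
 */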
 void
 cpu_uarea_remap(struct lwp *l)
 {
 	bool uarea_ok;
 	vaddr_t va;
 	paddr_t pa;
 	struct pcb *pcb = lwp_getpcb(l);
 
 	/*
 	 * Grab the starting physical address of the uarea.
 	 */
 	va = (vaddr_t)l->l_addr;
+	if (MIPS_KSEG0_P(va))
+		return;
+#ifdef _LP64
+	if (MIPS_XKPHYS_P(va))
+		return;
+#elif defined(ENABLE_MIPS_KSEGX)
+	if (VM_KSEGX_ADDRESS <= va && va < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE)
+		return;
+#endif
+
 	if (!pmap_extract(pmap_kernel(), va, &pa))
 		panic("%s: pmap_extract(%#"PRIxVADDR") failed", __func__, va);
 
 	/*
 	 * Check to see if the existing uarea is physically contiguous.
 	 */
 	uarea_ok = true;
 	for (vaddr_t i = PAGE_SIZE; uarea_ok && i < USPACE; i += PAGE_SIZE) {
 		paddr_t pa0;
 		if (!pmap_extract(pmap_kernel(), va + i, &pa0))
 			panic("%s: pmap_extract(%#"PRIxVADDR") failed",
 			    __func__, va + i);
 		uarea_ok = (pa0 - pa == i);
 	}
 
 #ifndef _LP64
 	/*
 	 * If this is a 32-bit kernel, it needs to be mappable via KSEG0
 	 */
 	uarea_ok = uarea_ok && (pa + USPACE - 1 <= MIPS_PHYS_MASK);
 #endif
 	KASSERTMSG(pcb->pcb_context.val[_L_SP] == (intptr_t)l->l_md.md_utf,
 	    ("%s: %s (%#"PRIxREGISTER") != %s (%p)",
 	    __func__,
 	    "pcb->pcb_context.val[_L_SP]", pcb->pcb_context.val[_L_SP],
 	    "(intptr_t)l->l_md.md_utf", l->l_md.md_utf));
 
 	if (!uarea_ok) {
 		struct pglist pglist;
 #ifdef _LP64
 		const paddr_t high = mips_avail_end;
 #else
 		const paddr_t high = MIPS_KSEG1_START - MIPS_KSEG0_START;
 #endif
 		int error;
 
 		/*
 		 * Allocate a new physically contiguous uarea which can be
 		 * direct-mapped.
 		 */
 		error = uvm_pglistalloc(USPACE, mips_avail_start, high,
 		    USPACE_ALIGN, 0, &pglist, 1, 1);
 		if (error)
 			panic("%s: uvm_pglistalloc failed: %d", __func__,
 			    error);
 
 		/*
 		 * Get the physical address from the first page.
 		 */
 		pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
 	}
 
 	/*
 	 * Now set the new uarea (if it's different).  If l->l_addr was already
 	 * a direct-mapped address then this routine really won't change anything,
 	 * but that's not probable so don't micro-optimize for it.
 	 */
 #ifdef _LP64
 	va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
 #else
 	va = MIPS_PHYS_TO_KSEG0(pa);
 #endif
 	if (!uarea_ok) {
 		/*
 		 * Copy the trapframe and pcb from the old uarea to the new.
 		 */
 		((struct trapframe *)(va + USPACE))[-1] = *l->l_md.md_utf;
 		*(struct pcb *)va = *pcb;
 		/*
 		 * Discard the old uarea.
 		 */
 		uvm_uarea_free(USER_TO_UAREA(l->l_addr), curcpu());
 		uarea_reallocated.ev_count++;
 	}
 
 	l->l_addr = (struct user *)va;
 	l->l_md.md_utf = (struct trapframe *)(va + USPACE) - 1;
 	pcb = lwp_getpcb(l);
 	pcb->pcb_context.val[_L_SP] = (vaddr_t)l->l_md.md_utf;
 	uarea_remapped.ev_count++;
 }
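/*
 * Worked example (not part of the commit) of the 32-bit constraint checked
 * above: KSEG0 reaches only the first 512 MB of physical memory
 * (MIPS_PHYS_MASK == 0x1fffffff).  Assuming 4 KB pages and
 * USPACE = 2 * PAGE_SIZE = 8 KB (machine-dependent values, used here purely
 * for illustration):
 *
 *   pa = 0x1fffe000 -> pa + USPACE - 1 = 0x1fffffff  fits, usable as-is
 *   pa = 0x1ffff000 -> pa + USPACE - 1 = 0x20000fff  exceeds MIPS_PHYS_MASK,
 *                      so a replacement uarea is taken from uvm_pglistalloc()
 *                      below the KSEG0 limit and the old one is freed.
 */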
 
 /*
  * Finish a swapin operation.
  * We need to update the cached PTEs for the user area in the
  * machine dependent part of the proc structure.
  */
 void
 cpu_swapin(struct lwp *l)
 {
 	pt_entry_t *pte;
 	int i, x;
+	vaddr_t kva = (vaddr_t) lwp_getpcb(l);
+
+#ifdef _LP64
+	if (MIPS_XKPHYS_P(kva))
+		return;
+#else
+	if (MIPS_KSEG0_P(kva))
+		return;
+
+#ifdef ENABLE_MIPS_KSEGX
+	if (VM_KSEGX_ADDRESS <= kva && kva < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE)
+		return;
+#endif
+#endif
 
 	/*
 	 * Cache the PTEs for the user area in the machine dependent
 	 * part of the proc struct so cpu_switchto() can quickly map
 	 * in the user struct and kernel stack.
 	 */
 	x = (MIPS_HAS_R4K_MMU) ?
 	    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) :
 	    MIPS1_PG_G;
-	pte = kvtopte(l->l_addr);
+	pte = kvtopte(kva);
 	for (i = 0; i < UPAGES; i++)
 		l->l_md.md_upte[i] = pte[i].pt_entry &~ x;
 }
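/*
 * Sketch (not part of the commit): the same strip mask is built here and in
 * cpu_lwp_fork() above; a hypothetical helper naming it might look like the
 * following.  The global, read-only and wired bits are masked off before
 * the uarea PTEs are cached in md_upte[].
 */
static inline uint32_t
uarea_upte_strip_mask(void)
{
	return MIPS_HAS_R4K_MMU ?
	    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) : MIPS1_PG_G;
}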
 
 void
 cpu_lwp_free(struct lwp *l, int proc)
 {
 	KASSERT(l == curlwp);
 
 #ifndef NOFPU
 	fpu_discard();
 
 	KASSERT(l->l_fpcpu == NULL);
 	KASSERT(curcpu()->ci_fpcurlwp != l);
 #endif
 }
 
 vaddr_t
 cpu_lwp_pc(struct lwp *l)
 {
 	return l->l_md.md_utf->tf_regs[_R_PC];
 }
 
 void
 cpu_lwp_free2(struct lwp *l)
 {
 
 	(void)l;
 }
 
 #ifdef COREDUMP
 /*
  * Dump the machine specific segment at the start of a core dump.
  */
 int
 cpu_coredump(struct lwp *l, void *iocookie, struct core *chdr)
 {
 	int error;
 	struct coreseg cseg;
 	struct cpustate {
 		struct trapframe frame;
 		struct fpreg fpregs;
 	} cpustate;
 
 	if (iocookie == NULL) {
 		CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
 		chdr->c_hdrsize = ALIGN(sizeof(struct core));
 		chdr->c_seghdrsize = ALIGN(sizeof(struct coreseg));
 		chdr->c_cpusize = sizeof(struct cpustate);
 		chdr->c_nseg++;
 		return 0;
 	}
 
 	fpu_save_lwp(l);
 
 	struct pcb * const pcb = lwp_getpcb(l);
 	cpustate.frame = *l->l_md.md_utf;
 	cpustate.fpregs = pcb->pcb_fpregs;
 
 	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
 	cseg.c_addr = 0;
 	cseg.c_size = chdr->c_cpusize;
 
 	error = coredump_write(iocookie, UIO_SYSSPACE, &cseg,
 	    chdr->c_seghdrsize);
 	if (error)
 		return error;
 
 	return coredump_write(iocookie, UIO_SYSSPACE, &cpustate,
 	    chdr->c_cpusize);
 }
 #endif
 
 /*
  * Map a user I/O request into kernel virtual address space.
  */
 void
 vmapbuf(struct buf *bp, vsize_t len)
 {
 	struct pmap *upmap;
 	vaddr_t uva;	/* User VA (map from) */
 	vaddr_t kva;	/* Kernel VA (new to) */
 	paddr_t pa;	/* physical address */
 	vsize_t off;
 
 	if ((bp->b_flags & B_PHYS) == 0)
 		panic("vmapbuf");
 
 	uva = mips_trunc_page(bp->b_saveaddr = bp->b_data);
 	off = (vaddr_t)bp->b_data - uva;
 	len = mips_round_page(off + len);
 	kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
 	    UVM_FLAG_COLORMATCH | UVM_KMF_VAONLY | UVM_KMF_WAITVA);
 	bp->b_data = (void *)(kva + off);
 	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
 	do {
 		if (pmap_extract(upmap, uva, &pa) == false)
 			panic("vmapbuf: null page frame");
 		pmap_enter(vm_map_pmap(phys_map), kva, pa,
 		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
 		uva += PAGE_SIZE;
 		kva += PAGE_SIZE;
 		len -= PAGE_SIZE;
 	} while (len);
 	pmap_update(vm_map_pmap(phys_map));
 }
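/*
 * Illustration (not part of the commit) of the cache-color computation
 * above: with UVM_FLAG_COLORMATCH, uvm_km_alloc() returns a kernel VA whose
 * page color matches the user address, so the temporary kernel mapping and
 * the user mapping index the same lines of a virtually indexed cache and
 * cannot alias.  Assuming 4 KB pages and four page colors
 * (uvmexp.colormask == 3, an assumed configuration):
 *
 *   uva = 0x00403000  ->  atop(uva) = 0x403  ->  color = 0x403 & 3 = 3
 *
 * so the kernel VA chosen for the mapping also gets page color 3.
 */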
 
 /*
  * Unmap a previously-mapped user I/O request.
  */
 void
 vunmapbuf(struct buf *bp, vsize_t len)
 {
 	vaddr_t kva;
 	vsize_t off;
 
 	if ((bp->b_flags & B_PHYS) == 0)
 		panic("vunmapbuf");
 
 	kva = mips_trunc_page(bp->b_data);
 	off = (vaddr_t)bp->b_data - kva;
 	len = mips_round_page(off + len);
 	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
 	pmap_update(pmap_kernel());
 	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
 	bp->b_data = bp->b_saveaddr;
 	bp->b_saveaddr = NULL;
 }
 
 /*
  * Map a (kernel) virtual address to a physical address.
  *
  * MIPS processor has 3 distinct kernel address ranges:
  *
  * - kseg0 kernel "virtual address" for the cached physical address space.
  * - kseg1 kernel "virtual address" for the uncached physical address space.
  * - kseg2 normal kernel "virtual address" mapped via the TLB.
  */
 paddr_t
 kvtophys(vaddr_t kva)
 {
 	pt_entry_t *pte;
 	paddr_t phys;
 
 	if (kva >= VM_MIN_KERNEL_ADDRESS) {
 		if (kva >= VM_MAX_KERNEL_ADDRESS)
 			goto overrun;
 
 #ifdef ENABLE_MIPS_KSEGX
 		if (VM_KSEGX_ADDRESS <= kva
 		    && kva < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE) {
 			return mips_ksegx_start + kva - VM_KSEGX_ADDRESS;
 		}
 #endif
 
 		pte = kvtopte(kva);
 		if ((size_t) (pte - Sysmap) >= Sysmapsize) {
 			printf("oops: Sysmap overrun, max %d index %zd\n",
 			    Sysmapsize, pte - Sysmap);
 		}
 		if (!mips_pg_v(pte->pt_entry)) {
 			printf("kvtophys: pte not valid for %#"PRIxVADDR"\n",
 			    kva);
 		}
 		phys = mips_tlbpfn_to_paddr(pte->pt_entry) | (kva & PGOFSET);
 		return phys;
 	}
 	if (MIPS_KSEG1_P(kva))
 		return MIPS_KSEG1_TO_PHYS(kva);
 
 	if (MIPS_KSEG0_P(kva))
 		return MIPS_KSEG0_TO_PHYS(kva);
 #ifdef _LP64
 	if (MIPS_XKPHYS_P(kva))
 		return MIPS_XKPHYS_TO_PHYS(kva);
 #endif
 overrun:
 	printf("Virtual address %#"PRIxVADDR": cannot map to physical\n", kva);
 #ifdef DDB
 	Debugger();
 	return 0;	/* XXX */
 #endif
 	panic("kvtophys");
 }
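For reference, the fixed translations that the tail of kvtophys() relies on follow directly from the standard MIPS segment layout, for example:

	kvtophys(0x80123456) == 0x00123456	(KSEG0, cached direct map)
	kvtophys(0xa0123456) == 0x00123456	(KSEG1, uncached direct map)

Addresses at or above VM_MIN_KERNEL_ADDRESS are TLB-mapped and go through Sysmap (or, with ENABLE_MIPS_KSEGX, the KSEGX window), as handled in the first half of the function.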