Thu Dec 1 23:59:14 2011 UTC
Add code to deal with direct mapped uareas.


(matt)
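
The commit adds the same three-way test in three places (cpu_lwp_fork(),
cpu_uarea_remap() and cpu_swapin()): a uarea KVA counts as direct mapped if
it lies in KSEG0, in XKPHYS on _LP64 kernels, or in the optional KSEGX
window when ENABLE_MIPS_KSEGX is defined. Folded into one predicate the
test would read roughly as below; this helper is a sketch, not part of the
commit, but it uses only macros the diff itself relies on:

static inline bool
uarea_direct_mapped_p(vaddr_t va)
{

	if (MIPS_KSEG0_P(va))		/* cached KSEG0 direct map */
		return true;
#ifdef _LP64
	if (MIPS_XKPHYS_P(va))		/* 64-bit XKPHYS direct map */
		return true;
#endif
#ifdef ENABLE_MIPS_KSEGX
	/* optional extra direct-mapped segment */
	if (VM_KSEGX_ADDRESS <= va && va < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE)
		return true;
#endif
	return false;
}
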
diff -r1.121.6.1.2.22 -r1.121.6.1.2.23 src/sys/arch/mips/mips/vm_machdep.c

--- src/sys/arch/mips/mips/vm_machdep.c 2011/11/29 07:48:31 1.121.6.1.2.22
+++ src/sys/arch/mips/mips/vm_machdep.c 2011/12/01 23:59:14 1.121.6.1.2.23
@@ -112,26 +112,31 @@ cpu_lwp_fork(struct lwp *l1, struct lwp
 	 */
 	vaddr_t ua2 = (vaddr_t)l2->l_addr;
 	tf = (struct trapframe *)(ua2 + USPACE) - 1;
 	*tf = *l1->l_md.md_utf;
 
 	/* If specified, set a different user stack for a child. */
 	if (stack != NULL)
 		tf->tf_regs[_R_SP] = (intptr_t)stack + stacksize;
 
 	l2->l_md.md_utf = tf;
 	l2->l_md.md_flags = l1->l_md.md_flags & MDP_FPUSED;
 
 	bool direct_mapped_p = MIPS_KSEG0_P(ua2);
+#ifdef ENABLE_MIPS_KSEGX
+	if (!direct_mapped_p)
+		direct_mapped_p = VM_KSEGX_ADDRESS <= ua2
+		    && ua2 < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE;
+#endif
 #ifdef _LP64
 	direct_mapped_p = direct_mapped_p || MIPS_XKPHYS_P(ua2);
 #endif
 	if (!direct_mapped_p) {
 		pt_entry_t * const pte = kvtopte(ua2);
 		const uint32_t x = (MIPS_HAS_R4K_MMU) ?
 		    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) : MIPS1_PG_G;
 
 		for (u_int i = 0; i < UPAGES; i++) {
 			l2->l_md.md_upte[i] = pte[i].pt_entry &~ x;
 		}
 	}
 
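
For a uarea that is not direct mapped, cpu_lwp_fork() above caches the
uarea PTEs in l_md.md_upte with the global, read-only and wired attribute
bits cleared (only the global bit on MIPS1-style MMUs); as the comment in
cpu_swapin() below puts it, this lets cpu_switchto() quickly map in the
user struct and kernel stack. cpu_swapin() repeats the same loop, so a
shared helper would be a natural follow-up; a sketch only, not committed
code:

static inline void
uarea_cache_ptes(struct lwp *l, vaddr_t kva)
{
	/* attribute bits to strip from the cached entries */
	const uint32_t x = (MIPS_HAS_R4K_MMU) ?
	    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) : MIPS1_PG_G;
	pt_entry_t * const pte = kvtopte(kva);

	for (u_int i = 0; i < UPAGES; i++)
		l->l_md.md_upte[i] = pte[i].pt_entry & ~x;
}
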
@@ -185,26 +190,36 @@ cpu_uarea_remap(struct lwp *l) EVCNT_ATTACH_STATIC(uarea_reallocated);
 
 void
 cpu_uarea_remap(struct lwp *l)
 {
 	bool uarea_ok;
 	vaddr_t va;
 	paddr_t pa;
 	struct pcb *pcb = lwp_getpcb(l);
 
 	/*
 	 * Grab the starting physical address of the uarea.
 	 */
 	va = (vaddr_t)l->l_addr;
+	if (MIPS_KSEG0_P(va))
+		return;
+#ifdef _LP64
+	if (MIPS_XKPHYS_P(va))
+		return;
+#elif defined(ENABLE_MIPS_KSEGX)
+	if (VM_KSEGX_ADDRESS <= va && va < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE)
+		return;
+#endif
+
 	if (!pmap_extract(pmap_kernel(), va, &pa))
 		panic("%s: pmap_extract(%#"PRIxVADDR") failed", __func__, va);
 
 	/*
 	 * Check to see if the existing uarea is physically contiguous.
 	 */
 	uarea_ok = true;
 	for (vaddr_t i = PAGE_SIZE; uarea_ok && i < USPACE; i += PAGE_SIZE) {
 		paddr_t pa0;
 		if (!pmap_extract(pmap_kernel(), va + i, &pa0))
 			panic("%s: pmap_extract(%#"PRIxVADDR") failed",
 			    __func__, va+i);
 		uarea_ok = (pa0 - pa == i);
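
cpu_uarea_remap() now returns immediately for a direct-mapped uarea; for a
TLB-mapped one it still walks the uarea page by page, checking that the
page at offset i maps to the physical address of page 0 plus i. A
self-contained model of that check follows; kva_to_pa(), PAGE_SIZE and the
two-page USPACE are hypothetical stand-ins for pmap_extract(pmap_kernel(),
...) and the kernel's real constants:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE	4096U			/* assumed page size */
#define USPACE		(2 * PAGE_SIZE)		/* assumed two-page uarea */

/* hypothetical stand-in for pmap_extract(pmap_kernel(), va, &pa) */
bool kva_to_pa(uintptr_t va, uint64_t *pa);

static bool
uarea_contiguous_p(uintptr_t va)
{
	uint64_t pa, pa0;

	if (!kva_to_pa(va, &pa))
		return false;
	for (uintptr_t off = PAGE_SIZE; off < USPACE; off += PAGE_SIZE) {
		if (!kva_to_pa(va + off, &pa0) || pa0 - pa != off)
			return false;
	}
	return true;
}
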
@@ -277,36 +292,50 @@ cpu_uarea_remap(struct lwp *l)
 	uarea_remapped.ev_count++;
 }
 
 /*
  * Finish a swapin operation.
  * We need to update the cached PTEs for the user area in the
  * machine dependent part of the proc structure.
  */
 void
 cpu_swapin(struct lwp *l)
 {
 	pt_entry_t *pte;
 	int i, x;
+	vaddr_t kva = (vaddr_t) lwp_getpcb(l);
+
+#ifdef _LP64
+	if (MIPS_XKPHYS_P(kva))
+		return;
+#else
+	if (MIPS_KSEG0_P(kva))
+		return;
+
+#ifdef ENABLE_MIPS_KSEGX
+	if (VM_KSEGX_ADDRESS <= kva && kva < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE)
+		return;
+#endif
+#endif
 
 	/*
 	 * Cache the PTEs for the user area in the machine dependent
 	 * part of the proc struct so cpu_switchto() can quickly map
 	 * in the user struct and kernel stack.
 	 */
 	x = (MIPS_HAS_R4K_MMU) ?
 	    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) :
 	    MIPS1_PG_G;
-	pte = kvtopte(l->l_addr);
+	pte = kvtopte(kva);
 	for (i = 0; i < UPAGES; i++)
 		l->l_md.md_upte[i] = pte[i].pt_entry &~ x;
 }
 
 void
 cpu_lwp_free(struct lwp *l, int proc)
 {
 	KASSERT(l == curlwp);
 
 #ifndef NOFPU
 	fpu_discard();
 
 	KASSERT(l->l_fpcpu == NULL);
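
Taken together, the post-commit cpu_swapin() amounts to an early return
for a direct-mapped uarea followed by the PTE refresh. With the two
helpers sketched above it could be written as follows (a sketch of the
shape of the code, not the committed text; note the committed version
tests only XKPHYS under _LP64, while the helper also accepts KSEG0
addresses there):

void
cpu_swapin(struct lwp *l)
{
	const vaddr_t kva = (vaddr_t)lwp_getpcb(l);

	/* a direct-mapped uarea needs no cached PTEs */
	if (uarea_direct_mapped_p(kva))
		return;

	/* refresh the per-LWP PTE cache used by cpu_switchto() */
	uarea_cache_ptes(l, kva);
}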