Wed Dec 2 15:51:13 2009 UTC ()
Use common pmap_bootstrap_finalize() to initialize lwp0 uarea etc.
Tested on HP9000/382.


(tsutsui)
diff -r1.147 -r1.148 src/sys/arch/hp300/hp300/locore.s
diff -r1.39 -r1.40 src/sys/arch/hp300/hp300/pmap_bootstrap.c

cvs diff -r1.147 -r1.148 src/sys/arch/hp300/hp300/locore.s (expand / switch to unified diff)

--- src/sys/arch/hp300/hp300/locore.s 2009/11/26 00:19:17 1.147
+++ src/sys/arch/hp300/hp300/locore.s 2009/12/02 15:51:12 1.148
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: locore.s,v 1.147 2009/11/26 00:19:17 matt Exp $ */ 1/* $NetBSD: locore.s,v 1.148 2009/12/02 15:51:12 tsutsui Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1980, 1990, 1993 4 * Copyright (c) 1980, 1990, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer 8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department. 9 * Science Department.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -542,34 +542,33 @@ Lhpmmu3: @@ -542,34 +542,33 @@ Lhpmmu3:
542Lehighcode: 542Lehighcode:
543 543
544 /* 544 /*
545 * END MMU TRAMPOLINE. Address register %a5 is now free. 545 * END MMU TRAMPOLINE. Address register %a5 is now free.
546 */ 546 */
547 547
548/* 548/*
549 * Should be running mapped from this point on 549 * Should be running mapped from this point on
550 */ 550 */
551Lenab1: 551Lenab1:
552/* select the software page size now */ 552/* select the software page size now */
553 lea _ASM_LABEL(tmpstk),%sp | temporary stack 553 lea _ASM_LABEL(tmpstk),%sp | temporary stack
554 jbsr _C_LABEL(uvm_setpagesize) | select software page size 554 jbsr _C_LABEL(uvm_setpagesize) | select software page size
555/* set kernel stack, user SP, and initial pcb */ 555/* call final pmap setup which initialize lwp0, curlwp, and curpcb */
556 lea _C_LABEL(lwp0),%a2 | grab lwp0.l_addr 556 jbsr _C_LABEL(pmap_bootstrap_finalize)
557 movl %a2@(L_ADDR),%a1 | set kernel stack to end of area  557/* set kernel stack, user SP */
558 lea %a1@(USPACE-4),%sp | and curlwp so that we don't 558 movl _C_LABEL(lwp0uarea),%a1 |
559 movl %a2,_C_LABEL(curlwp) | deref NULL in trap() 559 lea %a1@(USPACE-4),%sp | set kernel stack to end of area
560 movl #USRSTACK-4,%a2 560 movl #USRSTACK-4,%a2
561 movl %a2,%usp | init user SP 561 movl %a2,%usp | init user SP
562 movl %a1,_C_LABEL(curpcb) | lwp0 is running 
563 562
564 tstl _C_LABEL(fputype) | Have an FPU? 563 tstl _C_LABEL(fputype) | Have an FPU?
565 jeq Lenab2 | No, skip. 564 jeq Lenab2 | No, skip.
566 clrl %a1@(PCB_FPCTX) | ensure null FP context 565 clrl %a1@(PCB_FPCTX) | ensure null FP context
567 movl %a1,%sp@- 566 movl %a1,%sp@-
568 jbsr _C_LABEL(m68881_restore) | restore it (does not kill %a1) 567 jbsr _C_LABEL(m68881_restore) | restore it (does not kill %a1)
569 addql #4,%sp 568 addql #4,%sp
570Lenab2: 569Lenab2:
571/* flush TLB and turn on caches */ 570/* flush TLB and turn on caches */
572 jbsr _C_LABEL(_TBIA) | invalidate TLB 571 jbsr _C_LABEL(_TBIA) | invalidate TLB
573 cmpl #MMU_68040,_C_LABEL(mmutype) | 68040? 572 cmpl #MMU_68040,_C_LABEL(mmutype) | 68040?
574 jeq Lnocache0 | yes, cache already on 573 jeq Lnocache0 | yes, cache already on
575 movl #CACHE_ON,%d0 574 movl #CACHE_ON,%d0

cvs diff -r1.39 -r1.40 src/sys/arch/hp300/hp300/pmap_bootstrap.c (expand / switch to unified diff)

--- src/sys/arch/hp300/hp300/pmap_bootstrap.c 2009/11/27 03:23:09 1.39
+++ src/sys/arch/hp300/hp300/pmap_bootstrap.c 2009/12/02 15:51:12 1.40
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap_bootstrap.c,v 1.39 2009/11/27 03:23:09 rmind Exp $ */ 1/* $NetBSD: pmap_bootstrap.c,v 1.40 2009/12/02 15:51:12 tsutsui Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1991, 1993 4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer 8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department. 9 * Science Department.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -26,30 +26,29 @@ @@ -26,30 +26,29 @@
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE. 33 * SUCH DAMAGE.
34 * 34 *
35 * @(#)pmap_bootstrap.c 8.1 (Berkeley) 6/10/93 35 * @(#)pmap_bootstrap.c 8.1 (Berkeley) 6/10/93
36 */ 36 */
37 37
38#include <sys/cdefs.h> 38#include <sys/cdefs.h>
39__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.39 2009/11/27 03:23:09 rmind Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.40 2009/12/02 15:51:12 tsutsui Exp $");
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <sys/proc.h> 
43 42
44#include <machine/frame.h> 43#include <machine/frame.h>
45#include <machine/cpu.h> 44#include <machine/cpu.h>
46#include <machine/hp300spu.h> 45#include <machine/hp300spu.h>
47#include <machine/vmparam.h> 46#include <machine/vmparam.h>
48#include <machine/pte.h> 47#include <machine/pte.h>
49 48
50#include <hp300/hp300/clockreg.h> 49#include <hp300/hp300/clockreg.h>
51 50
52#include <uvm/uvm_extern.h> 51#include <uvm/uvm_extern.h>
53 52
54#define RELOC(v, t) *((t*)((uintptr_t)&(v) + firstpa)) 53#define RELOC(v, t) *((t*)((uintptr_t)&(v) + firstpa))
55#define RELOCPTR(v, t) ((t)((uintptr_t)RELOC((v), t) + firstpa)) 54#define RELOCPTR(v, t) ((t)((uintptr_t)RELOC((v), t) + firstpa))
@@ -84,64 +83,64 @@ void *msgbufaddr; @@ -84,64 +83,64 @@ void *msgbufaddr;
84 * Bootstrap the VM system. 83 * Bootstrap the VM system.
85 * 84 *
86 * Called with MMU off so we must relocate all global references by `firstpa' 85 * Called with MMU off so we must relocate all global references by `firstpa'
87 * (don't call any functions here!) `nextpa' is the first available physical 86 * (don't call any functions here!) `nextpa' is the first available physical
88 * memory address. Returns an updated first PA reflecting the memory we 87 * memory address. Returns an updated first PA reflecting the memory we
89 * have allocated. MMU is still off when we return. 88 * have allocated. MMU is still off when we return.
90 * 89 *
91 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t) 90 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
92 * XXX a PIC compiler would make this much easier. 91 * XXX a PIC compiler would make this much easier.
93 */ 92 */
94void 93void
95pmap_bootstrap(paddr_t nextpa, paddr_t firstpa) 94pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
96{ 95{
97 paddr_t kstpa, kptpa, kptmpa, lkptpa, p0upa; 96 paddr_t kstpa, kptpa, kptmpa, lkptpa, lwp0upa;
98 u_int nptpages, kstsize; 97 u_int nptpages, kstsize;
99 st_entry_t protoste, *ste; 98 st_entry_t protoste, *ste;
100 pt_entry_t protopte, *pte, *epte; 99 pt_entry_t protopte, *pte, *epte;
101 100
102 /* 101 /*
103 * Calculate important physical addresses: 102 * Calculate important physical addresses:
104 * 103 *
105 * kstpa kernel segment table 1 page (!040) 104 * kstpa kernel segment table 1 page (!040)
106 * N pages (040) 105 * N pages (040)
107 * 106 *
108 * kptpa statically allocated 107 * kptpa statically allocated
109 * kernel PT pages Sysptsize+ pages 108 * kernel PT pages Sysptsize+ pages
110 * 109 *
111 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and 110 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
112 * EIOMAPSIZE are the number of PTEs, hence we need to round 111 * EIOMAPSIZE are the number of PTEs, hence we need to round
113 * the total to a page boundary with IO maps at the end. ] 112 * the total to a page boundary with IO maps at the end. ]
114 * 113 *
115 * kptmpa kernel PT map 1 page 114 * kptmpa kernel PT map 1 page
116 * 115 *
117 * lkptpa last kernel PT page 1 page 116 * lkptpa last kernel PT page 1 page
118 * 117 *
119 * p0upa proc 0 u-area UPAGES pages 118 * lwp0upa lwp 0 u-area UPAGES pages
120 * 119 *
121 * The KVA corresponding to any of these PAs is: 120 * The KVA corresponding to any of these PAs is:
122 * (PA - firstpa + KERNBASE). 121 * (PA - firstpa + KERNBASE).
123 */ 122 */
124 if (RELOC(mmutype, int) == MMU_68040) 123 if (RELOC(mmutype, int) == MMU_68040)
125 kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE); 124 kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
126 else 125 else
127 kstsize = 1; 126 kstsize = 1;
128 kstpa = nextpa; 127 kstpa = nextpa;
129 nextpa += kstsize * PAGE_SIZE; 128 nextpa += kstsize * PAGE_SIZE;
130 kptmpa = nextpa; 129 kptmpa = nextpa;
131 nextpa += PAGE_SIZE; 130 nextpa += PAGE_SIZE;
132 lkptpa = nextpa; 131 lkptpa = nextpa;
133 nextpa += PAGE_SIZE; 132 nextpa += PAGE_SIZE;
134 p0upa = nextpa; 133 lwp0upa = nextpa;
135 nextpa += USPACE; 134 nextpa += USPACE;
136 kptpa = nextpa; 135 kptpa = nextpa;
137 nptpages = RELOC(Sysptsize, int) + 136 nptpages = RELOC(Sysptsize, int) +
138 (IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG; 137 (IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
139 nextpa += nptpages * PAGE_SIZE; 138 nextpa += nptpages * PAGE_SIZE;
140 139
141 /* 140 /*
142 * Initialize segment table and kernel page table map. 141 * Initialize segment table and kernel page table map.
143 * 142 *
144 * On 68030s and earlier MMUs the two are identical except for 143 * On 68030s and earlier MMUs the two are identical except for
145 * the valid bits so both are initialized with essentially the 144 * the valid bits so both are initialized with essentially the
146 * same values. On the 68040, which has a mandatory 3-level 145 * same values. On the 68040, which has a mandatory 3-level
147 * structure, the segment table holds the level 1 table and part 146 * structure, the segment table holds the level 1 table and part
@@ -320,27 +319,27 @@ pmap_bootstrap(paddr_t nextpa, paddr_t f @@ -320,27 +319,27 @@ pmap_bootstrap(paddr_t nextpa, paddr_t f
320 /* 319 /*
321 * Validate PTEs for kernel text (RO). The first page 320 * Validate PTEs for kernel text (RO). The first page
322 * of kernel text remains invalid; see locore.s 321 * of kernel text remains invalid; see locore.s
323 */ 322 */
324 pte = &((u_int *)kptpa)[m68k_btop(KERNBASE + PAGE_SIZE)]; 323 pte = &((u_int *)kptpa)[m68k_btop(KERNBASE + PAGE_SIZE)];
325 epte = &pte[m68k_btop(m68k_trunc_page(&etext))]; 324 epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
326 protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_V; 325 protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_V;
327 while (pte < epte) { 326 while (pte < epte) {
328 *pte++ = protopte; 327 *pte++ = protopte;
329 protopte += PAGE_SIZE; 328 protopte += PAGE_SIZE;
330 } 329 }
331 /* 330 /*
332 * Validate PTEs for kernel data/bss, dynamic data allocated 331 * Validate PTEs for kernel data/bss, dynamic data allocated
333 * by us so far (nextpa - firstpa bytes), and pages for proc0 332 * by us so far (nextpa - firstpa bytes), and pages for lwp0
334 * u-area and page table allocated below (RW). 333 * u-area and page table allocated below (RW).
335 */ 334 */
336 epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)]; 335 epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)];
337 protopte = (protopte & ~PG_PROT) | PG_RW; 336 protopte = (protopte & ~PG_PROT) | PG_RW;
338 /* 337 /*
339 * Enable copy-back caching of data pages 338 * Enable copy-back caching of data pages
340 */ 339 */
341 if (RELOC(mmutype, int) == MMU_68040) 340 if (RELOC(mmutype, int) == MMU_68040)
342 protopte |= PG_CCB; 341 protopte |= PG_CCB;
343 while (pte < epte) { 342 while (pte < epte) {
344 *pte++ = protopte; 343 *pte++ = protopte;
345 protopte += PAGE_SIZE; 344 protopte += PAGE_SIZE;
346 } 345 }
@@ -385,41 +384,41 @@ pmap_bootstrap(paddr_t nextpa, paddr_t f @@ -385,41 +384,41 @@ pmap_bootstrap(paddr_t nextpa, paddr_t f
385 */ 384 */
386 RELOC(Sysmap, pt_entry_t *) = 385 RELOC(Sysmap, pt_entry_t *) =
387 (pt_entry_t *)m68k_ptob((NPTEPG - 2) * NPTEPG); 386 (pt_entry_t *)m68k_ptob((NPTEPG - 2) * NPTEPG);
388 /* 387 /*
389 * CLKbase, MMUbase: important registers in internal IO space 388 * CLKbase, MMUbase: important registers in internal IO space
390 * accessed from assembly language. 389 * accessed from assembly language.
391 */ 390 */
392 RELOC(CLKbase, vaddr_t) = 391 RELOC(CLKbase, vaddr_t) =
393 (vaddr_t)RELOC(intiobase, char *) + CLKBASE; 392 (vaddr_t)RELOC(intiobase, char *) + CLKBASE;
394 RELOC(MMUbase, vaddr_t) = 393 RELOC(MMUbase, vaddr_t) =
395 (vaddr_t)RELOC(intiobase, char *) + MMUBASE; 394 (vaddr_t)RELOC(intiobase, char *) + MMUBASE;
396 395
397 /* 396 /*
398 * Setup u-area for process 0. 397 * Setup u-area for lwp 0.
399 */ 398 */
400 /* 399 /*
401 * Zero the u-area. 400 * Zero the u-area.
402 * NOTE: `pte' and `epte' aren't PTEs here. 401 * NOTE: `pte' and `epte' aren't PTEs here.
403 */ 402 */
404 pte = (u_int *)p0upa; 403 pte = (u_int *)lwp0upa;
405 epte = (u_int *)(p0upa + USPACE); 404 epte = (u_int *)(lwp0upa + USPACE);
406 while (pte < epte) 405 while (pte < epte)
407 *pte++ = 0; 406 *pte++ = 0;
408 /* 407 /*
409 * Remember the u-area address so it can be loaded in the 408 * Remember the u-area address so it can be loaded in the lwp0
410 * proc struct p_addr field later. 409 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
411 */ 410 */
412 RELOC(lwp0.l_addr, struct user *) = (struct user *)(p0upa - firstpa); 411 RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa;
413 412
414 /* 413 /*
415 * VM data structures are now initialized, set up data for 414 * VM data structures are now initialized, set up data for
416 * the pmap module. 415 * the pmap module.
417 * 416 *
418 * Note about avail_end: msgbuf is initialized just after 417 * Note about avail_end: msgbuf is initialized just after
419 * avail_end in machdep.c. Since the last page is used 418 * avail_end in machdep.c. Since the last page is used
 420	 * for rebooting the system (code is copied there and	 419	 * for rebooting the system (code is copied there and
 421	 * execution continues from copied code before the MMU	 420	 * execution continues from copied code before the MMU
422 * is disabled), the msgbuf will get trounced between 421 * is disabled), the msgbuf will get trounced between
423 * reboots if it's placed in the last physical page. 422 * reboots if it's placed in the last physical page.
424 * To work around this, we move avail_end back one more 423 * To work around this, we move avail_end back one more
425 * page so the msgbuf can be preserved. 424 * page so the msgbuf can be preserved.