Mon Mar 5 12:15:24 2012 UTC ()
Increase kernel VM space.
Fixed a problem where the WS011SH (and probably other 128MB-RAM models) could not boot.


(nonaka)
diff -r1.13 -r1.14 src/sys/arch/hpcarm/hpcarm/pxa2x0_hpc_machdep.c

cvs diff -r1.13 -r1.14 src/sys/arch/hpcarm/hpcarm/pxa2x0_hpc_machdep.c (switch to unified diff)

--- src/sys/arch/hpcarm/hpcarm/pxa2x0_hpc_machdep.c 2011/07/19 15:37:39 1.13
+++ src/sys/arch/hpcarm/hpcarm/pxa2x0_hpc_machdep.c 2012/03/05 12:15:23 1.14
@@ -1,748 +1,754 @@ @@ -1,748 +1,754 @@
1/* $NetBSD: pxa2x0_hpc_machdep.c,v 1.13 2011/07/19 15:37:39 dyoung Exp $ */ 1/* $NetBSD: pxa2x0_hpc_machdep.c,v 1.14 2012/03/05 12:15:23 nonaka Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1994-1998 Mark Brinicombe. 4 * Copyright (c) 1994-1998 Mark Brinicombe.
5 * Copyright (c) 1994 Brini. 5 * Copyright (c) 1994 Brini.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software written for Brini by Mark Brinicombe 8 * This code is derived from software written for Brini by Mark Brinicombe
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software 18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement: 19 * must display the following acknowledgement:
20 * This product includes software developed by Brini. 20 * This product includes software developed by Brini.
21 * 4. The name of the company nor the name of the author may be used to 21 * 4. The name of the company nor the name of the author may be used to
22 * endorse or promote products derived from this software without specific 22 * endorse or promote products derived from this software without specific
23 * prior written permission. 23 * prior written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED 25 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 28 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
29 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 29 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE. 35 * SUCH DAMAGE.
36 */ 36 */
37 37
38/* 38/*
39 * Machine dependent functions for kernel setup. 39 * Machine dependent functions for kernel setup.
40 */ 40 */
41 41
42#include <sys/cdefs.h> 42#include <sys/cdefs.h>
43__KERNEL_RCSID(0, "$NetBSD: pxa2x0_hpc_machdep.c,v 1.13 2011/07/19 15:37:39 dyoung Exp $"); 43__KERNEL_RCSID(0, "$NetBSD: pxa2x0_hpc_machdep.c,v 1.14 2012/03/05 12:15:23 nonaka Exp $");
44 44
45#include "opt_ddb.h" 45#include "opt_ddb.h"
46#include "opt_dram_pages.h" 46#include "opt_dram_pages.h"
47#include "opt_modular.h" 47#include "opt_modular.h"
48#include "opt_pmap_debug.h" 48#include "opt_pmap_debug.h"
49#include "ksyms.h" 49#include "ksyms.h"
50 50
51#include <sys/param.h> 51#include <sys/param.h>
52#include <sys/systm.h> 52#include <sys/systm.h>
53#include <sys/kernel.h> 53#include <sys/kernel.h>
54#include <sys/reboot.h> 54#include <sys/reboot.h>
55#include <sys/proc.h> 55#include <sys/proc.h>
56#include <sys/msgbuf.h> 56#include <sys/msgbuf.h>
57#include <sys/exec.h> 57#include <sys/exec.h>
58#include <sys/ksyms.h> 58#include <sys/ksyms.h>
59#include <sys/boot_flag.h> 59#include <sys/boot_flag.h>
60#include <sys/conf.h> /* XXX for consinit related hacks */ 60#include <sys/conf.h> /* XXX for consinit related hacks */
61#include <sys/device.h> 61#include <sys/device.h>
62#include <sys/bus.h> 62#include <sys/bus.h>
63 63
64#if NKSYMS || defined(DDB) || defined(MODULAR) 64#if NKSYMS || defined(DDB) || defined(MODULAR)
65#include <machine/db_machdep.h> 65#include <machine/db_machdep.h>
66#include <ddb/db_sym.h> 66#include <ddb/db_sym.h>
67#include <ddb/db_extern.h> 67#include <ddb/db_extern.h>
68#ifndef DB_ELFSIZE 68#ifndef DB_ELFSIZE
69#error Must define DB_ELFSIZE! 69#error Must define DB_ELFSIZE!
70#endif 70#endif
71#define ELFSIZE DB_ELFSIZE 71#define ELFSIZE DB_ELFSIZE
72#include <sys/exec_elf.h> 72#include <sys/exec_elf.h>
73#endif 73#endif
74 74
75#include <uvm/uvm.h> 75#include <uvm/uvm.h>
76 76
77#include <arm/xscale/pxa2x0cpu.h> 77#include <arm/xscale/pxa2x0cpu.h>
78#include <arm/xscale/pxa2x0reg.h> 78#include <arm/xscale/pxa2x0reg.h>
79#include <arm/xscale/pxa2x0var.h> 79#include <arm/xscale/pxa2x0var.h>
80#include <arm/xscale/pxa2x0_gpio.h> 80#include <arm/xscale/pxa2x0_gpio.h>
81#include <arm/cpuconf.h> 81#include <arm/cpuconf.h>
82#include <arm/undefined.h> 82#include <arm/undefined.h>
83 83
84#include <machine/bootconfig.h> 84#include <machine/bootconfig.h>
85#include <machine/bootinfo.h> 85#include <machine/bootinfo.h>
86#include <machine/cpu.h> 86#include <machine/cpu.h>
87#include <machine/frame.h> 87#include <machine/frame.h>
88#include <machine/intr.h> 88#include <machine/intr.h>
89#include <machine/io.h> 89#include <machine/io.h>
90#include <machine/platid.h> 90#include <machine/platid.h>
91#include <machine/rtc.h> 91#include <machine/rtc.h>
92#include <machine/signal.h> 92#include <machine/signal.h>
93 93
94#include <dev/hpc/apm/apmvar.h> 94#include <dev/hpc/apm/apmvar.h>
95#include <dev/ic/comreg.h> 95#include <dev/ic/comreg.h>
96 96
97#include <sys/mount.h> 97#include <sys/mount.h>
98#include <nfs/rpcv2.h> 98#include <nfs/rpcv2.h>
99#include <nfs/nfsproto.h> 99#include <nfs/nfsproto.h>
100#include <nfs/nfs.h> 100#include <nfs/nfs.h>
101#include <nfs/nfsmount.h> 101#include <nfs/nfsmount.h>
102 102
103/* Kernel text starts 2MB in from the bottom of the kernel address space. */ 103/* Kernel text starts 2MB in from the bottom of the kernel address space. */
104#define KERNEL_TEXT_BASE (KERNEL_BASE + 0x00200000) 104#define KERNEL_TEXT_BASE (KERNEL_BASE + 0x00200000)
105#define KERNEL_VM_BASE (KERNEL_BASE + 0x00C00000) 105#ifndef KERNEL_VM_BASE
106#define KERNEL_VM_SIZE 0x05000000 106#define KERNEL_VM_BASE (KERNEL_BASE + 0x01000000)
 107#endif
 108
 109/*
 110 * The range 0xc1000000 - 0xccffffff is available for kernel VM space
 111 * Core-logic registers and I/O mappings occupy 0xfd000000 - 0xffffffff
 112 */
 113#define KERNEL_VM_SIZE 0x0c000000
107 114
108/* 115/*
109 * Address to call from cpu_reset() to reset the machine. 116 * Address to call from cpu_reset() to reset the machine.
110 * This is machine architecture dependent as it varies depending 117 * This is machine architecture dependent as it varies depending
111 * on where the ROM appears when you turn the MMU off. 118 * on where the ROM appears when you turn the MMU off.
112 */ 119 */
113u_int cpu_reset_address = 0; 120u_int cpu_reset_address = 0;
114 121
115/* Define various stack sizes in pages */ 122/* Define various stack sizes in pages */
116#define IRQ_STACK_SIZE 1 123#define IRQ_STACK_SIZE 1
117#define ABT_STACK_SIZE 1 124#define ABT_STACK_SIZE 1
118#define UND_STACK_SIZE 1 125#define UND_STACK_SIZE 1
119 126
120extern BootConfig bootconfig; /* Boot config storage */ 127extern BootConfig bootconfig; /* Boot config storage */
121extern struct bootinfo *bootinfo, bootinfo_storage; 128extern struct bootinfo *bootinfo, bootinfo_storage;
122extern char booted_kernel_storage[80]; 129extern char booted_kernel_storage[80];
123extern char *booted_kernel; 130extern char *booted_kernel;
124 131
125extern paddr_t physical_start; 132extern paddr_t physical_start;
126extern paddr_t physical_freestart; 133extern paddr_t physical_freestart;
127extern paddr_t physical_freeend; 134extern paddr_t physical_freeend;
128extern paddr_t physical_end; 135extern paddr_t physical_end;
129extern int physmem; 136extern int physmem;
130 137
131/* Physical and virtual addresses for some global pages */ 138/* Physical and virtual addresses for some global pages */
132extern pv_addr_t irqstack; 139extern pv_addr_t irqstack;
133extern pv_addr_t undstack; 140extern pv_addr_t undstack;
134extern pv_addr_t abtstack; 141extern pv_addr_t abtstack;
135extern pv_addr_t kernelstack; 142extern pv_addr_t kernelstack;
136 143
137extern char *boot_args; 144extern char *boot_args;
138extern char boot_file[16]; 145extern char boot_file[16];
139 146
140extern vaddr_t msgbufphys; 147extern vaddr_t msgbufphys;
141 148
142extern u_int data_abort_handler_address; 149extern u_int data_abort_handler_address;
143extern u_int prefetch_abort_handler_address; 150extern u_int prefetch_abort_handler_address;
144extern u_int undefined_handler_address; 151extern u_int undefined_handler_address;
145extern int end; 152extern int end;
146 153
147#ifdef PMAP_DEBUG 154#ifdef PMAP_DEBUG
148extern int pmap_debug_level; 155extern int pmap_debug_level;
149#endif /* PMAP_DEBUG */ 156#endif /* PMAP_DEBUG */
150 157
151#define KERNEL_PT_VMEM 0 /* Page table for mapping video memory */ 158#define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */
152#define KERNEL_PT_SYS 1 /* Page table for mapping proc0 zero page */ 159#define KERNEL_PT_KERNEL 1 /* Page table for mapping kernel */
153#define KERNEL_PT_KERNEL 2 /* Page table for mapping kernel */ 
154#define KERNEL_PT_KERNEL_NUM 4 160#define KERNEL_PT_KERNEL_NUM 4
155#define KERNEL_PT_VMDATA (KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM) 161#define KERNEL_PT_VMDATA (KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
156 /* Page tables for mapping kernel VM */ 162 /* Page tables for mapping kernel VM */
157#define KERNEL_PT_VMDATA_NUM 4 /* start with 16MB of KVM */ 163#define KERNEL_PT_VMDATA_NUM 4 /* start with 16MB of KVM */
158#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM) 164#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
159 165
160pv_addr_t kernel_pt_table[NUM_KERNEL_PTS]; 166pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
161 167
162pv_addr_t minidataclean; 168pv_addr_t minidataclean;
163 169
164/* Prototypes */ 170/* Prototypes */
165void data_abort_handler(trapframe_t *); 171void data_abort_handler(trapframe_t *);
166void prefetch_abort_handler(trapframe_t *); 172void prefetch_abort_handler(trapframe_t *);
167void undefinedinstruction_bounce(trapframe_t *); 173void undefinedinstruction_bounce(trapframe_t *);
168u_int cpu_get_control(void); 174u_int cpu_get_control(void);
169 175
170u_int initarm(int, char **, struct bootinfo *); 176u_int initarm(int, char **, struct bootinfo *);
171 177
172/* Machine dependent initialize function */ 178/* Machine dependent initialize function */
173extern void pxa2x0_machdep_init(void); 179extern void pxa2x0_machdep_init(void);
174 180
175/* Mode dependent sleep function holder */ 181/* Mode dependent sleep function holder */
176extern void (*__sleep_func)(void *); 182extern void (*__sleep_func)(void *);
177extern void *__sleep_ctx; 183extern void *__sleep_ctx;
178 184
179#ifdef DEBUG_BEFOREMMU 185#ifdef DEBUG_BEFOREMMU
180static void fakecninit(void); 186static void fakecninit(void);
181#endif 187#endif
182 188
183/* Number of DRAM pages which are installed */ 189/* Number of DRAM pages which are installed */
184/* Units are 4K pages, so 8192 is 32 MB of memory */ 190/* Units are 4K pages, so 8192 is 32 MB of memory */
185#ifndef DRAM_PAGES 191#ifndef DRAM_PAGES
186#define DRAM_PAGES 8192 192#define DRAM_PAGES 8192
187#endif 193#endif
188 194
189/* 195/*
190 * Static device mappings. These peripheral registers are mapped at 196 * Static device mappings. These peripheral registers are mapped at
191 * fixed virtual addresses very early in initarm() so that we can use 197 * fixed virtual addresses very early in initarm() so that we can use
192 * them while booting the kernel and stay at the same address 198 * them while booting the kernel and stay at the same address
193 * throughout whole kernel's life time. 199 * throughout whole kernel's life time.
194 */ 200 */
195#define PXA2X0_GPIO_VBASE 0xfd000000 201#define PXA2X0_GPIO_VBASE 0xfd000000
196#define PXA2X0_CLKMAN_VBASE 0xfd100000 202#define PXA2X0_CLKMAN_VBASE 0xfd100000
197#define PXA2X0_INTCTL_VBASE 0xfd200000 203#define PXA2X0_INTCTL_VBASE 0xfd200000
198#define PXA2X0_MEMCTL_VBASE 0xfd300000 204#define PXA2X0_MEMCTL_VBASE 0xfd300000
199#define PXA2X0_FFUART_VBASE 0xfd400000 205#define PXA2X0_FFUART_VBASE 0xfd400000
200#define PXA2X0_BTUART_VBASE 0xfd500000 206#define PXA2X0_BTUART_VBASE 0xfd500000
201#define PXA2X0_STUART_VBASE 0xfd600000 207#define PXA2X0_STUART_VBASE 0xfd600000
202 208
203#define _A(a) ((a) & L1_S_FRAME) 209#define _A(a) ((a) & L1_S_FRAME)
204#define _S(s) (((s) + L1_S_SIZE - 1) & L1_S_FRAME) 210#define _S(s) (((s) + L1_S_SIZE - 1) & L1_S_FRAME)
205const struct pmap_devmap pxa2x0_devmap[] = { 211const struct pmap_devmap pxa2x0_devmap[] = {
206 { 212 {
207 PXA2X0_GPIO_VBASE, 213 PXA2X0_GPIO_VBASE,
208 _A(PXA2X0_GPIO_BASE), 214 _A(PXA2X0_GPIO_BASE),
209 _S(PXA2X0_GPIO_SIZE), 215 _S(PXA2X0_GPIO_SIZE),
210 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE, 216 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
211 }, 217 },
212 { 218 {
213 PXA2X0_CLKMAN_VBASE, 219 PXA2X0_CLKMAN_VBASE,
214 _A(PXA2X0_CLKMAN_BASE), 220 _A(PXA2X0_CLKMAN_BASE),
215 _S(PXA2X0_CLKMAN_SIZE), 221 _S(PXA2X0_CLKMAN_SIZE),
216 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE, 222 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
217 }, 223 },
218 { 224 {
219 PXA2X0_INTCTL_VBASE, 225 PXA2X0_INTCTL_VBASE,
220 _A(PXA2X0_INTCTL_BASE), 226 _A(PXA2X0_INTCTL_BASE),
221 _S(PXA2X0_INTCTL_SIZE), 227 _S(PXA2X0_INTCTL_SIZE),
222 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE, 228 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
223 }, 229 },
224 { 230 {
225 PXA2X0_MEMCTL_VBASE, 231 PXA2X0_MEMCTL_VBASE,
226 _A(PXA2X0_MEMCTL_BASE), 232 _A(PXA2X0_MEMCTL_BASE),
227 _S(PXA2X0_MEMCTL_SIZE), 233 _S(PXA2X0_MEMCTL_SIZE),
228 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE, 234 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
229 }, 235 },
230 { 236 {
231 PXA2X0_FFUART_VBASE, 237 PXA2X0_FFUART_VBASE,
232 _A(PXA2X0_FFUART_BASE), 238 _A(PXA2X0_FFUART_BASE),
233 _S(4 * COM_NPORTS), 239 _S(4 * COM_NPORTS),
234 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE, 240 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
235 }, 241 },
236 { 242 {
237 PXA2X0_BTUART_VBASE, 243 PXA2X0_BTUART_VBASE,
238 _A(PXA2X0_BTUART_BASE), 244 _A(PXA2X0_BTUART_BASE),
239 _S(4 * COM_NPORTS), 245 _S(4 * COM_NPORTS),
240 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE, 246 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
241 }, 247 },
242 { 248 {
243 PXA2X0_STUART_VBASE, 249 PXA2X0_STUART_VBASE,
244 _A(PXA2X0_STUART_BASE), 250 _A(PXA2X0_STUART_BASE),
245 _S(4 * COM_NPORTS), 251 _S(4 * COM_NPORTS),
246 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE, 252 VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE,
247 }, 253 },
248 254
249 { 0, 0, 0, 0, 0, } 255 { 0, 0, 0, 0, 0, }
250}; 256};
251#undef _A 257#undef _A
252#undef _S 258#undef _S
253extern const struct pmap_devmap machdep_devmap[]; 259extern const struct pmap_devmap machdep_devmap[];
254 260
255 261
256static inline pd_entry_t * 262static inline pd_entry_t *
257read_ttb(void) 263read_ttb(void)
258{ 264{
259 u_long ttb; 265 u_long ttb;
260 266
261 __asm volatile("mrc p15, 0, %0, c2, c0, 0" : "=r" (ttb)); 267 __asm volatile("mrc p15, 0, %0, c2, c0, 0" : "=r" (ttb));
262 268
263 return (pd_entry_t *)(ttb & ~((1 << 14) - 1)); 269 return (pd_entry_t *)(ttb & ~((1 << 14) - 1));
264} 270}
265 271
266/* 272/*
267 * Initial entry point on startup. This gets called before main() is 273 * Initial entry point on startup. This gets called before main() is
268 * entered. 274 * entered.
269 * It should be responsible for setting up everything that must be 275 * It should be responsible for setting up everything that must be
270 * in place when main is called. 276 * in place when main is called.
271 * This includes: 277 * This includes:
272 * Taking a copy of the boot configuration structure. 278 * Taking a copy of the boot configuration structure.
273 * Initializing the physical console so characters can be printed. 279 * Initializing the physical console so characters can be printed.
274 * Setting up page tables for the kernel. 280 * Setting up page tables for the kernel.
275 */ 281 */
276u_int 282u_int
277initarm(int argc, char **argv, struct bootinfo *bi) 283initarm(int argc, char **argv, struct bootinfo *bi)
278{ 284{
279#ifdef DIAGNOSTIC 285#ifdef DIAGNOSTIC
280 extern vsize_t xscale_minidata_clean_size; /* used in KASSERT */ 286 extern vsize_t xscale_minidata_clean_size; /* used in KASSERT */
281#endif 287#endif
282 extern vaddr_t xscale_cache_clean_addr; 288 extern vaddr_t xscale_cache_clean_addr;
283 u_int kerneldatasize, symbolsize; 289 u_int kerneldatasize, symbolsize;
284 u_int l1pagetable; 290 u_int l1pagetable;
285 vaddr_t freemempos; 291 vaddr_t freemempos;
286 vsize_t pt_size; 292 vsize_t pt_size;
287 int loop, i; 293 int loop, i;
288#if NKSYMS || defined(DDB) || defined(MODULAR) 294#if NKSYMS || defined(DDB) || defined(MODULAR)
289 Elf_Shdr *sh; 295 Elf_Shdr *sh;
290#endif 296#endif
291 297
292 __sleep_func = NULL; 298 __sleep_func = NULL;
293 __sleep_ctx = NULL; 299 __sleep_ctx = NULL;
294 300
295 /* parse kernel args */ 301 /* parse kernel args */
296 boothowto = 0; 302 boothowto = 0;
297 boot_file[0] = '\0'; 303 boot_file[0] = '\0';
298 strncpy(booted_kernel_storage, argv[0], sizeof(booted_kernel_storage)); 304 strncpy(booted_kernel_storage, argv[0], sizeof(booted_kernel_storage));
299 for (i = 1; i < argc; i++) { 305 for (i = 1; i < argc; i++) {
300 char *cp = argv[i]; 306 char *cp = argv[i];
301 307
302 switch (*cp) { 308 switch (*cp) {
303 case 'b': 309 case 'b':
304 /* boot device: -b=sd0 etc. */ 310 /* boot device: -b=sd0 etc. */
305 cp = cp + 2; 311 cp = cp + 2;
306 if (strcmp(cp, MOUNT_NFS) == 0) 312 if (strcmp(cp, MOUNT_NFS) == 0)
307 rootfstype = MOUNT_NFS; 313 rootfstype = MOUNT_NFS;
308 else 314 else
309 strncpy(boot_file, cp, sizeof(boot_file)); 315 strncpy(boot_file, cp, sizeof(boot_file));
310 break; 316 break;
311 default: 317 default:
312 BOOT_FLAG(*cp, boothowto); 318 BOOT_FLAG(*cp, boothowto);
313 break; 319 break;
314 } 320 }
315 } 321 }
316 322
317 /* copy bootinfo into known kernel space */ 323 /* copy bootinfo into known kernel space */
318 bootinfo_storage = *bi; 324 bootinfo_storage = *bi;
319 bootinfo = &bootinfo_storage; 325 bootinfo = &bootinfo_storage;
320 326
321#ifdef BOOTINFO_FB_WIDTH 327#ifdef BOOTINFO_FB_WIDTH
322 bootinfo->fb_line_bytes = BOOTINFO_FB_LINE_BYTES; 328 bootinfo->fb_line_bytes = BOOTINFO_FB_LINE_BYTES;
323 bootinfo->fb_width = BOOTINFO_FB_WIDTH; 329 bootinfo->fb_width = BOOTINFO_FB_WIDTH;
324 bootinfo->fb_height = BOOTINFO_FB_HEIGHT; 330 bootinfo->fb_height = BOOTINFO_FB_HEIGHT;
325 bootinfo->fb_type = BOOTINFO_FB_TYPE; 331 bootinfo->fb_type = BOOTINFO_FB_TYPE;
326#endif 332#endif
327 333
328 if (bootinfo->magic == BOOTINFO_MAGIC) { 334 if (bootinfo->magic == BOOTINFO_MAGIC) {
329 platid.dw.dw0 = bootinfo->platid_cpu; 335 platid.dw.dw0 = bootinfo->platid_cpu;
330 platid.dw.dw1 = bootinfo->platid_machine; 336 platid.dw.dw1 = bootinfo->platid_machine;
331 } 337 }
332 338
333#ifndef RTC_OFFSET 339#ifndef RTC_OFFSET
334 /* 340 /*
335 * rtc_offset from bootinfo.timezone set by hpcboot.exe 341 * rtc_offset from bootinfo.timezone set by hpcboot.exe
336 */ 342 */
337 if (rtc_offset == 0 && 343 if (rtc_offset == 0 &&
338 (bootinfo->timezone > (-12 * 60) && 344 (bootinfo->timezone > (-12 * 60) &&
339 bootinfo->timezone <= (12 * 60))) 345 bootinfo->timezone <= (12 * 60)))
340 rtc_offset = bootinfo->timezone; 346 rtc_offset = bootinfo->timezone;
341#endif 347#endif
342 348
343 /* 349 /*
344 * Heads up ... Setup the CPU / MMU / TLB functions. 350 * Heads up ... Setup the CPU / MMU / TLB functions.
345 */ 351 */
346 set_cpufuncs(); 352 set_cpufuncs();
347 IRQdisable; 353 IRQdisable;
348 354
349 pmap_devmap_bootstrap((vaddr_t)read_ttb(), pxa2x0_devmap); 355 pmap_devmap_bootstrap((vaddr_t)read_ttb(), pxa2x0_devmap);
350 pxa2x0_memctl_bootstrap(PXA2X0_MEMCTL_VBASE); 356 pxa2x0_memctl_bootstrap(PXA2X0_MEMCTL_VBASE);
351 pxa2x0_intr_bootstrap(PXA2X0_INTCTL_VBASE); 357 pxa2x0_intr_bootstrap(PXA2X0_INTCTL_VBASE);
352 pxa2x0_clkman_bootstrap(PXA2X0_CLKMAN_VBASE); 358 pxa2x0_clkman_bootstrap(PXA2X0_CLKMAN_VBASE);
353 pxa2x0_gpio_bootstrap(PXA2X0_GPIO_VBASE); 359 pxa2x0_gpio_bootstrap(PXA2X0_GPIO_VBASE);
354 360
355 /* 361 /*
356 * XXX for now, overwrite bootconfig to hardcoded values in 362 * XXX for now, overwrite bootconfig to hardcoded values in
357 * XXX pxa2x0_machdep_init(). 363 * XXX pxa2x0_machdep_init().
358 * XXX kill bootconfig and directly call uvm_physload 364 * XXX kill bootconfig and directly call uvm_physload
359 */ 365 */
360 bootconfig.dram[0].address = 0xa0000000; 366 bootconfig.dram[0].address = 0xa0000000;
361 bootconfig.dram[0].pages = DRAM_PAGES; 367 bootconfig.dram[0].pages = DRAM_PAGES;
362 bootconfig.dramblocks = 1; 368 bootconfig.dramblocks = 1;
363 369
364 pxa2x0_machdep_init(); 370 pxa2x0_machdep_init();
365 371
366#ifdef DEBUG_BEFOREMMU 372#ifdef DEBUG_BEFOREMMU
367 /* 373 /*
368 * At this point, we cannot call real consinit(). 374 * At this point, we cannot call real consinit().
369 * Just call a faked up version of consinit(), which does the thing 375 * Just call a faked up version of consinit(), which does the thing
370 * with MMU disabled. 376 * with MMU disabled.
371 */ 377 */
372 fakecninit(); 378 fakecninit();
373#endif 379#endif
374 380
375 kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE; 381 kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE;
376 symbolsize = 0; 382 symbolsize = 0;
377#if NKSYMS || defined(DDB) || defined(MODULAR) 383#if NKSYMS || defined(DDB) || defined(MODULAR)
378 if (!memcmp(&end, "\177ELF", 4)) { 384 if (!memcmp(&end, "\177ELF", 4)) {
379 sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff); 385 sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
380 loop = ((Elf_Ehdr *)&end)->e_shnum; 386 loop = ((Elf_Ehdr *)&end)->e_shnum;
381 for (; loop; loop--, sh++) 387 for (; loop; loop--, sh++)
382 if (sh->sh_offset > 0 && 388 if (sh->sh_offset > 0 &&
383 (sh->sh_offset + sh->sh_size) > symbolsize) 389 (sh->sh_offset + sh->sh_size) > symbolsize)
384 symbolsize = sh->sh_offset + sh->sh_size; 390 symbolsize = sh->sh_offset + sh->sh_size;
385 } 391 }
386#endif 392#endif
387 393
388 printf("kernsize=0x%x\n", kerneldatasize); 394 printf("kernsize=0x%x\n", kerneldatasize);
389 kerneldatasize += symbolsize; 395 kerneldatasize += symbolsize;
390 kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) + 396 kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) +
391 PAGE_SIZE * 8; 397 PAGE_SIZE * 8;
392 398
393 /* 399 /*
394 * hpcboot has loaded me with MMU disabled. 400 * hpcboot has loaded me with MMU disabled.
395 * So create kernel page tables and enable MMU. 401 * So create kernel page tables and enable MMU.
396 */ 402 */
397 403
398 /* 404 /*
399 * Set up the variables that define the availability of physcial 405 * Set up the variables that define the availability of physcial
400 * memory. 406 * memory.
401 */ 407 */
402 physical_start = bootconfig.dram[0].address; 408 physical_start = bootconfig.dram[0].address;
403 physical_freestart = physical_start 409 physical_freestart = physical_start
404 + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize; 410 + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
405 physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address 411 physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
406 + bootconfig.dram[bootconfig.dramblocks - 1].pages * PAGE_SIZE; 412 + bootconfig.dram[bootconfig.dramblocks - 1].pages * PAGE_SIZE;
407 physical_freeend = physical_end; 413 physical_freeend = physical_end;
408  414
409 for (loop = 0; loop < bootconfig.dramblocks; ++loop) 415 for (loop = 0; loop < bootconfig.dramblocks; ++loop)
410 physmem += bootconfig.dram[loop].pages; 416 physmem += bootconfig.dram[loop].pages;
411  417
412 /* XXX handle UMA framebuffer memory */ 418 /* XXX handle UMA framebuffer memory */
413 419
414 freemempos = 0xa0009000UL; 420 freemempos = 0xa0009000UL;
415 memset((void *)freemempos, 0, KERNEL_TEXT_BASE - KERNEL_BASE - 0x9000); 421 memset((void *)freemempos, 0, KERNEL_TEXT_BASE - KERNEL_BASE - 0x9000);
416 422
417 /* 423 /*
418 * Right. We have the bottom meg of memory mapped to 0x00000000 424 * Right. We have the bottom meg of memory mapped to 0x00000000
419 * so was can get at it. The kernel will occupy the start of it. 425 * so was can get at it. The kernel will occupy the start of it.
420 * After the kernel/args we allocate some of the fixed page tables 426 * After the kernel/args we allocate some of the fixed page tables
421 * we need to get the system going. 427 * we need to get the system going.
422 * We allocate one page directory and NUM_KERNEL_PTS page tables 428 * We allocate one page directory and NUM_KERNEL_PTS page tables
423 * and store the physical addresses in the kernel_pt_table array. 429 * and store the physical addresses in the kernel_pt_table array.
424 * Must remember that neither the page L1 or L2 page tables are the 430 * Must remember that neither the page L1 or L2 page tables are the
425 * same size as a page ! 431 * same size as a page !
426 * 432 *
427 * Ok, the next bit of physical allocate may look complex but it is 433 * Ok, the next bit of physical allocate may look complex but it is
428 * simple really. I have done it like this so that no memory gets 434 * simple really. I have done it like this so that no memory gets
429 * wasted during the allocate of various pages and tables that are 435 * wasted during the allocate of various pages and tables that are
430 * all different sizes. 436 * all different sizes.
431 * The start address will be page aligned. 437 * The start address will be page aligned.
432 * We allocate the kernel page directory on the first free 16KB 438 * We allocate the kernel page directory on the first free 16KB
433 * boundary we find. 439 * boundary we find.
434 * We allocate the kernel page tables on the first 1KB boundary we 440 * We allocate the kernel page tables on the first 1KB boundary we
435 * find. We allocate at least 9 PT's (12 currently). This means 441 * find. We allocate at least 9 PT's (12 currently). This means
436 * that in the process we KNOW that we will encounter at least one 442 * that in the process we KNOW that we will encounter at least one
437 * 16KB boundary. 443 * 16KB boundary.
438 * 444 *
439 * Eventually if the top end of the memory gets used for process L1 445 * Eventually if the top end of the memory gets used for process L1
440 * page tables the kernel L1 page table may be moved up there. 446 * page tables the kernel L1 page table may be moved up there.
441 */ 447 */
442 448
443#ifdef VERBOSE_INIT_ARM 449#ifdef VERBOSE_INIT_ARM
444 printf("Allocating page tables\n"); 450 printf("Allocating page tables\n");
445#endif 451#endif
446 452
447 /* Define a macro to simplify memory allocation */ 453 /* Define a macro to simplify memory allocation */
448#define valloc_pages(var, np) \ 454#define valloc_pages(var, np) \
449 alloc_pages((var).pv_pa, (np)); \ 455 alloc_pages((var).pv_pa, (np)); \
450 (var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start; 456 (var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;
451#define alloc_pages(var, np) \ 457#define alloc_pages(var, np) \
452 (var) = freemempos; \ 458 (var) = freemempos; \
453 freemempos += (np) * PAGE_SIZE; 459 freemempos += (np) * PAGE_SIZE;
454 460
455 { 461 {
456 int loop1 = 0; 462 int loop1 = 0;
457 kernel_l1pt.pv_pa = 0; 463 kernel_l1pt.pv_pa = 0;
458 kernel_l1pt.pv_va = 0; 464 kernel_l1pt.pv_va = 0;
459 for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) { 465 for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
460 /* Are we 16KB aligned for an L1 ? */ 466 /* Are we 16KB aligned for an L1 ? */
461 if (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0 467 if (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
462 && kernel_l1pt.pv_pa == 0) { 468 && kernel_l1pt.pv_pa == 0) {
463 valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); 469 valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
464 } else { 470 } else {
465 valloc_pages(kernel_pt_table[loop1], 471 valloc_pages(kernel_pt_table[loop1],
466 L2_TABLE_SIZE / PAGE_SIZE); 472 L2_TABLE_SIZE / PAGE_SIZE);
467 ++loop1; 473 ++loop1;
468 } 474 }
469 } 475 }
470 } 476 }
471 477
472 /* This should never be able to happen but better confirm that. */ 478 /* This should never be able to happen but better confirm that. */
473 if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0) 479 if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
474 panic("initarm: Failed to align the kernel page directory"); 480 panic("initarm: Failed to align the kernel page directory");
475 481
476 /* 482 /*
477 * Allocate a page for the system page mapped to V0x00000000 483 * Allocate a page for the system page mapped to V0x00000000
478 * This page will just contain the system vectors and can be 484 * This page will just contain the system vectors and can be
479 * shared by all processes. 485 * shared by all processes.
480 */ 486 */
481 valloc_pages(systempage, 1); 487 valloc_pages(systempage, 1);
482 488
483 pt_size = round_page(freemempos) - physical_start; 489 pt_size = round_page(freemempos) - physical_start;
484 490
485 /* Allocate stacks for all modes */ 491 /* Allocate stacks for all modes */
486 valloc_pages(irqstack, IRQ_STACK_SIZE); 492 valloc_pages(irqstack, IRQ_STACK_SIZE);
487 valloc_pages(abtstack, ABT_STACK_SIZE); 493 valloc_pages(abtstack, ABT_STACK_SIZE);
488 valloc_pages(undstack, UND_STACK_SIZE); 494 valloc_pages(undstack, UND_STACK_SIZE);
489 valloc_pages(kernelstack, UPAGES); 495 valloc_pages(kernelstack, UPAGES);
490 496
491#ifdef VERBOSE_INIT_ARM 497#ifdef VERBOSE_INIT_ARM
492 printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa, 498 printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
493 irqstack.pv_va);  499 irqstack.pv_va);
494 printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa, 500 printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
495 abtstack.pv_va);  501 abtstack.pv_va);
496 printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa, 502 printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
497 undstack.pv_va);  503 undstack.pv_va);
498 printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa, 504 printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
499 kernelstack.pv_va);  505 kernelstack.pv_va);
500#endif 506#endif
501 507
502 alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE); 508 alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);
503 509
504 /* Allocate enough pages for cleaning the Mini-Data cache. */ 510 /* Allocate enough pages for cleaning the Mini-Data cache. */
505 KASSERT(xscale_minidata_clean_size <= PAGE_SIZE); 511 KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
506 valloc_pages(minidataclean, 1); 512 valloc_pages(minidataclean, 1);
507#ifdef VERBOSE_INIT_ARM 513#ifdef VERBOSE_INIT_ARM
508 printf("minidataclean: p0x%08lx v0x%08lx, size = %ld\n", 514 printf("minidataclean: p0x%08lx v0x%08lx, size = %ld\n",
509 minidataclean.pv_pa, minidataclean.pv_va, 515 minidataclean.pv_pa, minidataclean.pv_va,
510 xscale_minidata_clean_size); 516 xscale_minidata_clean_size);
511#endif 517#endif
512 518
513 /* 519 /*
514 * Ok, we have allocated physical pages for the primary kernel 520 * Ok, we have allocated physical pages for the primary kernel
515 * page tables. 521 * page tables.
516 */ 522 */
517 523
518#ifdef VERBOSE_INIT_ARM 524#ifdef VERBOSE_INIT_ARM
519 printf("Creating L1 page table\n"); 525 printf("Creating L1 page table\n");
520#endif 526#endif
521 527
522 /* 528 /*
523 * Now we start construction of the L1 page table. 529 * Now we start construction of the L1 page table.
524 * We start by mapping the L2 page tables into the L1. 530 * We start by mapping the L2 page tables into the L1.
525 * This means that we can replace L1 mappings later on if necessary. 531 * This means that we can replace L1 mappings later on if necessary.
526 */ 532 */
527 l1pagetable = kernel_l1pt.pv_pa; 533 l1pagetable = kernel_l1pt.pv_pa;
528 534
529 /* Map the L2 pages tables in the L1 page table */ 535 /* Map the L2 pages tables in the L1 page table */
530 pmap_link_l2pt(l1pagetable, 0x00000000, 536 pmap_link_l2pt(l1pagetable, 0x00000000,
531 &kernel_pt_table[KERNEL_PT_SYS]); 537 &kernel_pt_table[KERNEL_PT_SYS]);
532 for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; ++loop) 538 for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; ++loop)
533 pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000, 539 pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
534 &kernel_pt_table[KERNEL_PT_KERNEL + loop]); 540 &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
535 for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) 541 for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
536 pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000, 542 pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
537 &kernel_pt_table[KERNEL_PT_VMDATA + loop]); 543 &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
538 544
539 /* update the top of the kernel VM */ 545 /* update the top of the kernel VM */
540 pmap_curmaxkvaddr = 546 pmap_curmaxkvaddr =
541 KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000); 547 KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);
542 548
543#ifdef VERBOSE_INIT_ARM 549#ifdef VERBOSE_INIT_ARM
544 printf("Mapping kernel\n"); 550 printf("Mapping kernel\n");
545#endif 551#endif
546 552
547 /* Now we fill in the L2 pagetable for the kernel code/data */ 553 /* Now we fill in the L2 pagetable for the kernel code/data */
548 554
549 /* 555 /*
550 * XXX there is no ELF header to find RO region. 556 * XXX there is no ELF header to find RO region.
551 * XXX What should we do? 557 * XXX What should we do?
552 */ 558 */
553#if 0 559#if 0
554 if (N_GETMAGIC(kernexec[0]) == ZMAGIC) { 560 if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
555 logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, 561 logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
556 physical_start, kernexec->a_text, 562 physical_start, kernexec->a_text,
557 VM_PROT_READ, PTE_CACHE); 563 VM_PROT_READ, PTE_CACHE);
558 logical += pmap_map_chunk(l1pagetable, 564 logical += pmap_map_chunk(l1pagetable,
559 KERNEL_TEXT_BASE + logical, physical_start + logical, 565 KERNEL_TEXT_BASE + logical, physical_start + logical,
560 kerneldatasize - kernexec->a_text, 566 kerneldatasize - kernexec->a_text,
561 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); 567 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
562 } else 568 } else
563#endif 569#endif
564 pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, 570 pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
565 KERNEL_TEXT_BASE - KERNEL_BASE + physical_start, 571 KERNEL_TEXT_BASE - KERNEL_BASE + physical_start,
566 kerneldatasize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); 572 kerneldatasize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
567 573
568#ifdef VERBOSE_INIT_ARM 574#ifdef VERBOSE_INIT_ARM
569 printf("Constructing L2 page tables\n"); 575 printf("Constructing L2 page tables\n");
570#endif 576#endif
571 577
572 /* Map the stack pages */ 578 /* Map the stack pages */
573 pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa, 579 pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
574 IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); 580 IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
575 pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa, 581 pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
576 ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); 582 ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
577 pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa, 583 pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
578 UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); 584 UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
579 pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa, 585 pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
580 UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); 586 UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
581 587
582 pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, 588 pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
583 L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); 589 L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
584 590
585 /* Map page tables */ 591 /* Map page tables */
586 for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) { 592 for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
587 pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va, 593 pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
588 kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE, 594 kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
589 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); 595 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
590 } 596 }
591 597
592 /* Map the Mini-Data cache clean area. */ 598 /* Map the Mini-Data cache clean area. */
593 xscale_setup_minidata(l1pagetable, minidataclean.pv_va, 599 xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
594 minidataclean.pv_pa); 600 minidataclean.pv_pa);
595 601
596 /* Map the vector page. */ 602 /* Map the vector page. */
597 pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa, 603 pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
598 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); 604 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
599 605
600 /* 606 /*
601 * map integrated peripherals at same address in l1pagetable 607 * map integrated peripherals at same address in l1pagetable
602 * so that we can continue to use console. 608 * so that we can continue to use console.
603 */ 609 */
604 pmap_devmap_bootstrap(l1pagetable, pxa2x0_devmap); 610 pmap_devmap_bootstrap(l1pagetable, pxa2x0_devmap);
605 pmap_devmap_bootstrap(l1pagetable, machdep_devmap); 611 pmap_devmap_bootstrap(l1pagetable, machdep_devmap);
606 612
607 /* 613 /*
608 * Give the XScale global cache clean code an appropriately 614 * Give the XScale global cache clean code an appropriately
609 * sized chunk of unmapped VA space starting at 0xff000000 615 * sized chunk of unmapped VA space starting at 0xff000000
610 * (our device mappings end before this address). 616 * (our device mappings end before this address).
611 */ 617 */
612 xscale_cache_clean_addr = 0xff000000U; 618 xscale_cache_clean_addr = 0xff000000U;
613 619
614 /* 620 /*
615 * Now we have the real page tables in place so we can switch to them. 621 * Now we have the real page tables in place so we can switch to them.
616 * Once this is done we will be running with the REAL kernel page 622 * Once this is done we will be running with the REAL kernel page
617 * tables. 623 * tables.
618 */ 624 */
619 625
620#ifdef VERBOSE_INIT_ARM 626#ifdef VERBOSE_INIT_ARM
621 printf("done.\n"); 627 printf("done.\n");
622#endif 628#endif
623 629
624 /* 630 /*
625 * Pages were allocated during the secondary bootstrap for the 631 * Pages were allocated during the secondary bootstrap for the
626 * stacks for different CPU modes. 632 * stacks for different CPU modes.
627 * We must now set the r13 registers in the different CPU modes to 633 * We must now set the r13 registers in the different CPU modes to
628 * point to these stacks. 634 * point to these stacks.
629 * Since the ARM stacks use STMFD etc. we must set r13 to the top end 635 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
630 * of the stack memory. 636 * of the stack memory.
631 */ 637 */
632#ifdef VERBOSE_INIT_ARM 638#ifdef VERBOSE_INIT_ARM
633 printf("init subsystems: stacks "); 639 printf("init subsystems: stacks ");
634#endif 640#endif
635 641
636 set_stackptr(PSR_IRQ32_MODE, 642 set_stackptr(PSR_IRQ32_MODE,
637 irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE); 643 irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
638 set_stackptr(PSR_ABT32_MODE, 644 set_stackptr(PSR_ABT32_MODE,
639 abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE); 645 abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
640 set_stackptr(PSR_UND32_MODE, 646 set_stackptr(PSR_UND32_MODE,
641 undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE); 647 undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
642#ifdef PMAP_DEBUG 648#ifdef PMAP_DEBUG
643 if (pmap_debug_level >= 0) 649 if (pmap_debug_level >= 0)
644 printf("kstack V%08lx P%08lx\n", kernelstack.pv_va, 650 printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
645 kernelstack.pv_pa); 651 kernelstack.pv_pa);
646#endif /* PMAP_DEBUG */ 652#endif /* PMAP_DEBUG */
647 653
648 /* 654 /*
649 * Well we should set a data abort handler. 655 * Well we should set a data abort handler.
650 * Once things get going this will change as we will need a proper 656 * Once things get going this will change as we will need a proper
651 * handler. Until then we will use a handler that just panics but 657 * handler. Until then we will use a handler that just panics but
652 * tells us why. 658 * tells us why.
653 * Initialization of the vectors will just panic on a data abort. 659 * Initialization of the vectors will just panic on a data abort.
654 * This just fills in a slightly better one. 660 * This just fills in a slightly better one.
655 */ 661 */
656#ifdef VERBOSE_INIT_ARM 662#ifdef VERBOSE_INIT_ARM
657 printf("vectors "); 663 printf("vectors ");
658#endif 664#endif
659 data_abort_handler_address = (u_int)data_abort_handler; 665 data_abort_handler_address = (u_int)data_abort_handler;
660 prefetch_abort_handler_address = (u_int)prefetch_abort_handler; 666 prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
661 undefined_handler_address = (u_int)undefinedinstruction_bounce; 667 undefined_handler_address = (u_int)undefinedinstruction_bounce;
662#ifdef DEBUG 668#ifdef DEBUG
663 printf("%08x %08x %08x\n", data_abort_handler_address, 669 printf("%08x %08x %08x\n", data_abort_handler_address,
664 prefetch_abort_handler_address, undefined_handler_address);  670 prefetch_abort_handler_address, undefined_handler_address);
665#endif 671#endif
666 672
667 /* Initialize the undefined instruction handlers */ 673 /* Initialize the undefined instruction handlers */
668#ifdef VERBOSE_INIT_ARM 674#ifdef VERBOSE_INIT_ARM
669 printf("undefined\n"); 675 printf("undefined\n");
670#endif 676#endif
671 undefined_init(); 677 undefined_init();
672 678
673 /* Set the page table address. */ 679 /* Set the page table address. */
674#ifdef VERBOSE_INIT_ARM 680#ifdef VERBOSE_INIT_ARM
675 printf("switching to new L1 page table @%#lx...\n", kernel_l1pt.pv_pa); 681 printf("switching to new L1 page table @%#lx...\n", kernel_l1pt.pv_pa);
676#endif 682#endif
677 cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT); 683 cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
678 cpu_setttb(kernel_l1pt.pv_pa); 684 cpu_setttb(kernel_l1pt.pv_pa);
679 cpu_tlb_flushID(); 685 cpu_tlb_flushID();
680 cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)); 686 cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
681 687
682 /* 688 /*
683 * Moved from cpu_startup() as data_abort_handler() references 689 * Moved from cpu_startup() as data_abort_handler() references
684 * this during uvm init. 690 * this during uvm init.
685 */ 691 */
686 uvm_lwp_setuarea(&lwp0, kernelstack.pv_va); 692 uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);
687 693
688 arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL); 694 arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);
689 695
690 consinit(); 696 consinit();
691 697
692#ifdef VERBOSE_INIT_ARM 698#ifdef VERBOSE_INIT_ARM
693 printf("bootstrap done.\n"); 699 printf("bootstrap done.\n");
694#endif 700#endif
695 701
696#ifdef VERBOSE_INIT_ARM 702#ifdef VERBOSE_INIT_ARM
697 printf("freemempos=%08lx\n", freemempos); 703 printf("freemempos=%08lx\n", freemempos);
698 printf("MMU enabled. control=%08x\n", cpu_get_control()); 704 printf("MMU enabled. control=%08x\n", cpu_get_control());
699#endif 705#endif
700 706
701 /* Load memory into UVM. */ 707 /* Load memory into UVM. */
702 uvm_setpagesize(); /* initialize PAGE_SIZE-dependent variables */ 708 uvm_setpagesize(); /* initialize PAGE_SIZE-dependent variables */
703 for (loop = 0; loop < bootconfig.dramblocks; loop++) { 709 for (loop = 0; loop < bootconfig.dramblocks; loop++) {
704 paddr_t dblk_start = (paddr_t)bootconfig.dram[loop].address; 710 paddr_t dblk_start = (paddr_t)bootconfig.dram[loop].address;
705 paddr_t dblk_end = dblk_start 711 paddr_t dblk_end = dblk_start
706 + (bootconfig.dram[loop].pages * PAGE_SIZE); 712 + (bootconfig.dram[loop].pages * PAGE_SIZE);
707 713
708 if (dblk_start < physical_freestart) 714 if (dblk_start < physical_freestart)
709 dblk_start = physical_freestart; 715 dblk_start = physical_freestart;
710 if (dblk_end > physical_freeend) 716 if (dblk_end > physical_freeend)
711 dblk_end = physical_freeend; 717 dblk_end = physical_freeend;
712 718
713 uvm_page_physload(atop(dblk_start), atop(dblk_end), 719 uvm_page_physload(atop(dblk_start), atop(dblk_end),
714 atop(dblk_start), atop(dblk_end), VM_FREELIST_DEFAULT); 720 atop(dblk_start), atop(dblk_end), VM_FREELIST_DEFAULT);
715 } 721 }
716 722
717 /* Boot strap pmap telling it where the kernel page table is */ 723 /* Boot strap pmap telling it where the kernel page table is */
718 pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE); 724 pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);
719 725
720#ifdef DDB 726#ifdef DDB
721 db_machine_init(); 727 db_machine_init();
722#endif 728#endif
723#if NKSYMS || defined(DDB) || defined(MODULAR) 729#if NKSYMS || defined(DDB) || defined(MODULAR)
724 ksyms_addsyms_elf(symbolsize, ((int *)&end), ((char *)&end) + symbolsize); 730 ksyms_addsyms_elf(symbolsize, ((int *)&end), ((char *)&end) + symbolsize);
725#endif 731#endif
726 732
727 printf("kernsize=0x%x", kerneldatasize); 733 printf("kernsize=0x%x", kerneldatasize);
728 printf(" (including 0x%x symbols)\n", symbolsize); 734 printf(" (including 0x%x symbols)\n", symbolsize);
729 735
730#ifdef DDB 736#ifdef DDB
731 if (boothowto & RB_KDB) 737 if (boothowto & RB_KDB)
732 Debugger(); 738 Debugger();
733#endif /* DDB */ 739#endif /* DDB */
734 740
735 /* We return the new stack pointer address */ 741 /* We return the new stack pointer address */
736 return (kernelstack.pv_va + USPACE_SVC_STACK_TOP); 742 return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
737} 743}
738 744
739#ifdef DEBUG_BEFOREMMU 745#ifdef DEBUG_BEFOREMMU
740static void 746static void
741fakecninit(void) 747fakecninit(void)
742{ 748{
743#if (NCOM > 0) && defined(COM_PXA2X0) 749#if (NCOM > 0) && defined(COM_PXA2X0)
744 comcnattach(&pxa2x0_a4x_bs_tag, comcnaddr, comcnspeed, 750 comcnattach(&pxa2x0_a4x_bs_tag, comcnaddr, comcnspeed,
745 PXA2X0_COM_FREQ, COM_TYPE_PXA2x0, comcnmode); 751 PXA2X0_COM_FREQ, COM_TYPE_PXA2x0, comcnmode);
746#endif 752#endif
747} 753}
748#endif 754#endif