Wed Dec 31 18:48:14 2008 UTC
- remove assigned but unused `pt' variable (gcc generates the same binary)
- remove obsolete Sysmap comments


(tsutsui)
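Background for the first item above: a local variable that is only assigned and never read is a dead store, which gcc's optimizer already discards, so removing the variable from the source leaves the generated object code unchanged. The stand-alone sketch below (hypothetical, not taken from amiga_init.c) illustrates the pattern; compiled with optimization, sum() should assemble identically with or without the `unused' local.

#include <stdio.h>

/*
 * Hypothetical example of an assigned-but-unused local: the store to
 * `unused' is dead, so the compiler discards it and the emitted code
 * for sum() is the same whether or not the variable exists.
 */
static int
sum(int a, int b)
{
	int unused;	/* assigned below but never read */
	int result;

	unused = a;	/* dead store; optimized away */
	result = a + b;
	return result;
}

int
main(void)
{
	printf("%d\n", sum(2, 3));
	return 0;
}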
diff -r1.101 -r1.102 src/sys/arch/amiga/amiga/amiga_init.c

cvs diff -r1.101 -r1.102 src/sys/arch/amiga/amiga/amiga_init.c

--- src/sys/arch/amiga/amiga/amiga_init.c 2008/12/31 10:33:13 1.101
+++ src/sys/arch/amiga/amiga/amiga_init.c 2008/12/31 18:48:14 1.102
@@ -1,1115 +1,1108 @@
1/* $NetBSD: amiga_init.c,v 1.101 2008/12/31 10:33:13 tsutsui Exp $ */ 1/* $NetBSD: amiga_init.c,v 1.102 2008/12/31 18:48:14 tsutsui Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1994 Michael L. Hitch 4 * Copyright (c) 1994 Michael L. Hitch
5 * Copyright (c) 1993 Markus Wild 5 * Copyright (c) 1993 Markus Wild
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software 16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement: 17 * must display the following acknowledgement:
18 * This product includes software developed by Markus Wild. 18 * This product includes software developed by Markus Wild.
19 * 4. The name of the author may not be used to endorse or promote products 19 * 4. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission 20 * derived from this software without specific prior written permission
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#include "opt_amigaccgrf.h" 34#include "opt_amigaccgrf.h"
35#include "opt_p5ppc68kboard.h" 35#include "opt_p5ppc68kboard.h"
36#include "opt_devreload.h" 36#include "opt_devreload.h"
37 37
38#include <sys/cdefs.h> 38#include <sys/cdefs.h>
39__KERNEL_RCSID(0, "$NetBSD: amiga_init.c,v 1.101 2008/12/31 10:33:13 tsutsui Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: amiga_init.c,v 1.102 2008/12/31 18:48:14 tsutsui Exp $");
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <sys/systm.h> 42#include <sys/systm.h>
43#include <sys/proc.h> 43#include <sys/proc.h>
44#include <uvm/uvm_extern.h> 44#include <uvm/uvm_extern.h>
45#include <sys/user.h> 45#include <sys/user.h>
46#include <sys/ioctl.h> 46#include <sys/ioctl.h>
47#include <sys/select.h> 47#include <sys/select.h>
48#include <sys/tty.h> 48#include <sys/tty.h>
49#include <sys/buf.h> 49#include <sys/buf.h>
50#include <sys/msgbuf.h> 50#include <sys/msgbuf.h>
51#include <sys/mbuf.h> 51#include <sys/mbuf.h>
52#include <sys/protosw.h> 52#include <sys/protosw.h>
53#include <sys/domain.h> 53#include <sys/domain.h>
54#include <sys/dkbad.h> 54#include <sys/dkbad.h>
55#include <sys/reboot.h> 55#include <sys/reboot.h>
56#include <sys/exec.h> 56#include <sys/exec.h>
57#include <machine/pte.h> 57#include <machine/pte.h>
58#include <machine/cpu.h> 58#include <machine/cpu.h>
59#include <amiga/amiga/cc.h> 59#include <amiga/amiga/cc.h>
60#include <amiga/amiga/cia.h> 60#include <amiga/amiga/cia.h>
61#include <amiga/amiga/custom.h> 61#include <amiga/amiga/custom.h>
62#include <amiga/amiga/cfdev.h> 62#include <amiga/amiga/cfdev.h>
63#include <amiga/amiga/drcustom.h> 63#include <amiga/amiga/drcustom.h>
64#include <amiga/amiga/gayle.h> 64#include <amiga/amiga/gayle.h>
65#include <amiga/amiga/memlist.h> 65#include <amiga/amiga/memlist.h>
66#include <amiga/dev/zbusvar.h> 66#include <amiga/dev/zbusvar.h>
67 67
68#define RELOC(v, t) *((t*)((u_int)&(v) + loadbase)) 68#define RELOC(v, t) *((t*)((u_int)&(v) + loadbase))
69 69
70extern u_int lowram; 70extern u_int lowram;
71extern u_int Sysptsize, Umap, proc0paddr; 71extern u_int Sysptsize, Umap, proc0paddr;
72extern pt_entry_t *Sysptmap; 72extern pt_entry_t *Sysptmap;
73extern st_entry_t *Sysseg; 73extern st_entry_t *Sysseg;
74extern u_int Sysseg_pa; 74extern u_int Sysseg_pa;
75extern u_int virtual_avail; 75extern u_int virtual_avail;
76#if defined(M68040) || defined(M68060) 76#if defined(M68040) || defined(M68060)
77extern int protostfree; 77extern int protostfree;
78#endif 78#endif
79extern u_long boot_partition; 79extern u_long boot_partition;
80vaddr_t amiga_uptbase; 80vaddr_t amiga_uptbase;
81#ifdef P5PPC68KBOARD 81#ifdef P5PPC68KBOARD
82extern int p5ppc; 82extern int p5ppc;
83#endif 83#endif
84 84
85extern char *esym; 85extern char *esym;
86 86
87#ifdef GRF_AGA 87#ifdef GRF_AGA
88extern u_long aga_enable; 88extern u_long aga_enable;
89#endif 89#endif
90 90
91extern u_long noncontig_enable; 91extern u_long noncontig_enable;
92 92
93/* 93/*
94 * some addresses used in locore 94 * some addresses used in locore
95 */ 95 */
96vaddr_t INTREQRaddr; 96vaddr_t INTREQRaddr;
97vaddr_t INTREQWaddr; 97vaddr_t INTREQWaddr;
98 98
99/* 99/*
100 * these are used by the extended spl?() macros. 100 * these are used by the extended spl?() macros.
101 */ 101 */
102volatile unsigned short *amiga_intena_read, *amiga_intena_write; 102volatile unsigned short *amiga_intena_read, *amiga_intena_write;
103 103
104vaddr_t CHIPMEMADDR; 104vaddr_t CHIPMEMADDR;
105vaddr_t chipmem_start; 105vaddr_t chipmem_start;
106vaddr_t chipmem_end; 106vaddr_t chipmem_end;
107 107
108vaddr_t z2mem_start; /* XXX */ 108vaddr_t z2mem_start; /* XXX */
109static vaddr_t z2mem_end; /* XXX */ 109static vaddr_t z2mem_end; /* XXX */
110int use_z2_mem = 1; /* XXX */ 110int use_z2_mem = 1; /* XXX */
111 111
112u_long boot_fphystart, boot_fphysize, boot_cphysize; 112u_long boot_fphystart, boot_fphysize, boot_cphysize;
113static u_int start_c_fphystart; 113static u_int start_c_fphystart;
114static u_int start_c_pstart; 114static u_int start_c_pstart;
115 115
116static u_long boot_flags; 116static u_long boot_flags;
117 117
118struct boot_memlist *memlist; 118struct boot_memlist *memlist;
119 119
120struct cfdev *cfdev; 120struct cfdev *cfdev;
121int ncfdev; 121int ncfdev;
122 122
123u_long scsi_nosync; 123u_long scsi_nosync;
124int shift_nosync; 124int shift_nosync;
125 125
126void start_c(int, u_int, u_int, u_int, char *, u_int, u_long, u_long, u_int); 126void start_c(int, u_int, u_int, u_int, char *, u_int, u_long, u_long, u_int);
127void rollcolor(int); 127void rollcolor(int);
128#ifdef DEVRELOAD 128#ifdef DEVRELOAD
129static int kernel_image_magic_size(void); 129static int kernel_image_magic_size(void);
130static void kernel_image_magic_copy(u_char *); 130static void kernel_image_magic_copy(u_char *);
131int kernel_reload_write(struct uio *); 131int kernel_reload_write(struct uio *);
132extern void kernel_reload(char *, u_long, u_long, u_long, u_long, 132extern void kernel_reload(char *, u_long, u_long, u_long, u_long,
133 u_long, u_long, u_long, u_long, u_long, u_long); 133 u_long, u_long, u_long, u_long, u_long, u_long);
134#endif 134#endif
135extern void etext(void); 135extern void etext(void);
136void start_c_finish(void); 136void start_c_finish(void);
137 137
138void * 138void *
139chipmem_steal(long amount) 139chipmem_steal(long amount)
140{ 140{
141 /* 141 /*
142 * steal from top of chipmem, so we don't collide with 142 * steal from top of chipmem, so we don't collide with
143 * the kernel loaded into chipmem in the not-yet-mapped state. 143 * the kernel loaded into chipmem in the not-yet-mapped state.
144 */ 144 */
145 vaddr_t p = chipmem_end - amount; 145 vaddr_t p = chipmem_end - amount;
146 if (p & 1) 146 if (p & 1)
147 p = p - 1; 147 p = p - 1;
148 chipmem_end = p; 148 chipmem_end = p;
149 if(chipmem_start > chipmem_end) 149 if(chipmem_start > chipmem_end)
150 panic("not enough chip memory"); 150 panic("not enough chip memory");
151 return((void *)p); 151 return((void *)p);
152} 152}
153 153
154/* 154/*
155 * XXX 155 * XXX
156 * used by certain drivers currently to allocate zorro II memory 156 * used by certain drivers currently to allocate zorro II memory
157 * for bounce buffers, if use_z2_mem is NULL, chipmem will be 157 * for bounce buffers, if use_z2_mem is NULL, chipmem will be
158 * returned instead. 158 * returned instead.
159 * XXX 159 * XXX
160 */ 160 */
161void * 161void *
162alloc_z2mem(amount) 162alloc_z2mem(amount)
163 long amount; 163 long amount;
164{ 164{
165 if (use_z2_mem && z2mem_end && (z2mem_end - amount) >= z2mem_start) { 165 if (use_z2_mem && z2mem_end && (z2mem_end - amount) >= z2mem_start) {
166 z2mem_end -= amount; 166 z2mem_end -= amount;
167 return ((void *)z2mem_end); 167 return ((void *)z2mem_end);
168 } 168 }
169 return (alloc_chipmem(amount)); 169 return (alloc_chipmem(amount));
170} 170}
171 171
172 172
173/* 173/*
174 * this is the C-level entry function, it's called from locore.s. 174 * this is the C-level entry function, it's called from locore.s.
175 * Preconditions: 175 * Preconditions:
176 * Interrupts are disabled 176 * Interrupts are disabled
177 * PA may not be == VA, so we may have to relocate addresses 177 * PA may not be == VA, so we may have to relocate addresses
178 * before enabling the MMU 178 * before enabling the MMU
179 * Exec is no longer available (because we're loaded all over 179 * Exec is no longer available (because we're loaded all over
180 * low memory, no ExecBase is available anymore) 180 * low memory, no ExecBase is available anymore)
181 * 181 *
182 * It's purpose is: 182 * It's purpose is:
183 * Do the things that are done in locore.s in the hp300 version, 183 * Do the things that are done in locore.s in the hp300 version,
184 * this includes allocation of kernel maps and enabling the MMU. 184 * this includes allocation of kernel maps and enabling the MMU.
185 * 185 *
186 * Some of the code in here is `stolen' from Amiga MACH, and was 186 * Some of the code in here is `stolen' from Amiga MACH, and was
187 * written by Bryan Ford and Niklas Hallqvist. 187 * written by Bryan Ford and Niklas Hallqvist.
188 * 188 *
189 * Very crude 68040 support by Michael L. Hitch. 189 * Very crude 68040 support by Michael L. Hitch.
190 * 190 *
191 */ 191 */
192 192
193int kernel_copyback = 1; 193int kernel_copyback = 1;
194 194
195__attribute__ ((no_instrument_function)) 195__attribute__ ((no_instrument_function))
196void 196void
197start_c(id, fphystart, fphysize, cphysize, esym_addr, flags, inh_sync, 197start_c(id, fphystart, fphysize, cphysize, esym_addr, flags, inh_sync,
198 boot_part, loadbase) 198 boot_part, loadbase)
199 int id; 199 int id;
200 u_int fphystart, fphysize, cphysize; 200 u_int fphystart, fphysize, cphysize;
201 char *esym_addr; 201 char *esym_addr;
202 u_int flags; 202 u_int flags;
203 u_long inh_sync; 203 u_long inh_sync;
204 u_long boot_part; 204 u_long boot_part;
205 u_int loadbase; 205 u_int loadbase;
206{ 206{
207 extern char end[]; 207 extern char end[];
208 extern u_int protorp[2]; 208 extern u_int protorp[2];
209 struct cfdev *cd; 209 struct cfdev *cd;
210 u_int pstart, pend, vstart, vend, avail; 210 u_int pstart, pend, vstart, vend, avail;
211 u_int pt, ptpa, ptsize, ptextra, kstsize; 211 u_int ptpa, ptsize, ptextra, kstsize;
212 u_int Sysptmap_pa; 212 u_int Sysptmap_pa;
213 register st_entry_t sg_proto, *sg, *esg; 213 register st_entry_t sg_proto, *sg, *esg;
214 register pt_entry_t pg_proto, *pg; 214 register pt_entry_t pg_proto, *pg;
215 u_int end_loaded, ncd, i; 215 u_int end_loaded, ncd, i;
216 struct boot_memlist *ml; 216 struct boot_memlist *ml;
217 217
218#ifdef DEBUG_KERNEL_START 218#ifdef DEBUG_KERNEL_START
219 /* XXX this only is valid if Altais is in slot 0 */ 219 /* XXX this only is valid if Altais is in slot 0 */
220 volatile u_int8_t *altaiscolpt = (u_int8_t *)0x200003c8; 220 volatile u_int8_t *altaiscolpt = (u_int8_t *)0x200003c8;
221 volatile u_int8_t *altaiscol = (u_int8_t *)0x200003c9; 221 volatile u_int8_t *altaiscol = (u_int8_t *)0x200003c9;
222#endif 222#endif
223 223
224#ifdef DEBUG_KERNEL_START 224#ifdef DEBUG_KERNEL_START
225 if ((id>>24)==0x7D) { 225 if ((id>>24)==0x7D) {
226 *altaiscolpt = 0; 226 *altaiscolpt = 0;
227 *altaiscol = 40; 227 *altaiscol = 40;
228 *altaiscol = 0; 228 *altaiscol = 0;
229 *altaiscol = 0; 229 *altaiscol = 0;
230 } else 230 } else
231((volatile struct Custom *)0xdff000)->color[0] = 0xa00; /* RED */ 231((volatile struct Custom *)0xdff000)->color[0] = 0xa00; /* RED */
232#endif 232#endif
233 233
234#ifdef LIMITMEM 234#ifdef LIMITMEM
235 if (fphysize > LIMITMEM*1024*1024) 235 if (fphysize > LIMITMEM*1024*1024)
236 fphysize = LIMITMEM*1024*1024; 236 fphysize = LIMITMEM*1024*1024;
237#endif 237#endif
238 238
239 RELOC(boot_fphystart, u_long) = fphystart; 239 RELOC(boot_fphystart, u_long) = fphystart;
240 RELOC(boot_fphysize, u_long) = fphysize; 240 RELOC(boot_fphysize, u_long) = fphysize;
241 RELOC(boot_cphysize, u_long) = cphysize; 241 RELOC(boot_cphysize, u_long) = cphysize;
242 242
243 RELOC(machineid, int) = id; 243 RELOC(machineid, int) = id;
244 RELOC(chipmem_end, vaddr_t) = cphysize; 244 RELOC(chipmem_end, vaddr_t) = cphysize;
245 RELOC(esym, char *) = esym_addr; 245 RELOC(esym, char *) = esym_addr;
246 RELOC(boot_flags, u_long) = flags; 246 RELOC(boot_flags, u_long) = flags;
247 RELOC(boot_partition, u_long) = boot_part; 247 RELOC(boot_partition, u_long) = boot_part;
248#ifdef GRF_AGA 248#ifdef GRF_AGA
249 if (flags & 1) 249 if (flags & 1)
250 RELOC(aga_enable, u_long) |= 1; 250 RELOC(aga_enable, u_long) |= 1;
251#endif 251#endif
252 if (flags & (3 << 1)) 252 if (flags & (3 << 1))
253 RELOC(noncontig_enable, u_long) = (flags >> 1) & 3; 253 RELOC(noncontig_enable, u_long) = (flags >> 1) & 3;
254 254
255 RELOC(scsi_nosync, u_long) = inh_sync; 255 RELOC(scsi_nosync, u_long) = inh_sync;
256 256
257 /* 257 /*
258 * the kernel ends at end(), plus the cfdev and memlist structures 258 * the kernel ends at end(), plus the cfdev and memlist structures
259 * we placed there in the loader. Correct for this now. Also, 259 * we placed there in the loader. Correct for this now. Also,
260 * account for kernel symbols if they are present. 260 * account for kernel symbols if they are present.
261 */ 261 */
262 if (esym_addr == NULL) 262 if (esym_addr == NULL)
263 end_loaded = (u_int) &end; 263 end_loaded = (u_int) &end;
264 else 264 else
265 end_loaded = (u_int) esym_addr; 265 end_loaded = (u_int) esym_addr;
266 RELOC(ncfdev, int) = *(int *)(&RELOC(*(u_int *)end_loaded, u_int)); 266 RELOC(ncfdev, int) = *(int *)(&RELOC(*(u_int *)end_loaded, u_int));
267 RELOC(cfdev, struct cfdev *) = (struct cfdev *) ((int)end_loaded + 4); 267 RELOC(cfdev, struct cfdev *) = (struct cfdev *) ((int)end_loaded + 4);
268 end_loaded += 4 + RELOC(ncfdev, int) * sizeof(struct cfdev); 268 end_loaded += 4 + RELOC(ncfdev, int) * sizeof(struct cfdev);
269 269
270 RELOC(memlist, struct boot_memlist *) = 270 RELOC(memlist, struct boot_memlist *) =
271 (struct boot_memlist *)end_loaded; 271 (struct boot_memlist *)end_loaded;
272 ml = &RELOC(*(struct boot_memlist *)end_loaded, struct boot_memlist); 272 ml = &RELOC(*(struct boot_memlist *)end_loaded, struct boot_memlist);
273 end_loaded = (u_int) &((RELOC(memlist, struct boot_memlist *))-> 273 end_loaded = (u_int) &((RELOC(memlist, struct boot_memlist *))->
274 m_seg[ml->m_nseg]); 274 m_seg[ml->m_nseg]);
275 275
276 /* 276 /*
277 * Get ZorroII (16-bit) memory if there is any and it's not where the 277 * Get ZorroII (16-bit) memory if there is any and it's not where the
278 * kernel is loaded. 278 * kernel is loaded.
279 */ 279 */
280 if (ml->m_nseg > 0 && ml->m_nseg < 16 && RELOC(use_z2_mem, int)) { 280 if (ml->m_nseg > 0 && ml->m_nseg < 16 && RELOC(use_z2_mem, int)) {
281 struct boot_memseg *sp, *esp; 281 struct boot_memseg *sp, *esp;
282 282
283 sp = ml->m_seg; 283 sp = ml->m_seg;
284 esp = sp + ml->m_nseg; 284 esp = sp + ml->m_nseg;
285 for (; sp < esp; sp++) { 285 for (; sp < esp; sp++) {
286 if ((sp->ms_attrib & (MEMF_FAST | MEMF_24BITDMA)) 286 if ((sp->ms_attrib & (MEMF_FAST | MEMF_24BITDMA))
287 != (MEMF_FAST|MEMF_24BITDMA)) 287 != (MEMF_FAST|MEMF_24BITDMA))
288 continue; 288 continue;
289 if (sp->ms_start == fphystart) 289 if (sp->ms_start == fphystart)
290 continue; 290 continue;
291 RELOC(z2mem_end, paddr_t) = 291 RELOC(z2mem_end, paddr_t) =
292 sp->ms_start + sp->ms_size; 292 sp->ms_start + sp->ms_size;
293 RELOC(z2mem_start, paddr_t) = 293 RELOC(z2mem_start, paddr_t) =
294 RELOC(z2mem_end, paddr_t) - MAXPHYS * 294 RELOC(z2mem_end, paddr_t) - MAXPHYS *
295 RELOC(use_z2_mem, int) * 7; 295 RELOC(use_z2_mem, int) * 7;
296 RELOC(NZTWOMEMPG, u_int) = 296 RELOC(NZTWOMEMPG, u_int) =
297 (RELOC(z2mem_end, paddr_t) - 297 (RELOC(z2mem_end, paddr_t) -
298 RELOC(z2mem_start, paddr_t)) / PAGE_SIZE; 298 RELOC(z2mem_start, paddr_t)) / PAGE_SIZE;
299 if ((RELOC(z2mem_end, paddr_t) - 299 if ((RELOC(z2mem_end, paddr_t) -
300 RELOC(z2mem_start, paddr_t)) > sp->ms_size) { 300 RELOC(z2mem_start, paddr_t)) > sp->ms_size) {
301 RELOC(NZTWOMEMPG, u_int) = sp->ms_size / 301 RELOC(NZTWOMEMPG, u_int) = sp->ms_size /
302 PAGE_SIZE; 302 PAGE_SIZE;
303 RELOC(z2mem_start, paddr_t) = 303 RELOC(z2mem_start, paddr_t) =
304 RELOC(z2mem_end, paddr_t) - sp->ms_size; 304 RELOC(z2mem_end, paddr_t) - sp->ms_size;
305 } 305 }
306 break; 306 break;
307 } 307 }
308 } 308 }
309 309
310 /* 310 /*
311 * Scan ConfigDev list and get size of Zorro I/O boards that are 311 * Scan ConfigDev list and get size of Zorro I/O boards that are
312 * outside the Zorro II I/O area. 312 * outside the Zorro II I/O area.
313 */ 313 */
314 for (RELOC(ZBUSAVAIL, u_int) = 0, cd = 314 for (RELOC(ZBUSAVAIL, u_int) = 0, cd =
315 &RELOC(*RELOC(cfdev, struct cfdev *),struct cfdev), 315 &RELOC(*RELOC(cfdev, struct cfdev *),struct cfdev),
316 ncd = RELOC(ncfdev, int); ncd > 0; ncd--, cd++) { 316 ncd = RELOC(ncfdev, int); ncd > 0; ncd--, cd++) {
317 int bd_type = cd->rom.type & (ERT_TYPEMASK | ERTF_MEMLIST); 317 int bd_type = cd->rom.type & (ERT_TYPEMASK | ERTF_MEMLIST);
318 318
319 if (bd_type != ERT_ZORROIII && 319 if (bd_type != ERT_ZORROIII &&
320 (bd_type != ERT_ZORROII || isztwopa(cd->addr))) 320 (bd_type != ERT_ZORROII || isztwopa(cd->addr)))
321 continue; /* It's not Z2 or Z3 I/O board */ 321 continue; /* It's not Z2 or Z3 I/O board */
322 /* 322 /*
323 * Hack to adjust board size for Zorro III boards that 323 * Hack to adjust board size for Zorro III boards that
324 * do not specify an extended size or subsize. This is 324 * do not specify an extended size or subsize. This is
325 * specifically for the GVP Spectrum and hopefully won't 325 * specifically for the GVP Spectrum and hopefully won't
326 * break with other boards that configure like this. 326 * break with other boards that configure like this.
327 */ 327 */
328 if (bd_type == ERT_ZORROIII && 328 if (bd_type == ERT_ZORROIII &&
329 !(cd->rom.flags & ERFF_EXTENDED) && 329 !(cd->rom.flags & ERFF_EXTENDED) &&
330 (cd->rom.flags & ERT_Z3_SSMASK) == 0) 330 (cd->rom.flags & ERT_Z3_SSMASK) == 0)
331 cd->size = 0x10000 << 331 cd->size = 0x10000 <<
332 ((cd->rom.type - 1) & ERT_MEMMASK); 332 ((cd->rom.type - 1) & ERT_MEMMASK);
333 RELOC(ZBUSAVAIL, u_int) += m68k_round_page(cd->size); 333 RELOC(ZBUSAVAIL, u_int) += m68k_round_page(cd->size);
334 } 334 }
335 335
336 /* 336 /*
337 * assume KVA_MIN == 0. We subtract the kernel code (and 337 * assume KVA_MIN == 0. We subtract the kernel code (and
338 * the configdev's and memlists) from the virtual and 338 * the configdev's and memlists) from the virtual and
339 * phsical starts and ends. 339 * phsical starts and ends.
340 */ 340 */
341 vend = fphysize; 341 vend = fphysize;
342 avail = vend; 342 avail = vend;
343 vstart = (u_int) end_loaded; 343 vstart = (u_int) end_loaded;
344 vstart = m68k_round_page (vstart); 344 vstart = m68k_round_page (vstart);
345 pstart = vstart + fphystart; 345 pstart = vstart + fphystart;
346 pend = vend + fphystart; 346 pend = vend + fphystart;
347 avail -= vstart; 347 avail -= vstart;
348 348
349 /* 349 /*
350 * save KVA of proc0 u-area and allocate it. 350 * save KVA of proc0 u-area and allocate it.
351 */ 351 */
352 RELOC(proc0paddr, u_int) = vstart; 352 RELOC(proc0paddr, u_int) = vstart;
353 pstart += USPACE; 353 pstart += USPACE;
354 vstart += USPACE; 354 vstart += USPACE;
355 avail -= USPACE; 355 avail -= USPACE;
356 356
357#if defined(M68040) || defined(M68060) 357#if defined(M68040) || defined(M68060)
358 if (RELOC(mmutype, int) == MMU_68040) 358 if (RELOC(mmutype, int) == MMU_68040)
359 kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE); 359 kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
360 else 360 else
361#endif 361#endif
362 kstsize = 1; 362 kstsize = 1;
363 363
364 /* 364 /*
365 * allocate the kernel segment table 365 * allocate the kernel segment table
366 */ 366 */
367 RELOC(Sysseg_pa, u_int) = pstart; 367 RELOC(Sysseg_pa, u_int) = pstart;
368 RELOC(Sysseg, u_int) = vstart; 368 RELOC(Sysseg, u_int) = vstart;
369 vstart += PAGE_SIZE * kstsize; 369 vstart += PAGE_SIZE * kstsize;
370 pstart += PAGE_SIZE * kstsize; 370 pstart += PAGE_SIZE * kstsize;
371 avail -= PAGE_SIZE * kstsize; 371 avail -= PAGE_SIZE * kstsize;
372 372
373 /* 373 /*
374 * allocate kernel page table map 374 * allocate kernel page table map
375 */ 375 */
376 RELOC(Sysptmap, u_int) = vstart; 376 RELOC(Sysptmap, u_int) = vstart;
377 Sysptmap_pa = pstart; 377 Sysptmap_pa = pstart;
378 vstart += PAGE_SIZE; 378 vstart += PAGE_SIZE;
379 pstart += PAGE_SIZE; 379 pstart += PAGE_SIZE;
380 avail -= PAGE_SIZE; 380 avail -= PAGE_SIZE;
381 381
382 /* 382 /*
383 * allocate initial page table pages 383 * allocate initial page table pages
384 */ 384 */
385 pt = vstart; 
386 ptpa = pstart; 385 ptpa = pstart;
387#ifdef DRACO 386#ifdef DRACO
388 if ((id>>24)==0x7D) { 387 if ((id>>24)==0x7D) {
389 ptextra = NDRCCPG 388 ptextra = NDRCCPG
390 + RELOC(NZTWOMEMPG, u_int) 389 + RELOC(NZTWOMEMPG, u_int)
391 + btoc(RELOC(ZBUSAVAIL, u_int)); 390 + btoc(RELOC(ZBUSAVAIL, u_int));
392 } else 391 } else
393#endif 392#endif
394 ptextra = NCHIPMEMPG + NCIAPG + NZTWOROMPG + RELOC(NZTWOMEMPG, u_int) + 393 ptextra = NCHIPMEMPG + NCIAPG + NZTWOROMPG + RELOC(NZTWOMEMPG, u_int) +
395 btoc(RELOC(ZBUSAVAIL, u_int)) + NPCMCIAPG; 394 btoc(RELOC(ZBUSAVAIL, u_int)) + NPCMCIAPG;
396 395
397 ptsize = (RELOC(Sysptsize, u_int) + 396 ptsize = (RELOC(Sysptsize, u_int) +
398 howmany(ptextra, NPTEPG)) << PGSHIFT; 397 howmany(ptextra, NPTEPG)) << PGSHIFT;
399 398
400 vstart += ptsize; 399 vstart += ptsize;
401 pstart += ptsize; 400 pstart += ptsize;
402 avail -= ptsize; 401 avail -= ptsize;
403 402
404 /* 403 /*
405 * pt maps the first N megs of ram Sysptmap comes directly 
406 * after pt (ptpa) and so it must map >= N meg + Its one 
407 * page and so it must map 8M of space. Specifically 
408 * Sysptmap holds the pte's that map the kernel page tables. 
409 * 
410 * We want Sysmap to be the first address mapped by Sysptmap. 
411 * Sysmap is now placed at the end of Supervisor virtual address space. 404 * Sysmap is now placed at the end of Supervisor virtual address space.
412 */ 405 */
413 RELOC(Sysmap, u_int *) = (u_int *)-(NPTEPG * PAGE_SIZE); 406 RELOC(Sysmap, u_int *) = (u_int *)-(NPTEPG * PAGE_SIZE);
414 407
415 /* 408 /*
416 * initialize segment table and page table map 409 * initialize segment table and page table map
417 */ 410 */
418#if defined(M68040) || defined(M68060) 411#if defined(M68040) || defined(M68060)
419 if (RELOC(mmutype, int) == MMU_68040) { 412 if (RELOC(mmutype, int) == MMU_68040) {
420 /* 413 /*
421 * First invalidate the entire "segment table" pages 414 * First invalidate the entire "segment table" pages
422 * (levels 1 and 2 have the same "invalid" values). 415 * (levels 1 and 2 have the same "invalid" values).
423 */ 416 */
424 sg = (u_int *)RELOC(Sysseg_pa, u_int); 417 sg = (u_int *)RELOC(Sysseg_pa, u_int);
425 esg = &sg[kstsize * NPTEPG]; 418 esg = &sg[kstsize * NPTEPG];
426 while (sg < esg) 419 while (sg < esg)
427 *sg++ = SG_NV; 420 *sg++ = SG_NV;
428 /* 421 /*
429 * Initialize level 2 descriptors (which immediately 422 * Initialize level 2 descriptors (which immediately
430 * follow the level 1 table). We need: 423 * follow the level 1 table). We need:
431 * NPTEPG / SG4_LEV3SIZE 424 * NPTEPG / SG4_LEV3SIZE
432 * level 2 descriptors to map each of the nptpages 425 * level 2 descriptors to map each of the nptpages
433 * pages of PTEs. Note that we set the "used" bit 426 * pages of PTEs. Note that we set the "used" bit
434 * now to save the HW the expense of doing it. 427 * now to save the HW the expense of doing it.
435 */ 428 */
436 i = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE); 429 i = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE);
437 sg = &((u_int *)(RELOC(Sysseg_pa, u_int)))[SG4_LEV1SIZE]; 430 sg = &((u_int *)(RELOC(Sysseg_pa, u_int)))[SG4_LEV1SIZE];
438 esg = &sg[i]; 431 esg = &sg[i];
439 sg_proto = ptpa | SG_U | SG_RW | SG_V; 432 sg_proto = ptpa | SG_U | SG_RW | SG_V;
440 while (sg < esg) { 433 while (sg < esg) {
441 *sg++ = sg_proto; 434 *sg++ = sg_proto;
442 sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t)); 435 sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
443 } 436 }
444 437
445 /* 438 /*
446 * Initialize level 1 descriptors. We need: 439 * Initialize level 1 descriptors. We need:
447 * roundup(num, SG4_LEV2SIZE) / SG4_LEVEL2SIZE 440 * roundup(num, SG4_LEV2SIZE) / SG4_LEVEL2SIZE
448 * level 1 descriptors to map the 'num' level 2's. 441 * level 1 descriptors to map the 'num' level 2's.
449 */ 442 */
450 i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE; 443 i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE;
451 /* Include additional level 2 table for Sysmap in protostfree */ 444 /* Include additional level 2 table for Sysmap in protostfree */
452 RELOC(protostfree, u_int) = 445 RELOC(protostfree, u_int) =
453 (-1 << (i + 2)) /* & ~(-1 << MAXKL2SIZE) */; 446 (-1 << (i + 2)) /* & ~(-1 << MAXKL2SIZE) */;
454 sg = (u_int *) RELOC(Sysseg_pa, u_int); 447 sg = (u_int *) RELOC(Sysseg_pa, u_int);
455 esg = &sg[i]; 448 esg = &sg[i];
456 sg_proto = (u_int)&sg[SG4_LEV1SIZE] | SG_U | SG_RW |SG_V; 449 sg_proto = (u_int)&sg[SG4_LEV1SIZE] | SG_U | SG_RW |SG_V;
457 while (sg < esg) { 450 while (sg < esg) {
458 *sg++ = sg_proto; 451 *sg++ = sg_proto;
459 sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t)); 452 sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
460 } 453 }
461 454
462 /* Sysmap is last entry in level 1 */ 455 /* Sysmap is last entry in level 1 */
463 sg = (u_int *) RELOC(Sysseg_pa, u_int); 456 sg = (u_int *) RELOC(Sysseg_pa, u_int);
464 sg = &sg[SG4_LEV1SIZE - 1]; 457 sg = &sg[SG4_LEV1SIZE - 1];
465 *sg = sg_proto; 458 *sg = sg_proto;
466 459
467 /* 460 /*
468 * Kernel segment table at end of next level 2 table 461 * Kernel segment table at end of next level 2 table
469 */ 462 */
470 /* XXX fix calculations XXX */ 463 /* XXX fix calculations XXX */
471 i = ((((ptsize >> PGSHIFT) + 3) & -2) - 1) * (NPTEPG / SG4_LEV3SIZE); 464 i = ((((ptsize >> PGSHIFT) + 3) & -2) - 1) * (NPTEPG / SG4_LEV3SIZE);
472 sg = &((u_int *)(RELOC(Sysseg_pa, u_int)))[SG4_LEV1SIZE + i]; 465 sg = &((u_int *)(RELOC(Sysseg_pa, u_int)))[SG4_LEV1SIZE + i];
473 esg = &sg[NPTEPG / SG4_LEV3SIZE]; 466 esg = &sg[NPTEPG / SG4_LEV3SIZE];
474 sg_proto = Sysptmap_pa | SG_U | SG_RW | SG_V; 467 sg_proto = Sysptmap_pa | SG_U | SG_RW | SG_V;
475 while (sg < esg) { 468 while (sg < esg) {
476 *sg++ = sg_proto; 469 *sg++ = sg_proto;
477 sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t)); 470 sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
478 } 471 }
479 472
480 /* 473 /*
481 * Initialize Sysptmap 474 * Initialize Sysptmap
482 */ 475 */
483 sg = (u_int *) Sysptmap_pa; 476 sg = (u_int *) Sysptmap_pa;
484 esg = &sg[ptsize >> PGSHIFT]; 477 esg = &sg[ptsize >> PGSHIFT];
485 pg_proto = ptpa | PG_RW | PG_CI | PG_V; 478 pg_proto = ptpa | PG_RW | PG_CI | PG_V;
486 while (sg < esg) { 479 while (sg < esg) {
487 *sg++ = pg_proto; 480 *sg++ = pg_proto;
488 pg_proto += PAGE_SIZE; 481 pg_proto += PAGE_SIZE;
489 } 482 }
490 /* 483 /*
491 * Invalidate rest of Sysptmap page 484 * Invalidate rest of Sysptmap page
492 */ 485 */
493 esg = (u_int *)(Sysptmap_pa + PAGE_SIZE - sizeof(st_entry_t)); 486 esg = (u_int *)(Sysptmap_pa + PAGE_SIZE - sizeof(st_entry_t));
494 while (sg < esg) 487 while (sg < esg)
495 *sg++ = SG_NV; 488 *sg++ = SG_NV;
496 sg = (u_int *) Sysptmap_pa; 489 sg = (u_int *) Sysptmap_pa;
497 sg = &sg[256 - 1]; /* XXX */ 490 sg = &sg[256 - 1]; /* XXX */
498 *sg = Sysptmap_pa | PG_RW | PG_CI | PG_V; 491 *sg = Sysptmap_pa | PG_RW | PG_CI | PG_V;
499 } else 492 } else
500#endif /* M68040 */ 493#endif /* M68040 */
501 { 494 {
502 /* 495 /*
503 * Map the page table pages in both the HW segment table 496 * Map the page table pages in both the HW segment table
504 * and the software Sysptmap. 497 * and the software Sysptmap.
505 */ 498 */
506 sg = (u_int *)RELOC(Sysseg_pa, u_int); 499 sg = (u_int *)RELOC(Sysseg_pa, u_int);
507 pg = (u_int *)Sysptmap_pa; 500 pg = (u_int *)Sysptmap_pa;
508 esg = &pg[ptsize >> PGSHIFT]; 501 esg = &pg[ptsize >> PGSHIFT];
509 sg_proto = ptpa | SG_RW | SG_V; 502 sg_proto = ptpa | SG_RW | SG_V;
510 pg_proto = ptpa | PG_RW | PG_CI | PG_V; 503 pg_proto = ptpa | PG_RW | PG_CI | PG_V;
511 while (pg < esg) { 504 while (pg < esg) {
512 *sg++ = sg_proto; 505 *sg++ = sg_proto;
513 *pg++ = pg_proto; 506 *pg++ = pg_proto;
514 sg_proto += PAGE_SIZE; 507 sg_proto += PAGE_SIZE;
515 pg_proto += PAGE_SIZE; 508 pg_proto += PAGE_SIZE;
516 } 509 }
517 /* 510 /*
518 * invalidate the remainder of each table 511 * invalidate the remainder of each table
519 */ 512 */
520 /* XXX PAGE_SIZE dependent constant: 256 or 1024 */ 513 /* XXX PAGE_SIZE dependent constant: 256 or 1024 */
521 esg = (u_int *)(Sysptmap_pa + (256 - 1) * sizeof(st_entry_t)); 514 esg = (u_int *)(Sysptmap_pa + (256 - 1) * sizeof(st_entry_t));
522 while (pg < esg) { 515 while (pg < esg) {
523 *sg++ = SG_NV; 516 *sg++ = SG_NV;
524 *pg++ = PG_NV; 517 *pg++ = PG_NV;
525 } 518 }
526 *sg = Sysptmap_pa | SG_RW | SG_V; 519 *sg = Sysptmap_pa | SG_RW | SG_V;
527 *pg = Sysptmap_pa | PG_RW | PG_CI | PG_V; 520 *pg = Sysptmap_pa | PG_RW | PG_CI | PG_V;
528 /* XXX zero out rest of page? */ 521 /* XXX zero out rest of page? */
529 } 522 }
530 523
531 /* 524 /*
532 * initialize kernel page table page(s) (assume load at VA 0) 525 * initialize kernel page table page(s) (assume load at VA 0)
533 */ 526 */
534 pg_proto = fphystart | PG_RO | PG_V; /* text pages are RO */ 527 pg_proto = fphystart | PG_RO | PG_V; /* text pages are RO */
535 pg = (u_int *) ptpa; 528 pg = (u_int *) ptpa;
536 *pg++ = PG_NV; /* Make page 0 invalid */ 529 *pg++ = PG_NV; /* Make page 0 invalid */
537 pg_proto += PAGE_SIZE; 530 pg_proto += PAGE_SIZE;
538 for (i = PAGE_SIZE; i < (u_int) etext; 531 for (i = PAGE_SIZE; i < (u_int) etext;
539 i += PAGE_SIZE, pg_proto += PAGE_SIZE) 532 i += PAGE_SIZE, pg_proto += PAGE_SIZE)
540 *pg++ = pg_proto; 533 *pg++ = pg_proto;
541 534
542 /* 535 /*
543 * data, bss and dynamic tables are read/write 536 * data, bss and dynamic tables are read/write
544 */ 537 */
545 pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V; 538 pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;
546 539
547#if defined(M68040) || defined(M68060) 540#if defined(M68040) || defined(M68060)
548 /* 541 /*
549 * map the kernel segment table cache invalidated for 542 * map the kernel segment table cache invalidated for
550 * these machines (for the 68040 not strictly necessary, but 543 * these machines (for the 68040 not strictly necessary, but
551 * recommended by Motorola; for the 68060 mandatory) 544 * recommended by Motorola; for the 68060 mandatory)
552 */ 545 */
553 if (RELOC(mmutype, int) == MMU_68040) { 546 if (RELOC(mmutype, int) == MMU_68040) {
554 547
555 if (RELOC(kernel_copyback, int)) 548 if (RELOC(kernel_copyback, int))
556 pg_proto |= PG_CCB; 549 pg_proto |= PG_CCB;
557 550
558 /* 551 /*
559 * ASSUME: segment table and statically allocated page tables 552 * ASSUME: segment table and statically allocated page tables
560 * of the kernel are contiguously allocated, start at 553 * of the kernel are contiguously allocated, start at
561 * Sysseg and end at the current value of vstart. 554 * Sysseg and end at the current value of vstart.
562 */ 555 */
563 for (; i<RELOC(Sysseg, u_int); 556 for (; i<RELOC(Sysseg, u_int);
564 i+= PAGE_SIZE, pg_proto += PAGE_SIZE) 557 i+= PAGE_SIZE, pg_proto += PAGE_SIZE)
565 *pg++ = pg_proto; 558 *pg++ = pg_proto;
566 559
567 pg_proto = (pg_proto & ~PG_CCB) | PG_CI; 560 pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
568 for (; i < vstart; i += PAGE_SIZE, pg_proto += PAGE_SIZE) 561 for (; i < vstart; i += PAGE_SIZE, pg_proto += PAGE_SIZE)
569 *pg++ = pg_proto; 562 *pg++ = pg_proto;
570 563
571 pg_proto = (pg_proto & ~PG_CI); 564 pg_proto = (pg_proto & ~PG_CI);
572 if (RELOC(kernel_copyback, int)) 565 if (RELOC(kernel_copyback, int))
573 pg_proto |= PG_CCB; 566 pg_proto |= PG_CCB;
574 } 567 }
575#endif 568#endif
576 /* 569 /*
577 * go till end of data allocated so far 570 * go till end of data allocated so far
578 * plus proc0 u-area (to be allocated) 571 * plus proc0 u-area (to be allocated)
579 */ 572 */
580 for (; i < vstart; i += PAGE_SIZE, pg_proto += PAGE_SIZE) 573 for (; i < vstart; i += PAGE_SIZE, pg_proto += PAGE_SIZE)
581 *pg++ = pg_proto; 574 *pg++ = pg_proto;
582 /* 575 /*
583 * invalidate remainder of kernel PT 576 * invalidate remainder of kernel PT
584 */ 577 */
585 while (pg < (pt_entry_t *) (ptpa + ptsize)) 578 while (pg < (pt_entry_t *) (ptpa + ptsize))
586 *pg++ = PG_NV; 579 *pg++ = PG_NV;
587 580
588 /* 581 /*
589 * validate internal IO PTEs following current vstart 582 * validate internal IO PTEs following current vstart
590 */ 583 */
591 pg = &((u_int *)ptpa)[vstart >> PGSHIFT]; 584 pg = &((u_int *)ptpa)[vstart >> PGSHIFT];
592#ifdef DRACO 585#ifdef DRACO
593 if ((id >> 24) == 0x7D) { 586 if ((id >> 24) == 0x7D) {
594 RELOC(DRCCADDR, u_int) = vstart; 587 RELOC(DRCCADDR, u_int) = vstart;
595 RELOC(CIAADDR, vaddr_t) = 588 RELOC(CIAADDR, vaddr_t) =
596 RELOC(DRCCADDR, u_int) + DRCIAPG * PAGE_SIZE; 589 RELOC(DRCCADDR, u_int) + DRCIAPG * PAGE_SIZE;
597 if (RELOC(z2mem_end, vaddr_t) == 0) 590 if (RELOC(z2mem_end, vaddr_t) == 0)
598 RELOC(ZBUSADDR, vaddr_t) = 591 RELOC(ZBUSADDR, vaddr_t) =
599 RELOC(DRCCADDR, u_int) + NDRCCPG * PAGE_SIZE; 592 RELOC(DRCCADDR, u_int) + NDRCCPG * PAGE_SIZE;
600 pg_proto = DRCCBASE | PG_RW | PG_CI | PG_V; 593 pg_proto = DRCCBASE | PG_RW | PG_CI | PG_V;
601 while (pg_proto < DRZ2BASE) { 594 while (pg_proto < DRZ2BASE) {
602 *pg++ = pg_proto; 595 *pg++ = pg_proto;
603 pg_proto += DRCCSTRIDE; 596 pg_proto += DRCCSTRIDE;
604 vstart += PAGE_SIZE; 597 vstart += PAGE_SIZE;
605 } 598 }
606 599
607 /* NCR 53C710 chip */ 600 /* NCR 53C710 chip */
608 *pg++ = DRSCSIBASE | PG_RW | PG_CI | PG_V; 601 *pg++ = DRSCSIBASE | PG_RW | PG_CI | PG_V;
609 vstart += PAGE_SIZE; 602 vstart += PAGE_SIZE;
610 603
611#ifdef DEBUG_KERNEL_START 604#ifdef DEBUG_KERNEL_START
612 /* 605 /*
613 * early rollcolor Altais mapping 606 * early rollcolor Altais mapping
614 * XXX (only works if in slot 0) 607 * XXX (only works if in slot 0)
615 */ 608 */
616 *pg++ = 0x20000000 | PG_RW | PG_CI | PG_V; 609 *pg++ = 0x20000000 | PG_RW | PG_CI | PG_V;
617 vstart += PAGE_SIZE; 610 vstart += PAGE_SIZE;
618#endif 611#endif
619 } else 612 } else
620#endif 613#endif
621 { 614 {
622 RELOC(CHIPMEMADDR, vaddr_t) = vstart; 615 RELOC(CHIPMEMADDR, vaddr_t) = vstart;
623 pg_proto = CHIPMEMBASE | PG_RW | PG_CI | PG_V; 616 pg_proto = CHIPMEMBASE | PG_RW | PG_CI | PG_V;
624 /* CI needed here?? */ 617 /* CI needed here?? */
625 while (pg_proto < CHIPMEMTOP) { 618 while (pg_proto < CHIPMEMTOP) {
626 *pg++ = pg_proto; 619 *pg++ = pg_proto;
627 pg_proto += PAGE_SIZE; 620 pg_proto += PAGE_SIZE;
628 vstart += PAGE_SIZE; 621 vstart += PAGE_SIZE;
629 } 622 }
630 } 623 }
631 if (RELOC(z2mem_end, paddr_t)) { /* XXX */ 624 if (RELOC(z2mem_end, paddr_t)) { /* XXX */
632 RELOC(ZTWOMEMADDR, vaddr_t) = vstart; 625 RELOC(ZTWOMEMADDR, vaddr_t) = vstart;
633 RELOC(ZBUSADDR, vaddr_t) = RELOC(ZTWOMEMADDR, vaddr_t) + 626 RELOC(ZBUSADDR, vaddr_t) = RELOC(ZTWOMEMADDR, vaddr_t) +
634 RELOC(NZTWOMEMPG, u_int) * PAGE_SIZE; 627 RELOC(NZTWOMEMPG, u_int) * PAGE_SIZE;
635 pg_proto = RELOC(z2mem_start, paddr_t) | /* XXX */ 628 pg_proto = RELOC(z2mem_start, paddr_t) | /* XXX */
636 PG_RW | PG_V; /* XXX */ 629 PG_RW | PG_V; /* XXX */
637 while (pg_proto < RELOC(z2mem_end, paddr_t)) { /* XXX */ 630 while (pg_proto < RELOC(z2mem_end, paddr_t)) { /* XXX */
638 *pg++ = pg_proto; /* XXX */ 631 *pg++ = pg_proto; /* XXX */
639 pg_proto += PAGE_SIZE; /* XXX */ 632 pg_proto += PAGE_SIZE; /* XXX */
640 vstart += PAGE_SIZE; 633 vstart += PAGE_SIZE;
641 } /* XXX */ 634 } /* XXX */
642 } /* XXX */ 635 } /* XXX */
643#ifdef DRACO 636#ifdef DRACO
644 if ((id >> 24) != 0x7D) 637 if ((id >> 24) != 0x7D)
645#endif 638#endif
646 { 639 {
647 RELOC(CIAADDR, vaddr_t) = vstart; 640 RELOC(CIAADDR, vaddr_t) = vstart;
648 pg_proto = CIABASE | PG_RW | PG_CI | PG_V; 641 pg_proto = CIABASE | PG_RW | PG_CI | PG_V;
649 while (pg_proto < CIATOP) { 642 while (pg_proto < CIATOP) {
650 *pg++ = pg_proto; 643 *pg++ = pg_proto;
651 pg_proto += PAGE_SIZE; 644 pg_proto += PAGE_SIZE;
652 vstart += PAGE_SIZE; 645 vstart += PAGE_SIZE;
653 } 646 }
654 RELOC(ZTWOROMADDR, vaddr_t) = vstart; 647 RELOC(ZTWOROMADDR, vaddr_t) = vstart;
655 pg_proto = ZTWOROMBASE | PG_RW | PG_CI | PG_V; 648 pg_proto = ZTWOROMBASE | PG_RW | PG_CI | PG_V;
656 while (pg_proto < ZTWOROMTOP) { 649 while (pg_proto < ZTWOROMTOP) {
657 *pg++ = pg_proto; 650 *pg++ = pg_proto;
658 pg_proto += PAGE_SIZE; 651 pg_proto += PAGE_SIZE;
659 vstart += PAGE_SIZE; 652 vstart += PAGE_SIZE;
660 } 653 }
661 RELOC(ZBUSADDR, vaddr_t) = vstart; 654 RELOC(ZBUSADDR, vaddr_t) = vstart;
662 /* not on 8k boundary :-( */ 655 /* not on 8k boundary :-( */
663 RELOC(CIAADDR, vaddr_t) += PAGE_SIZE/2; 656 RELOC(CIAADDR, vaddr_t) += PAGE_SIZE/2;
664 RELOC(CUSTOMADDR, vaddr_t) = 657 RELOC(CUSTOMADDR, vaddr_t) =
665 RELOC(ZTWOROMADDR, vaddr_t) - ZTWOROMBASE + CUSTOMBASE; 658 RELOC(ZTWOROMADDR, vaddr_t) - ZTWOROMBASE + CUSTOMBASE;
666 } 659 }
667 660
668 /* 661 /*
669 *[ following page tables MAY be allocated to ZORRO3 space, 662 *[ following page tables MAY be allocated to ZORRO3 space,
670 * but they're then later mapped in autoconf.c ] 663 * but they're then later mapped in autoconf.c ]
671 */ 664 */
672 vstart += RELOC(ZBUSAVAIL, u_int); 665 vstart += RELOC(ZBUSAVAIL, u_int);
673 666
674 /* 667 /*
675 * init mem sizes 668 * init mem sizes
676 */ 669 */
677 RELOC(maxmem, u_int) = pend >> PGSHIFT; 670 RELOC(maxmem, u_int) = pend >> PGSHIFT;
678 RELOC(lowram, u_int) = fphystart; 671 RELOC(lowram, u_int) = fphystart;
679 RELOC(physmem, u_int) = fphysize >> PGSHIFT; 672 RELOC(physmem, u_int) = fphysize >> PGSHIFT;
680 673
681 RELOC(virtual_avail, u_int) = vstart; 674 RELOC(virtual_avail, u_int) = vstart;
682 675
683 /* 676 /*
684 * Put user page tables starting at next 16MB boundary, to make kernel 677 * Put user page tables starting at next 16MB boundary, to make kernel
685 * dumps more readable, with guaranteed 16MB of. 678 * dumps more readable, with guaranteed 16MB of.
686 * XXX 16 MB instead of 256 MB should be enough, but... 679 * XXX 16 MB instead of 256 MB should be enough, but...
687 * we need to fix the fastmem loading first. (see comment at line 375) 680 * we need to fix the fastmem loading first. (see comment at line 375)
688 */ 681 */
689 RELOC(amiga_uptbase, vaddr_t) = 682 RELOC(amiga_uptbase, vaddr_t) =
690 roundup(vstart + 0x10000000, 0x10000000); 683 roundup(vstart + 0x10000000, 0x10000000);
691 684
692 /* 685 /*
693 * set this before copying the kernel, so the variable is updated in 686 * set this before copying the kernel, so the variable is updated in
694 * the `real' place too. protorp[0] is already preset to the 687 * the `real' place too. protorp[0] is already preset to the
695 * CRP setting. 688 * CRP setting.
696 */ 689 */
697 RELOC(protorp[1], u_int) = RELOC(Sysseg_pa, u_int); 690 RELOC(protorp[1], u_int) = RELOC(Sysseg_pa, u_int);
698 691
699 RELOC(start_c_fphystart, u_int) = fphystart; 692 RELOC(start_c_fphystart, u_int) = fphystart;
700 RELOC(start_c_pstart, u_int) = pstart; 693 RELOC(start_c_pstart, u_int) = pstart;
701 694
702 /* 695 /*
703 * copy over the kernel (and all now initialized variables) 696 * copy over the kernel (and all now initialized variables)
704 * to fastram. DONT use bcopy(), this beast is much larger 697 * to fastram. DONT use bcopy(), this beast is much larger
705 * than 128k ! 698 * than 128k !
706 */ 699 */
707 if (loadbase == 0) { 700 if (loadbase == 0) {
708 register u_int *lp, *le, *fp; 701 register u_int *lp, *le, *fp;
709 702
710 lp = 0; 703 lp = 0;
711 le = (u_int *)end_loaded; 704 le = (u_int *)end_loaded;
712 fp = (u_int *)fphystart; 705 fp = (u_int *)fphystart;
713 while (lp < le) 706 while (lp < le)
714 *fp++ = *lp++; 707 *fp++ = *lp++;
715 } 708 }
716 709
717#ifdef DEBUG_KERNEL_START 710#ifdef DEBUG_KERNEL_START
718 if ((id>>24)==0x7D) { 711 if ((id>>24)==0x7D) {
719 *altaiscolpt = 0; 712 *altaiscolpt = 0;
720 *altaiscol = 40; 713 *altaiscol = 40;
721 *altaiscol = 40; 714 *altaiscol = 40;
722 *altaiscol = 0; 715 *altaiscol = 0;
723 } else 716 } else
724((volatile struct Custom *)0xdff000)->color[0] = 0xAA0; /* YELLOW */ 717((volatile struct Custom *)0xdff000)->color[0] = 0xAA0; /* YELLOW */
725#endif 718#endif
726 /* 719 /*
727 * prepare to enable the MMU 720 * prepare to enable the MMU
728 */ 721 */
729#if defined(M68040) || defined(M68060) 722#if defined(M68040) || defined(M68060)
730 if (RELOC(mmutype, int) == MMU_68040) { 723 if (RELOC(mmutype, int) == MMU_68040) {
731 if (id & AMIGA_68060) { 724 if (id & AMIGA_68060) {
732 /* do i need to clear the branch cache? */ 725 /* do i need to clear the branch cache? */
733 __asm volatile ( ".word 0x4e7a,0x0002;" 726 __asm volatile ( ".word 0x4e7a,0x0002;"
734 "orl #0x400000,%%d0;" 727 "orl #0x400000,%%d0;"
735 ".word 0x4e7b,0x0002" : : : "d0"); 728 ".word 0x4e7b,0x0002" : : : "d0");
736 } 729 }
737 730
738 /* 731 /*
739 * movel Sysseg_pa,%a0; 732 * movel Sysseg_pa,%a0;
740 * movec %a0,%srp; 733 * movec %a0,%srp;
741 */ 734 */
742 735
743 __asm volatile ("movel %0,%%a0; .word 0x4e7b,0x8807" 736 __asm volatile ("movel %0,%%a0; .word 0x4e7b,0x8807"
744 : : "a" (RELOC(Sysseg_pa, u_int)) : "a0"); 737 : : "a" (RELOC(Sysseg_pa, u_int)) : "a0");
745 738
746#ifdef DEBUG_KERNEL_START 739#ifdef DEBUG_KERNEL_START
747 if ((id>>24)==0x7D) { 740 if ((id>>24)==0x7D) {
748 *altaiscolpt = 0; 741 *altaiscolpt = 0;
749 *altaiscol = 40; 742 *altaiscol = 40;
750 *altaiscol = 33; 743 *altaiscol = 33;
751 *altaiscol = 0; 744 *altaiscol = 0;
752 } else 745 } else
753((volatile struct Custom *)0xdff000)->color[0] = 0xA70; /* ORANGE */ 746((volatile struct Custom *)0xdff000)->color[0] = 0xA70; /* ORANGE */
754#endif 747#endif
755 } else 748 } else
756#endif 749#endif
757 { 750 {
758 /* 751 /*
759 * setup and load SRP 752 * setup and load SRP
760 * nolimit, share global, 4 byte PTE's 753 * nolimit, share global, 4 byte PTE's
761 */ 754 */
762 (RELOC(protorp[0], u_int)) = 0x80000202; 755 (RELOC(protorp[0], u_int)) = 0x80000202;
763 __asm volatile ("pmove %0@,%%srp":: "a" (&RELOC(protorp, u_int))); 756 __asm volatile ("pmove %0@,%%srp":: "a" (&RELOC(protorp, u_int)));
764 } 757 }
765} 758}
766 759
767void 760void
768start_c_finish() 761start_c_finish()
769{ 762{
770#ifdef P5PPC68KBOARD 763#ifdef P5PPC68KBOARD
771 struct cfdev *cdp, *ecdp; 764 struct cfdev *cdp, *ecdp;
772#endif 765#endif
773 766
774#ifdef DEBUG_KERNEL_START 767#ifdef DEBUG_KERNEL_START
775#ifdef DRACO 768#ifdef DRACO
776 if ((id >> 24) == 0x7D) { /* mapping on, is_draco() is valid */ 769 if ((id >> 24) == 0x7D) { /* mapping on, is_draco() is valid */
777 int i; 770 int i;
778 /* XXX experimental Altais register mapping only */ 771 /* XXX experimental Altais register mapping only */
779 altaiscolpt = (volatile u_int8_t *)(DRCCADDR+PAGE_SIZE*9+0x3c8); 772 altaiscolpt = (volatile u_int8_t *)(DRCCADDR+PAGE_SIZE*9+0x3c8);
780 altaiscol = altaiscolpt + 1; 773 altaiscol = altaiscolpt + 1;
781 for (i=0; i<140000; i++) { 774 for (i=0; i<140000; i++) {
782 *altaiscolpt = 0; 775 *altaiscolpt = 0;
783 *altaiscol = 0; 776 *altaiscol = 0;
784 *altaiscol = 40; 777 *altaiscol = 40;
785 *altaiscol = 0; 778 *altaiscol = 0;
786 } 779 }
787 } else 780 } else
788#endif 781#endif
789((volatile struct Custom *)CUSTOMADDR)->color[0] = 0x0a0; /* GREEN */ 782((volatile struct Custom *)CUSTOMADDR)->color[0] = 0x0a0; /* GREEN */
790#endif 783#endif
791 784
792 bzero ((u_char *)proc0paddr, USPACE); 785 bzero ((u_char *)proc0paddr, USPACE);
793 pmap_bootstrap(start_c_pstart, start_c_fphystart); 786 pmap_bootstrap(start_c_pstart, start_c_fphystart);
794 787
795 /* 788 /*
796 * to make life easier in locore.s, set these addresses explicitly 789 * to make life easier in locore.s, set these addresses explicitly
797 */ 790 */
798 CIAAbase = CIAADDR + 0x1001; /* CIA-A at odd addresses ! */ 791 CIAAbase = CIAADDR + 0x1001; /* CIA-A at odd addresses ! */
799 CIABbase = CIAADDR; 792 CIABbase = CIAADDR;
800 CUSTOMbase = CUSTOMADDR; 793 CUSTOMbase = CUSTOMADDR;
801#ifdef DRACO 794#ifdef DRACO
802 if (is_draco()) { 795 if (is_draco()) {
803 draco_intena = (volatile u_int8_t *)DRCCADDR+1; 796 draco_intena = (volatile u_int8_t *)DRCCADDR+1;
804 draco_intpen = draco_intena + PAGE_SIZE; 797 draco_intpen = draco_intena + PAGE_SIZE;
805 draco_intfrc = draco_intpen + PAGE_SIZE; 798 draco_intfrc = draco_intpen + PAGE_SIZE;
806 draco_misc = draco_intfrc + PAGE_SIZE; 799 draco_misc = draco_intfrc + PAGE_SIZE;
807 draco_ioct = (struct drioct *)(DRCCADDR + DRIOCTLPG*PAGE_SIZE); 800 draco_ioct = (struct drioct *)(DRCCADDR + DRIOCTLPG*PAGE_SIZE);
808 } else 801 } else
809#endif 802#endif
810 { 803 {
811 INTREQRaddr = (vaddr_t)&custom.intreqr; 804 INTREQRaddr = (vaddr_t)&custom.intreqr;
812 INTREQWaddr = (vaddr_t)&custom.intreq; 805 INTREQWaddr = (vaddr_t)&custom.intreq;
813 } 806 }
814 /* 807 /*
815 * Get our chip memory allocation system working 808 * Get our chip memory allocation system working
816 */ 809 */
817 chipmem_start += CHIPMEMADDR; 810 chipmem_start += CHIPMEMADDR;
818 chipmem_end += CHIPMEMADDR; 811 chipmem_end += CHIPMEMADDR;
819 812
820 /* XXX is: this MUST NOT BE DONE before the pmap_bootstrap() call */ 813 /* XXX is: this MUST NOT BE DONE before the pmap_bootstrap() call */
821 if (z2mem_end) { 814 if (z2mem_end) {
822 z2mem_end = ZTWOMEMADDR + NZTWOMEMPG * PAGE_SIZE; 815 z2mem_end = ZTWOMEMADDR + NZTWOMEMPG * PAGE_SIZE;
823 z2mem_start = ZTWOMEMADDR; 816 z2mem_start = ZTWOMEMADDR;
824 } 817 }
825 818
826#if 0 819#if 0
827 i = *(int *)proc0paddr; 820 i = *(int *)proc0paddr;
828 *(volatile int *)proc0paddr = i; 821 *(volatile int *)proc0paddr = i;
829#endif 822#endif
830 823
831 /* 824 /*
832 * disable all interrupts but enable allow them to be enabled 825 * disable all interrupts but enable allow them to be enabled
833 * by specific driver code (global int enable bit) 826 * by specific driver code (global int enable bit)
834 */ 827 */
835#ifdef DRACO 828#ifdef DRACO
836 if (is_draco()) { 829 if (is_draco()) {
837 /* XXX to be done. For now, just: */ 830 /* XXX to be done. For now, just: */
838 *draco_intena = 0; 831 *draco_intena = 0;
839 *draco_intpen = 0; 832 *draco_intpen = 0;
840 *draco_intfrc = 0; 833 *draco_intfrc = 0;
841 ciaa.icr = 0x7f; /* and keyboard */ 834 ciaa.icr = 0x7f; /* and keyboard */
842 ciab.icr = 0x7f; /* and again */ 835 ciab.icr = 0x7f; /* and again */
843 836
844 draco_ioct->io_control &= 837 draco_ioct->io_control &=
845 ~(DRCNTRL_KBDINTENA|DRCNTRL_FDCINTENA); /* and another */ 838 ~(DRCNTRL_KBDINTENA|DRCNTRL_FDCINTENA); /* and another */
846 839
847 draco_ioct->io_status2 &= 840 draco_ioct->io_status2 &=
848 ~(DRSTAT2_PARIRQENA|DRSTAT2_TMRINTENA); /* some more */ 841 ~(DRSTAT2_PARIRQENA|DRSTAT2_TMRINTENA); /* some more */
849 842
850 *(volatile u_int8_t *)(DRCCADDR + 1 + 843 *(volatile u_int8_t *)(DRCCADDR + 1 +
851 DRSUPIOPG*PAGE_SIZE + 4*(0x3F8 + 1)) = 0; /* and com0 */ 844 DRSUPIOPG*PAGE_SIZE + 4*(0x3F8 + 1)) = 0; /* and com0 */
852 845
853 *(volatile u_int8_t *)(DRCCADDR + 1 + 846 *(volatile u_int8_t *)(DRCCADDR + 1 +
854 DRSUPIOPG*PAGE_SIZE + 4*(0x2F8 + 1)) = 0; /* and com1 */ 847 DRSUPIOPG*PAGE_SIZE + 4*(0x2F8 + 1)) = 0; /* and com1 */
855 848
856 draco_ioct->io_control |= DRCNTRL_WDOGDIS; /* stop Fido */ 849 draco_ioct->io_control |= DRCNTRL_WDOGDIS; /* stop Fido */
857 *draco_misc &= ~1/*DRMISC_FASTZ2*/; 850 *draco_misc &= ~1/*DRMISC_FASTZ2*/;
858 851
859 } else 852 } else
860#endif 853#endif
861 { 854 {
862 custom.intena = 0x7fff; /* disable ints */ 855 custom.intena = 0x7fff; /* disable ints */
863 custom.intena = INTF_SETCLR | INTF_INTEN; 856 custom.intena = INTF_SETCLR | INTF_INTEN;
864 /* but allow them */ 857 /* but allow them */
865 custom.intreq = 0x7fff; /* clear any current */ 858 custom.intreq = 0x7fff; /* clear any current */
866 ciaa.icr = 0x7f; /* and keyboard */ 859 ciaa.icr = 0x7f; /* and keyboard */
867 ciab.icr = 0x7f; /* and again */ 860 ciab.icr = 0x7f; /* and again */
868 861
869 /* 862 /*
870 * remember address of read and write intena register for use 863 * remember address of read and write intena register for use
871 * by extended spl?() macros. 864 * by extended spl?() macros.
872 */ 865 */
873 amiga_intena_read = &custom.intenar; 866 amiga_intena_read = &custom.intenar;
874 amiga_intena_write = &custom.intena; 867 amiga_intena_write = &custom.intena;
875 } 868 }
876 869
877 /* 870 /*
878 * This is needed for 3000's with superkick ROM's. Bit 7 of 871 * This is needed for 3000's with superkick ROM's. Bit 7 of
879 * 0xde0002 enables the ROM if set. If this isn't set the machine 872 * 0xde0002 enables the ROM if set. If this isn't set the machine
880 * has to be powercycled in order for it to boot again. ICKA! RFH 873 * has to be powercycled in order for it to boot again. ICKA! RFH
881 */ 874 */
882 if (is_a3000()) { 875 if (is_a3000()) {
883 volatile unsigned char *a3000_magic_reset; 876 volatile unsigned char *a3000_magic_reset;
884 877
885 a3000_magic_reset = (volatile unsigned char *)ztwomap(0xde0002); 878 a3000_magic_reset = (volatile unsigned char *)ztwomap(0xde0002);
886 879
887 /* Turn SuperKick ROM (V36) back on */ 880 /* Turn SuperKick ROM (V36) back on */
888 *a3000_magic_reset |= 0x80; 881 *a3000_magic_reset |= 0x80;
889 } 882 }
890 883
891#ifdef P5PPC68KBOARD 884#ifdef P5PPC68KBOARD
892 /* 885 /*
893 * Are we an P5 PPC/68K board? install different reset 886 * Are we an P5 PPC/68K board? install different reset
894 * routine. 887 * routine.
895 */ 888 */
896 889
897 for (cdp = cfdev, ecdp = &cfdev[ncfdev]; cdp < ecdp; cdp++) { 890 for (cdp = cfdev, ecdp = &cfdev[ncfdev]; cdp < ecdp; cdp++) {
898 if (cdp->rom.manid == 8512 && 891 if (cdp->rom.manid == 8512 &&
899 (cdp->rom.prodid == 100 || cdp->rom.prodid == 110)) { 892 (cdp->rom.prodid == 100 || cdp->rom.prodid == 110)) {
900 p5ppc = 1; 893 p5ppc = 1;
901 break; 894 break;
902 } 895 }
903 } 896 }
904#endif 897#endif
905} 898}
906 899
907void 900void
908rollcolor(color) 901rollcolor(color)
909 int color; 902 int color;
910{ 903{
911 int s, i; 904 int s, i;
912 905
913 s = splhigh(); 906 s = splhigh();
914 /* 907 /*
915 * need to adjust count - 908 * need to adjust count -
916 * too slow when cache off, too fast when cache on 909 * too slow when cache off, too fast when cache on
917 */ 910 */
918 for (i = 0; i < 400000; i++) 911 for (i = 0; i < 400000; i++)
919 ((volatile struct Custom *)CUSTOMbase)->color[0] = color; 912 ((volatile struct Custom *)CUSTOMbase)->color[0] = color;
920 splx(s); 913 splx(s);
921} 914}
922 915
923#ifdef DEVRELOAD 916#ifdef DEVRELOAD
924/* 917/*
925 * Kernel reloading code 918 * Kernel reloading code
926 */ 919 */
927 920
928static struct exec kernel_exec; 921static struct exec kernel_exec;
929static u_char *kernel_image; 922static u_char *kernel_image;
930static u_long kernel_text_size, kernel_load_ofs; 923static u_long kernel_text_size, kernel_load_ofs;
931static u_long kernel_load_phase; 924static u_long kernel_load_phase;
932static u_long kernel_load_endseg; 925static u_long kernel_load_endseg;
933static u_long kernel_symbol_size, kernel_symbol_esym; 926static u_long kernel_symbol_size, kernel_symbol_esym;
934 927
935/* This supports the /dev/reload device, major 2, minor 20, 928/* This supports the /dev/reload device, major 2, minor 20,
936 hooked into mem.c. Author: Bryan Ford. */ 929 hooked into mem.c. Author: Bryan Ford. */
937 930
938/* 931/*
939 * This is called below to find out how much magic storage 932 * This is called below to find out how much magic storage
940 * will be needed after a kernel image to be reloaded. 933 * will be needed after a kernel image to be reloaded.
941 */ 934 */
942static int 935static int
943kernel_image_magic_size() 936kernel_image_magic_size()
944{ 937{
945 int sz; 938 int sz;
946 939
947 /* 4 + cfdev's + Mem_Seg's + 4 */ 940 /* 4 + cfdev's + Mem_Seg's + 4 */
948 sz = 8 + ncfdev * sizeof(struct cfdev) 941 sz = 8 + ncfdev * sizeof(struct cfdev)
949 + memlist->m_nseg * sizeof(struct boot_memseg); 942 + memlist->m_nseg * sizeof(struct boot_memseg);
950 return(sz); 943 return(sz);
951} 944}
952 945
953/* This actually copies the magic information. */ 946/* This actually copies the magic information. */
954static void 947static void
955kernel_image_magic_copy(dest) 948kernel_image_magic_copy(dest)
956 u_char *dest; 949 u_char *dest;
957{ 950{
958 *((int*)dest) = ncfdev; 951 *((int*)dest) = ncfdev;
959 dest += 4; 952 dest += 4;
960 bcopy(cfdev, dest, ncfdev * sizeof(struct cfdev) 953 bcopy(cfdev, dest, ncfdev * sizeof(struct cfdev)
961 + memlist->m_nseg * sizeof(struct boot_memseg) + 4); 954 + memlist->m_nseg * sizeof(struct boot_memseg) + 4);
962} 955}
963 956
964#undef AOUT_LDPGSZ 957#undef AOUT_LDPGSZ
965#define AOUT_LDPGSZ 8192 /* XXX ??? */ 958#define AOUT_LDPGSZ 8192 /* XXX ??? */
966 959
967int 960int
968kernel_reload_write(uio) 961kernel_reload_write(uio)
969 struct uio *uio; 962 struct uio *uio;
970{ 963{
971 extern int eclockfreq; 964 extern int eclockfreq;
972 struct iovec *iov; 965 struct iovec *iov;
973 int error, c; 966 int error, c;
974 967
975 iov = uio->uio_iov; 968 iov = uio->uio_iov;
976 969
977 if (kernel_image == 0) { 970 if (kernel_image == 0) {
978 /* 971 /*
979 * We have to get at least the whole exec header 972 * We have to get at least the whole exec header
980 * in the first write. 973 * in the first write.
981 */ 974 */
982 if (iov->iov_len < sizeof(kernel_exec)) 975 if (iov->iov_len < sizeof(kernel_exec))
983 return ENOEXEC; /* XXX */ 976 return ENOEXEC; /* XXX */
984 977
985 /* 978 /*
986 * Pull in the exec header and check it. 979 * Pull in the exec header and check it.
987 */ 980 */
988 if ((error = uiomove((void *)&kernel_exec, sizeof(kernel_exec), 981 if ((error = uiomove((void *)&kernel_exec, sizeof(kernel_exec),
989 uio)) != 0) 982 uio)) != 0)
990 return(error); 983 return(error);
991 printf("loading kernel %ld+%ld+%ld+%ld\n", kernel_exec.a_text, 984 printf("loading kernel %ld+%ld+%ld+%ld\n", kernel_exec.a_text,
992 kernel_exec.a_data, kernel_exec.a_bss, 985 kernel_exec.a_data, kernel_exec.a_bss,
993 esym == NULL ? 0 : kernel_exec.a_syms); 986 esym == NULL ? 0 : kernel_exec.a_syms);
994 /* 987 /*
995 * Looks good - allocate memory for a kernel image. 988 * Looks good - allocate memory for a kernel image.
996 */ 989 */
997 kernel_text_size = (kernel_exec.a_text 990 kernel_text_size = (kernel_exec.a_text
998 + AOUT_LDPGSZ - 1) & (-AOUT_LDPGSZ); 991 + AOUT_LDPGSZ - 1) & (-AOUT_LDPGSZ);
999 /* 992 /*
1000 * Estimate space needed for symbol names, since we don't 993 * Estimate space needed for symbol names, since we don't
1001 * know how big it really is. 994 * know how big it really is.
1002 */ 995 */
1003 if (esym != NULL) { 996 if (esym != NULL) {
1004 kernel_symbol_size = kernel_exec.a_syms; 997 kernel_symbol_size = kernel_exec.a_syms;
1005 kernel_symbol_size += 16 * (kernel_symbol_size / 12); 998 kernel_symbol_size += 16 * (kernel_symbol_size / 12);
1006 } 999 }
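		/*
		 * (Editorial note, not part of the original source.)  Each
		 * a.out nlist record is 12 bytes, so the estimate above
		 * budgets 16 bytes of string space per symbol: with
		 * a_syms = 120000 (10000 symbols) the extra space is
		 * 16 * 10000 = 160000 bytes, giving a kernel_symbol_size
		 * of 280000.  Phase 3 below clamps the real string table
		 * length to this same 16 * (a_syms / 12) bound.
		 */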
1007 /* 1000 /*
1008 * XXX - should check that image will fit in CHIP memory 1001 * XXX - should check that image will fit in CHIP memory
1009 * XXX return an error if it doesn't 1002 * XXX return an error if it doesn't
1010 */ 1003 */
1011 if ((kernel_text_size + kernel_exec.a_data + 1004 if ((kernel_text_size + kernel_exec.a_data +
1012 kernel_exec.a_bss + kernel_symbol_size + 1005 kernel_exec.a_bss + kernel_symbol_size +
1013 kernel_image_magic_size()) > boot_cphysize) 1006 kernel_image_magic_size()) > boot_cphysize)
1014 return (EFBIG); 1007 return (EFBIG);
1015 kernel_image = malloc(kernel_text_size + kernel_exec.a_data 1008 kernel_image = malloc(kernel_text_size + kernel_exec.a_data
1016 + kernel_exec.a_bss 1009 + kernel_exec.a_bss
1017 + kernel_symbol_size 1010 + kernel_symbol_size
1018 + kernel_image_magic_size(), 1011 + kernel_image_magic_size(),
1019 M_TEMP, M_WAITOK); 1012 M_TEMP, M_WAITOK);
1020 kernel_load_ofs = 0; 1013 kernel_load_ofs = 0;
1021 kernel_load_phase = 0; 1014 kernel_load_phase = 0;
1022 kernel_load_endseg = kernel_exec.a_text; 1015 kernel_load_endseg = kernel_exec.a_text;
1023 return(0); 1016 return(0);
1024 } 1017 }
1025 /* 1018 /*
1026 * Continue loading in the kernel image. 1019 * Continue loading in the kernel image.
1027 */ 1020 */
1028 c = min(iov->iov_len, kernel_load_endseg - kernel_load_ofs); 1021 c = min(iov->iov_len, kernel_load_endseg - kernel_load_ofs);
1029 c = min(c, MAXPHYS); 1022 c = min(c, MAXPHYS);
1030 if ((error = uiomove(kernel_image + kernel_load_ofs, (int)c, uio)) != 0) 1023 if ((error = uiomove(kernel_image + kernel_load_ofs, (int)c, uio)) != 0)
1031 return(error); 1024 return(error);
1032 kernel_load_ofs += c; 1025 kernel_load_ofs += c;
1033 1026
1034 /* 1027 /*
1035 * Fun and games to handle loading symbols - the length of the 1028 * Fun and games to handle loading symbols - the length of the
1036 * string table isn't known until after the symbol table has 1029 * string table isn't known until after the symbol table has
1037 * been loaded. We have to load the kernel text, data, and 1030 * been loaded. We have to load the kernel text, data, and
1038 * the symbol table, then get the size of the strings. A 1031 * the symbol table, then get the size of the strings. A
1039 * new kernel image is then allocated and the data currently 1032 * new kernel image is then allocated and the data currently
1040 * loaded moved to the new image. Then continue reading the 1033 * loaded moved to the new image. Then continue reading the
1041 * string table. This has problems if there isn't enough 1034 * string table. This has problems if there isn't enough
1042 * room to allocate space for the two copies of the kernel 1035 * room to allocate space for the two copies of the kernel
1043 * image. So the approach I took is to guess at the size 1036 * image. So the approach I took is to guess at the size
1044 * of the symbol strings. If the guess is wrong, the symbol 1037 * of the symbol strings. If the guess is wrong, the symbol
1045 * table is ignored. 1038 * table is ignored.
1046 */ 1039 */
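	/*
	 * (Editorial summary, not part of the original source.)  The
	 * switch below is a small state machine on kernel_load_phase:
	 *
	 *	0  text loaded    -> jump to the page-rounded data offset,
	 *	                     expect a_data bytes, go to phase 1
	 *	1  data loaded    -> zero the bss; if symbols are wanted,
	 *	                     store a_syms, expect the symbol table
	 *	                     plus the string-table length word and
	 *	                     go to phase 3, else fall through
	 *	3  symbols loaded -> clamp the string-table length to the
	 *	                     size estimated above, expect the
	 *	                     strings, go to phase 2
	 *	2  all loaded     -> append the magic trailer and call
	 *	                     kernel_reload(); if that returns, the
	 *	                     reload failed and ENODEV is returned
	 */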
1047 1040
1048 if (kernel_load_ofs != kernel_load_endseg) 1041 if (kernel_load_ofs != kernel_load_endseg)
1049 return(0); 1042 return(0);
1050 1043
1051 switch (kernel_load_phase) { 1044 switch (kernel_load_phase) {
1052 case 0: /* done loading kernel text */ 1045 case 0: /* done loading kernel text */
1053 kernel_load_ofs = kernel_text_size; 1046 kernel_load_ofs = kernel_text_size;
1054 kernel_load_endseg = kernel_load_ofs + kernel_exec.a_data; 1047 kernel_load_endseg = kernel_load_ofs + kernel_exec.a_data;
1055 kernel_load_phase = 1; 1048 kernel_load_phase = 1;
1056 break; 1049 break;
1057 case 1: /* done loading kernel data */ 1050 case 1: /* done loading kernel data */
1058 for(c = 0; c < kernel_exec.a_bss; c++) 1051 for(c = 0; c < kernel_exec.a_bss; c++)
1059 kernel_image[kernel_load_ofs + c] = 0; 1052 kernel_image[kernel_load_ofs + c] = 0;
1060 kernel_load_ofs += kernel_exec.a_bss; 1053 kernel_load_ofs += kernel_exec.a_bss;
1061 if (esym) { 1054 if (esym) {
1062 kernel_load_endseg = kernel_load_ofs 1055 kernel_load_endseg = kernel_load_ofs
1063 + kernel_exec.a_syms + 8; 1056 + kernel_exec.a_syms + 8;
1064 *((u_long *)(kernel_image + kernel_load_ofs)) = 1057 *((u_long *)(kernel_image + kernel_load_ofs)) =
1065 kernel_exec.a_syms; 1058 kernel_exec.a_syms;
1066 kernel_load_ofs += 4; 1059 kernel_load_ofs += 4;
1067 kernel_load_phase = 3; 1060 kernel_load_phase = 3;
1068 break; 1061 break;
1069 } 1062 }
1070 /*FALLTHROUGH*/ 1063 /*FALLTHROUGH*/
1071 case 2: /* done loading kernel */ 1064 case 2: /* done loading kernel */
1072 1065
1073 /* 1066 /*
1074 * Put the finishing touches on the kernel image. 1067 * Put the finishing touches on the kernel image.
1075 */ 1068 */
1076 kernel_image_magic_copy(kernel_image + kernel_load_ofs); 1069 kernel_image_magic_copy(kernel_image + kernel_load_ofs);
1077 /* 1070 /*
1078 * Start the new kernel with code in locore.s. 1071 * Start the new kernel with code in locore.s.
1079 */ 1072 */
1080 kernel_reload(kernel_image, 1073 kernel_reload(kernel_image,
1081 kernel_load_ofs + kernel_image_magic_size(), 1074 kernel_load_ofs + kernel_image_magic_size(),
1082 kernel_exec.a_entry, boot_fphystart, boot_fphysize, 1075 kernel_exec.a_entry, boot_fphystart, boot_fphysize,
1083 boot_cphysize, kernel_symbol_esym, eclockfreq, 1076 boot_cphysize, kernel_symbol_esym, eclockfreq,
1084 boot_flags, scsi_nosync, boot_partition); 1077 boot_flags, scsi_nosync, boot_partition);
1085 /* 1078 /*
1086 * kernel_reload() now checks to see if the reload_code 1079 * kernel_reload() now checks to see if the reload_code
1087 * is at the same location in the new kernel. 1080 * is at the same location in the new kernel.
1088 * If it isn't, it will return and we will return 1081 * If it isn't, it will return and we will return
1089 * an error. 1082 * an error.
1090 */ 1083 */
1091 free(kernel_image, M_TEMP); 1084 free(kernel_image, M_TEMP);
1092 kernel_image = NULL; 1085 kernel_image = NULL;
1093 return (ENODEV); /* Say operation not supported */ 1086 return (ENODEV); /* Say operation not supported */
1094 case 3: /* done loading kernel symbol table */ 1087 case 3: /* done loading kernel symbol table */
1095 c = *((u_long *)(kernel_image + kernel_load_ofs - 4)); 1088 c = *((u_long *)(kernel_image + kernel_load_ofs - 4));
1096 if (c > 16 * (kernel_exec.a_syms / 12)) 1089 if (c > 16 * (kernel_exec.a_syms / 12))
1097 c = 16 * (kernel_exec.a_syms / 12); 1090 c = 16 * (kernel_exec.a_syms / 12);
1098 kernel_load_endseg += c - 4; 1091 kernel_load_endseg += c - 4;
1099 kernel_symbol_esym = kernel_load_endseg; 1092 kernel_symbol_esym = kernel_load_endseg;
1100#ifdef notyet 1093#ifdef notyet
1101 kernel_image_copy = kernel_image; 1094 kernel_image_copy = kernel_image;
1102 kernel_image = malloc(kernel_load_ofs + c 1095 kernel_image = malloc(kernel_load_ofs + c
1103 + kernel_image_magic_size(), M_TEMP, M_WAITOK); 1096 + kernel_image_magic_size(), M_TEMP, M_WAITOK);
1104 if (kernel_image == NULL) 1097 if (kernel_image == NULL)
1105 panic("kernel_reload failed second malloc"); 1098 panic("kernel_reload failed second malloc");
1106 for (c = 0; c < kernel_load_ofs; c += MAXPHYS) 1099 for (c = 0; c < kernel_load_ofs; c += MAXPHYS)
1107 bcopy(kernel_image_copy + c, kernel_image + c, 1100 bcopy(kernel_image_copy + c, kernel_image + c,
1108 (kernel_load_ofs - c) > MAXPHYS ? MAXPHYS : 1101 (kernel_load_ofs - c) > MAXPHYS ? MAXPHYS :
1109 kernel_load_ofs - c); 1102 kernel_load_ofs - c);
1110#endif 1103#endif
1111 kernel_load_phase = 2; 1104 kernel_load_phase = 2;
1112 } 1105 }
1113 return(0); 1106 return(0);
1114} 1107}
1115#endif 1108#endif