| @@ -1,1114 +1,1119 @@ | | | @@ -1,1114 +1,1119 @@ |
1 | /* $NetBSD: amiga_init.c,v 1.103 2008/12/31 19:54:40 tsutsui Exp $ */ | | 1 | /* $NetBSD: amiga_init.c,v 1.104 2009/01/03 07:04:42 tsutsui Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 1994 Michael L. Hitch | | 4 | * Copyright (c) 1994 Michael L. Hitch |
5 | * Copyright (c) 1993 Markus Wild | | 5 | * Copyright (c) 1993 Markus Wild |
6 | * All rights reserved. | | 6 | * All rights reserved. |
7 | * | | 7 | * |
8 | * Redistribution and use in source and binary forms, with or without | | 8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions | | 9 | * modification, are permitted provided that the following conditions |
10 | * are met: | | 10 | * are met: |
11 | * 1. Redistributions of source code must retain the above copyright | | 11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. | | 12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright | | 13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the | | 14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. | | 15 | * documentation and/or other materials provided with the distribution. |
16 | * 3. All advertising materials mentioning features or use of this software | | 16 | * 3. All advertising materials mentioning features or use of this software |
17 | * must display the following acknowledgement: | | 17 | * must display the following acknowledgement: |
18 | * This product includes software developed by Markus Wild. | | 18 | * This product includes software developed by Markus Wild. |
19 | * 4. The name of the author may not be used to endorse or promote products | | 19 | * 4. The name of the author may not be used to endorse or promote products |
20 | * derived from this software without specific prior written permission | | 20 | * derived from this software without specific prior written permission |
21 | * | | 21 | * |
22 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 22 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
24 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 24 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
25 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 25 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
32 | */ | | 32 | */ |
33 | | | 33 | |
34 | #include "opt_amigaccgrf.h" | | 34 | #include "opt_amigaccgrf.h" |
35 | #include "opt_p5ppc68kboard.h" | | 35 | #include "opt_p5ppc68kboard.h" |
36 | #include "opt_devreload.h" | | 36 | #include "opt_devreload.h" |
37 | | | 37 | |
38 | #include <sys/cdefs.h> | | 38 | #include <sys/cdefs.h> |
39 | __KERNEL_RCSID(0, "$NetBSD: amiga_init.c,v 1.103 2008/12/31 19:54:40 tsutsui Exp $"); | | 39 | __KERNEL_RCSID(0, "$NetBSD: amiga_init.c,v 1.104 2009/01/03 07:04:42 tsutsui Exp $"); |
40 | | | 40 | |
41 | #include <sys/param.h> | | 41 | #include <sys/param.h> |
42 | #include <sys/systm.h> | | 42 | #include <sys/systm.h> |
43 | #include <sys/proc.h> | | 43 | #include <sys/proc.h> |
44 | #include <uvm/uvm_extern.h> | | 44 | #include <uvm/uvm_extern.h> |
45 | #include <sys/user.h> | | 45 | #include <sys/user.h> |
46 | #include <sys/ioctl.h> | | 46 | #include <sys/ioctl.h> |
47 | #include <sys/select.h> | | 47 | #include <sys/select.h> |
48 | #include <sys/tty.h> | | 48 | #include <sys/tty.h> |
49 | #include <sys/buf.h> | | 49 | #include <sys/buf.h> |
50 | #include <sys/msgbuf.h> | | 50 | #include <sys/msgbuf.h> |
51 | #include <sys/mbuf.h> | | 51 | #include <sys/mbuf.h> |
52 | #include <sys/protosw.h> | | 52 | #include <sys/protosw.h> |
53 | #include <sys/domain.h> | | 53 | #include <sys/domain.h> |
54 | #include <sys/dkbad.h> | | 54 | #include <sys/dkbad.h> |
55 | #include <sys/reboot.h> | | 55 | #include <sys/reboot.h> |
56 | #include <sys/exec.h> | | 56 | #include <sys/exec.h> |
57 | #include <machine/pte.h> | | 57 | #include <machine/pte.h> |
58 | #include <machine/cpu.h> | | 58 | #include <machine/cpu.h> |
59 | #include <amiga/amiga/cc.h> | | 59 | #include <amiga/amiga/cc.h> |
60 | #include <amiga/amiga/cia.h> | | 60 | #include <amiga/amiga/cia.h> |
61 | #include <amiga/amiga/custom.h> | | 61 | #include <amiga/amiga/custom.h> |
62 | #include <amiga/amiga/cfdev.h> | | 62 | #include <amiga/amiga/cfdev.h> |
63 | #include <amiga/amiga/drcustom.h> | | 63 | #include <amiga/amiga/drcustom.h> |
64 | #include <amiga/amiga/gayle.h> | | 64 | #include <amiga/amiga/gayle.h> |
65 | #include <amiga/amiga/memlist.h> | | 65 | #include <amiga/amiga/memlist.h> |
66 | #include <amiga/dev/zbusvar.h> | | 66 | #include <amiga/dev/zbusvar.h> |
67 | | | 67 | |
68 | #define RELOC(v, t) *((t*)((u_int)&(v) + loadbase)) | | 68 | #define RELOC(v, t) *((t*)((u_int)&(v) + loadbase)) |
69 | | | 69 | |
70 | extern u_int lowram; | | 70 | extern u_int lowram; |
71 | extern u_int Sysptsize, Umap, proc0paddr; | | 71 | extern u_int Sysptsize, Umap, proc0paddr; |
72 | extern pt_entry_t *Sysptmap; | | 72 | extern pt_entry_t *Sysptmap; |
73 | extern st_entry_t *Sysseg; | | 73 | extern st_entry_t *Sysseg; |
74 | extern u_int Sysseg_pa; | | 74 | extern u_int Sysseg_pa; |
75 | extern u_int virtual_avail; | | 75 | extern u_int virtual_avail; |
76 | #if defined(M68040) || defined(M68060) | | 76 | #if defined(M68040) || defined(M68060) |
77 | extern int protostfree; | | 77 | extern int protostfree; |
78 | #endif | | 78 | #endif |
79 | extern u_long boot_partition; | | 79 | extern u_long boot_partition; |
80 | vaddr_t amiga_uptbase; | | 80 | vaddr_t amiga_uptbase; |
81 | #ifdef P5PPC68KBOARD | | 81 | #ifdef P5PPC68KBOARD |
82 | extern int p5ppc; | | 82 | extern int p5ppc; |
83 | #endif | | 83 | #endif |
84 | | | 84 | |
85 | extern char *esym; | | 85 | extern char *esym; |
86 | | | 86 | |
87 | #ifdef GRF_AGA | | 87 | #ifdef GRF_AGA |
88 | extern u_long aga_enable; | | 88 | extern u_long aga_enable; |
89 | #endif | | 89 | #endif |
90 | | | 90 | |
91 | extern u_long noncontig_enable; | | 91 | extern u_long noncontig_enable; |
92 | | | 92 | |
93 | /* | | 93 | /* |
94 | * some addresses used in locore | | 94 | * some addresses used in locore |
95 | */ | | 95 | */ |
96 | vaddr_t INTREQRaddr; | | 96 | vaddr_t INTREQRaddr; |
97 | vaddr_t INTREQWaddr; | | 97 | vaddr_t INTREQWaddr; |
98 | | | 98 | |
99 | /* | | 99 | /* |
100 | * these are used by the extended spl?() macros. | | 100 | * these are used by the extended spl?() macros. |
101 | */ | | 101 | */ |
102 | volatile unsigned short *amiga_intena_read, *amiga_intena_write; | | 102 | volatile unsigned short *amiga_intena_read, *amiga_intena_write; |
103 | | | 103 | |
104 | vaddr_t CHIPMEMADDR; | | 104 | vaddr_t CHIPMEMADDR; |
105 | vaddr_t chipmem_start; | | 105 | vaddr_t chipmem_start; |
106 | vaddr_t chipmem_end; | | 106 | vaddr_t chipmem_end; |
107 | | | 107 | |
108 | vaddr_t z2mem_start; /* XXX */ | | 108 | vaddr_t z2mem_start; /* XXX */ |
109 | static vaddr_t z2mem_end; /* XXX */ | | 109 | static vaddr_t z2mem_end; /* XXX */ |
110 | int use_z2_mem = 1; /* XXX */ | | 110 | int use_z2_mem = 1; /* XXX */ |
111 | | | 111 | |
112 | u_long boot_fphystart, boot_fphysize, boot_cphysize; | | 112 | u_long boot_fphystart, boot_fphysize, boot_cphysize; |
113 | static u_int start_c_fphystart; | | 113 | static u_int start_c_fphystart; |
114 | static u_int start_c_pstart; | | 114 | static u_int start_c_pstart; |
115 | | | 115 | |
116 | static u_long boot_flags; | | 116 | static u_long boot_flags; |
117 | | | 117 | |
118 | struct boot_memlist *memlist; | | 118 | struct boot_memlist *memlist; |
119 | | | 119 | |
120 | struct cfdev *cfdev; | | 120 | struct cfdev *cfdev; |
121 | int ncfdev; | | 121 | int ncfdev; |
122 | | | 122 | |
123 | u_long scsi_nosync; | | 123 | u_long scsi_nosync; |
124 | int shift_nosync; | | 124 | int shift_nosync; |
125 | | | 125 | |
126 | void start_c(int, u_int, u_int, u_int, char *, u_int, u_long, u_long, u_int); | | 126 | void start_c(int, u_int, u_int, u_int, char *, u_int, u_long, u_long, u_int); |
127 | void rollcolor(int); | | 127 | void rollcolor(int); |
128 | #ifdef DEVRELOAD | | 128 | #ifdef DEVRELOAD |
129 | static int kernel_image_magic_size(void); | | 129 | static int kernel_image_magic_size(void); |
130 | static void kernel_image_magic_copy(u_char *); | | 130 | static void kernel_image_magic_copy(u_char *); |
131 | int kernel_reload_write(struct uio *); | | 131 | int kernel_reload_write(struct uio *); |
132 | extern void kernel_reload(char *, u_long, u_long, u_long, u_long, | | 132 | extern void kernel_reload(char *, u_long, u_long, u_long, u_long, |
133 | u_long, u_long, u_long, u_long, u_long, u_long); | | 133 | u_long, u_long, u_long, u_long, u_long, u_long); |
134 | #endif | | 134 | #endif |
135 | extern void etext(void); | | 135 | extern void etext(void); |
136 | void start_c_finish(void); | | 136 | void start_c_finish(void); |
137 | | | 137 | |
138 | void * | | 138 | void * |
139 | chipmem_steal(long amount) | | 139 | chipmem_steal(long amount) |
140 | { | | 140 | { |
141 | /* | | 141 | /* |
142 | * steal from top of chipmem, so we don't collide with | | 142 | * steal from top of chipmem, so we don't collide with |
143 | * the kernel loaded into chipmem in the not-yet-mapped state. | | 143 | * the kernel loaded into chipmem in the not-yet-mapped state. |
144 | */ | | 144 | */ |
145 | vaddr_t p = chipmem_end - amount; | | 145 | vaddr_t p = chipmem_end - amount; |
146 | if (p & 1) | | 146 | if (p & 1) |
147 | p = p - 1; | | 147 | p = p - 1; |
148 | chipmem_end = p; | | 148 | chipmem_end = p; |
149 | if(chipmem_start > chipmem_end) | | 149 | if(chipmem_start > chipmem_end) |
150 | panic("not enough chip memory"); | | 150 | panic("not enough chip memory"); |
151 | return((void *)p); | | 151 | return((void *)p); |
152 | } | | 152 | } |
153 | | | 153 | |
154 | /* | | 154 | /* |
155 | * XXX | | 155 | * XXX |
156 | * used by certain drivers currently to allocate zorro II memory | | 156 | * used by certain drivers currently to allocate zorro II memory |
157 | * for bounce buffers, if use_z2_mem is NULL, chipmem will be | | 157 | * for bounce buffers, if use_z2_mem is NULL, chipmem will be |
158 | * returned instead. | | 158 | * returned instead. |
159 | * XXX | | 159 | * XXX |
160 | */ | | 160 | */ |
161 | void * | | 161 | void * |
162 | alloc_z2mem(amount) | | 162 | alloc_z2mem(amount) |
163 | long amount; | | 163 | long amount; |
164 | { | | 164 | { |
165 | if (use_z2_mem && z2mem_end && (z2mem_end - amount) >= z2mem_start) { | | 165 | if (use_z2_mem && z2mem_end && (z2mem_end - amount) >= z2mem_start) { |
166 | z2mem_end -= amount; | | 166 | z2mem_end -= amount; |
167 | return ((void *)z2mem_end); | | 167 | return ((void *)z2mem_end); |
168 | } | | 168 | } |
169 | return (alloc_chipmem(amount)); | | 169 | return (alloc_chipmem(amount)); |
170 | } | | 170 | } |
171 | | | 171 | |
172 | | | 172 | |
173 | /* | | 173 | /* |
174 | * this is the C-level entry function, it's called from locore.s. | | 174 | * this is the C-level entry function, it's called from locore.s. |
175 | * Preconditions: | | 175 | * Preconditions: |
176 | * Interrupts are disabled | | 176 | * Interrupts are disabled |
177 | * PA may not be == VA, so we may have to relocate addresses | | 177 | * PA may not be == VA, so we may have to relocate addresses |
178 | * before enabling the MMU | | 178 | * before enabling the MMU |
179 | * Exec is no longer available (because we're loaded all over | | 179 | * Exec is no longer available (because we're loaded all over |
180 | * low memory, no ExecBase is available anymore) | | 180 | * low memory, no ExecBase is available anymore) |
181 | * | | 181 | * |
182 | * It's purpose is: | | 182 | * It's purpose is: |
183 | * Do the things that are done in locore.s in the hp300 version, | | 183 | * Do the things that are done in locore.s in the hp300 version, |
184 | * this includes allocation of kernel maps and enabling the MMU. | | 184 | * this includes allocation of kernel maps and enabling the MMU. |
185 | * | | 185 | * |
186 | * Some of the code in here is `stolen' from Amiga MACH, and was | | 186 | * Some of the code in here is `stolen' from Amiga MACH, and was |
187 | * written by Bryan Ford and Niklas Hallqvist. | | 187 | * written by Bryan Ford and Niklas Hallqvist. |
188 | * | | 188 | * |
189 | * Very crude 68040 support by Michael L. Hitch. | | 189 | * Very crude 68040 support by Michael L. Hitch. |
190 | * | | 190 | * |
191 | */ | | 191 | */ |
192 | | | 192 | |
193 | int kernel_copyback = 1; | | 193 | int kernel_copyback = 1; |
194 | | | 194 | |
195 | __attribute__ ((no_instrument_function)) | | 195 | __attribute__ ((no_instrument_function)) |
196 | void | | 196 | void |
197 | start_c(id, fphystart, fphysize, cphysize, esym_addr, flags, inh_sync, | | 197 | start_c(id, fphystart, fphysize, cphysize, esym_addr, flags, inh_sync, |
198 | boot_part, loadbase) | | 198 | boot_part, loadbase) |
199 | int id; | | 199 | int id; |
200 | u_int fphystart, fphysize, cphysize; | | 200 | u_int fphystart, fphysize, cphysize; |
201 | char *esym_addr; | | 201 | char *esym_addr; |
202 | u_int flags; | | 202 | u_int flags; |
203 | u_long inh_sync; | | 203 | u_long inh_sync; |
204 | u_long boot_part; | | 204 | u_long boot_part; |
205 | u_int loadbase; | | 205 | u_int loadbase; |
206 | { | | 206 | { |
207 | extern char end[]; | | 207 | extern char end[]; |
208 | extern u_int protorp[2]; | | 208 | extern u_int protorp[2]; |
209 | struct cfdev *cd; | | 209 | struct cfdev *cd; |
210 | paddr_t pstart, pend; | | 210 | paddr_t pstart, pend; |
211 | vaddr_t vstart, vend; | | 211 | vaddr_t vstart, vend; |
212 | psize_t avail; | | 212 | psize_t avail; |
213 | paddr_t ptpa; | | 213 | paddr_t ptpa; |
214 | psize_t ptsize; | | 214 | psize_t ptsize; |
215 | u_int ptextra, kstsize; | | 215 | u_int ptextra, kstsize; |
216 | paddr_t Sysptmap_pa; | | 216 | paddr_t Sysptmap_pa; |
217 | register st_entry_t sg_proto, *sg, *esg; | | 217 | register st_entry_t sg_proto, *sg, *esg; |
218 | register pt_entry_t pg_proto, *pg, *epg; | | 218 | register pt_entry_t pg_proto, *pg, *epg; |
219 | vaddr_t end_loaded; | | 219 | vaddr_t end_loaded; |
220 | u_int ncd, i; | | 220 | u_int ncd, i; |
| | | 221 | #if defined(M68040) || defined(M68060) |
| | | 222 | u_int nl1desc, nl2desc; |
| | | 223 | #endif |
221 | vaddr_t kva; | | 224 | vaddr_t kva; |
222 | struct boot_memlist *ml; | | 225 | struct boot_memlist *ml; |
223 | | | 226 | |
224 | #ifdef DEBUG_KERNEL_START | | 227 | #ifdef DEBUG_KERNEL_START |
225 | /* XXX this only is valid if Altais is in slot 0 */ | | 228 | /* XXX this only is valid if Altais is in slot 0 */ |
226 | volatile u_int8_t *altaiscolpt = (u_int8_t *)0x200003c8; | | 229 | volatile u_int8_t *altaiscolpt = (u_int8_t *)0x200003c8; |
227 | volatile u_int8_t *altaiscol = (u_int8_t *)0x200003c9; | | 230 | volatile u_int8_t *altaiscol = (u_int8_t *)0x200003c9; |
228 | #endif | | 231 | #endif |
229 | | | 232 | |
230 | #ifdef DEBUG_KERNEL_START | | 233 | #ifdef DEBUG_KERNEL_START |
231 | if ((id>>24)==0x7D) { | | 234 | if ((id>>24)==0x7D) { |
232 | *altaiscolpt = 0; | | 235 | *altaiscolpt = 0; |
233 | *altaiscol = 40; | | 236 | *altaiscol = 40; |
234 | *altaiscol = 0; | | 237 | *altaiscol = 0; |
235 | *altaiscol = 0; | | 238 | *altaiscol = 0; |
236 | } else | | 239 | } else |
237 | ((volatile struct Custom *)0xdff000)->color[0] = 0xa00; /* RED */ | | 240 | ((volatile struct Custom *)0xdff000)->color[0] = 0xa00; /* RED */ |
238 | #endif | | 241 | #endif |
239 | | | 242 | |
240 | #ifdef LIMITMEM | | 243 | #ifdef LIMITMEM |
241 | if (fphysize > LIMITMEM*1024*1024) | | 244 | if (fphysize > LIMITMEM*1024*1024) |
242 | fphysize = LIMITMEM*1024*1024; | | 245 | fphysize = LIMITMEM*1024*1024; |
243 | #endif | | 246 | #endif |
244 | | | 247 | |
245 | RELOC(boot_fphystart, u_long) = fphystart; | | 248 | RELOC(boot_fphystart, u_long) = fphystart; |
246 | RELOC(boot_fphysize, u_long) = fphysize; | | 249 | RELOC(boot_fphysize, u_long) = fphysize; |
247 | RELOC(boot_cphysize, u_long) = cphysize; | | 250 | RELOC(boot_cphysize, u_long) = cphysize; |
248 | | | 251 | |
249 | RELOC(machineid, int) = id; | | 252 | RELOC(machineid, int) = id; |
250 | RELOC(chipmem_end, vaddr_t) = cphysize; | | 253 | RELOC(chipmem_end, vaddr_t) = cphysize; |
251 | RELOC(esym, char *) = esym_addr; | | 254 | RELOC(esym, char *) = esym_addr; |
252 | RELOC(boot_flags, u_long) = flags; | | 255 | RELOC(boot_flags, u_long) = flags; |
253 | RELOC(boot_partition, u_long) = boot_part; | | 256 | RELOC(boot_partition, u_long) = boot_part; |
254 | #ifdef GRF_AGA | | 257 | #ifdef GRF_AGA |
255 | if (flags & 1) | | 258 | if (flags & 1) |
256 | RELOC(aga_enable, u_long) |= 1; | | 259 | RELOC(aga_enable, u_long) |= 1; |
257 | #endif | | 260 | #endif |
258 | if (flags & (3 << 1)) | | 261 | if (flags & (3 << 1)) |
259 | RELOC(noncontig_enable, u_long) = (flags >> 1) & 3; | | 262 | RELOC(noncontig_enable, u_long) = (flags >> 1) & 3; |
260 | | | 263 | |
261 | RELOC(scsi_nosync, u_long) = inh_sync; | | 264 | RELOC(scsi_nosync, u_long) = inh_sync; |
262 | | | 265 | |
263 | /* | | 266 | /* |
264 | * the kernel ends at end(), plus the cfdev and memlist structures | | 267 | * the kernel ends at end(), plus the cfdev and memlist structures |
265 | * we placed there in the loader. Correct for this now. Also, | | 268 | * we placed there in the loader. Correct for this now. Also, |
266 | * account for kernel symbols if they are present. | | 269 | * account for kernel symbols if they are present. |
267 | */ | | 270 | */ |
268 | if (esym_addr == NULL) | | 271 | if (esym_addr == NULL) |
269 | end_loaded = (vaddr_t)&end; | | 272 | end_loaded = (vaddr_t)&end; |
270 | else | | 273 | else |
271 | end_loaded = (vaddr_t)esym_addr; | | 274 | end_loaded = (vaddr_t)esym_addr; |
272 | RELOC(ncfdev, int) = *(int *)(&RELOC(*(u_int *)end_loaded, u_int)); | | 275 | RELOC(ncfdev, int) = *(int *)(&RELOC(*(u_int *)end_loaded, u_int)); |
273 | RELOC(cfdev, struct cfdev *) = (struct cfdev *) ((int)end_loaded + 4); | | 276 | RELOC(cfdev, struct cfdev *) = (struct cfdev *) ((int)end_loaded + 4); |
274 | end_loaded += 4 + RELOC(ncfdev, int) * sizeof(struct cfdev); | | 277 | end_loaded += 4 + RELOC(ncfdev, int) * sizeof(struct cfdev); |
275 | | | 278 | |
276 | RELOC(memlist, struct boot_memlist *) = | | 279 | RELOC(memlist, struct boot_memlist *) = |
277 | (struct boot_memlist *)end_loaded; | | 280 | (struct boot_memlist *)end_loaded; |
278 | ml = &RELOC(*(struct boot_memlist *)end_loaded, struct boot_memlist); | | 281 | ml = &RELOC(*(struct boot_memlist *)end_loaded, struct boot_memlist); |
279 | end_loaded = (vaddr_t)&((RELOC(memlist, struct boot_memlist *))-> | | 282 | end_loaded = (vaddr_t)&((RELOC(memlist, struct boot_memlist *))-> |
280 | m_seg[ml->m_nseg]); | | 283 | m_seg[ml->m_nseg]); |
281 | | | 284 | |
282 | /* | | 285 | /* |
283 | * Get ZorroII (16-bit) memory if there is any and it's not where the | | 286 | * Get ZorroII (16-bit) memory if there is any and it's not where the |
284 | * kernel is loaded. | | 287 | * kernel is loaded. |
285 | */ | | 288 | */ |
286 | if (ml->m_nseg > 0 && ml->m_nseg < 16 && RELOC(use_z2_mem, int)) { | | 289 | if (ml->m_nseg > 0 && ml->m_nseg < 16 && RELOC(use_z2_mem, int)) { |
287 | struct boot_memseg *sp, *esp; | | 290 | struct boot_memseg *sp, *esp; |
288 | | | 291 | |
289 | sp = ml->m_seg; | | 292 | sp = ml->m_seg; |
290 | esp = sp + ml->m_nseg; | | 293 | esp = sp + ml->m_nseg; |
291 | for (; sp < esp; sp++) { | | 294 | for (; sp < esp; sp++) { |
292 | if ((sp->ms_attrib & (MEMF_FAST | MEMF_24BITDMA)) | | 295 | if ((sp->ms_attrib & (MEMF_FAST | MEMF_24BITDMA)) |
293 | != (MEMF_FAST|MEMF_24BITDMA)) | | 296 | != (MEMF_FAST|MEMF_24BITDMA)) |
294 | continue; | | 297 | continue; |
295 | if (sp->ms_start == fphystart) | | 298 | if (sp->ms_start == fphystart) |
296 | continue; | | 299 | continue; |
297 | RELOC(z2mem_end, paddr_t) = | | 300 | RELOC(z2mem_end, paddr_t) = |
298 | sp->ms_start + sp->ms_size; | | 301 | sp->ms_start + sp->ms_size; |
299 | RELOC(z2mem_start, paddr_t) = | | 302 | RELOC(z2mem_start, paddr_t) = |
300 | RELOC(z2mem_end, paddr_t) - MAXPHYS * | | 303 | RELOC(z2mem_end, paddr_t) - MAXPHYS * |
301 | RELOC(use_z2_mem, int) * 7; | | 304 | RELOC(use_z2_mem, int) * 7; |
302 | RELOC(NZTWOMEMPG, u_int) = | | 305 | RELOC(NZTWOMEMPG, u_int) = |
303 | (RELOC(z2mem_end, paddr_t) - | | 306 | (RELOC(z2mem_end, paddr_t) - |
304 | RELOC(z2mem_start, paddr_t)) / PAGE_SIZE; | | 307 | RELOC(z2mem_start, paddr_t)) / PAGE_SIZE; |
305 | if ((RELOC(z2mem_end, paddr_t) - | | 308 | if ((RELOC(z2mem_end, paddr_t) - |
306 | RELOC(z2mem_start, paddr_t)) > sp->ms_size) { | | 309 | RELOC(z2mem_start, paddr_t)) > sp->ms_size) { |
307 | RELOC(NZTWOMEMPG, u_int) = sp->ms_size / | | 310 | RELOC(NZTWOMEMPG, u_int) = sp->ms_size / |
308 | PAGE_SIZE; | | 311 | PAGE_SIZE; |
309 | RELOC(z2mem_start, paddr_t) = | | 312 | RELOC(z2mem_start, paddr_t) = |
310 | RELOC(z2mem_end, paddr_t) - sp->ms_size; | | 313 | RELOC(z2mem_end, paddr_t) - sp->ms_size; |
311 | } | | 314 | } |
312 | break; | | 315 | break; |
313 | } | | 316 | } |
314 | } | | 317 | } |
315 | | | 318 | |
316 | /* | | 319 | /* |
317 | * Scan ConfigDev list and get size of Zorro I/O boards that are | | 320 | * Scan ConfigDev list and get size of Zorro I/O boards that are |
318 | * outside the Zorro II I/O area. | | 321 | * outside the Zorro II I/O area. |
319 | */ | | 322 | */ |
320 | for (RELOC(ZBUSAVAIL, u_int) = 0, cd = | | 323 | for (RELOC(ZBUSAVAIL, u_int) = 0, cd = |
321 | &RELOC(*RELOC(cfdev, struct cfdev *),struct cfdev), | | 324 | &RELOC(*RELOC(cfdev, struct cfdev *),struct cfdev), |
322 | ncd = RELOC(ncfdev, int); ncd > 0; ncd--, cd++) { | | 325 | ncd = RELOC(ncfdev, int); ncd > 0; ncd--, cd++) { |
323 | int bd_type = cd->rom.type & (ERT_TYPEMASK | ERTF_MEMLIST); | | 326 | int bd_type = cd->rom.type & (ERT_TYPEMASK | ERTF_MEMLIST); |
324 | | | 327 | |
325 | if (bd_type != ERT_ZORROIII && | | 328 | if (bd_type != ERT_ZORROIII && |
326 | (bd_type != ERT_ZORROII || isztwopa(cd->addr))) | | 329 | (bd_type != ERT_ZORROII || isztwopa(cd->addr))) |
327 | continue; /* It's not Z2 or Z3 I/O board */ | | 330 | continue; /* It's not Z2 or Z3 I/O board */ |
328 | /* | | 331 | /* |
329 | * Hack to adjust board size for Zorro III boards that | | 332 | * Hack to adjust board size for Zorro III boards that |
330 | * do not specify an extended size or subsize. This is | | 333 | * do not specify an extended size or subsize. This is |
331 | * specifically for the GVP Spectrum and hopefully won't | | 334 | * specifically for the GVP Spectrum and hopefully won't |
332 | * break with other boards that configure like this. | | 335 | * break with other boards that configure like this. |
333 | */ | | 336 | */ |
334 | if (bd_type == ERT_ZORROIII && | | 337 | if (bd_type == ERT_ZORROIII && |
335 | !(cd->rom.flags & ERFF_EXTENDED) && | | 338 | !(cd->rom.flags & ERFF_EXTENDED) && |
336 | (cd->rom.flags & ERT_Z3_SSMASK) == 0) | | 339 | (cd->rom.flags & ERT_Z3_SSMASK) == 0) |
337 | cd->size = 0x10000 << | | 340 | cd->size = 0x10000 << |
338 | ((cd->rom.type - 1) & ERT_MEMMASK); | | 341 | ((cd->rom.type - 1) & ERT_MEMMASK); |
339 | RELOC(ZBUSAVAIL, u_int) += m68k_round_page(cd->size); | | 342 | RELOC(ZBUSAVAIL, u_int) += m68k_round_page(cd->size); |
340 | } | | 343 | } |
341 | | | 344 | |
342 | /* | | 345 | /* |
343 | * assume KVA_MIN == 0. We subtract the kernel code (and | | 346 | * assume KVA_MIN == 0. We subtract the kernel code (and |
344 | * the configdev's and memlists) from the virtual and | | 347 | * the configdev's and memlists) from the virtual and |
345 | * phsical starts and ends. | | 348 | * phsical starts and ends. |
346 | */ | | 349 | */ |
347 | vend = fphysize; | | 350 | vend = fphysize; |
348 | avail = vend; | | 351 | avail = vend; |
349 | vstart = end_loaded; | | 352 | vstart = end_loaded; |
350 | vstart = m68k_round_page(vstart); | | 353 | vstart = m68k_round_page(vstart); |
351 | pstart = (paddr_t)vstart + fphystart; | | 354 | pstart = (paddr_t)vstart + fphystart; |
352 | pend = vend + fphystart; | | 355 | pend = vend + fphystart; |
353 | avail -= vstart; | | 356 | avail -= vstart; |
354 | | | 357 | |
355 | /* | | 358 | /* |
356 | * save KVA of proc0 u-area and allocate it. | | 359 | * save KVA of proc0 u-area and allocate it. |
357 | */ | | 360 | */ |
358 | RELOC(proc0paddr, u_int) = vstart; | | 361 | RELOC(proc0paddr, u_int) = vstart; |
359 | pstart += USPACE; | | 362 | pstart += USPACE; |
360 | vstart += USPACE; | | 363 | vstart += USPACE; |
361 | avail -= USPACE; | | 364 | avail -= USPACE; |
362 | | | 365 | |
363 | #if defined(M68040) || defined(M68060) | | 366 | #if defined(M68040) || defined(M68060) |
364 | if (RELOC(mmutype, int) == MMU_68040) | | 367 | if (RELOC(mmutype, int) == MMU_68040) |
365 | kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE); | | 368 | kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE); |
366 | else | | 369 | else |
367 | #endif | | 370 | #endif |
368 | kstsize = 1; | | 371 | kstsize = 1; |
369 | | | 372 | |
370 | /* | | 373 | /* |
371 | * allocate the kernel segment table | | 374 | * allocate the kernel segment table |
372 | */ | | 375 | */ |
373 | RELOC(Sysseg_pa, u_int) = pstart; | | 376 | RELOC(Sysseg_pa, u_int) = pstart; |
374 | RELOC(Sysseg, u_int) = vstart; | | 377 | RELOC(Sysseg, u_int) = vstart; |
375 | vstart += PAGE_SIZE * kstsize; | | 378 | vstart += PAGE_SIZE * kstsize; |
376 | pstart += PAGE_SIZE * kstsize; | | 379 | pstart += PAGE_SIZE * kstsize; |
377 | avail -= PAGE_SIZE * kstsize; | | 380 | avail -= PAGE_SIZE * kstsize; |
378 | | | 381 | |
379 | /* | | 382 | /* |
380 | * allocate kernel page table map | | 383 | * allocate kernel page table map |
381 | */ | | 384 | */ |
382 | RELOC(Sysptmap, u_int) = vstart; | | 385 | RELOC(Sysptmap, u_int) = vstart; |
383 | Sysptmap_pa = pstart; | | 386 | Sysptmap_pa = pstart; |
384 | vstart += PAGE_SIZE; | | 387 | vstart += PAGE_SIZE; |
385 | pstart += PAGE_SIZE; | | 388 | pstart += PAGE_SIZE; |
386 | avail -= PAGE_SIZE; | | 389 | avail -= PAGE_SIZE; |
387 | | | 390 | |
388 | /* | | 391 | /* |
389 | * allocate initial page table pages | | 392 | * allocate initial page table pages |
390 | */ | | 393 | */ |
391 | ptpa = pstart; | | 394 | ptpa = pstart; |
392 | #ifdef DRACO | | 395 | #ifdef DRACO |
393 | if ((id>>24)==0x7D) { | | 396 | if ((id>>24)==0x7D) { |
394 | ptextra = NDRCCPG | | 397 | ptextra = NDRCCPG |
395 | + RELOC(NZTWOMEMPG, u_int) | | 398 | + RELOC(NZTWOMEMPG, u_int) |
396 | + btoc(RELOC(ZBUSAVAIL, u_int)); | | 399 | + btoc(RELOC(ZBUSAVAIL, u_int)); |
397 | } else | | 400 | } else |
398 | #endif | | 401 | #endif |
399 | ptextra = NCHIPMEMPG + NCIAPG + NZTWOROMPG + RELOC(NZTWOMEMPG, u_int) + | | 402 | ptextra = NCHIPMEMPG + NCIAPG + NZTWOROMPG + RELOC(NZTWOMEMPG, u_int) + |
400 | btoc(RELOC(ZBUSAVAIL, u_int)) + NPCMCIAPG; | | 403 | btoc(RELOC(ZBUSAVAIL, u_int)) + NPCMCIAPG; |
401 | | | 404 | |
402 | ptsize = (RELOC(Sysptsize, u_int) + | | 405 | ptsize = (RELOC(Sysptsize, u_int) + |
403 | howmany(ptextra, NPTEPG)) << PGSHIFT; | | 406 | howmany(ptextra, NPTEPG)) << PGSHIFT; |
404 | | | 407 | |
405 | vstart += ptsize; | | 408 | vstart += ptsize; |
406 | pstart += ptsize; | | 409 | pstart += ptsize; |
407 | avail -= ptsize; | | 410 | avail -= ptsize; |
408 | | | 411 | |
409 | /* | | 412 | /* |
410 | * Sysmap is now placed at the end of Supervisor virtual address space. | | 413 | * Sysmap is now placed at the end of Supervisor virtual address space. |
411 | */ | | 414 | */ |
412 | RELOC(Sysmap, u_int *) = (u_int *)-(NPTEPG * PAGE_SIZE); | | 415 | RELOC(Sysmap, u_int *) = (u_int *)-(NPTEPG * PAGE_SIZE); |
413 | | | 416 | |
414 | /* | | 417 | /* |
415 | * initialize segment table and page table map | | 418 | * initialize segment table and page table map |
416 | */ | | 419 | */ |
417 | #if defined(M68040) || defined(M68060) | | 420 | #if defined(M68040) || defined(M68060) |
418 | if (RELOC(mmutype, int) == MMU_68040) { | | 421 | if (RELOC(mmutype, int) == MMU_68040) { |
419 | /* | | 422 | /* |
420 | * First invalidate the entire "segment table" pages | | 423 | * First invalidate the entire "segment table" pages |
421 | * (levels 1 and 2 have the same "invalid" values). | | 424 | * (levels 1 and 2 have the same "invalid" values). |
422 | */ | | 425 | */ |
423 | sg = (st_entry_t *)RELOC(Sysseg_pa, u_int); | | 426 | sg = (st_entry_t *)RELOC(Sysseg_pa, u_int); |
424 | esg = &sg[kstsize * NPTEPG]; | | 427 | esg = &sg[kstsize * NPTEPG]; |
425 | while (sg < esg) | | 428 | while (sg < esg) |
426 | *sg++ = SG_NV; | | 429 | *sg++ = SG_NV; |
427 | /* | | 430 | /* |
428 | * Initialize level 2 descriptors (which immediately | | 431 | * Initialize level 2 descriptors (which immediately |
429 | * follow the level 1 table). We need: | | 432 | * follow the level 1 table). We need: |
430 | * NPTEPG / SG4_LEV3SIZE | | 433 | * NPTEPG / SG4_LEV3SIZE |
431 | * level 2 descriptors to map each of the nptpages | | 434 | * level 2 descriptors to map each of the nptpages |
432 | * pages of PTEs. Note that we set the "used" bit | | 435 | * pages of PTEs. Note that we set the "used" bit |
433 | * now to save the HW the expense of doing it. | | 436 | * now to save the HW the expense of doing it. |
434 | */ | | 437 | */ |
435 | i = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE); | | 438 | nl2desc = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE); |
436 | sg = &((st_entry_t *)(RELOC(Sysseg_pa, u_int)))[SG4_LEV1SIZE]; | | 439 | sg = (st_entry_t *)RELOC(Sysseg_pa, u_int); |
437 | esg = &sg[i]; | | 440 | sg = &sg[SG4_LEV1SIZE]; |
| | | 441 | esg = &sg[nl2desc]; |
438 | sg_proto = ptpa | SG_U | SG_RW | SG_V; | | 442 | sg_proto = ptpa | SG_U | SG_RW | SG_V; |
439 | while (sg < esg) { | | 443 | while (sg < esg) { |
440 | *sg++ = sg_proto; | | 444 | *sg++ = sg_proto; |
441 | sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t)); | | 445 | sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t)); |
442 | } | | 446 | } |
443 | | | 447 | |
444 | /* | | 448 | /* |
445 | * Initialize level 1 descriptors. We need: | | 449 | * Initialize level 1 descriptors. We need: |
446 | * roundup(num, SG4_LEV2SIZE) / SG4_LEVEL2SIZE | | 450 | * roundup(nl2desc, SG4_LEV2SIZE) / SG4_LEVEL2SIZE |
447 | * level 1 descriptors to map the 'num' level 2's. | | 451 | * level 1 descriptors to map the 'nl2desc' level 2's. |
448 | */ | | 452 | */ |
449 | i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE; | | 453 | nl1desc = roundup(nl2desc, SG4_LEV2SIZE) / SG4_LEV2SIZE; |
450 | /* Include additional level 2 table for Sysmap in protostfree */ | | | |
451 | RELOC(protostfree, u_int) = | | | |
452 | (-1 << (i + 2)) /* & ~(-1 << MAXKL2SIZE) */; | | | |
453 | sg = (st_entry_t *)RELOC(Sysseg_pa, u_int); | | 454 | sg = (st_entry_t *)RELOC(Sysseg_pa, u_int); |
454 | esg = &sg[i]; | | 455 | esg = &sg[nl1desc]; |
455 | sg_proto = (paddr_t)&sg[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; | | 456 | sg_proto = (paddr_t)&sg[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V; |
456 | while (sg < esg) { | | 457 | while (sg < esg) { |
457 | *sg++ = sg_proto; | | 458 | *sg++ = sg_proto; |
458 | sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t)); | | 459 | sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t)); |
459 | } | | 460 | } |
460 | | | 461 | |
461 | /* Sysmap is last entry in level 1 */ | | 462 | /* Sysmap is last entry in level 1 */ |
462 | sg = (st_entry_t *)RELOC(Sysseg_pa, u_int); | | 463 | sg = (st_entry_t *)RELOC(Sysseg_pa, u_int); |
463 | sg = &sg[SG4_LEV1SIZE - 1]; | | 464 | sg = &sg[SG4_LEV1SIZE - 1]; |
464 | *sg = sg_proto; | | 465 | *sg = sg_proto; |
465 | | | 466 | |
466 | /* | | 467 | /* |
467 | * Kernel segment table at end of next level 2 table | | 468 | * Kernel segment table at end of next level 2 table |
468 | */ | | 469 | */ |
469 | /* XXX fix calculations XXX */ | | 470 | i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE); |
470 | i = ((((ptsize >> PGSHIFT) + 3) & -2) - 1) * (NPTEPG / SG4_LEV3SIZE); | | 471 | sg = (st_entry_t *)RELOC(Sysseg_pa, u_int); |
471 | sg = &((st_entry_t *)(RELOC(Sysseg_pa, u_int)))[SG4_LEV1SIZE + i]; | | 472 | sg = &sg[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)]; |
472 | esg = &sg[NPTEPG / SG4_LEV3SIZE]; | | 473 | esg = &sg[NPTEPG / SG4_LEV3SIZE]; |
473 | sg_proto = Sysptmap_pa | SG_U | SG_RW | SG_V; | | 474 | sg_proto = Sysptmap_pa | SG_U | SG_RW | SG_V; |
474 | while (sg < esg) { | | 475 | while (sg < esg) { |
475 | *sg++ = sg_proto; | | 476 | *sg++ = sg_proto; |
476 | sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t)); | | 477 | sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t)); |
477 | } | | 478 | } |
478 | | | 479 | |
| | | 480 | /* Include additional level 2 table for Sysmap in protostfree */ |
| | | 481 | RELOC(protostfree, u_int) = |
| | | 482 | (~0 << (1 + nl1desc + 1)) /* & ~(~0 << MAXKL2SIZE) */; |
| | | 483 | |
479 | /* | | 484 | /* |
480 | * Initialize Sysptmap | | 485 | * Initialize Sysptmap |
481 | */ | | 486 | */ |
482 | pg = (pt_entry_t *)Sysptmap_pa; | | 487 | pg = (pt_entry_t *)Sysptmap_pa; |
483 | epg = &pg[ptsize >> PGSHIFT]; | | 488 | epg = &pg[ptsize >> PGSHIFT]; |
484 | pg_proto = ptpa | PG_RW | PG_CI | PG_V; | | 489 | pg_proto = ptpa | PG_RW | PG_CI | PG_V; |
485 | while (pg < epg) { | | 490 | while (pg < epg) { |
486 | *pg++ = pg_proto; | | 491 | *pg++ = pg_proto; |
487 | pg_proto += PAGE_SIZE; | | 492 | pg_proto += PAGE_SIZE; |
488 | } | | 493 | } |
489 | /* | | 494 | /* |
490 | * Invalidate rest of Sysptmap page | | 495 | * Invalidate rest of Sysptmap page |
491 | */ | | 496 | */ |
492 | epg = (pt_entry_t *)(Sysptmap_pa + PAGE_SIZE - sizeof(st_entry_t)); | | 497 | epg = (pt_entry_t *)(Sysptmap_pa + PAGE_SIZE - sizeof(st_entry_t)); |
493 | while (pg < epg) | | 498 | while (pg < epg) |
494 | *pg++ = SG_NV; | | 499 | *pg++ = SG_NV; |
495 | pg = (pt_entry_t *)Sysptmap_pa; | | 500 | pg = (pt_entry_t *)Sysptmap_pa; |
496 | pg = &pg[256 - 1]; /* XXX */ | | 501 | pg = &pg[256 - 1]; /* XXX */ |
497 | *pg = Sysptmap_pa | PG_RW | PG_CI | PG_V; | | 502 | *pg = Sysptmap_pa | PG_RW | PG_CI | PG_V; |
498 | } else | | 503 | } else |
499 | #endif /* M68040 */ | | 504 | #endif /* M68040 */ |
500 | { | | 505 | { |
501 | /* | | 506 | /* |
502 | * Map the page table pages in both the HW segment table | | 507 | * Map the page table pages in both the HW segment table |
503 | * and the software Sysptmap. | | 508 | * and the software Sysptmap. |
504 | */ | | 509 | */ |
505 | sg = (st_entry_t *)RELOC(Sysseg_pa, u_int); | | 510 | sg = (st_entry_t *)RELOC(Sysseg_pa, u_int); |
506 | pg = (pt_entry_t *)Sysptmap_pa; | | 511 | pg = (pt_entry_t *)Sysptmap_pa; |
507 | epg = &pg[ptsize >> PGSHIFT]; | | 512 | epg = &pg[ptsize >> PGSHIFT]; |
508 | sg_proto = ptpa | SG_RW | SG_V; | | 513 | sg_proto = ptpa | SG_RW | SG_V; |
509 | pg_proto = ptpa | PG_RW | PG_CI | PG_V; | | 514 | pg_proto = ptpa | PG_RW | PG_CI | PG_V; |
510 | while (pg < epg) { | | 515 | while (pg < epg) { |
511 | *sg++ = sg_proto; | | 516 | *sg++ = sg_proto; |
512 | *pg++ = pg_proto; | | 517 | *pg++ = pg_proto; |
513 | sg_proto += PAGE_SIZE; | | 518 | sg_proto += PAGE_SIZE; |
514 | pg_proto += PAGE_SIZE; | | 519 | pg_proto += PAGE_SIZE; |
515 | } | | 520 | } |
516 | /* | | 521 | /* |
517 | * invalidate the remainder of each table | | 522 | * invalidate the remainder of each table |
518 | */ | | 523 | */ |
519 | /* XXX PAGE_SIZE dependent constant: 256 or 1024 */ | | 524 | /* XXX PAGE_SIZE dependent constant: 256 or 1024 */ |
520 | epg = (pt_entry_t *)(Sysptmap_pa + (256 - 1) * sizeof(st_entry_t)); | | 525 | epg = (pt_entry_t *)(Sysptmap_pa + (256 - 1) * sizeof(st_entry_t)); |
521 | while (pg < epg) { | | 526 | while (pg < epg) { |
522 | *sg++ = SG_NV; | | 527 | *sg++ = SG_NV; |
523 | *pg++ = PG_NV; | | 528 | *pg++ = PG_NV; |
524 | } | | 529 | } |
525 | *sg = Sysptmap_pa | SG_RW | SG_V; | | 530 | *sg = Sysptmap_pa | SG_RW | SG_V; |
526 | *pg = Sysptmap_pa | PG_RW | PG_CI | PG_V; | | 531 | *pg = Sysptmap_pa | PG_RW | PG_CI | PG_V; |
527 | /* XXX zero out rest of page? */ | | 532 | /* XXX zero out rest of page? */ |
528 | } | | 533 | } |
529 | | | 534 | |
530 | /* | | 535 | /* |
531 | * initialize kernel page table page(s) (assume load at VA 0) | | 536 | * initialize kernel page table page(s) (assume load at VA 0) |
532 | */ | | 537 | */ |
533 | pg_proto = fphystart | PG_RO | PG_V; /* text pages are RO */ | | 538 | pg_proto = fphystart | PG_RO | PG_V; /* text pages are RO */ |
534 | pg = (pt_entry_t *)ptpa; | | 539 | pg = (pt_entry_t *)ptpa; |
535 | *pg++ = PG_NV; /* Make page 0 invalid */ | | 540 | *pg++ = PG_NV; /* Make page 0 invalid */ |
536 | pg_proto += PAGE_SIZE; | | 541 | pg_proto += PAGE_SIZE; |
537 | for (kva = PAGE_SIZE; kva < (vaddr_t)etext; | | 542 | for (kva = PAGE_SIZE; kva < (vaddr_t)etext; |
538 | kva += PAGE_SIZE, pg_proto += PAGE_SIZE) | | 543 | kva += PAGE_SIZE, pg_proto += PAGE_SIZE) |
539 | *pg++ = pg_proto; | | 544 | *pg++ = pg_proto; |
540 | | | 545 | |
541 | /* | | 546 | /* |
542 | * data, bss and dynamic tables are read/write | | 547 | * data, bss and dynamic tables are read/write |
543 | */ | | 548 | */ |
544 | pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V; | | 549 | pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V; |
545 | | | 550 | |
546 | #if defined(M68040) || defined(M68060) | | 551 | #if defined(M68040) || defined(M68060) |
547 | /* | | 552 | /* |
548 | * map the kernel segment table cache invalidated for | | 553 | * map the kernel segment table cache invalidated for |
549 | * these machines (for the 68040 not strictly necessary, but | | 554 | * these machines (for the 68040 not strictly necessary, but |
550 | * recommended by Motorola; for the 68060 mandatory) | | 555 | * recommended by Motorola; for the 68060 mandatory) |
551 | */ | | 556 | */ |
552 | if (RELOC(mmutype, int) == MMU_68040) { | | 557 | if (RELOC(mmutype, int) == MMU_68040) { |
553 | | | 558 | |
554 | if (RELOC(kernel_copyback, int)) | | 559 | if (RELOC(kernel_copyback, int)) |
555 | pg_proto |= PG_CCB; | | 560 | pg_proto |= PG_CCB; |
556 | | | 561 | |
557 | /* | | 562 | /* |
558 | * ASSUME: segment table and statically allocated page tables | | 563 | * ASSUME: segment table and statically allocated page tables |
559 | * of the kernel are contiguously allocated, start at | | 564 | * of the kernel are contiguously allocated, start at |
560 | * Sysseg and end at the current value of vstart. | | 565 | * Sysseg and end at the current value of vstart. |
561 | */ | | 566 | */ |
562 | for (; kva < RELOC(Sysseg, u_int); | | 567 | for (; kva < RELOC(Sysseg, u_int); |
563 | kva += PAGE_SIZE, pg_proto += PAGE_SIZE) | | 568 | kva += PAGE_SIZE, pg_proto += PAGE_SIZE) |
564 | *pg++ = pg_proto; | | 569 | *pg++ = pg_proto; |
565 | | | 570 | |
566 | pg_proto = (pg_proto & ~PG_CCB) | PG_CI; | | 571 | pg_proto = (pg_proto & ~PG_CCB) | PG_CI; |
567 | for (; kva < vstart; kva += PAGE_SIZE, pg_proto += PAGE_SIZE) | | 572 | for (; kva < vstart; kva += PAGE_SIZE, pg_proto += PAGE_SIZE) |
568 | *pg++ = pg_proto; | | 573 | *pg++ = pg_proto; |
569 | | | 574 | |
570 | pg_proto = (pg_proto & ~PG_CI); | | 575 | pg_proto = (pg_proto & ~PG_CI); |
571 | if (RELOC(kernel_copyback, int)) | | 576 | if (RELOC(kernel_copyback, int)) |
572 | pg_proto |= PG_CCB; | | 577 | pg_proto |= PG_CCB; |
573 | } | | 578 | } |
574 | #endif | | 579 | #endif |
575 | /* | | 580 | /* |
576 | * go till end of data allocated so far | | 581 | * go till end of data allocated so far |
577 | * plus proc0 u-area (to be allocated) | | 582 | * plus proc0 u-area (to be allocated) |
578 | */ | | 583 | */ |
579 | for (; kva < vstart; kva += PAGE_SIZE, pg_proto += PAGE_SIZE) | | 584 | for (; kva < vstart; kva += PAGE_SIZE, pg_proto += PAGE_SIZE) |
580 | *pg++ = pg_proto; | | 585 | *pg++ = pg_proto; |
581 | /* | | 586 | /* |
582 | * invalidate remainder of kernel PT | | 587 | * invalidate remainder of kernel PT |
583 | */ | | 588 | */ |
584 | while (pg < (pt_entry_t *) (ptpa + ptsize)) | | 589 | while (pg < (pt_entry_t *) (ptpa + ptsize)) |
585 | *pg++ = PG_NV; | | 590 | *pg++ = PG_NV; |
586 | | | 591 | |
587 | /* | | 592 | /* |
588 | * validate internal IO PTEs following current vstart | | 593 | * validate internal IO PTEs following current vstart |
589 | */ | | 594 | */ |
590 | pg = &((u_int *)ptpa)[vstart >> PGSHIFT]; | | 595 | pg = &((u_int *)ptpa)[vstart >> PGSHIFT]; |
591 | #ifdef DRACO | | 596 | #ifdef DRACO |
592 | if ((id >> 24) == 0x7D) { | | 597 | if ((id >> 24) == 0x7D) { |
593 | RELOC(DRCCADDR, u_int) = vstart; | | 598 | RELOC(DRCCADDR, u_int) = vstart; |
594 | RELOC(CIAADDR, vaddr_t) = | | 599 | RELOC(CIAADDR, vaddr_t) = |
595 | RELOC(DRCCADDR, u_int) + DRCIAPG * PAGE_SIZE; | | 600 | RELOC(DRCCADDR, u_int) + DRCIAPG * PAGE_SIZE; |
596 | if (RELOC(z2mem_end, vaddr_t) == 0) | | 601 | if (RELOC(z2mem_end, vaddr_t) == 0) |
597 | RELOC(ZBUSADDR, vaddr_t) = | | 602 | RELOC(ZBUSADDR, vaddr_t) = |
598 | RELOC(DRCCADDR, u_int) + NDRCCPG * PAGE_SIZE; | | 603 | RELOC(DRCCADDR, u_int) + NDRCCPG * PAGE_SIZE; |
599 | pg_proto = DRCCBASE | PG_RW | PG_CI | PG_V; | | 604 | pg_proto = DRCCBASE | PG_RW | PG_CI | PG_V; |
600 | while (pg_proto < DRZ2BASE) { | | 605 | while (pg_proto < DRZ2BASE) { |
601 | *pg++ = pg_proto; | | 606 | *pg++ = pg_proto; |
602 | pg_proto += DRCCSTRIDE; | | 607 | pg_proto += DRCCSTRIDE; |
603 | vstart += PAGE_SIZE; | | 608 | vstart += PAGE_SIZE; |
604 | } | | 609 | } |
605 | | | 610 | |
606 | /* NCR 53C710 chip */ | | 611 | /* NCR 53C710 chip */ |
607 | *pg++ = DRSCSIBASE | PG_RW | PG_CI | PG_V; | | 612 | *pg++ = DRSCSIBASE | PG_RW | PG_CI | PG_V; |
608 | vstart += PAGE_SIZE; | | 613 | vstart += PAGE_SIZE; |
609 | | | 614 | |
610 | #ifdef DEBUG_KERNEL_START | | 615 | #ifdef DEBUG_KERNEL_START |
611 | /* | | 616 | /* |
612 | * early rollcolor Altais mapping | | 617 | * early rollcolor Altais mapping |
613 | * XXX (only works if in slot 0) | | 618 | * XXX (only works if in slot 0) |
614 | */ | | 619 | */ |
615 | *pg++ = 0x20000000 | PG_RW | PG_CI | PG_V; | | 620 | *pg++ = 0x20000000 | PG_RW | PG_CI | PG_V; |
616 | vstart += PAGE_SIZE; | | 621 | vstart += PAGE_SIZE; |
617 | #endif | | 622 | #endif |
618 | } else | | 623 | } else |
619 | #endif | | 624 | #endif |
620 | { | | 625 | { |
621 | RELOC(CHIPMEMADDR, vaddr_t) = vstart; | | 626 | RELOC(CHIPMEMADDR, vaddr_t) = vstart; |
622 | pg_proto = CHIPMEMBASE | PG_RW | PG_CI | PG_V; | | 627 | pg_proto = CHIPMEMBASE | PG_RW | PG_CI | PG_V; |
623 | /* CI needed here?? */ | | 628 | /* CI needed here?? */ |
624 | while (pg_proto < CHIPMEMTOP) { | | 629 | while (pg_proto < CHIPMEMTOP) { |
625 | *pg++ = pg_proto; | | 630 | *pg++ = pg_proto; |
626 | pg_proto += PAGE_SIZE; | | 631 | pg_proto += PAGE_SIZE; |
627 | vstart += PAGE_SIZE; | | 632 | vstart += PAGE_SIZE; |
628 | } | | 633 | } |
629 | } | | 634 | } |
630 | if (RELOC(z2mem_end, paddr_t)) { /* XXX */ | | 635 | if (RELOC(z2mem_end, paddr_t)) { /* XXX */ |
631 | RELOC(ZTWOMEMADDR, vaddr_t) = vstart; | | 636 | RELOC(ZTWOMEMADDR, vaddr_t) = vstart; |
632 | RELOC(ZBUSADDR, vaddr_t) = RELOC(ZTWOMEMADDR, vaddr_t) + | | 637 | RELOC(ZBUSADDR, vaddr_t) = RELOC(ZTWOMEMADDR, vaddr_t) + |
633 | RELOC(NZTWOMEMPG, u_int) * PAGE_SIZE; | | 638 | RELOC(NZTWOMEMPG, u_int) * PAGE_SIZE; |
634 | pg_proto = RELOC(z2mem_start, paddr_t) | /* XXX */ | | 639 | pg_proto = RELOC(z2mem_start, paddr_t) | /* XXX */ |
635 | PG_RW | PG_V; /* XXX */ | | 640 | PG_RW | PG_V; /* XXX */ |
636 | while (pg_proto < RELOC(z2mem_end, paddr_t)) { /* XXX */ | | 641 | while (pg_proto < RELOC(z2mem_end, paddr_t)) { /* XXX */ |
637 | *pg++ = pg_proto; /* XXX */ | | 642 | *pg++ = pg_proto; /* XXX */ |
638 | pg_proto += PAGE_SIZE; /* XXX */ | | 643 | pg_proto += PAGE_SIZE; /* XXX */ |
639 | vstart += PAGE_SIZE; | | 644 | vstart += PAGE_SIZE; |
640 | } /* XXX */ | | 645 | } /* XXX */ |
641 | } /* XXX */ | | 646 | } /* XXX */ |
642 | #ifdef DRACO | | 647 | #ifdef DRACO |
643 | if ((id >> 24) != 0x7D) | | 648 | if ((id >> 24) != 0x7D) |
644 | #endif | | 649 | #endif |
645 | { | | 650 | { |
646 | RELOC(CIAADDR, vaddr_t) = vstart; | | 651 | RELOC(CIAADDR, vaddr_t) = vstart; |
647 | pg_proto = CIABASE | PG_RW | PG_CI | PG_V; | | 652 | pg_proto = CIABASE | PG_RW | PG_CI | PG_V; |
648 | while (pg_proto < CIATOP) { | | 653 | while (pg_proto < CIATOP) { |
649 | *pg++ = pg_proto; | | 654 | *pg++ = pg_proto; |
650 | pg_proto += PAGE_SIZE; | | 655 | pg_proto += PAGE_SIZE; |
651 | vstart += PAGE_SIZE; | | 656 | vstart += PAGE_SIZE; |
652 | } | | 657 | } |
653 | RELOC(ZTWOROMADDR, vaddr_t) = vstart; | | 658 | RELOC(ZTWOROMADDR, vaddr_t) = vstart; |
654 | pg_proto = ZTWOROMBASE | PG_RW | PG_CI | PG_V; | | 659 | pg_proto = ZTWOROMBASE | PG_RW | PG_CI | PG_V; |
655 | while (pg_proto < ZTWOROMTOP) { | | 660 | while (pg_proto < ZTWOROMTOP) { |
656 | *pg++ = pg_proto; | | 661 | *pg++ = pg_proto; |
657 | pg_proto += PAGE_SIZE; | | 662 | pg_proto += PAGE_SIZE; |
658 | vstart += PAGE_SIZE; | | 663 | vstart += PAGE_SIZE; |
659 | } | | 664 | } |
660 | RELOC(ZBUSADDR, vaddr_t) = vstart; | | 665 | RELOC(ZBUSADDR, vaddr_t) = vstart; |
661 | /* not on 8k boundary :-( */ | | 666 | /* not on 8k boundary :-( */ |
662 | RELOC(CIAADDR, vaddr_t) += PAGE_SIZE/2; | | 667 | RELOC(CIAADDR, vaddr_t) += PAGE_SIZE/2; |
663 | RELOC(CUSTOMADDR, vaddr_t) = | | 668 | RELOC(CUSTOMADDR, vaddr_t) = |
664 | RELOC(ZTWOROMADDR, vaddr_t) - ZTWOROMBASE + CUSTOMBASE; | | 669 | RELOC(ZTWOROMADDR, vaddr_t) - ZTWOROMBASE + CUSTOMBASE; |
665 | } | | 670 | } |
666 | | | 671 | |
667 | /* | | 672 | /* |
668 | *[ following page tables MAY be allocated to ZORRO3 space, | | 673 | *[ following page tables MAY be allocated to ZORRO3 space, |
669 | * but they're then later mapped in autoconf.c ] | | 674 | * but they're then later mapped in autoconf.c ] |
670 | */ | | 675 | */ |
671 | vstart += RELOC(ZBUSAVAIL, u_int); | | 676 | vstart += RELOC(ZBUSAVAIL, u_int); |
672 | | | 677 | |
673 | /* | | 678 | /* |
674 | * init mem sizes | | 679 | * init mem sizes |
675 | */ | | 680 | */ |
676 | RELOC(maxmem, u_int) = pend >> PGSHIFT; | | 681 | RELOC(maxmem, u_int) = pend >> PGSHIFT; |
677 | RELOC(lowram, u_int) = fphystart; | | 682 | RELOC(lowram, u_int) = fphystart; |
678 | RELOC(physmem, u_int) = fphysize >> PGSHIFT; | | 683 | RELOC(physmem, u_int) = fphysize >> PGSHIFT; |
679 | | | 684 | |
680 | RELOC(virtual_avail, u_int) = vstart; | | 685 | RELOC(virtual_avail, u_int) = vstart; |
681 | | | 686 | |
682 | /* | | 687 | /* |
683 | * Put user page tables starting at next 16MB boundary, to make kernel | | 688 | * Put user page tables starting at next 16MB boundary, to make kernel |
684 | * dumps more readable, with guaranteed 16MB of. | | 689 | * dumps more readable, with guaranteed 16MB of. |
685 | * XXX 16 MB instead of 256 MB should be enough, but... | | 690 | * XXX 16 MB instead of 256 MB should be enough, but... |
686 | * we need to fix the fastmem loading first. (see comment at line 375) | | 691 | * we need to fix the fastmem loading first. (see comment at line 375) |
687 | */ | | 692 | */ |
688 | RELOC(amiga_uptbase, vaddr_t) = | | 693 | RELOC(amiga_uptbase, vaddr_t) = |
689 | roundup(vstart + 0x10000000, 0x10000000); | | 694 | roundup(vstart + 0x10000000, 0x10000000); |
690 | | | 695 | |
691 | /* | | 696 | /* |
692 | * set this before copying the kernel, so the variable is updated in | | 697 | * set this before copying the kernel, so the variable is updated in |
693 | * the `real' place too. protorp[0] is already preset to the | | 698 | * the `real' place too. protorp[0] is already preset to the |
694 | * CRP setting. | | 699 | * CRP setting. |
695 | */ | | 700 | */ |
696 | RELOC(protorp[1], u_int) = RELOC(Sysseg_pa, u_int); | | 701 | RELOC(protorp[1], u_int) = RELOC(Sysseg_pa, u_int); |
697 | | | 702 | |
698 | RELOC(start_c_fphystart, u_int) = fphystart; | | 703 | RELOC(start_c_fphystart, u_int) = fphystart; |
699 | RELOC(start_c_pstart, u_int) = pstart; | | 704 | RELOC(start_c_pstart, u_int) = pstart; |
700 | | | 705 | |
701 | /* | | 706 | /* |
702 | * copy over the kernel (and all now initialized variables) | | 707 | * copy over the kernel (and all now initialized variables) |
703 | * to fastram. DONT use bcopy(), this beast is much larger | | 708 | * to fastram. DONT use bcopy(), this beast is much larger |
704 | * than 128k ! | | 709 | * than 128k ! |
705 | */ | | 710 | */ |
706 | if (loadbase == 0) { | | 711 | if (loadbase == 0) { |
707 | register paddr_t *lp, *le, *fp; | | 712 | register paddr_t *lp, *le, *fp; |
708 | | | 713 | |
709 | lp = (paddr_t *)0; | | 714 | lp = (paddr_t *)0; |
710 | le = (paddr_t *)end_loaded; | | 715 | le = (paddr_t *)end_loaded; |
711 | fp = (paddr_t *)fphystart; | | 716 | fp = (paddr_t *)fphystart; |
712 | while (lp < le) | | 717 | while (lp < le) |
713 | *fp++ = *lp++; | | 718 | *fp++ = *lp++; |
714 | } | | 719 | } |
715 | | | 720 | |
716 | #ifdef DEBUG_KERNEL_START | | 721 | #ifdef DEBUG_KERNEL_START |
717 | if ((id>>24)==0x7D) { | | 722 | if ((id>>24)==0x7D) { |
718 | *altaiscolpt = 0; | | 723 | *altaiscolpt = 0; |
719 | *altaiscol = 40; | | 724 | *altaiscol = 40; |
720 | *altaiscol = 40; | | 725 | *altaiscol = 40; |
721 | *altaiscol = 0; | | 726 | *altaiscol = 0; |
722 | } else | | 727 | } else |
723 | ((volatile struct Custom *)0xdff000)->color[0] = 0xAA0; /* YELLOW */ | | 728 | ((volatile struct Custom *)0xdff000)->color[0] = 0xAA0; /* YELLOW */ |
724 | #endif | | 729 | #endif |
725 | /* | | 730 | /* |
726 | * prepare to enable the MMU | | 731 | * prepare to enable the MMU |
727 | */ | | 732 | */ |
728 | #if defined(M68040) || defined(M68060) | | 733 | #if defined(M68040) || defined(M68060) |
729 | if (RELOC(mmutype, int) == MMU_68040) { | | 734 | if (RELOC(mmutype, int) == MMU_68040) { |
730 | if (id & AMIGA_68060) { | | 735 | if (id & AMIGA_68060) { |
731 | /* do i need to clear the branch cache? */ | | 736 | /* do i need to clear the branch cache? */ |
732 | __asm volatile ( ".word 0x4e7a,0x0002;" | | 737 | __asm volatile ( ".word 0x4e7a,0x0002;" |
733 | "orl #0x400000,%%d0;" | | 738 | "orl #0x400000,%%d0;" |
734 | ".word 0x4e7b,0x0002" : : : "d0"); | | 739 | ".word 0x4e7b,0x0002" : : : "d0"); |
735 | } | | 740 | } |
736 | | | 741 | |
737 | /* | | 742 | /* |
738 | * movel Sysseg_pa,%a0; | | 743 | * movel Sysseg_pa,%a0; |
739 | * movec %a0,%srp; | | 744 | * movec %a0,%srp; |
740 | */ | | 745 | */ |
741 | | | 746 | |
742 | __asm volatile ("movel %0,%%a0; .word 0x4e7b,0x8807" | | 747 | __asm volatile ("movel %0,%%a0; .word 0x4e7b,0x8807" |
743 | : : "a" (RELOC(Sysseg_pa, u_int)) : "a0"); | | 748 | : : "a" (RELOC(Sysseg_pa, u_int)) : "a0"); |
744 | | | 749 | |
745 | #ifdef DEBUG_KERNEL_START | | 750 | #ifdef DEBUG_KERNEL_START |
746 | if ((id>>24)==0x7D) { | | 751 | if ((id>>24)==0x7D) { |
747 | *altaiscolpt = 0; | | 752 | *altaiscolpt = 0; |
748 | *altaiscol = 40; | | 753 | *altaiscol = 40; |
749 | *altaiscol = 33; | | 754 | *altaiscol = 33; |
750 | *altaiscol = 0; | | 755 | *altaiscol = 0; |
751 | } else | | 756 | } else |
752 | ((volatile struct Custom *)0xdff000)->color[0] = 0xA70; /* ORANGE */ | | 757 | ((volatile struct Custom *)0xdff000)->color[0] = 0xA70; /* ORANGE */ |
753 | #endif | | 758 | #endif |
754 | } else | | 759 | } else |
755 | #endif | | 760 | #endif |
756 | { | | 761 | { |
757 | /* | | 762 | /* |
758 | * setup and load SRP | | 763 | * setup and load SRP |
759 | * nolimit, share global, 4 byte PTE's | | 764 | * nolimit, share global, 4 byte PTE's |
760 | */ | | 765 | */ |
761 | (RELOC(protorp[0], u_int)) = 0x80000202; | | 766 | (RELOC(protorp[0], u_int)) = 0x80000202; |
762 | __asm volatile ("pmove %0@,%%srp":: "a" (&RELOC(protorp, u_int))); | | 767 | __asm volatile ("pmove %0@,%%srp":: "a" (&RELOC(protorp, u_int))); |
763 | } | | 768 | } |
764 | } | | 769 | } |
765 | | | 770 | |
/*
 * Second-stage startup, entered after the MMU has been enabled and the
 * kernel runs at its final (fast-mem) address: bootstrap the pmap with
 * the start_c_pstart/start_c_fphystart values recorded earlier, publish
 * the now-mapped CIA/custom-chip register addresses for locore.s, and
 * mask every hardware interrupt source while setting the master enable.
 */
void
start_c_finish()
{
#ifdef P5PPC68KBOARD
	struct cfdev *cdp, *ecdp;
#endif

#ifdef DEBUG_KERNEL_START
#ifdef DRACO
	if ((id >> 24) == 0x7D) { /* mapping on, is_draco() is valid */
		int i;
		/* XXX experimental Altais register mapping only */
		altaiscolpt = (volatile u_int8_t *)(DRCCADDR+PAGE_SIZE*9+0x3c8);
		altaiscol = altaiscolpt + 1;
		/* blink the Altais palette as a "we got this far" beacon */
		for (i=0; i<140000; i++) {
			*altaiscolpt = 0;
			*altaiscol = 0;
			*altaiscol = 40;
			*altaiscol = 0;
		}
	} else
#endif
		((volatile struct Custom *)CUSTOMADDR)->color[0] = 0x0a0; /* GREEN */
#endif

	/* clear proc0's u-area before the VM system starts using it */
	bzero ((u_char *)proc0paddr, USPACE);
	pmap_bootstrap(start_c_pstart, start_c_fphystart);

	/*
	 * to make life easier in locore.s, set these addresses explicitly
	 */
	CIAAbase = CIAADDR + 0x1001;	/* CIA-A at odd addresses ! */
	CIABbase = CIAADDR;
	CUSTOMbase = CUSTOMADDR;
#ifdef DRACO
	if (is_draco()) {
		/*
		 * DraCo interrupt registers live at consecutive pages
		 * (odd byte lanes) inside the DRCC area -- NOTE(review):
		 * layout presumed from the strides used here, confirm
		 * against the DraCo hardware documentation.
		 */
		draco_intena = (volatile u_int8_t *)DRCCADDR+1;
		draco_intpen = draco_intena + PAGE_SIZE;
		draco_intfrc = draco_intpen + PAGE_SIZE;
		draco_misc = draco_intfrc + PAGE_SIZE;
		draco_ioct = (struct drioct *)(DRCCADDR + DRIOCTLPG*PAGE_SIZE);
	} else
#endif
	{
		INTREQRaddr = (vaddr_t)&custom.intreqr;
		INTREQWaddr = (vaddr_t)&custom.intreq;
	}
	/*
	 * Get our chip memory allocation system working
	 */
	chipmem_start += CHIPMEMADDR;
	chipmem_end += CHIPMEMADDR;

	/* XXX is: this MUST NOT BE DONE before the pmap_bootstrap() call */
	if (z2mem_end) {
		z2mem_end = ZTWOMEMADDR + NZTWOMEMPG * PAGE_SIZE;
		z2mem_start = ZTWOMEMADDR;
	}

#if 0
	i = *(int *)proc0paddr;
	*(volatile int *)proc0paddr = i;
#endif

	/*
	 * Disable all interrupt sources, but set the master (global)
	 * interrupt enable bit so that specific driver code can enable
	 * individual sources later.
	 */
#ifdef DRACO
	if (is_draco()) {
		/* XXX to be done. For now, just: */
		*draco_intena = 0;
		*draco_intpen = 0;
		*draco_intfrc = 0;
		ciaa.icr = 0x7f;			/* and keyboard */
		ciab.icr = 0x7f;			/* and again */

		draco_ioct->io_control &=
		    ~(DRCNTRL_KBDINTENA|DRCNTRL_FDCINTENA); /* and another */

		draco_ioct->io_status2 &=
		    ~(DRSTAT2_PARIRQENA|DRSTAT2_TMRINTENA); /* some more */

		*(volatile u_int8_t *)(DRCCADDR + 1 +
		    DRSUPIOPG*PAGE_SIZE + 4*(0x3F8 + 1)) = 0; /* and com0 */

		*(volatile u_int8_t *)(DRCCADDR + 1 +
		    DRSUPIOPG*PAGE_SIZE + 4*(0x2F8 + 1)) = 0; /* and com1 */

		draco_ioct->io_control |= DRCNTRL_WDOGDIS; /* stop Fido */
		*draco_misc &= ~1/*DRMISC_FASTZ2*/;

	} else
#endif
	{
		custom.intena = 0x7fff;			/* disable ints */
		custom.intena = INTF_SETCLR | INTF_INTEN;
							/* but allow them */
		custom.intreq = 0x7fff;			/* clear any current */
		ciaa.icr = 0x7f;			/* and keyboard */
		ciab.icr = 0x7f;			/* and again */

		/*
		 * remember address of read and write intena register for use
		 * by extended spl?() macros.
		 */
		amiga_intena_read = &custom.intenar;
		amiga_intena_write = &custom.intena;
	}

	/*
	 * This is needed for 3000's with superkick ROM's. Bit 7 of
	 * 0xde0002 enables the ROM if set. If this isn't set the machine
	 * has to be powercycled in order for it to boot again. ICKA! RFH
	 */
	if (is_a3000()) {
		volatile unsigned char *a3000_magic_reset;

		a3000_magic_reset = (volatile unsigned char *)ztwomap(0xde0002);

		/* Turn SuperKick ROM (V36) back on */
		*a3000_magic_reset |= 0x80;
	}

#ifdef P5PPC68KBOARD
	/*
	 * Are we an P5 PPC/68K board? install different reset
	 * routine.
	 */

	for (cdp = cfdev, ecdp = &cfdev[ncfdev]; cdp < ecdp; cdp++) {
		if (cdp->rom.manid == 8512 &&
		    (cdp->rom.prodid == 100 || cdp->rom.prodid == 110)) {
			p5ppc = 1;
			break;
		}
	}
#endif
}
905 | | | 910 | |
906 | void | | 911 | void |
907 | rollcolor(color) | | 912 | rollcolor(color) |
908 | int color; | | 913 | int color; |
909 | { | | 914 | { |
910 | int s, i; | | 915 | int s, i; |
911 | | | 916 | |
912 | s = splhigh(); | | 917 | s = splhigh(); |
913 | /* | | 918 | /* |
914 | * need to adjust count - | | 919 | * need to adjust count - |
915 | * too slow when cache off, too fast when cache on | | 920 | * too slow when cache off, too fast when cache on |
916 | */ | | 921 | */ |
917 | for (i = 0; i < 400000; i++) | | 922 | for (i = 0; i < 400000; i++) |
918 | ((volatile struct Custom *)CUSTOMbase)->color[0] = color; | | 923 | ((volatile struct Custom *)CUSTOMbase)->color[0] = color; |
919 | splx(s); | | 924 | splx(s); |
920 | } | | 925 | } |
921 | | | 926 | |
#ifdef DEVRELOAD
/*
 * Kernel reloading code
 */

/* a.out header of the kernel being loaded -- see AOUT_LDPGSZ below */
static struct exec kernel_exec;
/* buffer the incoming kernel image is accumulated into */
static u_char *kernel_image;
/* text size and current write offset into the image -- TODO confirm
 * exact semantics against kernel_reload_write() (not fully visible) */
static u_long kernel_text_size, kernel_load_ofs;
/* presumably the phase of the multi-step load state machine -- verify */
static u_long kernel_load_phase;
/* end offset of the segment currently being written -- verify */
static u_long kernel_load_endseg;
/* symbol table size and resulting esym value -- verify */
static u_long kernel_symbol_size, kernel_symbol_esym;

/* This supports the /dev/reload device, major 2, minor 20,
   hooked into mem.c. Author: Bryan Ford. */
936 | | | 941 | |
937 | /* | | 942 | /* |
938 | * This is called below to find out how much magic storage | | 943 | * This is called below to find out how much magic storage |
939 | * will be needed after a kernel image to be reloaded. | | 944 | * will be needed after a kernel image to be reloaded. |
940 | */ | | 945 | */ |
941 | static int | | 946 | static int |
942 | kernel_image_magic_size() | | 947 | kernel_image_magic_size() |
943 | { | | 948 | { |
944 | int sz; | | 949 | int sz; |
945 | | | 950 | |
946 | /* 4 + cfdev's + Mem_Seg's + 4 */ | | 951 | /* 4 + cfdev's + Mem_Seg's + 4 */ |
947 | sz = 8 + ncfdev * sizeof(struct cfdev) | | 952 | sz = 8 + ncfdev * sizeof(struct cfdev) |
948 | + memlist->m_nseg * sizeof(struct boot_memseg); | | 953 | + memlist->m_nseg * sizeof(struct boot_memseg); |
949 | return(sz); | | 954 | return(sz); |
950 | } | | 955 | } |
951 | | | 956 | |
/*
 * This actually copies the magic information (cfdev count word,
 * cfdev array, memory segment list) to dest, which must have at
 * least kernel_image_magic_size() bytes of room.
 */
static void
kernel_image_magic_copy(dest)
	u_char *dest;
{
	*((int*)dest) = ncfdev;		/* leading count word */
	dest += 4;
	/*
	 * NOTE(review): this single bcopy starting at cfdev spans the
	 * cfdev array, the boot_memseg list, and 4 extra bytes - it
	 * assumes those objects are contiguous in memory (presumably
	 * arranged that way at startup); confirm against the early
	 * allocation code before changing either table.
	 */
	bcopy(cfdev, dest, ncfdev * sizeof(struct cfdev)
	    + memlist->m_nseg * sizeof(struct boot_memseg) + 4);
}
962 | | | 967 | |
/*
 * Load page size used to round up the reloaded kernel's text
 * segment.  The #undef discards any prior definition; the 8192
 * value is a guess (hence the "XXX ???") - confirm against the
 * a.out page rounding the boot path actually expects.
 */
#undef AOUT_LDPGSZ
#define AOUT_LDPGSZ 8192	/* XXX ??? */
965 | | | 970 | |
/*
 * Write handler for the /dev/reload device: accumulates an a.out
 * kernel image across successive writes and, once complete, hands
 * it to kernel_reload() in locore.s to boot.
 *
 * The load progresses through kernel_load_phase:
 *	first write:	exec header only; image buffer allocated here
 *	phase 0:	loading text
 *	phase 1:	loading data (bss is zeroed afterwards)
 *	phase 3:	loading symbol table (only when esym is in use)
 *	phase 2:	image complete; magic info appended, kernel reloaded
 *
 * Returns 0 while more data is expected, an errno on failure, or
 * ENODEV if kernel_reload() declined to start the new kernel.
 */
int
kernel_reload_write(uio)
	struct uio *uio;
{
	extern int eclockfreq;
	struct iovec *iov;
	int error, c;

	iov = uio->uio_iov;

	if (kernel_image == 0) {
		/*
		 * We have to get at least the whole exec header
		 * in the first write.
		 */
		if (iov->iov_len < sizeof(kernel_exec))
			return ENOEXEC;		/* XXX */

		/*
		 * Pull in the exec header and check it.
		 */
		if ((error = uiomove((void *)&kernel_exec, sizeof(kernel_exec),
		     uio)) != 0)
			return(error);
		printf("loading kernel %ld+%ld+%ld+%ld\n", kernel_exec.a_text,
		    kernel_exec.a_data, kernel_exec.a_bss,
		    esym == NULL ? 0 : kernel_exec.a_syms);
		/*
		 * Looks good - allocate memory for a kernel image.
		 * Text is rounded up to a load-page boundary.
		 */
		kernel_text_size = (kernel_exec.a_text
		    + AOUT_LDPGSZ - 1) & (-AOUT_LDPGSZ);
		/*
		 * Estimate space needed for symbol names, since we don't
		 * know how big it really is (guess: 16 bytes of string
		 * per 12-byte symbol entry; see case 3 below).
		 */
		if (esym != NULL) {
			kernel_symbol_size = kernel_exec.a_syms;
			kernel_symbol_size += 16 * (kernel_symbol_size / 12);
		}
		/*
		 * XXX - should check that image will fit in CHIP memory
		 * XXX return an error if it doesn't
		 */
		if ((kernel_text_size + kernel_exec.a_data +
		    kernel_exec.a_bss + kernel_symbol_size +
		    kernel_image_magic_size()) > boot_cphysize)
			return (EFBIG);
		kernel_image = malloc(kernel_text_size + kernel_exec.a_data
		    + kernel_exec.a_bss
		    + kernel_symbol_size
		    + kernel_image_magic_size(),
		    M_TEMP, M_WAITOK);
		kernel_load_ofs = 0;
		kernel_load_phase = 0;
		kernel_load_endseg = kernel_exec.a_text;
		return(0);
	}
	/*
	 * Continue loading in the kernel image.
	 */
	c = min(iov->iov_len, kernel_load_endseg - kernel_load_ofs);
	c = min(c, MAXPHYS);
	if ((error = uiomove(kernel_image + kernel_load_ofs, (int)c, uio)) != 0)
		return(error);
	kernel_load_ofs += c;

	/*
	 * Fun and games to handle loading symbols - the length of the
	 * string table isn't known until after the symbol table has
	 * been loaded.  We have to load the kernel text, data, and
	 * the symbol table, then get the size of the strings.  A
	 * new kernel image is then allocated and the data currently
	 * loaded moved to the new image.  Then continue reading the
	 * string table.  This has problems if there isn't enough
	 * room to allocate space for the two copies of the kernel
	 * image.  So the approach I took is to guess at the size
	 * of the symbol strings.  If the guess is wrong, the symbol
	 * table is ignored.
	 */

	/* Wait until the current segment has been fully received. */
	if (kernel_load_ofs != kernel_load_endseg)
		return(0);

	switch (kernel_load_phase) {
	case 0:		/* done loading kernel text */
		/* data starts at the page-rounded end of text */
		kernel_load_ofs = kernel_text_size;
		kernel_load_endseg = kernel_load_ofs + kernel_exec.a_data;
		kernel_load_phase = 1;
		break;
	case 1:		/* done loading kernel data */
		/* zero the bss in the in-core image */
		for(c = 0; c < kernel_exec.a_bss; c++)
			kernel_image[kernel_load_ofs + c] = 0;
		kernel_load_ofs += kernel_exec.a_bss;
		if (esym) {
			/*
			 * Expect the symbol table next: a size word
			 * (written here), the symbol entries, and the
			 * string table size word (hence the + 8).
			 */
			kernel_load_endseg = kernel_load_ofs
			    + kernel_exec.a_syms + 8;
			*((u_long *)(kernel_image + kernel_load_ofs)) =
			    kernel_exec.a_syms;
			kernel_load_ofs += 4;
			kernel_load_phase = 3;
			break;
		}
		/*FALLTHROUGH*/
	case 2:		/* done loading kernel */

		/*
		 * Put the finishing touches on the kernel image.
		 */
		kernel_image_magic_copy(kernel_image + kernel_load_ofs);
		/*
		 * Start the new kernel with code in locore.s.
		 */
		kernel_reload(kernel_image,
		    kernel_load_ofs + kernel_image_magic_size(),
		    kernel_exec.a_entry, boot_fphystart, boot_fphysize,
		    boot_cphysize, kernel_symbol_esym, eclockfreq,
		    boot_flags, scsi_nosync, boot_partition);
		/*
		 * kernel_reload() now checks to see if the reload_code
		 * is at the same location in the new kernel.
		 * If it isn't, it will return and we will return
		 * an error.
		 */
		free(kernel_image, M_TEMP);
		kernel_image = NULL;
		return (ENODEV);	/* Say operation not supported */
	case 3:		/* done loading kernel symbol table */
		/*
		 * The word just loaded past the symbol table holds the
		 * string table size; clamp it to the estimate made at
		 * allocation time so the buffer cannot be overrun (a
		 * wrong guess merely loses the symbols).
		 */
		c = *((u_long *)(kernel_image + kernel_load_ofs - 4));
		if (c > 16 * (kernel_exec.a_syms / 12))
			c = 16 * (kernel_exec.a_syms / 12);
		kernel_load_endseg += c - 4;
		kernel_symbol_esym = kernel_load_endseg;
#ifdef notyet
		kernel_image_copy = kernel_image;
		kernel_image = malloc(kernel_load_ofs + c
		    + kernel_image_magic_size(), M_TEMP, M_WAITOK);
		if (kernel_image == NULL)
			panic("kernel_reload failed second malloc");
		for (c = 0; c < kernel_load_ofs; c += MAXPHYS)
			bcopy(kernel_image_copy + c, kernel_image + c,
			    (kernel_load_ofs - c) > MAXPHYS ? MAXPHYS :
			    kernel_load_ofs - c);
#endif
		/* continue loading the string table, then finish (case 2) */
		kernel_load_phase = 2;
	}
	return(0);
}
1114 | #endif | | 1119 | #endif |