Sat Jan 3 07:11:02 2009 UTC
Sync with an update on amiga:
Rewrite a weird calculation, marked "XXX fix calcurations XXX", of the
index into the lev2 segment table for the kernel Sysptmap, using more
meaningful expressions and a PAGE_SIZE-independent macro.
Also use "~0" rather than "-1" for an unsigned bitmap value.

Should produce the same results, with no functional change.

Tested on Falcon with 68060 by Tuomo Makinen.


(tsutsui)
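
Below, for reference, is a minimal standalone sketch of the point of the
change, not the committed kernel code: it mirrors the descriptor-count
arithmetic visible in the mmu040_setup() hunk at the end of the diff
(nl2desc/nl1desc replacing the reused index 'i') and checks that spelling
the all-ones pattern as "~0" yields the same unsigned protostfree bitmap
as the old "-1". The constants and the "+ 2" shift offset are illustrative
stand-ins taken from the old code; the PAGE_SIZE-independent macro
mentioned in the log is not visible in the truncated hunk and is not
reproduced here.

/*
 * Standalone sketch, not kernel code.  The SG4_* and NPTEPG values
 * below are illustrative stand-ins for the <machine/pte.h> definitions.
 */
#include <stdio.h>

#define PGSHIFT		13			/* 8KB pages, as in the "pagesize=8k" TC setup in the diff */
#define NPTEPG		(1 << (PGSHIFT - 2))	/* 4-byte PTEs per page */
#define SG4_LEV2SIZE	128			/* illustrative */
#define SG4_LEV3SIZE	64			/* illustrative */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	unsigned int nptpages = 4;	/* kernel PT pages, i.e. ptsize >> PGSHIFT */
	unsigned int nl2desc, nl1desc;
	unsigned int oldmask, newmask;

	/* level-2 descriptors needed to map the kernel PT pages */
	nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
	/* level-1 descriptors needed to map those level-2 tables */
	nl1desc = roundup(nl2desc, SG4_LEV2SIZE) / SG4_LEV2SIZE;

	/*
	 * Old vs. new spelling of the all-ones pattern; both rely on
	 * two's-complement behaviour, exactly like the kernel code.
	 * The "+ 2" offset is carried over from the removed line; the
	 * committed shift expression may differ.
	 */
	oldmask = (-1 << (nl1desc + 2));
	newmask = (~0 << (nl1desc + 2));

	printf("nl2desc=%u nl1desc=%u old=%#x new=%#x equal=%d\n",
	    nl2desc, nl1desc, oldmask, newmask, oldmask == newmask);
	return 0;
}
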
cvs diff -r1.70 -r1.71 src/sys/arch/atari/atari/atari_init.c

--- src/sys/arch/atari/atari/atari_init.c 2009/01/02 04:38:09 1.70
+++ src/sys/arch/atari/atari/atari_init.c 2009/01/03 07:11:02 1.71
@@ -1,1216 +1,1217 @@
1/* $NetBSD: atari_init.c,v 1.70 2009/01/02 04:38:09 tsutsui Exp $ */ 1/* $NetBSD: atari_init.c,v 1.71 2009/01/03 07:11:02 tsutsui Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1995 Leo Weppelman 4 * Copyright (c) 1995 Leo Weppelman
5 * Copyright (c) 1994 Michael L. Hitch 5 * Copyright (c) 1994 Michael L. Hitch
6 * Copyright (c) 1993 Markus Wild 6 * Copyright (c) 1993 Markus Wild
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed by Markus Wild. 19 * This product includes software developed by Markus Wild.
20 * 4. The name of the author may not be used to endorse or promote products 20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission 21 * derived from this software without specific prior written permission
22 * 22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35#include <sys/cdefs.h> 35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: atari_init.c,v 1.70 2009/01/02 04:38:09 tsutsui Exp $"); 36__KERNEL_RCSID(0, "$NetBSD: atari_init.c,v 1.71 2009/01/03 07:11:02 tsutsui Exp $");
37 37
38#include "opt_ddb.h" 38#include "opt_ddb.h"
39#include "opt_mbtype.h" 39#include "opt_mbtype.h"
40#include "opt_m060sp.h" 40#include "opt_m060sp.h"
41 41
42#include <sys/param.h> 42#include <sys/param.h>
43#include <sys/systm.h> 43#include <sys/systm.h>
44#include <sys/proc.h> 44#include <sys/proc.h>
45#include <sys/user.h> 45#include <sys/user.h>
46#include <sys/ioctl.h> 46#include <sys/ioctl.h>
47#include <sys/select.h> 47#include <sys/select.h>
48#include <sys/tty.h> 48#include <sys/tty.h>
49#include <sys/buf.h> 49#include <sys/buf.h>
50#include <sys/msgbuf.h> 50#include <sys/msgbuf.h>
51#include <sys/mbuf.h> 51#include <sys/mbuf.h>
52#include <sys/extent.h> 52#include <sys/extent.h>
53#include <sys/protosw.h> 53#include <sys/protosw.h>
54#include <sys/domain.h> 54#include <sys/domain.h>
55#include <sys/dkbad.h> 55#include <sys/dkbad.h>
56#include <sys/reboot.h> 56#include <sys/reboot.h>
57#include <sys/exec.h> 57#include <sys/exec.h>
58#include <sys/core.h> 58#include <sys/core.h>
59#include <sys/kcore.h> 59#include <sys/kcore.h>
60 60
61#include <uvm/uvm_extern.h> 61#include <uvm/uvm_extern.h>
62 62
63#include <machine/vmparam.h> 63#include <machine/vmparam.h>
64#include <machine/pte.h> 64#include <machine/pte.h>
65#include <machine/cpu.h> 65#include <machine/cpu.h>
66#include <machine/iomap.h> 66#include <machine/iomap.h>
67#include <machine/mfp.h> 67#include <machine/mfp.h>
68#include <machine/scu.h> 68#include <machine/scu.h>
69#include <machine/acia.h> 69#include <machine/acia.h>
70#include <machine/kcore.h> 70#include <machine/kcore.h>
71 71
72#include <m68k/cpu.h> 72#include <m68k/cpu.h>
73#include <m68k/cacheops.h> 73#include <m68k/cacheops.h>
74 74
75#include <atari/atari/intr.h> 75#include <atari/atari/intr.h>
76#include <atari/atari/stalloc.h> 76#include <atari/atari/stalloc.h>
77#include <atari/dev/ym2149reg.h> 77#include <atari/dev/ym2149reg.h>
78 78
79#include "pci.h" 79#include "pci.h"
80 80
81void start_c __P((int, u_int, u_int, u_int, char *)); 81void start_c __P((int, u_int, u_int, u_int, char *));
82static void atari_hwinit __P((void)); 82static void atari_hwinit __P((void));
83static void cpu_init_kcorehdr __P((paddr_t, paddr_t)); 83static void cpu_init_kcorehdr __P((paddr_t, paddr_t));
84static void initcpu __P((void)); 84static void initcpu __P((void));
85static void mmu030_setup __P((paddr_t, u_int, paddr_t, psize_t, paddr_t, 85static void mmu030_setup __P((paddr_t, u_int, paddr_t, psize_t, paddr_t,
86 paddr_t)); 86 paddr_t));
87static void map_io_areas __P((paddr_t, psize_t, u_int)); 87static void map_io_areas __P((paddr_t, psize_t, u_int));
88static void set_machtype __P((void)); 88static void set_machtype __P((void));
89 89
90#if defined(M68040) || defined(M68060) 90#if defined(M68040) || defined(M68060)
91static void mmu040_setup __P((paddr_t, u_int, paddr_t, psize_t, paddr_t, 91static void mmu040_setup __P((paddr_t, u_int, paddr_t, psize_t, paddr_t,
92 paddr_t)); 92 paddr_t));
93#endif 93#endif
94 94
95/* 95/*
96 * Extent maps to manage all memory space, including I/O ranges. Allocate 96 * Extent maps to manage all memory space, including I/O ranges. Allocate
97 * storage for 8 regions in each, initially. Later, iomem_malloc_safe 97 * storage for 8 regions in each, initially. Later, iomem_malloc_safe
98 * will indicate that it's safe to use malloc() to dynamically allocate 98 * will indicate that it's safe to use malloc() to dynamically allocate
99 * region descriptors. 99 * region descriptors.
100 * This means that the fixed static storage is only used for registrating 100 * This means that the fixed static storage is only used for registrating
101 * the found memory regions and the bus-mapping of the console. 101 * the found memory regions and the bus-mapping of the console.
102 * 102 *
103 * The extent maps are not static! They are used for bus address space 103 * The extent maps are not static! They are used for bus address space
104 * allocation. 104 * allocation.
105 */ 105 */
106static long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)]; 106static long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
107struct extent *iomem_ex; 107struct extent *iomem_ex;
108int iomem_malloc_safe; 108int iomem_malloc_safe;
109 109
110/* 110/*
111 * All info needed to generate a panic dump. All fields are setup by 111 * All info needed to generate a panic dump. All fields are setup by
112 * start_c(). 112 * start_c().
113 * XXX: Should sheck usage of phys_segs. There is some unwanted overlap 113 * XXX: Should sheck usage of phys_segs. There is some unwanted overlap
114 * here.... Also, the name is badly choosen. Phys_segs contains the 114 * here.... Also, the name is badly choosen. Phys_segs contains the
115 * segment descriptions _after_ reservations are made. 115 * segment descriptions _after_ reservations are made.
116 * XXX: 'lowram' is obsoleted by the new panicdump format 116 * XXX: 'lowram' is obsoleted by the new panicdump format
117 */ 117 */
118static cpu_kcore_hdr_t cpu_kcore_hdr; 118static cpu_kcore_hdr_t cpu_kcore_hdr;
119 119
120extern u_int lowram; 120extern u_int lowram;
121extern u_int Sysptsize, proc0paddr; 121extern u_int Sysptsize, proc0paddr;
122extern pt_entry_t *Sysptmap; 122extern pt_entry_t *Sysptmap;
123extern st_entry_t *Sysseg; 123extern st_entry_t *Sysseg;
124int machineid, mmutype, cputype, astpending; 124int machineid, mmutype, cputype, astpending;
125char *vmmap; 125char *vmmap;
126#if defined(M68040) || defined(M68060) 126#if defined(M68040) || defined(M68060)
127extern int protostfree; 127extern int protostfree;
128#endif 128#endif
129 129
130extern char *esym; 130extern char *esym;
131extern struct pcb *curpcb; 131extern struct pcb *curpcb;
132 132
133/* 133/*
134 * This is the virtual address of physical page 0. Used by 'do_boot()'. 134 * This is the virtual address of physical page 0. Used by 'do_boot()'.
135 */ 135 */
136vaddr_t page_zero; 136vaddr_t page_zero;
137 137
138/* 138/*
139 * Crude support for allocation in ST-ram. Currently only used to allocate 139 * Crude support for allocation in ST-ram. Currently only used to allocate
140 * video ram. 140 * video ram.
141 * The physical address is also returned because the video init needs it to 141 * The physical address is also returned because the video init needs it to
142 * setup the controller at the time the vm-system is not yet operational so 142 * setup the controller at the time the vm-system is not yet operational so
143 * 'kvtop()' cannot be used. 143 * 'kvtop()' cannot be used.
144 */ 144 */
145#ifndef ST_POOL_SIZE 145#ifndef ST_POOL_SIZE
146#define ST_POOL_SIZE 40 /* XXX: enough? */ 146#define ST_POOL_SIZE 40 /* XXX: enough? */
147#endif 147#endif
148 148
149u_long st_pool_size = ST_POOL_SIZE * PAGE_SIZE; /* Patchable */ 149u_long st_pool_size = ST_POOL_SIZE * PAGE_SIZE; /* Patchable */
150u_long st_pool_virt, st_pool_phys; 150u_long st_pool_virt, st_pool_phys;
151 151
152/* 152/*
153 * Are we relocating the kernel to TT-Ram if possible? It is faster, but 153 * Are we relocating the kernel to TT-Ram if possible? It is faster, but
154 * it is also reported not to work on all TT's. So the default is NO. 154 * it is also reported not to work on all TT's. So the default is NO.
155 */ 155 */
156#ifndef RELOC_KERNEL 156#ifndef RELOC_KERNEL
157#define RELOC_KERNEL 0 157#define RELOC_KERNEL 0
158#endif 158#endif
159int reloc_kernel = RELOC_KERNEL; /* Patchable */ 159int reloc_kernel = RELOC_KERNEL; /* Patchable */
160 160
161/* 161/*
162 * this is the C-level entry function, it's called from locore.s. 162 * this is the C-level entry function, it's called from locore.s.
163 * Preconditions: 163 * Preconditions:
164 * Interrupts are disabled 164 * Interrupts are disabled
165 * PA == VA, we don't have to relocate addresses before enabling 165 * PA == VA, we don't have to relocate addresses before enabling
166 * the MMU 166 * the MMU
167 * Exec is no longer available (because we're loaded all over  167 * Exec is no longer available (because we're loaded all over
168 * low memory, no ExecBase is available anymore) 168 * low memory, no ExecBase is available anymore)
169 * 169 *
170 * It's purpose is: 170 * It's purpose is:
171 * Do the things that are done in locore.s in the hp300 version,  171 * Do the things that are done in locore.s in the hp300 version,
172 * this includes allocation of kernel maps and enabling the MMU. 172 * this includes allocation of kernel maps and enabling the MMU.
173 *  173 *
174 * Some of the code in here is `stolen' from Amiga MACH, and was  174 * Some of the code in here is `stolen' from Amiga MACH, and was
175 * written by Bryan Ford and Niklas Hallqvist. 175 * written by Bryan Ford and Niklas Hallqvist.
176 *  176 *
177 * Very crude 68040 support by Michael L. Hitch. 177 * Very crude 68040 support by Michael L. Hitch.
178 */ 178 */
179int kernel_copyback = 1; 179int kernel_copyback = 1;
180 180
181void 181void
182start_c(id, ttphystart, ttphysize, stphysize, esym_addr) 182start_c(id, ttphystart, ttphysize, stphysize, esym_addr)
183int id; /* Machine id */ 183int id; /* Machine id */
184u_int ttphystart, ttphysize; /* Start address and size of TT-ram */ 184u_int ttphystart, ttphysize; /* Start address and size of TT-ram */
185u_int stphysize; /* Size of ST-ram */ 185u_int stphysize; /* Size of ST-ram */
186char *esym_addr; /* Address of kernel '_esym' symbol */ 186char *esym_addr; /* Address of kernel '_esym' symbol */
187{ 187{
188 extern char end[]; 188 extern char end[];
189 extern void etext __P((void)); 189 extern void etext __P((void));
190 extern u_long protorp[2]; 190 extern u_long protorp[2];
191 paddr_t pstart; /* Next available physical address */ 191 paddr_t pstart; /* Next available physical address */
192 vaddr_t vstart; /* Next available virtual address */ 192 vaddr_t vstart; /* Next available virtual address */
193 vsize_t avail; 193 vsize_t avail;
194 paddr_t ptpa; 194 paddr_t ptpa;
195 psize_t ptsize; 195 psize_t ptsize;
196 u_int ptextra; 196 u_int ptextra;
197 vaddr_t kva; 197 vaddr_t kva;
198 u_int tc, i; 198 u_int tc, i;
199 pt_entry_t *pg, *epg; 199 pt_entry_t *pg, *epg;
200 pt_entry_t pg_proto; 200 pt_entry_t pg_proto;
201 vaddr_t end_loaded; 201 vaddr_t end_loaded;
202 paddr_t kbase; 202 paddr_t kbase;
203 u_int kstsize; 203 u_int kstsize;
204 paddr_t Sysseg_pa; 204 paddr_t Sysseg_pa;
205 paddr_t Sysptmap_pa; 205 paddr_t Sysptmap_pa;
206 206
207#if defined(_MILANHW_) 207#if defined(_MILANHW_)
208 /* XXX 208 /* XXX
209 * XXX The right place todo this is probably the booter (Leo) 209 * XXX The right place todo this is probably the booter (Leo)
210 * XXX More than 16MB memory is not yet supported on the Milan! 210 * XXX More than 16MB memory is not yet supported on the Milan!
211 * The Milan Lies about the presence of TT-RAM. If you insert 211 * The Milan Lies about the presence of TT-RAM. If you insert
212 * 16MB it is split in 14MB ST starting at address 0 and 2MB TT RAM, 212 * 16MB it is split in 14MB ST starting at address 0 and 2MB TT RAM,
213 * starting at address 16MB.  213 * starting at address 16MB.
214 */ 214 */
215 stphysize += ttphysize; 215 stphysize += ttphysize;
216 ttphysize = ttphystart = 0; 216 ttphysize = ttphystart = 0;
217#endif 217#endif
218 boot_segs[0].start = 0; 218 boot_segs[0].start = 0;
219 boot_segs[0].end = stphysize; 219 boot_segs[0].end = stphysize;
220 boot_segs[1].start = ttphystart; 220 boot_segs[1].start = ttphystart;
221 boot_segs[1].end = ttphystart + ttphysize; 221 boot_segs[1].end = ttphystart + ttphysize;
222 boot_segs[2].start = boot_segs[2].end = 0; /* End of segments! */ 222 boot_segs[2].start = boot_segs[2].end = 0; /* End of segments! */
223 223
224 /* 224 /*
225 * The following is a hack. We do not know how much ST memory we 225 * The following is a hack. We do not know how much ST memory we
226 * really need until after configuration has finished. At this 226 * really need until after configuration has finished. At this
227 * time I have no idea how to grab ST memory at that time. 227 * time I have no idea how to grab ST memory at that time.
228 * The round_page() call is ment to correct errors made by 228 * The round_page() call is ment to correct errors made by
229 * binpatching! 229 * binpatching!
230 */ 230 */
231 st_pool_size = m68k_round_page(st_pool_size); 231 st_pool_size = m68k_round_page(st_pool_size);
232 st_pool_phys = stphysize - st_pool_size; 232 st_pool_phys = stphysize - st_pool_size;
233 stphysize = st_pool_phys; 233 stphysize = st_pool_phys;
234 234
235 machineid = id; 235 machineid = id;
236 esym = esym_addr; 236 esym = esym_addr;
237 237
238 /*  238 /*
239 * the kernel ends at end() or esym. 239 * the kernel ends at end() or esym.
240 */ 240 */
241 if(esym == NULL) 241 if(esym == NULL)
242 end_loaded = (vaddr_t)&end; 242 end_loaded = (vaddr_t)&end;
243 else 243 else
244 end_loaded = (vaddr_t)esym; 244 end_loaded = (vaddr_t)esym;
245 245
246 /* 246 /*
247 * If we have enough fast-memory to put the kernel in and the 247 * If we have enough fast-memory to put the kernel in and the
248 * RELOC_KERNEL option is set, do it! 248 * RELOC_KERNEL option is set, do it!
249 */ 249 */
250 if((reloc_kernel != 0) && (ttphysize >= end_loaded)) 250 if((reloc_kernel != 0) && (ttphysize >= end_loaded))
251 kbase = ttphystart; 251 kbase = ttphystart;
252 else 252 else
253 kbase = 0; 253 kbase = 0;
254 254
255 /* 255 /*
256 * Determine the type of machine we are running on. This needs 256 * Determine the type of machine we are running on. This needs
257 * to be done early (and before initcpu())! 257 * to be done early (and before initcpu())!
258 */ 258 */
259 set_machtype(); 259 set_machtype();
260 260
261 /* 261 /*
262 * Initialize CPU specific stuff 262 * Initialize CPU specific stuff
263 */ 263 */
264 initcpu(); 264 initcpu();
265 265
266 /* 266 /*
267 * We run the kernel from ST memory at the moment. 267 * We run the kernel from ST memory at the moment.
268 * The kernel segment table is put just behind the loaded image. 268 * The kernel segment table is put just behind the loaded image.
269 * pstart: start of usable ST memory 269 * pstart: start of usable ST memory
270 * avail : size of ST memory available. 270 * avail : size of ST memory available.
271 */ 271 */
272 vstart = (vaddr_t)end_loaded; 272 vstart = (vaddr_t)end_loaded;
273 vstart = m68k_round_page(vstart); 273 vstart = m68k_round_page(vstart);
274 pstart = (paddr_t)vstart; /* pre-reloc PA == kernel VA here */ 274 pstart = (paddr_t)vstart; /* pre-reloc PA == kernel VA here */
275 avail = stphysize - pstart; 275 avail = stphysize - pstart;
276 276
277 /* 277 /*
278 * Save KVA of proc0 user-area and allocate it 278 * Save KVA of proc0 user-area and allocate it
279 */ 279 */
280 proc0paddr = vstart; 280 proc0paddr = vstart;
281 pstart += USPACE; 281 pstart += USPACE;
282 vstart += USPACE; 282 vstart += USPACE;
283 avail -= USPACE; 283 avail -= USPACE;
284 284
285 /* 285 /*
286 * Calculate the number of pages needed for Sysseg. 286 * Calculate the number of pages needed for Sysseg.
287 * For the 68030, we need 256 descriptors (segment-table-entries). 287 * For the 68030, we need 256 descriptors (segment-table-entries).
288 * This easily fits into one page. 288 * This easily fits into one page.
289 * For the 68040, both the level-1 and level-2 descriptors are 289 * For the 68040, both the level-1 and level-2 descriptors are
290 * stored into Sysseg. We currently handle a maximum sum of MAXKL2SIZE 290 * stored into Sysseg. We currently handle a maximum sum of MAXKL2SIZE
291 * level-1 & level-2 tables. 291 * level-1 & level-2 tables.
292 */ 292 */
293#if defined(M68040) || defined(M68060) 293#if defined(M68040) || defined(M68060)
294 if (mmutype == MMU_68040) 294 if (mmutype == MMU_68040)
295 kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE); 295 kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
296 else 296 else
297#endif 297#endif
298 kstsize = 1; 298 kstsize = 1;
299 /* 299 /*
300 * allocate the kernel segment table 300 * allocate the kernel segment table
301 */ 301 */
302 Sysseg_pa = pstart; /* pre-reloc PA to init STEs */ 302 Sysseg_pa = pstart; /* pre-reloc PA to init STEs */
303 Sysseg = (st_entry_t *)vstart; 303 Sysseg = (st_entry_t *)vstart;
304 pstart += kstsize * PAGE_SIZE; 304 pstart += kstsize * PAGE_SIZE;
305 vstart += kstsize * PAGE_SIZE; 305 vstart += kstsize * PAGE_SIZE;
306 avail -= kstsize * PAGE_SIZE; 306 avail -= kstsize * PAGE_SIZE;
307 307
308 /* 308 /*
309 * allocate kernel page table map 309 * allocate kernel page table map
310 */ 310 */
311 Sysptmap_pa = pstart; /* pre-reloc PA to init PTEs */ 311 Sysptmap_pa = pstart; /* pre-reloc PA to init PTEs */
312 Sysptmap = (pt_entry_t *)vstart; 312 Sysptmap = (pt_entry_t *)vstart;
313 pstart += PAGE_SIZE; 313 pstart += PAGE_SIZE;
314 vstart += PAGE_SIZE; 314 vstart += PAGE_SIZE;
315 avail -= PAGE_SIZE; 315 avail -= PAGE_SIZE;
316 316
317 /* 317 /*
318 * Determine the number of pte's we need for extra's like 318 * Determine the number of pte's we need for extra's like
319 * ST I/O map's. 319 * ST I/O map's.
320 */ 320 */
321 ptextra = btoc(STIO_SIZE); 321 ptextra = btoc(STIO_SIZE);
322 322
323 /* 323 /*
324 * If present, add pci areas 324 * If present, add pci areas
325 */ 325 */
326 if (machineid & ATARI_HADES) 326 if (machineid & ATARI_HADES)
327 ptextra += btoc(PCI_CONF_SIZE + PCI_IO_SIZE + PCI_MEM_SIZE); 327 ptextra += btoc(PCI_CONF_SIZE + PCI_IO_SIZE + PCI_MEM_SIZE);
328 if (machineid & ATARI_MILAN) 328 if (machineid & ATARI_MILAN)
329 ptextra += btoc(PCI_IO_SIZE + PCI_MEM_SIZE); 329 ptextra += btoc(PCI_IO_SIZE + PCI_MEM_SIZE);
330 ptextra += btoc(BOOTM_VA_POOL); 330 ptextra += btoc(BOOTM_VA_POOL);
331 331
332 /* 332 /*
333 * The 'pt' (the initial kernel pagetable) has to map the kernel and 333 * The 'pt' (the initial kernel pagetable) has to map the kernel and
334 * the I/O areas. The various I/O areas are mapped (virtually) at 334 * the I/O areas. The various I/O areas are mapped (virtually) at
335 * the top of the address space mapped by 'pt' (ie. just below Sysmap). 335 * the top of the address space mapped by 'pt' (ie. just below Sysmap).
336 */ 336 */
337 ptpa = pstart; /* pre-reloc PA to init PTEs */ 337 ptpa = pstart; /* pre-reloc PA to init PTEs */
338 ptsize = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT; 338 ptsize = (Sysptsize + howmany(ptextra, NPTEPG)) << PGSHIFT;
339 pstart += ptsize; 339 pstart += ptsize;
340 vstart += ptsize; 340 vstart += ptsize;
341 avail -= ptsize; 341 avail -= ptsize;
342 342
343 /* 343 /*
344 * Sysmap is now placed at the end of Supervisor virtual address space. 344 * Sysmap is now placed at the end of Supervisor virtual address space.
345 */ 345 */
346 Sysmap = (pt_entry_t *)-(NPTEPG * PAGE_SIZE); 346 Sysmap = (pt_entry_t *)-(NPTEPG * PAGE_SIZE);
347 347
348 /* 348 /*
349 * Initialize segment tables 349 * Initialize segment tables
350 */ 350 */
351#if defined(M68040) || defined(M68060) 351#if defined(M68040) || defined(M68060)
352 if (mmutype == MMU_68040) 352 if (mmutype == MMU_68040)
353 mmu040_setup(Sysseg_pa, kstsize, ptpa, ptsize, Sysptmap_pa, 353 mmu040_setup(Sysseg_pa, kstsize, ptpa, ptsize, Sysptmap_pa,
354 kbase); 354 kbase);
355 else 355 else
356#endif /* defined(M68040) || defined(M68060) */ 356#endif /* defined(M68040) || defined(M68060) */
357 mmu030_setup(Sysseg_pa, kstsize, ptpa, ptsize, Sysptmap_pa, 357 mmu030_setup(Sysseg_pa, kstsize, ptpa, ptsize, Sysptmap_pa,
358 kbase); 358 kbase);
359 359
360 /* 360 /*
361 * initialize kernel page table page(s). 361 * initialize kernel page table page(s).
362 * Assume load at VA 0. 362 * Assume load at VA 0.
363 * - Text pages are RO 363 * - Text pages are RO
364 * - Page zero is invalid 364 * - Page zero is invalid
365 */ 365 */
366 pg_proto = (0 + kbase) /* relocated PA */ | PG_RO | PG_V; 366 pg_proto = (0 + kbase) /* relocated PA */ | PG_RO | PG_V;
367 pg = (pt_entry_t *)ptpa; 367 pg = (pt_entry_t *)ptpa;
368 *pg++ = PG_NV; 368 *pg++ = PG_NV;
369 369
370 pg_proto += PAGE_SIZE; 370 pg_proto += PAGE_SIZE;
371 for (kva = PAGE_SIZE; kva < (vaddr_t)etext; kva += PAGE_SIZE) { 371 for (kva = PAGE_SIZE; kva < (vaddr_t)etext; kva += PAGE_SIZE) {
372 *pg++ = pg_proto; 372 *pg++ = pg_proto;
373 pg_proto += PAGE_SIZE; 373 pg_proto += PAGE_SIZE;
374 } 374 }
375 375
376 /*  376 /*
377 * data, bss and dynamic tables are read/write 377 * data, bss and dynamic tables are read/write
378 */ 378 */
379 pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V; 379 pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;
380 380
381#if defined(M68040) || defined(M68060) 381#if defined(M68040) || defined(M68060)
382 /* 382 /*
383 * Map the kernel segment table cache invalidated for  383 * Map the kernel segment table cache invalidated for
384 * these machines (for the 68040 not strictly necessary, but 384 * these machines (for the 68040 not strictly necessary, but
385 * recommended by Motorola; for the 68060 mandatory) 385 * recommended by Motorola; for the 68060 mandatory)
386 */ 386 */
387 if (mmutype == MMU_68040) { 387 if (mmutype == MMU_68040) {
388 388
389 if (kernel_copyback) 389 if (kernel_copyback)
390 pg_proto |= PG_CCB; 390 pg_proto |= PG_CCB;
391 391
392 for (; kva < (vaddr_t)Sysseg; kva += PAGE_SIZE) { 392 for (; kva < (vaddr_t)Sysseg; kva += PAGE_SIZE) {
393 *pg++ = pg_proto; 393 *pg++ = pg_proto;
394 pg_proto += PAGE_SIZE; 394 pg_proto += PAGE_SIZE;
395 } 395 }
396 396
397 pg_proto = (pg_proto & ~PG_CCB) | PG_CI; 397 pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
398 for (; kva < (vaddr_t)Sysptmap; kva += PAGE_SIZE) { 398 for (; kva < (vaddr_t)Sysptmap; kva += PAGE_SIZE) {
399 *pg++ = pg_proto; 399 *pg++ = pg_proto;
400 pg_proto += PAGE_SIZE; 400 pg_proto += PAGE_SIZE;
401 } 401 }
402 402
403 pg_proto = (pg_proto & ~PG_CI); 403 pg_proto = (pg_proto & ~PG_CI);
404 if (kernel_copyback) 404 if (kernel_copyback)
405 pg_proto |= PG_CCB; 405 pg_proto |= PG_CCB;
406 } 406 }
407#endif /* defined(M68040) || defined(M68060) */ 407#endif /* defined(M68040) || defined(M68060) */
408 408
409 /* 409 /*
410 * go till end of data allocated so far 410 * go till end of data allocated so far
411 * plus proc0 u-area (to be allocated) 411 * plus proc0 u-area (to be allocated)
412 */ 412 */
413 for (; kva < vstart; kva += PAGE_SIZE) { 413 for (; kva < vstart; kva += PAGE_SIZE) {
414 *pg++ = pg_proto; 414 *pg++ = pg_proto;
415 pg_proto += PAGE_SIZE; 415 pg_proto += PAGE_SIZE;
416 } 416 }
417 417
418 /* 418 /*
419 * invalidate remainder of kernel PT 419 * invalidate remainder of kernel PT
420 */ 420 */
421 epg = (pt_entry_t *)ptpa; 421 epg = (pt_entry_t *)ptpa;
422 epg = &epg[ptsize / sizeof(pt_entry_t)]; 422 epg = &epg[ptsize / sizeof(pt_entry_t)];
423 while (pg < epg) 423 while (pg < epg)
424 *pg++ = PG_NV; 424 *pg++ = PG_NV;
425 425
426 /* 426 /*
427 * Map various I/O areas 427 * Map various I/O areas
428 */ 428 */
429 map_io_areas(ptpa, ptsize, ptextra); 429 map_io_areas(ptpa, ptsize, ptextra);
430 430
431 /* 431 /*
432 * Map the allocated space in ST-ram now. In the contig-case, there 432 * Map the allocated space in ST-ram now. In the contig-case, there
433 * is no need to make a distinction between virtual and physical 433 * is no need to make a distinction between virtual and physical
434 * addresses. But I make it anyway to be prepared. 434 * addresses. But I make it anyway to be prepared.
435 * Physcal space is already reserved! 435 * Physcal space is already reserved!
436 */ 436 */
437 st_pool_virt = vstart; 437 st_pool_virt = vstart;
438 pg = (pt_entry_t *)ptpa; 438 pg = (pt_entry_t *)ptpa;
439 pg = &pg[vstart / PAGE_SIZE]; 439 pg = &pg[vstart / PAGE_SIZE];
440 pg_proto = st_pool_phys | PG_RW | PG_CI | PG_V; 440 pg_proto = st_pool_phys | PG_RW | PG_CI | PG_V;
441 vstart += st_pool_size; 441 vstart += st_pool_size;
442 while(pg_proto < (st_pool_phys + st_pool_size)) { 442 while(pg_proto < (st_pool_phys + st_pool_size)) {
443 *pg++ = pg_proto; 443 *pg++ = pg_proto;
444 pg_proto += PAGE_SIZE; 444 pg_proto += PAGE_SIZE;
445 } 445 }
446 446
447 /* 447 /*
448 * Map physical page_zero and page-zero+1 (First ST-ram page). We need 448 * Map physical page_zero and page-zero+1 (First ST-ram page). We need
449 * to reference it in the reboot code. Two pages are mapped, because 449 * to reference it in the reboot code. Two pages are mapped, because
450 * we must make sure 'doboot()' is contained in it (see the tricky 450 * we must make sure 'doboot()' is contained in it (see the tricky
451 * copying there....). 451 * copying there....).
452 */ 452 */
453 page_zero = vstart; 453 page_zero = vstart;
454 pg = (pt_entry_t *)ptpa; 454 pg = (pt_entry_t *)ptpa;
455 pg = &pg[vstart / PAGE_SIZE]; 455 pg = &pg[vstart / PAGE_SIZE];
456 *pg++ = PG_RW | PG_CI | PG_V; 456 *pg++ = PG_RW | PG_CI | PG_V;
457 vstart += PAGE_SIZE; 457 vstart += PAGE_SIZE;
458 *pg = PG_RW | PG_CI | PG_V | PAGE_SIZE; 458 *pg = PG_RW | PG_CI | PG_V | PAGE_SIZE;
459 vstart += PAGE_SIZE; 459 vstart += PAGE_SIZE;
460 460
461 /* 461 /*
462 * All necessary STEs and PTEs have been initialized. 462 * All necessary STEs and PTEs have been initialized.
463 * Update Sysseg_pa and Sysptmap_pa to point relocated PA. 463 * Update Sysseg_pa and Sysptmap_pa to point relocated PA.
464 */ 464 */
465 if (kbase) { 465 if (kbase) {
466 Sysseg_pa += kbase; 466 Sysseg_pa += kbase;
467 Sysptmap_pa += kbase; 467 Sysptmap_pa += kbase;
468 } 468 }
469 469
470 lowram = 0 >> PGSHIFT; /* XXX */ 470 lowram = 0 >> PGSHIFT; /* XXX */
471 471
472 /* 472 /*
473 * Fill in usable segments. The page indexes will be initialized 473 * Fill in usable segments. The page indexes will be initialized
474 * later when all reservations are made. 474 * later when all reservations are made.
475 */ 475 */
476 usable_segs[0].start = 0; 476 usable_segs[0].start = 0;
477 usable_segs[0].end = stphysize; 477 usable_segs[0].end = stphysize;
478 usable_segs[1].start = ttphystart; 478 usable_segs[1].start = ttphystart;
479 usable_segs[1].end = ttphystart + ttphysize; 479 usable_segs[1].end = ttphystart + ttphysize;
480 usable_segs[2].start = usable_segs[2].end = 0; /* End of segments! */ 480 usable_segs[2].start = usable_segs[2].end = 0; /* End of segments! */
481 481
482 if(kbase) { 482 if(kbase) {
483 /* 483 /*
484 * First page of ST-ram is unusable, reserve the space 484 * First page of ST-ram is unusable, reserve the space
485 * for the kernel in the TT-ram segment. 485 * for the kernel in the TT-ram segment.
486 * Note: Because physical page-zero is partially mapped to ROM 486 * Note: Because physical page-zero is partially mapped to ROM
487 * by hardware, it is unusable. 487 * by hardware, it is unusable.
488 */ 488 */
489 usable_segs[0].start = PAGE_SIZE; 489 usable_segs[0].start = PAGE_SIZE;
490 usable_segs[1].start += pstart; 490 usable_segs[1].start += pstart;
491 } 491 }
492 else 492 else
493 usable_segs[0].start += pstart; 493 usable_segs[0].start += pstart;
494 494
495 /* 495 /*
496 * As all segment sizes are now valid, calculate page indexes and 496 * As all segment sizes are now valid, calculate page indexes and
497 * available physical memory. 497 * available physical memory.
498 */ 498 */
499 usable_segs[0].first_page = 0; 499 usable_segs[0].first_page = 0;
500 for (i = 1; usable_segs[i].start; i++) { 500 for (i = 1; usable_segs[i].start; i++) {
501 usable_segs[i].first_page = usable_segs[i-1].first_page; 501 usable_segs[i].first_page = usable_segs[i-1].first_page;
502 usable_segs[i].first_page += 502 usable_segs[i].first_page +=
503 (usable_segs[i-1].end - usable_segs[i-1].start) / PAGE_SIZE; 503 (usable_segs[i-1].end - usable_segs[i-1].start) / PAGE_SIZE;
504 } 504 }
505 for (i = 0, physmem = 0; usable_segs[i].start; i++) 505 for (i = 0, physmem = 0; usable_segs[i].start; i++)
506 physmem += usable_segs[i].end - usable_segs[i].start; 506 physmem += usable_segs[i].end - usable_segs[i].start;
507 physmem >>= PGSHIFT; 507 physmem >>= PGSHIFT;
508 508
509 /* 509 /*
510 * get the pmap module in sync with reality. 510 * get the pmap module in sync with reality.
511 */ 511 */
512 pmap_bootstrap(vstart, Sysseg_pa); 512 pmap_bootstrap(vstart, Sysseg_pa);
513 513
514 /* 514 /*
515 * Prepare to enable the MMU. 515 * Prepare to enable the MMU.
516 * Setup and load SRP nolimit, share global, 4 byte PTE's 516 * Setup and load SRP nolimit, share global, 4 byte PTE's
517 */ 517 */
518 protorp[0] = 0x80000202; 518 protorp[0] = 0x80000202;
519 protorp[1] = Sysseg_pa; /* + segtable address */ 519 protorp[1] = Sysseg_pa; /* + segtable address */
520 520
521 cpu_init_kcorehdr(kbase, Sysseg_pa); 521 cpu_init_kcorehdr(kbase, Sysseg_pa);
522 522
523 /* 523 /*
524 * copy over the kernel (and all now initialized variables)  524 * copy over the kernel (and all now initialized variables)
525 * to fastram. DONT use bcopy(), this beast is much larger  525 * to fastram. DONT use bcopy(), this beast is much larger
526 * than 128k ! 526 * than 128k !
527 */ 527 */
528 if(kbase) { 528 if(kbase) {
529 register paddr_t *lp, *le, *fp; 529 register paddr_t *lp, *le, *fp;
530 530
531 lp = (paddr_t *)0; 531 lp = (paddr_t *)0;
532 le = (paddr_t *)pstart; 532 le = (paddr_t *)pstart;
533 fp = (paddr_t *)kbase; 533 fp = (paddr_t *)kbase;
534 while(lp < le) 534 while(lp < le)
535 *fp++ = *lp++; 535 *fp++ = *lp++;
536 } 536 }
537#if defined(M68040) || defined(M68060) 537#if defined(M68040) || defined(M68060)
538 if (mmutype == MMU_68040) { 538 if (mmutype == MMU_68040) {
539 /* 539 /*
540 * movel Sysseg_pa,a0; 540 * movel Sysseg_pa,a0;
541 * movec a0,SRP; 541 * movec a0,SRP;
542 * pflusha; 542 * pflusha;
543 * movel #$0xc000,d0; 543 * movel #$0xc000,d0;
544 * movec d0,TC 544 * movec d0,TC
545 */ 545 */
546 if (cputype == CPU_68060) { 546 if (cputype == CPU_68060) {
547 /* XXX: Need the branch cache be cleared? */ 547 /* XXX: Need the branch cache be cleared? */
548 __asm volatile (".word 0x4e7a,0x0002;"  548 __asm volatile (".word 0x4e7a,0x0002;"
549 "orl #0x400000,%%d0;"  549 "orl #0x400000,%%d0;"
550 ".word 0x4e7b,0x0002" : : : "d0"); 550 ".word 0x4e7b,0x0002" : : : "d0");
551 } 551 }
552 __asm volatile ("movel %0,%%a0;" 552 __asm volatile ("movel %0,%%a0;"
553 ".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0"); 553 ".word 0x4e7b,0x8807" : : "a" (Sysseg_pa) : "a0");
554 __asm volatile (".word 0xf518" : : ); 554 __asm volatile (".word 0xf518" : : );
555 __asm volatile ("movel #0xc000,%%d0;" 555 __asm volatile ("movel #0xc000,%%d0;"
556 ".word 0x4e7b,0x0003" : : : "d0" ); 556 ".word 0x4e7b,0x0003" : : : "d0" );
557 } else 557 } else
558#endif 558#endif
559 { 559 {
560 __asm volatile ("pmove %0@,%%srp" : : "a" (&protorp[0])); 560 __asm volatile ("pmove %0@,%%srp" : : "a" (&protorp[0]));
561 /* 561 /*
562 * setup and load TC register. 562 * setup and load TC register.
563 * enable_cpr, enable_srp, pagesize=8k, 563 * enable_cpr, enable_srp, pagesize=8k,
564 * A = 8 bits, B = 11 bits 564 * A = 8 bits, B = 11 bits
565 */ 565 */
566 tc = 0x82d08b00; 566 tc = 0x82d08b00;
567 __asm volatile ("pmove %0@,%%tc" : : "a" (&tc)); 567 __asm volatile ("pmove %0@,%%tc" : : "a" (&tc));
568 } 568 }
569 569
570 /* Is this to fool the optimizer?? */ 570 /* Is this to fool the optimizer?? */
571 i = *(int *)proc0paddr; 571 i = *(int *)proc0paddr;
572 *(volatile int *)proc0paddr = i; 572 *(volatile int *)proc0paddr = i;
573 573
574 /* 574 /*
575 * Initialize the "u-area" pages. 575 * Initialize the "u-area" pages.
576 * Must initialize p_addr before autoconfig or the 576 * Must initialize p_addr before autoconfig or the
577 * fault handler will get a NULL reference. 577 * fault handler will get a NULL reference.
578 */ 578 */
579 bzero((u_char *)proc0paddr, USPACE); 579 bzero((u_char *)proc0paddr, USPACE);
580 lwp0.l_addr = (struct user *)proc0paddr; 580 lwp0.l_addr = (struct user *)proc0paddr;
581 curlwp = &lwp0; 581 curlwp = &lwp0;
582 curpcb = &((struct user *)proc0paddr)->u_pcb; 582 curpcb = &((struct user *)proc0paddr)->u_pcb;
583 583
584 /* 584 /*
585 * Get the hardware into a defined state 585 * Get the hardware into a defined state
586 */ 586 */
587 atari_hwinit(); 587 atari_hwinit();
588 588
589 /* 589 /*
590 * Initialize stmem allocator 590 * Initialize stmem allocator
591 */ 591 */
592 init_stmem(); 592 init_stmem();
593 593
594 /* 594 /*
595 * Initialize the I/O mem extent map. 595 * Initialize the I/O mem extent map.
596 * Note: we don't have to check the return value since 596 * Note: we don't have to check the return value since
597 * creation of a fixed extent map will never fail (since 597 * creation of a fixed extent map will never fail (since
598 * descriptor storage has already been allocated). 598 * descriptor storage has already been allocated).
599 * 599 *
600 * N.B. The iomem extent manages _all_ physical addresses 600 * N.B. The iomem extent manages _all_ physical addresses
601 * on the machine. When the amount of RAM is found, all 601 * on the machine. When the amount of RAM is found, all
602 * extents of RAM are allocated from the map. 602 * extents of RAM are allocated from the map.
603 */ 603 */
604 iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF, 604 iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
605 (void *)iomem_ex_storage, sizeof(iomem_ex_storage), 605 (void *)iomem_ex_storage, sizeof(iomem_ex_storage),
606 EX_NOCOALESCE|EX_NOWAIT); 606 EX_NOCOALESCE|EX_NOWAIT);
607 607
608 /* 608 /*
609 * Allocate the physical RAM from the extent map 609 * Allocate the physical RAM from the extent map
610 */ 610 */
611 for (i = 0; boot_segs[i].end != 0; i++) { 611 for (i = 0; boot_segs[i].end != 0; i++) {
612 if (extent_alloc_region(iomem_ex, boot_segs[i].start, 612 if (extent_alloc_region(iomem_ex, boot_segs[i].start,
613 boot_segs[i].end - boot_segs[i].start, EX_NOWAIT)) { 613 boot_segs[i].end - boot_segs[i].start, EX_NOWAIT)) {
614 /* XXX: Ahum, should not happen ;-) */ 614 /* XXX: Ahum, should not happen ;-) */
615 printf("Warning: Cannot allocate boot memory from" 615 printf("Warning: Cannot allocate boot memory from"
616 " extent map!?\n"); 616 " extent map!?\n");
617 } 617 }
618 } 618 }
619 619
620 /* 620 /*
621 * Initialize interrupt mapping. 621 * Initialize interrupt mapping.
622 */ 622 */
623 intr_init(); 623 intr_init();
624} 624}
625 625
626/* 626/*
627 * Try to figure out on what type of machine we are running 627 * Try to figure out on what type of machine we are running
628 * Note: This module runs *before* the io-mapping is setup! 628 * Note: This module runs *before* the io-mapping is setup!
629 */ 629 */
630static void 630static void
631set_machtype() 631set_machtype()
632{ 632{
633#ifdef _MILANHW_ 633#ifdef _MILANHW_
634 machineid |= ATARI_MILAN; 634 machineid |= ATARI_MILAN;
635 635
636#else 636#else
637 stio_addr = 0xff8000; /* XXX: For TT & Falcon only */ 637 stio_addr = 0xff8000; /* XXX: For TT & Falcon only */
638 if(badbaddr((void *)__UNVOLATILE(&MFP2->mf_gpip), sizeof(char))) { 638 if(badbaddr((void *)__UNVOLATILE(&MFP2->mf_gpip), sizeof(char))) {
639 /* 639 /*
640 * Watch out! We can also have a Hades with < 16Mb 640 * Watch out! We can also have a Hades with < 16Mb
641 * RAM here... 641 * RAM here...
642 */ 642 */
643 if(!badbaddr((void *)__UNVOLATILE(&MFP->mf_gpip), 643 if(!badbaddr((void *)__UNVOLATILE(&MFP->mf_gpip),
644 sizeof(char))) { 644 sizeof(char))) {
645 machineid |= ATARI_FALCON; 645 machineid |= ATARI_FALCON;
646 return; 646 return;
647 } 647 }
648 } 648 }
649 if(!badbaddr((void *)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char))) 649 if(!badbaddr((void *)(PCI_CONFB_PHYS + PCI_CONFM_PHYS), sizeof(char)))
650 machineid |= ATARI_HADES; 650 machineid |= ATARI_HADES;
651 else machineid |= ATARI_TT; 651 else machineid |= ATARI_TT;
652#endif /* _MILANHW_ */ 652#endif /* _MILANHW_ */
653} 653}
654 654
655static void 655static void
656atari_hwinit() 656atari_hwinit()
657{ 657{
658#if defined(_ATARIHW_) 658#if defined(_ATARIHW_)
659 /* 659 /*
660 * Initialize the sound chip 660 * Initialize the sound chip
661 */ 661 */
662 ym2149_init(); 662 ym2149_init();
663 663
664 /* 664 /*
665 * Make sure that the midi acia will not generate an interrupt 665 * Make sure that the midi acia will not generate an interrupt
666 * unless something attaches to it. We cannot do this for the 666 * unless something attaches to it. We cannot do this for the
667 * keyboard acia because this breaks the '-d' option of the 667 * keyboard acia because this breaks the '-d' option of the
668 * booter... 668 * booter...
669 */ 669 */
670 MDI->ac_cs = 0; 670 MDI->ac_cs = 0;
671#endif /* defined(_ATARIHW_) */ 671#endif /* defined(_ATARIHW_) */
672 672
673 /* 673 /*
674 * Initialize both MFP chips (if both present!) to generate 674 * Initialize both MFP chips (if both present!) to generate
675 * auto-vectored interrupts with EOI. The active-edge registers are 675 * auto-vectored interrupts with EOI. The active-edge registers are
676 * set up. The interrupt enable registers are set to disable all 676 * set up. The interrupt enable registers are set to disable all
677 * interrupts. 677 * interrupts.
678 */ 678 */
679 MFP->mf_iera = MFP->mf_ierb = 0; 679 MFP->mf_iera = MFP->mf_ierb = 0;
680 MFP->mf_imra = MFP->mf_imrb = 0; 680 MFP->mf_imra = MFP->mf_imrb = 0;
681 MFP->mf_aer = MFP->mf_ddr = 0; 681 MFP->mf_aer = MFP->mf_ddr = 0;
682 MFP->mf_vr = 0x40; 682 MFP->mf_vr = 0x40;
683 683
684#if defined(_ATARIHW_) 684#if defined(_ATARIHW_)
685 if(machineid & (ATARI_TT|ATARI_HADES)) { 685 if(machineid & (ATARI_TT|ATARI_HADES)) {
686 MFP2->mf_iera = MFP2->mf_ierb = 0; 686 MFP2->mf_iera = MFP2->mf_ierb = 0;
687 MFP2->mf_imra = MFP2->mf_imrb = 0; 687 MFP2->mf_imra = MFP2->mf_imrb = 0;
688 MFP2->mf_aer = 0x80; 688 MFP2->mf_aer = 0x80;
689 MFP2->mf_vr = 0x50; 689 MFP2->mf_vr = 0x50;
690 } 690 }
691 691
692 if(machineid & ATARI_TT) { 692 if(machineid & ATARI_TT) {
693 /* 693 /*
694 * Initialize the SCU, to enable interrupts on the SCC (ipl5), 694 * Initialize the SCU, to enable interrupts on the SCC (ipl5),
695 * MFP (ipl6) and softints (ipl1). 695 * MFP (ipl6) and softints (ipl1).
696 */ 696 */
697 SCU->sys_mask = SCU_SYS_SOFT; 697 SCU->sys_mask = SCU_SYS_SOFT;
698 SCU->vme_mask = SCU_MFP | SCU_SCC; 698 SCU->vme_mask = SCU_MFP | SCU_SCC;
699#ifdef DDB 699#ifdef DDB
700 /* 700 /*
701 * This allows people with the correct hardware modification 701 * This allows people with the correct hardware modification
702 * to drop into the debugger from an NMI. 702 * to drop into the debugger from an NMI.
703 */ 703 */
704 SCU->sys_mask |= SCU_IRQ7; 704 SCU->sys_mask |= SCU_IRQ7;
705#endif 705#endif
706 } 706 }
707#endif /* defined(_ATARIHW_) */ 707#endif /* defined(_ATARIHW_) */
708 708
709#if NPCI > 0 709#if NPCI > 0
710 if(machineid & (ATARI_HADES|ATARI_MILAN)) { 710 if(machineid & (ATARI_HADES|ATARI_MILAN)) {
711 /* 711 /*
712 * Configure PCI-bus 712 * Configure PCI-bus
713 */ 713 */
714 init_pci_bus(); 714 init_pci_bus();
715 } 715 }
716#endif 716#endif
717 717
718} 718}
719 719
720/* 720/*
721 * Do the dull work of mapping the various I/O areas. They MUST be Cache 721 * Do the dull work of mapping the various I/O areas. They MUST be Cache
722 * inhibited! 722 * inhibited!
723 * All I/O areas are virtually mapped at the end of the pt-table. 723 * All I/O areas are virtually mapped at the end of the pt-table.
724 */ 724 */
725static void 725static void
726map_io_areas(ptpa, ptsize, ptextra) 726map_io_areas(ptpa, ptsize, ptextra)
727 paddr_t ptpa; 727 paddr_t ptpa;
728 psize_t ptsize; /* Size of 'pt' in bytes */ 728 psize_t ptsize; /* Size of 'pt' in bytes */
729 u_int ptextra; /* #of additional I/O pte's */ 729 u_int ptextra; /* #of additional I/O pte's */
730{ 730{
731 extern void bootm_init __P((vaddr_t, pt_entry_t *, u_long)); 731 extern void bootm_init __P((vaddr_t, pt_entry_t *, u_long));
732 vaddr_t ioaddr; 732 vaddr_t ioaddr;
733 pt_entry_t *pt, *pg, *epg; 733 pt_entry_t *pt, *pg, *epg;
734 pt_entry_t pg_proto; 734 pt_entry_t pg_proto;
735 u_long mask; 735 u_long mask;
736 736
737 pt = (pt_entry_t *)ptpa; 737 pt = (pt_entry_t *)ptpa;
738 ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * PAGE_SIZE; 738 ioaddr = ((ptsize / sizeof(pt_entry_t)) - ptextra) * PAGE_SIZE;
739 739
740 /* 740 /*
741 * Map ST-IO area 741 * Map ST-IO area
742 */ 742 */
743 stio_addr = ioaddr; 743 stio_addr = ioaddr;
744 ioaddr += STIO_SIZE; 744 ioaddr += STIO_SIZE;
745 pg = &pt[stio_addr / PAGE_SIZE]; 745 pg = &pt[stio_addr / PAGE_SIZE];
746 epg = &pg[btoc(STIO_SIZE)]; 746 epg = &pg[btoc(STIO_SIZE)];
747#ifdef _MILANHW_ 747#ifdef _MILANHW_
748 /* 748 /*
749 * Turn on byte swaps in the ST I/O area. On the Milan, the 749 * Turn on byte swaps in the ST I/O area. On the Milan, the
750 * U0 signal of the MMU controls the BigEndian signal 750 * U0 signal of the MMU controls the BigEndian signal
751 * of the PLX9080. We use this setting so we can read/write the 751 * of the PLX9080. We use this setting so we can read/write the
752 * PLX registers (and PCI-config space) in big-endian mode. 752 * PLX registers (and PCI-config space) in big-endian mode.
753 */ 753 */
754 pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V | 0x100; 754 pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V | 0x100;
755#else 755#else
756 pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V; 756 pg_proto = STIO_PHYS | PG_RW | PG_CI | PG_V;
757#endif 757#endif
758 while(pg < epg) { 758 while(pg < epg) {
759 *pg++ = pg_proto; 759 *pg++ = pg_proto;
760 pg_proto += PAGE_SIZE; 760 pg_proto += PAGE_SIZE;
761 } 761 }
762 762
763 /* 763 /*
764 * Map PCI areas 764 * Map PCI areas
765 */ 765 */
766 if (machineid & ATARI_HADES) { 766 if (machineid & ATARI_HADES) {
767 /* 767 /*
768 * Only Hades maps the PCI-config space! 768 * Only Hades maps the PCI-config space!
769 */ 769 */
770 pci_conf_addr = ioaddr; 770 pci_conf_addr = ioaddr;
771 ioaddr += PCI_CONF_SIZE; 771 ioaddr += PCI_CONF_SIZE;
772 pg = &pt[pci_conf_addr / PAGE_SIZE]; 772 pg = &pt[pci_conf_addr / PAGE_SIZE];
773 epg = &pg[btoc(PCI_CONF_SIZE)]; 773 epg = &pg[btoc(PCI_CONF_SIZE)];
774 mask = PCI_CONFM_PHYS; 774 mask = PCI_CONFM_PHYS;
775 pg_proto = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V; 775 pg_proto = PCI_CONFB_PHYS | PG_RW | PG_CI | PG_V;
776 for(; pg < epg; mask <<= 1) 776 for(; pg < epg; mask <<= 1)
777 *pg++ = pg_proto | mask; 777 *pg++ = pg_proto | mask;
778 } 778 }
779 else pci_conf_addr = 0; /* XXX: should crash */ 779 else pci_conf_addr = 0; /* XXX: should crash */
780 780
781 if (machineid & (ATARI_HADES|ATARI_MILAN)) { 781 if (machineid & (ATARI_HADES|ATARI_MILAN)) {
782 pci_io_addr = ioaddr; 782 pci_io_addr = ioaddr;
783 ioaddr += PCI_IO_SIZE; 783 ioaddr += PCI_IO_SIZE;
784 pg = &pt[pci_io_addr / PAGE_SIZE]; 784 pg = &pt[pci_io_addr / PAGE_SIZE];
785 epg = &pg[btoc(PCI_IO_SIZE)]; 785 epg = &pg[btoc(PCI_IO_SIZE)];
786 pg_proto = PCI_IO_PHYS | PG_RW | PG_CI | PG_V; 786 pg_proto = PCI_IO_PHYS | PG_RW | PG_CI | PG_V;
787 while(pg < epg) { 787 while(pg < epg) {
788 *pg++ = pg_proto; 788 *pg++ = pg_proto;
789 pg_proto += PAGE_SIZE; 789 pg_proto += PAGE_SIZE;
790 } 790 }
791 791
792 pci_mem_addr = ioaddr; 792 pci_mem_addr = ioaddr;
793 /* Provide an uncached PCI address for the MILAN */ 793 /* Provide an uncached PCI address for the MILAN */
794 pci_mem_uncached = ioaddr; 794 pci_mem_uncached = ioaddr;
795 ioaddr += PCI_MEM_SIZE; 795 ioaddr += PCI_MEM_SIZE;
796 epg = &pg[btoc(PCI_MEM_SIZE)]; 796 epg = &pg[btoc(PCI_MEM_SIZE)];
797 pg_proto = PCI_VGA_PHYS | PG_RW | PG_CI | PG_V; 797 pg_proto = PCI_VGA_PHYS | PG_RW | PG_CI | PG_V;
798 while(pg < epg) { 798 while(pg < epg) {
799 *pg++ = pg_proto; 799 *pg++ = pg_proto;
800 pg_proto += PAGE_SIZE; 800 pg_proto += PAGE_SIZE;
801 } 801 }
802 } 802 }
803 803
804 bootm_init(ioaddr, pg, BOOTM_VA_POOL); 804 bootm_init(ioaddr, pg, BOOTM_VA_POOL);
805 /* 805 /*
806 * ioaddr += BOOTM_VA_POOL; 806 * ioaddr += BOOTM_VA_POOL;
807 * pg = &pg[btoc(BOOTM_VA_POOL)]; 807 * pg = &pg[btoc(BOOTM_VA_POOL)];
808 */ 808 */
809} 809}
810 810
811/* 811/*
812 * Used by dumpconf() to get the size of the machine-dependent panic-dump 812 * Used by dumpconf() to get the size of the machine-dependent panic-dump
813 * header in disk blocks. 813 * header in disk blocks.
814 */ 814 */
815 815
816#define CHDRSIZE (ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t))) 816#define CHDRSIZE (ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)))
817#define MDHDRSIZE roundup(CHDRSIZE, dbtob(1)) 817#define MDHDRSIZE roundup(CHDRSIZE, dbtob(1))
818 818
819int 819int
820cpu_dumpsize() 820cpu_dumpsize()
821{ 821{
822 822
823 return btodb(MDHDRSIZE); 823 return btodb(MDHDRSIZE);
824} 824}
825 825
826/* 826/*
827 * Called by dumpsys() to dump the machine-dependent header. 827 * Called by dumpsys() to dump the machine-dependent header.
828 * XXX: Assumes that it will all fit in one diskblock. 828 * XXX: Assumes that it will all fit in one diskblock.
829 */ 829 */
830int 830int
831cpu_dump(int (*dump)(dev_t, daddr_t, void *, size_t), daddr_t *p_blkno) 831cpu_dump(int (*dump)(dev_t, daddr_t, void *, size_t), daddr_t *p_blkno)
832{ 832{
833 int buf[MDHDRSIZE/sizeof(int)]; 833 int buf[MDHDRSIZE/sizeof(int)];
834 int error; 834 int error;
835 kcore_seg_t *kseg_p; 835 kcore_seg_t *kseg_p;
836 cpu_kcore_hdr_t *chdr_p; 836 cpu_kcore_hdr_t *chdr_p;
837 837
838 kseg_p = (kcore_seg_t *)buf; 838 kseg_p = (kcore_seg_t *)buf;
839 chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)]; 839 chdr_p = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*kseg_p)) / sizeof(int)];
840 840
841 /* 841 /*
842 * Generate a segment header 842 * Generate a segment header
843 */ 843 */
844 CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 844 CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
845 kseg_p->c_size = MDHDRSIZE - ALIGN(sizeof(*kseg_p)); 845 kseg_p->c_size = MDHDRSIZE - ALIGN(sizeof(*kseg_p));
846 846
847 /* 847 /*
848 * Add the md header 848 * Add the md header
849 */ 849 */
850 *chdr_p = cpu_kcore_hdr; 850 *chdr_p = cpu_kcore_hdr;
851 error = dump(dumpdev, *p_blkno, (void *)buf, sizeof(buf)); 851 error = dump(dumpdev, *p_blkno, (void *)buf, sizeof(buf));
852 *p_blkno += btodb(sizeof(buf)); 852 *p_blkno += btodb(sizeof(buf));
853 return (error); 853 return (error);
854} 854}
855 855
856#if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS) 856#if (M68K_NPHYS_RAM_SEGS < NMEM_SEGS)
857#error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS" 857#error "Configuration error: M68K_NPHYS_RAM_SEGS < NMEM_SEGS"
858#endif 858#endif
859/* 859/*
860 * Initialize the cpu_kcore_header. 860 * Initialize the cpu_kcore_header.
861 */ 861 */
862static void 862static void
863cpu_init_kcorehdr(kbase, sysseg_pa) 863cpu_init_kcorehdr(kbase, sysseg_pa)
864 paddr_t kbase; 864 paddr_t kbase;
865 paddr_t sysseg_pa; 865 paddr_t sysseg_pa;
866{ 866{
867 cpu_kcore_hdr_t *h = &cpu_kcore_hdr; 867 cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
868 struct m68k_kcore_hdr *m = &h->un._m68k; 868 struct m68k_kcore_hdr *m = &h->un._m68k;
869 extern char end[]; 869 extern char end[];
870 int i; 870 int i;
871 871
872 bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr)); 872 bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));
873 873
874 /* 874 /*
875 * Initialize the `dispatcher' portion of the header. 875 * Initialize the `dispatcher' portion of the header.
876 */ 876 */
877 strcpy(h->name, machine); 877 strcpy(h->name, machine);
878 h->page_size = PAGE_SIZE; 878 h->page_size = PAGE_SIZE;
879 h->kernbase = KERNBASE; 879 h->kernbase = KERNBASE;
880 880
881 /* 881 /*
882 * Fill in information about our MMU configuration. 882 * Fill in information about our MMU configuration.
883 */ 883 */
884 m->mmutype = mmutype; 884 m->mmutype = mmutype;
885 m->sg_v = SG_V; 885 m->sg_v = SG_V;
886 m->sg_frame = SG_FRAME; 886 m->sg_frame = SG_FRAME;
887 m->sg_ishift = SG_ISHIFT; 887 m->sg_ishift = SG_ISHIFT;
888 m->sg_pmask = SG_PMASK;  888 m->sg_pmask = SG_PMASK;
889 m->sg40_shift1 = SG4_SHIFT1; 889 m->sg40_shift1 = SG4_SHIFT1;
890 m->sg40_mask2 = SG4_MASK2; 890 m->sg40_mask2 = SG4_MASK2;
891 m->sg40_shift2 = SG4_SHIFT2; 891 m->sg40_shift2 = SG4_SHIFT2;
892 m->sg40_mask3 = SG4_MASK3; 892 m->sg40_mask3 = SG4_MASK3;
893 m->sg40_shift3 = SG4_SHIFT3; 893 m->sg40_shift3 = SG4_SHIFT3;
894 m->sg40_addr1 = SG4_ADDR1; 894 m->sg40_addr1 = SG4_ADDR1;
895 m->sg40_addr2 = SG4_ADDR2; 895 m->sg40_addr2 = SG4_ADDR2;
896 m->pg_v = PG_V; 896 m->pg_v = PG_V;
897 m->pg_frame = PG_FRAME; 897 m->pg_frame = PG_FRAME;
898 898
899 /* 899 /*
900 * Initialize pointer to kernel segment table. 900 * Initialize pointer to kernel segment table.
901 */ 901 */
902 m->sysseg_pa = sysseg_pa; /* PA after relocation */ 902 m->sysseg_pa = sysseg_pa; /* PA after relocation */
903 903
904 /* 904 /*
905 * Initialize relocation value such that: 905 * Initialize relocation value such that:
906 * 906 *
907 * pa = (va - KERNBASE) + reloc 907 * pa = (va - KERNBASE) + reloc
908 */ 908 */
909 m->reloc = kbase; 909 m->reloc = kbase;
910 910
911 /* 911 /*
912 * Define the end of the relocatable range. 912 * Define the end of the relocatable range.
913 */ 913 */
914 m->relocend = (vaddr_t)end; 914 m->relocend = (vaddr_t)end;
915 915
916 for (i = 0; i < NMEM_SEGS; i++) { 916 for (i = 0; i < NMEM_SEGS; i++) {
917 m->ram_segs[i].start = boot_segs[i].start; 917 m->ram_segs[i].start = boot_segs[i].start;
918 m->ram_segs[i].size = boot_segs[i].end - 918 m->ram_segs[i].size = boot_segs[i].end -
919 boot_segs[i].start; 919 boot_segs[i].start;
920 } 920 }
921} 921}
922 922
923void 923void
924mmu030_setup(sysseg_pa, kstsize, ptpa, ptsize, sysptmap_pa, kbase) 924mmu030_setup(sysseg_pa, kstsize, ptpa, ptsize, sysptmap_pa, kbase)
925 paddr_t sysseg_pa; /* System segment table */ 925 paddr_t sysseg_pa; /* System segment table */
926 u_int kstsize; /* size of 'sysseg' in pages */ 926 u_int kstsize; /* size of 'sysseg' in pages */
927 paddr_t ptpa; /* Kernel page table */ 927 paddr_t ptpa; /* Kernel page table */
928 psize_t ptsize; /* size of 'pt' in bytes */ 928 psize_t ptsize; /* size of 'pt' in bytes */
929 paddr_t sysptmap_pa; /* System page table */ 929 paddr_t sysptmap_pa; /* System page table */
930 paddr_t kbase; 930 paddr_t kbase;
931{ 931{
932 st_entry_t sg_proto, *sg, *esg; 932 st_entry_t sg_proto, *sg, *esg;
933 pt_entry_t pg_proto, *pg, *epg; 933 pt_entry_t pg_proto, *pg, *epg;
934 934
935 /* 935 /*
936 * Map the page table pages in both the HW segment table 936 * Map the page table pages in both the HW segment table
937 * and the software Sysptmap. 937 * and the software Sysptmap.
938 */ 938 */
939 sg = (st_entry_t *)sysseg_pa; 939 sg = (st_entry_t *)sysseg_pa;
940 pg = (pt_entry_t *)sysptmap_pa; 940 pg = (pt_entry_t *)sysptmap_pa;
941 epg = &pg[ptsize >> PGSHIFT]; 941 epg = &pg[ptsize >> PGSHIFT];
942 sg_proto = (ptpa + kbase) /* relocated PA */ | SG_RW | SG_V; 942 sg_proto = (ptpa + kbase) /* relocated PA */ | SG_RW | SG_V;
943 pg_proto = (ptpa + kbase) /* relocated PA */ | PG_RW | PG_CI | PG_V; 943 pg_proto = (ptpa + kbase) /* relocated PA */ | PG_RW | PG_CI | PG_V;
944 while (pg < epg) { 944 while (pg < epg) {
945 *sg++ = sg_proto; 945 *sg++ = sg_proto;
946 *pg++ = pg_proto; 946 *pg++ = pg_proto;
947 sg_proto += PAGE_SIZE; 947 sg_proto += PAGE_SIZE;
948 pg_proto += PAGE_SIZE; 948 pg_proto += PAGE_SIZE;
949 } 949 }
950 950
951 /*  951 /*
952 * Invalidate the remainder of the tables. 952 * Invalidate the remainder of the tables.
953 */ 953 */
954 esg = (st_entry_t *)sysseg_pa; 954 esg = (st_entry_t *)sysseg_pa;
955 esg = &esg[256]; /* XXX should be TIA_SIZE */ 955 esg = &esg[256]; /* XXX should be TIA_SIZE */
956 while (sg < esg) 956 while (sg < esg)
957 *sg++ = SG_NV; 957 *sg++ = SG_NV;
958 epg = (pt_entry_t *)sysptmap_pa; 958 epg = (pt_entry_t *)sysptmap_pa;
959 epg = &epg[NPTEPG]; /* XXX should be TIB_SIZE */ 959 epg = &epg[NPTEPG]; /* XXX should be TIB_SIZE */
960 while (pg < epg) 960 while (pg < epg)
961 *pg++ = PG_NV; 961 *pg++ = PG_NV;
962 962
963 /* 963 /*
964 * Initialize the PTE for the last one to point Sysptmap. 964 * Initialize the PTE for the last one to point Sysptmap.
965 */ 965 */
966 sg = (st_entry_t *)sysseg_pa; 966 sg = (st_entry_t *)sysseg_pa;
967 sg = &sg[256 - 1]; /* XXX should be TIA_SIZE */ 967 sg = &sg[256 - 1]; /* XXX should be TIA_SIZE */
968 pg = (pt_entry_t *)sysptmap_pa; 968 pg = (pt_entry_t *)sysptmap_pa;
969 pg = &pg[256 - 1]; /* XXX should be TIA_SIZE */ 969 pg = &pg[256 - 1]; /* XXX should be TIA_SIZE */
970 *sg = (sysptmap_pa + kbase) /* relocated PA */ | SG_RW | SG_V; 970 *sg = (sysptmap_pa + kbase) /* relocated PA */ | SG_RW | SG_V;
971 *pg = (sysptmap_pa + kbase) /* relocated PA */ | PG_RW | PG_CI | PG_V; 971 *pg = (sysptmap_pa + kbase) /* relocated PA */ | PG_RW | PG_CI | PG_V;
972} 972}
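
mmu030_setup() above hands every page of the kernel page table one segment-table entry and one Sysptmap PTE, with both prototype descriptors stepping by PAGE_SIZE, and then points the final entry of each table at Sysptmap itself so the mapping pages stay reachable once the MMU is on. A minimal standalone sketch of that indexing, under assumed constants (8 KB pages and the literal 256 marked "XXX should be TIA_SIZE"); these are illustrative values, not the machine headers':

/*
 * Standalone sketch of the mmu030_setup() mapping arithmetic.
 * All constants below are assumptions for illustration only.
 */
#include <stdio.h>

#define PGSHIFT		13			/* assumed: 8 KB pages */
#define PAGE_SIZE	(1UL << PGSHIFT)
#define TIA_SIZE	256			/* assumed table size (the literal 256) */

int
main(void)
{
	unsigned long ptsize = 4 * PAGE_SIZE;	/* hypothetical kernel PT size */
	unsigned long nptpages = ptsize >> PGSHIFT;
	unsigned long i;

	/* One segment-table entry plus one Sysptmap PTE per page-table page. */
	for (i = 0; i < nptpages; i++)
		printf("PT page %lu -> ST[%lu], Sysptmap[%lu], PA offset 0x%lx\n",
		    i, i, i, i * PAGE_SIZE);

	/*
	 * Entries beyond nptpages are invalidated; the very last one
	 * (index TIA_SIZE - 1) maps Sysptmap itself.
	 */
	printf("Sysptmap self-map at index %d\n", TIA_SIZE - 1);
	return 0;
}

The sketch only shows the one-entry-per-page-table-page correspondence; flag bits such as SG_RW and PG_CI are left out.
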
973 973
974#if defined(M68040) || defined(M68060) 974#if defined(M68040) || defined(M68060)
975void 975void
976mmu040_setup(sysseg_pa, kstsize, ptpa, ptsize, sysptmap_pa, kbase) 976mmu040_setup(sysseg_pa, kstsize, ptpa, ptsize, sysptmap_pa, kbase)
977 paddr_t sysseg_pa; /* System segment table */ 977 paddr_t sysseg_pa; /* System segment table */
978 u_int kstsize; /* size of 'sysseg' in pages */ 978 u_int kstsize; /* size of 'sysseg' in pages */
979 paddr_t ptpa; /* Kernel page table */ 979 paddr_t ptpa; /* Kernel page table */
980 psize_t ptsize; /* size of 'pt' in bytes */ 980 psize_t ptsize; /* size of 'pt' in bytes */
981 paddr_t sysptmap_pa; /* System page table */ 981 paddr_t sysptmap_pa; /* System page table */
982 paddr_t kbase; 982 paddr_t kbase;
983{ 983{
984 int i; 984 int nl1desc, nl2desc, i;
985 st_entry_t sg_proto, *sg, *esg; 985 st_entry_t sg_proto, *sg, *esg;
986 pt_entry_t pg_proto, *pg, *epg; 986 pt_entry_t pg_proto, *pg, *epg;
987 987
988 /* 988 /*
989 * First invalidate the entire "segment table" pages 989 * First invalidate the entire "segment table" pages
990 * (levels 1 and 2 have the same "invalid" values). 990 * (levels 1 and 2 have the same "invalid" values).
991 */ 991 */
992 sg = (st_entry_t *)sysseg_pa; 992 sg = (st_entry_t *)sysseg_pa;
993 esg = &sg[kstsize * NPTEPG]; 993 esg = &sg[kstsize * NPTEPG];
994 while (sg < esg) 994 while (sg < esg)
995 *sg++ = SG_NV; 995 *sg++ = SG_NV;
996 996
997 /* 997 /*
998 * Initialize level 2 descriptors (which immediately 998 * Initialize level 2 descriptors (which immediately
999 * follow the level 1 table). 999 * follow the level 1 table).
1000 * We need: 1000 * We need:
1001 * NPTEPG / SG4_LEV3SIZE 1001 * NPTEPG / SG4_LEV3SIZE
1002 * level 2 descriptors to map each of the nptpages 1002 * level 2 descriptors to map each of the nptpages
1003 * pages of PTEs. Note that we set the "used" bit 1003 * pages of PTEs. Note that we set the "used" bit
1004 * now to save the HW the expense of doing it. 1004 * now to save the HW the expense of doing it.
1005 */ 1005 */
1006 i = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE); 1006 nl2desc = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE);
1007 sg = (st_entry_t *)sysseg_pa; 1007 sg = (st_entry_t *)sysseg_pa;
1008 sg = &sg[SG4_LEV1SIZE]; 1008 sg = &sg[SG4_LEV1SIZE];
1009 esg = &sg[i]; 1009 esg = &sg[nl2desc];
1010 sg_proto = (ptpa + kbase) /* relocated PA */ | SG_U | SG_RW | SG_V; 1010 sg_proto = (ptpa + kbase) /* relocated PA */ | SG_U | SG_RW | SG_V;
1011 while (sg < esg) { 1011 while (sg < esg) {
1012 *sg++ = sg_proto; 1012 *sg++ = sg_proto;
1013 sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t)); 1013 sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t));
1014 } 1014 }
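
The comment before this loop carries the level-2 sizing argument: every page of PTEs holds NPTEPG entries and each level-3 block maps SG4_LEV3SIZE of them, so the kernel page table needs (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE) level-2 descriptors, each advanced by SG4_LEV3SIZE * sizeof(st_entry_t). A worked example with assumed constants (illustrative, not necessarily the atari values):

/* Worked example of the nl2desc computation; constants are assumed. */
#include <stdio.h>

#define PGSHIFT		13	/* assumed: 8 KB pages */
#define NPTEPG		2048	/* assumed: PTEs per page */
#define SG4_LEV3SIZE	32	/* assumed: entries per level-3 block */

int
main(void)
{
	unsigned long ptsize = 4UL << PGSHIFT;	/* four hypothetical PT pages */
	unsigned long nl2desc;

	/* Each PT page needs NPTEPG / SG4_LEV3SIZE level-2 descriptors. */
	nl2desc = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE);
	printf("nl2desc = %lu\n", nl2desc);	/* 4 * 64 = 256 */
	return 0;
}
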
1015 1015
1016 /* 1016 /*
1017 * Initialize level 1 descriptors. We need: 1017 * Initialize level 1 descriptors. We need:
1018 * roundup(num, SG4_LEV2SIZE) / SG4_LEVEL2SIZE 1018 * roundup(nl2desc, SG4_LEV2SIZE) / SG4_LEVEL2SIZE
1019 * level 1 descriptors to map the 'num' level 2's. 1019 * level 1 descriptors to map the 'nl2desc' level 2's.
1020 */ 1020 */
1021 i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE; 1021 nl1desc = roundup(nl2desc, SG4_LEV2SIZE) / SG4_LEV2SIZE;
1022 protostfree = (-1 << (i + 2)) /* & ~(-1 << MAXKL2SIZE) */; 
1023 sg = (st_entry_t *)sysseg_pa; 1022 sg = (st_entry_t *)sysseg_pa;
1024 esg = &sg[i]; 1023 esg = &sg[nl1desc];
1025 sg_proto = ((paddr_t)&sg[SG4_LEV1SIZE] + kbase) /* relocated PA */ 1024 sg_proto = ((paddr_t)&sg[SG4_LEV1SIZE] + kbase) /* relocated PA */
1026 | SG_U | SG_RW | SG_V; 1025 | SG_U | SG_RW | SG_V;
1027 while (sg < esg) { 1026 while (sg < esg) {
1028 *sg++ = sg_proto; 1027 *sg++ = sg_proto;
1029 sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t)); 1028 sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
1030 } 1029 }
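
The level-1 sizing follows the same pattern one level up: each level-1 descriptor covers a whole SG4_LEV2SIZE-entry level-2 table, so the nl2desc descriptors are rounded up to complete tables. Continuing the hypothetical numbers from the previous sketch:

/* Worked example of the nl1desc computation; constants are assumed. */
#include <stdio.h>

#define SG4_LEV2SIZE	128	/* assumed: entries per level-2 table */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	unsigned long nl2desc = 256;	/* value from the previous sketch */
	unsigned long nl1desc;

	/* One level-1 descriptor per (possibly partial) level-2 table. */
	nl1desc = roundup(nl2desc, SG4_LEV2SIZE) / SG4_LEV2SIZE;
	printf("nl1desc = %lu\n", nl1desc);	/* roundup(256,128)/128 = 2 */
	return 0;
}
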
1031 1030
1032 /* Sysmap is last entry in level 1 */ 1031 /* Sysmap is last entry in level 1 */
1033 sg = (st_entry_t *)sysseg_pa; 1032 sg = (st_entry_t *)sysseg_pa;
1034 sg = &sg[SG4_LEV1SIZE - 1]; 1033 sg = &sg[SG4_LEV1SIZE - 1];
1035 *sg = sg_proto; 1034 *sg = sg_proto;
1036 1035
1037 /* 1036 /*
1038 * Kernel segment table at end of next level 2 table 1037 * Kernel segment table at end of next level 2 table
1039 */ 1038 */
1040 /* XXX fix calculations XXX */ 1039 i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
1041 i = ((((ptsize >> PGSHIFT) + 3) & -2) - 1) * (NPTEPG / SG4_LEV3SIZE); 
1042 sg = (st_entry_t *)sysseg_pa; 1040 sg = (st_entry_t *)sysseg_pa;
1043 sg = &sg[SG4_LEV1SIZE + i]; 1041 sg = &sg[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)];
1044 esg = &sg[NPTEPG / SG4_LEV3SIZE]; 1042 esg = &sg[NPTEPG / SG4_LEV3SIZE];
1045 sg_proto = (sysptmap_pa + kbase) /* relocated PA */ 1043 sg_proto = (sysptmap_pa + kbase) /* relocated PA */
1046 | SG_U | SG_RW | SG_V; 1044 | SG_U | SG_RW | SG_V;
1047 while (sg < esg) { 1045 while (sg < esg) {
1048 *sg++ = sg_proto; 1046 *sg++ = sg_proto;
1049 sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t)); 1047 sg_proto += (SG4_LEV3SIZE * sizeof(st_entry_t));
1050 } 1048 }
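
The replaced index computation is the interesting part of this hunk: the removed line derived the position of the Sysmap descriptors from a page-count expression, while the new code walks there structurally, past the SG4_LEV1SIZE level-1 entries and the nl1desc level-2 tables used for the kernel page table, then back NPTEPG / SG4_LEV3SIZE entries from the end of the next level-2 table. With one internally consistent set of assumed constants (chosen for illustration, not taken from the atari headers) the two expressions select the same element for any page-table size, which this throwaway check prints:

/* Cross-check of the old and new Sysmap index expressions (assumed constants). */
#include <stdio.h>

#define NPTEPG		2048	/* assumed */
#define SG4_LEV1SIZE	128	/* assumed */
#define SG4_LEV2SIZE	128	/* assumed */
#define SG4_LEV3SIZE	32	/* assumed */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	unsigned long nptpages, nl2desc, nl1desc, i_old, i_new;

	for (nptpages = 1; nptpages <= 8; nptpages++) {
		/* Removed expression (offset measured past the level-1 table). */
		i_old = SG4_LEV1SIZE +
		    (((nptpages + 3) & -2) - 1) * (NPTEPG / SG4_LEV3SIZE);

		/* Replacement expression. */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		nl1desc = roundup(nl2desc, SG4_LEV2SIZE) / SG4_LEV2SIZE;
		i_new = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE) +
		    SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE);

		printf("nptpages=%lu old=%lu new=%lu\n", nptpages, i_old, i_new);
	}
	return 0;
}
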
1051 1049
 1050 /* Include additional level 2 table for Sysmap in protostfree */
 1051 protostfree = (~0 << (1 + nl1desc + 1)) /* & ~(~0 << MAXKL2SIZE) */;
 1052
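
protostfree is the pmap's bitmap of kernel level-2 segment-table slots, where a set bit means the slot is still free; shifting an all-ones word left by 1 + nl1desc + 1 clears that many low bits, marking the already-consumed slots, and the new in-line comment records that the extra one accounts for the level-2 table whose tail was just given to Sysmap. A tiny illustration of the resulting bit pattern (nl1desc is a made-up value, and the sketch uses ~0U so the shift is well defined on its own):

/* Illustration of the protostfree bit pattern; nl1desc is hypothetical. */
#include <stdio.h>

int
main(void)
{
	unsigned int nl1desc = 2;	/* assumed value */
	unsigned int protostfree;

	/* The low (1 + nl1desc + 1) bits end up cleared: slots already in use. */
	protostfree = ~0U << (1 + nl1desc + 1);
	printf("protostfree = 0x%08x\n", protostfree);	/* 0xfffffff0 */
	return 0;
}
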
1052 /* 1053 /*
1053 * Initialize Sysptmap 1054 * Initialize Sysptmap
1054 */ 1055 */
1055 pg = (pt_entry_t *)sysptmap_pa; 1056 pg = (pt_entry_t *)sysptmap_pa;
1056 epg = &pg[ptsize >> PGSHIFT]; 1057 epg = &pg[ptsize >> PGSHIFT];
1057 pg_proto = (ptpa + kbase) /* relocated PA */ | PG_RW | PG_CI | PG_V; 1058 pg_proto = (ptpa + kbase) /* relocated PA */ | PG_RW | PG_CI | PG_V;
1058 while (pg < epg) { 1059 while (pg < epg) {
1059 *pg++ = pg_proto; 1060 *pg++ = pg_proto;
1060 pg_proto += PAGE_SIZE; 1061 pg_proto += PAGE_SIZE;
1061 } 1062 }
1062 1063
1063 /* 1064 /*
1064 * Invalidate rest of Sysptmap page. 1065 * Invalidate rest of Sysptmap page.
1065 */ 1066 */
1066 epg = (pt_entry_t *)sysptmap_pa; 1067 epg = (pt_entry_t *)sysptmap_pa;
1067 epg = &epg[NPTEPG]; /* XXX: should be TIB_SIZE */ 1068 epg = &epg[NPTEPG]; /* XXX: should be TIB_SIZE */
1068 while (pg < epg) 1069 while (pg < epg)
1069 *pg++ = PG_NV; 1070 *pg++ = PG_NV;
1070 1071
1071 /* 1072 /*
1072 * Initialize the PTE for the last one to point Sysptmap. 1073 * Initialize the PTE for the last one to point Sysptmap.
1073 */ 1074 */
1074 pg = (pt_entry_t *)sysptmap_pa; 1075 pg = (pt_entry_t *)sysptmap_pa;
1075 pg = &pg[256 - 1]; /* XXX: should be TIA_SIZE */ 1076 pg = &pg[256 - 1]; /* XXX: should be TIA_SIZE */
1076 *pg = (sysptmap_pa + kbase) /* relocated PA */ | PG_RW | PG_CI | PG_V; 1077 *pg = (sysptmap_pa + kbase) /* relocated PA */ | PG_RW | PG_CI | PG_V;
1077} 1078}
1078#endif /* M68040 */ 1079#endif /* M68040 */
1079 1080
1080#if defined(M68060) 1081#if defined(M68060)
1081int m68060_pcr_init = 0x21; /* make this patchable */ 1082int m68060_pcr_init = 0x21; /* make this patchable */
1082#endif 1083#endif
1083 1084
1084static void 1085static void
1085initcpu() 1086initcpu()
1086{ 1087{
1087 typedef void trapfun __P((void)); 1088 typedef void trapfun __P((void));
1088 1089
1089 switch (cputype) { 1090 switch (cputype) {
1090 1091
1091#if defined(M68060) 1092#if defined(M68060)
1092 case CPU_68060: 1093 case CPU_68060:
1093 { 1094 {
1094 extern trapfun *vectab[256]; 1095 extern trapfun *vectab[256];
1095 extern trapfun buserr60, addrerr4060, fpfault; 1096 extern trapfun buserr60, addrerr4060, fpfault;
1096#if defined(M060SP) 1097#if defined(M060SP)
1097 extern u_int8_t FP_CALL_TOP[], I_CALL_TOP[]; 1098 extern u_int8_t FP_CALL_TOP[], I_CALL_TOP[];
1098#else 1099#else
1099 extern trapfun illinst; 1100 extern trapfun illinst;
1100#endif 1101#endif
1101 1102
1102 __asm volatile ("movl %0,%%d0; .word 0x4e7b,0x0808" : :  1103 __asm volatile ("movl %0,%%d0; .word 0x4e7b,0x0808" : :
1103 "d"(m68060_pcr_init):"d0" ); 1104 "d"(m68060_pcr_init):"d0" );
1104 1105
1105 /* bus/addrerr vectors */ 1106 /* bus/addrerr vectors */
1106 vectab[2] = buserr60; 1107 vectab[2] = buserr60;
1107 vectab[3] = addrerr4060; 1108 vectab[3] = addrerr4060;
1108 1109
1109#if defined(M060SP) 1110#if defined(M060SP)
1110 /* integer support */ 1111 /* integer support */
1111 vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00]; 1112 vectab[61] = (trapfun *)&I_CALL_TOP[128 + 0x00];
1112 1113
1113 /* floating point support */ 1114 /* floating point support */
1114 /* 1115 /*
1115 * XXX maybe we really should run-time check for the 1116 * XXX maybe we really should run-time check for the
1116 * stack frame format here: 1117 * stack frame format here:
1117 */ 1118 */
1118 vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30]; 1119 vectab[11] = (trapfun *)&FP_CALL_TOP[128 + 0x30];
1119 1120
1120 vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38]; 1121 vectab[55] = (trapfun *)&FP_CALL_TOP[128 + 0x38];
1121 vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40]; 1122 vectab[60] = (trapfun *)&FP_CALL_TOP[128 + 0x40];
1122 1123
1123 vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00]; 1124 vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
1124 vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08]; 1125 vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
1125 vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10]; 1126 vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
1126 vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18]; 1127 vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
1127 vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20]; 1128 vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
1128 vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28]; 1129 vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
1129#else 1130#else
1130 vectab[61] = illinst; 1131 vectab[61] = illinst;
1131#endif 1132#endif
1132 vectab[48] = fpfault; 1133 vectab[48] = fpfault;
1133 } 1134 }
1134 break; 1135 break;
1135#endif /* defined(M68060) */ 1136#endif /* defined(M68060) */
1136#if defined(M68040) 1137#if defined(M68040)
1137 case CPU_68040: 1138 case CPU_68040:
1138 { 1139 {
1139 extern trapfun *vectab[256]; 1140 extern trapfun *vectab[256];
1140 extern trapfun buserr40, addrerr4060; 1141 extern trapfun buserr40, addrerr4060;
1141 1142
1142 /* bus/addrerr vectors */ 1143 /* bus/addrerr vectors */
1143 vectab[2] = buserr40; 1144 vectab[2] = buserr40;
1144 vectab[3] = addrerr4060; 1145 vectab[3] = addrerr4060;
1145 } 1146 }
1146 break; 1147 break;
1147#endif /* defined(M68040) */ 1148#endif /* defined(M68040) */
1148#if defined(M68030) || defined(M68020) 1149#if defined(M68030) || defined(M68020)
1149 case CPU_68030: 1150 case CPU_68030:
1150 case CPU_68020: 1151 case CPU_68020:
1151 { 1152 {
1152 extern trapfun *vectab[256]; 1153 extern trapfun *vectab[256];
1153 extern trapfun buserr2030, addrerr2030; 1154 extern trapfun buserr2030, addrerr2030;
1154 1155
1155 /* bus/addrerr vectors */ 1156 /* bus/addrerr vectors */
1156 vectab[2] = buserr2030; 1157 vectab[2] = buserr2030;
1157 vectab[3] = addrerr2030; 1158 vectab[3] = addrerr2030;
1158 } 1159 }
1159 break; 1160 break;
1160#endif /* defined(M68030) || defined(M68020) */ 1161#endif /* defined(M68030) || defined(M68020) */
1161 } 1162 }
1162 1163
1163 DCIS(); 1164 DCIS();
1164} 1165}
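
initcpu() above is mostly mechanical: it selects CPU-specific bus-error and address-error handlers (plus, on the 68060, the optional software support package entry points) by overwriting slots of the vectab exception table, then flushes with DCIS(). The underlying pattern is simply an array of handler pointers indexed by vector number; a generic sketch with made-up handler names (only the bus-error slot number 2 is taken from the code above):

/* Generic sketch of a handler vector table; names here are hypothetical. */
#include <stdio.h>

typedef void trapfun(void);

static void
default_handler(void)
{
	printf("unhandled exception\n");
}

static void
buserr_handler(void)
{
	printf("bus error handler\n");
}

static trapfun *vectab[256];		/* one slot per exception vector */

int
main(void)
{
	int i;

	for (i = 0; i < 256; i++)
		vectab[i] = default_handler;

	vectab[2] = buserr_handler;	/* install a CPU-specific handler */
	vectab[2]();			/* dispatch, conceptually, as the CPU would */
	return 0;
}
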
1165 1166
1166#ifdef DEBUG 1167#ifdef DEBUG
1167void dump_segtable __P((u_int *)); 1168void dump_segtable __P((u_int *));
1168void dump_pagetable __P((u_int *, u_int, u_int)); 1169void dump_pagetable __P((u_int *, u_int, u_int));
1169u_int vmtophys __P((u_int *, u_int)); 1170u_int vmtophys __P((u_int *, u_int));
1170 1171
1171void 1172void
1172dump_segtable(stp) 1173dump_segtable(stp)
1173 u_int *stp; 1174 u_int *stp;
1174{ 1175{
1175 u_int *s, *es; 1176 u_int *s, *es;
1176 int shift, i; 1177 int shift, i;
1177 1178
1178 s = stp; 1179 s = stp;
1179 { 1180 {
1180 es = s + (M68K_STSIZE >> 2); 1181 es = s + (M68K_STSIZE >> 2);
1181 shift = SG_ISHIFT; 1182 shift = SG_ISHIFT;
1182 } 1183 }
1183 1184
1184 /*  1185 /*
1185 * XXX need changes for 68040  1186 * XXX need changes for 68040
1186 */ 1187 */
1187 for (i = 0; s < es; s++, i++) 1188 for (i = 0; s < es; s++, i++)
1188 if (*s & SG_V) 1189 if (*s & SG_V)
1189 printf("$%08x: $%08x\t", i << shift, *s & SG_FRAME); 1190 printf("$%08x: $%08x\t", i << shift, *s & SG_FRAME);
1190 printf("\n"); 1191 printf("\n");
1191} 1192}
1192 1193
1193void 1194void
1194dump_pagetable(ptp, i, n) 1195dump_pagetable(ptp, i, n)
1195 u_int *ptp, i, n; 1196 u_int *ptp, i, n;
1196{ 1197{
1197 u_int *p, *ep; 1198 u_int *p, *ep;
1198 1199
1199 p = ptp + i; 1200 p = ptp + i;
1200 ep = p + n; 1201 ep = p + n;
1201 for (; p < ep; p++, i++) 1202 for (; p < ep; p++, i++)
1202 if (*p & PG_V) 1203 if (*p & PG_V)
1203 printf("$%08x -> $%08x\t", i, *p & PG_FRAME); 1204 printf("$%08x -> $%08x\t", i, *p & PG_FRAME);
1204 printf("\n"); 1205 printf("\n");
1205} 1206}
1206 1207
1207u_int 1208u_int
1208vmtophys(ste, vm) 1209vmtophys(ste, vm)
1209 u_int *ste, vm; 1210 u_int *ste, vm;
1210{ 1211{
1211 ste = (u_int *) (*(ste + (vm >> SEGSHIFT)) & SG_FRAME); 1212 ste = (u_int *) (*(ste + (vm >> SEGSHIFT)) & SG_FRAME);
1212 ste += (vm & SG_PMASK) >> PGSHIFT; 1213 ste += (vm & SG_PMASK) >> PGSHIFT;
1213 return((*ste & -PAGE_SIZE) | (vm & (PAGE_SIZE - 1))); 1214 return((*ste & -PAGE_SIZE) | (vm & (PAGE_SIZE - 1)));
1214} 1215}
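
vmtophys() does the classic two-level walk by hand: the top bits of the VA index the segment table, the frame part of that entry is the page table, the middle bits select the PTE, and the PTE frame is merged with the page offset. A self-contained toy version of the same walk, with assumed shift values and the SG_FRAME/PG_FRAME flag masking left out for brevity:

/* Toy two-level VA->PA walk mirroring vmtophys(); all values are assumptions. */
#include <stdio.h>
#include <stdint.h>

#define PGSHIFT		13				/* assumed: 8 KB pages */
#define SEGSHIFT	24				/* assumed segment size */
#define PAGE_MASK	((1UL << PGSHIFT) - 1)
#define SG_PMASK	((1UL << SEGSHIFT) - 1)	/* VA bits below the segment index */

int
main(void)
{
	static uintptr_t pagetab[2048];	/* toy page table */
	static uintptr_t segtab[256];	/* toy segment table */
	uintptr_t va, pa, *ste, *pte;

	pagetab[3] = 0x00440000;	/* PTE 3 -> physical frame 0x00440000 */
	segtab[1] = (uintptr_t)pagetab;	/* segment 1 -> pagetab */

	va = (1UL << SEGSHIFT) | (3UL << PGSHIFT) | 0x123;	/* segment 1, page 3 */

	ste = &segtab[va >> SEGSHIFT];			/* level 1 lookup */
	pte = (uintptr_t *)*ste;			/* page table base */
	pa = pte[(va & SG_PMASK) >> PGSHIFT];		/* level 2 lookup */
	pa = (pa & ~PAGE_MASK) | (va & PAGE_MASK);	/* merge in page offset */

	printf("va 0x%lx -> pa 0x%lx\n", (unsigned long)va, (unsigned long)pa);
	return 0;
}
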
1215 1216
1216#endif 1217#endif