Fri Feb 21 22:22:48 2014 UTC
Rework PIC method to be simpler.  Change to be more Cortex neutral.


(matt)
diff -r1.12 -r1.13 src/sys/arch/arm/cortex/a9_mpsubr.S
diff -r1.10 -r1.11 src/sys/arch/evbarm/bcm53xx/bcm53xx_start.S
diff -r1.5 -r1.6 src/sys/arch/evbarm/cubie/cubie_start.S
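The heart of the rework is visible throughout the diffs below: instead of
loading absolute addresses from PC-relative literal pools (.Luart0,
.Lcomlock, .Lcpu_hatched, ...), the new code materializes the link-time
address with movw/movt and then uses bfi to splice its low 28 bits into
the 256MB region the code is actually executing from.  A minimal sketch
of the pattern (some_symbol is a placeholder; the diffs apply it to
uboot_args, comlock, arm_cpu_hatched and friends):

1:	adr	r4, 1b				@ r4 = where we really execute
	movw	r5, #:lower16:some_symbol	@ r5 = link-time address
	movt	r5, #:upper16:some_symbol
	bfi	r4, r5, #0, #28			@ keep r4[31:28], take r5[27:0]
	@ r4 now addresses some_symbol inside the current 256MB region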

cvs diff -r1.12 -r1.13 src/sys/arch/arm/cortex/Attic/a9_mpsubr.S

--- src/sys/arch/arm/cortex/Attic/a9_mpsubr.S 2014/01/24 05:14:11 1.12
+++ src/sys/arch/arm/cortex/Attic/a9_mpsubr.S 2014/02/21 22:22:48 1.13
@@ -1,504 +1,585 @@
1/* $NetBSD: a9_mpsubr.S,v 1.12 2014/01/24 05:14:11 matt Exp $ */ 1/* $NetBSD: a9_mpsubr.S,v 1.13 2014/02/21 22:22:48 matt Exp $ */
2/*- 2/*-
3 * Copyright (c) 2012 The NetBSD Foundation, Inc. 3 * Copyright (c) 2012 The NetBSD Foundation, Inc.
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * This code is derived from software contributed to The NetBSD Foundation 6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas of 3am Software Foundry. 7 * by Matt Thomas of 3am Software Foundry.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE. 28 * POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31#include "opt_cpuoptions.h" 31#include "opt_cpuoptions.h"
32#include "opt_cputypes.h" 32#include "opt_cputypes.h"
33#include "opt_multiprocessor.h" 33#include "opt_multiprocessor.h"
34 34
35#include <arm/asm.h> 35#include <arm/asm.h>
36#include <arm/armreg.h> 36#include <arm/armreg.h>
37#include <arm/cortex/scu_reg.h> 37#include <arm/cortex/scu_reg.h>
38#include "assym.h" 38#include "assym.h"
39 39
40 40
41/* We'll modify va and pa at run time so we can use relocatable addresses. */ 41/* We'll modify va and pa at run time so we can use relocatable addresses. */
42#define MMU_INIT(va,pa,n_sec,attr) \ 42#define MMU_INIT(va,pa,n_sec,attr) \
43 .word va ; \ 43 .word va ; \
44 .word pa ; \ 44 .word pa ; \
45 .word n_sec ; \ 45 .word n_sec ; \
46 .word attr ; 46 .word attr ;
47 47
48/* 48/*
49 * Set up a preliminary mapping in the MMU to allow us to run 49 * Set up a preliminary mapping in the MMU to allow us to run
50 * at KERNEL_BASE with caches on. 50 * at KERNEL_BASE with caches on.
51 */ 51 */
52arm_boot_l1pt_init: 52arm_boot_l1pt_init:
53 mov ip, r1 @ save mmu table addr 53 mov ip, r1 @ save mmu table addr
54 /* Build page table from scratch */ 54 /* Build page table from scratch */
55 mov r1, r0 /* Start address to clear memory. */ 55 mov r1, r0 /* Start address to clear memory. */
56 /* Zero the entire table so all virtual addresses are invalid. */ 56 /* Zero the entire table so all virtual addresses are invalid. */
57 mov r2, #L1_TABLE_SIZE /* in bytes */ 57 mov r2, #L1_TABLE_SIZE /* in bytes */
58 mov r3, #0 58 mov r3, #0
59 mov r4, r3 59 mov r4, r3
60 mov r5, r3 60 mov r5, r3
61 mov r6, r3 61 mov r6, r3
62 mov r7, r3 62 mov r7, r3
63 mov r8, r3 63 mov r8, r3
64 mov r10, r3 64 mov r10, r3
65 mov r11, r3 65 mov r11, r3
661: stmia r1!, {r3-r8,r10-r11} 661: stmia r1!, {r3-r8,r10-r11}
67 stmia r1!, {r3-r8,r10-r11} 67 stmia r1!, {r3-r8,r10-r11}
68 stmia r1!, {r3-r8,r10-r11} 68 stmia r1!, {r3-r8,r10-r11}
69 stmia r1!, {r3-r8,r10-r11} 69 stmia r1!, {r3-r8,r10-r11}
70 subs r2, r2, #(4 * 4 * 8) /* bytes per loop */ 70 subs r2, r2, #(4 * 4 * 8) /* bytes per loop */
71 bne 1b 71 bne 1b
72 72
73 /* Now create our entries per the mmu_init_table. */ 73 /* Now create our entries per the mmu_init_table. */
74 l1table .req r0 74 l1table .req r0
75 va .req r1 75 va .req r1
76 pa .req r2 76 pa .req r2
77 n_sec .req r3 77 n_sec .req r3
78 attr .req r4 78 attr .req r4
79 itable .req r5 79 itable .req r5
80 80
81 mov itable, ip @ reclaim table address 81 mov itable, ip @ reclaim table address
82 b 3f 82 b 3f
83 83
842: str pa, [l1table, va, lsl #2] 842: str pa, [l1table, va, lsl #2]
85 add va, va, #1 85 add va, va, #1
86 add pa, pa, #(L1_S_SIZE) 86 add pa, pa, #(L1_S_SIZE)
87 subs n_sec, n_sec, #1 87 subs n_sec, n_sec, #1
88 bhi 2b 88 bhi 2b
89 89
903: ldmia itable!, {va,pa,n_sec,attr} 903: ldmia itable!, {va,pa,n_sec,attr}
91 /* Convert va to l1 offset: va = 4 * (va >> L1_S_SHIFT) */ 91 /* Convert va to l1 offset: va = 4 * (va >> L1_S_SHIFT) */
92 lsr va, va, #L1_S_SHIFT 92 lsr va, va, #L1_S_SHIFT
93 /* Convert pa to l1 entry: pa = (pa & L1_S_FRAME) | attr */ 93 /* Convert pa to l1 entry: pa = (pa & L1_S_FRAME) | attr */
94#ifdef _ARM_ARCH_7 94#ifdef _ARM_ARCH_7
95 bfc pa, #0, #L1_S_SHIFT 95 bfc pa, #0, #L1_S_SHIFT
96#else 96#else
97 lsr pa, pa, #L1_S_SHIFT 97 lsr pa, pa, #L1_S_SHIFT
98 lsl pa, pa, #L1_S_SHIFT 98 lsl pa, pa, #L1_S_SHIFT
99#endif 99#endif
100 orr pa, pa, attr 100 orr pa, pa, attr
101 cmp n_sec, #0 101 cmp n_sec, #0
102 bne 2b 102 bne 2b
103 bx lr @ return 103 bx lr @ return
104 104
105 .unreq va 105 .unreq va
106 .unreq pa 106 .unreq pa
107 .unreq n_sec 107 .unreq n_sec
108 .unreq attr 108 .unreq attr
109 .unreq itable 109 .unreq itable
110 .unreq l1table 110 .unreq l1table
111 111
112#if defined(CPU_CORTEXA8) 112#if defined(CPU_CORTEXA8)
113#undef CPU_CONTROL_SWP_ENABLE // not present on A8 113#undef CPU_CONTROL_SWP_ENABLE // not present on A8
114#define CPU_CONTROL_SWP_ENABLE 0 114#define CPU_CONTROL_SWP_ENABLE 0
115#endif 115#endif
116#ifdef __ARMEL__ 116#ifdef __ARMEL__
 117#undef CPU_CONTROL_EX_BEND // needs to be clear on LE systems 117#define CPU_CONTROL_EX_BEND_SET 0
118#define CPU_CONTROL_EX_BEND 0 118#else
 119#define CPU_CONTROL_EX_BEND_SET CPU_CONTROL_EX_BEND
119#endif 120#endif
120#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS 121#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
121#undef CPU_CONTROL_AFLT_ENABLE 122#define CPU_CONTROL_AFLT_ENABLE_CLR CPU_CONTROL_AFLT_ENABLE
122#define CPU_CONTROL_AFLT_ENABLE 0 123#define CPU_CONTROL_AFLT_ENABLE_SET 0
 124#else
  125#define CPU_CONTROL_AFLT_ENABLE_CLR 0
 126#define CPU_CONTROL_AFLT_ENABLE_SET CPU_CONTROL_AFLT_ENABLE
123#endif 127#endif
124 128
125#define CPU_CONTROL_SET \ 129#define CPU_CONTROL_SET \
126 (CPU_CONTROL_MMU_ENABLE | \ 130 (CPU_CONTROL_MMU_ENABLE | \
127 CPU_CONTROL_AFLT_ENABLE | \ 131 CPU_CONTROL_AFLT_ENABLE_SET | \
128 CPU_CONTROL_EX_BEND | \ 
129 CPU_CONTROL_DC_ENABLE | \ 132 CPU_CONTROL_DC_ENABLE | \
130 CPU_CONTROL_SWP_ENABLE | \ 133 CPU_CONTROL_SWP_ENABLE | \
131 CPU_CONTROL_BPRD_ENABLE | \ 134 CPU_CONTROL_BPRD_ENABLE | \
132 CPU_CONTROL_IC_ENABLE | \ 135 CPU_CONTROL_IC_ENABLE | \
 136 CPU_CONTROL_EX_BEND_SET | \
133 CPU_CONTROL_UNAL_ENABLE) 137 CPU_CONTROL_UNAL_ENABLE)
134 138
 139#define CPU_CONTROL_CLR \
 140 (CPU_CONTROL_AFLT_ENABLE_CLR)
 141
135arm_cpuinit: 142arm_cpuinit:
136 /* 143 /*
137 * In theory, because the MMU is off, we shouldn't need all of this, 144 * In theory, because the MMU is off, we shouldn't need all of this,
138 * but let's not take any chances and do a typical sequence to set 145 * but let's not take any chances and do a typical sequence to set
139 * the Translation Table Base. 146 * the Translation Table Base.
140 */ 147 */
141 mov ip, lr 148 mov ip, lr
142 mov r10, r0 149 mov r10, r0
 150 mov r1, #0
 151
 152 mcr p15, 0, r1, c7, c5, 0 // invalidate I cache
143 153
 144 mcr p15, 0, r10, c7, c5, 0 /* invalidate I cache */ 154 mrc p15, 0, r2, c1, c0, 0 // read SCTLR
 155 movw r1, #(CPU_CONTROL_DC_ENABLE|CPU_CONTROL_IC_ENABLE)
 156 bic r2, r2, r1 // clear I+D cache enable
145 157
146 mrc p15, 0, r2, c1, c0, 0 /* " " " */ 158#ifdef __ARMEB__
 147 bic r2, r2, #CPU_CONTROL_DC_ENABLE @ clear data cache enable 160 * SCTLR.EE determines the endianness of translation table lookups.
148 bic r2, r2, #CPU_CONTROL_IC_ENABLE @ clear instruction cache enable 160 * SCTRL.EE determines the endianness of translation table lookups.
149 mcr p15, 0, r2, c1, c0, 0 /* " " " */ 161 * So we need to make sure it's set before starting to use the new
 162 * translation tables (which are big endian).
 163 */
 164 orr r2, r2, #CPU_CONTROL_EX_BEND
 165 bic r2, r2, #CPU_CONTROL_MMU_ENABLE
 166 pli [pc, #32] /* preload the next few cachelines */
 167 pli [pc, #64]
 168 pli [pc, #96]
 169 pli [pc, #128]
 170#endif
 171
 149 mcr p15, 0, r2, c1, c0, 0 /* " " " */ 172 mcr p15, 0, r2, c1, c0, 0 /* write SCTLR */
150 173
151 XPUTC(#70) 174 XPUTC(#70)
152 mov r1, #0 
153 dsb /* Drain the write buffers. */ 175 dsb /* Drain the write buffers. */
154 1761:
155 XPUTC(#71) 177 XPUTC(#71)
156 mrc p15, 0, r2, c0, c0, 5 /* get MPIDR */ 178 mrc p15, 0, r1, c0, c0, 5 /* get MPIDR */
157 cmp r2, #0 179 cmp r1, #0
 158 orrlt r10, r10, #0x5b /* MP, cacheable (Normal WB) */ 180 orrlt r10, r10, #0x5b /* MP, cacheable (Normal WB) */
159 orrge r10, r10, #0x1b /* Non-MP, cacheable, normal WB */ 181 orrge r10, r10, #0x1b /* Non-MP, cacheable, normal WB */
160 mcr p15, 0, r10, c2, c0, 0 /* Set Translation Table Base */ 182 mcr p15, 0, r10, c2, c0, 0 /* Set Translation Table Base */
161 183
162 XPUTC(#49) 184 XPUTC(#72)
 185 mov r1, #0
163 mcr p15, 0, r1, c2, c0, 2 /* Set Translation Table Control */ 186 mcr p15, 0, r1, c2, c0, 2 /* Set Translation Table Control */
164 187
165 XPUTC(#72) 188 XPUTC(#73)
166 mov r1, #0 189 mov r1, #0
167 mcr p15, 0, r1, c8, c7, 0 /* Invalidate TLBs */ 190 mcr p15, 0, r1, c8, c7, 0 /* Invalidate TLBs */
168 191
169 /* Set the Domain Access register. Very important! */ 192 /* Set the Domain Access register. Very important! */
170 XPUTC(#73) 193 XPUTC(#74)
171 mov r1, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT) 194 mov r1, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
172 mcr p15, 0, r1, c3, c0, 0 195 mcr p15, 0, r1, c3, c0, 0
173 196
174 /* 197 /*
175 * Enable the MMU, etc. 198 * Enable the MMU, etc.
176 */ 199 */
177 XPUTC(#74) 200 XPUTC(#75)
178 mrc p15, 0, r0, c1, c0, 0 201 mrc p15, 0, r0, c1, c0, 0
179 202
180 movw r3, #:lower16:CPU_CONTROL_SET 203 movw r3, #:lower16:CPU_CONTROL_SET
181#if (CPU_CONTROL_SET & 0xffff0000) 204#if (CPU_CONTROL_SET & 0xffff0000)
182 movt r3, #:upper16:CPU_CONTROL_SET 205 movt r3, #:upper16:CPU_CONTROL_SET
183#endif 206#endif
184 orr r0, r0, r3 207 orr r0, r0, r3
 208#if defined(CPU_CONTROL_CLR) && (CPU_CONTROL_CLR != 0)
 209 bic r0, r0, #CPU_CONTROL_CLR
 210#endif
 211 pli 1f
185  212
186 dsb 213 dsb
187 .align 5 
188 @ turn mmu on! 214 @ turn mmu on!
189 mov r0, r0 215 mov r0, r0 /* fetch instruction cacheline */
190 mcr p15, 0, r0, c1, c0, 0 2161: mcr p15, 0, r0, c1, c0, 0
191 217
192 /* 218 /*
193 * Ensure that the coprocessor has finished turning on the MMU. 219 * Ensure that the coprocessor has finished turning on the MMU.
194 */ 220 */
195 mrc p15, 0, r0, c0, c0, 0 /* Read an arbitrary value. */ 221 mrc p15, 0, r0, c0, c0, 0 /* Read an arbitrary value. */
196 mov r0, r0 /* Stall until read completes. */ 222 mov r0, r0 /* Stall until read completes. */
197 XPUTC(#76) 2231: XPUTC(#76)
198 224
199 bx ip /* return */ 225 bx ip /* return */
200 226
201/* 227/*
202 * Coprocessor register initialization values 228 * Coprocessor register initialization values
203 */ 229 */
204 230
205 .p2align 2 231 .p2align 2
206 232
207 /* bits to set in the Control Register */ 233 /* bits to set in the Control Register */
208 234
209#if defined(VERBOSE_INIT_ARM) && XPUTC_COM 235#if defined(VERBOSE_INIT_ARM) && XPUTC_COM
210#define TIMO 0x25000 236#define TIMO 0x25000
211#ifndef COM_MULT 237#ifndef COM_MULT
212#define COM_MULT 1 238#define COM_MULT 1
213#endif 239#endif
214xputc: 240xputc:
215#ifdef MULTIPROCESSOR 241#ifdef MULTIPROCESSOR
 242 adr r3, xputc
 243 movw r2, #:lower16:comlock
 244 movt r2, #:upper16:comlock
 245 bfi r3, r2, #0, #28
216 mov r2, #1 246 mov r2, #1
217 ldr r3, .Lcomlock 
21810: 24710:
219 ldrex r1, [r3] 248 ldrex r1, [r3]
220 cmp r1, #0 249 cmp r1, #0
221 bne 10b 250 bne 10b
222 strex r1, r2, [r3] 251 strex r1, r2, [r3]
223 cmp r1, #0 252 cmp r1, #0
224 bne 10b 253 bne 10b
225 dsb 254 dsb
226#endif 255#endif
227 256
228 mov r2, #TIMO 257 mov r2, #TIMO
229 ldr r3, .Luart0 258#ifdef CONADDR
 259 movw r3, #:lower16:CONADDR
 260 movt r3, #:upper16:CONADDR
 261#elif defined(CONSADDR)
 262 movw r3, #:lower16:CONSADDR
 263 movt r3, #:upper16:CONSADDR
 264#endif
2301: 2651:
231#if COM_MULT == 1 266#if COM_MULT == 1
232 ldrb r1, [r3, #(COM_LSR*COM_MULT)] 267 ldrb r1, [r3, #(COM_LSR*COM_MULT)]
233#else 268#else
234#if COM_MULT == 2 269#if COM_MULT == 2
235 ldrh r1, [r3, #(COM_LSR*COM_MULT)] 270 ldrh r1, [r3, #(COM_LSR*COM_MULT)]
236#elif COM_MULT == 4 271#elif COM_MULT == 4
237 ldr r1, [r3, #(COM_LSR*COM_MULT)] 272 ldr r1, [r3, #(COM_LSR*COM_MULT)]
238#endif 273#endif
239#ifdef COM_BSWAP 274#ifdef COM_BSWAP
240 lsr r1, r1, #(COM_MULT-1)*8 275 lsr r1, r1, #(COM_MULT-1)*8
241#endif 276#endif
242#endif 277#endif
243 tst r1, #LSR_TXRDY 278 tst r1, #LSR_TXRDY
244 bne 2f 279 bne 2f
245 subs r2, r2, #1 280 subs r2, r2, #1
246 bne 1b 281 bne 1b
2472: 2822:
248#if COM_MULT == 1 283#if COM_MULT == 1
249 strb r0, [r3, #COM_DATA] 284 strb r0, [r3, #COM_DATA]
250#else 285#else
251#ifdef COM_BSWAP 286#ifdef COM_BSWAP
252 lsl r0, r0, #(COM_MULT-1)*8 287 lsl r0, r0, #(COM_MULT-1)*8
253#endif 288#endif
254#if COM_MULT == 2 289#if COM_MULT == 2
255 strh r0, [r3, #COM_DATA] 290 strh r0, [r3, #COM_DATA]
256#else 291#else
257 str r0, [r3, #COM_DATA] 292 str r0, [r3, #COM_DATA]
258#endif 293#endif
259#endif 294#endif
260 295
261 mov r2, #TIMO 296 mov r2, #TIMO
2623:  2973:
263#if COM_MULT == 1 298#if COM_MULT == 1
264 ldrb r1, [r3, #(COM_LSR*COM_MULT)] 299 ldrb r1, [r3, #(COM_LSR*COM_MULT)]
265#else 300#else
266#if COM_MULT == 2 301#if COM_MULT == 2
267 ldrh r1, [r3, #(COM_LSR*COM_MULT)] 302 ldrh r1, [r3, #(COM_LSR*COM_MULT)]
268#elif COM_MULT == 4 303#elif COM_MULT == 4
269 ldr r1, [r3, #(COM_LSR*COM_MULT)] 304 ldr r1, [r3, #(COM_LSR*COM_MULT)]
270#endif 305#endif
271#ifdef COM_BSWAP 306#ifdef COM_BSWAP
272 lsr r1, r1, #(COM_MULT-1)*8 307 lsr r1, r1, #(COM_MULT-1)*8
273#endif 308#endif
274#endif 309#endif
275 tst r1, #LSR_TSRE 310 tst r1, #LSR_TSRE
276 bne 4f 311 bne 4f
277 subs r2, r2, #1 312 subs r2, r2, #1
278 bne 3b 313 bne 3b
2794: 3144:
280#ifdef MULTIPROCESSOR 315#ifdef MULTIPROCESSOR
281 ldr r3, .Lcomlock 316 adr r3, xputc
 317 movw r2, #:lower16:comlock
 318 movt r2, #:upper16:comlock
 319 bfi r3, r2, #0, #28
282 mov r0, #0 320 mov r0, #0
283 str r0, [r3] 321 str r0, [r3]
284 dsb 322 dsb
285#endif 323#endif
286 bx lr 324 bx lr
287 325
288.Luart0: 
289#ifdef CONADDR 
290 .word CONADDR 
291#elif defined(CONSADDR) 
292 .word CONSADDR 
293#endif 
294 
295#ifdef MULTIPROCESSOR 326#ifdef MULTIPROCESSOR
296.Lcomlock: 
297 .word comlock 
298 
299 .pushsection .data 327 .pushsection .data
300comlock: 328comlock:
301 .p2align 2 329 .p2align 4
302 .word 0 @ not in bss 330 .word 0 @ not in bss
 331 .p2align 4
303 332
304 .popsection 333 .popsection
305#endif /* MULTIPROCESSOR */ 334#endif /* MULTIPROCESSOR */
306#endif /* VERBOSE_INIT_ARM */ 335#endif /* VERBOSE_INIT_ARM */
307 336
308#ifdef CPU_CORTEXA9 337cortex_init:
309a9_start: 
310 mov r10, lr @ save lr 338 mov r10, lr @ save lr
311 339
312 cpsid if, #PSR_SVC32_MODE 340 cpsid if, #PSR_SVC32_MODE
313 341
314 XPUTC(#64) 342 XPUTC(#64)
315 bl _C_LABEL(armv7_icache_inv_all) @ invalidate i-cache 343 adr ip, cortex_init
 344 movw r0, #:lower16:_C_LABEL(armv7_icache_inv_all)
 345 movt r0, #:upper16:_C_LABEL(armv7_icache_inv_all)
 346 bfi ip, r0, #0, #28
 347 blx ip @ toss i-cache
316 348
 349#ifdef CPU_CORTEXA9
317 /* 350 /*
318 * Step 1a, invalidate the all cache tags in all ways on the SCU. 351 * Step 1a, invalidate the all cache tags in all ways on the SCU.
319 */ 352 */
320 XPUTC(#65) 353 XPUTC(#65)
321 mrc p15, 4, r3, c15, c0, 0 @ read cbar 354 mrc p15, 4, r3, c15, c0, 0 @ read cbar
322 ldr r0, [r3, #SCU_CFG] @ read scu config 355 ldr r0, [r3, #SCU_CFG] @ read scu config
323 and r0, r0, #7 @ get cpu max 356 and r0, r0, #7 @ get cpu max
324 add r0, r0, #2 @ adjust to cpu num 357 add r0, r0, #2 @ adjust to cpu num
325 mov r1, #0xf @ select all ways 358 mov r1, #0xf @ select all ways
326 lsl r1, r1, r0 @ shift into place 359 lsl r1, r1, r0 @ shift into place
327 str r1, [r3, #SCU_INV_ALL_REG] @ write scu invalidate all 360 str r1, [r3, #SCU_INV_ALL_REG] @ write scu invalidate all
328 dsb 361 dsb
329 isb 362 isb
 363#endif
330 364
331 /* 365 /*
332 * Step 1b, invalidate the data cache 366 * Step 1b, invalidate the data cache
333 */ 367 */
334 XPUTC(#66) 368 XPUTC(#66)
335 bl _C_LABEL(armv7_dcache_wbinv_all) @ writeback/invalidate d-cache 369 adr ip, cortex_init
 370 movw r0, #:lower16:_C_LABEL(armv7_dcache_wbinv_all)
 371 movt r0, #:upper16:_C_LABEL(armv7_dcache_wbinv_all)
 372 bfi ip, r0, #0, #28
 373 blx ip @ writeback & toss d-cache
336 XPUTC(#67) 374 XPUTC(#67)
337 375
 376#ifdef CPU_CORTEXA9
338 /* 377 /*
339 * Step 2, disable the data cache 378 * Step 2, disable the data cache
340 */ 379 */
341 mrc p15, 0, r2, c1, c0, 0 @ get system ctl register (save) 380 mrc p15, 0, r2, c1, c0, 0 @ get system ctl register (save)
342 bic r1, r2, #CPU_CONTROL_DC_ENABLE @ clear data cache enable 381 bic r1, r2, #CPU_CONTROL_DC_ENABLE @ clear data cache enable
343 mcr p15, 0, r1, c1, c0, 0 @ set system ctl register 382 mcr p15, 0, r1, c1, c0, 0 @ set system ctl register
344 isb 383 isb
345 XPUTC(#49) 384 XPUTC(#49)
346 385
347 /* 386 /*
348 * Step 3, enable the SCU (and set SMP mode) 387 * Step 3, enable the SCU (and set SMP mode)
349 */ 388 */
350 mrc p15, 4, r3, c15, c0, 0 @ read cbar 389 mrc p15, 4, r3, c15, c0, 0 @ read cbar
351 ldr r1, [r3, #SCU_CTL] @ read scu control 390 ldr r1, [r3, #SCU_CTL] @ read scu control
352 orr r1, r1, #SCU_CTL_SCU_ENA @ set scu enable flag 391 orr r1, r1, #SCU_CTL_SCU_ENA @ set scu enable flag
353 str r1, [r3, #SCU_CTL] @ write scu control 392 str r1, [r3, #SCU_CTL] @ write scu control
354 dsb 393 dsb
355 isb 394 isb
356 XPUTC(#50) 395 XPUTC(#50)
357 396
358 /* 397 /*
359 * Step 4a, enable the data cache 398 * Step 4a, enable the data cache
360 */ 399 */
361 orr r2, r2, #CPU_CONTROL_DC_ENABLE @ set data cache enable 400 orr r2, r2, #CPU_CONTROL_DC_ENABLE @ set data cache enable
362 mcr p15, 0, r2, c1, c0, 0 @ reenable caches 401 mcr p15, 0, r2, c1, c0, 0 @ reenable caches
363 isb 402 isb
364 XPUTC(#51) 403 XPUTC(#51)
 404#endif
365 405
366#ifdef MULTIPROCESSOR 406#ifdef MULTIPROCESSOR
367 /* 407 /*
 368 * Step 4b, set ACTLR.SMP=1 (and ACTLR.FW=1) 408 * Step 4b, set ACTLR.SMP=1 (and on A9, ACTLR.FW=1)
369 */ 409 */
370 mrc p15, 0, r0, c1, c0, 1 @ read aux ctl 410 mrc p15, 0, r0, c1, c0, 1 @ read aux ctl
371 orr r0, r0, #CORTEXA9_AUXCTL_SMP @ enable SMP 411 orr r0, r0, #CORTEXA9_AUXCTL_SMP @ enable SMP
372 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl 412 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl
373 isb 413 isb
 414#ifdef CPU_CORTEXA9
374 orr r0, r0, #CORTEXA9_AUXCTL_FW @ enable cache/tlb/coherency 415 orr r0, r0, #CORTEXA9_AUXCTL_FW @ enable cache/tlb/coherency
375 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl 416 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl
376 isb 417 isb
377 XPUTC(#52) 
378#endif 418#endif
 419 XPUTC(#52)
 420#endif /* MULTIPROCESSOR */
379 421
380 bx r10 422 bx r10
381ASEND(a9_start) 423ASEND(cortex_init)
382 424
383/* 425/*
384 * Secondary processors come here after exiting the SKU ROM. 426 * Secondary processors come here after exiting the SKU ROM.
 427 * Running native endian until we have SMP enabled. Since no data
 428 * is accessed, that shouldn't be a problem.
385 */ 429 */
386a9_mpstart: 430cortex_mpstart:
387#ifdef MULTIPROCESSOR 431 cpsid if, #PSR_SVC32_MODE @ make sure we are in SVC mode
 432 mrs r0, cpsr @ fetch CPSR value
  433 msr spsr_sxc, r0 @ set SPSR[23:0] to known value
 434
 435#ifndef MULTIPROCESSOR
 436 /*
 437 * If not MULTIPROCESSOR, drop CPU into power saving state.
 438 */
 4393: wfe
 440 b 3b
 441#else
388 /* 442 /*
389 * Step 1, invalidate the caches 443 * Step 1, invalidate the caches
390 */ 444 */
391 bl _C_LABEL(armv7_icache_inv_all) @ toss i-cache 445 adr ip, cortex_mpstart
392 bl _C_LABEL(armv7_dcache_inv_all) @ toss d-cache 446 movw r0, #:lower16:_C_LABEL(armv7_icache_inv_all)
 447 movt r0, #:upper16:_C_LABEL(armv7_icache_inv_all)
 448 bfi ip, r0, #0, #28
 449 blx ip @ toss i-cache
 450 adr ip, cortex_mpstart
  451 movw r0, #:lower16:_C_LABEL(armv7_dcache_inv_all)
  452 movt r0, #:upper16:_C_LABEL(armv7_dcache_inv_all)
 453 bfi ip, r0, #0, #28
 454 blx ip @ toss d-cache
393 455
 456#if defined(CPU_CORTEXA9)
394 /* 457 /*
395 * Step 2, wait for the SCU to be enabled 458 * Step 2, wait for the SCU to be enabled
396 */ 459 */
397 mrc p15, 4, r3, c15, c0, 0 @ read cbar 460 mrc p15, 4, r3, c15, c0, 0 @ read cbar
3981: ldr r0, [r3, #SCU_CTL] @ read scu control 4611: ldr r0, [r3, #SCU_CTL] @ read scu control
399 tst r0, #SCU_CTL_SCU_ENA @ enable bit set yet? 462 tst r0, #SCU_CTL_SCU_ENA @ enable bit set yet?
 400 beq 1b @ try again 463 beq 1b @ try again
 464#endif
401 465
402 /* 466 /*
 403 * Step 3, set ACTLR.SMP=1 (and ACTLR.FW=1) 467 * Step 3, set ACTLR.SMP=1 (and on A9, ACTLR.FW=1)
404 */ 468 */
405 mrc p15, 0, r0, c1, c0, 1 @ read aux ctl 469 mrc p15, 0, r0, c1, c0, 1 @ read aux ctl
406 orr r0, #CORTEXA9_AUXCTL_SMP @ enable SMP 470 orr r0, #CORTEXA9_AUXCTL_SMP @ enable SMP
407 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl 471 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl
408 mov r0, r0 472 mov r0, r0
 473#if defined(CPU_CORTEXA9)
409 orr r0, #CORTEXA9_AUXCTL_FW @ enable cache/tlb/coherency 474 orr r0, #CORTEXA9_AUXCTL_FW @ enable cache/tlb/coherency
410 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl 475 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl
411 mov r0, r0 476 mov r0, r0
 477#endif
412 478
413 /* 479 /*
414 * We should be in SMP mode now. 480 * We should be in SMP mode now.
415 */ 481 */
416 mrc p15, 0, r4, c0, c0, 5 @ get MPIDR 482 mrc p15, 0, r4, c0, c0, 5 @ get MPIDR
 417 and r4, r4, #7 @ get our cpu number 483 and r4, r4, #7 @ get our cpu number
418 484
 485#ifdef __ARMEB__
 486 setend be @ switch to BE now
 487#endif
 488
419#if defined(VERBOSE_INIT_ARM) 489#if defined(VERBOSE_INIT_ARM)
420 add r0, r4, #48 490 add r0, r4, #48
421 bl xputc 491 bl xputc
422#endif 492#endif
423 493
424 ldr r0, .Lcpu_hatched @ now show we've hatched 494 /*
  495 * To access things that are not in .start, we need to replace the upper
  496 * 4 bits of the address with the upper 4 bits of where we are currently executing.
 497 */
 498 adr r10, cortex_mpstart
 499 lsr r10, r10, #28
 500
 501 movw r0, #:lower16:_C_LABEL(arm_cpu_hatched)
 502 movt r0, #:upper16:_C_LABEL(arm_cpu_hatched)
 503 bfi r0, r10, #28, #4 // replace top 4 bits
425 mov r5, #1 505 mov r5, #1
426 lsl r5, r5, r4 506 lsl r5, r5, r4
427 mov r1, r5 507 /*
428 bl _C_LABEL(atomic_or_32) 508 * We inline the atomic_or_32 call since we might be in a different
 509 * area of memory.
 510 */
 5112: ldrex r1, [r0]
 512 orr r1, r1, r5
 513 strex r2, r1, [r0]
 514 cmp r2, #0
 515 bne 2b
429 516
430 XPUTC(#97) 517 XPUTC(#97)
431#endif 
432 
433 cpsid if, #PSR_SVC32_MODE @ make sure we are in SVC mode 
434 518
 435 /* Now we will wait for someone to tell this cpu to start running */ 519 /* Now we will wait for someone to tell this cpu to start running */
436#ifdef MULTIPROCESSOR 520 movw r0, #:lower16:_C_LABEL(arm_cpu_mbox)
437 ldr r0, .Lcpu_mbox 521 movt r0, #:upper16:_C_LABEL(arm_cpu_mbox)
438#else 522 bfi r0, r10, #28, #4
 439 cmp r0, r0 
440#endif 5243: dmb
4412: 
442#ifdef MULTIPROCESSOR 
443 dmb 
444 ldr r2, [r0] 525 ldr r2, [r0]
445 tst r2, r5 526 tst r2, r5
446#endif 527 wfeeq
447 @wfeeq 528 beq 3b
448 beq 2b 
449 529
450#ifdef MULTIPROCESSOR 530 XPUTC(#98)
4513: XPUTC(#98) 531 movw r0, #:lower16:_C_LABEL(arm_cpu_marker)
452 ldr r0, .Lcpu_marker 532 movt r0, #:upper16:_C_LABEL(arm_cpu_marker)
 533 bfi r0, r10, #28, #4
453 str pc, [r0] 534 str pc, [r0]
454 535
455 ldr r0, .Lkernel_l1pt /* get address of l1pt pvaddr */ 536 movw r0, #:lower16:_C_LABEL(kernel_l1pt)
 537 movt r0, #:upper16:_C_LABEL(kernel_l1pt)
 538 bfi r0, r10, #28, #4 /* get address of l1pt pvaddr */
456 ldr r0, [r0, #PV_PA] /* Now get the phys addr */ 539 ldr r0, [r0, #PV_PA] /* Now get the phys addr */
457 bl cpu_init 540 /*
 458 541 * After we turn on the MMU, we will no longer be in .start, so set up
 459 ldr r0, .Lcpu_marker 542 * the return to the rest of the MP startup code in .text.
460 str pc, [r0] 543 */
 544 movw lr, #:lower16:cortex_mpcontinuation
 545 movt lr, #:upper16:cortex_mpcontinuation
 546 b arm_cpuinit
 547#endif /* MULTIPROCESSOR */
 548ASEND(cortex_mpstart)
461 549
 550#ifdef MULTIPROCESSOR
 551 .pushsection .text
 552cortex_mpcontinuation:
462 /* MMU, L1, are now on. */ 553 /* MMU, L1, are now on. */
463 554
464 ldr r0, .Lcpu_info /* get pointer to cpu_infos */ 555 movw r0, #:lower16:_C_LABEL(arm_cpu_marker)
 556 movt r0, #:upper16:_C_LABEL(arm_cpu_marker)
 557 str pc, [r0]
 558
 559 movw r0, #:lower16:cpu_info
 560 movt r0, #:upper16:cpu_info /* get pointer to cpu_infos */
465 ldr r5, [r0, r4, lsl #2] /* load our cpu_info */ 561 ldr r5, [r0, r4, lsl #2] /* load our cpu_info */
466 ldr r6, [r5, #CI_IDLELWP] /* get the idlelwp */ 562 ldr r6, [r5, #CI_IDLELWP] /* get the idlelwp */
467 ldr r7, [r6, #L_PCB] /* now get its pcb */ 563 ldr r7, [r6, #L_PCB] /* now get its pcb */
468 ldr sp, [r7, #PCB_SP] /* finally, we can load our SP */ 564 ldr sp, [r7, #PCB_KSP] /* finally, we can load our SP */
469#ifdef TPIDRPRW_IS_CURCPU 565#ifdef TPIDRPRW_IS_CURCPU
470 mcr p15, 0, r5, c13, c0, 4 /* squirrel away curcpu() */ 566 mcr p15, 0, r5, c13, c0, 4 /* squirrel away curcpu() */
471#elif defined(TPIDRPRW_IS_CURLWP) 567#elif defined(TPIDRPRW_IS_CURLWP)
472 mcr p15, 0, r6, c13, c0, 4 /* squirrel away curlwp() */ 568 mcr p15, 0, r6, c13, c0, 4 /* squirrel away curlwp() */
473#else 569#else
474#error either TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP must be defined 570#error either TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP must be defined
475#endif 571#endif
476 str r6, [r5, #CI_CURLWP] /* and note we are running on it */ 572 str r6, [r5, #CI_CURLWP] /* and note we are running on it */
477 573
 478 ldr r0, .Lcpu_marker 574 str pc, [r0] // r0 still has arm_cpu_marker
479 str pc, [r0] 
480 575
481 mov r0, r5 /* pass cpu_info */ 576 mov r0, r5 // pass cpu_info
482 mov r1, r4 /* pass cpu_id */ 577 mov r1, r4 // pass cpu_id
483 ldr r2, .Lbcm53xx_cpu_hatch /* pass md_cpu_hatch */ 578 movw r2, #:lower16:MD_CPU_HATCH // pass md_cpu_hatch
 579 movt r2, #:upper16:MD_CPU_HATCH // pass md_cpu_hatch
484 bl _C_LABEL(cpu_hatch) 580 bl _C_LABEL(cpu_hatch)
485 b _C_LABEL(idle_loop) 581 b _C_LABEL(idle_loop)
486ASEND(a9_mpstart) 582ASEND(cortex_mpcontinuation)
487 /* NOT REACHED */ 583 /* NOT REACHED */
488 584 .popsection
489.Lkernel_l1pt: 
490 .word _C_LABEL(kernel_l1pt) 
491.Lcpu_info: 
492 .word _C_LABEL(cpu_info) 
493.Lcpu_max: 
494 .word _C_LABEL(arm_cpu_max) 
495.Lcpu_hatched: 
496 .word _C_LABEL(arm_cpu_hatched) 
497.Lcpu_mbox: 
498 .word _C_LABEL(arm_cpu_mbox) 
499.Lcpu_marker: 
500 .word _C_LABEL(arm_cpu_marker) 
501.Lbcm53xx_cpu_hatch: 
502 .word _C_LABEL(bcm53xx_cpu_hatch) 
503#endif /* MULTIPROCESSOR */ 585#endif /* MULTIPROCESSOR */
504#endif /* CPU_CORTEXA9 */ 
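An aside on the mailbox wait above: the rework lets the secondaries sleep
in wfe instead of busy-spinning (the old code had the wfeeq commented
out).  A condensed sketch of the handshake, using the same register roles
as the diff; the waking side shown here is illustrative, since the kernel
code that sets arm_cpu_mbox is outside this change:

	/* secondary: wait until our bit appears in arm_cpu_mbox */
3:	dmb				@ observe the latest mailbox store
	ldr	r2, [r0]		@ r0 = &arm_cpu_mbox (region-adjusted)
	tst	r2, r5			@ r5 = 1 << cpu number
	wfeeq				@ bit clear: sleep until an event
	beq	3b			@ recheck after wakeup

	/* waker: publish the bit, then signal the sleepers */
	str	r1, [r0]		@ r1 = mailbox value with bit set
	dsb				@ order the store before the event
	sev				@ wake any CPU sitting in wfe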

cvs diff -r1.10 -r1.11 src/sys/arch/evbarm/bcm53xx/Attic/bcm53xx_start.S

--- src/sys/arch/evbarm/bcm53xx/Attic/bcm53xx_start.S 2014/01/24 04:15:33 1.10
+++ src/sys/arch/evbarm/bcm53xx/Attic/bcm53xx_start.S 2014/02/21 22:22:48 1.11
@@ -1,205 +1,203 @@
1/* $NetBSD: bcm53xx_start.S,v 1.10 2014/01/24 04:15:33 matt Exp $ */ 1/* $NetBSD: bcm53xx_start.S,v 1.11 2014/02/21 22:22:48 matt Exp $ */
2/*- 2/*-
3 * Copyright (c) 2012 The NetBSD Foundation, Inc. 3 * Copyright (c) 2012 The NetBSD Foundation, Inc.
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * This code is derived from software contributed to The NetBSD Foundation 6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas of 3am Software Foundry. 7 * by Matt Thomas of 3am Software Foundry.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE. 28 * POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31//#define GXEMUL 31//#define GXEMUL
32 32
33#include "opt_broadcom.h" 33#include "opt_broadcom.h"
34#include "opt_cpuoptions.h" 34#include "opt_cpuoptions.h"
35#include "opt_cputypes.h" 35#include "opt_cputypes.h"
36#include "opt_multiprocessor.h" 36#include "opt_multiprocessor.h"
37 37
38#include <arm/asm.h> 38#include <arm/asm.h>
39#include <arm/armreg.h> 39#include <arm/armreg.h>
40#include "assym.h" 40#include "assym.h"
41 41
42#include <arm/cortex/a9tmr_reg.h> 42#include <arm/cortex/a9tmr_reg.h>
43 43
44#ifndef CONADDR 44#ifndef CONADDR
45#define CONADDR 0x18000300 45#define CONADDR 0x18000300
46#endif 46#endif
47 47
48RCSID("$NetBSD: bcm53xx_start.S,v 1.10 2014/01/24 04:15:33 matt Exp $") 48RCSID("$NetBSD: bcm53xx_start.S,v 1.11 2014/02/21 22:22:48 matt Exp $")
49 49
50#undef VERBOSE_INIT_ARM 50#undef VERBOSE_INIT_ARM
51#define VERBOSE_INIT_ARM 51#define VERBOSE_INIT_ARM
52 52
53#if defined(VERBOSE_INIT_ARM) 53#if defined(VERBOSE_INIT_ARM)
54#define XPUTC(n) mov r0, n; bl xputc 54#define XPUTC(n) mov r0, n; bl xputc
55#define XPUTC_COM 1 55#define XPUTC_COM 1
56#else 56#else
57#define XPUTC(n) 57#define XPUTC(n)
58#endif 58#endif
59 59
60#define MD_CPU_HATCH bcm53xx_cpu_hatch 60#define MD_CPU_HATCH bcm53xx_cpu_hatch
61 61
62/* 62/*
63 * Kernel start routine for BCM5301X boards. 63 * Kernel start routine for BCM5301X boards.
64 * At this point, this code has been loaded into SDRAM 64 * At this point, this code has been loaded into SDRAM
65 * and the MMU is off 65 * and the MMU is off
66 */ 66 */
67 .text 67 .text
68 68
69 .global _C_LABEL(bcm53xx_start) 69 .global _C_LABEL(bcm53xx_start)
70_C_LABEL(bcm53xx_start): 70_C_LABEL(bcm53xx_start):
71#ifdef __ARMEB__ 71#ifdef __ARMEB__
72 setend be /* make sure we are running big endian */ 72 setend be /* make sure we are running big endian */
73#endif 73#endif
74 /* 74 /*
75 * Save any arguments u-boot passed us. 75 * Save any arguments u-boot passed us.
76 */ 76 */
77 movw r4, #:lower16:(uboot_args-.LPIC0) 77 adr r4, _C_LABEL(bcm53xx_start)
78 movt r4, #:upper16:(uboot_args-.LPIC0) 78 movw r5, #:lower16:uboot_args
79 bic r4, r4, #0xf0000000 79 movt r5, #:upper16:uboot_args
80 add r4, r4, pc 80 bfi r4, r5, #0, #28
81 stmia r4, {r0-r3} 81 stmia r4, {r0-r3}
82.LPIC0: 82.LPIC0:
83 83
84 /* 84 /*
85 * Let's turn on the CCA watchdog in case something goes horribly wrong. 85 * Let's turn on the CCA watchdog in case something goes horribly wrong.
86 */ 86 */
87 ldr r0, .Lcca_wdog 87 ldr r0, .Lcca_wdog
88 ldr r1, .Lcca_wdog + 4 88 ldr r1, .Lcca_wdog + 4
89 str r1, [r0] 89 str r1, [r0]
90 90
91 /* 91 /*
 92 * Call the initial start code for the a9 92 * Call the initial start code for Cortex cores
93 */ 93 */
94 bl a9_start 94 bl cortex_init
95 95
96 /* 96 /*
97 * Set up a preliminary mapping in the MMU to allow us to run 97 * Set up a preliminary mapping in the MMU to allow us to run
98 * at KERNEL_BASE with caches on. 98 * at KERNEL_BASE with caches on.
99 */ 99 */
  100 movw r1, #:lower16:(mmu_init_table-.LPIC1)
  101 add r1, r1, pc
 100 ldr r0, .Ltemp_l1_table /* The L1PT address - entered into TTB later */ 102 ldr r0, .Ltemp_l1_table /* The L1PT address - entered into TTB later */
  103.LPIC1: /* pc reads as .+8, so the label sits 8 bytes after the add */
101 adr r1, mmu_init_table 
102 bl arm_boot_l1pt_init 104 bl arm_boot_l1pt_init
103 105
104 XPUTC(#68) 106 XPUTC(#68)
105 107
106 /* 108 /*
107 * Before we turn on the MMU, let's the other process out of the 109 * Before we turn on the MMU, let's the other process out of the
108 * SKU ROM but setting the magic LUT address to our own mp_start 110 * SKU ROM but setting the magic LUT address to our own mp_start
109 * routine.  111 * routine.
110 */ 112 */
111 ldr r1, .Lsku_rom_lut 113 movw r1, #:lower16:0xffff0400
112 adr r2, a9_mpstart 114 movt r1, #:upper16:0xffff0400
 115 adr r2, cortex_mpstart
113 str r2, [r1] 116 str r2, [r1]
114 sev /* wake up the others */ 117 sev /* wake up the others */
115 118
116 /* 119 /*
117 * init the CPU TLB, Cache, MMU. 120 * init the CPU TLB, Cache, MMU.
118 */ 121 */
119 XPUTC(#69) 122 XPUTC(#69)
120 123
121 ldr r0, .Ltemp_l1_table /* The page table address */ 124 ldr r0, .Ltemp_l1_table /* The page table address */
122 bl arm_cpuinit 125 bl arm_cpuinit
123 126
124 XPUTC(#89) 127 XPUTC(#89)
125 128
 129 adr r1, bcm53xx_start
126 movw r0, #:lower16:uboot_args 130 movw r0, #:lower16:uboot_args
127 movt r0, #:upper16:uboot_args 131 movt r0, #:upper16:uboot_args
 132 bfi r1, r0, #0, #28
128 ldr r2, [r0] 133 ldr r2, [r0]
129 movw r1, #:lower16:(uboot_args-.LPIC1) 134 ldr r3, [r1]
 130 movt r1, #:upper16:(uboot_args-.LPIC1) 135 cmp r2, r3
131 add r1, r1, pc 
132 ldr r1, [r1] 
133.LPIC1: 
134 cmp r1, r2 
1351: bne 1b 1361: bne 1b
136 137
137 XPUTC(#90) 138 XPUTC(#90)
138 139
139 /* 140 /*
140 * Let's turn off the CCA watchdog since nothing went horribly wrong. 141 * Let's turn off the CCA watchdog since nothing went horribly wrong.
141 */ 142 */
142 ldr r0, .Lcca_wdog 143 ldr r0, .Lcca_wdog
143 mov r1, #0 144 mov r1, #0
144 str r1, [r0] 145 str r1, [r0]
145 146
146 XPUTC(#33) 147 XPUTC(#33)
147 XPUTC(#10) 148 XPUTC(#10)
148 XPUTC(#13) 149 XPUTC(#13)
149 /* 150 /*
150 * Jump to start in locore.S, which in turn will call initarm and main. 151 * Jump to start in locore.S, which in turn will call initarm and main.
151 */ 152 */
152 movw ip, #:lower16:start 153 movw ip, #:lower16:start
153 movt ip, #:upper16:start 154 movt ip, #:upper16:start
154 bx ip 155 bx ip
155 nop 156 nop
156 nop 157 nop
157 nop 158 nop
158 nop 159 nop
159 160
160 /* NOTREACHED */ 161 /* NOTREACHED */
161 162
162.Lsku_rom_lut: 
163 .word 0xffff0400 
164 
165.Lcca_wdog: 163.Lcca_wdog:
166 .word 0x18000080 164 .word 0x18000080
167 .word 0x0fffffff /* maximum watchdog time out, about 10 seconds */ 165 .word 0x0fffffff /* maximum watchdog time out, about 10 seconds */
168 166
169.Ltemp_l1_table: 167.Ltemp_l1_table:
170 /* Put the temporary L1 translation table far enough away. */ 168 /* Put the temporary L1 translation table far enough away. */
171 .word 31 * 0x100000 - L1_TABLE_SIZE 169 .word 31 * 0x100000 - L1_TABLE_SIZE
172 170
173#include <arm/cortex/a9_mpsubr.S> 171#include <arm/cortex/a9_mpsubr.S>
174 172
175mmu_init_table: 173mmu_init_table:
176 /* Add 128MB of VA==PA at 0x80000000 so we can keep the kernel going */ 174 /* Add 128MB of VA==PA at 0x80000000 so we can keep the kernel going */
177#ifdef BCM5301X 175#ifdef BCM5301X
178 MMU_INIT(KERNEL_BASE, 0x80000000, 128, 176 MMU_INIT(KERNEL_BASE, 0x80000000, 128,
179 L1_S_PROTO | L1_S_APv7_KRW | L1_S_B | L1_S_C | L1_S_V6_S) 177 L1_S_PROTO | L1_S_APv7_KRW | L1_S_B | L1_S_C | L1_S_V6_S)
180#elif defined(BCM563XX) 178#elif defined(BCM563XX)
181 MMU_INIT(KERNEL_BASE, 0x60000000, 128, 179 MMU_INIT(KERNEL_BASE, 0x60000000, 128,
182 L1_S_PROTO | L1_S_APv7_KRW | L1_S_B | L1_S_C | L1_S_V6_S) 180 L1_S_PROTO | L1_S_APv7_KRW | L1_S_B | L1_S_C | L1_S_V6_S)
183#endif 181#endif
184 182
185 MMU_INIT(0, 0x00000000, 183 MMU_INIT(0, 0x00000000,
186 (32 * L1_S_SIZE + L1_S_SIZE - 1) / L1_S_SIZE, 184 (32 * L1_S_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
187 L1_S_PROTO | L1_S_APv7_KRW | L1_S_B | L1_S_C | L1_S_V6_S) 185 L1_S_PROTO | L1_S_APv7_KRW | L1_S_B | L1_S_C | L1_S_V6_S)
188 186
189 /* Map the 2MB of primary peripherals */ 187 /* Map the 2MB of primary peripherals */
190 MMU_INIT(KERNEL_IO_IOREG_VBASE, BCM53XX_IOREG_PBASE, 188 MMU_INIT(KERNEL_IO_IOREG_VBASE, BCM53XX_IOREG_PBASE,
191 (BCM53XX_IOREG_SIZE + L1_S_SIZE - 1) / L1_S_SIZE, 189 (BCM53XX_IOREG_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
192 L1_S_PROTO | L1_S_APv7_KRW) 190 L1_S_PROTO | L1_S_APv7_KRW)
193 191
194 /* Map the 2MB of primary peripherals at PA:VA 1:1 */ 192 /* Map the 2MB of primary peripherals at PA:VA 1:1 */
195 MMU_INIT(BCM53XX_IOREG_PBASE, BCM53XX_IOREG_PBASE, 193 MMU_INIT(BCM53XX_IOREG_PBASE, BCM53XX_IOREG_PBASE,
196 (BCM53XX_IOREG_SIZE + L1_S_SIZE - 1) / L1_S_SIZE, 194 (BCM53XX_IOREG_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
197 L1_S_PROTO | L1_S_APv7_KRW) 195 L1_S_PROTO | L1_S_APv7_KRW)
198 196
199 /* Map the 1MB of armcore peripherals */ 197 /* Map the 1MB of armcore peripherals */
200 MMU_INIT(KERNEL_IO_ARMCORE_VBASE, BCM53XX_ARMCORE_PBASE, 198 MMU_INIT(KERNEL_IO_ARMCORE_VBASE, BCM53XX_ARMCORE_PBASE,
201 (BCM53XX_ARMCORE_SIZE + L1_S_SIZE - 1) / L1_S_SIZE, 199 (BCM53XX_ARMCORE_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
202 L1_S_PROTO | L1_S_APv7_KRW) 200 L1_S_PROTO | L1_S_APv7_KRW)
203 201
204 /* end of table */ 202 /* end of table */
205 MMU_INIT(0, 0, 0, 0) 203 MMU_INIT(0, 0, 0, 0)
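For reference, arm_boot_l1pt_init consumes each MMU_INIT row above as a
(va, pa, n_sec, attr) tuple and emits n_sec consecutive 1MB section
entries.  A worked expansion of the first BCM5301X row, written as
commentary (L1_S_SHIFT is 20, so each section covers 1MB):

	/* MMU_INIT(KERNEL_BASE, 0x80000000, 128, attr) behaves like:	*/
	/* for (i = 0; i < 128; i++)					*/
	/*	l1table[(KERNEL_BASE >> L1_S_SHIFT) + i] =		*/
	/*	    ((0x80000000 + (i << L1_S_SHIFT)) & L1_S_FRAME) | attr; */
	/* i.e. 128 sections mapping KERNEL_BASE..+128MB to		*/
	/* PA 0x80000000..+128MB, cacheable and shareable.		*/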

cvs diff -r1.5 -r1.6 src/sys/arch/evbarm/cubie/Attic/cubie_start.S

--- src/sys/arch/evbarm/cubie/Attic/cubie_start.S 2014/01/24 05:13:06 1.5
+++ src/sys/arch/evbarm/cubie/Attic/cubie_start.S 2014/02/21 22:22:48 1.6
@@ -1,154 +1,168 @@
1/*- 1/*-
2 * Copyright (c) 2014 The NetBSD Foundation, Inc. 2 * Copyright (c) 2014 The NetBSD Foundation, Inc.
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This code is derived from software contributed to The NetBSD Foundation 5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas of 3am Software Foundry. 6 * by Matt Thomas of 3am Software Foundry.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE. 27 * POSSIBILITY OF SUCH DAMAGE.
28 */ 28 */
29 29
30#include "opt_allwinner.h" 30#include "opt_allwinner.h"
31#include "opt_com.h" 31#include "opt_com.h"
32#include "opt_cpuoptions.h" 32#include "opt_cpuoptions.h"
33#include "opt_cputypes.h" 33#include "opt_cputypes.h"
34#include "opt_multiprocessor.h" 34#include "opt_multiprocessor.h"
35 35
36#include <arm/asm.h> 36#include <arm/asm.h>
37#include <arm/armreg.h> 37#include <arm/armreg.h>
38#include "assym.h" 38#include "assym.h"
39 39
40#include <arm/allwinner/awin_reg.h> 40#include <arm/allwinner/awin_reg.h>
41#include <evbarm/cubie/platform.h>  41#include <evbarm/cubie/platform.h>
42 42
43RCSID("$NetBSD: cubie_start.S,v 1.5 2014/01/24 05:13:06 matt Exp $") 43RCSID("$NetBSD: cubie_start.S,v 1.6 2014/02/21 22:22:48 matt Exp $")
44 44
45#if defined(VERBOSE_INIT_ARM) 45#if defined(VERBOSE_INIT_ARM)
46#define XPUTC(n) mov r0, n; bl xputc 46#define XPUTC(n) mov r0, n; bl xputc
47#ifdef __ARMEB__ 47#ifdef __ARMEB__
48#define COM_BSWAP 48#define COM_BSWAP
49#endif 49#endif
50#define COM_MULT 4 50#define COM_MULT 4
51#define XPUTC_COM 1 51#define XPUTC_COM 1
52#else 52#else
53#define XPUTC(n) 53#define XPUTC(n)
54#endif 54#endif
55 55
56#define INIT_MEMSIZE 128 56#define INIT_MEMSIZE 128
57#define TEMP_L1_TABLE (AWIN_SDRAM_PBASE + INIT_MEMSIZE * 0x100000 - L1_TABLE_SIZE) 57#define TEMP_L1_TABLE (AWIN_SDRAM_PBASE + INIT_MEMSIZE * 0x100000 - L1_TABLE_SIZE)
58 58
 59#define MD_CPU_HATCH _C_LABEL(awin_cpu_hatch)
 60
59/* 61/*
 60 * Kernel start routine for CUBIEBOARD boards. 62 * Kernel start routine for CUBIEBOARD boards.
61 * At this point, this code has been loaded into SDRAM 63 * At this point, this code has been loaded into SDRAM
62 * and the MMU is off 64 * and the MMU is off
63 */ 65 */
64 .section .start,"ax",%progbits 66 .section .start,"ax",%progbits
65 67
66 .global _C_LABEL(cubie_start) 68 .global _C_LABEL(cubie_start)
67_C_LABEL(cubie_start): 69_C_LABEL(cubie_start):
68#ifdef __ARMEB__ 70#ifdef __ARMEB__
69 setend be /* force big endian */ 71 setend be /* force big endian */
70#endif 72#endif
71 73
72 /* Move into supervisor mode and disable IRQs/FIQs. */ 74 /* Move into supervisor mode and disable IRQs/FIQs. */
73 cpsid if, #PSR_SVC32_MODE 75 cpsid if, #PSR_SVC32_MODE
74 76
75 /* 77 /*
76 * Save any arguments passed to us (do it PIC). 78 * Save any arguments passed to us. But since .start is at 0x40000000
 77 */ 79 * and .text is at 0x80000000, we can't directly use the address that
 78 movw r4, #:lower16:uboot_args-.LPIC0 80 * the linker gave us.  We have to take the address the linker gave
 79 movt r4, #:upper16:uboot_args-.LPIC0 81 * us and replace its upper 4 bits with the upper 4
80 /* 82 * bits of our pc. Or replace the lower 28 bits of our PC with the
 81 * Since .start is at 0x40000000 and .text is at 0x80000000 83 * lower 28 bits of what the linker gave us.
82 * we have to clear the upper bits of the address so it's relative 84 */
83 * to the current PC, not .text. 85 adr r4, _C_LABEL(cubie_start)
84 */ 86 movw r5, #:lower16:uboot_args
85 bic r4, r4, #0xf0000000 87 movt r5, #:upper16:uboot_args
86 add r4, r4, pc 88 bfi r4, r5, #0, #28
87 stmia r4, {r0-r3} 89
88.LPIC0: 90 stmia r4, {r0-r3} // Save the arguments
89#ifdef CPU_CORTEXA9 
90 /* 91 /*
91 * Turn on the SCU if we are on a Cortex-A9 92 * Turn on the SMP bit
92 */ 93 */
93 bl a9_start 94 bl cortex_init
94 XPUTC(#67) 95 XPUTC(#67)
95#endif 96
 97#if defined(MULTIPROCESSOR) && 0
 98 movw r0, #:lower16:(AWIN_CORE_PBASE+AWIN_CPUCFG_OFFSET)
 99 movt r0, #:upper16:(AWIN_CORE_PBASE+AWIN_CPUCFG_OFFSET)
 100
 101 /* Set where the other CPU(s) are going to execute */
 102 adr r1, cortex_mpstart
 103 str r1, [r0, #AWIN_CPUCFG_PRIVATE_REG]
 104
 105 /* Bring CPU1 out of reset */
 106 ldr r1, [r0, #AWIN_CPUCFG_CPU1_RST_CTRL_REG]
 107 orr r1, r1, #(AWIN_CPUCFG_CPU_RST_CTRL_CORE_RESET|AWIN_CPUCFG_CPU_RST_CTRL_RESET)
 108 str r1, [r0, #AWIN_CPUCFG_CPU1_RST_CTRL_REG]
 109#endif /* MULTIPROCESSOR */
96 110
97 /* 111 /*
98 * Set up a preliminary mapping in the MMU to allow us to run 112 * Set up a preliminary mapping in the MMU to allow us to run
99 * at KERNEL_BASE with caches on. 113 * at KERNEL_BASE with caches on.
100 */ 114 */
101 movw r0, #:lower16:TEMP_L1_TABLE 115 movw r0, #:lower16:TEMP_L1_TABLE
102 movt r0, #:upper16:TEMP_L1_TABLE 116 movt r0, #:upper16:TEMP_L1_TABLE
103 adr r1, .Lmmu_init_table 117 adr r1, .Lmmu_init_table
104 bl arm_boot_l1pt_init 118 bl arm_boot_l1pt_init
105 119
106 XPUTC(#68) 120 XPUTC(#68)
107 121
108 /* 122 /*
109 * Turn on the MMU, Caches, etc. 123 * Turn on the MMU, Caches, etc.
110 */ 124 */
111 movw r0, #:lower16:TEMP_L1_TABLE 125 movw r0, #:lower16:TEMP_L1_TABLE
112 movt r0, #:upper16:TEMP_L1_TABLE 126 movt r0, #:upper16:TEMP_L1_TABLE
113 bl arm_cpuinit 127 bl arm_cpuinit
114 128
115 XPUTC(#90) 129 XPUTC(#90)
116 XPUTC(#13) 130 XPUTC(#13)
117 XPUTC(#10) 131 XPUTC(#10)
118 132
119 /* 133 /*
120 * Jump to start in locore.S, which in turn will call initarm and main. 134 * Jump to start in locore.S, which in turn will call initarm and main.
121 */ 135 */
122 movw ip, #:lower16:start 136 movw ip, #:lower16:start
123 movt ip, #:upper16:start 137 movt ip, #:upper16:start
124 bx ip /* Jump to start (flushes pipeline). */ 138 bx ip /* Jump to start (flushes pipeline). */
125 139
126 /* NOTREACHED */ 140 /* NOTREACHED */
127 141
128#include <arm/cortex/a9_mpsubr.S> 142#include <arm/cortex/a9_mpsubr.S>
129 143
130.Lmmu_init_table: 144.Lmmu_init_table:
131 /* Map KERNEL_BASE VA to SDRAM PA, write-back cacheable, shareable */ 145 /* Map KERNEL_BASE VA to SDRAM PA, write-back cacheable, shareable */
132 MMU_INIT(KERNEL_BASE, AWIN_SDRAM_PBASE, 146 MMU_INIT(KERNEL_BASE, AWIN_SDRAM_PBASE,
133 (INIT_MEMSIZE * L1_S_SIZE + L1_S_SIZE - 1) / L1_S_SIZE, 147 (INIT_MEMSIZE * L1_S_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
134 L1_S_PROTO | L1_S_APv7_KRW | L1_S_B | L1_S_C | L1_S_V6_S) 148 L1_S_PROTO | L1_S_APv7_KRW | L1_S_B | L1_S_C | L1_S_V6_S)
135 149
136 /* Map memory 1:1 VA to PA, write-back cacheable, shareable */ 150 /* Map memory 1:1 VA to PA, write-back cacheable, shareable */
137 MMU_INIT(AWIN_SDRAM_PBASE, AWIN_SDRAM_PBASE, 151 MMU_INIT(AWIN_SDRAM_PBASE, AWIN_SDRAM_PBASE,
138 (INIT_MEMSIZE * L1_S_SIZE + L1_S_SIZE - 1) / L1_S_SIZE, 152 (INIT_MEMSIZE * L1_S_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
139 L1_S_PROTO | L1_S_APv7_KRW | L1_S_B | L1_S_C | L1_S_V6_S) 153 L1_S_PROTO | L1_S_APv7_KRW | L1_S_B | L1_S_C | L1_S_V6_S)
140 154
141 /* Map AWIN CORE (so console will work) */ 155 /* Map AWIN CORE (so console will work) */
142 MMU_INIT(AWIN_CORE_VBASE, AWIN_CORE_PBASE, 156 MMU_INIT(AWIN_CORE_VBASE, AWIN_CORE_PBASE,
143 (AWIN_CORE_SIZE + L1_S_SIZE - 1) / L1_S_SIZE, 157 (AWIN_CORE_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
144 L1_S_PROTO | L1_S_APv7_KRW) 158 L1_S_PROTO | L1_S_APv7_KRW)
145 159
 146 /* Map AWIN CORE at PA:VA 1:1 */ 160 /* Map AWIN CORE at PA:VA 1:1 */
147 MMU_INIT(AWIN_CORE_PBASE, AWIN_CORE_PBASE, 161 MMU_INIT(AWIN_CORE_PBASE, AWIN_CORE_PBASE,
148 (AWIN_CORE_SIZE + L1_S_SIZE - 1) / L1_S_SIZE, 162 (AWIN_CORE_SIZE + L1_S_SIZE - 1) / L1_S_SIZE,
149 L1_S_PROTO | L1_S_APv7_KRW) 163 L1_S_PROTO | L1_S_APv7_KRW)
150 164
151 /* end of table */ 165 /* end of table */
152 MMU_INIT(0, 0, 0, 0) 166 MMU_INIT(0, 0, 0, 0)
153 167
154END(_C_LABEL(cubie_start)) 168END(_C_LABEL(cubie_start))