Fri Feb 21 22:22:48 2014 UTC
Rework PIC method to be simpler.  Change to be more Cortex neutral.


(matt)
diff -r1.12 -r1.13 src/sys/arch/arm/cortex/a9_mpsubr.S
diff -r1.10 -r1.11 src/sys/arch/evbarm/bcm53xx/bcm53xx_start.S
diff -r1.5 -r1.6 src/sys/arch/evbarm/cubie/cubie_start.S
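
The heart of the rework is a single position-independent addressing idiom.
Instead of literal pools holding link-time addresses (.Lcomlock, .Lcpu_mbox,
.Luart0, ...) plus ad-hoc bic/add-from-pc arithmetic, every reference now
splices the link-time address into the alias we are actually executing from.
A minimal sketch of the idiom (needs ARMv6T2+ for movw/movt/bfi; some_symbol
is a placeholder, not a name from the patch):

	adr	r4, 9f				@ r4 = a PC-relative address;
						@ its top 4 bits name the
						@ alias we are executing from
9:	movw	r5, #:lower16:some_symbol	@ r5 = link-time address
	movt	r5, #:upper16:some_symbol
	bfi	r4, r5, #0, #28			@ keep r4's top 4 bits, take
						@ the symbol's low 28 bits
	@ r4 now addresses some_symbol through the current alias, whether
	@ or not that matches the link-time mapping

This assumes the executing alias and the linked address differ only in the
top 4 address bits (256MB-aligned aliases), which holds for the boards
touched here.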

cvs diff -r1.12 -r1.13 src/sys/arch/arm/cortex/Attic/a9_mpsubr.S

--- src/sys/arch/arm/cortex/Attic/a9_mpsubr.S 2014/01/24 05:14:11 1.12
+++ src/sys/arch/arm/cortex/Attic/a9_mpsubr.S 2014/02/21 22:22:48 1.13
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: a9_mpsubr.S,v 1.12 2014/01/24 05:14:11 matt Exp $ */ 1/* $NetBSD: a9_mpsubr.S,v 1.13 2014/02/21 22:22:48 matt Exp $ */
2/*- 2/*-
3 * Copyright (c) 2012 The NetBSD Foundation, Inc. 3 * Copyright (c) 2012 The NetBSD Foundation, Inc.
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * This code is derived from software contributed to The NetBSD Foundation 6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas of 3am Software Foundry. 7 * by Matt Thomas of 3am Software Foundry.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -101,142 +101,177 @@ arm_boot_l1pt_init: @@ -101,142 +101,177 @@ arm_boot_l1pt_init:
101 cmp n_sec, #0 101 cmp n_sec, #0
102 bne 2b 102 bne 2b
103 bx lr @ return 103 bx lr @ return
104 104
105 .unreq va 105 .unreq va
106 .unreq pa 106 .unreq pa
107 .unreq n_sec 107 .unreq n_sec
108 .unreq attr 108 .unreq attr
109 .unreq itable 109 .unreq itable
110 .unreq l1table 110 .unreq l1table
111 111
112#if defined(CPU_CORTEXA8) 112#if defined(CPU_CORTEXA8)
113#undef CPU_CONTROL_SWP_ENABLE // not present on A8 113#undef CPU_CONTROL_SWP_ENABLE // not present on A8
114#define CPU_CONTROL_SWP_ENABLE 0 114#define CPU_CONTROL_SWP_ENABLE 0
115#endif 115#endif
116#ifdef __ARMEL__ 116#ifdef __ARMEL__
117#undef CPU_CONTROL_EX_BEND // needs to clear on LE systems 117#define CPU_CONTROL_EX_BEND_SET 0
118#define CPU_CONTROL_EX_BEND 0 118#else
 119#define CPU_CONTROL_EX_BEND_SET CPU_CONTROL_EX_BEND
119#endif 120#endif
120#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS 121#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
121#undef CPU_CONTROL_AFLT_ENABLE 122#define CPU_CONTROL_AFLT_ENABLE_CLR CPU_CONTROL_AFLT_ENABLE
122#define CPU_CONTROL_AFLT_ENABLE 0 123#define CPU_CONTROL_AFLT_ENABLE_SET 0
 124#else
 125#define CPU_CONTROL_AFLT_ENABLE_CLR 0
 126#define CPU_CONTROL_AFLT_ENABLE_SET CPU_CONTROL_AFLT_ENABLE
123#endif 127#endif
124 128
125#define CPU_CONTROL_SET \ 129#define CPU_CONTROL_SET \
126 (CPU_CONTROL_MMU_ENABLE | \ 130 (CPU_CONTROL_MMU_ENABLE | \
127 CPU_CONTROL_AFLT_ENABLE | \ 131 CPU_CONTROL_AFLT_ENABLE_SET | \
128 CPU_CONTROL_EX_BEND | \ 
129 CPU_CONTROL_DC_ENABLE | \ 132 CPU_CONTROL_DC_ENABLE | \
130 CPU_CONTROL_SWP_ENABLE | \ 133 CPU_CONTROL_SWP_ENABLE | \
131 CPU_CONTROL_BPRD_ENABLE | \ 134 CPU_CONTROL_BPRD_ENABLE | \
132 CPU_CONTROL_IC_ENABLE | \ 135 CPU_CONTROL_IC_ENABLE | \
 136 CPU_CONTROL_EX_BEND_SET | \
133 CPU_CONTROL_UNAL_ENABLE) 137 CPU_CONTROL_UNAL_ENABLE)
134 138
 139#define CPU_CONTROL_CLR \
 140 (CPU_CONTROL_AFLT_ENABLE_CLR)
 141
135arm_cpuinit: 142arm_cpuinit:
136 /* 143 /*
137 * In theory, because the MMU is off, we shouldn't need all of this, 144 * In theory, because the MMU is off, we shouldn't need all of this,
138 * but let's not take any chances and do a typical sequence to set 145 * but let's not take any chances and do a typical sequence to set
139 * the Translation Table Base. 146 * the Translation Table Base.
140 */ 147 */
141 mov ip, lr 148 mov ip, lr
142 mov r10, r0 149 mov r10, r0
 150 mov r1, #0
 151
 152 mcr p15, 0, r1, c7, c5, 0 // invalidate I cache
143 153
144 mcr p15, 0, r10, c7, c5, 0 /* invalidate I cache */ 154 mrc p15, 0, r2, c1, c0, 0 // read SCTRL
 155 movw r1, #(CPU_CONTROL_DC_ENABLE|CPU_CONTROL_IC_ENABLE)
 156 bic r2, r2, r1 // clear I+D cache enable
145 157
146 mrc p15, 0, r2, c1, c0, 0 /* " " " */ 158#ifdef __ARMEB__
147 bic r2, r2, #CPU_CONTROL_DC_ENABLE @ clear data cache enable 159 /*
148 bic r2, r2, #CPU_CONTROL_IC_ENABLE @ clear instruction cache enable 160 * SCTRL.EE determines the endianness of translation table lookups.
149 mcr p15, 0, r2, c1, c0, 0 /* " " " */ 161 * So we need to make sure it's set before starting to use the new
 162 * translation tables (which are big endian).
 163 */
 164 orr r2, r2, #CPU_CONTROL_EX_BEND
 165 bic r2, r2, #CPU_CONTROL_MMU_ENABLE
 166 pli [pc, #32] /* preload the next few cachelines */
 167 pli [pc, #64]
 168 pli [pc, #96]
 169 pli [pc, #128]
 170#endif
 171
 172 mcr p15, 0, r2, c1, c0, 0 /* write SCTRL */
150 173
151 XPUTC(#70) 174 XPUTC(#70)
152 mov r1, #0 
153 dsb /* Drain the write buffers. */ 175 dsb /* Drain the write buffers. */
154 1761:
155 XPUTC(#71) 177 XPUTC(#71)
156 mrc p15, 0, r2, c0, c0, 5 /* get MPIDR */ 178 mrc p15, 0, r1, c0, c0, 5 /* get MPIDR */
157 cmp r2, #0 179 cmp r1, #0
158 orrlt r10, r10, #0x5b /* MP, cachable (Normal WB) */ 180 orrlt r10, r10, #0x5b /* MP, cachable (Normal WB) */
159 orrge r10, r10, #0x1b /* Non-MP, cacheable, normal WB */ 181 orrge r10, r10, #0x1b /* Non-MP, cacheable, normal WB */
160 mcr p15, 0, r10, c2, c0, 0 /* Set Translation Table Base */ 182 mcr p15, 0, r10, c2, c0, 0 /* Set Translation Table Base */
161 183
162 XPUTC(#49) 184 XPUTC(#72)
 185 mov r1, #0
163 mcr p15, 0, r1, c2, c0, 2 /* Set Translation Table Control */ 186 mcr p15, 0, r1, c2, c0, 2 /* Set Translation Table Control */
164 187
165 XPUTC(#72) 188 XPUTC(#73)
166 mov r1, #0 189 mov r1, #0
167 mcr p15, 0, r1, c8, c7, 0 /* Invalidate TLBs */ 190 mcr p15, 0, r1, c8, c7, 0 /* Invalidate TLBs */
168 191
169 /* Set the Domain Access register. Very important! */ 192 /* Set the Domain Access register. Very important! */
170 XPUTC(#73) 193 XPUTC(#74)
171 mov r1, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT) 194 mov r1, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
172 mcr p15, 0, r1, c3, c0, 0 195 mcr p15, 0, r1, c3, c0, 0
173 196
174 /* 197 /*
175 * Enable the MMU, etc. 198 * Enable the MMU, etc.
176 */ 199 */
177 XPUTC(#74) 200 XPUTC(#75)
178 mrc p15, 0, r0, c1, c0, 0 201 mrc p15, 0, r0, c1, c0, 0
179 202
180 movw r3, #:lower16:CPU_CONTROL_SET 203 movw r3, #:lower16:CPU_CONTROL_SET
181#if (CPU_CONTROL_SET & 0xffff0000) 204#if (CPU_CONTROL_SET & 0xffff0000)
182 movt r3, #:upper16:CPU_CONTROL_SET 205 movt r3, #:upper16:CPU_CONTROL_SET
183#endif 206#endif
184 orr r0, r0, r3 207 orr r0, r0, r3
 208#if defined(CPU_CONTROL_CLR) && (CPU_CONTROL_CLR != 0)
 209 bic r0, r0, #CPU_CONTROL_CLR
 210#endif
 211 pli 1f
185  212
186 dsb 213 dsb
187 .align 5 
188 @ turn mmu on! 214 @ turn mmu on!
189 mov r0, r0 215 mov r0, r0 /* fetch instruction cacheline */
190 mcr p15, 0, r0, c1, c0, 0 2161: mcr p15, 0, r0, c1, c0, 0
191 217
192 /* 218 /*
193 * Ensure that the coprocessor has finished turning on the MMU. 219 * Ensure that the coprocessor has finished turning on the MMU.
194 */ 220 */
195 mrc p15, 0, r0, c0, c0, 0 /* Read an arbitrary value. */ 221 mrc p15, 0, r0, c0, c0, 0 /* Read an arbitrary value. */
196 mov r0, r0 /* Stall until read completes. */ 222 mov r0, r0 /* Stall until read completes. */
197 XPUTC(#76) 2231: XPUTC(#76)
198 224
199 bx ip /* return */ 225 bx ip /* return */
200 226
201/* 227/*
202 * Coprocessor register initialization values 228 * Coprocessor register initialization values
203 */ 229 */
204 230
205 .p2align 2 231 .p2align 2
206 232
207 /* bits to set in the Control Register */ 233 /* bits to set in the Control Register */
208 234
209#if defined(VERBOSE_INIT_ARM) && XPUTC_COM 235#if defined(VERBOSE_INIT_ARM) && XPUTC_COM
210#define TIMO 0x25000 236#define TIMO 0x25000
211#ifndef COM_MULT 237#ifndef COM_MULT
212#define COM_MULT 1 238#define COM_MULT 1
213#endif 239#endif
214xputc: 240xputc:
215#ifdef MULTIPROCESSOR 241#ifdef MULTIPROCESSOR
 242 adr r3, xputc
 243 movw r2, #:lower16:comlock
 244 movt r2, #:upper16:comlock
 245 bfi r3, r2, #0, #28
216 mov r2, #1 246 mov r2, #1
217 ldr r3, .Lcomlock 
21810: 24710:
219 ldrex r1, [r3] 248 ldrex r1, [r3]
220 cmp r1, #0 249 cmp r1, #0
221 bne 10b 250 bne 10b
222 strex r1, r2, [r3] 251 strex r1, r2, [r3]
223 cmp r1, #0 252 cmp r1, #0
224 bne 10b 253 bne 10b
225 dsb 254 dsb
226#endif 255#endif
227 256
228 mov r2, #TIMO 257 mov r2, #TIMO
229 ldr r3, .Luart0 258#ifdef CONADDR
 259 movw r3, #:lower16:CONADDR
 260 movt r3, #:upper16:CONADDR
 261#elif defined(CONSADDR)
 262 movw r3, #:lower16:CONSADDR
 263 movt r3, #:upper16:CONSADDR
 264#endif
2301: 2651:
231#if COM_MULT == 1 266#if COM_MULT == 1
232 ldrb r1, [r3, #(COM_LSR*COM_MULT)] 267 ldrb r1, [r3, #(COM_LSR*COM_MULT)]
233#else 268#else
234#if COM_MULT == 2 269#if COM_MULT == 2
235 ldrh r1, [r3, #(COM_LSR*COM_MULT)] 270 ldrh r1, [r3, #(COM_LSR*COM_MULT)]
236#elif COM_MULT == 4 271#elif COM_MULT == 4
237 ldr r1, [r3, #(COM_LSR*COM_MULT)] 272 ldr r1, [r3, #(COM_LSR*COM_MULT)]
238#endif 273#endif
239#ifdef COM_BSWAP 274#ifdef COM_BSWAP
240 lsr r1, r1, #(COM_MULT-1)*8 275 lsr r1, r1, #(COM_MULT-1)*8
241#endif 276#endif
242#endif 277#endif
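
Condensed from the hunk above, for readability: with the new *_SET/*_CLR
macro pairs, the MMU enable in arm_cpuinit becomes one read-modify-write of
the control register.  A sketch (XPUTC tracing omitted; masks as defined
above):

	mrc	p15, 0, r0, c1, c0, 0		@ read SCTRL
	movw	r3, #:lower16:CPU_CONTROL_SET	@ bits every config sets
#if (CPU_CONTROL_SET & 0xffff0000)
	movt	r3, #:upper16:CPU_CONTROL_SET
#endif
	orr	r0, r0, r3
#if defined(CPU_CONTROL_CLR) && (CPU_CONTROL_CLR != 0)
	bic	r0, r0, #CPU_CONTROL_CLR	@ bits this config clears
#endif
	pli	1f				@ preload the line holding
						@ the MMU-enable write
	dsb
	mov	r0, r0				@ nop: fetch this cacheline
1:	mcr	p15, 0, r0, c1, c0, 0		@ write SCTRL: MMU goes on
	mrc	p15, 0, r0, c0, c0, 0		@ read back, then stall
	mov	r0, r0				@ until the read completes

The pli seemingly stands in for the deleted '.align 5' padding: rather than
forcing the write onto a fresh cache line, the line is preloaded so the
fetch straddling the MMU flip does not miss.
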
@@ -268,83 +303,87 @@ xputc: @@ -268,83 +303,87 @@ xputc:
268#elif COM_MULT == 4 303#elif COM_MULT == 4
269 ldr r1, [r3, #(COM_LSR*COM_MULT)] 304 ldr r1, [r3, #(COM_LSR*COM_MULT)]
270#endif 305#endif
271#ifdef COM_BSWAP 306#ifdef COM_BSWAP
272 lsr r1, r1, #(COM_MULT-1)*8 307 lsr r1, r1, #(COM_MULT-1)*8
273#endif 308#endif
274#endif 309#endif
275 tst r1, #LSR_TSRE 310 tst r1, #LSR_TSRE
276 bne 4f 311 bne 4f
277 subs r2, r2, #1 312 subs r2, r2, #1
278 bne 3b 313 bne 3b
2794: 3144:
280#ifdef MULTIPROCESSOR 315#ifdef MULTIPROCESSOR
281 ldr r3, .Lcomlock 316 adr r3, xputc
 317 movw r2, #:lower16:comlock
 318 movt r2, #:upper16:comlock
 319 bfi r3, r2, #0, #28
282 mov r0, #0 320 mov r0, #0
283 str r0, [r3] 321 str r0, [r3]
284 dsb 322 dsb
285#endif 323#endif
286 bx lr 324 bx lr
287 325
288.Luart0: 
289#ifdef CONADDR 
290 .word CONADDR 
291#elif defined(CONSADDR) 
292 .word CONSADDR 
293#endif 
294 
295#ifdef MULTIPROCESSOR 326#ifdef MULTIPROCESSOR
296.Lcomlock: 
297 .word comlock 
298 
299 .pushsection .data 327 .pushsection .data
300comlock: 328comlock:
301 .p2align 2 329 .p2align 4
302 .word 0 @ not in bss 330 .word 0 @ not in bss
 331 .p2align 4
303 332
304 .popsection 333 .popsection
305#endif /* MULTIPROCESSOR */ 334#endif /* MULTIPROCESSOR */
306#endif /* VERBOSE_INIT_ARM */ 335#endif /* VERBOSE_INIT_ARM */
307 336
308#ifdef CPU_CORTEXA9 337cortex_init:
309a9_start: 
310 mov r10, lr @ save lr 338 mov r10, lr @ save lr
311 339
312 cpsid if, #PSR_SVC32_MODE 340 cpsid if, #PSR_SVC32_MODE
313 341
314 XPUTC(#64) 342 XPUTC(#64)
315 bl _C_LABEL(armv7_icache_inv_all) @ invalidate i-cache 343 adr ip, cortex_init
 344 movw r0, #:lower16:_C_LABEL(armv7_icache_inv_all)
 345 movt r0, #:upper16:_C_LABEL(armv7_icache_inv_all)
 346 bfi ip, r0, #0, #28
 347 blx ip @ toss i-cache
316 348
 349#ifdef CPU_CORTEXA9
317 /* 350 /*
318 * Step 1a, invalidate the all cache tags in all ways on the SCU. 351 * Step 1a, invalidate the all cache tags in all ways on the SCU.
319 */ 352 */
320 XPUTC(#65) 353 XPUTC(#65)
321 mrc p15, 4, r3, c15, c0, 0 @ read cbar 354 mrc p15, 4, r3, c15, c0, 0 @ read cbar
322 ldr r0, [r3, #SCU_CFG] @ read scu config 355 ldr r0, [r3, #SCU_CFG] @ read scu config
323 and r0, r0, #7 @ get cpu max 356 and r0, r0, #7 @ get cpu max
324 add r0, r0, #2 @ adjust to cpu num 357 add r0, r0, #2 @ adjust to cpu num
325 mov r1, #0xf @ select all ways 358 mov r1, #0xf @ select all ways
326 lsl r1, r1, r0 @ shift into place 359 lsl r1, r1, r0 @ shift into place
327 str r1, [r3, #SCU_INV_ALL_REG] @ write scu invalidate all 360 str r1, [r3, #SCU_INV_ALL_REG] @ write scu invalidate all
328 dsb 361 dsb
329 isb 362 isb
 363#endif
330 364
331 /* 365 /*
332 * Step 1b, invalidate the data cache 366 * Step 1b, invalidate the data cache
333 */ 367 */
334 XPUTC(#66) 368 XPUTC(#66)
335 bl _C_LABEL(armv7_dcache_wbinv_all) @ writeback/invalidate d-cache 369 adr ip, cortex_init
 370 movw r0, #:lower16:_C_LABEL(armv7_dcache_wbinv_all)
 371 movt r0, #:upper16:_C_LABEL(armv7_dcache_wbinv_all)
 372 bfi ip, r0, #0, #28
 373 blx ip @ writeback & toss d-cache
336 XPUTC(#67) 374 XPUTC(#67)
337 375
 376#ifdef CPU_CORTEXA9
338 /* 377 /*
339 * Step 2, disable the data cache 378 * Step 2, disable the data cache
340 */ 379 */
341 mrc p15, 0, r2, c1, c0, 0 @ get system ctl register (save) 380 mrc p15, 0, r2, c1, c0, 0 @ get system ctl register (save)
342 bic r1, r2, #CPU_CONTROL_DC_ENABLE @ clear data cache enable 381 bic r1, r2, #CPU_CONTROL_DC_ENABLE @ clear data cache enable
343 mcr p15, 0, r1, c1, c0, 0 @ set system ctl register 382 mcr p15, 0, r1, c1, c0, 0 @ set system ctl register
344 isb 383 isb
345 XPUTC(#49) 384 XPUTC(#49)
346 385
347 /* 386 /*
348 * Step 3, enable the SCU (and set SMP mode) 387 * Step 3, enable the SCU (and set SMP mode)
349 */ 388 */
350 mrc p15, 4, r3, c15, c0, 0 @ read cbar 389 mrc p15, 4, r3, c15, c0, 0 @ read cbar
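
The MULTIPROCESSOR serialization of xputc, spread across the two hunks
above, is a classic ldrex/strex spin lock on comlock.  Isolated, its shape
is (r3 already pointing at the lock word):

	@ acquire: spin until the word reads 0, then atomically store 1
	mov	r2, #1
10:	ldrex	r1, [r3]		@ load-exclusive the lock word
	cmp	r1, #0			@ held by another core?
	bne	10b			@ yes: spin
	strex	r1, r2, [r3]		@ try to claim; r1 = 0 on success
	cmp	r1, #0
	bne	10b			@ lost the monitor: retry
	dsb				@ keep UART accesses inside the lock

	@ ... bang on the UART ...

	@ release: a plain store suffices; exclusivity is only needed
	@ to arbitrate the claim
	mov	r0, #0
	str	r0, [r3]
	dsb

The bfi dance around the movw/movt of comlock exists so the lock word is
addressed through the executing alias, per the PIC idiom noted at the top.
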
@@ -352,153 +391,195 @@ a9_start: @@ -352,153 +391,195 @@ a9_start:
352 orr r1, r1, #SCU_CTL_SCU_ENA @ set scu enable flag 391 orr r1, r1, #SCU_CTL_SCU_ENA @ set scu enable flag
353 str r1, [r3, #SCU_CTL] @ write scu control 392 str r1, [r3, #SCU_CTL] @ write scu control
354 dsb 393 dsb
355 isb 394 isb
356 XPUTC(#50) 395 XPUTC(#50)
357 396
358 /* 397 /*
359 * Step 4a, enable the data cache 398 * Step 4a, enable the data cache
360 */ 399 */
361 orr r2, r2, #CPU_CONTROL_DC_ENABLE @ set data cache enable 400 orr r2, r2, #CPU_CONTROL_DC_ENABLE @ set data cache enable
362 mcr p15, 0, r2, c1, c0, 0 @ reenable caches 401 mcr p15, 0, r2, c1, c0, 0 @ reenable caches
363 isb 402 isb
364 XPUTC(#51) 403 XPUTC(#51)
 404#endif
365 405
366#ifdef MULTIPROCESSOR 406#ifdef MULTIPROCESSOR
367 /* 407 /*
368 * Step 4b, set ACTLR.SMP=1 (and ACTLR.FX=1) 408 * Step 4b, set ACTLR.SMP=1 (and on A9, ACTLR.FX=1)
369 */ 409 */
370 mrc p15, 0, r0, c1, c0, 1 @ read aux ctl 410 mrc p15, 0, r0, c1, c0, 1 @ read aux ctl
371 orr r0, r0, #CORTEXA9_AUXCTL_SMP @ enable SMP 411 orr r0, r0, #CORTEXA9_AUXCTL_SMP @ enable SMP
372 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl 412 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl
373 isb 413 isb
 414#ifdef CPU_CORTEXA9
374 orr r0, r0, #CORTEXA9_AUXCTL_FW @ enable cache/tlb/coherency 415 orr r0, r0, #CORTEXA9_AUXCTL_FW @ enable cache/tlb/coherency
375 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl 416 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl
376 isb 417 isb
377 XPUTC(#52) 
378#endif 418#endif
 419 XPUTC(#52)
 420#endif /* MULTIPROCESSOR */
379 421
380 bx r10 422 bx r10
381ASEND(a9_start) 423ASEND(cortex_init)
382 424
383/* 425/*
384 * Secondary processors come here after exiting the SKU ROM. 426 * Secondary processors come here after exiting the SKU ROM.
 427 * Running native endian until we have SMP enabled. Since no data
 428 * is accessed, that shouldn't be a problem.
385 */ 429 */
386a9_mpstart: 430cortex_mpstart:
387#ifdef MULTIPROCESSOR 431 cpsid if, #PSR_SVC32_MODE @ make sure we are in SVC mode
 432 mrs r0, cpsr @ fetch CPSR value
 433 msr spsr_sxc, r0 @ set SPSR[23:8] to known value
 434
 435#ifndef MULTIPROCESSOR
 436 /*
 437 * If not MULTIPROCESSOR, drop CPU into power saving state.
 438 */
 4393: wfe
 440 b 3b
 441#else
388 /* 442 /*
389 * Step 1, invalidate the caches 443 * Step 1, invalidate the caches
390 */ 444 */
391 bl _C_LABEL(armv7_icache_inv_all) @ toss i-cache 445 adr ip, cortex_mpstart
392 bl _C_LABEL(armv7_dcache_inv_all) @ toss d-cache 446 movw r0, #:lower16:_C_LABEL(armv7_icache_inv_all)
 447 movt r0, #:upper16:_C_LABEL(armv7_icache_inv_all)
 448 bfi ip, r0, #0, #28
 449 blx ip @ toss i-cache
 450 adr ip, cortex_mpstart
 451 movw r0, #:lower16:_C_LABEL(armv7_dcache_inv_all)
 452 movt r0, #:upper16:_C_LABEL(armv7_dcache_inv_all)
 453 bfi ip, r0, #0, #28
 454 blx ip @ toss d-cache
393 455
 456#if defined(CPU_CORTEXA9)
394 /* 457 /*
395 * Step 2, wait for the SCU to be enabled 458 * Step 2, wait for the SCU to be enabled
396 */ 459 */
397 mrc p15, 4, r3, c15, c0, 0 @ read cbar 460 mrc p15, 4, r3, c15, c0, 0 @ read cbar
3981: ldr r0, [r3, #SCU_CTL] @ read scu control 4611: ldr r0, [r3, #SCU_CTL] @ read scu control
399 tst r0, #SCU_CTL_SCU_ENA @ enable bit set yet? 462 tst r0, #SCU_CTL_SCU_ENA @ enable bit set yet?
400 bne 1b @ try again 463 bne 1b @ try again
 464#endif
401 465
402 /* 466 /*
403 * Step 3, set ACTLR.SMP=1 (and ACTLR.FX=1) 467 * Step 3, set ACTLR.SMP=1 (and ACTLR.FX=1)
404 */ 468 */
405 mrc p15, 0, r0, c1, c0, 1 @ read aux ctl 469 mrc p15, 0, r0, c1, c0, 1 @ read aux ctl
406 orr r0, #CORTEXA9_AUXCTL_SMP @ enable SMP 470 orr r0, #CORTEXA9_AUXCTL_SMP @ enable SMP
407 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl 471 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl
408 mov r0, r0 472 mov r0, r0
 473#if defined(CPU_CORTEXA9)
409 orr r0, #CORTEXA9_AUXCTL_FW @ enable cache/tlb/coherency 474 orr r0, #CORTEXA9_AUXCTL_FW @ enable cache/tlb/coherency
410 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl 475 mcr p15, 0, r0, c1, c0, 1 @ write aux ctl
411 mov r0, r0 476 mov r0, r0
 477#endif
412 478
413 /* 479 /*
414 * We should be in SMP mode now. 480 * We should be in SMP mode now.
415 */ 481 */
416 mrc p15, 0, r4, c0, c0, 5 @ get MPIDR 482 mrc p15, 0, r4, c0, c0, 5 @ get MPIDR
417 and r4, r4, #7 @ get our cpu number 483 and r4, r4, #7 @ get our cpu number
418 484
 485#ifdef __ARMEB__
 486 setend be @ switch to BE now
 487#endif
 488
419#if defined(VERBOSE_INIT_ARM) 489#if defined(VERBOSE_INIT_ARM)
420 add r0, r4, #48 490 add r0, r4, #48
421 bl xputc 491 bl xputc
422#endif 492#endif
423 493
424 ldr r0, .Lcpu_hatched @ now show we've hatched 494 /*
 495 * To access things that are not in .start, we need to replace the upper
 496 * 4 bits of the address with those of where we are currently executing.
 497 */
 498 adr r10, cortex_mpstart
 499 lsr r10, r10, #28
 500
 501 movw r0, #:lower16:_C_LABEL(arm_cpu_hatched)
 502 movt r0, #:upper16:_C_LABEL(arm_cpu_hatched)
 503 bfi r0, r10, #28, #4 // replace top 4 bits
 504 add r0, r0, r10
425 mov r5, #1 505 mov r5, #1
426 lsl r5, r5, r4 506 lsl r5, r5, r4
427 mov r1, r5 507 /*
428 bl _C_LABEL(atomic_or_32) 508 * We inline the atomic_or_32 call since we might be in a different
 509 * area of memory.
 510 */
 5112: ldrex r1, [r0]
 512 orr r1, r1, r5
 513 strex r2, r1, [r0]
 514 cmp r2, #0
 515 bne 2b
429 516
430 XPUTC(#97) 517 XPUTC(#97)
431#endif 
432 
433 cpsid if, #PSR_SVC32_MODE @ make sure we are in SVC mode 
434 518
435 /* Now we will wait for someone to tell this cpu to start running */ 519 /* Now we will wait for someone to tell this cpu to start running */
436#ifdef MULTIPROCESSOR 520 movw r0, #:lower16:_C_LABEL(arm_cpu_mbox)
437 ldr r0, .Lcpu_mbox 521 movt r0, #:upper16:_C_LABEL(arm_cpu_mbox)
438#else 522 bfi r0, r10, #28, #4
439 cmp r0, r0 523 add r0, r0, r10
440#endif 5243: dmb
4412: 
442#ifdef MULTIPROCESSOR 
443 dmb 
444 ldr r2, [r0] 525 ldr r2, [r0]
445 tst r2, r5 526 tst r2, r5
446#endif 527 wfeeq
447 @wfeeq 528 beq 3b
448 beq 2b 
449 529
450#ifdef MULTIPROCESSOR 530 XPUTC(#98)
4513: XPUTC(#98) 531 movw r0, #:lower16:_C_LABEL(arm_cpu_marker)
452 ldr r0, .Lcpu_marker 532 movt r0, #:upper16:_C_LABEL(arm_cpu_marker)
 533 bfi r0, r10, #28, #4
453 str pc, [r0] 534 str pc, [r0]
454 535
455 ldr r0, .Lkernel_l1pt /* get address of l1pt pvaddr */ 536 movw r0, #:lower16:_C_LABEL(kernel_l1pt)
 537 movt r0, #:upper16:_C_LABEL(kernel_l1pt)
 538 bfi r0, r10, #28, #4 /* get address of l1pt pvaddr */
456 ldr r0, [r0, #PV_PA] /* Now get the phys addr */ 539 ldr r0, [r0, #PV_PA] /* Now get the phys addr */
457 bl cpu_init 540 /*
458 541 * After we turn on the MMU, we will no longer be in .start, so
459 ldr r0, .Lcpu_marker 542 * set up the return to the rest of the MP startup code in .text.
460 str pc, [r0] 543 */
 544 movw lr, #:lower16:cortex_mpcontinuation
 545 movt lr, #:upper16:cortex_mpcontinuation
 546 b arm_cpuinit
 547#endif /* MULTIPROCESSOR */
 548ASEND(cortex_mpstart)
461 549
 550#ifdef MULTIPROCESSOR
 551 .pushsection .text
 552cortex_mpcontinuation:
462 /* MMU, L1, are now on. */ 553 /* MMU, L1, are now on. */
463 554
464 ldr r0, .Lcpu_info /* get pointer to cpu_infos */ 555 movw r0, #:lower16:_C_LABEL(arm_cpu_marker)
 556 movt r0, #:upper16:_C_LABEL(arm_cpu_marker)
 557 str pc, [r0]
 558
 559 movw r0, #:lower16:cpu_info
 560 movt r0, #:upper16:cpu_info /* get pointer to cpu_infos */
465 ldr r5, [r0, r4, lsl #2] /* load our cpu_info */ 561 ldr r5, [r0, r4, lsl #2] /* load our cpu_info */
466 ldr r6, [r5, #CI_IDLELWP] /* get the idlelwp */ 562 ldr r6, [r5, #CI_IDLELWP] /* get the idlelwp */
467 ldr r7, [r6, #L_PCB] /* now get its pcb */ 563 ldr r7, [r6, #L_PCB] /* now get its pcb */
468 ldr sp, [r7, #PCB_SP] /* finally, we can load our SP */ 564 ldr sp, [r7, #PCB_KSP] /* finally, we can load our SP */
469#ifdef TPIDRPRW_IS_CURCPU 565#ifdef TPIDRPRW_IS_CURCPU
470 mcr p15, 0, r5, c13, c0, 4 /* squirrel away curcpu() */ 566 mcr p15, 0, r5, c13, c0, 4 /* squirrel away curcpu() */
471#elif defined(TPIDRPRW_IS_CURLWP) 567#elif defined(TPIDRPRW_IS_CURLWP)
472 mcr p15, 0, r6, c13, c0, 4 /* squirrel away curlwp() */ 568 mcr p15, 0, r6, c13, c0, 4 /* squirrel away curlwp() */
473#else 569#else
474#error either TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP must be defined 570#error either TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP must be defined
475#endif 571#endif
476 str r6, [r5, #CI_CURLWP] /* and note we are running on it */ 572 str r6, [r5, #CI_CURLWP] /* and note we are running on it */
477 573
478 ldr r0, .Lcpu_marker 574 str pc, [r0] // r0 still holds arm_cpu_marker
479 str pc, [r0] 
480 575
481 mov r0, r5 /* pass cpu_info */ 576 mov r0, r5 // pass cpu_info
482 mov r1, r4 /* pass cpu_id */ 577 mov r1, r4 // pass cpu_id
483 ldr r2, .Lbcm53xx_cpu_hatch /* pass md_cpu_hatch */ 578 movw r2, #:lower16:MD_CPU_HATCH // pass md_cpu_hatch
 579 movt r2, #:upper16:MD_CPU_HATCH // pass md_cpu_hatch
484 bl _C_LABEL(cpu_hatch) 580 bl _C_LABEL(cpu_hatch)
485 b _C_LABEL(idle_loop) 581 b _C_LABEL(idle_loop)
486ASEND(a9_mpstart) 582ASEND(cortex_mpcontinuation)
487 /* NOT REACHED */ 583 /* NOT REACHED */
488 584 .popsection
489.Lkernel_l1pt: 
490 .word _C_LABEL(kernel_l1pt) 
491.Lcpu_info: 
492 .word _C_LABEL(cpu_info) 
493.Lcpu_max: 
494 .word _C_LABEL(arm_cpu_max) 
495.Lcpu_hatched: 
496 .word _C_LABEL(arm_cpu_hatched) 
497.Lcpu_mbox: 
498 .word _C_LABEL(arm_cpu_mbox) 
499.Lcpu_marker: 
500 .word _C_LABEL(arm_cpu_marker) 
501.Lbcm53xx_cpu_hatch: 
502 .word _C_LABEL(bcm53xx_cpu_hatch) 
503#endif /* MULTIPROCESSOR */ 585#endif /* MULTIPROCESSOR */
504#endif /* CPU_CORTEXA9 */ 
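
Two details in the last hunk are worth spelling out.  First, a secondary
now parks on arm_cpu_mbox with wfe instead of busy-polling (r0 = mailbox
address fixed up for the executing alias, r5 = 1 << cpu number):

3:	dmb				@ observe the primary's latest store
	ldr	r2, [r0]		@ read the mailbox
	tst	r2, r5			@ our bit set yet?
	wfeeq				@ no: sleep until an event
	beq	3b			@ awoken: recheck (wfe can wake
					@ spuriously, and sev wakes everyone)

Second, the old call to atomic_or_32 for arm_cpu_hatched is inlined as its
own ldrex/strex loop, since, as the new comment notes, the copy of
atomic_or_32 at its link-time address may not be reachable from the alias
the secondary is running in.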

cvs diff -r1.10 -r1.11 src/sys/arch/evbarm/bcm53xx/Attic/bcm53xx_start.S

--- src/sys/arch/evbarm/bcm53xx/Attic/bcm53xx_start.S 2014/01/24 04:15:33 1.10
+++ src/sys/arch/evbarm/bcm53xx/Attic/bcm53xx_start.S 2014/02/21 22:22:48 1.11
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: bcm53xx_start.S,v 1.10 2014/01/24 04:15:33 matt Exp $ */ 1/* $NetBSD: bcm53xx_start.S,v 1.11 2014/02/21 22:22:48 matt Exp $ */
2/*- 2/*-
3 * Copyright (c) 2012 The NetBSD Foundation, Inc. 3 * Copyright (c) 2012 The NetBSD Foundation, Inc.
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * This code is derived from software contributed to The NetBSD Foundation 6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas of 3am Software Foundry. 7 * by Matt Thomas of 3am Software Foundry.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -35,27 +35,27 @@ @@ -35,27 +35,27 @@
35#include "opt_cputypes.h" 35#include "opt_cputypes.h"
36#include "opt_multiprocessor.h" 36#include "opt_multiprocessor.h"
37 37
38#include <arm/asm.h> 38#include <arm/asm.h>
39#include <arm/armreg.h> 39#include <arm/armreg.h>
40#include "assym.h" 40#include "assym.h"
41 41
42#include <arm/cortex/a9tmr_reg.h> 42#include <arm/cortex/a9tmr_reg.h>
43 43
44#ifndef CONADDR 44#ifndef CONADDR
45#define CONADDR 0x18000300 45#define CONADDR 0x18000300
46#endif 46#endif
47 47
48RCSID("$NetBSD: bcm53xx_start.S,v 1.10 2014/01/24 04:15:33 matt Exp $") 48RCSID("$NetBSD: bcm53xx_start.S,v 1.11 2014/02/21 22:22:48 matt Exp $")
49 49
50#undef VERBOSE_INIT_ARM 50#undef VERBOSE_INIT_ARM
51#define VERBOSE_INIT_ARM 51#define VERBOSE_INIT_ARM
52 52
53#if defined(VERBOSE_INIT_ARM) 53#if defined(VERBOSE_INIT_ARM)
54#define XPUTC(n) mov r0, n; bl xputc 54#define XPUTC(n) mov r0, n; bl xputc
55#define XPUTC_COM 1 55#define XPUTC_COM 1
56#else 56#else
57#define XPUTC(n) 57#define XPUTC(n)
58#endif 58#endif
59 59
60#define MD_CPU_HATCH bcm53xx_cpu_hatch 60#define MD_CPU_HATCH bcm53xx_cpu_hatch
61 61
@@ -64,84 +64,85 @@ RCSID("$NetBSD: bcm53xx_start.S,v 1.10 2 @@ -64,84 +64,85 @@ RCSID("$NetBSD: bcm53xx_start.S,v 1.10 2
64 * At this point, this code has been loaded into SDRAM 64 * At this point, this code has been loaded into SDRAM
65 * and the MMU is off 65 * and the MMU is off
66 */ 66 */
67 .text 67 .text
68 68
69 .global _C_LABEL(bcm53xx_start) 69 .global _C_LABEL(bcm53xx_start)
70_C_LABEL(bcm53xx_start): 70_C_LABEL(bcm53xx_start):
71#ifdef __ARMEB__ 71#ifdef __ARMEB__
72 setend be /* make sure we are running big endian */ 72 setend be /* make sure we are running big endian */
73#endif 73#endif
74 /* 74 /*
75 * Save any arguments u-boot passed us. 75 * Save any arguments u-boot passed us.
76 */ 76 */
77 movw r4, #:lower16:(uboot_args-.LPIC0) 77 adr r4, _C_LABEL(bcm53xx_start)
78 movt r4, #:upper16:(uboot_args-.LPIC0) 78 movw r5, #:lower16:uboot_args
79 bic r4, r4, #0xf0000000 79 movt r5, #:upper16:uboot_args
80 add r4, r4, pc 80 bfi r4, r5, #0, #28
81 stmia r4, {r0-r3} 81 stmia r4, {r0-r3}
82.LPIC0: 82.LPIC0:
83 83
84 /* 84 /*
85 * Let's turn on the CCA watchdog in case something goes horribly wrong. 85 * Let's turn on the CCA watchdog in case something goes horribly wrong.
86 */ 86 */
87 ldr r0, .Lcca_wdog 87 ldr r0, .Lcca_wdog
88 ldr r1, .Lcca_wdog + 4 88 ldr r1, .Lcca_wdog + 4
89 str r1, [r0] 89 str r1, [r0]
90 90
91 /* 91 /*
92 * Call the initial start code for the a9 92 * Call the initial start code for Cortex cores
93 */ 93 */
94 bl a9_start 94 bl cortex_init
95 95
96 /* 96 /*
97 * Set up a preliminary mapping in the MMU to allow us to run 97 * Set up a preliminary mapping in the MMU to allow us to run
98 * at KERNEL_BASE with caches on. 98 * at KERNEL_BASE with caches on.
99 */ 99 */
 100 movw r1, #:lower16:(mmu_init_table-.LPIC1)
 101 add r1, r1, pc
 102.LPIC1:
100 ldr r0, .Ltemp_l1_table /* The L1PT address - entered into TTB later */ 103 ldr r0, .Ltemp_l1_table /* The L1PT address - entered into TTB later */
101 adr r1, mmu_init_table 
102 bl arm_boot_l1pt_init 104 bl arm_boot_l1pt_init
103 105
104 XPUTC(#68) 106 XPUTC(#68)
105 107
106 /* 108 /*
107 * Before we turn on the MMU, let the other processors out of the 109 * Before we turn on the MMU, let the other processors out of the
108 * SKU ROM by setting the magic LUT address to our own mp_start 110 * SKU ROM by setting the magic LUT address to our own mp_start
109 * routine.  111 * routine.
110 */ 112 */
111 ldr r1, .Lsku_rom_lut 113 movw r1, #:lower16:0xffff0400
112 adr r2, a9_mpstart 114 movt r1, #:upper16:0xffff0400
 115 adr r2, cortex_mpstart
113 str r2, [r1] 116 str r2, [r1]
114 sev /* wake up the others */ 117 sev /* wake up the others */
115 118
116 /* 119 /*
117 * init the CPU TLB, Cache, MMU. 120 * init the CPU TLB, Cache, MMU.
118 */ 121 */
119 XPUTC(#69) 122 XPUTC(#69)
120 123
121 ldr r0, .Ltemp_l1_table /* The page table address */ 124 ldr r0, .Ltemp_l1_table /* The page table address */
122 bl arm_cpuinit 125 bl arm_cpuinit
123 126
124 XPUTC(#89) 127 XPUTC(#89)
125 128
 129 adr r1, bcm53xx_start
126 movw r0, #:lower16:uboot_args 130 movw r0, #:lower16:uboot_args
127 movt r0, #:upper16:uboot_args 131 movt r0, #:upper16:uboot_args
 132 bfi r1, r0, #0, #28
128 ldr r2, [r0] 133 ldr r2, [r0]
129 movw r1, #:lower16:(uboot_args-.LPIC1) 134 ldr r3, [r1]
130 movt r1, #:upper16:(uboot_args-.LPIC1) 135 cmp r2, r3
131 add r1, r1, pc 
132 ldr r1, [r1] 
133.LPIC1: 
134 cmp r1, r2 
1351: bne 1b 1361: bne 1b
136 137
137 XPUTC(#90) 138 XPUTC(#90)
138 139
139 /* 140 /*
140 * Let's turn off the CCA watchdog since nothing went horribly wrong. 141 * Let's turn off the CCA watchdog since nothing went horribly wrong.
141 */ 142 */
142 ldr r0, .Lcca_wdog 143 ldr r0, .Lcca_wdog
143 mov r1, #0 144 mov r1, #0
144 str r1, [r0] 145 str r1, [r0]
145 146
146 XPUTC(#33) 147 XPUTC(#33)
147 XPUTC(#10) 148 XPUTC(#10)
@@ -149,29 +150,26 @@ _C_LABEL(bcm53xx_start): @@ -149,29 +150,26 @@ _C_LABEL(bcm53xx_start):
149 /* 150 /*
150 * Jump to start in locore.S, which in turn will call initarm and main. 151 * Jump to start in locore.S, which in turn will call initarm and main.
151 */ 152 */
152 movw ip, #:lower16:start 153 movw ip, #:lower16:start
153 movt ip, #:upper16:start 154 movt ip, #:upper16:start
154 bx ip 155 bx ip
155 nop 156 nop
156 nop 157 nop
157 nop 158 nop
158 nop 159 nop
159 160
160 /* NOTREACHED */ 161 /* NOTREACHED */
161 162
162.Lsku_rom_lut: 
163 .word 0xffff0400 
164 
165.Lcca_wdog: 163.Lcca_wdog:
166 .word 0x18000080 164 .word 0x18000080
167 .word 0x0fffffff /* maximum watchdog time out, about 10 seconds */ 165 .word 0x0fffffff /* maximum watchdog time out, about 10 seconds */
168 166
169.Ltemp_l1_table: 167.Ltemp_l1_table:
170 /* Put the temporary L1 translation table far enough away. */ 168 /* Put the temporary L1 translation table far enough away. */
171 .word 31 * 0x100000 - L1_TABLE_SIZE 169 .word 31 * 0x100000 - L1_TABLE_SIZE
172 170
173#include <arm/cortex/a9_mpsubr.S> 171#include <arm/cortex/a9_mpsubr.S>
174 172
175mmu_init_table: 173mmu_init_table:
176 /* Add 128MB of VA==PA at 0x80000000 so we can keep the kernel going */ 174 /* Add 128MB of VA==PA at 0x80000000 so we can keep the kernel going */
177#ifdef BCM5301X 175#ifdef BCM5301X
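
Reassembled in unified form, the secondary-CPU release above now reads
(0xffff0400 is the SKU ROM wait-loop slot formerly kept in the deleted
.Lsku_rom_lut literal):

	movw	r1, #:lower16:0xffff0400	@ SKU ROM lookup slot
	movt	r1, #:upper16:0xffff0400
	adr	r2, cortex_mpstart		@ PC-relative, so a physical
						@ address while the MMU is off
	str	r2, [r1]			@ aim the ROM's loop at our code
	sev					@ wake up the others

The secondaries then run cortex_mpstart, reach the mailbox loop shown in
the a9_mpsubr.S diff, and wait there until the kernel releases them.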

cvs diff -r1.5 -r1.6 src/sys/arch/evbarm/cubie/Attic/cubie_start.S

--- src/sys/arch/evbarm/cubie/Attic/cubie_start.S 2014/01/24 05:13:06 1.5
+++ src/sys/arch/evbarm/cubie/Attic/cubie_start.S 2014/02/21 22:22:48 1.6
@@ -30,79 +30,93 @@ @@ -30,79 +30,93 @@
30#include "opt_allwinner.h" 30#include "opt_allwinner.h"
31#include "opt_com.h" 31#include "opt_com.h"
32#include "opt_cpuoptions.h" 32#include "opt_cpuoptions.h"
33#include "opt_cputypes.h" 33#include "opt_cputypes.h"
34#include "opt_multiprocessor.h" 34#include "opt_multiprocessor.h"
35 35
36#include <arm/asm.h> 36#include <arm/asm.h>
37#include <arm/armreg.h> 37#include <arm/armreg.h>
38#include "assym.h" 38#include "assym.h"
39 39
40#include <arm/allwinner/awin_reg.h> 40#include <arm/allwinner/awin_reg.h>
41#include <evbarm/cubie/platform.h>  41#include <evbarm/cubie/platform.h>
42 42
43RCSID("$NetBSD: cubie_start.S,v 1.5 2014/01/24 05:13:06 matt Exp $") 43RCSID("$NetBSD: cubie_start.S,v 1.6 2014/02/21 22:22:48 matt Exp $")
44 44
45#if defined(VERBOSE_INIT_ARM) 45#if defined(VERBOSE_INIT_ARM)
46#define XPUTC(n) mov r0, n; bl xputc 46#define XPUTC(n) mov r0, n; bl xputc
47#ifdef __ARMEB__ 47#ifdef __ARMEB__
48#define COM_BSWAP 48#define COM_BSWAP
49#endif 49#endif
50#define COM_MULT 4 50#define COM_MULT 4
51#define XPUTC_COM 1 51#define XPUTC_COM 1
52#else 52#else
53#define XPUTC(n) 53#define XPUTC(n)
54#endif 54#endif
55 55
56#define INIT_MEMSIZE 128 56#define INIT_MEMSIZE 128
57#define TEMP_L1_TABLE (AWIN_SDRAM_PBASE + INIT_MEMSIZE * 0x100000 - L1_TABLE_SIZE) 57#define TEMP_L1_TABLE (AWIN_SDRAM_PBASE + INIT_MEMSIZE * 0x100000 - L1_TABLE_SIZE)
58 58
 59#define MD_CPU_HATCH _C_LABEL(awin_cpu_hatch)
 60
59/* 61/*
60 * Kernel start routine for CUBIEBOARD boards. 62 * Kernel start routine for CUBIEBOARD boards.
61 * At this point, this code has been loaded into SDRAM 63 * At this point, this code has been loaded into SDRAM
62 * and the MMU is off 64 * and the MMU is off
63 */ 65 */
64 .section .start,"ax",%progbits 66 .section .start,"ax",%progbits
65 67
66 .global _C_LABEL(cubie_start) 68 .global _C_LABEL(cubie_start)
67_C_LABEL(cubie_start): 69_C_LABEL(cubie_start):
68#ifdef __ARMEB__ 70#ifdef __ARMEB__
69 setend be /* force big endian */ 71 setend be /* force big endian */
70#endif 72#endif
71 73
72 /* Move into supervisor mode and disable IRQs/FIQs. */ 74 /* Move into supervisor mode and disable IRQs/FIQs. */
73 cpsid if, #PSR_SVC32_MODE 75 cpsid if, #PSR_SVC32_MODE
74 76
75 /* 77 /*
76 * Save any arguments passed to us (do it PIC). 78 * Save any arguments passed to us. But since .start is at 0x40000000
77 */ 79 * and .text is at 0x80000000, we can't use the address that
78 movw r4, #:lower16:uboot_args-.LPIC0 80 * the linker gave us directly. We have to replace the upper 4 bits
79 movt r4, #:upper16:uboot_args-.LPIC0 81 * of the address the linker gave us with the upper 4
80 /* 82 * bits of our pc. Or replace the lower 28 bits of our PC with the
81 * Since .start is at 0x40000000 and .text is at 0x80000000 83 * lower 28 bits of what the linker gave us.
82 * we have to clear the upper bits of the address so it's relative 84 */
83 * to the current PC, not .text. 85 adr r4, _C_LABEL(cubie_start)
84 */ 86 movw r5, #:lower16:uboot_args
85 bic r4, r4, #0xf0000000 87 movt r5, #:upper16:uboot_args
86 add r4, r4, pc 88 bfi r4, r5, #0, #28
87 stmia r4, {r0-r3} 89
88.LPIC0: 90 stmia r4, {r0-r3} // Save the arguments
89#ifdef CPU_CORTEXA9 
90 /* 91 /*
91 * Turn on the SCU if we are on a Cortex-A9 92 * Turn on the SMP bit
92 */ 93 */
93 bl a9_start 94 bl cortex_init
94 XPUTC(#67) 95 XPUTC(#67)
95#endif 96
 97#if defined(MULTIPROCESSOR) && 0
 98 movw r0, #:lower16:(AWIN_CORE_PBASE+AWIN_CPUCFG_OFFSET)
 99 movt r0, #:upper16:(AWIN_CORE_PBASE+AWIN_CPUCFG_OFFSET)
 100
 101 /* Set where the other CPU(s) are going to execute */
 102 adr r1, cortex_mpstart
 103 str r1, [r0, #AWIN_CPUCFG_PRIVATE_REG]
 104
 105 /* Bring CPU1 out of reset */
 106 ldr r1, [r0, #AWIN_CPUCFG_CPU1_RST_CTRL_REG]
 107 orr r1, r1, #(AWIN_CPUCFG_CPU_RST_CTRL_CORE_RESET|AWIN_CPUCFG_CPU_RST_CTRL_RESET)
 108 str r1, [r0, #AWIN_CPUCFG_CPU1_RST_CTRL_REG]
 109#endif /* MULTIPROCESSOR */
96 110
97 /* 111 /*
98 * Set up a preliminary mapping in the MMU to allow us to run 112 * Set up a preliminary mapping in the MMU to allow us to run
99 * at KERNEL_BASE with caches on. 113 * at KERNEL_BASE with caches on.
100 */ 114 */
101 movw r0, #:lower16:TEMP_L1_TABLE 115 movw r0, #:lower16:TEMP_L1_TABLE
102 movt r0, #:upper16:TEMP_L1_TABLE 116 movt r0, #:upper16:TEMP_L1_TABLE
103 adr r1, .Lmmu_init_table 117 adr r1, .Lmmu_init_table
104 bl arm_boot_l1pt_init 118 bl arm_boot_l1pt_init
105 119
106 XPUTC(#68) 120 XPUTC(#68)
107 121
108 /* 122 /*