Sun Sep 2 05:01:54 2012 UTC ()
Clean up and bring forward changes from bcm53xx_start.S.
Use more symbolic names ...


(matt)
diff -r1.1 -r1.2 src/sys/arch/arm/cortex/a9_mpsubr.S

cvs diff -r1.1 -r1.2 src/sys/arch/arm/cortex/Attic/a9_mpsubr.S

--- src/sys/arch/arm/cortex/Attic/a9_mpsubr.S 2012/09/01 00:03:14 1.1
+++ src/sys/arch/arm/cortex/Attic/a9_mpsubr.S 2012/09/02 05:01:54 1.2
@@ -1,14 +1,14 @@
-/*	$NetBSD: a9_mpsubr.S,v 1.1 2012/09/01 00:03:14 matt Exp $	*/
+/*	$NetBSD: a9_mpsubr.S,v 1.2 2012/09/02 05:01:54 matt Exp $	*/
 /*-
  * Copyright (c) 2012 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas of 3am Software Foundry.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -24,34 +24,43 @@
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "opt_cpuoptions.h"
 #include "opt_cputypes.h"
 #include "opt_multiprocessor.h"
 
 #include <arm/asm.h>
 #include <arm/armreg.h>
+#include <arm/cortex/scu_reg.h>
 #include "assym.h"
 
+
+/* We'll modify va and pa at run time so we can use relocatable addresses. */
+#define	MMU_INIT(va,pa,n_sec,attr) \
+	.word	va    ; \
+	.word	pa    ; \
+	.word	n_sec ; \
+	.word	attr  ;
+
 /*
  * Set up a preliminary mapping in the MMU to allow us to run
  * at KERNEL_BASE with caches on.
  */
 arm_boot_l1pt_init:
-	mv	ip, r1			@ save mmu table addr
+	mov	ip, r1			@ save mmu table addr
 	/* Build page table from scratch */
 	mov	r1, r0			/* Start address to clear memory. */
 	/* Zero the entire table so all virtual addresses are invalid. */
 	mov	r2, #L1_TABLE_SIZE	/* in bytes */
 	mov	r3, #0
 	mov	r4, r3
 	mov	r5, r3
 	mov	r6, r3
 	mov	r7, r3
 	mov	r8, r3
 	mov	r10, r3
 	mov	r11, r3
 1:	stmia	r1!, {r3-r8,r10-r11}
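
The new MMU_INIT() macro emits one four-word table entry per mapping request; arm_boot_l1pt_init (below) walks these entries until it finds one with n_sec == 0. A minimal C rendering of the entry layout, for reference (the struct name is illustrative, not from the source):

```c
#include <stdint.h>

/*
 * Hypothetical C view of one mmu_init_table entry as laid out by
 * MMU_INIT(va, pa, n_sec, attr).
 */
struct mmu_init_entry {
	uint32_t va;	/* virtual address of the first 1MB section */
	uint32_t pa;	/* physical address it maps to */
	uint32_t n_sec;	/* number of 1MB sections; 0 terminates the table */
	uint32_t attr;	/* L1 section attributes ORed into each entry */
};
```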
@@ -62,58 +71,54 @@ arm_boot_l1pt_init:
 	bne	1b
 
 	/* Now create our entries per the mmu_init_table. */
 	l1table	.req r0
 	va	.req r1
 	pa	.req r2
 	n_sec	.req r3
 	attr	.req r4
 	itable	.req r5
 
 	mov	itable, ip		@ reclaim table address
 	b	3f
 
-2:	str	pa, [l1table, va]
-	add	va, va, #4
+2:	str	pa, [l1table, va, lsl #2]
+	add	va, va, #1
 	add	pa, pa, #(L1_S_SIZE)
 	subs	n_sec, n_sec, #1
 	bhi	2b
 
 3:	ldmia	itable!, {va,pa,n_sec,attr}
 	/* Convert va to l1 offset: va = 4 * (va >> L1_S_SHIFT) */
 	lsr	va, va, #L1_S_SHIFT
-	lsl	va, va, #2
 	/* Convert pa to l1 entry: pa = (pa & L1_S_FRAME) | attr */
 #ifdef _ARM_ARCH_7
 	bfc	pa, #0, #L1_S_SHIFT
 #else
 	lsr	pa, pa, #L1_S_SHIFT
 	lsl	pa, pa, #L1_S_SHIFT
-#ndif
+#endif
 	orr	pa, pa, attr
 	cmp	n_sec, #0
 	bne	2b
 	bx	lr			@ return
 
 	.unreq	va
 	.unreq	pa
 	.unreq	n_sec
 	.unreq	attr
 	.unreq	itable
 	.unreq	l1table
 
-.Lctl_ID:
-	.word	CPU_CONTROL_IC_ENABLE|CPU_CONTROL_DC_ENABLE
-
 a9_cpuinit:
 	/*
 	 * In theory, because the MMU is off, we shouldn't need all of this,
 	 * but let's not take any chances and do a typical sequence to set
 	 * the Translation Table Base.
 	 */
 	mov	ip, lr
 	mov	r10, r0
 
 	mrc	p15, 0, r2, c1, c0, 0	/*  "   "   " */
 	bic	r2, r2, #CPU_CONTROL_DC_ENABLE	@ clear data cache enable
 	bic	r2, r2, #CPU_CONTROL_IC_ENABLE	@ clear instruction cache enable
 	mcr	p15, 0, r2, c1, c0, 0	/*  "   "   " */
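
With the fixes above (`str pa, [l1table, va, lsl #2]` plus `add va, va, #1`, and the now-redundant `lsl va, va, #2` dropped), `va` becomes a section index rather than a byte offset. A sketch in C of what the corrected loop computes, assuming the L1_S_* values from arm/pte.h (1MB sections):

```c
#include <stdint.h>

#define L1_S_SHIFT	20			/* 1MB per L1 section */
#define L1_S_SIZE	(1u << L1_S_SHIFT)
#define L1_S_FRAME	(~(L1_S_SIZE - 1))

/* Sketch of what arm_boot_l1pt_init's entry loop now computes. */
static void
l1pt_map_sections(uint32_t *l1table, uint32_t va, uint32_t pa,
    uint32_t n_sec, uint32_t attr)
{
	uint32_t idx = va >> L1_S_SHIFT;	/* section index into l1table */

	while (n_sec-- > 0) {
		/* one section entry: frame bits of pa plus attributes */
		l1table[idx++] = (pa & L1_S_FRAME) | attr;
		pa += L1_S_SIZE;
	}
}
```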
@@ -149,68 +154,90 @@ a9_cpuinit:
 	.align 5
 	@ turn mmu on!
 	mov	r0, r0
 	mcr	p15, 0, r0, c1, c0, 0
 
 	/*
 	 * Ensure that the coprocessor has finished turning on the MMU.
 	 */
 	mrc	p15, 0, r0, c0, c0, 0	/* Read an arbitrary value. */
 	mov	r0, r0			/* Stall until read completes. */
 
 	bx	ip			/* return */
 
-#if defined(VERBOSE_INIT_ARM) && XPUTC
+/*
+ * Coprocessor register initialization values
+ */
+
+	.p2align 2
+	/* bits to clear in the Control Register */
+.Lcontrol_clr:
+	.word	0
+
+	/* bits to set in the Control Register */
+.Lcontrol_set:
+	.word	CPU_CONTROL_MMU_ENABLE | \
+		CPU_CONTROL_AFLT_ENABLE | \
+		CPU_CONTROL_DC_ENABLE | \
+		CPU_CONTROL_SYST_ENABLE | \
+		CPU_CONTROL_SWP_ENABLE | \
+		CPU_CONTROL_IC_ENABLE
+
+#if defined(VERBOSE_INIT_ARM) && XPUTC_COM
 #define TIMO	0x25000
+#ifndef COM_MULT
+#define COM_MULT	1
+#endif
 xputc:
 #ifdef MULTIPROCESSOR
 	mov	r2, #1
 	ldr	r3, .Lcomlock
 10:
 	ldrex	r1, [r3]
 	cmp	r1, #0
 	bne	10b
 	strex	r1, r2, [r3]
 	cmp	r1, #0
 	bne	10b
+	dsb
 #endif
 
 	mov	r2, #TIMO
 	ldr	r3, .Luart0
-1:	ldrb	r1, [r3, #COM_LSR]
+1:	ldrb	r1, [r3, #(COM_LSR*COM_MULT)]
 	tst	r1, #LSR_TXRDY
 	bne	2f
 	subs	r2, r2, #1
 	bne	1b
 2:
 	strb	r0, [r3, #COM_DATA]
 
 	mov	r2, #TIMO
-3:	ldrb	r1, [r3, #COM_LSR]
+3:	ldrb	r1, [r3, #(COM_LSR*COM_MULT)]
 	tst	r1, #LSR_TSRE
 	bne	4f
 	subs	r2, r2, #1
 	bne	3b
 4:
 #ifdef MULTIPROCESSOR
 	ldr	r3, .Lcomlock
 	mov	r0, #0
 	str	r0, [r3]
 	dsb
 #endif
 	bx	lr
 
 .Luart0:
-	.word	CONSADDR
+	.word	CONADDR
 
 #ifdef MULTIPROCESSOR
 .Lcomlock:
 	.word	comlock
 
 	.pushsection .data
 comlock:
 	.p2align 2
 	.word	0		@ not in bss
 
 	.popsection
 #endif /* MULTIPROCESSOR */
 #endif /* VERBOSE_INIT_ARM */
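
The MULTIPROCESSOR path above serializes console output with a ldrex/strex spin lock, and the newly added `dsb` keeps the UART accesses from being reordered before the lock is taken; the `COM_LSR*COM_MULT` change scales register offsets for UARTs whose registers are not byte-packed. A loose C11 rendering of the locking, with memory ordering chosen to match the apparent intent of the barriers (names mirror the assembly; this is a sketch, not the source):

```c
#include <stdatomic.h>

static atomic_uint comlock;	/* mirrors the comlock word in .data */

/* spin until we own the console, as the ldrex/strex loop does */
static void
com_lock(void)
{
	unsigned int expected;

	do {
		expected = 0;
	} while (!atomic_compare_exchange_weak_explicit(&comlock,
	    &expected, 1, memory_order_acquire, memory_order_relaxed));
}

/* release; the str/dsb pair in the assembly plays the same role */
static void
com_unlock(void)
{
	atomic_store_explicit(&comlock, 0, memory_order_release);
}
```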
@@ -219,103 +246,107 @@ comlock:
 a9_start:
 	mov	r10, lr			@ save lr
 
 	cpsid	if, #PSR_SVC32_MODE
 
 	XPUTC(#64)
 	bl	_C_LABEL(armv7_icache_inv_all)	@ invalidate i-cache
 
 	/*
 	 * Step 1a, invalidate the all cache tags in all ways on the SCU.
 	 */
 	XPUTC(#65)
 	mrc	p15, 4, r3, c15, c0, 0	@ read cbar
-	ldr	r0, [r3, #4]		@ read scu config
+	ldr	r0, [r3, #SCU_CFG]	@ read scu config
 	and	r0, r0, #7		@ get cpu max
-	add	r0, r0, #1		@ adjust to cpu num
-	lsl	r0, r0, #4		@ multiply by 16
-	sub	r0, r0, #1		@ make it into a mask
-	str	r0, [r3, #12]		@ write scu invalidate all
+	add	r0, r0, #2		@ adjust to cpu num
+	mov	r1, #0xf		@ select all ways
+	lsl	r1, r1, r0		@ shift into place
+	str	r1, [r3, #SCU_INV_ALL_REG] @ write scu invalidate all
 	dsb
 	isb
 
 	/*
 	 * Step 1b, invalidate the data cache
 	 */
 	XPUTC(#66)
 	bl	_C_LABEL(armv7_dcache_wbinv_all) @ writeback/invalidate d-cache
 
 	/*
 	 * Step 2, disable the data cache
 	 */
 	mrc	p15, 0, r2, c1, c0, 0	@ get system ctl register (save)
 	bic	r1, r2, #CPU_CONTROL_DC_ENABLE	@ clear data cache enable
 	mcr	p15, 0, r1, c1, c0, 0	@ set system ctl register
 	isb
 
 	/*
 	 * Step 3, enable the SCU (and set SMP mode)
 	 */
-	ldr	r1, [r3, #4]		@ read scu config
-	orr	r1, r1, #0xf0		@ set smp mode
-	str	r1, [r3, #4]		@ write scu config
-	ldr	r1, [r3, #0]		@ read scu control
-	orr	r1, r1, #1		@ set scu enable flag
-	str	r1, [r3, #4]		@ write scu control
+	ldr	r1, [r3, #SCU_CTL]	@ read scu control
+	orr	r1, r1, #SCU_CTL_SCU_ENA @ set scu enable flag
+	str	r1, [r3, #SCU_CTL]	@ write scu control
 	dsb
 	isb
 
 	/*
 	 * Step 4a, enable the data cache
 	 */
 	mcr	p15, 0, r2, c1, c0, 0	@ reenable caches
 	isb
 
 	/*
 	 * Step 4b, set ACTLR.SMP=1 (and ACTRL.FX=1)
 	 */
 	mrc	p15, 0, r0, c1, c0, 1	@ read aux ctl
-	orr	r0, #0x41		@ enable cache/tlb/coherency
+	orr	r0, #CORTEXA9_AUXCTL_SMP @ enable SMP
+	mcr	p15, 0, r0, c1, c0, 1	@ write aux ctl
+	isb
+	orr	r0, #CORTEXA9_AUXCTL_FW	@ enable cache/tlb/coherency
 	mcr	p15, 0, r0, c1, c0, 1	@ write aux ctl
 	isb
 
 	bx	r10
-ASEND(a9_startup)
+ASEND(a9_start)
 
 /*
  * Secondary processors come here after exiting the SKU ROM.
  */
 a9_mpstart:
 #ifdef MULTIPROCESSOR
 	/*
 	 * Step 1, invalidate the caches
 	 */
 	bl	_C_LABEL(armv7_icache_inv_all)	@ toss i-cache
 	bl	_C_LABEL(armv7_dcache_inv_all)	@ toss d-cache
 
 	/*
 	 * Step 2, wait for the SCU to be enabled
 	 */
 	mrc	p15, 4, r3, c15, c0, 0	@ read cbar
-1:	ldr	r0, [r3, #0]		@ read scu control
-	tst	r0, #1			@ enable bit set yet?
+1:	ldr	r0, [r3, #SCU_CTL]	@ read scu control
+	tst	r0, #SCU_CTL_SCU_ENA	@ enable bit set yet?
 	bne	1b			@ try again
 
 	/*
 	 * Step 3, set ACTLR.SMP=1 (and ACTRL.FX=1)
 	 */
 	mrc	p15, 0, r0, c1, c0, 1	@ read aux ctl
-	orr	r0, #0x41		@ enable cache/tlb/coherency
+	orr	r0, #CORTEXA9_AUXCTL_SMP @ enable SMP
 	mcr	p15, 0, r0, c1, c0, 1	@ write aux ctl
+	mov	r0, r0
+	orr	r0, #CORTEXA9_AUXCTL_FW	@ enable cache/tlb/coherency
+	mcr	p15, 0, r0, c1, c0, 1	@ write aux ctl
+	mov	r0, r0
 
 	/*
 	 * We should be in SMP mode now.
 	 */
 	mrc	p15, 0, r4, c0, c0, 5	@ get MPIDR
 	and	r4, r4, #7		@ get our cpu numder
 
 #if defined(VERBOSE_INIT_ARM)
 	add	r0, r4, #48
 	bl	xputc
 #endif
 
 	ldr	r0, .Lcpu_hatched	@ now show we've hatched
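
For reference, the SCU accesses in a9_start rendered as C. The offsets follow the Cortex-A9 SCU register layout that scu_reg.h now names symbolically (values shown here for illustration); the invalidate-all mask below sets the four way bits for every CPU, which is the documented intent of the register, rather than mirroring the new assembly instruction for instruction.

```c
#include <stdint.h>

/* Cortex-A9 SCU register offsets (illustrative; see scu_reg.h) */
#define SCU_CTL		0x00	/* SCU control register */
#define SCU_CTL_SCU_ENA	0x01	/* SCU enable bit */
#define SCU_CFG		0x04	/* SCU configuration register */
#define SCU_INV_ALL_REG	0x0c	/* invalidate-all-ways register */

/* Sketch of steps 1a and 3, assuming scu is the base address from CBAR. */
static void
scu_inv_and_enable(volatile uint32_t *scu)
{
	/* low 3 bits of SCU_CFG hold the number of CPUs minus one */
	uint32_t ncpu = (scu[SCU_CFG / 4] & 7) + 1;	/* at most 4 on A9 */

	/* 4 way bits per CPU: invalidate every way on every CPU */
	scu[SCU_INV_ALL_REG / 4] = (1u << (4 * ncpu)) - 1;

	scu[SCU_CTL / 4] |= SCU_CTL_SCU_ENA;		/* enable the SCU */
}
```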
@@ -370,27 +401,27 @@ a9_mpstart:
 #else
 #error either TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP must be defined
 #endif
 	str	r6, [r5, #CI_CURLWP]	/* and note we are running on it */
 
 	ldr	r0, .Lcpu_marker
 	str	pc, [r0]
 
 	mov	r0, r5			/* pass cpu_info */
 	mov	r1, r4			/* pass cpu_id */
 	ldr	r2, .Lbcm53xx_cpu_hatch	/* pass md_cpu_hatch */
 	bl	_C_LABEL(cpu_hatch)
 	b	_C_LABEL(idle_loop)
-
+ASEND(a9_mpstart)
 	/* NOT REACHED */
 
 .Lkernel_l1pt:
 	.word	_C_LABEL(kernel_l1pt)
 .Lcpu_info:
 	.word	_C_LABEL(cpu_info)
 .Lcpu_max:
 	.word	_C_LABEL(arm_cpu_max)
 .Lcpu_hatched:
 	.word	_C_LABEL(arm_cpu_hatched)
 .Lcpu_mbox:
 	.word	_C_LABEL(arm_cpu_mbox)
 .Lcpu_marker: