Thu Feb 20 14:48:11 2014 UTC (matt)
Make sure AFLT_ENABLE is in the cpuctrl mask for armv7_setup.
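
The armv7_setup() change the log message describes falls outside the part of
cpufunc.c shown in the excerpt below, so what follows is only a rough, hedged
sketch of the pattern involved, using the cpuctrl/cpuctrlmask convention the
other *_setup() routines in this file follow. Apart from
CPU_CONTROL_AFLT_ENABLE and cpu_control(), the surrounding bit values and the
wrapper function name are illustrative assumptions, not the committed hunk.

	/*
	 * Illustrative sketch only, not the committed change.  The point is
	 * that the alignment-fault enable bit has to be part of the mask of
	 * CP15 control-register bits the setup routine manages; otherwise
	 * the cpu_control() call below can never set or clear that bit.
	 */
	static void
	armv7_setup_sketch(void)		/* hypothetical stand-in for armv7_setup() */
	{
		int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
		    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE;
		int cpuctrlmask = cpuctrl
		    | CPU_CONTROL_AFLT_ENABLE;	/* bit the commit makes sure is in the mask */

		/* Apply: only bits present in cpuctrlmask are modified. */
		cpu_control(cpuctrlmask, cpuctrl);
	}
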
diff -r1.136 -r1.137 src/sys/arch/arm/arm/cpufunc.c

cvs diff -r1.136 -r1.137 src/sys/arch/arm/arm/cpufunc.c

--- src/sys/arch/arm/arm/cpufunc.c 2014/01/23 19:28:47 1.136
+++ src/sys/arch/arm/arm/cpufunc.c 2014/02/20 14:48:11 1.137
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.c,v 1.136 2014/01/23 19:28:47 matt Exp $	*/
+/*	$NetBSD: cpufunc.c,v 1.137 2014/02/20 14:48:11 matt Exp $	*/
 
 /*
  * arm7tdmi support code Copyright (c) 2001 John Fremlin
@@ -49,7 +49,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.136 2014/01/23 19:28:47 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.137 2014/02/20 14:48:11 matt Exp $");
 
 #include "opt_compat_netbsd.h"
 #include "opt_cpuoptions.h"
1015 .cf_drain_writebuf = armv4_drain_writebuf, 1015 .cf_drain_writebuf = armv4_drain_writebuf,
1016 .cf_flush_brnchtgt_C = cpufunc_nullop, 1016 .cf_flush_brnchtgt_C = cpufunc_nullop,
1017 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 1017 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
1018 1018
1019 .cf_sleep = (void *)cpufunc_nullop, 1019 .cf_sleep = (void *)cpufunc_nullop,
1020 1020
1021 /* Soft functions */ 1021 /* Soft functions */
1022 1022
1023 .cf_dataabt_fixup = cpufunc_null_fixup, 1023 .cf_dataabt_fixup = cpufunc_null_fixup,
1024 .cf_prefetchabt_fixup = cpufunc_null_fixup, 1024 .cf_prefetchabt_fixup = cpufunc_null_fixup,
1025 1025
1026 .cf_context_switch = sa110_context_switch, 1026 .cf_context_switch = sa110_context_switch,
1027 1027
1028 .cf_setup = sa110_setup 1028 .cf_setup = sa110_setup
1029}; 1029};
1030#endif /* CPU_SA110 */ 1030#endif /* CPU_SA110 */
1031 1031
1032#if defined(CPU_SA1100) || defined(CPU_SA1110) 1032#if defined(CPU_SA1100) || defined(CPU_SA1110)
1033struct cpu_functions sa11x0_cpufuncs = { 1033struct cpu_functions sa11x0_cpufuncs = {
1034 /* CPU functions */ 1034 /* CPU functions */
1035 1035
1036 .cf_id = cpufunc_id, 1036 .cf_id = cpufunc_id,
1037 .cf_cpwait = cpufunc_nullop, 1037 .cf_cpwait = cpufunc_nullop,
1038 1038
1039 /* MMU functions */ 1039 /* MMU functions */
1040 1040
1041 .cf_control = cpufunc_control, 1041 .cf_control = cpufunc_control,
1042 .cf_domains = cpufunc_domains, 1042 .cf_domains = cpufunc_domains,
1043 .cf_setttb = sa1_setttb, 1043 .cf_setttb = sa1_setttb,
1044 .cf_faultstatus = cpufunc_faultstatus, 1044 .cf_faultstatus = cpufunc_faultstatus,
1045 .cf_faultaddress = cpufunc_faultaddress, 1045 .cf_faultaddress = cpufunc_faultaddress,
1046 1046
1047 /* TLB functions */ 1047 /* TLB functions */
1048 1048
1049 .cf_tlb_flushID = armv4_tlb_flushID, 1049 .cf_tlb_flushID = armv4_tlb_flushID,
1050 .cf_tlb_flushID_SE = sa1_tlb_flushID_SE, 1050 .cf_tlb_flushID_SE = sa1_tlb_flushID_SE,
1051 .cf_tlb_flushI = armv4_tlb_flushI, 1051 .cf_tlb_flushI = armv4_tlb_flushI,
@@ -2059,1522 +2059,1515 @@ set_cpufuncs(void) @@ -2059,1522 +2059,1515 @@ set_cpufuncs(void)
2059#endif /* __CPU_XSCALE_PXA2XX */ 2059#endif /* __CPU_XSCALE_PXA2XX */
2060#ifdef CPU_XSCALE_IXP425 2060#ifdef CPU_XSCALE_IXP425
2061 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 || 2061 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
2062 cputype == CPU_ID_IXP425_266) { 2062 cputype == CPU_ID_IXP425_266) {
2063 ixp425_icu_init(); 2063 ixp425_icu_init();
2064 2064
2065 cpufuncs = xscale_cpufuncs; 2065 cpufuncs = xscale_cpufuncs;
2066#if defined(PERFCTRS) 2066#if defined(PERFCTRS)
2067 xscale_pmu_init(); 2067 xscale_pmu_init();
2068#endif 2068#endif
2069 2069
2070 get_cachetype_cp15(); 2070 get_cachetype_cp15();
2071 pmap_pte_init_xscale(); 2071 pmap_pte_init_xscale();
2072 2072
2073 return 0; 2073 return 0;
2074 } 2074 }
2075#endif /* CPU_XSCALE_IXP425 */ 2075#endif /* CPU_XSCALE_IXP425 */
2076#if defined(CPU_CORTEX) 2076#if defined(CPU_CORTEX)
2077 if (CPU_ID_CORTEX_P(cputype)) { 2077 if (CPU_ID_CORTEX_P(cputype)) {
2078 cpufuncs = cortex_cpufuncs; 2078 cpufuncs = cortex_cpufuncs;
2079 cpu_do_powersave = 1; /* Enable powersave */ 2079 cpu_do_powersave = 1; /* Enable powersave */
2080#if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6) 2080#if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2081 cpu_armv7_p = true; 2081 cpu_armv7_p = true;
2082#endif 2082#endif
2083 get_cachetype_cp15(); 2083 get_cachetype_cp15();
2084 pmap_pte_init_armv7(); 2084 pmap_pte_init_armv7();
2085 if (arm_cache_prefer_mask) 2085 if (arm_cache_prefer_mask)
2086 uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1; 2086 uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
2087 /* 2087 /*
2088 * Start and reset the PMC Cycle Counter. 2088 * Start and reset the PMC Cycle Counter.
2089 */ 2089 */
2090 armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C); 2090 armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
2091 armreg_pmcntenset_write(CORTEX_CNTENS_C); 2091 armreg_pmcntenset_write(CORTEX_CNTENS_C);
2092 return 0; 2092 return 0;
2093 } 2093 }
2094#endif /* CPU_CORTEX */ 2094#endif /* CPU_CORTEX */
2095 2095
2096#if defined(CPU_PJ4B) 2096#if defined(CPU_PJ4B)
2097 if ((cputype == CPU_ID_MV88SV581X_V6 || 2097 if ((cputype == CPU_ID_MV88SV581X_V6 ||
2098 cputype == CPU_ID_MV88SV581X_V7 || 2098 cputype == CPU_ID_MV88SV581X_V7 ||
2099 cputype == CPU_ID_MV88SV584X_V7 || 2099 cputype == CPU_ID_MV88SV584X_V7 ||
2100 cputype == CPU_ID_ARM_88SV581X_V6 || 2100 cputype == CPU_ID_ARM_88SV581X_V6 ||
2101 cputype == CPU_ID_ARM_88SV581X_V7) && 2101 cputype == CPU_ID_ARM_88SV581X_V7) &&
2102 (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) { 2102 (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
2103 cpufuncs = pj4bv7_cpufuncs; 2103 cpufuncs = pj4bv7_cpufuncs;
2104#if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6) 2104#if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2105 cpu_armv7_p = true; 2105 cpu_armv7_p = true;
2106#endif 2106#endif
2107 get_cachetype_cp15(); 2107 get_cachetype_cp15();
2108 pmap_pte_init_armv7(); 2108 pmap_pte_init_armv7();
2109 return 0; 2109 return 0;
2110 } 2110 }
2111#endif /* CPU_PJ4B */ 2111#endif /* CPU_PJ4B */
2112 2112
2113 /* 2113 /*
2114 * Bzzzz. And the answer was ... 2114 * Bzzzz. And the answer was ...
2115 */ 2115 */
2116 panic("No support for this CPU type (%08x) in kernel", cputype); 2116 panic("No support for this CPU type (%08x) in kernel", cputype);
2117 return(ARCHITECTURE_NOT_PRESENT); 2117 return(ARCHITECTURE_NOT_PRESENT);
2118} 2118}
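
Not part of the diff: the Cortex branch of set_cpufuncs() above starts the PMC
cycle counter via PMCR/PMCNTENSET. As a hedged illustration only, a reader could
then sample that counter from PMCCNTR (CP15 c9, c13, 0 on ARMv7); the helper
name below is made up.

	static inline uint32_t
	sketch_read_cyclecounter(void)
	{
		uint32_t cc;

		/* PMCCNTR, the cycle counter enabled by the Cortex path above. */
		__asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(cc));
		return cc;
	}
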
2119 2119
2120#ifdef CPU_ARM2 2120#ifdef CPU_ARM2
2121u_int arm2_id(void) 2121u_int arm2_id(void)
2122{ 2122{
2123 2123
2124 return CPU_ID_ARM2; 2124 return CPU_ID_ARM2;
2125} 2125}
2126#endif /* CPU_ARM2 */ 2126#endif /* CPU_ARM2 */
2127 2127
2128#ifdef CPU_ARM250 2128#ifdef CPU_ARM250
2129u_int arm250_id(void) 2129u_int arm250_id(void)
2130{ 2130{
2131 2131
2132 return CPU_ID_ARM250; 2132 return CPU_ID_ARM250;
2133} 2133}
2134#endif /* CPU_ARM250 */ 2134#endif /* CPU_ARM250 */
2135 2135
2136/* 2136/*
2137 * Fixup routines for data and prefetch aborts. 2137 * Fixup routines for data and prefetch aborts.
2138 * 2138 *
2139 * Several compile time symbols are used 2139 * Several compile time symbols are used
2140 * 2140 *
2141 * DEBUG_FAULT_CORRECTION - Print debugging information during the 2141 * DEBUG_FAULT_CORRECTION - Print debugging information during the
2142 * correction of registers after a fault. 2142 * correction of registers after a fault.
2143 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts 2143 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
 2144 * when defined, late aborts are used 2144 * when defined, late aborts are used
2145 */ 2145 */
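
As orientation before the fixup routines that follow, here is a hedged,
standalone sketch (not part of this file or the diff) of the base-register
correction that early_abort_fixup() below applies to an aborted LDM/STM with
writeback. The helper name is invented; the bit positions and the direction
test mirror the code below (bits 0-15 hold the register list, bit 23 selects
the direction).

	/* Hedged sketch only: undo the base update of an aborted LDM/STM. */
	static unsigned int
	ldm_stm_fixup_base(unsigned int insn, unsigned int base_val)
	{
		int reg, count = 0;

		/* Count the registers named in the transfer list. */
		for (reg = 0; reg < 16; reg++)
			if (insn & (1u << reg))
				count++;

		/* Same direction test as early_abort_fixup() below. */
		if (insn & (1u << 23))
			return base_val - count * 4;
		return base_val + count * 4;
	}
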
2146 2146
2147 2147
2148/* 2148/*
2149 * Null abort fixup routine. 2149 * Null abort fixup routine.
2150 * For use when no fixup is required. 2150 * For use when no fixup is required.
2151 */ 2151 */
2152int 2152int
2153cpufunc_null_fixup(void *arg) 2153cpufunc_null_fixup(void *arg)
2154{ 2154{
2155 return(ABORT_FIXUP_OK); 2155 return(ABORT_FIXUP_OK);
2156} 2156}
2157 2157
2158 2158
2159#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \ 2159#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
2160 defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) 2160 defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
2161 2161
2162#ifdef DEBUG_FAULT_CORRECTION 2162#ifdef DEBUG_FAULT_CORRECTION
2163#define DFC_PRINTF(x) printf x 2163#define DFC_PRINTF(x) printf x
2164#define DFC_DISASSEMBLE(x) disassemble(x) 2164#define DFC_DISASSEMBLE(x) disassemble(x)
2165#else 2165#else
2166#define DFC_PRINTF(x) /* nothing */ 2166#define DFC_PRINTF(x) /* nothing */
2167#define DFC_DISASSEMBLE(x) /* nothing */ 2167#define DFC_DISASSEMBLE(x) /* nothing */
2168#endif 2168#endif
2169 2169
2170/* 2170/*
2171 * "Early" data abort fixup. 2171 * "Early" data abort fixup.
2172 * 2172 *
2173 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used 2173 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
2174 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI]. 2174 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
2175 * 2175 *
2176 * In early aborts, we may have to fix up LDM, STM, LDC and STC. 2176 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
2177 */ 2177 */
2178int 2178int
2179early_abort_fixup(void *arg) 2179early_abort_fixup(void *arg)
2180{ 2180{
2181 trapframe_t *frame = arg; 2181 trapframe_t *frame = arg;
2182 u_int fault_pc; 2182 u_int fault_pc;
2183 u_int fault_instruction; 2183 u_int fault_instruction;
2184 int saved_lr = 0; 2184 int saved_lr = 0;
2185 2185
2186 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2186 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2187 2187
2188 /* Ok an abort in SVC mode */ 2188 /* Ok an abort in SVC mode */
2189 2189
2190 /* 2190 /*
2191 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2191 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2192 * as the fault happened in svc mode but we need it in the 2192 * as the fault happened in svc mode but we need it in the
2193 * usr slot so we can treat the registers as an array of ints 2193 * usr slot so we can treat the registers as an array of ints
2194 * during fixing. 2194 * during fixing.
2195 * NOTE: This PC is in the position but writeback is not 2195 * NOTE: This PC is in the position but writeback is not
2196 * allowed on r15. 2196 * allowed on r15.
2197 * Doing it like this is more efficient than trapping this 2197 * Doing it like this is more efficient than trapping this
2198 * case in all possible locations in the following fixup code. 2198 * case in all possible locations in the following fixup code.
2199 */ 2199 */
2200 2200
2201 saved_lr = frame->tf_usr_lr; 2201 saved_lr = frame->tf_usr_lr;
2202 frame->tf_usr_lr = frame->tf_svc_lr; 2202 frame->tf_usr_lr = frame->tf_svc_lr;
2203 2203
2204 /* 2204 /*
2205 * Note the trapframe does not have the SVC r13 so a fault 2205 * Note the trapframe does not have the SVC r13 so a fault
2206 * from an instruction with writeback to r13 in SVC mode is 2206 * from an instruction with writeback to r13 in SVC mode is
2207 * not allowed. This should not happen as the kstack is 2207 * not allowed. This should not happen as the kstack is
2208 * always valid. 2208 * always valid.
2209 */ 2209 */
2210 } 2210 }
2211 2211
2212 /* Get fault address and status from the CPU */ 2212 /* Get fault address and status from the CPU */
2213 2213
2214 fault_pc = frame->tf_pc; 2214 fault_pc = frame->tf_pc;
2215 fault_instruction = *((volatile unsigned int *)fault_pc); 2215 fault_instruction = *((volatile unsigned int *)fault_pc);
2216 2216
2217 /* Decode the fault instruction and fix the registers as needed */ 2217 /* Decode the fault instruction and fix the registers as needed */
2218 2218
2219 if ((fault_instruction & 0x0e000000) == 0x08000000) { 2219 if ((fault_instruction & 0x0e000000) == 0x08000000) {
2220 int base; 2220 int base;
2221 int loop; 2221 int loop;
2222 int count; 2222 int count;
2223 int *registers = &frame->tf_r0; 2223 int *registers = &frame->tf_r0;
2224 2224
2225 DFC_PRINTF(("LDM/STM\n")); 2225 DFC_PRINTF(("LDM/STM\n"));
2226 DFC_DISASSEMBLE(fault_pc); 2226 DFC_DISASSEMBLE(fault_pc);
2227 if (fault_instruction & (1 << 21)) { 2227 if (fault_instruction & (1 << 21)) {
2228 DFC_PRINTF(("This instruction must be corrected\n")); 2228 DFC_PRINTF(("This instruction must be corrected\n"));
2229 base = (fault_instruction >> 16) & 0x0f; 2229 base = (fault_instruction >> 16) & 0x0f;
2230 if (base == 15) 2230 if (base == 15)
2231 return ABORT_FIXUP_FAILED; 2231 return ABORT_FIXUP_FAILED;
2232 /* Count registers transferred */ 2232 /* Count registers transferred */
2233 count = 0; 2233 count = 0;
2234 for (loop = 0; loop < 16; ++loop) { 2234 for (loop = 0; loop < 16; ++loop) {
2235 if (fault_instruction & (1<<loop)) 2235 if (fault_instruction & (1<<loop))
2236 ++count; 2236 ++count;
2237 } 2237 }
2238 DFC_PRINTF(("%d registers used\n", count)); 2238 DFC_PRINTF(("%d registers used\n", count));
2239 DFC_PRINTF(("Corrected r%d by %d bytes ", 2239 DFC_PRINTF(("Corrected r%d by %d bytes ",
2240 base, count * 4)); 2240 base, count * 4));
2241 if (fault_instruction & (1 << 23)) { 2241 if (fault_instruction & (1 << 23)) {
2242 DFC_PRINTF(("down\n")); 2242 DFC_PRINTF(("down\n"));
2243 registers[base] -= count * 4; 2243 registers[base] -= count * 4;
2244 } else { 2244 } else {
2245 DFC_PRINTF(("up\n")); 2245 DFC_PRINTF(("up\n"));
2246 registers[base] += count * 4; 2246 registers[base] += count * 4;
2247 } 2247 }
2248 } 2248 }
2249 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) { 2249 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
2250 int base; 2250 int base;
2251 int offset; 2251 int offset;
2252 int *registers = &frame->tf_r0; 2252 int *registers = &frame->tf_r0;
2253 2253
2254 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */ 2254 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
2255 2255
2256 DFC_DISASSEMBLE(fault_pc); 2256 DFC_DISASSEMBLE(fault_pc);
2257 2257
2258 /* Only need to fix registers if write back is turned on */ 2258 /* Only need to fix registers if write back is turned on */
2259 2259
2260 if ((fault_instruction & (1 << 21)) != 0) { 2260 if ((fault_instruction & (1 << 21)) != 0) {
2261 base = (fault_instruction >> 16) & 0x0f; 2261 base = (fault_instruction >> 16) & 0x0f;
2262 if (base == 13 && 2262 if (base == 13 &&
2263 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) 2263 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2264 return ABORT_FIXUP_FAILED; 2264 return ABORT_FIXUP_FAILED;
2265 if (base == 15) 2265 if (base == 15)
2266 return ABORT_FIXUP_FAILED; 2266 return ABORT_FIXUP_FAILED;
2267 2267
2268 offset = (fault_instruction & 0xff) << 2; 2268 offset = (fault_instruction & 0xff) << 2;
2269 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 2269 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2270 if ((fault_instruction & (1 << 23)) != 0) 2270 if ((fault_instruction & (1 << 23)) != 0)
2271 offset = -offset; 2271 offset = -offset;
2272 registers[base] += offset; 2272 registers[base] += offset;
2273 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 2273 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2274 } 2274 }
2275 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) 2275 } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
2276 return ABORT_FIXUP_FAILED; 2276 return ABORT_FIXUP_FAILED;
2277 2277
2278 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2278 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2279 2279
2280 /* Ok an abort in SVC mode */ 2280 /* Ok an abort in SVC mode */
2281 2281
2282 /* 2282 /*
2283 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2283 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2284 * as the fault happened in svc mode but we need it in the 2284 * as the fault happened in svc mode but we need it in the
2285 * usr slot so we can treat the registers as an array of ints 2285 * usr slot so we can treat the registers as an array of ints
2286 * during fixing. 2286 * during fixing.
2287 * NOTE: This PC is in the position but writeback is not 2287 * NOTE: This PC is in the position but writeback is not
2288 * allowed on r15. 2288 * allowed on r15.
2289 * Doing it like this is more efficient than trapping this 2289 * Doing it like this is more efficient than trapping this
2290 * case in all possible locations in the prior fixup code. 2290 * case in all possible locations in the prior fixup code.
2291 */ 2291 */
2292 2292
2293 frame->tf_svc_lr = frame->tf_usr_lr; 2293 frame->tf_svc_lr = frame->tf_usr_lr;
2294 frame->tf_usr_lr = saved_lr; 2294 frame->tf_usr_lr = saved_lr;
2295 2295
2296 /* 2296 /*
2297 * Note the trapframe does not have the SVC r13 so a fault 2297 * Note the trapframe does not have the SVC r13 so a fault
2298 * from an instruction with writeback to r13 in SVC mode is 2298 * from an instruction with writeback to r13 in SVC mode is
2299 * not allowed. This should not happen as the kstack is 2299 * not allowed. This should not happen as the kstack is
2300 * always valid. 2300 * always valid.
2301 */ 2301 */
2302 } 2302 }
2303 2303
2304 return(ABORT_FIXUP_OK); 2304 return(ABORT_FIXUP_OK);
2305} 2305}
2306#endif /* CPU_ARM2/250/3/6/7 */ 2306#endif /* CPU_ARM2/250/3/6/7 */
2307 2307
2308 2308
2309#if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \ 2309#if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2310 defined(CPU_ARM7TDMI) 2310 defined(CPU_ARM7TDMI)
2311/* 2311/*
2312 * "Late" (base updated) data abort fixup 2312 * "Late" (base updated) data abort fixup
2313 * 2313 *
2314 * For ARM6 (in late-abort mode) and ARM7. 2314 * For ARM6 (in late-abort mode) and ARM7.
2315 * 2315 *
2316 * In this model, all data-transfer instructions need fixing up. We defer 2316 * In this model, all data-transfer instructions need fixing up. We defer
2317 * LDM, STM, LDC and STC fixup to the early-abort handler. 2317 * LDM, STM, LDC and STC fixup to the early-abort handler.
2318 */ 2318 */
2319int 2319int
2320late_abort_fixup(void *arg) 2320late_abort_fixup(void *arg)
2321{ 2321{
2322 trapframe_t *frame = arg; 2322 trapframe_t *frame = arg;
2323 u_int fault_pc; 2323 u_int fault_pc;
2324 u_int fault_instruction; 2324 u_int fault_instruction;
2325 int saved_lr = 0; 2325 int saved_lr = 0;
2326 2326
2327 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2327 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2328 2328
2329 /* Ok an abort in SVC mode */ 2329 /* Ok an abort in SVC mode */
2330 2330
2331 /* 2331 /*
2332 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2332 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2333 * as the fault happened in svc mode but we need it in the 2333 * as the fault happened in svc mode but we need it in the
2334 * usr slot so we can treat the registers as an array of ints 2334 * usr slot so we can treat the registers as an array of ints
2335 * during fixing. 2335 * during fixing.
2336 * NOTE: This PC is in the position but writeback is not 2336 * NOTE: This PC is in the position but writeback is not
2337 * allowed on r15. 2337 * allowed on r15.
2338 * Doing it like this is more efficient than trapping this 2338 * Doing it like this is more efficient than trapping this
2339 * case in all possible locations in the following fixup code. 2339 * case in all possible locations in the following fixup code.
2340 */ 2340 */
2341 2341
2342 saved_lr = frame->tf_usr_lr; 2342 saved_lr = frame->tf_usr_lr;
2343 frame->tf_usr_lr = frame->tf_svc_lr; 2343 frame->tf_usr_lr = frame->tf_svc_lr;
2344 2344
2345 /* 2345 /*
2346 * Note the trapframe does not have the SVC r13 so a fault 2346 * Note the trapframe does not have the SVC r13 so a fault
2347 * from an instruction with writeback to r13 in SVC mode is 2347 * from an instruction with writeback to r13 in SVC mode is
2348 * not allowed. This should not happen as the kstack is 2348 * not allowed. This should not happen as the kstack is
2349 * always valid. 2349 * always valid.
2350 */ 2350 */
2351 } 2351 }
2352 2352
2353 /* Get fault address and status from the CPU */ 2353 /* Get fault address and status from the CPU */
2354 2354
2355 fault_pc = frame->tf_pc; 2355 fault_pc = frame->tf_pc;
2356 fault_instruction = *((volatile unsigned int *)fault_pc); 2356 fault_instruction = *((volatile unsigned int *)fault_pc);
2357 2357
2358 /* Decode the fault instruction and fix the registers as needed */ 2358 /* Decode the fault instruction and fix the registers as needed */
2359 2359
 2360 /* Was it a swap instruction? */ 2360 /* Was it a swap instruction? */
2361 2361
2362 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) { 2362 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2363 DFC_DISASSEMBLE(fault_pc); 2363 DFC_DISASSEMBLE(fault_pc);
2364 } else if ((fault_instruction & 0x0c000000) == 0x04000000) { 2364 } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2365 2365
 2366 /* Was it a ldr/str instruction? */ 2366 /* Was it a ldr/str instruction? */
2367 /* This is for late abort only */ 2367 /* This is for late abort only */
2368 2368
2369 int base; 2369 int base;
2370 int offset; 2370 int offset;
2371 int *registers = &frame->tf_r0; 2371 int *registers = &frame->tf_r0;
2372 2372
2373 DFC_DISASSEMBLE(fault_pc); 2373 DFC_DISASSEMBLE(fault_pc);
2374 2374
2375 /* This is for late abort only */ 2375 /* This is for late abort only */
2376 2376
2377 if ((fault_instruction & (1 << 24)) == 0 2377 if ((fault_instruction & (1 << 24)) == 0
2378 || (fault_instruction & (1 << 21)) != 0) { 2378 || (fault_instruction & (1 << 21)) != 0) {
 2379 /* post-indexed ldr/str, or pre-indexed with writeback */ 2379 /* post-indexed ldr/str, or pre-indexed with writeback */
2380 2380
2381 base = (fault_instruction >> 16) & 0x0f; 2381 base = (fault_instruction >> 16) & 0x0f;
2382 if (base == 13 && 2382 if (base == 13 &&
2383 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) 2383 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2384 return ABORT_FIXUP_FAILED; 2384 return ABORT_FIXUP_FAILED;
2385 if (base == 15) 2385 if (base == 15)
2386 return ABORT_FIXUP_FAILED; 2386 return ABORT_FIXUP_FAILED;
2387 DFC_PRINTF(("late abt fix: r%d=%08x : ", 2387 DFC_PRINTF(("late abt fix: r%d=%08x : ",
2388 base, registers[base])); 2388 base, registers[base]));
2389 if ((fault_instruction & (1 << 25)) == 0) { 2389 if ((fault_instruction & (1 << 25)) == 0) {
2390 /* Immediate offset - easy */ 2390 /* Immediate offset - easy */
2391 2391
2392 offset = fault_instruction & 0xfff; 2392 offset = fault_instruction & 0xfff;
2393 if ((fault_instruction & (1 << 23))) 2393 if ((fault_instruction & (1 << 23)))
2394 offset = -offset; 2394 offset = -offset;
2395 registers[base] += offset; 2395 registers[base] += offset;
2396 DFC_PRINTF(("imm=%08x ", offset)); 2396 DFC_PRINTF(("imm=%08x ", offset));
2397 } else { 2397 } else {
2398 /* offset is a shifted register */ 2398 /* offset is a shifted register */
2399 int shift; 2399 int shift;
2400 2400
2401 offset = fault_instruction & 0x0f; 2401 offset = fault_instruction & 0x0f;
2402 if (offset == base) 2402 if (offset == base)
2403 return ABORT_FIXUP_FAILED; 2403 return ABORT_FIXUP_FAILED;
2404 2404
2405 /* 2405 /*
 2406 * Register offset - hard: we have to 2406 * Register offset - hard: we have to
 2407 * cope with shifts! 2407 * cope with shifts!
2408 */ 2408 */
2409 offset = registers[offset]; 2409 offset = registers[offset];
2410 2410
2411 if ((fault_instruction & (1 << 4)) == 0) 2411 if ((fault_instruction & (1 << 4)) == 0)
2412 /* shift with amount */ 2412 /* shift with amount */
2413 shift = (fault_instruction >> 7) & 0x1f; 2413 shift = (fault_instruction >> 7) & 0x1f;
2414 else { 2414 else {
2415 /* shift with register */ 2415 /* shift with register */
2416 if ((fault_instruction & (1 << 7)) != 0) 2416 if ((fault_instruction & (1 << 7)) != 0)
2417 /* undefined for now so bail out */ 2417 /* undefined for now so bail out */
2418 return ABORT_FIXUP_FAILED; 2418 return ABORT_FIXUP_FAILED;
2419 shift = ((fault_instruction >> 8) & 0xf); 2419 shift = ((fault_instruction >> 8) & 0xf);
2420 if (base == shift) 2420 if (base == shift)
2421 return ABORT_FIXUP_FAILED; 2421 return ABORT_FIXUP_FAILED;
2422 DFC_PRINTF(("shift reg=%d ", shift)); 2422 DFC_PRINTF(("shift reg=%d ", shift));
2423 shift = registers[shift]; 2423 shift = registers[shift];
2424 } 2424 }
2425 DFC_PRINTF(("shift=%08x ", shift)); 2425 DFC_PRINTF(("shift=%08x ", shift));
2426 switch (((fault_instruction >> 5) & 0x3)) { 2426 switch (((fault_instruction >> 5) & 0x3)) {
2427 case 0 : /* Logical left */ 2427 case 0 : /* Logical left */
2428 offset = (int)(((u_int)offset) << shift); 2428 offset = (int)(((u_int)offset) << shift);
2429 break; 2429 break;
2430 case 1 : /* Logical Right */ 2430 case 1 : /* Logical Right */
2431 if (shift == 0) shift = 32; 2431 if (shift == 0) shift = 32;
2432 offset = (int)(((u_int)offset) >> shift); 2432 offset = (int)(((u_int)offset) >> shift);
2433 break; 2433 break;
2434 case 2 : /* Arithmetic Right */ 2434 case 2 : /* Arithmetic Right */
2435 if (shift == 0) shift = 32; 2435 if (shift == 0) shift = 32;
2436 offset = (int)(((int)offset) >> shift); 2436 offset = (int)(((int)offset) >> shift);
2437 break; 2437 break;
 2438 case 3 : /* Rotate right (ror or rrx) */ 2438 case 3 : /* Rotate right (ror or rrx) */
2439 return ABORT_FIXUP_FAILED; 2439 return ABORT_FIXUP_FAILED;
2440 break; 2440 break;
2441 } 2441 }
2442 2442
2443 DFC_PRINTF(("abt: fixed LDR/STR with " 2443 DFC_PRINTF(("abt: fixed LDR/STR with "
2444 "register offset\n")); 2444 "register offset\n"));
2445 if ((fault_instruction & (1 << 23))) 2445 if ((fault_instruction & (1 << 23)))
2446 offset = -offset; 2446 offset = -offset;
2447 DFC_PRINTF(("offset=%08x ", offset)); 2447 DFC_PRINTF(("offset=%08x ", offset));
2448 registers[base] += offset; 2448 registers[base] += offset;
2449 } 2449 }
2450 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 2450 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2451 } 2451 }
2452 } 2452 }
2453 2453
2454 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2454 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2455 2455
2456 /* Ok an abort in SVC mode */ 2456 /* Ok an abort in SVC mode */
2457 2457
2458 /* 2458 /*
2459 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2459 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2460 * as the fault happened in svc mode but we need it in the 2460 * as the fault happened in svc mode but we need it in the
2461 * usr slot so we can treat the registers as an array of ints 2461 * usr slot so we can treat the registers as an array of ints
2462 * during fixing. 2462 * during fixing.
2463 * NOTE: This PC is in the position but writeback is not 2463 * NOTE: This PC is in the position but writeback is not
2464 * allowed on r15. 2464 * allowed on r15.
2465 * Doing it like this is more efficient than trapping this 2465 * Doing it like this is more efficient than trapping this
2466 * case in all possible locations in the prior fixup code. 2466 * case in all possible locations in the prior fixup code.
2467 */ 2467 */
2468 2468
2469 frame->tf_svc_lr = frame->tf_usr_lr; 2469 frame->tf_svc_lr = frame->tf_usr_lr;
2470 frame->tf_usr_lr = saved_lr; 2470 frame->tf_usr_lr = saved_lr;
2471 2471
2472 /* 2472 /*
2473 * Note the trapframe does not have the SVC r13 so a fault 2473 * Note the trapframe does not have the SVC r13 so a fault
2474 * from an instruction with writeback to r13 in SVC mode is 2474 * from an instruction with writeback to r13 in SVC mode is
2475 * not allowed. This should not happen as the kstack is 2475 * not allowed. This should not happen as the kstack is
2476 * always valid. 2476 * always valid.
2477 */ 2477 */
2478 } 2478 }
2479 2479
2480 /* 2480 /*
2481 * Now let the early-abort fixup routine have a go, in case it 2481 * Now let the early-abort fixup routine have a go, in case it
2482 * was an LDM, STM, LDC or STC that faulted. 2482 * was an LDM, STM, LDC or STC that faulted.
2483 */ 2483 */
2484 2484
2485 return early_abort_fixup(arg); 2485 return early_abort_fixup(arg);
2486} 2486}
2487#endif /* CPU_ARM6(LATE)/7/7TDMI */ 2487#endif /* CPU_ARM6(LATE)/7/7TDMI */
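
A hedged restatement (not part of the diff) of the shifted-register offset
arithmetic inside late_abort_fixup() above, limited to the immediate
shift-amount case where an encoded amount of 0 means 32 for LSR/ASR. The
helper name is invented, and the ROR/RRX case simply yields 0 here, whereas
the real code returns ABORT_FIXUP_FAILED.

	static int
	shifted_reg_offset(unsigned int value, unsigned int shift,
	    unsigned int type)
	{
		switch (type & 3) {
		case 0:			/* LSL */
			return (int)(value << shift);
		case 1:			/* LSR; an amount of 0 encodes 32 */
			if (shift == 0)
				return 0;
			return (int)(value >> shift);
		case 2:			/* ASR; an amount of 0 encodes 32 */
			if (shift == 0)
				return ((int)value < 0) ? -1 : 0;
			return (int)((int)value >> shift);
		default:		/* ROR/RRX: the fixup above gives up */
			return 0;
		}
	}
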
2488 2488
2489/* 2489/*
2490 * CPU Setup code 2490 * CPU Setup code
2491 */ 2491 */
2492 2492
2493#if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \ 2493#if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2494 defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \ 2494 defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2495 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \ 2495 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2496 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 2496 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2497 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \ 2497 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2498 defined(CPU_ARM10) || defined(CPU_ARM11) || \ 2498 defined(CPU_ARM10) || defined(CPU_ARM11) || \
2499 defined(CPU_FA526) || defined(CPU_CORTEX) || defined(CPU_SHEEVA) 2499 defined(CPU_FA526) || defined(CPU_CORTEX) || defined(CPU_SHEEVA)
2500 2500
2501#define IGN 0 2501#define IGN 0
2502#define OR 1 2502#define OR 1
2503#define BIC 2 2503#define BIC 2
2504 2504
2505struct cpu_option { 2505struct cpu_option {
2506 const char *co_name; 2506 const char *co_name;
2507 int co_falseop; 2507 int co_falseop;
2508 int co_trueop; 2508 int co_trueop;
2509 int co_value; 2509 int co_value;
2510}; 2510};
2511 2511
2512static u_int parse_cpu_options(char *, struct cpu_option *, u_int); 2512static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2513 2513
2514static u_int 2514static u_int
2515parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl) 2515parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2516{ 2516{
2517 int integer; 2517 int integer;
2518 2518
2519 if (args == NULL) 2519 if (args == NULL)
2520 return(cpuctrl); 2520 return(cpuctrl);
2521 2521
2522 while (optlist->co_name) { 2522 while (optlist->co_name) {
2523 if (get_bootconf_option(args, optlist->co_name, 2523 if (get_bootconf_option(args, optlist->co_name,
2524 BOOTOPT_TYPE_BOOLEAN, &integer)) { 2524 BOOTOPT_TYPE_BOOLEAN, &integer)) {
2525 if (integer) { 2525 if (integer) {
2526 if (optlist->co_trueop == OR) 2526 if (optlist->co_trueop == OR)
2527 cpuctrl |= optlist->co_value; 2527 cpuctrl |= optlist->co_value;
2528 else if (optlist->co_trueop == BIC) 2528 else if (optlist->co_trueop == BIC)
2529 cpuctrl &= ~optlist->co_value; 2529 cpuctrl &= ~optlist->co_value;
2530 } else { 2530 } else {
2531 if (optlist->co_falseop == OR) 2531 if (optlist->co_falseop == OR)
2532 cpuctrl |= optlist->co_value; 2532 cpuctrl |= optlist->co_value;
2533 else if (optlist->co_falseop == BIC) 2533 else if (optlist->co_falseop == BIC)
2534 cpuctrl &= ~optlist->co_value; 2534 cpuctrl &= ~optlist->co_value;
2535 } 2535 }
2536 } 2536 }
2537 ++optlist; 2537 ++optlist;
2538 } 2538 }
2539 return(cpuctrl); 2539 return(cpuctrl);
2540} 2540}
2541#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */ 2541#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
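
A hedged usage sketch, not part of the diff, showing how a setup routine hands
a boot-args string and an option table to parse_cpu_options() above. The table
and wrapper are hypothetical; the boot-args syntax itself is whatever
get_bootconf_option() accepts and is not spelled out here.

	static struct cpu_option sketch_options[] = {
		{ "cpu.cache",   BIC, OR,  CPU_CONTROL_IDC_ENABLE },
		{ "cpu.nocache", OR,  BIC, CPU_CONTROL_IDC_ENABLE },
		{ NULL,          IGN, IGN, 0 }
	};

	static u_int
	sketch_apply_options(char *bootargs, u_int cpuctrl)
	{
		/*
		 * If cpu.nocache is set true, its co_trueop (BIC) clears
		 * CPU_CONTROL_IDC_ENABLE; if cpu.cache is set true, its
		 * co_trueop (OR) sets it.  Options not mentioned in the
		 * boot args leave cpuctrl untouched.
		 */
		return parse_cpu_options(bootargs, sketch_options, cpuctrl);
	}
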
2542 2542
2543#if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \ 2543#if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2544 || defined(CPU_ARM8) 2544 || defined(CPU_ARM8)
2545struct cpu_option arm678_options[] = { 2545struct cpu_option arm678_options[] = {
2546#ifdef COMPAT_12 2546#ifdef COMPAT_12
2547 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE }, 2547 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2548 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 2548 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2549#endif /* COMPAT_12 */ 2549#endif /* COMPAT_12 */
2550 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2550 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2551 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2551 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2552 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2552 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2553 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2553 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2554 { NULL, IGN, IGN, 0 } 2554 { NULL, IGN, IGN, 0 }
2555}; 2555};
2556 2556
2557#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */ 2557#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2558 2558
2559#ifdef CPU_ARM6 2559#ifdef CPU_ARM6
2560struct cpu_option arm6_options[] = { 2560struct cpu_option arm6_options[] = {
2561 { "arm6.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2561 { "arm6.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2562 { "arm6.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2562 { "arm6.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2563 { "arm6.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2563 { "arm6.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2564 { "arm6.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2564 { "arm6.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2565 { NULL, IGN, IGN, 0 } 2565 { NULL, IGN, IGN, 0 }
2566}; 2566};
2567 2567
2568void 2568void
2569arm6_setup(char *args) 2569arm6_setup(char *args)
2570{ 2570{
2571 2571
2572 /* Set up default control registers bits */ 2572 /* Set up default control registers bits */
2573 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2573 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2574 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2574 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2575 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2575 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2576#if 0 2576#if 0
2577 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2577 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2578 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2578 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2579 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2579 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2580 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE 2580 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2581 | CPU_CONTROL_AFLT_ENABLE; 2581 | CPU_CONTROL_AFLT_ENABLE;
2582#endif 2582#endif
2583 2583
2584#ifdef ARM6_LATE_ABORT 2584#ifdef ARM6_LATE_ABORT
2585 cpuctrl |= CPU_CONTROL_LABT_ENABLE; 2585 cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2586#endif /* ARM6_LATE_ABORT */ 2586#endif /* ARM6_LATE_ABORT */
2587 2587
2588#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2588#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2589 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2589 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2590#endif 2590#endif
2591 2591
2592 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2592 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2593 cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl); 2593 cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2594 2594
2595#ifdef __ARMEB__ 2595#ifdef __ARMEB__
2596 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2596 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2597#endif 2597#endif
2598 2598
2599 /* Clear out the cache */ 2599 /* Clear out the cache */
2600 cpu_idcache_wbinv_all(); 2600 cpu_idcache_wbinv_all();
2601 2601
2602 /* Set the control register */ 2602 /* Set the control register */
2603 curcpu()->ci_ctrl = cpuctrl; 2603 curcpu()->ci_ctrl = cpuctrl;
2604 cpu_control(0xffffffff, cpuctrl); 2604 cpu_control(0xffffffff, cpuctrl);
2605} 2605}
2606#endif /* CPU_ARM6 */ 2606#endif /* CPU_ARM6 */
2607 2607
2608#ifdef CPU_ARM7 2608#ifdef CPU_ARM7
2609struct cpu_option arm7_options[] = { 2609struct cpu_option arm7_options[] = {
2610 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2610 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2611 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2611 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2612 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2612 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2613 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2613 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2614#ifdef COMPAT_12 2614#ifdef COMPAT_12
2615 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2615 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
2616#endif /* COMPAT_12 */ 2616#endif /* COMPAT_12 */
2617 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2617 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
2618 { NULL, IGN, IGN, 0 } 2618 { NULL, IGN, IGN, 0 }
2619}; 2619};
2620 2620
2621void 2621void
2622arm7_setup(char *args) 2622arm7_setup(char *args)
2623{ 2623{
2624 2624
2625 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2625 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2626 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2626 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2627 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2627 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2628#if 0 2628#if 0
2629 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2629 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2630 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2630 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2631 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2631 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2632 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE 2632 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2633 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE 2633 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2634 | CPU_CONTROL_AFLT_ENABLE; 2634 | CPU_CONTROL_AFLT_ENABLE;
2635#endif 2635#endif
2636 2636
2637#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2637#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2638 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2638 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2639#endif 2639#endif
2640 2640
2641 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2641 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2642 cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl); 2642 cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2643 2643
2644#ifdef __ARMEB__ 2644#ifdef __ARMEB__
2645 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2645 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2646#endif 2646#endif
2647 2647
2648 /* Clear out the cache */ 2648 /* Clear out the cache */
2649 cpu_idcache_wbinv_all(); 2649 cpu_idcache_wbinv_all();
2650 2650
2651 /* Set the control register */ 2651 /* Set the control register */
2652 curcpu()->ci_ctrl = cpuctrl; 2652 curcpu()->ci_ctrl = cpuctrl;
2653 cpu_control(0xffffffff, cpuctrl); 2653 cpu_control(0xffffffff, cpuctrl);
2654} 2654}
2655#endif /* CPU_ARM7 */ 2655#endif /* CPU_ARM7 */
2656 2656
2657#ifdef CPU_ARM7TDMI 2657#ifdef CPU_ARM7TDMI
2658struct cpu_option arm7tdmi_options[] = { 2658struct cpu_option arm7tdmi_options[] = {
2659 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2659 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2660 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2660 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2661 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2661 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2662 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2662 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2663#ifdef COMPAT_12 2663#ifdef COMPAT_12
2664 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2664 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
2665#endif /* COMPAT_12 */ 2665#endif /* COMPAT_12 */
2666 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2666 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
2667 { NULL, IGN, IGN, 0 } 2667 { NULL, IGN, IGN, 0 }
2668}; 2668};
2669 2669
2670void 2670void
2671arm7tdmi_setup(char *args) 2671arm7tdmi_setup(char *args)
2672{ 2672{
2673 int cpuctrl; 2673 int cpuctrl;
2674 2674
2675 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2675 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2676 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2676 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2677 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2677 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2678 2678
2679 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2679 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2680 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl); 2680 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2681 2681
2682#ifdef __ARMEB__ 2682#ifdef __ARMEB__
2683 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2683 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2684#endif 2684#endif
2685 2685
2686 /* Clear out the cache */ 2686 /* Clear out the cache */
2687 cpu_idcache_wbinv_all(); 2687 cpu_idcache_wbinv_all();
2688 2688
2689 /* Set the control register */ 2689 /* Set the control register */
2690 curcpu()->ci_ctrl = cpuctrl; 2690 curcpu()->ci_ctrl = cpuctrl;
2691 cpu_control(0xffffffff, cpuctrl); 2691 cpu_control(0xffffffff, cpuctrl);
2692} 2692}
2693#endif /* CPU_ARM7TDMI */ 2693#endif /* CPU_ARM7TDMI */
2694 2694
2695#ifdef CPU_ARM8 2695#ifdef CPU_ARM8
2696struct cpu_option arm8_options[] = { 2696struct cpu_option arm8_options[] = {
2697 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2697 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2698 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2698 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2699 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2699 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2700 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2700 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2701#ifdef COMPAT_12 2701#ifdef COMPAT_12
2702 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2702 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2703#endif /* COMPAT_12 */ 2703#endif /* COMPAT_12 */
2704 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2704 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2705 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2705 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2706 { NULL, IGN, IGN, 0 } 2706 { NULL, IGN, IGN, 0 }
2707}; 2707};
2708 2708
2709void 2709void
2710arm8_setup(char *args) 2710arm8_setup(char *args)
2711{ 2711{
2712 int integer; 2712 int integer;
2713 int clocktest; 2713 int clocktest;
2714 int setclock = 0; 2714 int setclock = 0;
2715 2715
2716 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2716 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2717 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2717 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2718 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2718 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2719#if 0 2719#if 0
2720 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2720 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2721 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2721 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2722 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2722 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2723 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE 2723 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2724 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE; 2724 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2725#endif 2725#endif
2726 2726
2727#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2727#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2728 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2728 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2729#endif 2729#endif
2730 2730
2731 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2731 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2732 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl); 2732 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2733 2733
2734#ifdef __ARMEB__ 2734#ifdef __ARMEB__
2735 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2735 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2736#endif 2736#endif
2737 2737
2738 /* Get clock configuration */ 2738 /* Get clock configuration */
2739 clocktest = arm8_clock_config(0, 0) & 0x0f; 2739 clocktest = arm8_clock_config(0, 0) & 0x0f;
2740 2740
2741 /* Special ARM8 clock and test configuration */ 2741 /* Special ARM8 clock and test configuration */
2742 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2742 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2743 clocktest = 0; 2743 clocktest = 0;
2744 setclock = 1; 2744 setclock = 1;
2745 } 2745 }
2746 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2746 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2747 if (integer) 2747 if (integer)
2748 clocktest |= 0x01; 2748 clocktest |= 0x01;
2749 else 2749 else
2750 clocktest &= ~(0x01); 2750 clocktest &= ~(0x01);
2751 setclock = 1; 2751 setclock = 1;
2752 } 2752 }
2753 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2753 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2754 if (integer) 2754 if (integer)
2755 clocktest |= 0x02; 2755 clocktest |= 0x02;
2756 else 2756 else
2757 clocktest &= ~(0x02); 2757 clocktest &= ~(0x02);
2758 setclock = 1; 2758 setclock = 1;
2759 } 2759 }
2760 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) { 2760 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2761 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2; 2761 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
2762 setclock = 1; 2762 setclock = 1;
2763 } 2763 }
2764 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) { 2764 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2765 clocktest |= (integer & 7) << 5; 2765 clocktest |= (integer & 7) << 5;
2766 setclock = 1; 2766 setclock = 1;
2767 } 2767 }
2768 2768
2769 /* Clear out the cache */ 2769 /* Clear out the cache */
2770 cpu_idcache_wbinv_all(); 2770 cpu_idcache_wbinv_all();
2771 2771
2772 /* Set the control register */ 2772 /* Set the control register */
2773 curcpu()->ci_ctrl = cpuctrl; 2773 curcpu()->ci_ctrl = cpuctrl;
2774 cpu_control(0xffffffff, cpuctrl); 2774 cpu_control(0xffffffff, cpuctrl);
2775 2775
2776 /* Set the clock/test register */ 2776 /* Set the clock/test register */
2777 if (setclock) 2777 if (setclock)
2778 arm8_clock_config(0x7f, clocktest); 2778 arm8_clock_config(0x7f, clocktest);
2779} 2779}
2780#endif /* CPU_ARM8 */ 2780#endif /* CPU_ARM8 */
2781 2781
2782#ifdef CPU_ARM9 2782#ifdef CPU_ARM9
2783struct cpu_option arm9_options[] = { 2783struct cpu_option arm9_options[] = {
2784 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2784 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2785 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2785 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2786 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2786 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2787 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2787 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2788 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2788 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2789 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2789 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2790 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2790 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2791 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2791 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2792 { NULL, IGN, IGN, 0 } 2792 { NULL, IGN, IGN, 0 }
2793}; 2793};
2794 2794
2795void 2795void
2796arm9_setup(char *args) 2796arm9_setup(char *args)
2797{ 2797{
2798 2798
2799 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2799 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2800 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2800 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2801 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2801 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2802 | CPU_CONTROL_WBUF_ENABLE; 2802 | CPU_CONTROL_WBUF_ENABLE;
2803 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2803 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2804 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2804 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2805 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2805 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2806 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2806 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2807 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2807 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2808 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC 2808 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2809 | CPU_CONTROL_ROUNDROBIN; 2809 | CPU_CONTROL_ROUNDROBIN;
2810 2810
2811#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2811#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2812 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2812 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2813#endif 2813#endif
2814 2814
2815 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl); 2815 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2816 2816
2817#ifdef __ARMEB__ 2817#ifdef __ARMEB__
2818 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2818 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2819#endif 2819#endif
2820 2820
2821#ifndef ARM_HAS_VBAR 2821#ifndef ARM_HAS_VBAR
2822 if (vector_page == ARM_VECTORS_HIGH) 2822 if (vector_page == ARM_VECTORS_HIGH)
2823 cpuctrl |= CPU_CONTROL_VECRELOC; 2823 cpuctrl |= CPU_CONTROL_VECRELOC;
2824#endif 2824#endif
2825 2825
2826 /* Clear out the cache */ 2826 /* Clear out the cache */
2827 cpu_idcache_wbinv_all(); 2827 cpu_idcache_wbinv_all();
2828 2828
2829 /* Set the control register */ 2829 /* Set the control register */
2830 curcpu()->ci_ctrl = cpuctrl; 2830 curcpu()->ci_ctrl = cpuctrl;
2831 cpu_control(cpuctrlmask, cpuctrl); 2831 cpu_control(cpuctrlmask, cpuctrl);
2832 2832
2833} 2833}
2834#endif /* CPU_ARM9 */ 2834#endif /* CPU_ARM9 */
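
Not part of the diff: the two calling conventions seen above
(cpu_control(0xffffffff, cpuctrl) in the ARM6/7/8 paths,
cpu_control(cpuctrlmask, cpuctrl) in arm9_setup()) are consistent with
cpu_control() behaving as a masked read-modify-write of the CP15 control
register, sketched below. The real routine lives in assembly and may differ in
detail; with a mask of 0xffffffff the whole register is replaced, while
cpuctrlmask confines the update to the bits the setup code knows about.

	static u_int
	sketch_masked_ctrl(u_int old, u_int mask, u_int val)
	{
		/* Bits selected by mask take their value from val;
		 * the remaining bits keep their old value. */
		return (old & ~mask) | (val & mask);
	}
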
2835 2835
2836#if defined(CPU_ARM9E) || defined(CPU_ARM10) 2836#if defined(CPU_ARM9E) || defined(CPU_ARM10)
2837struct cpu_option arm10_options[] = { 2837struct cpu_option arm10_options[] = {
2838 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2838 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2839 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2839 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2840 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2840 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2841 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2841 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2842 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2842 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2843 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2843 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2844 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2844 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2845 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2845 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2846 { NULL, IGN, IGN, 0 } 2846 { NULL, IGN, IGN, 0 }
2847}; 2847};
2848 2848
2849void 2849void
2850arm10_setup(char *args) 2850arm10_setup(char *args)
2851{ 2851{
2852 2852
2853 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2853 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2854 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2854 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2855 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; 2855 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2856#if 0 2856#if 0
2857 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2857 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2858 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2858 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2859 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2859 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2860 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2860 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2861 | CPU_CONTROL_BPRD_ENABLE 2861 | CPU_CONTROL_BPRD_ENABLE
2862 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 2862 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2863#endif 2863#endif
2864 2864
2865#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2865#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2866 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2866 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2867#endif 2867#endif
2868 2868
2869 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl); 2869 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2870 2870
2871#ifdef __ARMEB__ 2871#ifdef __ARMEB__
2872 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2872 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2873#endif 2873#endif
2874 2874
2875#ifndef ARM_HAS_VBAR 2875#ifndef ARM_HAS_VBAR
2876 if (vector_page == ARM_VECTORS_HIGH) 2876 if (vector_page == ARM_VECTORS_HIGH)
2877 cpuctrl |= CPU_CONTROL_VECRELOC; 2877 cpuctrl |= CPU_CONTROL_VECRELOC;
2878#endif 2878#endif
2879 2879
2880 /* Clear out the cache */ 2880 /* Clear out the cache */
2881 cpu_idcache_wbinv_all(); 2881 cpu_idcache_wbinv_all();
2882 2882
2883 /* Now really make sure they are clean. */ 2883 /* Now really make sure they are clean. */
2884 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2884 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2885 2885
2886 /* Set the control register */ 2886 /* Set the control register */
2887 curcpu()->ci_ctrl = cpuctrl; 2887 curcpu()->ci_ctrl = cpuctrl;
2888 cpu_control(0xffffffff, cpuctrl); 2888 cpu_control(0xffffffff, cpuctrl);
2889 2889
2890 /* And again. */ 2890 /* And again. */
2891 cpu_idcache_wbinv_all(); 2891 cpu_idcache_wbinv_all();
2892} 2892}
2893#endif /* CPU_ARM9E || CPU_ARM10 */ 2893#endif /* CPU_ARM9E || CPU_ARM10 */
2894 2894
2895#if defined(CPU_ARM11) 2895#if defined(CPU_ARM11)
2896struct cpu_option arm11_options[] = { 2896struct cpu_option arm11_options[] = {
2897 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2897 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2898 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2898 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2899 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2899 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2900 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2900 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2901 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2901 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2902 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2902 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2903 { "arm11.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2903 { "arm11.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2904 { NULL, IGN, IGN, 0 } 2904 { NULL, IGN, IGN, 0 }
2905}; 2905};
2906 2906
2907void 2907void
2908arm11_setup(char *args) 2908arm11_setup(char *args)
2909{ 2909{
2910 2910
2911 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2911 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2912 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2912 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2913 /* | CPU_CONTROL_BPRD_ENABLE */; 2913 /* | CPU_CONTROL_BPRD_ENABLE */;
2914 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2914 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2915 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2915 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2916 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE 2916 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2917 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2917 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2918 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 2918 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2919 2919
2920#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2920#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2921 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2921 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2922#endif 2922#endif
2923 2923
2924 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 2924 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2925 2925
2926#ifdef __ARMEB__ 2926#ifdef __ARMEB__
2927 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2927 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2928#endif 2928#endif
2929 2929
2930#ifndef ARM_HAS_VBAR 2930#ifndef ARM_HAS_VBAR
2931 if (vector_page == ARM_VECTORS_HIGH) 2931 if (vector_page == ARM_VECTORS_HIGH)
2932 cpuctrl |= CPU_CONTROL_VECRELOC; 2932 cpuctrl |= CPU_CONTROL_VECRELOC;
2933#endif 2933#endif
2934 2934
2935 /* Clear out the cache */ 2935 /* Clear out the cache */
2936 cpu_idcache_wbinv_all(); 2936 cpu_idcache_wbinv_all();
2937 2937
2938 /* Now really make sure they are clean. */ 2938 /* Now really make sure they are clean. */
2939 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2939 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2940 2940
2941 /* Allow detection code to find the VFP if it's fitted. */ 2941 /* Allow detection code to find the VFP if it's fitted. */
2942 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 2942 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2943 2943
2944 /* Set the control register */ 2944 /* Set the control register */
2945 curcpu()->ci_ctrl = cpuctrl; 2945 curcpu()->ci_ctrl = cpuctrl;
2946 cpu_control(cpuctrlmask, cpuctrl); 2946 cpu_control(cpuctrlmask, cpuctrl);
2947 2947
2948 /* And again. */ 2948 /* And again. */
2949 cpu_idcache_wbinv_all(); 2949 cpu_idcache_wbinv_all();
2950} 2950}
2951#endif /* CPU_ARM11 */ 2951#endif /* CPU_ARM11 */
2952 2952
2953#if defined(CPU_ARM11MPCORE) 2953#if defined(CPU_ARM11MPCORE)
2954 2954
2955void 2955void
2956arm11mpcore_setup(char *args) 2956arm11mpcore_setup(char *args)
2957{ 2957{
2958 2958
2959 int cpuctrl = CPU_CONTROL_IC_ENABLE 2959 int cpuctrl = CPU_CONTROL_IC_ENABLE
2960 | CPU_CONTROL_DC_ENABLE 2960 | CPU_CONTROL_DC_ENABLE
2961 | CPU_CONTROL_BPRD_ENABLE ; 2961 | CPU_CONTROL_BPRD_ENABLE ;
2962 int cpuctrlmask = CPU_CONTROL_IC_ENABLE 2962 int cpuctrlmask = CPU_CONTROL_IC_ENABLE
2963 | CPU_CONTROL_DC_ENABLE 2963 | CPU_CONTROL_DC_ENABLE
2964 | CPU_CONTROL_BPRD_ENABLE 2964 | CPU_CONTROL_BPRD_ENABLE
2965 | CPU_CONTROL_AFLT_ENABLE 2965 | CPU_CONTROL_AFLT_ENABLE
2966 | CPU_CONTROL_VECRELOC; 2966 | CPU_CONTROL_VECRELOC;
2967 2967
2968#ifdef ARM11MPCORE_MMU_COMPAT 2968#ifdef ARM11MPCORE_MMU_COMPAT
2969 /* XXX: S and R? */ 2969 /* XXX: S and R? */
2970#endif 2970#endif
2971 2971
2972#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2972#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2973 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2973 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2974#endif 2974#endif
2975 2975
2976 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 2976 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2977 2977
2978#ifndef ARM_HAS_VBAR 2978#ifndef ARM_HAS_VBAR
2979 if (vector_page == ARM_VECTORS_HIGH) 2979 if (vector_page == ARM_VECTORS_HIGH)
2980 cpuctrl |= CPU_CONTROL_VECRELOC; 2980 cpuctrl |= CPU_CONTROL_VECRELOC;
2981#endif 2981#endif
2982 2982
2983 /* Clear out the cache */ 2983 /* Clear out the cache */
2984 cpu_idcache_wbinv_all(); 2984 cpu_idcache_wbinv_all();
2985 2985
2986 /* Now really make sure they are clean. */ 2986 /* Now really make sure they are clean. */
2987 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2987 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2988 2988
2989 /* Allow detection code to find the VFP if it's fitted. */ 2989 /* Allow detection code to find the VFP if it's fitted. */
2990 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 2990 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2991 2991
2992 /* Set the control register */ 2992 /* Set the control register */
2993 curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl); 2993 curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl);
2994 2994
2995 /* And again. */ 2995 /* And again. */
2996 cpu_idcache_wbinv_all(); 2996 cpu_idcache_wbinv_all();
2997} 2997}
2998#endif /* CPU_ARM11MPCORE */ 2998#endif /* CPU_ARM11MPCORE */
2999 2999
3000#ifdef CPU_PJ4B 3000#ifdef CPU_PJ4B
3001void 3001void
3002pj4bv7_setup(char *args) 3002pj4bv7_setup(char *args)
3003{ 3003{
3004 int cpuctrl; 3004 int cpuctrl;
3005 3005
3006 pj4b_config(); 3006 pj4b_config();
3007 3007
3008 cpuctrl = CPU_CONTROL_MMU_ENABLE; 3008 cpuctrl = CPU_CONTROL_MMU_ENABLE;
3009#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS 3009#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
3010 cpuctrl |= CPU_CONTROL_UNAL_ENABLE; 3010 cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
3011#else 3011#else
3012 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3012 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3013#endif 3013#endif
3014 cpuctrl |= CPU_CONTROL_DC_ENABLE; 3014 cpuctrl |= CPU_CONTROL_DC_ENABLE;
3015 cpuctrl |= CPU_CONTROL_IC_ENABLE; 3015 cpuctrl |= CPU_CONTROL_IC_ENABLE;
3016 cpuctrl |= (0xf << 3); 3016 cpuctrl |= (0xf << 3);
3017 cpuctrl |= CPU_CONTROL_BPRD_ENABLE; 3017 cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
3018 cpuctrl |= (0x5 << 16) | (1 << 22); 3018 cpuctrl |= (0x5 << 16) | (1 << 22);
3019 cpuctrl |= CPU_CONTROL_XP_ENABLE; 3019 cpuctrl |= CPU_CONTROL_XP_ENABLE;
3020 3020
3021#ifndef ARM_HAS_VBAR 3021#ifndef ARM_HAS_VBAR
3022 if (vector_page == ARM_VECTORS_HIGH) 3022 if (vector_page == ARM_VECTORS_HIGH)
3023 cpuctrl |= CPU_CONTROL_VECRELOC; 3023 cpuctrl |= CPU_CONTROL_VECRELOC;
3024#endif 3024#endif
3025 3025
3026 /* Clear out the cache */ 3026 /* Clear out the cache */
3027 cpu_idcache_wbinv_all(); 3027 cpu_idcache_wbinv_all();
3028 3028
3029 /* Set the control register */ 3029 /* Set the control register */
3030 cpu_control(0xffffffff, cpuctrl); 3030 cpu_control(0xffffffff, cpuctrl);
3031 3031
3032 /* And again. */ 3032 /* And again. */
3033 cpu_idcache_wbinv_all(); 3033 cpu_idcache_wbinv_all();
3034 3034
3035 curcpu()->ci_ctrl = cpuctrl; 3035 curcpu()->ci_ctrl = cpuctrl;
3036} 3036}
3037#endif /* CPU_PJ4B */ 3037#endif /* CPU_PJ4B */
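pj4bv7_setup() above ORs two unnamed bit patterns into cpuctrl, (0xf << 3) and (0x5 << 16) | (1 << 22). A minimal sketch of what those bits appear to be, assuming the ARMv7-A SCTLR layout (bits 3-6, 16 and 18 are should-be-one bits, bit 22 is the alignment-model U bit); the macro and function names are invented here for readability and are not defined by this file:

	/* Illustrative names only, assuming the ARMv7-A SCTLR bit layout. */
	#define SCTLR_SBO_3_6	(0xfu << 3)	/* bits 3-6: should-be-one    */
	#define SCTLR_SBO_16_18	(0x5u << 16)	/* bits 16, 18: should-be-one */
	#define SCTLR_U		(1u << 22)	/* U: unaligned-access model  */

	static unsigned int
	pj4b_fixed_sctlr_bits(void)
	{
		/* Same value the two magic ORs in pj4bv7_setup() contribute. */
		return SCTLR_SBO_3_6 | SCTLR_SBO_16_18 | SCTLR_U;
	}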
3038 3038
3039#if defined(CPU_CORTEX) 3039#if defined(CPU_CORTEX)
3040struct cpu_option armv7_options[] = { 3040struct cpu_option armv7_options[] = {
3041 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3041 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3042 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3042 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3043 { "armv7.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3043 { "armv7.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3044 { "armv7.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3044 { "armv7.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3045 { "armv7.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3045 { "armv7.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3046 { NULL, IGN, IGN, 0} 3046 { NULL, IGN, IGN, 0}
3047}; 3047};
3048 3048
3049void 3049void
3050armv7_setup(char *args) 3050armv7_setup(char *args)
3051{ 3051{
3052 3052
3053 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE 3053 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
3054 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE 3054 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE
3055#ifdef __ARMEB__ 3055#ifdef __ARMEB__
3056 | CPU_CONTROL_EX_BEND 3056 | CPU_CONTROL_EX_BEND
3057#endif 3057#endif
 3058#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 3059 | CPU_CONTROL_AFLT_ENABLE
 3060#endif
3058 | CPU_CONTROL_UNAL_ENABLE; 3061 | CPU_CONTROL_UNAL_ENABLE;
3059 3062
3060#if 0 3063 int cpuctrlmask = cpuctrl | CPU_CONTROL_AFLT_ENABLE;
3061 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 
3062 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 
3063 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE 
3064 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 
3065 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 
3066#endif 
3067 3064
3068#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS 
3069#else 
3070 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 
3071#endif 
3072 3065
3073 cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl); 3066 cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
3074 3067
3075#ifndef ARM_HAS_VBAR 3068#ifndef ARM_HAS_VBAR
3076 if (vector_page == ARM_VECTORS_HIGH) 3069 if (vector_page == ARM_VECTORS_HIGH)
3077 cpuctrl |= CPU_CONTROL_VECRELOC; 3070 cpuctrl |= CPU_CONTROL_VECRELOC;
3078#endif 3071#endif
3079 3072
3080 /* Clear out the cache */ 3073 /* Clear out the cache */
3081 cpu_idcache_wbinv_all(); 3074 cpu_idcache_wbinv_all();
3082 3075
3083 /* Set the control register */ 3076 /* Set the control register */
3084 curcpu()->ci_ctrl = cpuctrl; 3077 curcpu()->ci_ctrl = cpuctrl;
3085 cpu_control(cpuctrl, cpuctrl); 3078 cpu_control(cpuctrlmask, cpuctrl);
3086} 3079}
3087#endif /* CPU_CORTEX */ 3080#endif /* CPU_CORTEX */
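The revised armv7_setup() no longer uses cpuctrl as its own mask: it widens the mask to cpuctrlmask = cpuctrl | CPU_CONTROL_AFLT_ENABLE and passes that as the first argument to cpu_control(), where most of the other setup routines in this file simply pass 0xffffffff. Assuming cpu_control(mask, value) follows the usual read-modify-write convention in which only the bits present in mask are changed (the routine itself is implemented elsewhere in the ARM support code, not in this diff), the update amounts to:

	/* Sketch of the assumed mask/value semantics; not the real cpu_control(). */
	static unsigned int
	ctrl_next(unsigned int oldctrl, unsigned int mask, unsigned int value)
	{
		/* Bits outside the mask keep whatever state the firmware or
		 * bootloader left them in; bits inside it take on value. */
		return (oldctrl & ~mask) | (value & mask);
	}

Under that model a bit can only be forced off if it is present in the mask, so the previous cpu_control(cpuctrl, cpuctrl) call could never clear CPU_CONTROL_AFLT_ENABLE when alignment faults were configured off; carrying the bit in cpuctrlmask makes the register reflect the kernel's choice either way.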
3088 3081
3089 3082
3090#if defined(CPU_ARM1136) || defined(CPU_ARM1176)  3083#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
3091void 3084void
3092arm11x6_setup(char *args) 3085arm11x6_setup(char *args)
3093{ 3086{
3094 int cpuctrl, cpuctrl_wax; 3087 int cpuctrl, cpuctrl_wax;
3095 uint32_t auxctrl, auxctrl_wax; 3088 uint32_t auxctrl, auxctrl_wax;
3096 uint32_t tmp, tmp2; 3089 uint32_t tmp, tmp2;
3097 uint32_t sbz=0; 3090 uint32_t sbz=0;
3098 uint32_t cpuid; 3091 uint32_t cpuid;
3099 3092
3100 cpuid = cpu_id(); 3093 cpuid = cpu_id();
3101 3094
3102 cpuctrl = 3095 cpuctrl =
3103 CPU_CONTROL_MMU_ENABLE | 3096 CPU_CONTROL_MMU_ENABLE |
3104 CPU_CONTROL_DC_ENABLE | 3097 CPU_CONTROL_DC_ENABLE |
3105 CPU_CONTROL_WBUF_ENABLE | 3098 CPU_CONTROL_WBUF_ENABLE |
3106 CPU_CONTROL_32BP_ENABLE | 3099 CPU_CONTROL_32BP_ENABLE |
3107 CPU_CONTROL_32BD_ENABLE | 3100 CPU_CONTROL_32BD_ENABLE |
3108 CPU_CONTROL_LABT_ENABLE | 3101 CPU_CONTROL_LABT_ENABLE |
3109 CPU_CONTROL_SYST_ENABLE | 3102 CPU_CONTROL_SYST_ENABLE |
3110 CPU_CONTROL_UNAL_ENABLE | 3103 CPU_CONTROL_UNAL_ENABLE |
3111 CPU_CONTROL_IC_ENABLE; 3104 CPU_CONTROL_IC_ENABLE;
3112 3105
3113 /* 3106 /*
3114 * "write as existing" bits 3107 * "write as existing" bits
3115 * inverse of this is mask 3108 * inverse of this is mask
3116 */ 3109 */
3117 cpuctrl_wax = 3110 cpuctrl_wax =
3118 (3 << 30) | 3111 (3 << 30) |
3119 (1 << 29) | 3112 (1 << 29) |
3120 (1 << 28) | 3113 (1 << 28) |
3121 (3 << 26) | 3114 (3 << 26) |
3122 (3 << 19) | 3115 (3 << 19) |
3123 (1 << 17); 3116 (1 << 17);
3124 3117
3125#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3118#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3126 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3119 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3127#endif 3120#endif
3128 3121
3129 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 3122 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3130 3123
3131#ifdef __ARMEB__ 3124#ifdef __ARMEB__
3132 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3125 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3133#endif 3126#endif
3134 3127
3135#ifndef ARM_HAS_VBAR 3128#ifndef ARM_HAS_VBAR
3136 if (vector_page == ARM_VECTORS_HIGH) 3129 if (vector_page == ARM_VECTORS_HIGH)
3137 cpuctrl |= CPU_CONTROL_VECRELOC; 3130 cpuctrl |= CPU_CONTROL_VECRELOC;
3138#endif 3131#endif
3139 3132
3140 auxctrl = 0; 3133 auxctrl = 0;
3141 auxctrl_wax = ~0; 3134 auxctrl_wax = ~0;
3142 /* 3135 /*
3143 * This option enables the workaround for the 364296 ARM1136 3136 * This option enables the workaround for the 364296 ARM1136
3144 * r0pX errata (possible cache data corruption with 3137 * r0pX errata (possible cache data corruption with
3145 * hit-under-miss enabled). It sets the undocumented bit 31 in 3138 * hit-under-miss enabled). It sets the undocumented bit 31 in
3146 * the auxiliary control register and the FI bit in the control 3139 * the auxiliary control register and the FI bit in the control
3147 * register, thus disabling hit-under-miss without putting the 3140 * register, thus disabling hit-under-miss without putting the
3148 * processor into full low interrupt latency mode. ARM11MPCore 3141 * processor into full low interrupt latency mode. ARM11MPCore
3149 * is not affected. 3142 * is not affected.
3150 */ 3143 */
3151 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */ 3144 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
3152 cpuctrl |= CPU_CONTROL_FI_ENABLE; 3145 cpuctrl |= CPU_CONTROL_FI_ENABLE;
3153 auxctrl = ARM1136_AUXCTL_PFI; 3146 auxctrl = ARM1136_AUXCTL_PFI;
3154 auxctrl_wax = ~ARM1136_AUXCTL_PFI; 3147 auxctrl_wax = ~ARM1136_AUXCTL_PFI;
3155 } 3148 }
3156 3149
3157 /* 3150 /*
3158 * Enable an erratum workaround 3151 * Enable an erratum workaround
3159 */ 3152 */
3160 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */ 3153 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
3161 auxctrl = ARM1176_AUXCTL_PHD; 3154 auxctrl = ARM1176_AUXCTL_PHD;
3162 auxctrl_wax = ~ARM1176_AUXCTL_PHD; 3155 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
3163 } 3156 }
3164 3157
3165 /* Clear out the cache */ 3158 /* Clear out the cache */
3166 cpu_idcache_wbinv_all(); 3159 cpu_idcache_wbinv_all();
3167 3160
3168 /* Now really make sure they are clean. */ 3161 /* Now really make sure they are clean. */
3169 __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz)); 3162 __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
3170 3163
3171 /* Allow detection code to find the VFP if it's fitted. */ 3164 /* Allow detection code to find the VFP if it's fitted. */
3172 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 3165 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
3173 3166
3174 /* Set the control register */ 3167 /* Set the control register */
3175 curcpu()->ci_ctrl = cpuctrl; 3168 curcpu()->ci_ctrl = cpuctrl;
3176 cpu_control(~cpuctrl_wax, cpuctrl); 3169 cpu_control(~cpuctrl_wax, cpuctrl);
3177 3170
3178 __asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t" 3171 __asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t"
3179 "and %1, %0, %2\n\t" 3172 "and %1, %0, %2\n\t"
3180 "orr %1, %1, %3\n\t" 3173 "orr %1, %1, %3\n\t"
3181 "teq %0, %1\n\t" 3174 "teq %0, %1\n\t"
3182 "mcrne p15, 0, %1, c1, c0, 1\n\t" 3175 "mcrne p15, 0, %1, c1, c0, 1\n\t"
3183 : "=r"(tmp), "=r"(tmp2) : 3176 : "=r"(tmp), "=r"(tmp2) :
3184 "r"(auxctrl_wax), "r"(auxctrl)); 3177 "r"(auxctrl_wax), "r"(auxctrl));
3185 3178
3186 /* And again. */ 3179 /* And again. */
3187 cpu_idcache_wbinv_all(); 3180 cpu_idcache_wbinv_all();
3188} 3181}
3189#endif /* CPU_ARM1136 || CPU_ARM1176 */ 3182#endif /* CPU_ARM1136 || CPU_ARM1176 */
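arm11x6_setup() treats cpuctrl_wax and auxctrl_wax as "write as existing" masks: bits set in a wax word are kept from the current register contents and everything else is forced to the new setting, which is why the control register is written with cpu_control(~cpuctrl_wax, cpuctrl). The inline asm just above performs the matching read-modify-write on the auxiliary control register and only issues the mcr (mcrne after the teq) when the value actually changes. Rendered as plain C for readability -- the helper below is an illustration, not a function from this file:

	#include <stdint.h>

	/* Next auxiliary-control value the inline asm in arm11x6_setup()
	 * would write: keep the "write as existing" bits, OR in the rest. */
	static uint32_t
	auxctl_next(uint32_t old, uint32_t auxctrl_wax, uint32_t auxctrl)
	{
		return (old & auxctrl_wax) | auxctrl;
	}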
3190 3183
3191#ifdef CPU_SA110 3184#ifdef CPU_SA110
3192struct cpu_option sa110_options[] = { 3185struct cpu_option sa110_options[] = {
3193#ifdef COMPAT_12 3186#ifdef COMPAT_12
3194 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3187 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3195 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3188 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3196#endif /* COMPAT_12 */ 3189#endif /* COMPAT_12 */
3197 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3190 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3198 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3191 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3199 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3192 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3200 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3193 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3201 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3194 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3202 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3195 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3203 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3196 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
3204 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3197 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3205 { NULL, IGN, IGN, 0 } 3198 { NULL, IGN, IGN, 0 }
3206}; 3199};
3207 3200
3208void 3201void
3209sa110_setup(char *args) 3202sa110_setup(char *args)
3210{ 3203{
3211 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3204 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3212 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3205 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3213 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3206 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3214 | CPU_CONTROL_WBUF_ENABLE; 3207 | CPU_CONTROL_WBUF_ENABLE;
3215#if 0 3208#if 0
3216 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3209 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3217 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3210 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3218 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3211 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3219 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3212 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3220 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3213 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3221 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3214 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3222 | CPU_CONTROL_CPCLK; 3215 | CPU_CONTROL_CPCLK;
3223#endif 3216#endif
3224 3217
3225#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3218#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3226 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3219 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3227#endif 3220#endif
3228 3221
3229 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl); 3222 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
3230 3223
3231#ifdef __ARMEB__ 3224#ifdef __ARMEB__
3232 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3225 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3233#endif 3226#endif
3234 3227
3235#ifndef ARM_HAS_VBAR 3228#ifndef ARM_HAS_VBAR
3236 if (vector_page == ARM_VECTORS_HIGH) 3229 if (vector_page == ARM_VECTORS_HIGH)
3237 cpuctrl |= CPU_CONTROL_VECRELOC; 3230 cpuctrl |= CPU_CONTROL_VECRELOC;
3238#endif 3231#endif
3239 3232
3240 /* Clear out the cache */ 3233 /* Clear out the cache */
3241 cpu_idcache_wbinv_all(); 3234 cpu_idcache_wbinv_all();
3242 3235
3243 /* Set the control register */ 3236 /* Set the control register */
3244 curcpu()->ci_ctrl = cpuctrl; 3237 curcpu()->ci_ctrl = cpuctrl;
3245#if 0 3238#if 0
3246 cpu_control(cpuctrlmask, cpuctrl); 3239 cpu_control(cpuctrlmask, cpuctrl);
3247#endif 3240#endif
3248 cpu_control(0xffffffff, cpuctrl); 3241 cpu_control(0xffffffff, cpuctrl);
3249 3242
3250 /* 3243 /*
3251 * enable clock switching; note that this doesn't read or write r0, 3244 * enable clock switching; note that this doesn't read or write r0,
3252 * r0 is just there to make the asm valid 3245 * r0 is just there to make the asm valid
3253 */ 3246 */
3254 __asm volatile ("mcr p15, 0, r0, c15, c1, 2"); 3247 __asm volatile ("mcr p15, 0, r0, c15, c1, 2");
3255} 3248}
3256#endif /* CPU_SA110 */ 3249#endif /* CPU_SA110 */
3257 3250
3258#if defined(CPU_SA1100) || defined(CPU_SA1110) 3251#if defined(CPU_SA1100) || defined(CPU_SA1110)
3259struct cpu_option sa11x0_options[] = { 3252struct cpu_option sa11x0_options[] = {
3260#ifdef COMPAT_12 3253#ifdef COMPAT_12
3261 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3254 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3262 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3255 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3263#endif /* COMPAT_12 */ 3256#endif /* COMPAT_12 */
3264 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3257 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3265 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3258 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3266 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3259 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3267 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3260 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3268 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3261 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3269 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3262 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3270 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3263 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
3271 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3264 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3272 { NULL, IGN, IGN, 0 } 3265 { NULL, IGN, IGN, 0 }
3273}; 3266};
3274 3267
3275void 3268void
3276sa11x0_setup(char *args) 3269sa11x0_setup(char *args)
3277{ 3270{
3278 3271
3279 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3272 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3280 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3273 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3281 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3274 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3282 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE; 3275 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3283#if 0 3276#if 0
3284 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3277 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3285 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3278 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3286 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3279 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3287 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3280 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3288 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3281 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3289 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3282 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3290 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3283 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3291#endif 3284#endif
3292 3285
3293#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3286#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3294 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3287 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3295#endif 3288#endif
3296 3289
3297 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl); 3290 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
3298 3291
3299#ifdef __ARMEB__ 3292#ifdef __ARMEB__
3300 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3293 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3301#endif 3294#endif
3302 3295
3303#ifndef ARM_HAS_VBAR 3296#ifndef ARM_HAS_VBAR
3304 if (vector_page == ARM_VECTORS_HIGH) 3297 if (vector_page == ARM_VECTORS_HIGH)
3305 cpuctrl |= CPU_CONTROL_VECRELOC; 3298 cpuctrl |= CPU_CONTROL_VECRELOC;
3306#endif 3299#endif
3307 3300
3308 /* Clear out the cache */ 3301 /* Clear out the cache */
3309 cpu_idcache_wbinv_all(); 3302 cpu_idcache_wbinv_all();
3310 3303
3311 /* Set the control register */ 3304 /* Set the control register */
3312 curcpu()->ci_ctrl = cpuctrl; 3305 curcpu()->ci_ctrl = cpuctrl;
3313 cpu_control(0xffffffff, cpuctrl); 3306 cpu_control(0xffffffff, cpuctrl);
3314} 3307}
3315#endif /* CPU_SA1100 || CPU_SA1110 */ 3308#endif /* CPU_SA1100 || CPU_SA1110 */
3316 3309
3317#if defined(CPU_FA526) 3310#if defined(CPU_FA526)
3318struct cpu_option fa526_options[] = { 3311struct cpu_option fa526_options[] = {
3319#ifdef COMPAT_12 3312#ifdef COMPAT_12
3320 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3313 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3321 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3314 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3322#endif /* COMPAT_12 */ 3315#endif /* COMPAT_12 */
3323 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3316 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3324 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3317 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3325 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3318 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3326 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3319 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
3327 { NULL, IGN, IGN, 0 } 3320 { NULL, IGN, IGN, 0 }
3328}; 3321};
3329 3322
3330void 3323void
3331fa526_setup(char *args) 3324fa526_setup(char *args)
3332{ 3325{
3333 3326
3334 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3327 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3335 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3328 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3336 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3329 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3337 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE; 3330 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3338#if 0 3331#if 0
3339 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3332 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3340 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3333 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3341 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3334 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3342 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3335 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3343 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3336 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3344 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3337 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3345 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3338 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3346#endif 3339#endif
3347 3340
3348#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3341#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3349 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3342 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3350#endif 3343#endif
3351 3344
3352 cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl); 3345 cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
3353 3346
3354#ifdef __ARMEB__ 3347#ifdef __ARMEB__
3355 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3348 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3356#endif 3349#endif
3357 3350
3358#ifndef ARM_HAS_VBAR 3351#ifndef ARM_HAS_VBAR
3359 if (vector_page == ARM_VECTORS_HIGH) 3352 if (vector_page == ARM_VECTORS_HIGH)
3360 cpuctrl |= CPU_CONTROL_VECRELOC; 3353 cpuctrl |= CPU_CONTROL_VECRELOC;
3361#endif 3354#endif
3362 3355
3363 /* Clear out the cache */ 3356 /* Clear out the cache */
3364 cpu_idcache_wbinv_all(); 3357 cpu_idcache_wbinv_all();
3365 3358
3366 /* Set the control register */ 3359 /* Set the control register */
3367 curcpu()->ci_ctrl = cpuctrl; 3360 curcpu()->ci_ctrl = cpuctrl;
3368 cpu_control(0xffffffff, cpuctrl); 3361 cpu_control(0xffffffff, cpuctrl);
3369} 3362}
3370#endif /* CPU_FA526 */ 3363#endif /* CPU_FA526 */
3371 3364
3372#if defined(CPU_IXP12X0) 3365#if defined(CPU_IXP12X0)
3373struct cpu_option ixp12x0_options[] = { 3366struct cpu_option ixp12x0_options[] = {
3374 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3367 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3375 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3368 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3376 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3369 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3377 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3370 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3378 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3371 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3379 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3372 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3380 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3373 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
3381 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3374 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3382 { NULL, IGN, IGN, 0 } 3375 { NULL, IGN, IGN, 0 }
3383}; 3376};
3384 3377
3385void 3378void
3386ixp12x0_setup(char *args) 3379ixp12x0_setup(char *args)
3387{ 3380{
3388 3381
3389 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE 3382 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
3390 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE 3383 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
3391 | CPU_CONTROL_IC_ENABLE; 3384 | CPU_CONTROL_IC_ENABLE;
3392 3385
3393 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE 3386 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
3394 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE 3387 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
3395 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE 3388 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
3396 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE 3389 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
3397 | CPU_CONTROL_VECRELOC; 3390 | CPU_CONTROL_VECRELOC;
3398 3391
3399#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3392#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3400 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3393 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3401#endif 3394#endif
3402 3395
3403 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl); 3396 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
3404 3397
3405#ifdef __ARMEB__ 3398#ifdef __ARMEB__
3406 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3399 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3407#endif 3400#endif
3408 3401
3409#ifndef ARM_HAS_VBAR 3402#ifndef ARM_HAS_VBAR
3410 if (vector_page == ARM_VECTORS_HIGH) 3403 if (vector_page == ARM_VECTORS_HIGH)
3411 cpuctrl |= CPU_CONTROL_VECRELOC; 3404 cpuctrl |= CPU_CONTROL_VECRELOC;
3412#endif 3405#endif
3413 3406
3414 /* Clear out the cache */ 3407 /* Clear out the cache */
3415 cpu_idcache_wbinv_all(); 3408 cpu_idcache_wbinv_all();
3416 3409
3417 /* Set the control register */ 3410 /* Set the control register */
3418 curcpu()->ci_ctrl = cpuctrl; 3411 curcpu()->ci_ctrl = cpuctrl;
3419 /* cpu_control(0xffffffff, cpuctrl); */ 3412 /* cpu_control(0xffffffff, cpuctrl); */
3420 cpu_control(cpuctrlmask, cpuctrl); 3413 cpu_control(cpuctrlmask, cpuctrl);
3421} 3414}
3422#endif /* CPU_IXP12X0 */ 3415#endif /* CPU_IXP12X0 */
3423 3416
3424#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 3417#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
3425 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || defined(CPU_CORTEX) 3418 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || defined(CPU_CORTEX)
3426struct cpu_option xscale_options[] = { 3419struct cpu_option xscale_options[] = {
3427#ifdef COMPAT_12 3420#ifdef COMPAT_12
3428 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3421 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
3429 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3422 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3430#endif /* COMPAT_12 */ 3423#endif /* COMPAT_12 */
3431 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3424 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
3432 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3425 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3433 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3426 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3434 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3427 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
3435 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3428 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3436 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3429 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3437 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3430 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3438 { NULL, IGN, IGN, 0 } 3431 { NULL, IGN, IGN, 0 }
3439}; 3432};
3440 3433
3441void 3434void
3442xscale_setup(char *args) 3435xscale_setup(char *args)
3443{ 3436{
3444 uint32_t auxctl; 3437 uint32_t auxctl;
3445 3438
3446 /* 3439 /*
3447 * The XScale Write Buffer is always enabled. Our option 3440 * The XScale Write Buffer is always enabled. Our option
3448 * is to enable/disable coalescing. Note that bits 6:3 3441 * is to enable/disable coalescing. Note that bits 6:3
3449 * must always be enabled. 3442 * must always be enabled.
3450 */ 3443 */
3451 3444
3452 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3445 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3453 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3446 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3454 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3447 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3455 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE 3448 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
3456 | CPU_CONTROL_BPRD_ENABLE; 3449 | CPU_CONTROL_BPRD_ENABLE;
3457#if 0 3450#if 0
3458 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3451 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3459 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3452 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3460 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3453 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3461 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3454 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3462 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3455 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3463 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3456 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3464 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3457 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3465#endif 3458#endif
3466 3459
3467#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3460#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3468 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3461 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3469#endif 3462#endif
3470 3463
3471 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl); 3464 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
3472 3465
3473#ifdef __ARMEB__ 3466#ifdef __ARMEB__
3474 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3467 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3475#endif 3468#endif
3476 3469
3477#ifndef ARM_HAS_VBAR 3470#ifndef ARM_HAS_VBAR
3478 if (vector_page == ARM_VECTORS_HIGH) 3471 if (vector_page == ARM_VECTORS_HIGH)
3479 cpuctrl |= CPU_CONTROL_VECRELOC; 3472 cpuctrl |= CPU_CONTROL_VECRELOC;
3480#endif 3473#endif
3481 3474
3482 /* Clear out the cache */ 3475 /* Clear out the cache */
3483 cpu_idcache_wbinv_all(); 3476 cpu_idcache_wbinv_all();
3484 3477
3485 /* 3478 /*
3486 * Set the control register. Note that bits 6:3 must always 3479 * Set the control register. Note that bits 6:3 must always
3487 * be set to 1. 3480 * be set to 1.
3488 */ 3481 */
3489 curcpu()->ci_ctrl = cpuctrl; 3482 curcpu()->ci_ctrl = cpuctrl;
3490#if 0 3483#if 0
3491 cpu_control(cpuctrlmask, cpuctrl); 3484 cpu_control(cpuctrlmask, cpuctrl);
3492#endif 3485#endif
3493 cpu_control(0xffffffff, cpuctrl); 3486 cpu_control(0xffffffff, cpuctrl);
3494 3487
3495 /* Make sure write coalescing is turned on */ 3488 /* Make sure write coalescing is turned on */
3496 __asm volatile("mrc p15, 0, %0, c1, c0, 1" 3489 __asm volatile("mrc p15, 0, %0, c1, c0, 1"
3497 : "=r" (auxctl)); 3490 : "=r" (auxctl));
3498#ifdef XSCALE_NO_COALESCE_WRITES 3491#ifdef XSCALE_NO_COALESCE_WRITES
3499 auxctl |= XSCALE_AUXCTL_K; 3492 auxctl |= XSCALE_AUXCTL_K;
3500#else 3493#else
3501 auxctl &= ~XSCALE_AUXCTL_K; 3494 auxctl &= ~XSCALE_AUXCTL_K;
3502#endif 3495#endif
3503 __asm volatile("mcr p15, 0, %0, c1, c0, 1" 3496 __asm volatile("mcr p15, 0, %0, c1, c0, 1"
3504 : : "r" (auxctl)); 3497 : : "r" (auxctl));
3505} 3498}
3506#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 || CPU_CORTEX */ 3499#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 || CPU_CORTEX */
3507 3500
3508#if defined(CPU_SHEEVA) 3501#if defined(CPU_SHEEVA)
3509struct cpu_option sheeva_options[] = { 3502struct cpu_option sheeva_options[] = {
3510 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3503 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3511 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3504 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3512 { "sheeva.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3505 { "sheeva.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3513 { "sheeva.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3506 { "sheeva.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3514 { "sheeva.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3507 { "sheeva.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3515 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3508 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3516 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3509 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
3517 { "sheeva.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3510 { "sheeva.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3518 { NULL, IGN, IGN, 0 } 3511 { NULL, IGN, IGN, 0 }
3519}; 3512};
3520 3513
3521void 3514void
3522sheeva_setup(char *args) 3515sheeva_setup(char *args)
3523{ 3516{
3524 uint32_t sheeva_ext; 3517 uint32_t sheeva_ext;
3525 3518
3526 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 3519 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3527 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3520 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3528 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; 3521 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3529#if 0 3522#if 0
3530 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 3523 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3531 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3524 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3532 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3525 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3533 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3526 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3534 | CPU_CONTROL_BPRD_ENABLE 3527 | CPU_CONTROL_BPRD_ENABLE
3535 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 3528 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3536#endif 3529#endif
3537 3530
3538#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3531#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3539 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3532 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3540#endif 3533#endif
3541 3534
3542 cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl); 3535 cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
3543 3536
3544 /* Enable DCache Streaming Switch and Write Allocate */ 3537 /* Enable DCache Streaming Switch and Write Allocate */
3545 __asm volatile("mrc p15, 1, %0, c15, c1, 0" 3538 __asm volatile("mrc p15, 1, %0, c15, c1, 0"
3546 : "=r" (sheeva_ext)); 3539 : "=r" (sheeva_ext));
3547 3540
3548 sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN; 3541 sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
3549 3542
3550 __asm volatile("mcr p15, 1, %0, c15, c1, 0" 3543 __asm volatile("mcr p15, 1, %0, c15, c1, 0"
3551 :: "r" (sheeva_ext)); 3544 :: "r" (sheeva_ext));
3552 3545
3553 /* 3546 /*
3554 * Sheeva has L2 Cache. Enable/Disable it here. 3547 * Sheeva has L2 Cache. Enable/Disable it here.
3555 * Not really supported yet... 3548 * Not really supported yet...
3556 */ 3549 */
3557 3550
3558#ifdef __ARMEB__ 3551#ifdef __ARMEB__
3559 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3552 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3560#endif 3553#endif
3561 3554
3562#ifndef ARM_HAS_VBAR 3555#ifndef ARM_HAS_VBAR
3563 if (vector_page == ARM_VECTORS_HIGH) 3556 if (vector_page == ARM_VECTORS_HIGH)
3564 cpuctrl |= CPU_CONTROL_VECRELOC; 3557 cpuctrl |= CPU_CONTROL_VECRELOC;
3565#endif 3558#endif
3566 3559
3567 /* Clear out the cache */ 3560 /* Clear out the cache */
3568 cpu_idcache_wbinv_all(); 3561 cpu_idcache_wbinv_all();
3569 3562
3570 /* Now really make sure they are clean. */ 3563 /* Now really make sure they are clean. */
3571 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 3564 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3572 3565
3573 /* Set the control register */ 3566 /* Set the control register */
3574 curcpu()->ci_ctrl = cpuctrl; 3567 curcpu()->ci_ctrl = cpuctrl;
3575 cpu_control(0xffffffff, cpuctrl); 3568 cpu_control(0xffffffff, cpuctrl);
3576 3569
3577 /* And again. */ 3570 /* And again. */
3578 cpu_idcache_wbinv_all(); 3571 cpu_idcache_wbinv_all();
3579} 3572}
3580#endif /* CPU_SHEEVA */ 3573#endif /* CPU_SHEEVA */