| @@ -1,577 +1,579 @@ | | | @@ -1,577 +1,579 @@ |
1 | /* $NetBSD: cpu.c,v 1.20.2.1 2019/10/23 19:14:19 martin Exp $ */ | | 1 | /* $NetBSD: cpu.c,v 1.20.2.2 2019/12/29 09:40:59 martin Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> | | 4 | * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | | 17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
18 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | | 18 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
19 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, | | 19 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, |
20 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | | 20 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
21 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | | 21 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
22 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 22 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, | | 23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
24 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING | | 24 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING |
25 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 25 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. | | 26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | #include <sys/cdefs.h> | | 29 | #include <sys/cdefs.h> |
30 | __KERNEL_RCSID(1, "$NetBSD: cpu.c,v 1.20.2.1 2019/10/23 19:14:19 martin Exp $"); | | 30 | __KERNEL_RCSID(1, "$NetBSD: cpu.c,v 1.20.2.2 2019/12/29 09:40:59 martin Exp $"); |
31 | | | 31 | |
32 | #include "locators.h" | | 32 | #include "locators.h" |
33 | #include "opt_arm_debug.h" | | 33 | #include "opt_arm_debug.h" |
34 | #include "opt_fdt.h" | | 34 | #include "opt_fdt.h" |
35 | #include "opt_multiprocessor.h" | | 35 | #include "opt_multiprocessor.h" |
36 | | | 36 | |
37 | #include <sys/param.h> | | 37 | #include <sys/param.h> |
38 | #include <sys/systm.h> | | 38 | #include <sys/systm.h> |
39 | #include <sys/atomic.h> | | 39 | #include <sys/atomic.h> |
40 | #include <sys/device.h> | | 40 | #include <sys/device.h> |
41 | #include <sys/cpu.h> | | 41 | #include <sys/cpu.h> |
42 | #include <sys/kmem.h> | | 42 | #include <sys/kmem.h> |
43 | #include <sys/reboot.h> | | 43 | #include <sys/reboot.h> |
44 | #include <sys/sysctl.h> | | 44 | #include <sys/sysctl.h> |
45 | | | 45 | |
46 | #include <aarch64/armreg.h> | | 46 | #include <aarch64/armreg.h> |
47 | #include <aarch64/cpu.h> | | 47 | #include <aarch64/cpu.h> |
48 | #include <aarch64/cpufunc.h> | | 48 | #include <aarch64/cpufunc.h> |
49 | #include <aarch64/machdep.h> | | 49 | #include <aarch64/machdep.h> |
50 | | | 50 | |
51 | #ifdef FDT | | 51 | #ifdef FDT |
52 | #include <arm/fdt/arm_fdtvar.h> | | 52 | #include <arm/fdt/arm_fdtvar.h> |
53 | #endif | | 53 | #endif |
54 | | | 54 | |
55 | #ifdef VERBOSE_INIT_ARM | | 55 | #ifdef VERBOSE_INIT_ARM |
56 | #define VPRINTF(...) printf(__VA_ARGS__) | | 56 | #define VPRINTF(...) printf(__VA_ARGS__) |
57 | #else | | 57 | #else |
58 | #define VPRINTF(...) __nothing | | 58 | #define VPRINTF(...) __nothing |
59 | #endif | | 59 | #endif |
60 | | | 60 | |
61 | void cpu_attach(device_t, cpuid_t); | | 61 | void cpu_attach(device_t, cpuid_t); |
62 | static void identify_aarch64_model(uint32_t, char *, size_t); | | 62 | static void identify_aarch64_model(uint32_t, char *, size_t); |
63 | static void cpu_identify(device_t self, struct cpu_info *); | | 63 | static void cpu_identify(device_t self, struct cpu_info *); |
64 | static void cpu_identify1(device_t self, struct cpu_info *); | | 64 | static void cpu_identify1(device_t self, struct cpu_info *); |
65 | static void cpu_identify2(device_t self, struct cpu_info *); | | 65 | static void cpu_identify2(device_t self, struct cpu_info *); |
66 | static void cpu_setup_id(struct cpu_info *); | | 66 | static void cpu_setup_id(struct cpu_info *); |
67 | static void cpu_setup_sysctl(device_t, struct cpu_info *); | | 67 | static void cpu_setup_sysctl(device_t, struct cpu_info *); |
68 | | | 68 | |
69 | #ifdef MULTIPROCESSOR | | 69 | #ifdef MULTIPROCESSOR |
70 | uint64_t cpu_mpidr[MAXCPUS]; | | 70 | uint64_t cpu_mpidr[MAXCPUS]; |
71 | | | 71 | |
72 | volatile u_int aarch64_cpu_mbox[howmany(MAXCPUS, sizeof(u_int))] __cacheline_aligned = { 0 }; | | 72 | volatile u_int aarch64_cpu_mbox[howmany(MAXCPUS, sizeof(u_int))] __cacheline_aligned = { 0 }; |
73 | volatile u_int aarch64_cpu_hatched[howmany(MAXCPUS, sizeof(u_int))] __cacheline_aligned = { 0 }; | | 73 | volatile u_int aarch64_cpu_hatched[howmany(MAXCPUS, sizeof(u_int))] __cacheline_aligned = { 0 }; |
74 | u_int arm_cpu_max = 1; | | 74 | u_int arm_cpu_max = 1; |
75 | | | 75 | |
76 | static kmutex_t cpu_hatch_lock; | | 76 | static kmutex_t cpu_hatch_lock; |
77 | #endif /* MULTIPROCESSOR */ | | 77 | #endif /* MULTIPROCESSOR */ |
78 | | | 78 | |
79 | #ifdef MULTIPROCESSOR | | 79 | #ifdef MULTIPROCESSOR |
80 | #define NCPUINFO MAXCPUS | | 80 | #define NCPUINFO MAXCPUS |
81 | #else | | 81 | #else |
82 | #define NCPUINFO 1 | | 82 | #define NCPUINFO 1 |
83 | #endif /* MULTIPROCESSOR */ | | 83 | #endif /* MULTIPROCESSOR */ |
84 | | | 84 | |
85 | /* | | 85 | /* |
86 | * Our exported CPU info; | | 86 | * Our exported CPU info; |
87 | * these will be refered from secondary cpus in the middle of hatching. | | 87 | * these will be refered from secondary cpus in the middle of hatching. |
88 | */ | | 88 | */ |
89 | struct cpu_info cpu_info_store[NCPUINFO] = { | | 89 | struct cpu_info cpu_info_store[NCPUINFO] = { |
90 | [0] = { | | 90 | [0] = { |
91 | .ci_cpl = IPL_HIGH, | | 91 | .ci_cpl = IPL_HIGH, |
92 | .ci_curlwp = &lwp0 | | 92 | .ci_curlwp = &lwp0 |
93 | } | | 93 | } |
94 | }; | | 94 | }; |
95 | | | 95 | |
96 | struct cpu_info *cpu_info[NCPUINFO] __read_mostly = { | | 96 | struct cpu_info *cpu_info[NCPUINFO] __read_mostly = { |
97 | [0] = &cpu_info_store[0] | | 97 | [0] = &cpu_info_store[0] |
98 | }; | | 98 | }; |
99 | | | 99 | |
100 | void | | 100 | void |
101 | cpu_attach(device_t dv, cpuid_t id) | | 101 | cpu_attach(device_t dv, cpuid_t id) |
102 | { | | 102 | { |
103 | struct cpu_info *ci; | | 103 | struct cpu_info *ci; |
104 | const int unit = device_unit(dv); | | 104 | const int unit = device_unit(dv); |
105 | uint64_t mpidr; | | 105 | uint64_t mpidr; |
106 | | | 106 | |
107 | if (unit == 0) { | | 107 | if (unit == 0) { |
108 | ci = curcpu(); | | 108 | ci = curcpu(); |
109 | ci->ci_cpuid = id; | | 109 | ci->ci_cpuid = id; |
110 | cpu_setup_id(ci); | | 110 | cpu_setup_id(ci); |
111 | } else { | | 111 | } else { |
112 | #ifdef MULTIPROCESSOR | | 112 | #ifdef MULTIPROCESSOR |
113 | if ((boothowto & RB_MD1) != 0) { | | 113 | if ((boothowto & RB_MD1) != 0) { |
114 | aprint_naive("\n"); | | 114 | aprint_naive("\n"); |
115 | aprint_normal(": multiprocessor boot disabled\n"); | | 115 | aprint_normal(": multiprocessor boot disabled\n"); |
116 | return; | | 116 | return; |
117 | } | | 117 | } |
118 | | | 118 | |
119 | KASSERT(unit < MAXCPUS); | | 119 | KASSERT(unit < MAXCPUS); |
120 | ci = &cpu_info_store[unit]; | | 120 | ci = &cpu_info_store[unit]; |
121 | | | 121 | |
122 | ci->ci_cpl = IPL_HIGH; | | 122 | ci->ci_cpl = IPL_HIGH; |
123 | ci->ci_cpuid = id; | | 123 | ci->ci_cpuid = id; |
124 | // XXX big.LITTLE | | 124 | // XXX big.LITTLE |
125 | ci->ci_data.cpu_cc_freq = cpu_info_store[0].ci_data.cpu_cc_freq; | | 125 | ci->ci_data.cpu_cc_freq = cpu_info_store[0].ci_data.cpu_cc_freq; |
126 | /* ci_id is stored by own cpus when hatching */ | | 126 | /* ci_id is stored by own cpus when hatching */ |
127 | | | 127 | |
128 | cpu_info[ncpu] = ci; | | 128 | cpu_info[ncpu] = ci; |
129 | if (cpu_hatched_p(unit) == 0) { | | 129 | if (cpu_hatched_p(unit) == 0) { |
130 | ci->ci_dev = dv; | | 130 | ci->ci_dev = dv; |
131 | dv->dv_private = ci; | | 131 | dv->dv_private = ci; |
132 | ci->ci_index = -1; | | 132 | ci->ci_index = -1; |
133 | | | 133 | |
134 | aprint_naive(": disabled\n"); | | 134 | aprint_naive(": disabled\n"); |
135 | aprint_normal(": disabled (unresponsive)\n"); | | 135 | aprint_normal(": disabled (unresponsive)\n"); |
136 | return; | | 136 | return; |
137 | } | | 137 | } |
138 | #else /* MULTIPROCESSOR */ | | 138 | #else /* MULTIPROCESSOR */ |
139 | aprint_naive(": disabled\n"); | | 139 | aprint_naive(": disabled\n"); |
140 | aprint_normal(": disabled (uniprocessor kernel)\n"); | | 140 | aprint_normal(": disabled (uniprocessor kernel)\n"); |
141 | return; | | 141 | return; |
142 | #endif /* MULTIPROCESSOR */ | | 142 | #endif /* MULTIPROCESSOR */ |
143 | } | | 143 | } |
144 | | | 144 | |
145 | mpidr = ci->ci_id.ac_mpidr; | | 145 | mpidr = ci->ci_id.ac_mpidr; |
146 | if (mpidr & MPIDR_MT) { | | 146 | if (mpidr & MPIDR_MT) { |
147 | ci->ci_smt_id = __SHIFTOUT(mpidr, MPIDR_AFF0); | | 147 | ci->ci_smt_id = __SHIFTOUT(mpidr, MPIDR_AFF0); |
148 | ci->ci_core_id = __SHIFTOUT(mpidr, MPIDR_AFF1); | | 148 | ci->ci_core_id = __SHIFTOUT(mpidr, MPIDR_AFF1); |
149 | ci->ci_package_id = __SHIFTOUT(mpidr, MPIDR_AFF2); | | 149 | ci->ci_package_id = __SHIFTOUT(mpidr, MPIDR_AFF2); |
150 | } else { | | 150 | } else { |
151 | ci->ci_core_id = __SHIFTOUT(mpidr, MPIDR_AFF0); | | 151 | ci->ci_core_id = __SHIFTOUT(mpidr, MPIDR_AFF0); |
152 | ci->ci_package_id = __SHIFTOUT(mpidr, MPIDR_AFF1); | | 152 | ci->ci_package_id = __SHIFTOUT(mpidr, MPIDR_AFF1); |
153 | } | | 153 | } |
154 | | | 154 | |
155 | ci->ci_dev = dv; | | 155 | ci->ci_dev = dv; |
156 | dv->dv_private = ci; | | 156 | dv->dv_private = ci; |
157 | | | 157 | |
158 | cpu_identify(ci->ci_dev, ci); | | 158 | cpu_identify(ci->ci_dev, ci); |
159 | #ifdef MULTIPROCESSOR | | 159 | #ifdef MULTIPROCESSOR |
160 | if (unit != 0) { | | 160 | if (unit != 0) { |
161 | mi_cpu_attach(ci); | | 161 | mi_cpu_attach(ci); |
162 | return; | | 162 | return; |
163 | } | | 163 | } |
164 | #endif /* MULTIPROCESSOR */ | | 164 | #endif /* MULTIPROCESSOR */ |
165 | | | 165 | |
166 | set_cpufuncs(); | | 166 | set_cpufuncs(); |
167 | fpu_attach(ci); | | 167 | fpu_attach(ci); |
168 | | | 168 | |
169 | cpu_identify1(dv, ci); | | 169 | cpu_identify1(dv, ci); |
170 | aarch64_getcacheinfo(); | | 170 | aarch64_getcacheinfo(); |
171 | aarch64_printcacheinfo(dv); | | 171 | aarch64_printcacheinfo(dv); |
172 | cpu_identify2(dv, ci); | | 172 | cpu_identify2(dv, ci); |
173 | | | 173 | |
174 | cpu_setup_sysctl(dv, ci); | | 174 | cpu_setup_sysctl(dv, ci); |
175 | } | | 175 | } |
176 | | | 176 | |
177 | struct cpuidtab { | | 177 | struct cpuidtab { |
178 | uint32_t cpu_partnum; | | 178 | uint32_t cpu_partnum; |
179 | const char *cpu_name; | | 179 | const char *cpu_name; |
180 | const char *cpu_class; | | 180 | const char *cpu_class; |
181 | const char *cpu_architecture; | | 181 | const char *cpu_architecture; |
182 | }; | | 182 | }; |
183 | | | 183 | |
184 | #define CPU_PARTMASK (CPU_ID_IMPLEMENTOR_MASK | CPU_ID_PARTNO_MASK) | | 184 | #define CPU_PARTMASK (CPU_ID_IMPLEMENTOR_MASK | CPU_ID_PARTNO_MASK) |
185 | | | 185 | |
186 | const struct cpuidtab cpuids[] = { | | 186 | const struct cpuidtab cpuids[] = { |
187 | { CPU_ID_CORTEXA35R0 & CPU_PARTMASK, "Cortex-A35", "Cortex", "V8-A" }, | | 187 | { CPU_ID_CORTEXA35R0 & CPU_PARTMASK, "Cortex-A35", "Cortex", "V8-A" }, |
188 | { CPU_ID_CORTEXA53R0 & CPU_PARTMASK, "Cortex-A53", "Cortex", "V8-A" }, | | 188 | { CPU_ID_CORTEXA53R0 & CPU_PARTMASK, "Cortex-A53", "Cortex", "V8-A" }, |
189 | { CPU_ID_CORTEXA57R0 & CPU_PARTMASK, "Cortex-A57", "Cortex", "V8-A" }, | | 189 | { CPU_ID_CORTEXA57R0 & CPU_PARTMASK, "Cortex-A57", "Cortex", "V8-A" }, |
190 | { CPU_ID_CORTEXA55R1 & CPU_PARTMASK, "Cortex-A55", "Cortex", "V8.2-A+" }, | | 190 | { CPU_ID_CORTEXA55R1 & CPU_PARTMASK, "Cortex-A55", "Cortex", "V8.2-A+" }, |
191 | { CPU_ID_CORTEXA65R0 & CPU_PARTMASK, "Cortex-A65", "Cortex", "V8.2-A+" }, | | 191 | { CPU_ID_CORTEXA65R0 & CPU_PARTMASK, "Cortex-A65", "Cortex", "V8.2-A+" }, |
192 | { CPU_ID_CORTEXA72R0 & CPU_PARTMASK, "Cortex-A72", "Cortex", "V8-A" }, | | 192 | { CPU_ID_CORTEXA72R0 & CPU_PARTMASK, "Cortex-A72", "Cortex", "V8-A" }, |
193 | { CPU_ID_CORTEXA73R0 & CPU_PARTMASK, "Cortex-A73", "Cortex", "V8-A" }, | | 193 | { CPU_ID_CORTEXA73R0 & CPU_PARTMASK, "Cortex-A73", "Cortex", "V8-A" }, |
194 | { CPU_ID_CORTEXA75R2 & CPU_PARTMASK, "Cortex-A75", "Cortex", "V8.2-A+" }, | | 194 | { CPU_ID_CORTEXA75R2 & CPU_PARTMASK, "Cortex-A75", "Cortex", "V8.2-A+" }, |
195 | { CPU_ID_CORTEXA76R3 & CPU_PARTMASK, "Cortex-A76", "Cortex", "V8.2-A+" }, | | 195 | { CPU_ID_CORTEXA76R3 & CPU_PARTMASK, "Cortex-A76", "Cortex", "V8.2-A+" }, |
196 | { CPU_ID_CORTEXA76AER1 & CPU_PARTMASK, "Cortex-A76AE", "Cortex", "V8.2-A+" }, | | 196 | { CPU_ID_CORTEXA76AER1 & CPU_PARTMASK, "Cortex-A76AE", "Cortex", "V8.2-A+" }, |
197 | { CPU_ID_CORTEXA77R0 & CPU_PARTMASK, "Cortex-A77", "Cortex", "V8.2-A+" }, | | 197 | { CPU_ID_CORTEXA77R0 & CPU_PARTMASK, "Cortex-A77", "Cortex", "V8.2-A+" }, |
198 | { CPU_ID_EMAG8180 & CPU_PARTMASK, "Ampere eMAG", "Skylark", "V8-A" }, | | 198 | { CPU_ID_EMAG8180 & CPU_PARTMASK, "Ampere eMAG", "Skylark", "V8-A" }, |
| | | 199 | { CPU_ID_NEOVERSEE1R1 & CPU_PARTMASK, "Neoverse E1", "Neoverse", "V8.2-A+" }, |
| | | 200 | { CPU_ID_NEOVERSEN1R3 & CPU_PARTMASK, "Neoverse N1", "Neoverse", "V8.2-A+" }, |
199 | { CPU_ID_THUNDERXRX, "Cavium ThunderX", "Cavium", "V8-A" }, | | 201 | { CPU_ID_THUNDERXRX, "Cavium ThunderX", "Cavium", "V8-A" }, |
200 | { CPU_ID_THUNDERX81XXRX, "Cavium ThunderX CN81XX", "Cavium", "V8-A" }, | | 202 | { CPU_ID_THUNDERX81XXRX, "Cavium ThunderX CN81XX", "Cavium", "V8-A" }, |
201 | { CPU_ID_THUNDERX83XXRX, "Cavium ThunderX CN83XX", "Cavium", "V8-A" }, | | 203 | { CPU_ID_THUNDERX83XXRX, "Cavium ThunderX CN83XX", "Cavium", "V8-A" }, |
202 | { CPU_ID_THUNDERX2RX, "Cavium ThunderX2", "Cavium", "V8.1-A" }, | | 204 | { CPU_ID_THUNDERX2RX, "Cavium ThunderX2", "Cavium", "V8.1-A" }, |
203 | }; | | 205 | }; |
204 | | | 206 | |
205 | static void | | 207 | static void |
206 | identify_aarch64_model(uint32_t cpuid, char *buf, size_t len) | | 208 | identify_aarch64_model(uint32_t cpuid, char *buf, size_t len) |
207 | { | | 209 | { |
208 | int i; | | 210 | int i; |
209 | uint32_t cpupart, variant, revision; | | 211 | uint32_t cpupart, variant, revision; |
210 | | | 212 | |
211 | cpupart = cpuid & CPU_PARTMASK; | | 213 | cpupart = cpuid & CPU_PARTMASK; |
212 | variant = __SHIFTOUT(cpuid, CPU_ID_VARIANT_MASK); | | 214 | variant = __SHIFTOUT(cpuid, CPU_ID_VARIANT_MASK); |
213 | revision = __SHIFTOUT(cpuid, CPU_ID_REVISION_MASK); | | 215 | revision = __SHIFTOUT(cpuid, CPU_ID_REVISION_MASK); |
214 | | | 216 | |
215 | for (i = 0; i < __arraycount(cpuids); i++) { | | 217 | for (i = 0; i < __arraycount(cpuids); i++) { |
216 | if (cpupart == cpuids[i].cpu_partnum) { | | 218 | if (cpupart == cpuids[i].cpu_partnum) { |
217 | snprintf(buf, len, "%s r%dp%d (%s %s core)", | | 219 | snprintf(buf, len, "%s r%dp%d (%s %s core)", |
218 | cpuids[i].cpu_name, variant, revision, | | 220 | cpuids[i].cpu_name, variant, revision, |
219 | cpuids[i].cpu_class, | | 221 | cpuids[i].cpu_class, |
220 | cpuids[i].cpu_architecture); | | 222 | cpuids[i].cpu_architecture); |
221 | return; | | 223 | return; |
222 | } | | 224 | } |
223 | } | | 225 | } |
224 | | | 226 | |
225 | snprintf(buf, len, "unknown CPU (ID = 0x%08x)", cpuid); | | 227 | snprintf(buf, len, "unknown CPU (ID = 0x%08x)", cpuid); |
226 | } | | 228 | } |
227 | | | 229 | |
228 | static void | | 230 | static void |
229 | cpu_identify(device_t self, struct cpu_info *ci) | | 231 | cpu_identify(device_t self, struct cpu_info *ci) |
230 | { | | 232 | { |
231 | char model[128]; | | 233 | char model[128]; |
232 | | | 234 | |
233 | identify_aarch64_model(ci->ci_id.ac_midr, model, sizeof(model)); | | 235 | identify_aarch64_model(ci->ci_id.ac_midr, model, sizeof(model)); |
234 | if (ci->ci_index == 0) | | 236 | if (ci->ci_index == 0) |
235 | cpu_setmodel("%s", model); | | 237 | cpu_setmodel("%s", model); |
236 | | | 238 | |
237 | aprint_naive("\n"); | | 239 | aprint_naive("\n"); |
238 | aprint_normal(": %s\n", model); | | 240 | aprint_normal(": %s\n", model); |
239 | aprint_normal_dev(ci->ci_dev, "package %lu, core %lu, smt %lu\n", | | 241 | aprint_normal_dev(ci->ci_dev, "package %lu, core %lu, smt %lu\n", |
240 | ci->ci_package_id, ci->ci_core_id, ci->ci_smt_id); | | 242 | ci->ci_package_id, ci->ci_core_id, ci->ci_smt_id); |
241 | } | | 243 | } |
242 | | | 244 | |
243 | static void | | 245 | static void |
244 | cpu_identify1(device_t self, struct cpu_info *ci) | | 246 | cpu_identify1(device_t self, struct cpu_info *ci) |
245 | { | | 247 | { |
246 | uint32_t ctr, sctlr; /* for cache */ | | 248 | uint32_t ctr, sctlr; /* for cache */ |
247 | | | 249 | |
248 | /* SCTLR - System Control Register */ | | 250 | /* SCTLR - System Control Register */ |
249 | sctlr = reg_sctlr_el1_read(); | | 251 | sctlr = reg_sctlr_el1_read(); |
250 | if (sctlr & SCTLR_I) | | 252 | if (sctlr & SCTLR_I) |
251 | aprint_normal_dev(self, "IC enabled"); | | 253 | aprint_normal_dev(self, "IC enabled"); |
252 | else | | 254 | else |
253 | aprint_normal_dev(self, "IC disabled"); | | 255 | aprint_normal_dev(self, "IC disabled"); |
254 | | | 256 | |
255 | if (sctlr & SCTLR_C) | | 257 | if (sctlr & SCTLR_C) |
256 | aprint_normal(", DC enabled"); | | 258 | aprint_normal(", DC enabled"); |
257 | else | | 259 | else |
258 | aprint_normal(", DC disabled"); | | 260 | aprint_normal(", DC disabled"); |
259 | | | 261 | |
260 | if (sctlr & SCTLR_A) | | 262 | if (sctlr & SCTLR_A) |
261 | aprint_normal(", Alignment check enabled\n"); | | 263 | aprint_normal(", Alignment check enabled\n"); |
262 | else { | | 264 | else { |
263 | switch (sctlr & (SCTLR_SA | SCTLR_SA0)) { | | 265 | switch (sctlr & (SCTLR_SA | SCTLR_SA0)) { |
264 | case SCTLR_SA | SCTLR_SA0: | | 266 | case SCTLR_SA | SCTLR_SA0: |
265 | aprint_normal( | | 267 | aprint_normal( |
266 | ", EL0/EL1 stack Alignment check enabled\n"); | | 268 | ", EL0/EL1 stack Alignment check enabled\n"); |
267 | break; | | 269 | break; |
268 | case SCTLR_SA: | | 270 | case SCTLR_SA: |
269 | aprint_normal(", EL1 stack Alignment check enabled\n"); | | 271 | aprint_normal(", EL1 stack Alignment check enabled\n"); |
270 | break; | | 272 | break; |
271 | case SCTLR_SA0: | | 273 | case SCTLR_SA0: |
272 | aprint_normal(", EL0 stack Alignment check enabled\n"); | | 274 | aprint_normal(", EL0 stack Alignment check enabled\n"); |
273 | break; | | 275 | break; |
274 | case 0: | | 276 | case 0: |
275 | aprint_normal(", Alignment check disabled\n"); | | 277 | aprint_normal(", Alignment check disabled\n"); |
276 | break; | | 278 | break; |
277 | } | | 279 | } |
278 | } | | 280 | } |
279 | | | 281 | |
280 | /* | | 282 | /* |
281 | * CTR - Cache Type Register | | 283 | * CTR - Cache Type Register |
282 | */ | | 284 | */ |
283 | ctr = reg_ctr_el0_read(); | | 285 | ctr = reg_ctr_el0_read(); |
284 | aprint_normal_dev(self, "Cache Writeback Granule %" PRIu64 "B," | | 286 | aprint_normal_dev(self, "Cache Writeback Granule %" PRIu64 "B," |
285 | " Exclusives Reservation Granule %" PRIu64 "B\n", | | 287 | " Exclusives Reservation Granule %" PRIu64 "B\n", |
286 | __SHIFTOUT(ctr, CTR_EL0_CWG_LINE) * 4, | | 288 | __SHIFTOUT(ctr, CTR_EL0_CWG_LINE) * 4, |
287 | __SHIFTOUT(ctr, CTR_EL0_ERG_LINE) * 4); | | 289 | __SHIFTOUT(ctr, CTR_EL0_ERG_LINE) * 4); |
288 | | | 290 | |
289 | aprint_normal_dev(self, "Dcache line %ld, Icache line %ld\n", | | 291 | aprint_normal_dev(self, "Dcache line %ld, Icache line %ld\n", |
290 | sizeof(int) << __SHIFTOUT(ctr, CTR_EL0_DMIN_LINE), | | 292 | sizeof(int) << __SHIFTOUT(ctr, CTR_EL0_DMIN_LINE), |
291 | sizeof(int) << __SHIFTOUT(ctr, CTR_EL0_IMIN_LINE)); | | 293 | sizeof(int) << __SHIFTOUT(ctr, CTR_EL0_IMIN_LINE)); |
292 | } | | 294 | } |
293 | | | 295 | |
294 | | | 296 | |
295 | /* | | 297 | /* |
296 | * identify vfp, etc. | | 298 | * identify vfp, etc. |
297 | */ | | 299 | */ |
298 | static void | | 300 | static void |
299 | cpu_identify2(device_t self, struct cpu_info *ci) | | 301 | cpu_identify2(device_t self, struct cpu_info *ci) |
300 | { | | 302 | { |
301 | struct aarch64_sysctl_cpu_id *id = &ci->ci_id; | | 303 | struct aarch64_sysctl_cpu_id *id = &ci->ci_id; |
302 | uint64_t dfr0; | | 304 | uint64_t dfr0; |
303 | | | 305 | |
304 | if (!CPU_IS_PRIMARY(ci)) { | | 306 | if (!CPU_IS_PRIMARY(ci)) { |
305 | cpu_setup_id(ci); | | 307 | cpu_setup_id(ci); |
306 | cpu_setup_sysctl(self, ci); | | 308 | cpu_setup_sysctl(self, ci); |
307 | } | | 309 | } |
308 | | | 310 | |
309 | dfr0 = reg_id_aa64dfr0_el1_read(); | | 311 | dfr0 = reg_id_aa64dfr0_el1_read(); |
310 | | | 312 | |
311 | aprint_normal_dev(self, "revID=0x%" PRIx64, id->ac_revidr); | | 313 | aprint_normal_dev(self, "revID=0x%" PRIx64, id->ac_revidr); |
312 | | | 314 | |
313 | /* ID_AA64DFR0_EL1 */ | | 315 | /* ID_AA64DFR0_EL1 */ |
314 | switch (__SHIFTOUT(dfr0, ID_AA64DFR0_EL1_PMUVER)) { | | 316 | switch (__SHIFTOUT(dfr0, ID_AA64DFR0_EL1_PMUVER)) { |
315 | case ID_AA64DFR0_EL1_PMUVER_V3: | | 317 | case ID_AA64DFR0_EL1_PMUVER_V3: |
316 | aprint_normal(", PMCv3"); | | 318 | aprint_normal(", PMCv3"); |
317 | break; | | 319 | break; |
318 | case ID_AA64DFR0_EL1_PMUVER_NOV3: | | 320 | case ID_AA64DFR0_EL1_PMUVER_NOV3: |
319 | aprint_normal(", PMC"); | | 321 | aprint_normal(", PMC"); |
320 | break; | | 322 | break; |
321 | } | | 323 | } |
322 | | | 324 | |
323 | /* ID_AA64MMFR0_EL1 */ | | 325 | /* ID_AA64MMFR0_EL1 */ |
324 | switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN4)) { | | 326 | switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN4)) { |
325 | case ID_AA64MMFR0_EL1_TGRAN4_4KB: | | 327 | case ID_AA64MMFR0_EL1_TGRAN4_4KB: |
326 | aprint_normal(", 4k table"); | | 328 | aprint_normal(", 4k table"); |
327 | break; | | 329 | break; |
328 | } | | 330 | } |
329 | switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN16)) { | | 331 | switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN16)) { |
330 | case ID_AA64MMFR0_EL1_TGRAN16_16KB: | | 332 | case ID_AA64MMFR0_EL1_TGRAN16_16KB: |
331 | aprint_normal(", 16k table"); | | 333 | aprint_normal(", 16k table"); |
332 | break; | | 334 | break; |
333 | } | | 335 | } |
334 | switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN64)) { | | 336 | switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_TGRAN64)) { |
335 | case ID_AA64MMFR0_EL1_TGRAN64_64KB: | | 337 | case ID_AA64MMFR0_EL1_TGRAN64_64KB: |
336 | aprint_normal(", 64k table"); | | 338 | aprint_normal(", 64k table"); |
337 | break; | | 339 | break; |
338 | } | | 340 | } |
339 | | | 341 | |
340 | switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_ASIDBITS)) { | | 342 | switch (__SHIFTOUT(id->ac_aa64mmfr0, ID_AA64MMFR0_EL1_ASIDBITS)) { |
341 | case ID_AA64MMFR0_EL1_ASIDBITS_8BIT: | | 343 | case ID_AA64MMFR0_EL1_ASIDBITS_8BIT: |
342 | aprint_normal(", 8bit ASID"); | | 344 | aprint_normal(", 8bit ASID"); |
343 | break; | | 345 | break; |
344 | case ID_AA64MMFR0_EL1_ASIDBITS_16BIT: | | 346 | case ID_AA64MMFR0_EL1_ASIDBITS_16BIT: |
345 | aprint_normal(", 16bit ASID"); | | 347 | aprint_normal(", 16bit ASID"); |
346 | break; | | 348 | break; |
347 | } | | 349 | } |
348 | aprint_normal("\n"); | | 350 | aprint_normal("\n"); |
349 | | | 351 | |
350 | | | 352 | |
351 | | | 353 | |
352 | aprint_normal_dev(self, "auxID=0x%" PRIx64, ci->ci_id.ac_aa64isar0); | | 354 | aprint_normal_dev(self, "auxID=0x%" PRIx64, ci->ci_id.ac_aa64isar0); |
353 | | | 355 | |
354 | /* PFR0 */ | | 356 | /* PFR0 */ |
355 | switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_GIC)) { | | 357 | switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_GIC)) { |
356 | case ID_AA64PFR0_EL1_GIC_CPUIF_EN: | | 358 | case ID_AA64PFR0_EL1_GIC_CPUIF_EN: |
357 | aprint_normal(", GICv3"); | | 359 | aprint_normal(", GICv3"); |
358 | break; | | 360 | break; |
359 | } | | 361 | } |
360 | switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_FP)) { | | 362 | switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_FP)) { |
361 | case ID_AA64PFR0_EL1_FP_IMPL: | | 363 | case ID_AA64PFR0_EL1_FP_IMPL: |
362 | aprint_normal(", FP"); | | 364 | aprint_normal(", FP"); |
363 | break; | | 365 | break; |
364 | } | | 366 | } |
365 | | | 367 | |
366 | /* ISAR0 */ | | 368 | /* ISAR0 */ |
367 | switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_CRC32)) { | | 369 | switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_CRC32)) { |
368 | case ID_AA64ISAR0_EL1_CRC32_CRC32X: | | 370 | case ID_AA64ISAR0_EL1_CRC32_CRC32X: |
369 | aprint_normal(", CRC32"); | | 371 | aprint_normal(", CRC32"); |
370 | break; | | 372 | break; |
371 | } | | 373 | } |
372 | switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_SHA1)) { | | 374 | switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_SHA1)) { |
373 | case ID_AA64ISAR0_EL1_SHA1_SHA1CPMHSU: | | 375 | case ID_AA64ISAR0_EL1_SHA1_SHA1CPMHSU: |
374 | aprint_normal(", SHA1"); | | 376 | aprint_normal(", SHA1"); |
375 | break; | | 377 | break; |
376 | } | | 378 | } |
377 | switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_SHA2)) { | | 379 | switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_SHA2)) { |
378 | case ID_AA64ISAR0_EL1_SHA2_SHA256HSU: | | 380 | case ID_AA64ISAR0_EL1_SHA2_SHA256HSU: |
379 | aprint_normal(", SHA256"); | | 381 | aprint_normal(", SHA256"); |
380 | break; | | 382 | break; |
381 | } | | 383 | } |
382 | switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_AES)) { | | 384 | switch (__SHIFTOUT(id->ac_aa64isar0, ID_AA64ISAR0_EL1_AES)) { |
383 | case ID_AA64ISAR0_EL1_AES_AES: | | 385 | case ID_AA64ISAR0_EL1_AES_AES: |
384 | aprint_normal(", AES"); | | 386 | aprint_normal(", AES"); |
385 | break; | | 387 | break; |
386 | case ID_AA64ISAR0_EL1_AES_PMUL: | | 388 | case ID_AA64ISAR0_EL1_AES_PMUL: |
387 | aprint_normal(", AES+PMULL"); | | 389 | aprint_normal(", AES+PMULL"); |
388 | break; | | 390 | break; |
389 | } | | 391 | } |
390 | | | 392 | |
391 | | | 393 | |
392 | /* PFR0:AdvSIMD */ | | 394 | /* PFR0:AdvSIMD */ |
393 | switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_ADVSIMD)) { | | 395 | switch (__SHIFTOUT(id->ac_aa64pfr0, ID_AA64PFR0_EL1_ADVSIMD)) { |
394 | case ID_AA64PFR0_EL1_ADV_SIMD_IMPL: | | 396 | case ID_AA64PFR0_EL1_ADV_SIMD_IMPL: |
395 | aprint_normal(", NEON"); | | 397 | aprint_normal(", NEON"); |
396 | break; | | 398 | break; |
397 | } | | 399 | } |
398 | | | 400 | |
399 | /* MVFR0/MVFR1 */ | | 401 | /* MVFR0/MVFR1 */ |
400 | switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_FPROUND)) { | | 402 | switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_FPROUND)) { |
401 | case MVFR0_FPROUND_ALL: | | 403 | case MVFR0_FPROUND_ALL: |
402 | aprint_normal(", rounding"); | | 404 | aprint_normal(", rounding"); |
403 | break; | | 405 | break; |
404 | } | | 406 | } |
405 | switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_FPTRAP)) { | | 407 | switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_FPTRAP)) { |
406 | case MVFR0_FPTRAP_TRAP: | | 408 | case MVFR0_FPTRAP_TRAP: |
407 | aprint_normal(", exceptions"); | | 409 | aprint_normal(", exceptions"); |
408 | break; | | 410 | break; |
409 | } | | 411 | } |
410 | switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_FPDNAN)) { | | 412 | switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_FPDNAN)) { |
411 | case MVFR1_FPDNAN_NAN: | | 413 | case MVFR1_FPDNAN_NAN: |
412 | aprint_normal(", NaN propagation"); | | 414 | aprint_normal(", NaN propagation"); |
413 | break; | | 415 | break; |
414 | } | | 416 | } |
415 | switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_FPFTZ)) { | | 417 | switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_FPFTZ)) { |
416 | case MVFR1_FPFTZ_DENORMAL: | | 418 | case MVFR1_FPFTZ_DENORMAL: |
417 | aprint_normal(", denormals"); | | 419 | aprint_normal(", denormals"); |
418 | break; | | 420 | break; |
419 | } | | 421 | } |
420 | switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_SIMDREG)) { | | 422 | switch (__SHIFTOUT(id->ac_mvfr0, MVFR0_SIMDREG)) { |
421 | case MVFR0_SIMDREG_16x64: | | 423 | case MVFR0_SIMDREG_16x64: |
422 | aprint_normal(", 16x64bitRegs"); | | 424 | aprint_normal(", 16x64bitRegs"); |
423 | break; | | 425 | break; |
424 | case MVFR0_SIMDREG_32x64: | | 426 | case MVFR0_SIMDREG_32x64: |
425 | aprint_normal(", 32x64bitRegs"); | | 427 | aprint_normal(", 32x64bitRegs"); |
426 | break; | | 428 | break; |
427 | } | | 429 | } |
428 | switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_SIMDFMAC)) { | | 430 | switch (__SHIFTOUT(id->ac_mvfr1, MVFR1_SIMDFMAC)) { |
429 | case MVFR1_SIMDFMAC_FMAC: | | 431 | case MVFR1_SIMDFMAC_FMAC: |
430 | aprint_normal(", Fused Multiply-Add"); | | 432 | aprint_normal(", Fused Multiply-Add"); |
431 | break; | | 433 | break; |
432 | } | | 434 | } |
433 | | | 435 | |
434 | aprint_normal("\n"); | | 436 | aprint_normal("\n"); |
435 | } | | 437 | } |
436 | | | 438 | |
437 | /* | | 439 | /* |
438 | * Fill in this CPUs id data. Must be called from hatched cpus. | | 440 | * Fill in this CPUs id data. Must be called from hatched cpus. |
439 | */ | | 441 | */ |
440 | static void | | 442 | static void |
441 | cpu_setup_id(struct cpu_info *ci) | | 443 | cpu_setup_id(struct cpu_info *ci) |
442 | { | | 444 | { |
443 | struct aarch64_sysctl_cpu_id *id = &ci->ci_id; | | 445 | struct aarch64_sysctl_cpu_id *id = &ci->ci_id; |
444 | | | 446 | |
445 | memset(id, 0, sizeof *id); | | 447 | memset(id, 0, sizeof *id); |
446 | | | 448 | |
447 | id->ac_midr = reg_midr_el1_read(); | | 449 | id->ac_midr = reg_midr_el1_read(); |
448 | id->ac_revidr = reg_revidr_el1_read(); | | 450 | id->ac_revidr = reg_revidr_el1_read(); |
449 | id->ac_mpidr = reg_mpidr_el1_read(); | | 451 | id->ac_mpidr = reg_mpidr_el1_read(); |
450 | | | 452 | |
451 | id->ac_aa64dfr0 = reg_id_aa64dfr0_el1_read(); | | 453 | id->ac_aa64dfr0 = reg_id_aa64dfr0_el1_read(); |
452 | id->ac_aa64dfr1 = reg_id_aa64dfr1_el1_read(); | | 454 | id->ac_aa64dfr1 = reg_id_aa64dfr1_el1_read(); |
453 | | | 455 | |
454 | id->ac_aa64isar0 = reg_id_aa64isar0_el1_read(); | | 456 | id->ac_aa64isar0 = reg_id_aa64isar0_el1_read(); |
455 | id->ac_aa64isar1 = reg_id_aa64isar1_el1_read(); | | 457 | id->ac_aa64isar1 = reg_id_aa64isar1_el1_read(); |
456 | | | 458 | |
457 | id->ac_aa64mmfr0 = reg_id_aa64mmfr0_el1_read(); | | 459 | id->ac_aa64mmfr0 = reg_id_aa64mmfr0_el1_read(); |
458 | id->ac_aa64mmfr1 = reg_id_aa64mmfr1_el1_read(); | | 460 | id->ac_aa64mmfr1 = reg_id_aa64mmfr1_el1_read(); |
459 | /* Only in ARMv8.2. */ | | 461 | /* Only in ARMv8.2. */ |
460 | id->ac_aa64mmfr2 = 0 /* reg_id_aa64mmfr2_el1_read() */; | | 462 | id->ac_aa64mmfr2 = 0 /* reg_id_aa64mmfr2_el1_read() */; |
461 | | | 463 | |
462 | id->ac_mvfr0 = reg_mvfr0_el1_read(); | | 464 | id->ac_mvfr0 = reg_mvfr0_el1_read(); |
463 | id->ac_mvfr1 = reg_mvfr1_el1_read(); | | 465 | id->ac_mvfr1 = reg_mvfr1_el1_read(); |
464 | id->ac_mvfr2 = reg_mvfr2_el1_read(); | | 466 | id->ac_mvfr2 = reg_mvfr2_el1_read(); |
465 | | | 467 | |
466 | /* Only in ARMv8.2. */ | | 468 | /* Only in ARMv8.2. */ |
467 | id->ac_aa64zfr0 = 0 /* reg_id_aa64zfr0_el1_read() */; | | 469 | id->ac_aa64zfr0 = 0 /* reg_id_aa64zfr0_el1_read() */; |
468 | | | 470 | |
469 | id->ac_aa64pfr0 = reg_id_aa64pfr0_el1_read(); | | 471 | id->ac_aa64pfr0 = reg_id_aa64pfr0_el1_read(); |
470 | id->ac_aa64pfr1 = reg_id_aa64pfr1_el1_read(); | | 472 | id->ac_aa64pfr1 = reg_id_aa64pfr1_el1_read(); |
471 | } | | 473 | } |
472 | | | 474 | |
473 | /* | | 475 | /* |
474 | * setup the per-cpu sysctl tree. | | 476 | * setup the per-cpu sysctl tree. |
475 | */ | | 477 | */ |
476 | static void | | 478 | static void |
477 | cpu_setup_sysctl(device_t dv, struct cpu_info *ci) | | 479 | cpu_setup_sysctl(device_t dv, struct cpu_info *ci) |
478 | { | | 480 | { |
479 | const struct sysctlnode *cpunode = NULL; | | 481 | const struct sysctlnode *cpunode = NULL; |
480 | | | 482 | |
481 | sysctl_createv(NULL, 0, NULL, &cpunode, | | 483 | sysctl_createv(NULL, 0, NULL, &cpunode, |
482 | CTLFLAG_PERMANENT, | | 484 | CTLFLAG_PERMANENT, |
483 | CTLTYPE_NODE, device_xname(dv), NULL, | | 485 | CTLTYPE_NODE, device_xname(dv), NULL, |
484 | NULL, 0, NULL, 0, | | 486 | NULL, 0, NULL, 0, |
485 | CTL_MACHDEP, | | 487 | CTL_MACHDEP, |
486 | CTL_CREATE, CTL_EOL); | | 488 | CTL_CREATE, CTL_EOL); |
487 | | | 489 | |
488 | if (cpunode == NULL) | | 490 | if (cpunode == NULL) |
489 | return; | | 491 | return; |
490 | | | 492 | |
491 | sysctl_createv(NULL, 0, &cpunode, NULL, | | 493 | sysctl_createv(NULL, 0, &cpunode, NULL, |
492 | CTLFLAG_PERMANENT, | | 494 | CTLFLAG_PERMANENT, |
493 | CTLTYPE_STRUCT, "cpu_id", NULL, | | 495 | CTLTYPE_STRUCT, "cpu_id", NULL, |
494 | NULL, 0, &ci->ci_id, sizeof(ci->ci_id), | | 496 | NULL, 0, &ci->ci_id, sizeof(ci->ci_id), |
495 | CTL_CREATE, CTL_EOL); | | 497 | CTL_CREATE, CTL_EOL); |
496 | } | | 498 | } |
497 | | | 499 | |
498 | #ifdef MULTIPROCESSOR | | 500 | #ifdef MULTIPROCESSOR |
/*
 * Kick all hatched secondary CPUs out of their holding loop and wait
 * for each to complete cpu_hatch().  Called once from the primary CPU.
 * Skipped entirely when booting with RB_MD1 (uniprocessor requested).
 */
void
cpu_boot_secondary_processors(void)
{
	u_int n, bit;

	if ((boothowto & RB_MD1) != 0)
		return;

	/* serializes the identify/printout section of cpu_hatch() */
	mutex_init(&cpu_hatch_lock, MUTEX_DEFAULT, IPL_NONE);

	VPRINTF("%s: starting secondary processors\n", __func__);

	/* send mbox to have secondary processors do cpu_hatch() */
	for (n = 0; n < __arraycount(aarch64_cpu_mbox); n++)
		atomic_or_uint(&aarch64_cpu_mbox[n], aarch64_cpu_hatched[n]);
	/* wake any CPU parked in wfe */
	__asm __volatile ("sev; sev; sev");

	/* wait all cpus have done cpu_hatch() */
	for (n = 0; n < __arraycount(aarch64_cpu_mbox); n++) {
		/*
		 * each secondary clears its own mbox bit when done;
		 * re-read after a memory barrier until all bits drop.
		 */
		while (membar_consumer(), aarch64_cpu_mbox[n] & aarch64_cpu_hatched[n]) {
			__asm __volatile ("wfe");
		}
		/* Add processors to kcpuset */
		for (bit = 0; bit < 32; bit++) {
			if (aarch64_cpu_hatched[n] & __BIT(bit))
				kcpuset_set(kcpuset_attached, n * 32 + bit);
		}
	}

	VPRINTF("%s: secondary processors hatched\n", __func__);
}
530 | | | 532 | |
/*
 * Per-CPU initialization run on each secondary processor after it has
 * been released by cpu_boot_secondary_processors().  Sets up cpufuncs,
 * FPU, cache info and interrupts, then clears its mailbox bit to signal
 * completion.
 */
void
cpu_hatch(struct cpu_info *ci)
{
	KASSERT(curcpu() == ci);

	/* identify/printout below is serialized across hatching CPUs */
	mutex_enter(&cpu_hatch_lock);

	set_cpufuncs();
	fpu_attach(ci);

	cpu_identify1(ci->ci_dev, ci);
	aarch64_getcacheinfo();
	aarch64_printcacheinfo(ci->ci_dev);
	cpu_identify2(ci->ci_dev, ci);

	mutex_exit(&cpu_hatch_lock);

	intr_cpu_init(ci);

#ifdef FDT
	arm_fdt_cpu_hatch(ci);
#endif
#ifdef MD_CPU_HATCH
	MD_CPU_HATCH(ci);	/* for non-fdt arch? */
#endif

	/*
	 * clear my bit of aarch64_cpu_mbox to tell cpu_boot_secondary_processors().
	 * there are cpu0,1,2,3, and if cpu2 is unresponsive,
	 * ci_index are each cpu0=0, cpu1=1, cpu2=undef, cpu3=2.
	 * therefore we have to use device_unit instead of ci_index for mbox.
	 */
	const u_int off = device_unit(ci->ci_dev) / 32;
	const u_int bit = device_unit(ci->ci_dev) % 32;
	atomic_and_uint(&aarch64_cpu_mbox[off], ~__BIT(bit));
	/* wake the primary CPU waiting in wfe */
	__asm __volatile ("sev; sev; sev");
}
568 | | | 570 | |
569 | bool | | 571 | bool |
570 | cpu_hatched_p(u_int cpuindex) | | 572 | cpu_hatched_p(u_int cpuindex) |
571 | { | | 573 | { |
572 | const u_int off = cpuindex / 32; | | 574 | const u_int off = cpuindex / 32; |
573 | const u_int bit = cpuindex % 32; | | 575 | const u_int bit = cpuindex % 32; |
574 | membar_consumer(); | | 576 | membar_consumer(); |
575 | return (aarch64_cpu_hatched[off] & __BIT(bit)) != 0; | | 577 | return (aarch64_cpu_hatched[off] & __BIT(bit)) != 0; |
576 | } | | 578 | } |
577 | #endif /* MULTIPROCESSOR */ | | 579 | #endif /* MULTIPROCESSOR */ |