@@ -1,1453 +1,1453 @@
-/*	$NetBSD: cpu.c,v 1.195 2020/07/14 00:45:53 yamaguchi Exp $	*/
+/*	$NetBSD: cpu.c,v 1.196 2020/07/28 14:49:55 fcambus Exp $	*/
 
 /*
  * Copyright (c) 2000-2020 NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * Copyright (c) 1999 Stefan Grefen
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed by the NetBSD
  *      Foundation, Inc. and its contributors.
  * 4. Neither the name of The NetBSD Foundation nor the names of its
  *    contributors may be used to endorse or promote products derived
  *    from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.195 2020/07/14 00:45:53 yamaguchi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.196 2020/07/28 14:49:55 fcambus Exp $");
66 | | | 66 | |
67 | #include "opt_ddb.h" | | 67 | #include "opt_ddb.h" |
68 | #include "opt_mpbios.h" /* for MPDEBUG */ | | 68 | #include "opt_mpbios.h" /* for MPDEBUG */ |
69 | #include "opt_mtrr.h" | | 69 | #include "opt_mtrr.h" |
70 | #include "opt_multiprocessor.h" | | 70 | #include "opt_multiprocessor.h" |
71 | #include "opt_svs.h" | | 71 | #include "opt_svs.h" |
72 | | | 72 | |
73 | #include "lapic.h" | | 73 | #include "lapic.h" |
74 | #include "ioapic.h" | | 74 | #include "ioapic.h" |
75 | #include "acpica.h" | | 75 | #include "acpica.h" |
76 | #include "hpet.h" | | 76 | #include "hpet.h" |
77 | | | 77 | |
78 | #include <sys/param.h> | | 78 | #include <sys/param.h> |
79 | #include <sys/proc.h> | | 79 | #include <sys/proc.h> |
80 | #include <sys/systm.h> | | 80 | #include <sys/systm.h> |
81 | #include <sys/device.h> | | 81 | #include <sys/device.h> |
82 | #include <sys/cpu.h> | | 82 | #include <sys/cpu.h> |
83 | #include <sys/cpufreq.h> | | 83 | #include <sys/cpufreq.h> |
84 | #include <sys/idle.h> | | 84 | #include <sys/idle.h> |
85 | #include <sys/atomic.h> | | 85 | #include <sys/atomic.h> |
86 | #include <sys/reboot.h> | | 86 | #include <sys/reboot.h> |
87 | #include <sys/csan.h> | | 87 | #include <sys/csan.h> |
88 | | | 88 | |
89 | #include <uvm/uvm.h> | | 89 | #include <uvm/uvm.h> |
90 | | | 90 | |
91 | #include "acpica.h" /* for NACPICA, for mp_verbose */ | | 91 | #include "acpica.h" /* for NACPICA, for mp_verbose */ |
92 | | | 92 | |
93 | #include <x86/machdep.h> | | 93 | #include <x86/machdep.h> |
94 | #include <machine/cpufunc.h> | | 94 | #include <machine/cpufunc.h> |
95 | #include <machine/cpuvar.h> | | 95 | #include <machine/cpuvar.h> |
96 | #include <machine/pmap.h> | | 96 | #include <machine/pmap.h> |
97 | #include <machine/vmparam.h> | | 97 | #include <machine/vmparam.h> |
98 | #if defined(MULTIPROCESSOR) | | 98 | #if defined(MULTIPROCESSOR) |
99 | #include <machine/mpbiosvar.h> | | 99 | #include <machine/mpbiosvar.h> |
100 | #endif | | 100 | #endif |
101 | #include <machine/mpconfig.h> /* for mp_verbose */ | | 101 | #include <machine/mpconfig.h> /* for mp_verbose */ |
102 | #include <machine/pcb.h> | | 102 | #include <machine/pcb.h> |
103 | #include <machine/specialreg.h> | | 103 | #include <machine/specialreg.h> |
104 | #include <machine/segments.h> | | 104 | #include <machine/segments.h> |
105 | #include <machine/gdt.h> | | 105 | #include <machine/gdt.h> |
106 | #include <machine/mtrr.h> | | 106 | #include <machine/mtrr.h> |
107 | #include <machine/pio.h> | | 107 | #include <machine/pio.h> |
108 | #include <machine/cpu_counter.h> | | 108 | #include <machine/cpu_counter.h> |
109 | | | 109 | |
110 | #include <x86/fpu.h> | | 110 | #include <x86/fpu.h> |
111 | | | 111 | |
112 | #if NACPICA > 0 | | 112 | #if NACPICA > 0 |
113 | #include <dev/acpi/acpi_srat.h> | | 113 | #include <dev/acpi/acpi_srat.h> |
114 | #endif | | 114 | #endif |
115 | | | 115 | |
116 | #if NLAPIC > 0 | | 116 | #if NLAPIC > 0 |
117 | #include <machine/apicvar.h> | | 117 | #include <machine/apicvar.h> |
118 | #include <machine/i82489reg.h> | | 118 | #include <machine/i82489reg.h> |
119 | #include <machine/i82489var.h> | | 119 | #include <machine/i82489var.h> |
120 | #endif | | 120 | #endif |
121 | | | 121 | |
122 | #include <dev/ic/mc146818reg.h> | | 122 | #include <dev/ic/mc146818reg.h> |
123 | #include <dev/ic/hpetvar.h> | | 123 | #include <dev/ic/hpetvar.h> |
124 | #include <i386/isa/nvram.h> | | 124 | #include <i386/isa/nvram.h> |
125 | #include <dev/isa/isareg.h> | | 125 | #include <dev/isa/isareg.h> |
126 | | | 126 | |
127 | #include "tsc.h" | | 127 | #include "tsc.h" |
128 | | | 128 | |
129 | #ifndef XENPV | | 129 | #ifndef XENPV |
130 | #include "hyperv.h" | | 130 | #include "hyperv.h" |
131 | #if NHYPERV > 0 | | 131 | #if NHYPERV > 0 |
132 | #include <x86/x86/hypervvar.h> | | 132 | #include <x86/x86/hypervvar.h> |
133 | #endif | | 133 | #endif |
134 | #endif | | 134 | #endif |
135 | | | 135 | |
136 | #ifdef XEN | | 136 | #ifdef XEN |
137 | #include <xen/hypervisor.h> | | 137 | #include <xen/hypervisor.h> |
138 | #endif | | 138 | #endif |
139 | | | 139 | |
140 | static int cpu_match(device_t, cfdata_t, void *); | | 140 | static int cpu_match(device_t, cfdata_t, void *); |
141 | static void cpu_attach(device_t, device_t, void *); | | 141 | static void cpu_attach(device_t, device_t, void *); |
142 | static void cpu_defer(device_t); | | 142 | static void cpu_defer(device_t); |
143 | static int cpu_rescan(device_t, const char *, const int *); | | 143 | static int cpu_rescan(device_t, const char *, const int *); |
144 | static void cpu_childdetached(device_t, device_t); | | 144 | static void cpu_childdetached(device_t, device_t); |
145 | static bool cpu_stop(device_t); | | 145 | static bool cpu_stop(device_t); |
146 | static bool cpu_suspend(device_t, const pmf_qual_t *); | | 146 | static bool cpu_suspend(device_t, const pmf_qual_t *); |
147 | static bool cpu_resume(device_t, const pmf_qual_t *); | | 147 | static bool cpu_resume(device_t, const pmf_qual_t *); |
148 | static bool cpu_shutdown(device_t, int); | | 148 | static bool cpu_shutdown(device_t, int); |
149 | | | 149 | |
150 | struct cpu_softc { | | 150 | struct cpu_softc { |
151 | device_t sc_dev; /* device tree glue */ | | 151 | device_t sc_dev; /* device tree glue */ |
152 | struct cpu_info *sc_info; /* pointer to CPU info */ | | 152 | struct cpu_info *sc_info; /* pointer to CPU info */ |
153 | bool sc_wasonline; | | 153 | bool sc_wasonline; |
154 | }; | | 154 | }; |
155 | | | 155 | |
156 | #ifdef MULTIPROCESSOR | | 156 | #ifdef MULTIPROCESSOR |
157 | int mp_cpu_start(struct cpu_info *, paddr_t); | | 157 | int mp_cpu_start(struct cpu_info *, paddr_t); |
158 | void mp_cpu_start_cleanup(struct cpu_info *); | | 158 | void mp_cpu_start_cleanup(struct cpu_info *); |
159 | const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL, | | 159 | const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL, |
160 | mp_cpu_start_cleanup }; | | 160 | mp_cpu_start_cleanup }; |
161 | #endif | | 161 | #endif |
162 | | | 162 | |
163 | | | 163 | |
164 | CFATTACH_DECL2_NEW(cpu, sizeof(struct cpu_softc), | | 164 | CFATTACH_DECL2_NEW(cpu, sizeof(struct cpu_softc), |
165 | cpu_match, cpu_attach, NULL, NULL, cpu_rescan, cpu_childdetached); | | 165 | cpu_match, cpu_attach, NULL, NULL, cpu_rescan, cpu_childdetached); |
166 | | | 166 | |
167 | /* | | 167 | /* |
168 | * Statically-allocated CPU info for the primary CPU (or the only | | 168 | * Statically-allocated CPU info for the primary CPU (or the only |
169 | * CPU, on uniprocessors). The CPU info list is initialized to | | 169 | * CPU, on uniprocessors). The CPU info list is initialized to |
170 | * point at it. | | 170 | * point at it. |
171 | */ | | 171 | */ |
172 | struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = { | | 172 | struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = { |
173 | .ci_dev = 0, | | 173 | .ci_dev = 0, |
174 | .ci_self = &cpu_info_primary, | | 174 | .ci_self = &cpu_info_primary, |
175 | .ci_idepth = -1, | | 175 | .ci_idepth = -1, |
176 | .ci_curlwp = &lwp0, | | 176 | .ci_curlwp = &lwp0, |
177 | .ci_curldt = -1, | | 177 | .ci_curldt = -1, |
178 | }; | | 178 | }; |
179 | | | 179 | |
180 | struct cpu_info *cpu_info_list = &cpu_info_primary; | | 180 | struct cpu_info *cpu_info_list = &cpu_info_primary; |
181 | | | 181 | |
182 | #ifdef i386 | | 182 | #ifdef i386 |
183 | void cpu_set_tss_gates(struct cpu_info *); | | 183 | void cpu_set_tss_gates(struct cpu_info *); |
184 | #endif | | 184 | #endif |
185 | | | 185 | |
186 | static void cpu_init_idle_lwp(struct cpu_info *); | | 186 | static void cpu_init_idle_lwp(struct cpu_info *); |
187 | | | 187 | |
188 | uint32_t cpu_feature[7] __read_mostly; /* X86 CPUID feature bits */ | | 188 | uint32_t cpu_feature[7] __read_mostly; /* X86 CPUID feature bits */ |
189 | /* [0] basic features cpuid.1:%edx | | 189 | /* [0] basic features cpuid.1:%edx |
190 | * [1] basic features cpuid.1:%ecx (CPUID2_xxx bits) | | 190 | * [1] basic features cpuid.1:%ecx (CPUID2_xxx bits) |
191 | * [2] extended features cpuid:80000001:%edx | | 191 | * [2] extended features cpuid:80000001:%edx |
192 | * [3] extended features cpuid:80000001:%ecx | | 192 | * [3] extended features cpuid:80000001:%ecx |
193 | * [4] VIA padlock features | | 193 | * [4] VIA padlock features |
194 | * [5] structured extended features cpuid.7:%ebx | | 194 | * [5] structured extended features cpuid.7:%ebx |
195 | * [6] structured extended features cpuid.7:%ecx | | 195 | * [6] structured extended features cpuid.7:%ecx |
196 | */ | | 196 | */ |
197 | | | 197 | |
198 | #ifdef MULTIPROCESSOR | | 198 | #ifdef MULTIPROCESSOR |
199 | bool x86_mp_online; | | 199 | bool x86_mp_online; |
200 | paddr_t mp_trampoline_paddr = MP_TRAMPOLINE; | | 200 | paddr_t mp_trampoline_paddr = MP_TRAMPOLINE; |
201 | #endif | | 201 | #endif |
202 | #if NLAPIC > 0 | | 202 | #if NLAPIC > 0 |
203 | static vaddr_t cmos_data_mapping; | | 203 | static vaddr_t cmos_data_mapping; |
204 | #endif | | 204 | #endif |
205 | struct cpu_info *cpu_starting; | | 205 | struct cpu_info *cpu_starting; |
206 | | | 206 | |
207 | #ifdef MULTIPROCESSOR | | 207 | #ifdef MULTIPROCESSOR |
208 | void cpu_hatch(void *); | | 208 | void cpu_hatch(void *); |
209 | static void cpu_boot_secondary(struct cpu_info *ci); | | 209 | static void cpu_boot_secondary(struct cpu_info *ci); |
210 | static void cpu_start_secondary(struct cpu_info *ci); | | 210 | static void cpu_start_secondary(struct cpu_info *ci); |
211 | #if NLAPIC > 0 | | 211 | #if NLAPIC > 0 |
212 | static void cpu_copy_trampoline(paddr_t); | | 212 | static void cpu_copy_trampoline(paddr_t); |
213 | #endif | | 213 | #endif |
214 | #endif /* MULTIPROCESSOR */ | | 214 | #endif /* MULTIPROCESSOR */ |
215 | | | 215 | |
216 | /* | | 216 | /* |
217 | * Runs once per boot once multiprocessor goo has been detected and | | 217 | * Runs once per boot once multiprocessor goo has been detected and |
218 | * the local APIC on the boot processor has been mapped. | | 218 | * the local APIC on the boot processor has been mapped. |
219 | * | | 219 | * |
220 | * Called from lapic_boot_init() (from mpbios_scan()). | | 220 | * Called from lapic_boot_init() (from mpbios_scan()). |
221 | */ | | 221 | */ |
222 | #if NLAPIC > 0 | | 222 | #if NLAPIC > 0 |
223 | void | | 223 | void |
224 | cpu_init_first(void) | | 224 | cpu_init_first(void) |
225 | { | | 225 | { |
226 | | | 226 | |
227 | cpu_info_primary.ci_cpuid = lapic_cpu_number(); | | 227 | cpu_info_primary.ci_cpuid = lapic_cpu_number(); |
228 | | | 228 | |
229 | cmos_data_mapping = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_VAONLY); | | 229 | cmos_data_mapping = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_VAONLY); |
230 | if (cmos_data_mapping == 0) | | 230 | if (cmos_data_mapping == 0) |
231 | panic("No KVA for page 0"); | | 231 | panic("No KVA for page 0"); |
232 | pmap_kenter_pa(cmos_data_mapping, 0, VM_PROT_READ|VM_PROT_WRITE, 0); | | 232 | pmap_kenter_pa(cmos_data_mapping, 0, VM_PROT_READ|VM_PROT_WRITE, 0); |
233 | pmap_update(pmap_kernel()); | | 233 | pmap_update(pmap_kernel()); |
234 | } | | 234 | } |
235 | #endif | | 235 | #endif |
236 | | | 236 | |
237 | static int | | 237 | static int |
238 | cpu_match(device_t parent, cfdata_t match, void *aux) | | 238 | cpu_match(device_t parent, cfdata_t match, void *aux) |
239 | { | | 239 | { |
240 | | | 240 | |
241 | return 1; | | 241 | return 1; |
242 | } | | 242 | } |
243 | | | 243 | |
244 | #ifdef __HAVE_PCPU_AREA | | 244 | #ifdef __HAVE_PCPU_AREA |
245 | void | | 245 | void |
246 | cpu_pcpuarea_init(struct cpu_info *ci) | | 246 | cpu_pcpuarea_init(struct cpu_info *ci) |
247 | { | | 247 | { |
248 | struct vm_page *pg; | | 248 | struct vm_page *pg; |
249 | size_t i, npages; | | 249 | size_t i, npages; |
250 | vaddr_t base, va; | | 250 | vaddr_t base, va; |
251 | paddr_t pa; | | 251 | paddr_t pa; |
252 | | | 252 | |
253 | CTASSERT(sizeof(struct pcpu_entry) % PAGE_SIZE == 0); | | 253 | CTASSERT(sizeof(struct pcpu_entry) % PAGE_SIZE == 0); |
254 | | | 254 | |
255 | npages = sizeof(struct pcpu_entry) / PAGE_SIZE; | | 255 | npages = sizeof(struct pcpu_entry) / PAGE_SIZE; |
256 | base = (vaddr_t)&pcpuarea->ent[cpu_index(ci)]; | | 256 | base = (vaddr_t)&pcpuarea->ent[cpu_index(ci)]; |
257 | | | 257 | |
258 | for (i = 0; i < npages; i++) { | | 258 | for (i = 0; i < npages; i++) { |
259 | pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO); | | 259 | pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO); |
260 | if (pg == NULL) { | | 260 | if (pg == NULL) { |
261 | panic("failed to allocate pcpu PA"); | | 261 | panic("failed to allocate pcpu PA"); |
262 | } | | 262 | } |
263 | | | 263 | |
264 | va = base + i * PAGE_SIZE; | | 264 | va = base + i * PAGE_SIZE; |
265 | pa = VM_PAGE_TO_PHYS(pg); | | 265 | pa = VM_PAGE_TO_PHYS(pg); |
266 | | | 266 | |
267 | pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, 0); | | 267 | pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, 0); |
268 | } | | 268 | } |
269 | | | 269 | |
270 | pmap_update(pmap_kernel()); | | 270 | pmap_update(pmap_kernel()); |
271 | } | | 271 | } |
272 | #endif | | 272 | #endif |
273 | | | 273 | |
274 | static void | | 274 | static void |
275 | cpu_vm_init(struct cpu_info *ci) | | 275 | cpu_vm_init(struct cpu_info *ci) |
276 | { | | 276 | { |
277 | int ncolors = 2, i; | | 277 | int ncolors = 2, i; |
278 | | | 278 | |
279 | for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) { | | 279 | for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) { |
280 | struct x86_cache_info *cai; | | 280 | struct x86_cache_info *cai; |
281 | int tcolors; | | 281 | int tcolors; |
282 | | | 282 | |
283 | cai = &ci->ci_cinfo[i]; | | 283 | cai = &ci->ci_cinfo[i]; |
284 | | | 284 | |
285 | tcolors = atop(cai->cai_totalsize); | | 285 | tcolors = atop(cai->cai_totalsize); |
286 | switch (cai->cai_associativity) { | | 286 | switch (cai->cai_associativity) { |
287 | case 0xff: | | 287 | case 0xff: |
288 | tcolors = 1; /* fully associative */ | | 288 | tcolors = 1; /* fully associative */ |
289 | break; | | 289 | break; |
290 | case 0: | | 290 | case 0: |
291 | case 1: | | 291 | case 1: |
292 | break; | | 292 | break; |
293 | default: | | 293 | default: |
294 | tcolors /= cai->cai_associativity; | | 294 | tcolors /= cai->cai_associativity; |
295 | } | | 295 | } |
296 | ncolors = uimax(ncolors, tcolors); | | 296 | ncolors = uimax(ncolors, tcolors); |
297 | /* | | 297 | /* |
298 | * If the desired number of colors is not a power of | | 298 | * If the desired number of colors is not a power of |
299 | * two, it won't be good. Find the greatest power of | | 299 | * two, it won't be good. Find the greatest power of |
300 | * two which is an even divisor of the number of colors, | | 300 | * two which is an even divisor of the number of colors, |
301 | * to preserve even coloring of pages. | | 301 | * to preserve even coloring of pages. |
302 | */ | | 302 | */ |
303 | if (ncolors & (ncolors - 1) ) { | | 303 | if (ncolors & (ncolors - 1) ) { |
304 | int try, picked = 1; | | 304 | int try, picked = 1; |
305 | for (try = 1; try < ncolors; try *= 2) { | | 305 | for (try = 1; try < ncolors; try *= 2) { |
306 | if (ncolors % try == 0) picked = try; | | 306 | if (ncolors % try == 0) picked = try; |
307 | } | | 307 | } |
308 | if (picked == 1) { | | 308 | if (picked == 1) { |
309 | panic("desired number of cache colors %d is " | | 309 | panic("desired number of cache colors %d is " |
310 | " > 1, but not even!", ncolors); | | 310 | " > 1, but not even!", ncolors); |
311 | } | | 311 | } |
312 | ncolors = picked; | | 312 | ncolors = picked; |
313 | } | | 313 | } |
314 | } | | 314 | } |
315 | | | 315 | |
316 | /* | | 316 | /* |
317 | * Knowing the size of the largest cache on this CPU, potentially | | 317 | * Knowing the size of the largest cache on this CPU, potentially |
318 | * re-color our pages. | | 318 | * re-color our pages. |
319 | */ | | 319 | */ |
320 | aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors); | | 320 | aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors); |
321 | uvm_page_recolor(ncolors); | | 321 | uvm_page_recolor(ncolors); |
322 | | | 322 | |
323 | pmap_tlb_cpu_init(ci); | | 323 | pmap_tlb_cpu_init(ci); |
324 | #ifndef __HAVE_DIRECT_MAP | | 324 | #ifndef __HAVE_DIRECT_MAP |
325 | pmap_vpage_cpu_init(ci); | | 325 | pmap_vpage_cpu_init(ci); |
326 | #endif | | 326 | #endif |
327 | } | | 327 | } |
328 | | | 328 | |
329 | static void | | 329 | static void |
330 | cpu_attach(device_t parent, device_t self, void *aux) | | 330 | cpu_attach(device_t parent, device_t self, void *aux) |
331 | { | | 331 | { |
332 | struct cpu_softc *sc = device_private(self); | | 332 | struct cpu_softc *sc = device_private(self); |
333 | struct cpu_attach_args *caa = aux; | | 333 | struct cpu_attach_args *caa = aux; |
334 | struct cpu_info *ci; | | 334 | struct cpu_info *ci; |
335 | uintptr_t ptr; | | 335 | uintptr_t ptr; |
336 | #if NLAPIC > 0 | | 336 | #if NLAPIC > 0 |
337 | int cpunum = caa->cpu_number; | | 337 | int cpunum = caa->cpu_number; |
338 | #endif | | 338 | #endif |
339 | static bool again; | | 339 | static bool again; |
340 | | | 340 | |
341 | sc->sc_dev = self; | | 341 | sc->sc_dev = self; |
342 | | | 342 | |
343 | if (ncpu > maxcpus) { | | 343 | if (ncpu > maxcpus) { |
344 | #ifndef _LP64 | | 344 | #ifndef _LP64 |
345 | aprint_error(": too many CPUs, please use NetBSD/amd64\n"); | | 345 | aprint_error(": too many CPUs, please use NetBSD/amd64\n"); |
346 | #else | | 346 | #else |
347 | aprint_error(": too many CPUs\n"); | | 347 | aprint_error(": too many CPUs\n"); |
348 | #endif | | 348 | #endif |
349 | return; | | 349 | return; |
350 | } | | 350 | } |
351 | | | 351 | |
352 | /* | | 352 | /* |
353 | * If we're an Application Processor, allocate a cpu_info | | 353 | * If we're an Application Processor, allocate a cpu_info |
354 | * structure, otherwise use the primary's. | | 354 | * structure, otherwise use the primary's. |
355 | */ | | 355 | */ |
356 | if (caa->cpu_role == CPU_ROLE_AP) { | | 356 | if (caa->cpu_role == CPU_ROLE_AP) { |
357 | if ((boothowto & RB_MD1) != 0) { | | 357 | if ((boothowto & RB_MD1) != 0) { |
358 | aprint_error(": multiprocessor boot disabled\n"); | | 358 | aprint_error(": multiprocessor boot disabled\n"); |
359 | if (!pmf_device_register(self, NULL, NULL)) | | 359 | if (!pmf_device_register(self, NULL, NULL)) |
360 | aprint_error_dev(self, | | 360 | aprint_error_dev(self, |
361 | "couldn't establish power handler\n"); | | 361 | "couldn't establish power handler\n"); |
362 | return; | | 362 | return; |
363 | } | | 363 | } |
364 | aprint_naive(": Application Processor\n"); | | 364 | aprint_naive(": Application Processor\n"); |
365 | ptr = (uintptr_t)uvm_km_alloc(kernel_map, | | 365 | ptr = (uintptr_t)uvm_km_alloc(kernel_map, |
366 | sizeof(*ci) + CACHE_LINE_SIZE - 1, 0, | | 366 | sizeof(*ci) + CACHE_LINE_SIZE - 1, 0, |
367 | UVM_KMF_WIRED|UVM_KMF_ZERO); | | 367 | UVM_KMF_WIRED|UVM_KMF_ZERO); |
368 | ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE); | | 368 | ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE); |
369 | ci->ci_curldt = -1; | | 369 | ci->ci_curldt = -1; |
370 | } else { | | 370 | } else { |
371 | aprint_naive(": %s Processor\n", | | 371 | aprint_naive(": %s Processor\n", |
372 | caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot"); | | 372 | caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot"); |
373 | ci = &cpu_info_primary; | | 373 | ci = &cpu_info_primary; |
374 | #if NLAPIC > 0 | | 374 | #if NLAPIC > 0 |
375 | if (cpunum != lapic_cpu_number()) { | | 375 | if (cpunum != lapic_cpu_number()) { |
376 | /* XXX should be done earlier. */ | | 376 | /* XXX should be done earlier. */ |
377 | uint32_t reg; | | 377 | uint32_t reg; |
378 | aprint_verbose("\n"); | | 378 | aprint_verbose("\n"); |
379 | aprint_verbose_dev(self, "running CPU at apic %d" | | 379 | aprint_verbose_dev(self, "running CPU at apic %d" |
380 | " instead of at expected %d", lapic_cpu_number(), | | 380 | " instead of at expected %d", lapic_cpu_number(), |
381 | cpunum); | | 381 | cpunum); |
382 | reg = lapic_readreg(LAPIC_ID); | | 382 | reg = lapic_readreg(LAPIC_ID); |
383 | lapic_writereg(LAPIC_ID, (reg & ~LAPIC_ID_MASK) | | | 383 | lapic_writereg(LAPIC_ID, (reg & ~LAPIC_ID_MASK) | |
384 | (cpunum << LAPIC_ID_SHIFT)); | | 384 | (cpunum << LAPIC_ID_SHIFT)); |
385 | } | | 385 | } |
386 | if (cpunum != lapic_cpu_number()) { | | 386 | if (cpunum != lapic_cpu_number()) { |
387 | aprint_error_dev(self, "unable to reset apic id\n"); | | 387 | aprint_error_dev(self, "unable to reset apic id\n"); |
388 | } | | 388 | } |
389 | #endif | | 389 | #endif |
390 | } | | 390 | } |
391 | | | 391 | |
392 | ci->ci_self = ci; | | 392 | ci->ci_self = ci; |
393 | sc->sc_info = ci; | | 393 | sc->sc_info = ci; |
394 | ci->ci_dev = self; | | 394 | ci->ci_dev = self; |
395 | ci->ci_acpiid = caa->cpu_id; | | 395 | ci->ci_acpiid = caa->cpu_id; |
396 | ci->ci_cpuid = caa->cpu_number; | | 396 | ci->ci_cpuid = caa->cpu_number; |
397 | ci->ci_func = caa->cpu_func; | | 397 | ci->ci_func = caa->cpu_func; |
398 | ci->ci_kfpu_spl = -1; | | 398 | ci->ci_kfpu_spl = -1; |
399 | aprint_normal("\n"); | | 399 | aprint_normal("\n"); |
400 | | | 400 | |
401 | /* Must be before mi_cpu_attach(). */ | | 401 | /* Must be before mi_cpu_attach(). */ |
402 | cpu_vm_init(ci); | | 402 | cpu_vm_init(ci); |
403 | | | 403 | |
404 | if (caa->cpu_role == CPU_ROLE_AP) { | | 404 | if (caa->cpu_role == CPU_ROLE_AP) { |
405 | int error; | | 405 | int error; |
406 | | | 406 | |
407 | error = mi_cpu_attach(ci); | | 407 | error = mi_cpu_attach(ci); |
408 | if (error != 0) { | | 408 | if (error != 0) { |
409 | aprint_error_dev(self, | | 409 | aprint_error_dev(self, |
410 | "mi_cpu_attach failed with %d\n", error); | | 410 | "mi_cpu_attach failed with %d\n", error); |
411 | return; | | 411 | return; |
412 | } | | 412 | } |
413 | #ifdef __HAVE_PCPU_AREA | | 413 | #ifdef __HAVE_PCPU_AREA |
414 | cpu_pcpuarea_init(ci); | | 414 | cpu_pcpuarea_init(ci); |
415 | #endif | | 415 | #endif |
416 | cpu_init_tss(ci); | | 416 | cpu_init_tss(ci); |
417 | } else { | | 417 | } else { |
418 | KASSERT(ci->ci_data.cpu_idlelwp != NULL); | | 418 | KASSERT(ci->ci_data.cpu_idlelwp != NULL); |
419 | #if NACPICA > 0 | | 419 | #if NACPICA > 0 |
420 | /* Parse out NUMA info for cpu_identify(). */ | | 420 | /* Parse out NUMA info for cpu_identify(). */ |
421 | acpisrat_init(); | | 421 | acpisrat_init(); |
422 | #endif | | 422 | #endif |
423 | } | | 423 | } |
424 | | | 424 | |
425 | #ifdef SVS | | 425 | #ifdef SVS |
426 | cpu_svs_init(ci); | | 426 | cpu_svs_init(ci); |
427 | #endif | | 427 | #endif |
428 | | | 428 | |
429 | pmap_reference(pmap_kernel()); | | 429 | pmap_reference(pmap_kernel()); |
430 | ci->ci_pmap = pmap_kernel(); | | 430 | ci->ci_pmap = pmap_kernel(); |
431 | ci->ci_tlbstate = TLBSTATE_STALE; | | 431 | ci->ci_tlbstate = TLBSTATE_STALE; |
432 | | | 432 | |
433 | /* | | 433 | /* |
434 | * Boot processor may not be attached first, but the below | | 434 | * Boot processor may not be attached first, but the below |
435 | * must be done to allow booting other processors. | | 435 | * must be done to allow booting other processors. |
436 | */ | | 436 | */ |
437 | if (!again) { | | 437 | if (!again) { |
438 | /* Make sure DELAY() (likely i8254_delay()) is initialized. */ | | 438 | /* Make sure DELAY() (likely i8254_delay()) is initialized. */ |
439 | DELAY(1); | | 439 | DELAY(1); |
440 | | | 440 | |
441 | /* | | 441 | /* |
442 | * Basic init. Compute an approximate frequency for the TSC | | 442 | * Basic init. Compute an approximate frequency for the TSC |
443 | * using the i8254. If there's a HPET we'll redo it later. | | 443 | * using the i8254. If there's a HPET we'll redo it later. |
444 | */ | | 444 | */ |
445 | atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY); | | 445 | atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY); |
446 | cpu_intr_init(ci); | | 446 | cpu_intr_init(ci); |
447 | tsc_setfunc(ci); | | 447 | tsc_setfunc(ci); |
448 | cpu_get_tsc_freq(ci); | | 448 | cpu_get_tsc_freq(ci); |
449 | cpu_init(ci); | | 449 | cpu_init(ci); |
450 | #ifdef i386 | | 450 | #ifdef i386 |
451 | cpu_set_tss_gates(ci); | | 451 | cpu_set_tss_gates(ci); |
452 | #endif | | 452 | #endif |
453 | pmap_cpu_init_late(ci); | | 453 | pmap_cpu_init_late(ci); |
454 | #if NLAPIC > 0 | | 454 | #if NLAPIC > 0 |
455 | if (caa->cpu_role != CPU_ROLE_SP) { | | 455 | if (caa->cpu_role != CPU_ROLE_SP) { |
456 | /* Enable lapic. */ | | 456 | /* Enable lapic. */ |
457 | lapic_enable(); | | 457 | lapic_enable(); |
458 | lapic_set_lvt(); | | 458 | lapic_set_lvt(); |
459 | if (!vm_guest_is_xenpvh_or_pvhvm()) | | 459 | if (!vm_guest_is_xenpvh_or_pvhvm()) |
460 | lapic_calibrate_timer(false); | | 460 | lapic_calibrate_timer(false); |
461 | } | | 461 | } |
462 | #endif | | 462 | #endif |
463 | kcsan_cpu_init(ci); | | 463 | kcsan_cpu_init(ci); |
464 | again = true; | | 464 | again = true; |
465 | } | | 465 | } |
466 | | | 466 | |
467 | /* further PCB init done later. */ | | 467 | /* further PCB init done later. */ |
468 | | | 468 | |
469 | switch (caa->cpu_role) { | | 469 | switch (caa->cpu_role) { |
470 | case CPU_ROLE_SP: | | 470 | case CPU_ROLE_SP: |
471 | atomic_or_32(&ci->ci_flags, CPUF_SP); | | 471 | atomic_or_32(&ci->ci_flags, CPUF_SP); |
472 | cpu_identify(ci); | | 472 | cpu_identify(ci); |
473 | x86_errata(); | | 473 | x86_errata(); |
474 | x86_cpu_idle_init(); | | 474 | x86_cpu_idle_init(); |
475 | #ifdef XENPVHVM | | 475 | #ifdef XENPVHVM |
476 | xen_hvm_init_cpu(ci); | | 476 | xen_hvm_init_cpu(ci); |
477 | #endif | | 477 | #endif |
478 | break; | | 478 | break; |
479 | | | 479 | |
480 | case CPU_ROLE_BP: | | 480 | case CPU_ROLE_BP: |
481 | atomic_or_32(&ci->ci_flags, CPUF_BSP); | | 481 | atomic_or_32(&ci->ci_flags, CPUF_BSP); |
482 | cpu_identify(ci); | | 482 | cpu_identify(ci); |
483 | x86_errata(); | | 483 | x86_errata(); |
484 | x86_cpu_idle_init(); | | 484 | x86_cpu_idle_init(); |
485 | #ifdef XENPVHVM | | 485 | #ifdef XENPVHVM |
486 | xen_hvm_init_cpu(ci); | | 486 | xen_hvm_init_cpu(ci); |
487 | #endif | | 487 | #endif |
488 | break; | | 488 | break; |
489 | | | 489 | |
490 | #ifdef MULTIPROCESSOR | | 490 | #ifdef MULTIPROCESSOR |
491 | case CPU_ROLE_AP: | | 491 | case CPU_ROLE_AP: |
492 | /* | | 492 | /* |
493 | * report on an AP | | 493 | * report on an AP |
494 | */ | | 494 | */ |
495 | cpu_intr_init(ci); | | 495 | cpu_intr_init(ci); |
496 | idt_vec_init_cpu_md(&ci->ci_idtvec, cpu_index(ci)); | | 496 | idt_vec_init_cpu_md(&ci->ci_idtvec, cpu_index(ci)); |
497 | gdt_alloc_cpu(ci); | | 497 | gdt_alloc_cpu(ci); |
498 | #ifdef i386 | | 498 | #ifdef i386 |
499 | cpu_set_tss_gates(ci); | | 499 | cpu_set_tss_gates(ci); |
500 | #endif | | 500 | #endif |
501 | pmap_cpu_init_late(ci); | | 501 | pmap_cpu_init_late(ci); |
502 | cpu_start_secondary(ci); | | 502 | cpu_start_secondary(ci); |
503 | if (ci->ci_flags & CPUF_PRESENT) { | | 503 | if (ci->ci_flags & CPUF_PRESENT) { |
504 | struct cpu_info *tmp; | | 504 | struct cpu_info *tmp; |
505 | | | 505 | |
506 | cpu_identify(ci); | | 506 | cpu_identify(ci); |
507 | tmp = cpu_info_list; | | 507 | tmp = cpu_info_list; |
508 | while (tmp->ci_next) | | 508 | while (tmp->ci_next) |
509 | tmp = tmp->ci_next; | | 509 | tmp = tmp->ci_next; |
510 | | | 510 | |
511 | tmp->ci_next = ci; | | 511 | tmp->ci_next = ci; |
512 | } | | 512 | } |
513 | break; | | 513 | break; |
514 | #endif | | 514 | #endif |
515 | | | 515 | |
516 | default: | | 516 | default: |
517 | panic("unknown processor type??\n"); | | 517 | panic("unknown processor type??\n"); |
518 | } | | 518 | } |
519 | | | 519 | |
520 | pat_init(ci); | | 520 | pat_init(ci); |
521 | | | 521 | |
522 | if (!pmf_device_register1(self, cpu_suspend, cpu_resume, cpu_shutdown)) | | 522 | if (!pmf_device_register1(self, cpu_suspend, cpu_resume, cpu_shutdown)) |
523 | aprint_error_dev(self, "couldn't establish power handler\n"); | | 523 | aprint_error_dev(self, "couldn't establish power handler\n"); |
524 | | | 524 | |
525 | #ifdef MULTIPROCESSOR | | 525 | #ifdef MULTIPROCESSOR |
526 | if (mp_verbose) { | | 526 | if (mp_verbose) { |
527 | struct lwp *l = ci->ci_data.cpu_idlelwp; | | 527 | struct lwp *l = ci->ci_data.cpu_idlelwp; |
528 | struct pcb *pcb = lwp_getpcb(l); | | 528 | struct pcb *pcb = lwp_getpcb(l); |
529 | | | 529 | |
530 | aprint_verbose_dev(self, | | 530 | aprint_verbose_dev(self, |
531 | "idle lwp at %p, idle sp at %p\n", | | 531 | "idle lwp at %p, idle sp at %p\n", |
532 | l, | | 532 | l, |
533 | #ifdef i386 | | 533 | #ifdef i386 |
534 | (void *)pcb->pcb_esp | | 534 | (void *)pcb->pcb_esp |
535 | #else | | 535 | #else |
536 | (void *)pcb->pcb_rsp | | 536 | (void *)pcb->pcb_rsp |
537 | #endif | | 537 | #endif |
538 | ); | | 538 | ); |
539 | } | | 539 | } |
540 | #endif | | 540 | #endif |
541 | | | 541 | |
542 | /* | | 542 | /* |
543 | * Postpone the "cpufeaturebus" scan. | | 543 | * Postpone the "cpufeaturebus" scan. |
544 | * It is safe to scan the pseudo-bus | | 544 | * It is safe to scan the pseudo-bus |
545 | * only after all CPUs have attached. | | 545 | * only after all CPUs have attached. |
546 | */ | | 546 | */ |
547 | (void)config_defer(self, cpu_defer); | | 547 | (void)config_defer(self, cpu_defer); |
548 | } | | 548 | } |
549 | | | 549 | |
550 | static void | | 550 | static void |
551 | cpu_defer(device_t self) | | 551 | cpu_defer(device_t self) |
552 | { | | 552 | { |
553 | cpu_rescan(self, NULL, NULL); | | 553 | cpu_rescan(self, NULL, NULL); |
554 | } | | 554 | } |
555 | | | 555 | |
556 | static int | | 556 | static int |
557 | cpu_rescan(device_t self, const char *ifattr, const int *locators) | | 557 | cpu_rescan(device_t self, const char *ifattr, const int *locators) |
558 | { | | 558 | { |
559 | struct cpu_softc *sc = device_private(self); | | 559 | struct cpu_softc *sc = device_private(self); |
560 | struct cpufeature_attach_args cfaa; | | 560 | struct cpufeature_attach_args cfaa; |
561 | struct cpu_info *ci = sc->sc_info; | | 561 | struct cpu_info *ci = sc->sc_info; |
562 | | | 562 | |
563 | /* | | 563 | /* |
564 | * If we booted with RB_MD1 to disable multiprocessor, the | | 564 | * If we booted with RB_MD1 to disable multiprocessor, the |
565 | * auto-configuration data still contains the additional | | 565 | * auto-configuration data still contains the additional |
566 | * CPUs. But their initialization was mostly bypassed | | 566 | * CPUs. But their initialization was mostly bypassed |
567 | * during attach, so we have to make sure we don't look at | | 567 | * during attach, so we have to make sure we don't look at |
568 | * their featurebus info, since it wasn't retrieved. | | 568 | * their featurebus info, since it wasn't retrieved. |
569 | */ | | 569 | */ |
570 | if (ci == NULL) | | 570 | if (ci == NULL) |
571 | return 0; | | 571 | return 0; |
572 | | | 572 | |
573 | memset(&cfaa, 0, sizeof(cfaa)); | | 573 | memset(&cfaa, 0, sizeof(cfaa)); |
574 | cfaa.ci = ci; | | 574 | cfaa.ci = ci; |
575 | | | 575 | |
576 | if (ifattr_match(ifattr, "cpufeaturebus")) { | | 576 | if (ifattr_match(ifattr, "cpufeaturebus")) { |
577 | if (ci->ci_frequency == NULL) { | | 577 | if (ci->ci_frequency == NULL) { |
578 | cfaa.name = "frequency"; | | 578 | cfaa.name = "frequency"; |
579 | ci->ci_frequency = config_found_ia(self, | | 579 | ci->ci_frequency = config_found_ia(self, |
580 | "cpufeaturebus", &cfaa, NULL); | | 580 | "cpufeaturebus", &cfaa, NULL); |
581 | } | | 581 | } |
582 | | | 582 | |
583 | if (ci->ci_padlock == NULL) { | | 583 | if (ci->ci_padlock == NULL) { |
584 | cfaa.name = "padlock"; | | 584 | cfaa.name = "padlock"; |
585 | ci->ci_padlock = config_found_ia(self, | | 585 | ci->ci_padlock = config_found_ia(self, |
586 | "cpufeaturebus", &cfaa, NULL); | | 586 | "cpufeaturebus", &cfaa, NULL); |
587 | } | | 587 | } |
588 | | | 588 | |
589 | if (ci->ci_temperature == NULL) { | | 589 | if (ci->ci_temperature == NULL) { |
590 | cfaa.name = "temperature"; | | 590 | cfaa.name = "temperature"; |
591 | ci->ci_temperature = config_found_ia(self, | | 591 | ci->ci_temperature = config_found_ia(self, |
592 | "cpufeaturebus", &cfaa, NULL); | | 592 | "cpufeaturebus", &cfaa, NULL); |
593 | } | | 593 | } |
594 | | | 594 | |
595 | if (ci->ci_vm == NULL) { | | 595 | if (ci->ci_vm == NULL) { |
596 | cfaa.name = "vm"; | | 596 | cfaa.name = "vm"; |
597 | ci->ci_vm = config_found_ia(self, | | 597 | ci->ci_vm = config_found_ia(self, |
598 | "cpufeaturebus", &cfaa, NULL); | | 598 | "cpufeaturebus", &cfaa, NULL); |
599 | } | | 599 | } |
600 | } | | 600 | } |
601 | | | 601 | |
602 | return 0; | | 602 | return 0; |
603 | } | | 603 | } |
604 | | | 604 | |
605 | static void | | 605 | static void |
606 | cpu_childdetached(device_t self, device_t child) | | 606 | cpu_childdetached(device_t self, device_t child) |
607 | { | | 607 | { |
608 | struct cpu_softc *sc = device_private(self); | | 608 | struct cpu_softc *sc = device_private(self); |
609 | struct cpu_info *ci = sc->sc_info; | | 609 | struct cpu_info *ci = sc->sc_info; |
610 | | | 610 | |
611 | if (ci->ci_frequency == child) | | 611 | if (ci->ci_frequency == child) |
612 | ci->ci_frequency = NULL; | | 612 | ci->ci_frequency = NULL; |
613 | | | 613 | |
614 | if (ci->ci_padlock == child) | | 614 | if (ci->ci_padlock == child) |
615 | ci->ci_padlock = NULL; | | 615 | ci->ci_padlock = NULL; |
616 | | | 616 | |
617 | if (ci->ci_temperature == child) | | 617 | if (ci->ci_temperature == child) |
618 | ci->ci_temperature = NULL; | | 618 | ci->ci_temperature = NULL; |
619 | | | 619 | |
620 | if (ci->ci_vm == child) | | 620 | if (ci->ci_vm == child) |
621 | ci->ci_vm = NULL; | | 621 | ci->ci_vm = NULL; |
622 | } | | 622 | } |
623 | | | 623 | |
624 | /* | | 624 | /* |
625 | * Initialize the processor appropriately. | | 625 | * Initialize the processor appropriately. |
626 | */ | | 626 | */ |
627 | | | 627 | |
628 | void | | 628 | void |
629 | cpu_init(struct cpu_info *ci) | | 629 | cpu_init(struct cpu_info *ci) |
630 | { | | 630 | { |
631 | extern int x86_fpu_save; | | 631 | extern int x86_fpu_save; |
632 | uint32_t cr4 = 0; | | 632 | uint32_t cr4 = 0; |
633 | | | 633 | |
634 | lcr0(rcr0() | CR0_WP); | | 634 | lcr0(rcr0() | CR0_WP); |
635 | | | 635 | |
636 | /* If global TLB caching is supported, enable it */ | | 636 | /* If global TLB caching is supported, enable it */ |
637 | if (cpu_feature[0] & CPUID_PGE) | | 637 | if (cpu_feature[0] & CPUID_PGE) |
638 | cr4 |= CR4_PGE; | | 638 | cr4 |= CR4_PGE; |
639 | | | 639 | |
640 | /* | | 640 | /* |
641 | * If we have FXSAVE/FXRESTOR, use them. | | 641 | * If we have FXSAVE/FXRESTOR, use them. |
642 | */ | | 642 | */ |
643 | if (cpu_feature[0] & CPUID_FXSR) { | | 643 | if (cpu_feature[0] & CPUID_FXSR) { |
644 | cr4 |= CR4_OSFXSR; | | 644 | cr4 |= CR4_OSFXSR; |
645 | | | 645 | |
646 | /* | | 646 | /* |
647 | * If we have SSE/SSE2, enable XMM exceptions. | | 647 | * If we have SSE/SSE2, enable XMM exceptions. |
648 | */ | | 648 | */ |
649 | if (cpu_feature[0] & (CPUID_SSE|CPUID_SSE2)) | | 649 | if (cpu_feature[0] & (CPUID_SSE|CPUID_SSE2)) |
650 | cr4 |= CR4_OSXMMEXCPT; | | 650 | cr4 |= CR4_OSXMMEXCPT; |
651 | } | | 651 | } |
652 | | | 652 | |
653 | /* If xsave is supported, enable it */ | | 653 | /* If xsave is supported, enable it */ |
654 | if (cpu_feature[1] & CPUID2_XSAVE) | | 654 | if (cpu_feature[1] & CPUID2_XSAVE) |
655 | cr4 |= CR4_OSXSAVE; | | 655 | cr4 |= CR4_OSXSAVE; |
656 | | | 656 | |
657 | /* If SMEP is supported, enable it */ | | 657 | /* If SMEP is supported, enable it */ |
658 | if (cpu_feature[5] & CPUID_SEF_SMEP) | | 658 | if (cpu_feature[5] & CPUID_SEF_SMEP) |
659 | cr4 |= CR4_SMEP; | | 659 | cr4 |= CR4_SMEP; |
660 | | | 660 | |
661 | /* If SMAP is supported, enable it */ | | 661 | /* If SMAP is supported, enable it */ |
662 | if (cpu_feature[5] & CPUID_SEF_SMAP) | | 662 | if (cpu_feature[5] & CPUID_SEF_SMAP) |
663 | cr4 |= CR4_SMAP; | | 663 | cr4 |= CR4_SMAP; |
664 | | | 664 | |
665 | #ifdef SVS | | 665 | #ifdef SVS |
666 | /* If PCID is supported, enable it */ | | 666 | /* If PCID is supported, enable it */ |
667 | if (svs_pcid) | | 667 | if (svs_pcid) |
668 | cr4 |= CR4_PCIDE; | | 668 | cr4 |= CR4_PCIDE; |
669 | #endif | | 669 | #endif |
670 | | | 670 | |
671 | if (cr4) { | | 671 | if (cr4) { |
672 | cr4 |= rcr4(); | | 672 | cr4 |= rcr4(); |
673 | lcr4(cr4); | | 673 | lcr4(cr4); |
674 | } | | 674 | } |
675 | | | 675 | |
676 | /* | | 676 | /* |
677 | * Changing CR4 register may change cpuid values. For example, setting | | 677 | * Changing CR4 register may change cpuid values. For example, setting |
678 | * CR4_OSXSAVE sets CPUID2_OSXSAVE. The CPUID2_OSXSAVE is in | | 678 | * CR4_OSXSAVE sets CPUID2_OSXSAVE. The CPUID2_OSXSAVE is in |
679 | * ci_feat_val[1], so update it. | | 679 | * ci_feat_val[1], so update it. |
680 | * XXX Other than ci_feat_val[1] might be changed. | | 680 | * XXX Other than ci_feat_val[1] might be changed. |
681 | */ | | 681 | */ |
682 | if (cpuid_level >= 1) { | | 682 | if (cpuid_level >= 1) { |
683 | u_int descs[4]; | | 683 | u_int descs[4]; |
684 | | | 684 | |
685 | x86_cpuid(1, descs); | | 685 | x86_cpuid(1, descs); |
686 | ci->ci_feat_val[1] = descs[2]; | | 686 | ci->ci_feat_val[1] = descs[2]; |
687 | } | | 687 | } |
688 | | | 688 | |
689 | if (x86_fpu_save >= FPU_SAVE_FXSAVE) { | | 689 | if (x86_fpu_save >= FPU_SAVE_FXSAVE) { |
690 | fpuinit_mxcsr_mask(); | | 690 | fpuinit_mxcsr_mask(); |
691 | } | | 691 | } |
692 | | | 692 | |
693 | /* If xsave is enabled, enable all fpu features */ | | 693 | /* If xsave is enabled, enable all fpu features */ |
694 | if (cr4 & CR4_OSXSAVE) | | 694 | if (cr4 & CR4_OSXSAVE) |
695 | wrxcr(0, x86_xsave_features & XCR0_FPU); | | 695 | wrxcr(0, x86_xsave_features & XCR0_FPU); |
696 | | | 696 | |
697 | #ifdef MTRR | | 697 | #ifdef MTRR |
698 | /* | | 698 | /* |
699 | * On a P6 or above, initialize MTRR's if the hardware supports them. | | 699 | * On a P6 or above, initialize MTRR's if the hardware supports them. |
700 | */ | | 700 | */ |
701 | if (cpu_feature[0] & CPUID_MTRR) { | | 701 | if (cpu_feature[0] & CPUID_MTRR) { |
702 | if ((ci->ci_flags & CPUF_AP) == 0) | | 702 | if ((ci->ci_flags & CPUF_AP) == 0) |
703 | i686_mtrr_init_first(); | | 703 | i686_mtrr_init_first(); |
704 | mtrr_init_cpu(ci); | | 704 | mtrr_init_cpu(ci); |
705 | } | | 705 | } |
706 | | | 706 | |
707 | #ifdef i386 | | 707 | #ifdef i386 |
708 | if (strcmp((char *)(ci->ci_vendor), "AuthenticAMD") == 0) { | | 708 | if (strcmp((char *)(ci->ci_vendor), "AuthenticAMD") == 0) { |
709 | /* | | 709 | /* |
710 | * Must be a K6-2 Step >= 7 or a K6-III. | | 710 | * Must be a K6-2 Step >= 7 or a K6-III. |
711 | */ | | 711 | */ |
712 | if (CPUID_TO_FAMILY(ci->ci_signature) == 5) { | | 712 | if (CPUID_TO_FAMILY(ci->ci_signature) == 5) { |
713 | if (CPUID_TO_MODEL(ci->ci_signature) > 8 || | | 713 | if (CPUID_TO_MODEL(ci->ci_signature) > 8 || |
714 | (CPUID_TO_MODEL(ci->ci_signature) == 8 && | | 714 | (CPUID_TO_MODEL(ci->ci_signature) == 8 && |
715 | CPUID_TO_STEPPING(ci->ci_signature) >= 7)) { | | 715 | CPUID_TO_STEPPING(ci->ci_signature) >= 7)) { |
716 | mtrr_funcs = &k6_mtrr_funcs; | | 716 | mtrr_funcs = &k6_mtrr_funcs; |
717 | k6_mtrr_init_first(); | | 717 | k6_mtrr_init_first(); |
718 | mtrr_init_cpu(ci); | | 718 | mtrr_init_cpu(ci); |
719 | } | | 719 | } |
720 | } | | 720 | } |
721 | } | | 721 | } |
722 | #endif /* i386 */ | | 722 | #endif /* i386 */ |
723 | #endif /* MTRR */ | | 723 | #endif /* MTRR */ |
724 | | | 724 | |
725 | if (ci != &cpu_info_primary) { | | 725 | if (ci != &cpu_info_primary) { |
726 | /* Synchronize TSC */ | | 726 | /* Synchronize TSC */ |
727 | atomic_or_32(&ci->ci_flags, CPUF_RUNNING); | | 727 | atomic_or_32(&ci->ci_flags, CPUF_RUNNING); |
728 | tsc_sync_ap(ci); | | 728 | tsc_sync_ap(ci); |
729 | } else { | | 729 | } else { |
730 | atomic_or_32(&ci->ci_flags, CPUF_RUNNING); | | 730 | atomic_or_32(&ci->ci_flags, CPUF_RUNNING); |
731 | } | | 731 | } |
732 | } | | 732 | } |
733 | | | 733 | |
734 | #ifdef MULTIPROCESSOR | | 734 | #ifdef MULTIPROCESSOR |
735 | void | | 735 | void |
736 | cpu_boot_secondary_processors(void) | | 736 | cpu_boot_secondary_processors(void) |
737 | { | | 737 | { |
738 | struct cpu_info *ci; | | 738 | struct cpu_info *ci; |
739 | kcpuset_t *cpus; | | 739 | kcpuset_t *cpus; |
740 | u_long i; | | 740 | u_long i; |
741 | | | 741 | |
742 | /* Now that we know the number of CPUs, patch the text segment. */ | | 742 | /* Now that we know the number of CPUs, patch the text segment. */ |
743 | x86_patch(false); | | 743 | x86_patch(false); |
744 | | | 744 | |
745 | #if NACPICA > 0 | | 745 | #if NACPICA > 0 |
746 | /* Finished with NUMA info for now. */ | | 746 | /* Finished with NUMA info for now. */ |
747 | acpisrat_exit(); | | 747 | acpisrat_exit(); |
748 | #endif | | 748 | #endif |
749 | | | 749 | |
750 | kcpuset_create(&cpus, true); | | 750 | kcpuset_create(&cpus, true); |
751 | kcpuset_set(cpus, cpu_index(curcpu())); | | 751 | kcpuset_set(cpus, cpu_index(curcpu())); |
752 | for (i = 0; i < maxcpus; i++) { | | 752 | for (i = 0; i < maxcpus; i++) { |
753 | ci = cpu_lookup(i); | | 753 | ci = cpu_lookup(i); |
754 | if (ci == NULL) | | 754 | if (ci == NULL) |
755 | continue; | | 755 | continue; |
756 | if (ci->ci_data.cpu_idlelwp == NULL) | | 756 | if (ci->ci_data.cpu_idlelwp == NULL) |
757 | continue; | | 757 | continue; |
758 | if ((ci->ci_flags & CPUF_PRESENT) == 0) | | 758 | if ((ci->ci_flags & CPUF_PRESENT) == 0) |
759 | continue; | | 759 | continue; |
760 | if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) | | 760 | if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) |
761 | continue; | | 761 | continue; |
762 | cpu_boot_secondary(ci); | | 762 | cpu_boot_secondary(ci); |
763 | kcpuset_set(cpus, cpu_index(ci)); | | 763 | kcpuset_set(cpus, cpu_index(ci)); |
764 | } | | 764 | } |
765 | while (!kcpuset_match(cpus, kcpuset_running)) | | 765 | while (!kcpuset_match(cpus, kcpuset_running)) |
766 | ; | | 766 | ; |
767 | kcpuset_destroy(cpus); | | 767 | kcpuset_destroy(cpus); |
768 | | | 768 | |
769 | x86_mp_online = true; | | 769 | x86_mp_online = true; |
770 | | | 770 | |
771 | /* Now that we know about the TSC, attach the timecounter. */ | | 771 | /* Now that we know about the TSC, attach the timecounter. */ |
772 | tsc_tc_init(); | | 772 | tsc_tc_init(); |
773 | } | | 773 | } |
774 | #endif | | 774 | #endif |
775 | | | 775 | |
776 | static void | | 776 | static void |
777 | cpu_init_idle_lwp(struct cpu_info *ci) | | 777 | cpu_init_idle_lwp(struct cpu_info *ci) |
778 | { | | 778 | { |
779 | struct lwp *l = ci->ci_data.cpu_idlelwp; | | 779 | struct lwp *l = ci->ci_data.cpu_idlelwp; |
780 | struct pcb *pcb = lwp_getpcb(l); | | 780 | struct pcb *pcb = lwp_getpcb(l); |
781 | | | 781 | |
782 | pcb->pcb_cr0 = rcr0(); | | 782 | pcb->pcb_cr0 = rcr0(); |
783 | } | | 783 | } |
784 | | | 784 | |
785 | void | | 785 | void |
786 | cpu_init_idle_lwps(void) | | 786 | cpu_init_idle_lwps(void) |
787 | { | | 787 | { |
788 | struct cpu_info *ci; | | 788 | struct cpu_info *ci; |
789 | u_long i; | | 789 | u_long i; |
790 | | | 790 | |
791 | for (i = 0; i < maxcpus; i++) { | | 791 | for (i = 0; i < maxcpus; i++) { |
792 | ci = cpu_lookup(i); | | 792 | ci = cpu_lookup(i); |
793 | if (ci == NULL) | | 793 | if (ci == NULL) |
794 | continue; | | 794 | continue; |
795 | if (ci->ci_data.cpu_idlelwp == NULL) | | 795 | if (ci->ci_data.cpu_idlelwp == NULL) |
796 | continue; | | 796 | continue; |
797 | if ((ci->ci_flags & CPUF_PRESENT) == 0) | | 797 | if ((ci->ci_flags & CPUF_PRESENT) == 0) |
798 | continue; | | 798 | continue; |
799 | cpu_init_idle_lwp(ci); | | 799 | cpu_init_idle_lwp(ci); |
800 | } | | 800 | } |
801 | } | | 801 | } |
802 | | | 802 | |
803 | #ifdef MULTIPROCESSOR | | 803 | #ifdef MULTIPROCESSOR |
804 | void | | 804 | void |
805 | cpu_start_secondary(struct cpu_info *ci) | | 805 | cpu_start_secondary(struct cpu_info *ci) |
806 | { | | 806 | { |
807 | u_long psl; | | 807 | u_long psl; |
808 | int i; | | 808 | int i; |
809 | | | 809 | |
810 | #if NLAPIC > 0 | | 810 | #if NLAPIC > 0 |
811 | paddr_t mp_pdirpa; | | 811 | paddr_t mp_pdirpa; |
812 | mp_pdirpa = pmap_init_tmp_pgtbl(mp_trampoline_paddr); | | 812 | mp_pdirpa = pmap_init_tmp_pgtbl(mp_trampoline_paddr); |
813 | cpu_copy_trampoline(mp_pdirpa); | | 813 | cpu_copy_trampoline(mp_pdirpa); |
814 | #endif | | 814 | #endif |
815 | | | 815 | |
816 | atomic_or_32(&ci->ci_flags, CPUF_AP); | | 816 | atomic_or_32(&ci->ci_flags, CPUF_AP); |
817 | ci->ci_curlwp = ci->ci_data.cpu_idlelwp; | | 817 | ci->ci_curlwp = ci->ci_data.cpu_idlelwp; |
818 | if (CPU_STARTUP(ci, mp_trampoline_paddr) != 0) { | | 818 | if (CPU_STARTUP(ci, mp_trampoline_paddr) != 0) { |
819 | return; | | 819 | return; |
820 | } | | 820 | } |
821 | | | 821 | |
822 | /* | | 822 | /* |
823 | * Wait for it to become ready. Setting cpu_starting opens the | | 823 | * Wait for it to become ready. Setting cpu_starting opens the |
824 | * initial gate and allows the AP to start soft initialization. | | 824 | * initial gate and allows the AP to start soft initialization. |
825 | */ | | 825 | */ |
826 | KASSERT(cpu_starting == NULL); | | 826 | KASSERT(cpu_starting == NULL); |
827 | cpu_starting = ci; | | 827 | cpu_starting = ci; |
828 | for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) { | | 828 | for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) { |
829 | delay_func(10); | | 829 | delay_func(10); |
830 | } | | 830 | } |
831 | | | 831 | |
832 | if ((ci->ci_flags & CPUF_PRESENT) == 0) { | | 832 | if ((ci->ci_flags & CPUF_PRESENT) == 0) { |
833 | aprint_error_dev(ci->ci_dev, "failed to become ready\n"); | | 833 | aprint_error_dev(ci->ci_dev, "failed to become ready\n"); |
834 | #if defined(MPDEBUG) && defined(DDB) | | 834 | #if defined(MPDEBUG) && defined(DDB) |
835 | printf("dropping into debugger; continue from here to resume boot\n"); | | 835 | printf("dropping into debugger; continue from here to resume boot\n"); |
836 | Debugger(); | | 836 | Debugger(); |
837 | #endif | | 837 | #endif |
838 | } else { | | 838 | } else { |
839 | /* | | 839 | /* |
840 | * Synchronize time stamp counters. Invalidate cache and do | | 840 | * Synchronize time stamp counters. Invalidate cache and do |
841 | * twice (in tsc_sync_bp) to minimize possible cache effects. | | 841 | * twice (in tsc_sync_bp) to minimize possible cache effects. |
842 | * Disable interrupts to try and rule out any external | | 842 | * Disable interrupts to try and rule out any external |
843 | * interference. | | 843 | * interference. |
844 | */ | | 844 | */ |
845 | psl = x86_read_psl(); | | 845 | psl = x86_read_psl(); |
846 | x86_disable_intr(); | | 846 | x86_disable_intr(); |
847 | tsc_sync_bp(ci); | | 847 | tsc_sync_bp(ci); |
848 | x86_write_psl(psl); | | 848 | x86_write_psl(psl); |
849 | } | | 849 | } |
850 | | | 850 | |
851 | CPU_START_CLEANUP(ci); | | 851 | CPU_START_CLEANUP(ci); |
852 | cpu_starting = NULL; | | 852 | cpu_starting = NULL; |
853 | } | | 853 | } |
854 | | | 854 | |
855 | void | | 855 | void |
856 | cpu_boot_secondary(struct cpu_info *ci) | | 856 | cpu_boot_secondary(struct cpu_info *ci) |
857 | { | | 857 | { |
858 | int64_t drift; | | 858 | int64_t drift; |
859 | u_long psl; | | 859 | u_long psl; |
860 | int i; | | 860 | int i; |
861 | | | 861 | |
862 | atomic_or_32(&ci->ci_flags, CPUF_GO); | | 862 | atomic_or_32(&ci->ci_flags, CPUF_GO); |
863 | for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) { | | 863 | for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) { |
864 | delay_func(10); | | 864 | delay_func(10); |
865 | } | | 865 | } |
866 | if ((ci->ci_flags & CPUF_RUNNING) == 0) { | | 866 | if ((ci->ci_flags & CPUF_RUNNING) == 0) { |
867 | aprint_error_dev(ci->ci_dev, "failed to start\n"); | | 867 | aprint_error_dev(ci->ci_dev, "failed to start\n"); |
868 | #if defined(MPDEBUG) && defined(DDB) | | 868 | #if defined(MPDEBUG) && defined(DDB) |
869 | printf("dropping into debugger; continue from here to resume boot\n"); | | 869 | printf("dropping into debugger; continue from here to resume boot\n"); |
870 | Debugger(); | | 870 | Debugger(); |
871 | #endif | | 871 | #endif |
872 | } else { | | 872 | } else { |
873 | /* Synchronize TSC again, check for drift. */ | | 873 | /* Synchronize TSC again, check for drift. */ |
874 | drift = ci->ci_data.cpu_cc_skew; | | 874 | drift = ci->ci_data.cpu_cc_skew; |
875 | psl = x86_read_psl(); | | 875 | psl = x86_read_psl(); |
876 | x86_disable_intr(); | | 876 | x86_disable_intr(); |
877 | tsc_sync_bp(ci); | | 877 | tsc_sync_bp(ci); |
878 | x86_write_psl(psl); | | 878 | x86_write_psl(psl); |
879 | drift -= ci->ci_data.cpu_cc_skew; | | 879 | drift -= ci->ci_data.cpu_cc_skew; |
880 | aprint_debug_dev(ci->ci_dev, "TSC skew=%lld drift=%lld\n", | | 880 | aprint_debug_dev(ci->ci_dev, "TSC skew=%lld drift=%lld\n", |
881 | (long long)ci->ci_data.cpu_cc_skew, (long long)drift); | | 881 | (long long)ci->ci_data.cpu_cc_skew, (long long)drift); |
882 | tsc_sync_drift(drift); | | 882 | tsc_sync_drift(drift); |
883 | } | | 883 | } |
884 | } | | 884 | } |

/*
 * The CPU ends up here when it's ready to run.
 * This is called from code in mptramp.s; at this point, we are running
 * in the idle pcb/idle stack of the new CPU. When this function returns,
 * this processor will enter the idle loop and start looking for work.
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	struct pcb *pcb;
	int s, i;

	/* ------------------------------------------------------------- */

	/*
	 * This section of code must be compiled with SSP disabled, to
	 * prevent a race against cpu0. See sys/conf/ssp.mk.
	 */

	cpu_init_msrs(ci, true);
	cpu_probe(ci);
	cpu_speculation_init(ci);
#if NHYPERV > 0
	hyperv_init_cpu(ci);
#endif

	ci->ci_data.cpu_cc_freq = cpu_info_primary.ci_data.cpu_cc_freq;
	/* cpu_get_tsc_freq(ci); */

	KDASSERT((ci->ci_flags & CPUF_PRESENT) == 0);

	/*
	 * Synchronize the TSC for the first time. Note that interrupts are
	 * off at this point.
	 */
	atomic_or_32(&ci->ci_flags, CPUF_PRESENT);
	tsc_sync_ap(ci);

	/* ------------------------------------------------------------- */

	/*
	 * Wait to be brought online.
	 *
	 * Use MONITOR/MWAIT if available. These instructions put the CPU in
	 * a low consumption mode (C-state), and if the TSC is not invariant,
	 * this causes the TSC to drift. We want this to happen, so that we
	 * can later detect (in tsc_tc_init) any abnormal drift with invariant
	 * TSCs. That's just for safety; by definition such drifts should
	 * never occur with invariant TSCs.
	 *
	 * If not available, try PAUSE. We'd like to use HLT, but we have
	 * interrupts off.
	 */
	while ((ci->ci_flags & CPUF_GO) == 0) {
		if ((cpu_feature[1] & CPUID2_MONITOR) != 0) {
			x86_monitor(&ci->ci_flags, 0, 0);
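			/*
			 * Re-check after arming the monitor: a store to
			 * ci_flags that landed before MONITOR was armed
			 * would not make MWAIT return, so catch it here
			 * instead of sleeping through the wakeup.
			 */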
			if ((ci->ci_flags & CPUF_GO) != 0) {
				continue;
			}
			x86_mwait(0, 0);
		} else {
			/*
			 * XXX The loop repetition count could be a lot
			 * XXX higher, but currently the qemu emulator
			 * XXX takes a _very_long_time_ to execute the
			 * XXX pause instruction. So for now, use a low
			 * XXX value to allow the cpu to hatch before
			 * XXX timing out.
			 */
			for (i = 50; i != 0; i--) {
				x86_pause();
			}
		}
	}

	/* Because the text may have been patched in x86_patch(). */
	wbinvd();
	x86_flush();
	tlbflushg();

	KASSERT((ci->ci_flags & CPUF_RUNNING) == 0);

#ifdef PAE
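	/*
	 * Under PAE each CPU has a private top-level (L3) page directory;
	 * seed its PDP entries from the kernel pmap before loading it
	 * into %cr3.
	 */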
	pd_entry_t *l3_pd = ci->ci_pae_l3_pdir;
	for (i = 0; i < PDP_SIZE; i++) {
		l3_pd[i] = pmap_kernel()->pm_pdirpa[i] | PTE_P;
	}
	lcr3(ci->ci_pae_l3_pdirpa);
#else
	lcr3(pmap_pdirpa(pmap_kernel(), 0));
#endif

	pcb = lwp_getpcb(curlwp);
	pcb->pcb_cr3 = rcr3();
	pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
	lcr0(pcb->pcb_cr0);

	cpu_init_idt(ci);
	gdt_init_cpu(ci);
#if NLAPIC > 0
	lapic_enable();
	lapic_set_lvt();
#endif

	fpuinit(ci);
	lldt(GSYSSEL(GLDT_SEL, SEL_KPL));
	ltr(ci->ci_tss_sel);

	/*
	 * cpu_init will re-synchronize the TSC, and will detect any abnormal
	 * drift that would have been caused by the use of MONITOR/MWAIT
	 * above.
	 */
	cpu_init(ci);
#ifdef XENPVHVM
	xen_hvm_init_cpu(ci);
#endif
	(*x86_initclock_func)();
	cpu_get_tsc_freq(ci);

	s = splhigh();
#if NLAPIC > 0
	lapic_write_tpri(0);
#endif
	x86_enable_intr();
	splx(s);
	x86_errata();

	aprint_debug_dev(ci->ci_dev, "running\n");

	kcsan_cpu_init(ci);

	idle_loop(NULL);
	KASSERT(false);
}
#endif

#if defined(DDB)

#include <ddb/db_output.h>
#include <machine/db_machdep.h>

/*
 * Dump CPU information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	const char sixtyfour64space[] =
#ifdef _LP64
	    "         "
#endif
	    "";

	db_printf("addr %sdev id flags ipis spl curlwp "
	    "\n", sixtyfour64space);
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%p %s %ld %x %x %d %10p\n",
		    ci,
		    ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
		    (long)ci->ci_cpuid,
		    ci->ci_flags, ci->ci_ipis, ci->ci_ilevel,
		    ci->ci_curlwp);
	}
}
#endif

#ifdef MULTIPROCESSOR
#if NLAPIC > 0
static void
cpu_copy_trampoline(paddr_t pdir_pa)
{
	extern uint32_t nox_flag;
	extern u_char cpu_spinup_trampoline[];
	extern u_char cpu_spinup_trampoline_end[];
	vaddr_t mp_trampoline_vaddr;
	struct {
		uint32_t large;
		uint32_t nox;
		uint32_t pdir;
	} smp_data;
	CTASSERT(sizeof(smp_data) == 3 * 4);

	smp_data.large = (pmap_largepages != 0);
	smp_data.nox = nox_flag;
	smp_data.pdir = (uint32_t)(pdir_pa & 0xFFFFFFFF);

	/* Map the trampoline's physical page so we can write to it */
	mp_trampoline_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY);
	pmap_kenter_pa(mp_trampoline_vaddr, mp_trampoline_paddr,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());

	/* Copy boot code */
	memcpy((void *)mp_trampoline_vaddr,
	    cpu_spinup_trampoline,
	    cpu_spinup_trampoline_end - cpu_spinup_trampoline);

	/* Copy smp_data at the end */
	memcpy((void *)(mp_trampoline_vaddr + PAGE_SIZE - sizeof(smp_data)),
	    &smp_data, sizeof(smp_data));

	pmap_kremove(mp_trampoline_vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, mp_trampoline_vaddr, PAGE_SIZE, UVM_KMF_VAONLY);
}
#endif

int
mp_cpu_start(struct cpu_info *ci, paddr_t target)
{
	int error;

	/*
	 * Bootstrap code must be addressable in real mode
	 * and it must be page aligned.
	 */
	KASSERT(target < 0x10000 && target % PAGE_SIZE == 0);

	/*
	 * "The BSP must initialize CMOS shutdown code to 0Ah ..."
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_JUMP);

#if NLAPIC > 0
	/*
	 * "and the warm reset vector (DWORD based at 40:67) to point
	 * to the AP startup code ..."
	 */
	unsigned short dwordptr[2];
	dwordptr[0] = 0;
	dwordptr[1] = target >> 4;

	memcpy((uint8_t *)cmos_data_mapping + 0x467, dwordptr, 4);
#endif

	if ((cpu_feature[0] & CPUID_APIC) == 0) {
		aprint_error("mp_cpu_start: CPU does not have APIC\n");
		return ENODEV;
	}

	/*
	 * ... prior to executing the following sequence:". We'll also add in
	 * local cache flush, in case the BIOS has left the AP with its cache
	 * disabled. It may not be able to cope with MP coherency.
	 */
	wbinvd();

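	/*
	 * Start the AP with the canonical sequence from the Intel MP
	 * specification: one INIT IPI, then two STARTUP IPIs carrying the
	 * page number of the trampoline, with the prescribed delays in
	 * between.
	 */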
	if (ci->ci_flags & CPUF_AP) {
		error = x86_ipi_init(ci->ci_cpuid);
		if (error != 0) {
			aprint_error_dev(ci->ci_dev, "%s: IPI not taken (1)\n",
			    __func__);
			return error;
		}
		delay_func(10000);

		error = x86_ipi_startup(ci->ci_cpuid, target / PAGE_SIZE);
		if (error != 0) {
			aprint_error_dev(ci->ci_dev, "%s: IPI not taken (2)\n",
			    __func__);
			return error;
		}
		delay_func(200);

		error = x86_ipi_startup(ci->ci_cpuid, target / PAGE_SIZE);
		if (error != 0) {
			aprint_error_dev(ci->ci_dev, "%s: IPI not taken (3)\n",
			    __func__);
			return error;
		}
		delay_func(200);
	}

	return 0;
}

void
mp_cpu_start_cleanup(struct cpu_info *ci)
{
	/*
	 * Ensure the NVRAM reset byte contains something vaguely sane.
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_RST);
}
#endif

#ifdef __x86_64__
typedef void (vector)(void);
extern vector Xsyscall, Xsyscall32, Xsyscall_svs;
#endif

void
cpu_init_msrs(struct cpu_info *ci, bool full)
{
#ifdef __x86_64__
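	/*
	 * Program the SYSCALL/SYSRET MSRs: STAR holds the kernel and user
	 * segment selector bases, LSTAR the 64-bit syscall entry point,
	 * CSTAR the 32-bit compat entry point, and SFMASK the RFLAGS bits
	 * cleared on kernel entry.
	 */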
	wrmsr(MSR_STAR,
	    ((uint64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((uint64_t)LSEL(LSYSRETBASE_SEL, SEL_UPL) << 48));
	wrmsr(MSR_LSTAR, (uint64_t)Xsyscall);
	wrmsr(MSR_CSTAR, (uint64_t)Xsyscall32);
	wrmsr(MSR_SFMASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_AC);

#ifdef SVS
	if (svs_enabled)
		wrmsr(MSR_LSTAR, (uint64_t)Xsyscall_svs);
#endif

	if (full) {
		wrmsr(MSR_FSBASE, 0);
		wrmsr(MSR_GSBASE, (uint64_t)ci);
		wrmsr(MSR_KERNELGSBASE, 0);
	}
#endif /* __x86_64__ */

	if (cpu_feature[2] & CPUID_NOX)
		wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
}

void
cpu_offline_md(void)
{
	return;
}

/* XXX joerg restructure and restart CPUs individually */
static bool
cpu_stop(device_t dv)
{
	struct cpu_softc *sc = device_private(dv);
	struct cpu_info *ci = sc->sc_info;
	int err;

	KASSERT((ci->ci_flags & CPUF_PRESENT) != 0);

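	/* The primary CPU is never taken offline; leave it running. */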
	if (CPU_IS_PRIMARY(ci))
		return true;

	if (ci->ci_data.cpu_idlelwp == NULL)
		return true;

	sc->sc_wasonline = !(ci->ci_schedstate.spc_flags & SPCF_OFFLINE);

	if (sc->sc_wasonline) {
		mutex_enter(&cpu_lock);
		err = cpu_setstate(ci, false);
		mutex_exit(&cpu_lock);

		if (err != 0)
			return false;
	}

	return true;
}

static bool
cpu_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct cpu_softc *sc = device_private(dv);
	struct cpu_info *ci = sc->sc_info;

	if ((ci->ci_flags & CPUF_PRESENT) == 0)
		return true;

	cpufreq_suspend(ci);

	return cpu_stop(dv);
}

static bool
cpu_resume(device_t dv, const pmf_qual_t *qual)
{
	struct cpu_softc *sc = device_private(dv);
	struct cpu_info *ci = sc->sc_info;
	int err = 0;

	if ((ci->ci_flags & CPUF_PRESENT) == 0)
		return true;

	if (CPU_IS_PRIMARY(ci))
		goto out;

	if (ci->ci_data.cpu_idlelwp == NULL)
		goto out;

	if (sc->sc_wasonline) {
		mutex_enter(&cpu_lock);
		err = cpu_setstate(ci, true);
		mutex_exit(&cpu_lock);
	}

out:
	if (err != 0)
		return false;

	cpufreq_resume(ci);

	return true;
}

static bool
cpu_shutdown(device_t dv, int how)
{
	struct cpu_softc *sc = device_private(dv);
	struct cpu_info *ci = sc->sc_info;

	if ((ci->ci_flags & CPUF_BSP) != 0)
		return false;

	if ((ci->ci_flags & CPUF_PRESENT) == 0)
		return true;

	return cpu_stop(dv);
}

/* Get the TSC frequency and set it to ci->ci_data.cpu_cc_freq. */
void
cpu_get_tsc_freq(struct cpu_info *ci)
{
	uint64_t freq = 0, freq_from_cpuid, t0, t1;
	int64_t overhead;

	if (CPU_IS_PRIMARY(ci) && cpu_hascounter()) {
		/*
		 * On the first call to this function, try to get the TSC
		 * frequency from CPUID via cpu_tsc_freq_cpuid(). That
		 * function also sets the lapic_per_second variable if the
		 * frequency is known; this is required to program the LAPIC
		 * timer correctly on Intel's Comet Lake and newer processors.
		 */
		if (ci->ci_data.cpu_cc_freq == 0)
			freq = freq_from_cpuid = cpu_tsc_freq_cpuid(ci);
#if NHPET > 0
		if (freq == 0)
			freq = hpet_tsc_freq();
#endif
		if (freq == 0) {
			/*
			 * Work out the approximate overhead involved below.
			 * Discard the result of the first go around the
			 * loop.
			 */
			overhead = 0;
			for (int i = 0; i <= 8; i++) {
				t0 = cpu_counter();
				delay_func(0);
				t1 = cpu_counter();
				if (i > 0) {
					overhead += (t1 - t0);
				}
			}
			overhead >>= 3;

			/* Now do the calibration. */
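			/*
			 * delay_func(100000) busy-waits for 100ms, i.e.
			 * 1/10 of a second, so the cycle count over that
			 * window (minus the measured overhead) times 10
			 * gives the frequency in Hz.
			 */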
			t0 = cpu_counter();
			delay_func(100000);
			t1 = cpu_counter();
			freq = (t1 - t0 - overhead) * 10;
		}
		if (ci->ci_data.cpu_cc_freq != 0) {
			freq_from_cpuid = cpu_tsc_freq_cpuid(ci);
			if ((freq_from_cpuid != 0)
			    && (freq != freq_from_cpuid))
				aprint_verbose_dev(ci->ci_dev, "TSC freq "
				    "calibrated %" PRIu64 " Hz\n", freq);
		}
	} else {
		freq = cpu_info_primary.ci_data.cpu_cc_freq;
	}

	ci->ci_data.cpu_cc_freq = freq;
}

void
x86_cpu_idle_mwait(void)
{
	struct cpu_info *ci = curcpu();

	KASSERT(ci->ci_ilevel == IPL_NONE);

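	/*
	 * Arm the monitor on ci_want_resched before the final check, so
	 * that a cross-CPU store landing after the check still makes
	 * MWAIT return immediately rather than being lost.
	 */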
	x86_monitor(&ci->ci_want_resched, 0, 0);
	if (__predict_false(ci->ci_want_resched)) {
		return;
	}
	x86_mwait(0, 0);
}

void
x86_cpu_idle_halt(void)
{
	struct cpu_info *ci = curcpu();

	KASSERT(ci->ci_ilevel == IPL_NONE);

	x86_disable_intr();
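	/*
	 * x86_stihlt() executes STI immediately followed by HLT. STI only
	 * takes effect after the next instruction, so no interrupt can be
	 * delivered between the re-check and the halt.
	 */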
	if (!__predict_false(ci->ci_want_resched)) {
		x86_stihlt();
	} else {
		x86_enable_intr();
	}
}

/*
 * Loads pmap for the current CPU.
 */
void
cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
{
#ifdef SVS
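	/*
	 * With SVS (Separate Virtual Space), each CPU runs on its own
	 * page directory; update it to reflect the new pmap's user
	 * mappings before the switch takes effect.
	 */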
	if (svs_enabled) {
		svs_pdir_switch(pmap);
	}
#endif

#ifdef PAE
	struct cpu_info *ci = curcpu();
	bool interrupts_enabled;
	pd_entry_t *l3_pd = ci->ci_pae_l3_pdir;
	int i;

	/*
	 * Disable interrupts to block TLB shootdowns, which can reload %cr3.
	 * While this doesn't block NMIs, it's probably OK, as NMIs are
	 * unlikely to reload %cr3.
	 */
	interrupts_enabled = (x86_read_flags() & PSL_I) != 0;
	if (interrupts_enabled)
		x86_disable_intr();

	for (i = 0; i < PDP_SIZE; i++) {
		l3_pd[i] = pmap->pm_pdirpa[i] | PTE_P;
	}

	if (interrupts_enabled)
		x86_enable_intr();
	tlbflush();
#else
	lcr3(pmap_pdirpa(pmap, 0));
#endif
}

/*
 * Notify all other cpus to halt.
 */

void
cpu_broadcast_halt(void)
{
	x86_broadcast_ipi(X86_IPI_HALT);
}

/*
 * Send a dummy ipi to a cpu to force it to run splraise()/spllower(),
 * and trigger an AST on the running LWP.
 */

void
cpu_kick(struct cpu_info *ci)
{
	x86_send_ipi(ci, X86_IPI_AST);
}