Mon Apr 20 04:17:52 2020 UTC ()
Whitespace fix. No functional change.


(msaitoh)
diff -r1.105 -r1.106 src/sys/arch/x86/x86/identcpu.c

cvs diff -r1.105 -r1.106 src/sys/arch/x86/x86/identcpu.c (switch to unified diff)

--- src/sys/arch/x86/x86/identcpu.c 2020/04/09 02:07:01 1.105
+++ src/sys/arch/x86/x86/identcpu.c 2020/04/20 04:17:51 1.106
@@ -1,1172 +1,1172 @@
1/* $NetBSD: identcpu.c,v 1.105 2020/04/09 02:07:01 christos Exp $ */ 1/* $NetBSD: identcpu.c,v 1.106 2020/04/20 04:17:51 msaitoh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Frank van der Linden, and by Jason R. Thorpe. 8 * by Frank van der Linden, and by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.105 2020/04/09 02:07:01 christos Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.106 2020/04/20 04:17:51 msaitoh Exp $");
34 34
35#include "opt_xen.h" 35#include "opt_xen.h"
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/systm.h> 38#include <sys/systm.h>
39#include <sys/device.h> 39#include <sys/device.h>
40#include <sys/cpu.h> 40#include <sys/cpu.h>
41 41
42#include <uvm/uvm_extern.h> 42#include <uvm/uvm_extern.h>
43 43
44#include <machine/specialreg.h> 44#include <machine/specialreg.h>
45#include <machine/pio.h> 45#include <machine/pio.h>
46#include <machine/cpu.h> 46#include <machine/cpu.h>
47 47
48#include <x86/cputypes.h> 48#include <x86/cputypes.h>
49#include <x86/cacheinfo.h> 49#include <x86/cacheinfo.h>
50#include <x86/cpuvar.h> 50#include <x86/cpuvar.h>
51#include <x86/fpu.h> 51#include <x86/fpu.h>
52 52
53#include <x86/x86/vmtreg.h> /* for vmt_hvcall() */ 53#include <x86/x86/vmtreg.h> /* for vmt_hvcall() */
54#include <x86/x86/vmtvar.h> /* for vmt_hvcall() */ 54#include <x86/x86/vmtvar.h> /* for vmt_hvcall() */
55 55
56#ifndef XEN 56#ifndef XEN
57#include "hyperv.h" 57#include "hyperv.h"
58#if NHYPERV > 0 58#if NHYPERV > 0
59#include <x86/x86/hypervvar.h> 59#include <x86/x86/hypervvar.h>
60#endif 60#endif
61#endif 61#endif
62 62
63static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO; 63static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;
64 64
65static const struct x86_cache_info amd_cpuid_l2l3cache_assoc_info[] =  65static const struct x86_cache_info amd_cpuid_l2l3cache_assoc_info[] =
66 AMD_L2L3CACHE_INFO; 66 AMD_L2L3CACHE_INFO;
67 67
68int cpu_vendor; 68int cpu_vendor;
69char cpu_brand_string[49]; 69char cpu_brand_string[49];
70 70
71int x86_fpu_save __read_mostly; 71int x86_fpu_save __read_mostly;
72unsigned int x86_fpu_save_size __read_mostly = sizeof(struct save87); 72unsigned int x86_fpu_save_size __read_mostly = sizeof(struct save87);
73uint64_t x86_xsave_features __read_mostly = 0; 73uint64_t x86_xsave_features __read_mostly = 0;
74size_t x86_xsave_offsets[XSAVE_MAX_COMPONENT+1] __read_mostly; 74size_t x86_xsave_offsets[XSAVE_MAX_COMPONENT+1] __read_mostly;
75size_t x86_xsave_sizes[XSAVE_MAX_COMPONENT+1] __read_mostly; 75size_t x86_xsave_sizes[XSAVE_MAX_COMPONENT+1] __read_mostly;
76 76
77/* 77/*
78 * Note: these are just the ones that may not have a cpuid instruction. 78 * Note: these are just the ones that may not have a cpuid instruction.
79 * We deal with the rest in a different way. 79 * We deal with the rest in a different way.
80 */ 80 */
81const int i386_nocpuid_cpus[] = { 81const int i386_nocpuid_cpus[] = {
82 CPUVENDOR_INTEL, CPUCLASS_386, /* CPU_386SX */ 82 CPUVENDOR_INTEL, CPUCLASS_386, /* CPU_386SX */
83 CPUVENDOR_INTEL, CPUCLASS_386, /* CPU_386 */ 83 CPUVENDOR_INTEL, CPUCLASS_386, /* CPU_386 */
84 CPUVENDOR_INTEL, CPUCLASS_486, /* CPU_486SX */ 84 CPUVENDOR_INTEL, CPUCLASS_486, /* CPU_486SX */
85 CPUVENDOR_INTEL, CPUCLASS_486, /* CPU_486 */ 85 CPUVENDOR_INTEL, CPUCLASS_486, /* CPU_486 */
86 CPUVENDOR_CYRIX, CPUCLASS_486, /* CPU_486DLC */ 86 CPUVENDOR_CYRIX, CPUCLASS_486, /* CPU_486DLC */
87 CPUVENDOR_CYRIX, CPUCLASS_486, /* CPU_6x86 */ 87 CPUVENDOR_CYRIX, CPUCLASS_486, /* CPU_6x86 */
88 CPUVENDOR_NEXGEN, CPUCLASS_386, /* CPU_NX586 */ 88 CPUVENDOR_NEXGEN, CPUCLASS_386, /* CPU_NX586 */
89}; 89};
90 90
91static const char cpu_vendor_names[][10] = { 91static const char cpu_vendor_names[][10] = {
92 "Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta", 92 "Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta",
93 "Vortex86" 93 "Vortex86"
94}; 94};
95 95
96static const struct x86_cache_info * 96static const struct x86_cache_info *
97cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc) 97cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
98{ 98{
99 int i; 99 int i;
100 100
101 for (i = 0; cai[i].cai_desc != 0; i++) { 101 for (i = 0; cai[i].cai_desc != 0; i++) {
102 if (cai[i].cai_desc == desc) 102 if (cai[i].cai_desc == desc)
103 return (&cai[i]); 103 return (&cai[i]);
104 } 104 }
105 105
106 return (NULL); 106 return (NULL);
107} 107}
108 108
109/* 109/*
110 * Get cache info from one of the following: 110 * Get cache info from one of the following:
111 * Intel Deterministic Cache Parameter Leaf (0x04) 111 * Intel Deterministic Cache Parameter Leaf (0x04)
112 * AMD Cache Topology Information Leaf (0x8000001d) 112 * AMD Cache Topology Information Leaf (0x8000001d)
113 */ 113 */
114static void 114static void
115cpu_dcp_cacheinfo(struct cpu_info *ci, uint32_t leaf) 115cpu_dcp_cacheinfo(struct cpu_info *ci, uint32_t leaf)
116{ 116{
117 u_int descs[4]; 117 u_int descs[4];
118 int type, level, ways, partitions, linesize, sets, totalsize; 118 int type, level, ways, partitions, linesize, sets, totalsize;
119 int caitype = -1; 119 int caitype = -1;
120 int i; 120 int i;
121 121
122 for (i = 0; ; i++) { 122 for (i = 0; ; i++) {
123 x86_cpuid2(leaf, i, descs); 123 x86_cpuid2(leaf, i, descs);
124 type = __SHIFTOUT(descs[0], CPUID_DCP_CACHETYPE); 124 type = __SHIFTOUT(descs[0], CPUID_DCP_CACHETYPE);
125 if (type == CPUID_DCP_CACHETYPE_N) 125 if (type == CPUID_DCP_CACHETYPE_N)
126 break; 126 break;
127 level = __SHIFTOUT(descs[0], CPUID_DCP_CACHELEVEL); 127 level = __SHIFTOUT(descs[0], CPUID_DCP_CACHELEVEL);
128 switch (level) { 128 switch (level) {
129 case 1: 129 case 1:
130 if (type == CPUID_DCP_CACHETYPE_I) 130 if (type == CPUID_DCP_CACHETYPE_I)
131 caitype = CAI_ICACHE; 131 caitype = CAI_ICACHE;
132 else if (type == CPUID_DCP_CACHETYPE_D) 132 else if (type == CPUID_DCP_CACHETYPE_D)
133 caitype = CAI_DCACHE; 133 caitype = CAI_DCACHE;
134 else 134 else
135 caitype = -1; 135 caitype = -1;
136 break; 136 break;
137 case 2: 137 case 2:
138 if (type == CPUID_DCP_CACHETYPE_U) 138 if (type == CPUID_DCP_CACHETYPE_U)
139 caitype = CAI_L2CACHE; 139 caitype = CAI_L2CACHE;
140 else 140 else
141 caitype = -1; 141 caitype = -1;
142 break; 142 break;
143 case 3: 143 case 3:
144 if (type == CPUID_DCP_CACHETYPE_U) 144 if (type == CPUID_DCP_CACHETYPE_U)
145 caitype = CAI_L3CACHE; 145 caitype = CAI_L3CACHE;
146 else 146 else
147 caitype = -1; 147 caitype = -1;
148 break; 148 break;
149 default: 149 default:
150 caitype = -1; 150 caitype = -1;
151 break; 151 break;
152 } 152 }
153 if (caitype == -1) 153 if (caitype == -1)
154 continue; 154 continue;
155 155
156 ways = __SHIFTOUT(descs[1], CPUID_DCP_WAYS) + 1; 156 ways = __SHIFTOUT(descs[1], CPUID_DCP_WAYS) + 1;
157 partitions =__SHIFTOUT(descs[1], CPUID_DCP_PARTITIONS) 157 partitions =__SHIFTOUT(descs[1], CPUID_DCP_PARTITIONS)
158 + 1; 158 + 1;
159 linesize = __SHIFTOUT(descs[1], CPUID_DCP_LINESIZE) 159 linesize = __SHIFTOUT(descs[1], CPUID_DCP_LINESIZE)
160 + 1; 160 + 1;
161 sets = descs[2] + 1; 161 sets = descs[2] + 1;
162 totalsize = ways * partitions * linesize * sets; 162 totalsize = ways * partitions * linesize * sets;
163 ci->ci_cinfo[caitype].cai_totalsize = totalsize; 163 ci->ci_cinfo[caitype].cai_totalsize = totalsize;
164 ci->ci_cinfo[caitype].cai_associativity = ways; 164 ci->ci_cinfo[caitype].cai_associativity = ways;
165 ci->ci_cinfo[caitype].cai_linesize = linesize; 165 ci->ci_cinfo[caitype].cai_linesize = linesize;
166 } 166 }
167} 167}
168 168
169static void 169static void
170cpu_probe_intel_cache(struct cpu_info *ci) 170cpu_probe_intel_cache(struct cpu_info *ci)
171{ 171{
172 const struct x86_cache_info *cai; 172 const struct x86_cache_info *cai;
173 u_int descs[4]; 173 u_int descs[4];
174 int iterations, i, j; 174 int iterations, i, j;
175 uint8_t desc; 175 uint8_t desc;
176 176
177 if (cpuid_level >= 2) {  177 if (cpuid_level >= 2) {
178 /* Parse the cache info from `cpuid leaf 2', if we have it. */ 178 /* Parse the cache info from `cpuid leaf 2', if we have it. */
179 x86_cpuid(2, descs); 179 x86_cpuid(2, descs);
180 iterations = descs[0] & 0xff; 180 iterations = descs[0] & 0xff;
181 while (iterations-- > 0) { 181 while (iterations-- > 0) {
182 for (i = 0; i < 4; i++) { 182 for (i = 0; i < 4; i++) {
183 if (descs[i] & 0x80000000) 183 if (descs[i] & 0x80000000)
184 continue; 184 continue;
185 for (j = 0; j < 4; j++) { 185 for (j = 0; j < 4; j++) {
186 if (i == 0 && j == 0) 186 if (i == 0 && j == 0)
187 continue; 187 continue;
188 desc = (descs[i] >> (j * 8)) & 0xff; 188 desc = (descs[i] >> (j * 8)) & 0xff;
189 if (desc == 0) 189 if (desc == 0)
190 continue; 190 continue;
191 cai = cache_info_lookup( 191 cai = cache_info_lookup(
192 intel_cpuid_cache_info, desc); 192 intel_cpuid_cache_info, desc);
193 if (cai != NULL) { 193 if (cai != NULL) {
194 ci->ci_cinfo[cai->cai_index] = 194 ci->ci_cinfo[cai->cai_index] =
195 *cai; 195 *cai;
196 } 196 }
197 } 197 }
198 } 198 }
199 } 199 }
200 } 200 }
201 201
202 if (cpuid_level < 4) 202 if (cpuid_level < 4)
203 return; 203 return;
204 204
205 /* Parse the cache info from `cpuid leaf 4', if we have it. */ 205 /* Parse the cache info from `cpuid leaf 4', if we have it. */
206 cpu_dcp_cacheinfo(ci, 4); 206 cpu_dcp_cacheinfo(ci, 4);
207} 207}
208 208
209static void 209static void
210cpu_probe_intel_errata(struct cpu_info *ci) 210cpu_probe_intel_errata(struct cpu_info *ci)
211{ 211{
212 u_int family, model, stepping; 212 u_int family, model, stepping;
213 213
214 family = CPUID_TO_FAMILY(ci->ci_signature); 214 family = CPUID_TO_FAMILY(ci->ci_signature);
215 model = CPUID_TO_MODEL(ci->ci_signature); 215 model = CPUID_TO_MODEL(ci->ci_signature);
216 stepping = CPUID_TO_STEPPING(ci->ci_signature); 216 stepping = CPUID_TO_STEPPING(ci->ci_signature);
217 217
218 if (family == 0x6 && model == 0x5C && stepping == 0x9) { /* Apollo Lake */ 218 if (family == 0x6 && model == 0x5C && stepping == 0x9) { /* Apollo Lake */
219 wrmsr(MSR_MISC_ENABLE, 219 wrmsr(MSR_MISC_ENABLE,
220 rdmsr(MSR_MISC_ENABLE) & ~IA32_MISC_MWAIT_EN); 220 rdmsr(MSR_MISC_ENABLE) & ~IA32_MISC_MWAIT_EN);
221 221
222 cpu_feature[1] &= ~CPUID2_MONITOR; 222 cpu_feature[1] &= ~CPUID2_MONITOR;
223 ci->ci_feat_val[1] &= ~CPUID2_MONITOR; 223 ci->ci_feat_val[1] &= ~CPUID2_MONITOR;
224 } 224 }
225} 225}
226 226
227static void 227static void
228cpu_probe_intel(struct cpu_info *ci) 228cpu_probe_intel(struct cpu_info *ci)
229{ 229{
230 230
231 if (cpu_vendor != CPUVENDOR_INTEL) 231 if (cpu_vendor != CPUVENDOR_INTEL)
232 return; 232 return;
233 233
234 cpu_probe_intel_cache(ci); 234 cpu_probe_intel_cache(ci);
235 cpu_probe_intel_errata(ci); 235 cpu_probe_intel_errata(ci);
236} 236}
237 237
238static void 238static void
239cpu_probe_amd_cache(struct cpu_info *ci) 239cpu_probe_amd_cache(struct cpu_info *ci)
240{ 240{
241 const struct x86_cache_info *cp; 241 const struct x86_cache_info *cp;
242 struct x86_cache_info *cai; 242 struct x86_cache_info *cai;
243 int family, model; 243 int family, model;
244 u_int descs[4]; 244 u_int descs[4];
245 u_int lfunc; 245 u_int lfunc;
246 246
247 family = CPUID_TO_FAMILY(ci->ci_signature); 247 family = CPUID_TO_FAMILY(ci->ci_signature);
248 model = CPUID_TO_MODEL(ci->ci_signature); 248 model = CPUID_TO_MODEL(ci->ci_signature);
249 249
250 /* K5 model 0 has none of this info. */ 250 /* K5 model 0 has none of this info. */
251 if (family == 5 && model == 0) 251 if (family == 5 && model == 0)
252 return; 252 return;
253 253
254 /* Determine the largest extended function value. */ 254 /* Determine the largest extended function value. */
255 x86_cpuid(0x80000000, descs); 255 x86_cpuid(0x80000000, descs);
256 lfunc = descs[0]; 256 lfunc = descs[0];
257 257
258 if (lfunc < 0x80000005) 258 if (lfunc < 0x80000005)
259 return; 259 return;
260 260
261 /* Determine L1 cache/TLB info. */ 261 /* Determine L1 cache/TLB info. */
262 x86_cpuid(0x80000005, descs); 262 x86_cpuid(0x80000005, descs);
263 263
264 /* K6-III and higher have large page TLBs. */ 264 /* K6-III and higher have large page TLBs. */
265 if ((family == 5 && model >= 9) || family >= 6) { 265 if ((family == 5 && model >= 9) || family >= 6) {
266 cai = &ci->ci_cinfo[CAI_ITLB2]; 266 cai = &ci->ci_cinfo[CAI_ITLB2];
267 cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]); 267 cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
268 cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]); 268 cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
269 cai->cai_linesize = (4 * 1024 * 1024); 269 cai->cai_linesize = (4 * 1024 * 1024);
270 270
271 cai = &ci->ci_cinfo[CAI_DTLB2]; 271 cai = &ci->ci_cinfo[CAI_DTLB2];
272 cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]); 272 cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
273 cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]); 273 cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
274 cai->cai_linesize = (4 * 1024 * 1024); 274 cai->cai_linesize = (4 * 1024 * 1024);
275 } 275 }
276 276
277 cai = &ci->ci_cinfo[CAI_ITLB]; 277 cai = &ci->ci_cinfo[CAI_ITLB];
278 cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]); 278 cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
279 cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]); 279 cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
280 cai->cai_linesize = (4 * 1024); 280 cai->cai_linesize = (4 * 1024);
281 281
282 cai = &ci->ci_cinfo[CAI_DTLB]; 282 cai = &ci->ci_cinfo[CAI_DTLB];
283 cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]); 283 cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
284 cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]); 284 cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
285 cai->cai_linesize = (4 * 1024); 285 cai->cai_linesize = (4 * 1024);
286 286
287 cai = &ci->ci_cinfo[CAI_DCACHE]; 287 cai = &ci->ci_cinfo[CAI_DCACHE];
288 cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]); 288 cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
289 cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]); 289 cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
290 cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]); 290 cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]);
291 291
292 cai = &ci->ci_cinfo[CAI_ICACHE]; 292 cai = &ci->ci_cinfo[CAI_ICACHE];
293 cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]); 293 cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
294 cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]); 294 cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
295 cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]); 295 cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);
296 296
297 if (lfunc < 0x80000006) 297 if (lfunc < 0x80000006)
298 return; 298 return;
299 299
300 /* Determine L2 cache/TLB info. */ 300 /* Determine L2 cache/TLB info. */
301 x86_cpuid(0x80000006, descs); 301 x86_cpuid(0x80000006, descs);
302 302
303 cai = &ci->ci_cinfo[CAI_L2CACHE]; 303 cai = &ci->ci_cinfo[CAI_L2CACHE];
304 cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]); 304 cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
305 cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]); 305 cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
306 cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]); 306 cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);
307 307
308 cp = cache_info_lookup(amd_cpuid_l2l3cache_assoc_info, 308 cp = cache_info_lookup(amd_cpuid_l2l3cache_assoc_info,
309 cai->cai_associativity); 309 cai->cai_associativity);
310 if (cp != NULL) 310 if (cp != NULL)
311 cai->cai_associativity = cp->cai_associativity; 311 cai->cai_associativity = cp->cai_associativity;
312 else 312 else
313 cai->cai_associativity = 0; /* XXX Unknown/reserved */ 313 cai->cai_associativity = 0; /* XXX Unknown/reserved */
314 314
315 if (family < 0xf) 315 if (family < 0xf)
316 return; 316 return;
317 317
318 /* Determine L3 cache info on AMD Family 10h and newer processors */ 318 /* Determine L3 cache info on AMD Family 10h and newer processors */
319 cai = &ci->ci_cinfo[CAI_L3CACHE]; 319 cai = &ci->ci_cinfo[CAI_L3CACHE];
320 cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]); 320 cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
321 cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]); 321 cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
322 cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]); 322 cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);
323 323
324 cp = cache_info_lookup(amd_cpuid_l2l3cache_assoc_info, 324 cp = cache_info_lookup(amd_cpuid_l2l3cache_assoc_info,
325 cai->cai_associativity); 325 cai->cai_associativity);
326 if (cp != NULL) 326 if (cp != NULL)
327 cai->cai_associativity = cp->cai_associativity; 327 cai->cai_associativity = cp->cai_associativity;
328 else 328 else
329 cai->cai_associativity = 0; /* XXX Unknown reserved */ 329 cai->cai_associativity = 0; /* XXX Unknown reserved */
330 330
331 if (lfunc < 0x80000019) 331 if (lfunc < 0x80000019)
332 return; 332 return;
333 333
334 /* Determine 1GB TLB info. */ 334 /* Determine 1GB TLB info. */
335 x86_cpuid(0x80000019, descs); 335 x86_cpuid(0x80000019, descs);
336 336
337 cai = &ci->ci_cinfo[CAI_L1_1GBDTLB]; 337 cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
338 cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]); 338 cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
339 cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]); 339 cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
340 cai->cai_linesize = (1 * 1024); 340 cai->cai_linesize = (1 * 1024);
341 341
342 cai = &ci->ci_cinfo[CAI_L1_1GBITLB]; 342 cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
343 cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]); 343 cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
344 cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]); 344 cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
345 cai->cai_linesize = (1 * 1024); 345 cai->cai_linesize = (1 * 1024);
346 346
347 cai = &ci->ci_cinfo[CAI_L2_1GBDTLB]; 347 cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
348 cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]); 348 cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
349 cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]); 349 cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
350 cai->cai_linesize = (1 * 1024); 350 cai->cai_linesize = (1 * 1024);
351 351
352 cai = &ci->ci_cinfo[CAI_L2_1GBITLB]; 352 cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
353 cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]); 353 cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
354 cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]); 354 cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
355 cai->cai_linesize = (1 * 1024); 355 cai->cai_linesize = (1 * 1024);
356 356
357 if (lfunc < 0x8000001d) 357 if (lfunc < 0x8000001d)
358 return; 358 return;
359 359
360 if (ci->ci_feat_val[3] & CPUID_TOPOEXT) 360 if (ci->ci_feat_val[3] & CPUID_TOPOEXT)
361 cpu_dcp_cacheinfo(ci, 0x8000001d); 361 cpu_dcp_cacheinfo(ci, 0x8000001d);
362} 362}
363 363
364static void 364static void
365cpu_probe_amd_errata(struct cpu_info *ci) 365cpu_probe_amd_errata(struct cpu_info *ci)
366{ 366{
367 u_int model; 367 u_int model;
368 uint64_t val; 368 uint64_t val;
369 int flag; 369 int flag;
370 370
371 model = CPUID_TO_MODEL(ci->ci_signature); 371 model = CPUID_TO_MODEL(ci->ci_signature);
372 372
373 switch (CPUID_TO_FAMILY(ci->ci_signature)) { 373 switch (CPUID_TO_FAMILY(ci->ci_signature)) {
374 case 0x05: /* K5 */ 374 case 0x05: /* K5 */
375 if (model == 0) { 375 if (model == 0) {
376 /* 376 /*
377 * According to the AMD Processor Recognition App Note, 377 * According to the AMD Processor Recognition App Note,
378 * the AMD-K5 Model 0 uses the wrong bit to indicate 378 * the AMD-K5 Model 0 uses the wrong bit to indicate
379 * support for global PTEs, instead using bit 9 (APIC) 379 * support for global PTEs, instead using bit 9 (APIC)
380 * rather than bit 13 (i.e. "0x200" vs. 0x2000"). 380 * rather than bit 13 (i.e. "0x200" vs. 0x2000").
381 */ 381 */
382 flag = ci->ci_feat_val[0]; 382 flag = ci->ci_feat_val[0];
383 if ((flag & CPUID_APIC) != 0) 383 if ((flag & CPUID_APIC) != 0)
384 flag = (flag & ~CPUID_APIC) | CPUID_PGE; 384 flag = (flag & ~CPUID_APIC) | CPUID_PGE;
385 ci->ci_feat_val[0] = flag; 385 ci->ci_feat_val[0] = flag;
386 } 386 }
387 break; 387 break;
388 388
389 case 0x10: /* Family 10h */ 389 case 0x10: /* Family 10h */
390 /* 390 /*
391 * On Family 10h, certain BIOSes do not enable WC+ support. 391 * On Family 10h, certain BIOSes do not enable WC+ support.
392 * This causes WC+ to become CD, and degrades guest 392 * This causes WC+ to become CD, and degrades guest
393 * performance at the NPT level. 393 * performance at the NPT level.
394 * 394 *
395 * Explicitly enable WC+ if we're not a guest. 395 * Explicitly enable WC+ if we're not a guest.
396 */ 396 */
397 if (!ISSET(ci->ci_feat_val[1], CPUID2_RAZ)) { 397 if (!ISSET(ci->ci_feat_val[1], CPUID2_RAZ)) {
398 val = rdmsr(MSR_BU_CFG2); 398 val = rdmsr(MSR_BU_CFG2);
399 val &= ~BU_CFG2_CWPLUS_DIS; 399 val &= ~BU_CFG2_CWPLUS_DIS;
400 wrmsr(MSR_BU_CFG2, val); 400 wrmsr(MSR_BU_CFG2, val);
401 } 401 }
402 break; 402 break;
403 403
404 case 0x17: 404 case 0x17:
405 /* 405 /*
406 * "Revision Guide for AMD Family 17h Models 00h-0Fh 406 * "Revision Guide for AMD Family 17h Models 00h-0Fh
407 * Processors" revision 1.12: 407 * Processors" revision 1.12:
408 * 408 *
409 * 1057 MWAIT or MWAITX Instructions May Fail to Correctly 409 * 1057 MWAIT or MWAITX Instructions May Fail to Correctly
410 * Exit From the Monitor Event Pending State 410 * Exit From the Monitor Event Pending State
411 * 411 *
412 * 1109 MWAIT Instruction May Hang a Thread 412 * 1109 MWAIT Instruction May Hang a Thread
413 */ 413 */
414 if (model == 0x01) { 414 if (model == 0x01) {
415 cpu_feature[1] &= ~CPUID2_MONITOR; 415 cpu_feature[1] &= ~CPUID2_MONITOR;
416 ci->ci_feat_val[1] &= ~CPUID2_MONITOR; 416 ci->ci_feat_val[1] &= ~CPUID2_MONITOR;
417 } 417 }
418 break; 418 break;
419 } 419 }
420} 420}
421 421
422static void 422static void
423cpu_probe_amd(struct cpu_info *ci) 423cpu_probe_amd(struct cpu_info *ci)
424{ 424{
425 425
426 if (cpu_vendor != CPUVENDOR_AMD) 426 if (cpu_vendor != CPUVENDOR_AMD)
427 return; 427 return;
428 428
429 cpu_probe_amd_cache(ci); 429 cpu_probe_amd_cache(ci);
430 cpu_probe_amd_errata(ci); 430 cpu_probe_amd_errata(ci);
431} 431}
432 432
433static inline uint8_t 433static inline uint8_t
434cyrix_read_reg(uint8_t reg) 434cyrix_read_reg(uint8_t reg)
435{ 435{
436 436
437 outb(0x22, reg); 437 outb(0x22, reg);
438 return inb(0x23); 438 return inb(0x23);
439} 439}
440 440
441static inline void 441static inline void
442cyrix_write_reg(uint8_t reg, uint8_t data) 442cyrix_write_reg(uint8_t reg, uint8_t data)
443{ 443{
444 444
445 outb(0x22, reg); 445 outb(0x22, reg);
446 outb(0x23, data); 446 outb(0x23, data);
447} 447}
448 448
449static void 449static void
450cpu_probe_cyrix_cmn(struct cpu_info *ci) 450cpu_probe_cyrix_cmn(struct cpu_info *ci)
451{ 451{
452 /* 452 /*
453 * i8254 latch check routine: 453 * i8254 latch check routine:
454 * National Geode (formerly Cyrix MediaGX) has a serious bug in 454 * National Geode (formerly Cyrix MediaGX) has a serious bug in
455 * its built-in i8254-compatible clock module (cs5510 cs5520). 455 * its built-in i8254-compatible clock module (cs5510 cs5520).
456 * Set the variable 'clock_broken_latch' to indicate it. 456 * Set the variable 'clock_broken_latch' to indicate it.
457 * 457 *
458 * This bug is not present in the cs5530, and the flag 458 * This bug is not present in the cs5530, and the flag
459 * is disabled again in sys/arch/i386/pci/pcib.c if this later 459 * is disabled again in sys/arch/i386/pci/pcib.c if this later
460 * model device is detected. Ideally, this work-around should not 460 * model device is detected. Ideally, this work-around should not
461 * even be in here, it should be in there. XXX 461 * even be in here, it should be in there. XXX
462 */ 462 */
463 uint8_t c3; 463 uint8_t c3;
464#ifndef XEN 464#ifndef XEN
465 extern int clock_broken_latch; 465 extern int clock_broken_latch;
466 466
467 switch (ci->ci_signature) { 467 switch (ci->ci_signature) {
468 case 0x440: /* Cyrix MediaGX */ 468 case 0x440: /* Cyrix MediaGX */
469 case 0x540: /* GXm */ 469 case 0x540: /* GXm */
470 clock_broken_latch = 1; 470 clock_broken_latch = 1;
471 break; 471 break;
472 } 472 }
473#endif 473#endif
474 474
475 /* set up various cyrix registers */ 475 /* set up various cyrix registers */
476 /* 476 /*
477 * Enable suspend on halt (powersave mode). 477 * Enable suspend on halt (powersave mode).
478 * When powersave mode is enabled, the TSC stops counting 478 * When powersave mode is enabled, the TSC stops counting
479 * while the CPU is halted in idle() waiting for an interrupt. 479 * while the CPU is halted in idle() waiting for an interrupt.
480 * This means we can't use the TSC for interval time in 480 * This means we can't use the TSC for interval time in
481 * microtime(9), and thus it is disabled here. 481 * microtime(9), and thus it is disabled here.
482 * 482 *
483 * It still makes a perfectly good cycle counter 483 * It still makes a perfectly good cycle counter
484 * for program profiling, so long as you remember you're 484 * for program profiling, so long as you remember you're
485 * counting cycles, and not time. Further, if you don't 485 * counting cycles, and not time. Further, if you don't
486 * mind not using powersave mode, the TSC works just fine, 486 * mind not using powersave mode, the TSC works just fine,
487 * so this should really be optional. XXX 487 * so this should really be optional. XXX
488 */ 488 */
489 cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08); 489 cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);
490 490
491 /*  491 /*
492 * Do not disable the TSC on the Geode GX, it's reported to 492 * Do not disable the TSC on the Geode GX, it's reported to
493 * work fine. 493 * work fine.
494 */ 494 */
495 if (ci->ci_signature != 0x552) 495 if (ci->ci_signature != 0x552)
496 ci->ci_feat_val[0] &= ~CPUID_TSC; 496 ci->ci_feat_val[0] &= ~CPUID_TSC;
497 497
498 /* enable access to ccr4/ccr5 */ 498 /* enable access to ccr4/ccr5 */
499 c3 = cyrix_read_reg(0xC3); 499 c3 = cyrix_read_reg(0xC3);
500 cyrix_write_reg(0xC3, c3 | 0x10); 500 cyrix_write_reg(0xC3, c3 | 0x10);
501 /* cyrix's workaround for the "coma bug" */ 501 /* cyrix's workaround for the "coma bug" */
502 cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8); 502 cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
503 cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f); 503 cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
504 cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff); 504 cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
505 cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87); 505 cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
506 /* disable access to ccr4/ccr5 */ 506 /* disable access to ccr4/ccr5 */
507 cyrix_write_reg(0xC3, c3); 507 cyrix_write_reg(0xC3, c3);
508} 508}
509 509
510static void 510static void
511cpu_probe_cyrix(struct cpu_info *ci) 511cpu_probe_cyrix(struct cpu_info *ci)
512{ 512{
513 513
514 if (cpu_vendor != CPUVENDOR_CYRIX || 514 if (cpu_vendor != CPUVENDOR_CYRIX ||
515 CPUID_TO_FAMILY(ci->ci_signature) < 4 || 515 CPUID_TO_FAMILY(ci->ci_signature) < 4 ||
516 CPUID_TO_FAMILY(ci->ci_signature) > 6) 516 CPUID_TO_FAMILY(ci->ci_signature) > 6)
517 return; 517 return;
518 518
519 cpu_probe_cyrix_cmn(ci); 519 cpu_probe_cyrix_cmn(ci);
520} 520}
521 521
522static void 522static void
523cpu_probe_winchip(struct cpu_info *ci) 523cpu_probe_winchip(struct cpu_info *ci)
524{ 524{
525 525
526 if (cpu_vendor != CPUVENDOR_IDT || 526 if (cpu_vendor != CPUVENDOR_IDT ||
527 CPUID_TO_FAMILY(ci->ci_signature) != 5) 527 CPUID_TO_FAMILY(ci->ci_signature) != 5)
528 return; 528 return;
529 529
530 /* WinChip C6 */ 530 /* WinChip C6 */
531 if (CPUID_TO_MODEL(ci->ci_signature) == 4) 531 if (CPUID_TO_MODEL(ci->ci_signature) == 4)
532 ci->ci_feat_val[0] &= ~CPUID_TSC; 532 ci->ci_feat_val[0] &= ~CPUID_TSC;
533} 533}
534 534
535static void 535static void
536cpu_probe_c3(struct cpu_info *ci) 536cpu_probe_c3(struct cpu_info *ci)
537{ 537{
538 u_int family, model, stepping, descs[4], lfunc, msr; 538 u_int family, model, stepping, descs[4], lfunc, msr;
539 struct x86_cache_info *cai; 539 struct x86_cache_info *cai;
540 540
541 if (cpu_vendor != CPUVENDOR_IDT || 541 if (cpu_vendor != CPUVENDOR_IDT ||
542 CPUID_TO_FAMILY(ci->ci_signature) < 6) 542 CPUID_TO_FAMILY(ci->ci_signature) < 6)
543 return; 543 return;
544 544
545 family = CPUID_TO_FAMILY(ci->ci_signature); 545 family = CPUID_TO_FAMILY(ci->ci_signature);
546 model = CPUID_TO_MODEL(ci->ci_signature); 546 model = CPUID_TO_MODEL(ci->ci_signature);
547 stepping = CPUID_TO_STEPPING(ci->ci_signature); 547 stepping = CPUID_TO_STEPPING(ci->ci_signature);
548 548
549 /* Determine the largest extended function value. */ 549 /* Determine the largest extended function value. */
550 x86_cpuid(0x80000000, descs); 550 x86_cpuid(0x80000000, descs);
551 lfunc = descs[0]; 551 lfunc = descs[0];
552 552
553 if (family == 6) { 553 if (family == 6) {
554 /* 554 /*
555 * VIA Eden ESP. 555 * VIA Eden ESP.
556 * 556 *
557 * Quoting from page 3-4 of: "VIA Eden ESP Processor Datasheet" 557 * Quoting from page 3-4 of: "VIA Eden ESP Processor Datasheet"
558 * http://www.via.com.tw/download/mainboards/6/14/Eden20v115.pdf 558 * http://www.via.com.tw/download/mainboards/6/14/Eden20v115.pdf
559 *  559 *
560 * 1. The CMPXCHG8B instruction is provided and always enabled, 560 * 1. The CMPXCHG8B instruction is provided and always enabled,
561 * however, it appears disabled in the corresponding CPUID 561 * however, it appears disabled in the corresponding CPUID
562 * function bit 0 to avoid a bug in an early version of 562 * function bit 0 to avoid a bug in an early version of
563 * Windows NT. However, this default can be changed via a 563 * Windows NT. However, this default can be changed via a
564 * bit in the FCR MSR. 564 * bit in the FCR MSR.
565 */ 565 */
566 ci->ci_feat_val[0] |= CPUID_CX8; 566 ci->ci_feat_val[0] |= CPUID_CX8;
567 wrmsr(MSR_VIA_FCR, rdmsr(MSR_VIA_FCR) | VIA_ACE_ECX8); 567 wrmsr(MSR_VIA_FCR, rdmsr(MSR_VIA_FCR) | VIA_ACE_ECX8);
568 } 568 }
569 569
570 if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) { 570 if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
571 /* VIA Nehemiah or Esther. */ 571 /* VIA Nehemiah or Esther. */
572 x86_cpuid(0xc0000000, descs); 572 x86_cpuid(0xc0000000, descs);
573 lfunc = descs[0]; 573 lfunc = descs[0];
574 if (lfunc >= 0xc0000001) { /* has ACE, RNG */ 574 if (lfunc >= 0xc0000001) { /* has ACE, RNG */
575 int rng_enable = 0, ace_enable = 0; 575 int rng_enable = 0, ace_enable = 0;
576 x86_cpuid(0xc0000001, descs); 576 x86_cpuid(0xc0000001, descs);
577 lfunc = descs[3]; 577 lfunc = descs[3];
578 ci->ci_feat_val[4] = lfunc; 578 ci->ci_feat_val[4] = lfunc;
579 /* Check for and enable RNG */ 579 /* Check for and enable RNG */
580 if (lfunc & CPUID_VIA_HAS_RNG) { 580 if (lfunc & CPUID_VIA_HAS_RNG) {
581 if (!(lfunc & CPUID_VIA_DO_RNG)) { 581 if (!(lfunc & CPUID_VIA_DO_RNG)) {
582 rng_enable++; 582 rng_enable++;
583 ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG; 583 ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG;
584 } 584 }
585 } 585 }
586 /* Check for and enable ACE (AES-CBC) */ 586 /* Check for and enable ACE (AES-CBC) */
587 if (lfunc & CPUID_VIA_HAS_ACE) { 587 if (lfunc & CPUID_VIA_HAS_ACE) {
588 if (!(lfunc & CPUID_VIA_DO_ACE)) { 588 if (!(lfunc & CPUID_VIA_DO_ACE)) {
589 ace_enable++; 589 ace_enable++;
590 ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE; 590 ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE;
591 } 591 }
592 } 592 }
593 /* Check for and enable SHA */ 593 /* Check for and enable SHA */
594 if (lfunc & CPUID_VIA_HAS_PHE) { 594 if (lfunc & CPUID_VIA_HAS_PHE) {
595 if (!(lfunc & CPUID_VIA_DO_PHE)) { 595 if (!(lfunc & CPUID_VIA_DO_PHE)) {
596 ace_enable++; 596 ace_enable++;
597 ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE; 597 ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE;
598 } 598 }
599 } 599 }
600 /* Check for and enable ACE2 (AES-CTR) */ 600 /* Check for and enable ACE2 (AES-CTR) */
601 if (lfunc & CPUID_VIA_HAS_ACE2) { 601 if (lfunc & CPUID_VIA_HAS_ACE2) {
602 if (!(lfunc & CPUID_VIA_DO_ACE2)) { 602 if (!(lfunc & CPUID_VIA_DO_ACE2)) {
603 ace_enable++; 603 ace_enable++;
604 ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2; 604 ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2;
605 } 605 }
606 } 606 }
607 /* Check for and enable PMM (modmult engine) */ 607 /* Check for and enable PMM (modmult engine) */
608 if (lfunc & CPUID_VIA_HAS_PMM) { 608 if (lfunc & CPUID_VIA_HAS_PMM) {
609 if (!(lfunc & CPUID_VIA_DO_PMM)) { 609 if (!(lfunc & CPUID_VIA_DO_PMM)) {
610 ace_enable++; 610 ace_enable++;
611 ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM; 611 ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM;
612 } 612 }
613 } 613 }
614 614
615 /* 615 /*
616 * Actually do the enables. It's a little gross, 616 * Actually do the enables. It's a little gross,
617 * but per the PadLock programming guide, "Enabling 617 * but per the PadLock programming guide, "Enabling
618 * PadLock", condition 3, we must enable SSE too or 618 * PadLock", condition 3, we must enable SSE too or
619 * else the first use of RNG or ACE instructions 619 * else the first use of RNG or ACE instructions
620 * will generate a trap. 620 * will generate a trap.
621 * 621 *
622 * We must do this early because of kernel RNG 622 * We must do this early because of kernel RNG
623 * initialization but it is safe without the full 623 * initialization but it is safe without the full
624 * FPU-detect as all these CPUs have SSE. 624 * FPU-detect as all these CPUs have SSE.
625 */ 625 */
626 lcr4(rcr4() | CR4_OSFXSR); 626 lcr4(rcr4() | CR4_OSFXSR);
627 627
628 if (rng_enable) { 628 if (rng_enable) {
629 msr = rdmsr(MSR_VIA_RNG); 629 msr = rdmsr(MSR_VIA_RNG);
630 msr |= MSR_VIA_RNG_ENABLE; 630 msr |= MSR_VIA_RNG_ENABLE;
631 /* C7 stepping 8 and subsequent CPUs have dual RNG */ 631 /* C7 stepping 8 and subsequent CPUs have dual RNG */
632 if (model > 0xA || (model == 0xA && stepping > 0x7)) { 632 if (model > 0xA || (model == 0xA && stepping > 0x7)) {
633 msr |= MSR_VIA_RNG_2NOISE; 633 msr |= MSR_VIA_RNG_2NOISE;
634 } 634 }
635 wrmsr(MSR_VIA_RNG, msr); 635 wrmsr(MSR_VIA_RNG, msr);
636 } 636 }
637 637
638 if (ace_enable) { 638 if (ace_enable) {
639 msr = rdmsr(MSR_VIA_ACE); 639 msr = rdmsr(MSR_VIA_ACE);
640 wrmsr(MSR_VIA_ACE, msr | VIA_ACE_ENABLE); 640 wrmsr(MSR_VIA_ACE, msr | VIA_ACE_ENABLE);
641 } 641 }
642 } 642 }
643 } 643 }
644 644
645 /* Explicitly disable unsafe ALTINST mode. */ 645 /* Explicitly disable unsafe ALTINST mode. */
646 if (ci->ci_feat_val[4] & CPUID_VIA_DO_ACE) { 646 if (ci->ci_feat_val[4] & CPUID_VIA_DO_ACE) {
647 msr = rdmsr(MSR_VIA_ACE); 647 msr = rdmsr(MSR_VIA_ACE);
648 wrmsr(MSR_VIA_ACE, msr & ~VIA_ACE_ALTINST); 648 wrmsr(MSR_VIA_ACE, msr & ~VIA_ACE_ALTINST);
649 }  649 }
650 650
651 /* 651 /*
652 * Determine L1 cache/TLB info. 652 * Determine L1 cache/TLB info.
653 */ 653 */
654 if (lfunc < 0x80000005) { 654 if (lfunc < 0x80000005) {
655 /* No L1 cache info available. */ 655 /* No L1 cache info available. */
656 return; 656 return;
657 } 657 }
658 658
659 x86_cpuid(0x80000005, descs); 659 x86_cpuid(0x80000005, descs);
660 660
661 cai = &ci->ci_cinfo[CAI_ITLB]; 661 cai = &ci->ci_cinfo[CAI_ITLB];
662 cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]); 662 cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
663 cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]); 663 cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
664 cai->cai_linesize = (4 * 1024); 664 cai->cai_linesize = (4 * 1024);
665 665
666 cai = &ci->ci_cinfo[CAI_DTLB]; 666 cai = &ci->ci_cinfo[CAI_DTLB];
667 cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]); 667 cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
668 cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]); 668 cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
669 cai->cai_linesize = (4 * 1024); 669 cai->cai_linesize = (4 * 1024);
670 670
671 cai = &ci->ci_cinfo[CAI_DCACHE]; 671 cai = &ci->ci_cinfo[CAI_DCACHE];
672 cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]); 672 cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
673 cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]); 673 cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
674 cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]); 674 cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
675 if (family == 6 && model == 9 && stepping == 8) { 675 if (family == 6 && model == 9 && stepping == 8) {
676 /* Erratum: stepping 8 reports 4 when it should be 2 */ 676 /* Erratum: stepping 8 reports 4 when it should be 2 */
677 cai->cai_associativity = 2; 677 cai->cai_associativity = 2;
678 } 678 }
679 679
680 cai = &ci->ci_cinfo[CAI_ICACHE]; 680 cai = &ci->ci_cinfo[CAI_ICACHE];
681 cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]); 681 cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
682 cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]); 682 cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
683 cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]); 683 cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
684 if (family == 6 && model == 9 && stepping == 8) { 684 if (family == 6 && model == 9 && stepping == 8) {
685 /* Erratum: stepping 8 reports 4 when it should be 2 */ 685 /* Erratum: stepping 8 reports 4 when it should be 2 */
686 cai->cai_associativity = 2; 686 cai->cai_associativity = 2;
687 } 687 }
688 688
689 /* 689 /*
690 * Determine L2 cache/TLB info. 690 * Determine L2 cache/TLB info.
691 */ 691 */
692 if (lfunc < 0x80000006) { 692 if (lfunc < 0x80000006) {
693 /* No L2 cache info available. */ 693 /* No L2 cache info available. */
694 return; 694 return;
695 } 695 }
696 696
697 x86_cpuid(0x80000006, descs); 697 x86_cpuid(0x80000006, descs);
698 698
699 cai = &ci->ci_cinfo[CAI_L2CACHE]; 699 cai = &ci->ci_cinfo[CAI_L2CACHE];
700 if (family > 6 || model >= 9) { 700 if (family > 6 || model >= 9) {
701 cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]); 701 cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
702 cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]); 702 cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
703 cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]); 703 cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
704 } else { 704 } else {
705 cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]); 705 cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
706 cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]); 706 cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
707 cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]); 707 cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
708 } 708 }
709} 709}
710 710
711static void 711static void
712cpu_probe_geode(struct cpu_info *ci) 712cpu_probe_geode(struct cpu_info *ci)
713{ 713{
714 714
715 if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 || 715 if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
716 CPUID_TO_FAMILY(ci->ci_signature) != 5) 716 CPUID_TO_FAMILY(ci->ci_signature) != 5)
717 return; 717 return;
718 718
719 cpu_probe_cyrix_cmn(ci); 719 cpu_probe_cyrix_cmn(ci);
720 cpu_probe_amd_cache(ci); 720 cpu_probe_amd_cache(ci);
721} 721}
722 722
723static void 723static void
724cpu_probe_vortex86(struct cpu_info *ci) 724cpu_probe_vortex86(struct cpu_info *ci)
725{ 725{
726#define PCI_MODE1_ADDRESS_REG 0x0cf8 726#define PCI_MODE1_ADDRESS_REG 0x0cf8
727#define PCI_MODE1_DATA_REG 0x0cfc 727#define PCI_MODE1_DATA_REG 0x0cfc
728#define PCI_MODE1_ENABLE 0x80000000UL 728#define PCI_MODE1_ENABLE 0x80000000UL
729 729
730 uint32_t reg, idx; 730 uint32_t reg, idx;
731 731
732 if (cpu_vendor != CPUVENDOR_VORTEX86) 732 if (cpu_vendor != CPUVENDOR_VORTEX86)
733 return; 733 return;
734 /* 734 /*
735 * CPU model available from "Customer ID register" in 735 * CPU model available from "Customer ID register" in
736 * North Bridge Function 0 PCI space 736 * North Bridge Function 0 PCI space
737 * we can't use pci_conf_read() because the PCI subsystem is not 737 * we can't use pci_conf_read() because the PCI subsystem is not
738 * not initialised early enough 738 * not initialised early enough
739 */ 739 */
740 740
741 outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90); 741 outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90);
742 reg = inl(PCI_MODE1_DATA_REG); 742 reg = inl(PCI_MODE1_DATA_REG);
743 743
744 if ((reg & 0xf0ffffff) != 0x30504d44) { 744 if ((reg & 0xf0ffffff) != 0x30504d44) {
745 idx = 0; 745 idx = 0;
746 } else { 746 } else {
747 idx = (reg >> 24) & 0xf; 747 idx = (reg >> 24) & 0xf;
748 } 748 }
749 749
750 static const char *cpu_vortex86_flavor[] = { 750 static const char *cpu_vortex86_flavor[] = {
751 "??", "SX", "DX", "MX", "DX2", "MX+", "DX3", "EX", "EX2", 751 "??", "SX", "DX", "MX", "DX2", "MX+", "DX3", "EX", "EX2",
752 }; 752 };
753 idx = idx < __arraycount(cpu_vortex86_flavor) ? idx : 0; 753 idx = idx < __arraycount(cpu_vortex86_flavor) ? idx : 0;
754 snprintf(cpu_brand_string, sizeof(cpu_brand_string), "Vortex86%s", 754 snprintf(cpu_brand_string, sizeof(cpu_brand_string), "Vortex86%s",
755 cpu_vortex86_flavor[idx]); 755 cpu_vortex86_flavor[idx]);
756 756
757#undef PCI_MODE1_ENABLE 757#undef PCI_MODE1_ENABLE
758#undef PCI_MODE1_ADDRESS_REG 758#undef PCI_MODE1_ADDRESS_REG
759#undef PCI_MODE1_DATA_REG 759#undef PCI_MODE1_DATA_REG
760} 760}
761 761
/*
 * Legacy FPU probe for CPUs without FXSAVE (or without cpuid at all).
 * Only does real work on i386 outside of Xen PV; in all other
 * configurations the body is compiled out and 'ci' is unused.
 */
static void
cpu_probe_fpu_old(struct cpu_info *ci)
{
#if defined(__i386__) && !defined(XENPV)

	/* Clear CR0.TS so FPU instructions don't trap, then reset FPU. */
	clts();
	fninit();

	/* Check for 'FDIV' bug on the original Pentium */
	if (npx586bug1(4195835, 3145727) != 0)
		/* NB 120+MHz cpus are not affected */
		i386_fpu_fdivbug = 1;

	/* Set CR0.TS again. */
	stts();
#endif
}
778 778
/*
 * Determine which FPU save/restore mechanism to use (FSAVE, FXSAVE,
 * XSAVE or XSAVEOPT) and the size of the save area, and — when XSAVE
 * is available — record the supported extended state components and
 * their offsets/sizes within the XSAVE area.
 */
static void
cpu_probe_fpu(struct cpu_info *ci)
{
	u_int descs[4];
	int i;

	/* Most conservative default; upgraded below as features allow. */
	x86_fpu_save = FPU_SAVE_FSAVE;

#ifdef i386
	/* If we have FXSAVE/FXRESTOR, use them. */
	if ((ci->ci_feat_val[0] & CPUID_FXSR) == 0) {
		i386_use_fxsave = 0;
		/* Fall back to the legacy probe (FDIV-bug check etc.). */
		cpu_probe_fpu_old(ci);
		return;
	}

	i386_use_fxsave = 1;
	/*
	 * If we have SSE/SSE2, enable XMM exceptions, and
	 * notify userland.
	 */
	if (ci->ci_feat_val[0] & CPUID_SSE)
		i386_has_sse = 1;
	if (ci->ci_feat_val[0] & CPUID_SSE2)
		i386_has_sse2 = 1;
#else
	/*
	 * For amd64 i386_use_fxsave, i386_has_sse and i386_has_sse2 are
	 * #defined to 1, because fxsave/sse/sse2 are always present.
	 */
#endif

	x86_fpu_save = FPU_SAVE_FXSAVE;
	x86_fpu_save_size = sizeof(struct fxsave);

	/* See if XSAVE is supported */
	if ((ci->ci_feat_val[1] & CPUID2_XSAVE) == 0)
		return;

#ifdef XENPV
	/*
	 * Xen kernel can disable XSAVE via "no-xsave" option, in that case
	 * the XSAVE/XRSTOR instructions become privileged and trigger
	 * supervisor trap. OSXSAVE flag seems to be reliably set according
	 * to whether XSAVE is actually available.
	 */
	if ((ci->ci_feat_val[1] & CPUID2_OSXSAVE) == 0)
		return;
#endif

	x86_fpu_save = FPU_SAVE_XSAVE;

	/* Leaf 0xd sub-leaf 1: check for the XSAVEOPT variant. */
	x86_cpuid2(0xd, 1, descs);
	if (descs[0] & CPUID_PES1_XSAVEOPT)
		x86_fpu_save = FPU_SAVE_XSAVEOPT;

	/* Get features and maximum size of the save area */
	x86_cpuid(0xd, descs);
	if (descs[2] > sizeof(struct fxsave))
		x86_fpu_save_size = descs[2];

	/* Supported state-component bitmap: %edx:%eax of leaf 0xd/0. */
	x86_xsave_features = (uint64_t)descs[3] << 32 | descs[0];

	/* Get component offsets and sizes for the save area */
	for (i = XSAVE_YMM_Hi128; i < __arraycount(x86_xsave_offsets); i++) {
		if (x86_xsave_features & __BIT(i)) {
			x86_cpuid2(0xd, i, descs);
			x86_xsave_offsets[i] = descs[1];
			x86_xsave_sizes[i] = descs[0];
		}
	}
}
851 851
/*
 * Probe the CPU we are running on: vendor, signature
 * (family/model/stepping), feature flags and brand string, using the
 * cpuid instruction and the vendor-specific probe routines.  On the
 * boot processor this also establishes the cpu_feature[] reference
 * set (secondary CPUs are compared against it and mismatches
 * reported), identifies any hypervisor and applies early text
 * patches.  Called once per CPU with that CPU's cpu_info.
 */
void
cpu_probe(struct cpu_info *ci)
{
	u_int descs[4];
	int i;
	uint32_t miscbytes;
	uint32_t brand[12];

	if (ci == &cpu_info_primary) {
		/*
		 * Defaults from the static table for cpuid-less CPUs:
		 * two entries per cputype, vendor then class.
		 */
		cpu_vendor = i386_nocpuid_cpus[cputype << 1];
		cpu_class = i386_nocpuid_cpus[(cputype << 1) + 1];
	}

	if (cpuid_level < 0) {
		/* cpuid instruction not supported */
		cpu_probe_fpu_old(ci);
		return;
	}

	for (i = 0; i < __arraycount(ci->ci_feat_val); i++) {
		ci->ci_feat_val[i] = 0;
	}

	/* Leaf 0: maximum basic leaf number and the vendor string. */
	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	ci->ci_max_cpuid = descs[0];

	/* The 12-byte vendor string comes back in %ebx, %edx, %ecx order. */
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (ci == &cpu_info_primary) {
		if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
			cpu_vendor = CPUVENDOR_INTEL;
		else if (memcmp(ci->ci_vendor, "AuthenticAMD", 12) == 0)
			cpu_vendor = CPUVENDOR_AMD;
		else if (memcmp(ci->ci_vendor, "CyrixInstead", 12) == 0)
			cpu_vendor = CPUVENDOR_CYRIX;
		else if (memcmp(ci->ci_vendor, "Geode by NSC", 12) == 0)
			cpu_vendor = CPUVENDOR_CYRIX;
		else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
			cpu_vendor = CPUVENDOR_IDT;
		else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
			cpu_vendor = CPUVENDOR_TRANSMETA;
		else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0)
			cpu_vendor = CPUVENDOR_VORTEX86;
		else
			cpu_vendor = CPUVENDOR_UNKNOWN;
	}

	if (cpuid_level >= 1) {
		/* Leaf 1: signature, misc bytes, basic feature flags. */
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feat_val[1] = descs[2];
		ci->ci_feat_val[0] = descs[3];

		if (ci == &cpu_info_primary) {
			/* Determine family + class. */
			cpu_class = CPUID_TO_FAMILY(ci->ci_signature)
			    + (CPUCLASS_386 - 3);
			if (cpu_class > CPUCLASS_686)
				cpu_class = CPUCLASS_686;
		}

		/* CLFLUSH line size is next 8 bits */
		if (ci->ci_feat_val[0] & CPUID_CFLUSH)
			ci->ci_cflush_lsize
			    = __SHIFTOUT(miscbytes, CPUID_CLFLUSH_SIZE) << 3;
		ci->ci_initapicid = __SHIFTOUT(miscbytes, CPUID_LOCAL_APIC_ID);
	}

	/*
	 * Get the basic information from the extended cpuid leafs.
	 * These were first implemented by amd, but most of the values
	 * match with those generated by modern intel cpus.
	 */
	x86_cpuid(0x80000000, descs);
	if (descs[0] >= 0x80000000)
		ci->ci_max_ext_cpuid = descs[0];
	else
		ci->ci_max_ext_cpuid = 0;

	if (ci->ci_max_ext_cpuid >= 0x80000001) {
		/* Determine the extended feature flags. */
		x86_cpuid(0x80000001, descs);
		ci->ci_feat_val[3] = descs[2]; /* %ecx */
		ci->ci_feat_val[2] = descs[3]; /* %edx */
	}

	if (ci->ci_max_ext_cpuid >= 0x80000004) {
		/* Leaves 0x80000002-4: the 48-byte processor brand string. */
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
		/* Skip leading spaces on brand */
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	/*
	 * Get the structured extended features.
	 */
	if (cpuid_level >= 7) {
		x86_cpuid(7, descs);
		ci->ci_feat_val[5] = descs[1]; /* %ebx */
		ci->ci_feat_val[6] = descs[2]; /* %ecx */
		ci->ci_feat_val[7] = descs[3]; /* %edx */
	}

	/* Vendor-specific probes; each routine checks whether it applies. */
	cpu_probe_intel(ci);
	cpu_probe_amd(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);
	cpu_probe_vortex86(ci);

	if (ci == &cpu_info_primary) {
		cpu_probe_fpu(ci);
	}

#ifndef XEN
	x86_cpu_topology(ci);
#endif

	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
	}

	ci->ci_feat_val[0] &= ~CPUID_FEAT_BLACKLIST;
	if (ci == &cpu_info_primary) {
		/* If first. Boot Processor is the cpu_feature reference. */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			cpu_feature[i] = ci->ci_feat_val[i];
		}
		identify_hypervisor();
#ifndef XEN
		/* Early patch of text segment. */
		x86_patch(true);
#endif
	} else {
		/*
		 * If not first. Warn about cpu_feature mismatch for
		 * secondary CPUs.
		 */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			if (cpu_feature[i] != ci->ci_feat_val[i])
				aprint_error_dev(ci->ci_dev,
				    "feature mismatch: cpu_feature[%d] is "
				    "%#x, but CPU reported %#x\n",
				    i, cpu_feature[i], ci->ci_feat_val[i]);
		}
	}
}
1012 1012
/*
 * Write what we know about the cpu to the console: model/brand,
 * frequency, signature and topology, plus warnings for known-bad
 * hardware (Pentium FDIV, Cyrix 486DLC cache, BIOS-disabled SVM).
 * Panics if the CPU is only 386-class.
 */
void
cpu_identify(struct cpu_info *ci)
{

	cpu_setmodel("%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	if (cpu_brand_string[0] != '\0') {
		aprint_normal_dev(ci->ci_dev, "%s", cpu_brand_string);
	} else {
		aprint_normal_dev(ci->ci_dev, "%s", cpu_getmodel());
		if (ci->ci_data.cpu_cc_freq != 0)
			aprint_normal(", %dMHz",
			    (int)(ci->ci_data.cpu_cc_freq / 1000000));
	}
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");
	aprint_normal_dev(ci->ci_dev, "node %u, package %u, core %u, smt %u\n",
	    ci->ci_numa_id, ci->ci_package_id, ci->ci_core_id, ci->ci_smt_id);
	if (cpu_brand_string[0] == '\0') {
		/* No brand string from cpuid; fall back to the model name. */
		strlcpy(cpu_brand_string, cpu_getmodel(),
		    sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cputype == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

#if !defined(XENPV) || defined(DOM0OPS) /* on Xen PV rdmsr is for Dom0 only */
	if (cpu_vendor == CPUVENDOR_AMD     /* check enablement of an */
	    && device_unit(ci->ci_dev) == 0 /* AMD feature only once */
	    && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)) {
		uint64_t val;

		/* Report SVM that is present but locked off by the BIOS. */
		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		    && ((val & VMCR_LOCK) == VMCR_LOCK)) {
			aprint_normal_dev(ci->ci_dev,
			    "SVM disabled by the BIOS\n");
		}
	}
#endif

#ifdef i386
	if (i386_fpu_fdivbug == 1)
		aprint_normal_dev(ci->ci_dev,
		    "WARNING: Pentium FDIV bug detected!\n");

	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			/* Create longrun sysctls */
			tmx86_init_longrun();
	}
#endif	/* i386 */

}
1074 1074
1075/* 1075/*
1076 * Hypervisor 1076 * Hypervisor
1077 */ 1077 */
/* Detected hypervisor type; VM_GUEST_NO until proven otherwise. */
vm_guest_t vm_guest = VM_GUEST_NO;

/*
 * Identification strings whose presence indicates a virtual machine.
 * Per the names these are presumably matched against firmware/DMI
 * data by code outside this chunk — verify against the consumers.
 */
static const char * const vm_bios_vendors[] = {
	"QEMU",				/* QEMU */
	"Plex86",			/* Plex86 */
	"Bochs",			/* Bochs */
	"Xen",				/* Xen */
	"BHYVE",			/* bhyve */
	"Seabios",			/* KVM */
};

static const char * const vm_system_products[] = {
	"VMware Virtual Platform",	/* VMWare VM */
	"Virtual Machine",		/* Microsoft VirtualPC */
	"VirtualBox",			/* Sun xVM VirtualBox */
	"Parallels Virtual Platform",	/* Parallels VM */
	"KVM",				/* KVM */
};
1096 1096
1097void 1097void
1098identify_hypervisor(void) 1098identify_hypervisor(void)
1099{ 1099{
1100 u_int regs[6]; 1100 u_int regs[6];
1101 char hv_vendor[12]; 1101 char hv_vendor[12];
1102 const char *p; 1102 const char *p;
1103 int i; 1103 int i;
1104 1104
1105 if (vm_guest != VM_GUEST_NO) 1105 if (vm_guest != VM_GUEST_NO)
1106 return; 1106 return;
1107 1107
1108 /* 1108 /*
1109 * [RFC] CPUID usage for interaction between Hypervisors and Linux. 1109 * [RFC] CPUID usage for interaction between Hypervisors and Linux.
1110 * http://lkml.org/lkml/2008/10/1/246 1110 * http://lkml.org/lkml/2008/10/1/246
1111 * 1111 *
1112 * KB1009458: Mechanisms to determine if software is running in 1112 * KB1009458: Mechanisms to determine if software is running in
1113 * a VMware virtual machine 1113 * a VMware virtual machine
1114 * http://kb.vmware.com/kb/1009458 1114 * http://kb.vmware.com/kb/1009458
1115 */ 1115 */
1116 if (ISSET(cpu_feature[1], CPUID2_RAZ)) { 1116 if (ISSET(cpu_feature[1], CPUID2_RAZ)) {
1117 vm_guest = VM_GUEST_VM; 1117 vm_guest = VM_GUEST_VM;
1118 x86_cpuid(0x40000000, regs); 1118 x86_cpuid(0x40000000, regs);
1119 if (regs[0] >= 0x40000000) { 1119 if (regs[0] >= 0x40000000) {
1120 memcpy(&hv_vendor[0], &regs[1], sizeof(*regs)); 1120 memcpy(&hv_vendor[0], &regs[1], sizeof(*regs));
1121 memcpy(&hv_vendor[4], &regs[2], sizeof(*regs)); 1121 memcpy(&hv_vendor[4], &regs[2], sizeof(*regs));
1122 memcpy(&hv_vendor[8], &regs[3], sizeof(*regs)); 1122 memcpy(&hv_vendor[8], &regs[3], sizeof(*regs));
1123 if (memcmp(hv_vendor, "VMwareVMware", 12) == 0) 1123 if (memcmp(hv_vendor, "VMwareVMware", 12) == 0)
1124 vm_guest = VM_GUEST_VMWARE; 1124 vm_guest = VM_GUEST_VMWARE;
1125 else if (memcmp(hv_vendor, "Microsoft Hv", 12) == 0) { 1125 else if (memcmp(hv_vendor, "Microsoft Hv", 12) == 0) {
1126 vm_guest = VM_GUEST_HV; 1126 vm_guest = VM_GUEST_HV;
1127#if NHYPERV > 0 1127#if NHYPERV > 0
1128 hyperv_early_init(); 1128 hyperv_early_init();
1129#endif 1129#endif
1130 } else if (memcmp(hv_vendor, "KVMKVMKVM\0\0\0", 12) == 0) 1130 } else if (memcmp(hv_vendor, "KVMKVMKVM\0\0\0", 12) == 0)
1131 vm_guest = VM_GUEST_KVM; 1131 vm_guest = VM_GUEST_KVM;
1132 else if (memcmp(hv_vendor, "XenVMMXenVMM", 12) == 0) 1132 else if (memcmp(hv_vendor, "XenVMMXenVMM", 12) == 0)
1133 vm_guest = VM_GUEST_XEN; 1133 vm_guest = VM_GUEST_XEN;
1134 /* FreeBSD bhyve: "bhyve bhyve " */ 1134 /* FreeBSD bhyve: "bhyve bhyve " */
1135 /* OpenBSD vmm: "OpenBSDVMM58" */ 1135 /* OpenBSD vmm: "OpenBSDVMM58" */
1136 /* NetBSD nvmm: "___ NVMM ___" */ 1136 /* NetBSD nvmm: "___ NVMM ___" */
1137 } 1137 }
1138 return; 1138 return;
1139 } 1139 }
1140 1140
1141 /* 1141 /*
1142 * Examine SMBIOS strings for older hypervisors. 1142 * Examine SMBIOS strings for older hypervisors.
1143 */ 1143 */
1144 p = pmf_get_platform("system-serial"); 1144 p = pmf_get_platform("system-serial");
1145 if (p != NULL) { 1145 if (p != NULL) {
1146 if (strncmp(p, "VMware-", 7) == 0 || strncmp(p, "VMW", 3) == 0) { 1146 if (strncmp(p, "VMware-", 7) == 0 || strncmp(p, "VMW", 3) == 0) {
1147 vmt_hvcall(VM_CMD_GET_VERSION, regs); 1147 vmt_hvcall(VM_CMD_GET_VERSION, regs);
1148 if (regs[1] == VM_MAGIC) { 1148 if (regs[1] == VM_MAGIC) {
1149 vm_guest = VM_GUEST_VMWARE; 1149 vm_guest = VM_GUEST_VMWARE;
1150 return; 1150 return;
1151 } 1151 }
1152 } 1152 }
1153 } 1153 }
1154 p = pmf_get_platform("bios-vendor"); 1154 p = pmf_get_platform("bios-vendor");
1155 if (p != NULL) { 1155 if (p != NULL) {
1156 for (i = 0; i < __arraycount(vm_bios_vendors); i++) { 1156 for (i = 0; i < __arraycount(vm_bios_vendors); i++) {
1157 if (strcmp(p, vm_bios_vendors[i]) == 0) { 1157 if (strcmp(p, vm_bios_vendors[i]) == 0) {
1158 vm_guest = VM_GUEST_VM; 1158 vm_guest = VM_GUEST_VM;
1159 return; 1159 return;
1160 } 1160 }
1161 } 1161 }
1162 } 1162 }
1163 p = pmf_get_platform("system-product"); 1163 p = pmf_get_platform("system-product");
1164 if (p != NULL) { 1164 if (p != NULL) {
1165 for (i = 0; i < __arraycount(vm_system_products); i++) { 1165 for (i = 0; i < __arraycount(vm_system_products); i++) {
1166 if (strcmp(p, vm_system_products[i]) == 0) { 1166 if (strcmp(p, vm_system_products[i]) == 0) {
1167 vm_guest = VM_GUEST_VM; 1167 vm_guest = VM_GUEST_VM;
1168 return; 1168 return;
1169 } 1169 }
1170 } 1170 }
1171 } 1171 }
1172} 1172}