Wed Aug 26 17:55:49 2020 UTC ()
Pull up following revision(s) (requested by maxv in ticket #1058):

	sys/dev/nvmm/x86/nvmm_x86_svm.c: revision 1.70
	sys/dev/nvmm/x86/nvmm_x86.h: revision 1.19
	sys/dev/nvmm/x86/nvmm_x86_vmx.c: revision 1.69
	sys/dev/nvmm/x86/nvmm_x86_vmx.c: revision 1.71
	sys/dev/nvmm/x86/nvmm_x86_svm.c: revision 1.69
	sys/dev/nvmm/x86/nvmm_x86.c: revision 1.11
	sys/dev/nvmm/x86/nvmm_x86.c: revision 1.12
	sys/dev/nvmm/x86/nvmm_x86.c: revision 1.13
	sys/dev/nvmm/x86/nvmm_x86.c: revision 1.14

Improve the CPUID emulation:
 - Hide SGX*, PKU, WAITPKG, and SKINIT, because they are not supported.
 - Hide HLE and RTM, part of TSX. Because TSX is just too buggy and we
   cannot guarantee that it remains enabled in the guest (if for example
   the host disables TSX while the guest is running). Nobody wants this
   crap anyway, so bye-bye.
 - Advertise FSREP_MOV, because there is no reason to hide it.

Hide OSPKE. NFC since the host never uses PKU, but still.

Improve the CPUID emulation on nvmm-intel:
 - Limit the highest extended leaf.
 - Limit 0x00000007 to ECX=0, for future-proofing.

nvmm-x86-svm: improve the CPUID emulation

Limit the hypervisor range, and properly handle each basic leaf up to 0xD.

nvmm-x86: advertise the SERIALIZE instruction, available on future CPUs

nvmm-x86: improve the CPUID emulation
 - x86-svm: explicitly handle 0x80000007 and 0x80000008. The latter
   contains extended features we must filter out. Apply the same in
   x86-vmx for symmetry.
 - x86-svm: explicitly handle extended leaves until 0x8000001F, and
   truncate to it.


(martin)
diff -r1.7.4.3 -r1.7.4.4 src/sys/dev/nvmm/x86/nvmm_x86.c
diff -r1.15.4.1 -r1.15.4.2 src/sys/dev/nvmm/x86/nvmm_x86.h
diff -r1.46.4.8 -r1.46.4.9 src/sys/dev/nvmm/x86/nvmm_x86_svm.c
diff -r1.36.2.10 -r1.36.2.11 src/sys/dev/nvmm/x86/nvmm_x86_vmx.c

cvs diff -r1.7.4.3 -r1.7.4.4 src/sys/dev/nvmm/x86/nvmm_x86.c (expand / switch to unified diff)

--- src/sys/dev/nvmm/x86/nvmm_x86.c 2020/08/18 09:29:52 1.7.4.3
+++ src/sys/dev/nvmm/x86/nvmm_x86.c 2020/08/26 17:55:49 1.7.4.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: nvmm_x86.c,v 1.7.4.3 2020/08/18 09:29:52 martin Exp $ */ 1/* $NetBSD: nvmm_x86.c,v 1.7.4.4 2020/08/26 17:55:49 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard. 8 * by Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: nvmm_x86.c,v 1.7.4.3 2020/08/18 09:29:52 martin Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: nvmm_x86.c,v 1.7.4.4 2020/08/26 17:55:49 martin Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/kernel.h> 37#include <sys/kernel.h>
38#include <sys/cpu.h> 38#include <sys/cpu.h>
39 39
40#include <uvm/uvm.h> 40#include <uvm/uvm.h>
41#include <uvm/uvm_page.h> 41#include <uvm/uvm_page.h>
42 42
43#include <x86/cputypes.h> 43#include <x86/cputypes.h>
44#include <x86/specialreg.h> 44#include <x86/specialreg.h>
45#include <x86/pmap.h> 45#include <x86/pmap.h>
46 46
@@ -294,86 +294,86 @@ const struct nvmm_x86_cpuid_mask nvmm_cp @@ -294,86 +294,86 @@ const struct nvmm_x86_cpuid_mask nvmm_cp
294 CPUID_SSE2 | 294 CPUID_SSE2 |
295 CPUID_SS | 295 CPUID_SS |
296 CPUID_HTT | 296 CPUID_HTT |
297 /* CPUID_TM excluded */ 297 /* CPUID_TM excluded */
298 CPUID_IA64 | 298 CPUID_IA64 |
299 CPUID_SBF 299 CPUID_SBF
300}; 300};
301 301
302const struct nvmm_x86_cpuid_mask nvmm_cpuid_00000007 = { 302const struct nvmm_x86_cpuid_mask nvmm_cpuid_00000007 = {
303 .eax = ~0, 303 .eax = ~0,
304 .ebx = 304 .ebx =
305 CPUID_SEF_FSGSBASE | 305 CPUID_SEF_FSGSBASE |
306 /* CPUID_SEF_TSC_ADJUST excluded */ 306 /* CPUID_SEF_TSC_ADJUST excluded */
307 CPUID_SEF_SGX | 307 /* CPUID_SEF_SGX excluded */
308 CPUID_SEF_BMI1 | 308 CPUID_SEF_BMI1 |
309 CPUID_SEF_HLE | 309 /* CPUID_SEF_HLE excluded */
310 /* CPUID_SEF_AVX2 excluded */ 310 /* CPUID_SEF_AVX2 excluded */
311 CPUID_SEF_FDPEXONLY | 311 CPUID_SEF_FDPEXONLY |
312 CPUID_SEF_SMEP | 312 CPUID_SEF_SMEP |
313 CPUID_SEF_BMI2 | 313 CPUID_SEF_BMI2 |
314 CPUID_SEF_ERMS | 314 CPUID_SEF_ERMS |
315 /* CPUID_SEF_INVPCID excluded, but re-included in VMX */ 315 /* CPUID_SEF_INVPCID excluded, but re-included in VMX */
316 CPUID_SEF_RTM | 316 /* CPUID_SEF_RTM excluded */
317 /* CPUID_SEF_QM excluded */ 317 /* CPUID_SEF_QM excluded */
318 CPUID_SEF_FPUCSDS | 318 CPUID_SEF_FPUCSDS |
319 /* CPUID_SEF_MPX excluded */ 319 /* CPUID_SEF_MPX excluded */
320 CPUID_SEF_PQE | 320 CPUID_SEF_PQE |
321 /* CPUID_SEF_AVX512F excluded */ 321 /* CPUID_SEF_AVX512F excluded */
322 /* CPUID_SEF_AVX512DQ excluded */ 322 /* CPUID_SEF_AVX512DQ excluded */
323 CPUID_SEF_RDSEED | 323 CPUID_SEF_RDSEED |
324 CPUID_SEF_ADX | 324 CPUID_SEF_ADX |
325 CPUID_SEF_SMAP | 325 CPUID_SEF_SMAP |
326 /* CPUID_SEF_AVX512_IFMA excluded */ 326 /* CPUID_SEF_AVX512_IFMA excluded */
327 CPUID_SEF_CLFLUSHOPT | 327 CPUID_SEF_CLFLUSHOPT |
328 CPUID_SEF_CLWB, 328 CPUID_SEF_CLWB,
329 /* CPUID_SEF_PT excluded */ 329 /* CPUID_SEF_PT excluded */
330 /* CPUID_SEF_AVX512PF excluded */ 330 /* CPUID_SEF_AVX512PF excluded */
331 /* CPUID_SEF_AVX512ER excluded */ 331 /* CPUID_SEF_AVX512ER excluded */
332 /* CPUID_SEF_AVX512CD excluded */ 332 /* CPUID_SEF_AVX512CD excluded */
333 /* CPUID_SEF_SHA excluded */ 333 /* CPUID_SEF_SHA excluded */
334 /* CPUID_SEF_AVX512BW excluded */ 334 /* CPUID_SEF_AVX512BW excluded */
335 /* CPUID_SEF_AVX512VL excluded */ 335 /* CPUID_SEF_AVX512VL excluded */
336 .ecx = 336 .ecx =
337 CPUID_SEF_PREFETCHWT1 | 337 CPUID_SEF_PREFETCHWT1 |
338 /* CPUID_SEF_AVX512_VBMI excluded */ 338 /* CPUID_SEF_AVX512_VBMI excluded */
339 CPUID_SEF_UMIP | 339 CPUID_SEF_UMIP |
340 CPUID_SEF_PKU | 340 /* CPUID_SEF_PKU excluded */
341 CPUID_SEF_OSPKE | 341 /* CPUID_SEF_OSPKE excluded */
342 CPUID_SEF_WAITPKG | 342 /* CPUID_SEF_WAITPKG excluded */
343 /* CPUID_SEF_AVX512_VBMI2 excluded */ 343 /* CPUID_SEF_AVX512_VBMI2 excluded */
344 /* CPUID_SEF_CET_SS excluded */ 344 /* CPUID_SEF_CET_SS excluded */
345 CPUID_SEF_GFNI | 345 CPUID_SEF_GFNI |
346 CPUID_SEF_VAES | 346 CPUID_SEF_VAES |
347 CPUID_SEF_VPCLMULQDQ | 347 CPUID_SEF_VPCLMULQDQ |
348 /* CPUID_SEF_AVX512_VNNI excluded */ 348 /* CPUID_SEF_AVX512_VNNI excluded */
349 /* CPUID_SEF_AVX512_BITALG excluded */ 349 /* CPUID_SEF_AVX512_BITALG excluded */
350 /* CPUID_SEF_AVX512_VPOPCNTDQ excluded */ 350 /* CPUID_SEF_AVX512_VPOPCNTDQ excluded */
351 /* CPUID_SEF_MAWAU excluded */ 351 /* CPUID_SEF_MAWAU excluded */
352 /* CPUID_SEF_RDPID excluded */ 352 /* CPUID_SEF_RDPID excluded */
353 CPUID_SEF_CLDEMOTE | 353 CPUID_SEF_CLDEMOTE |
354 CPUID_SEF_MOVDIRI | 354 CPUID_SEF_MOVDIRI |
355 CPUID_SEF_MOVDIR64B | 355 CPUID_SEF_MOVDIR64B,
356 CPUID_SEF_SGXLC, 356 /* CPUID_SEF_SGXLC excluded */
357 /* CPUID_SEF_PKS excluded */ 357 /* CPUID_SEF_PKS excluded */
358 .edx = 358 .edx =
359 /* CPUID_SEF_AVX512_4VNNIW excluded */ 359 /* CPUID_SEF_AVX512_4VNNIW excluded */
360 /* CPUID_SEF_AVX512_4FMAPS excluded */ 360 /* CPUID_SEF_AVX512_4FMAPS excluded */
361 /* CPUID_SEF_FSREP_MOV excluded */ 361 CPUID_SEF_FSREP_MOV |
362 /* CPUID_SEF_AVX512_VP2INTERSECT excluded */ 362 /* CPUID_SEF_AVX512_VP2INTERSECT excluded */
363 /* CPUID_SEF_SRBDS_CTRL excluded */ 363 /* CPUID_SEF_SRBDS_CTRL excluded */
364 CPUID_SEF_MD_CLEAR | 364 CPUID_SEF_MD_CLEAR |
365 /* CPUID_SEF_TSX_FORCE_ABORT excluded */ 365 /* CPUID_SEF_TSX_FORCE_ABORT excluded */
366 /* CPUID_SEF_SERIALIZE excluded */ 366 CPUID_SEF_SERIALIZE |
367 /* CPUID_SEF_HYBRID excluded */ 367 /* CPUID_SEF_HYBRID excluded */
368 /* CPUID_SEF_TSXLDTRK excluded */ 368 /* CPUID_SEF_TSXLDTRK excluded */
369 /* CPUID_SEF_CET_IBT excluded */ 369 /* CPUID_SEF_CET_IBT excluded */
370 /* CPUID_SEF_IBRS excluded */ 370 /* CPUID_SEF_IBRS excluded */
371 /* CPUID_SEF_STIBP excluded */ 371 /* CPUID_SEF_STIBP excluded */
372 /* CPUID_SEF_L1D_FLUSH excluded */ 372 /* CPUID_SEF_L1D_FLUSH excluded */
373 CPUID_SEF_ARCH_CAP 373 CPUID_SEF_ARCH_CAP
374 /* CPUID_SEF_CORE_CAP excluded */ 374 /* CPUID_SEF_CORE_CAP excluded */
375 /* CPUID_SEF_SSBD excluded */ 375 /* CPUID_SEF_SSBD excluded */
376}; 376};
377 377
378const struct nvmm_x86_cpuid_mask nvmm_cpuid_80000001 = { 378const struct nvmm_x86_cpuid_mask nvmm_cpuid_80000001 = {
379 .eax = ~0, 379 .eax = ~0,
@@ -381,27 +381,27 @@ const struct nvmm_x86_cpuid_mask nvmm_cp @@ -381,27 +381,27 @@ const struct nvmm_x86_cpuid_mask nvmm_cp
381 .ecx = 381 .ecx =
382 CPUID_LAHF | 382 CPUID_LAHF |
383 CPUID_CMPLEGACY | 383 CPUID_CMPLEGACY |
384 /* CPUID_SVM excluded */ 384 /* CPUID_SVM excluded */
385 /* CPUID_EAPIC excluded */ 385 /* CPUID_EAPIC excluded */
386 CPUID_ALTMOVCR0 | 386 CPUID_ALTMOVCR0 |
387 CPUID_LZCNT | 387 CPUID_LZCNT |
388 CPUID_SSE4A | 388 CPUID_SSE4A |
389 CPUID_MISALIGNSSE | 389 CPUID_MISALIGNSSE |
390 CPUID_3DNOWPF | 390 CPUID_3DNOWPF |
391 /* CPUID_OSVW excluded */ 391 /* CPUID_OSVW excluded */
392 CPUID_IBS | 392 CPUID_IBS |
393 CPUID_XOP | 393 CPUID_XOP |
394 CPUID_SKINIT | 394 /* CPUID_SKINIT excluded */
395 CPUID_WDT | 395 CPUID_WDT |
396 CPUID_LWP | 396 CPUID_LWP |
397 CPUID_FMA4 | 397 CPUID_FMA4 |
398 CPUID_TCE | 398 CPUID_TCE |
399 CPUID_NODEID | 399 CPUID_NODEID |
400 CPUID_TBM | 400 CPUID_TBM |
401 CPUID_TOPOEXT | 401 CPUID_TOPOEXT |
402 CPUID_PCEC | 402 CPUID_PCEC |
403 CPUID_PCENB | 403 CPUID_PCENB |
404 CPUID_SPM | 404 CPUID_SPM |
405 CPUID_DBE | 405 CPUID_DBE |
406 CPUID_PTSC | 406 CPUID_PTSC |
407 CPUID_L2IPERFC, 407 CPUID_L2IPERFC,
@@ -411,26 +411,46 @@ const struct nvmm_x86_cpuid_mask nvmm_cp @@ -411,26 +411,46 @@ const struct nvmm_x86_cpuid_mask nvmm_cp
411 CPUID_MPC | 411 CPUID_MPC |
412 CPUID_XD | 412 CPUID_XD |
413 CPUID_MMXX | 413 CPUID_MMXX |
414 CPUID_MMX |  414 CPUID_MMX |
415 CPUID_FXSR | 415 CPUID_FXSR |
416 CPUID_FFXSR | 416 CPUID_FFXSR |
417 CPUID_P1GB | 417 CPUID_P1GB |
418 /* CPUID_RDTSCP excluded */ 418 /* CPUID_RDTSCP excluded */
419 CPUID_EM64T | 419 CPUID_EM64T |
420 CPUID_3DNOW2 | 420 CPUID_3DNOW2 |
421 CPUID_3DNOW 421 CPUID_3DNOW
422}; 422};
423 423
 424const struct nvmm_x86_cpuid_mask nvmm_cpuid_80000007 = {
 425 .eax = 0,
 426 .ebx = 0,
 427 .ecx = 0,
 428 .edx = CPUID_APM_ITSC
 429};
 430
 431const struct nvmm_x86_cpuid_mask nvmm_cpuid_80000008 = {
 432 .eax = ~0,
 433 .ebx =
 434 CPUID_CAPEX_CLZERO |
 435 /* CPUID_CAPEX_IRPERF excluded */
 436 CPUID_CAPEX_XSAVEERPTR |
 437 /* CPUID_CAPEX_RDPRU excluded */
 438 /* CPUID_CAPEX_MCOMMIT excluded */
 439 CPUID_CAPEX_WBNOINVD,
 440 .ecx = ~0, /* TODO? */
 441 .edx = 0
 442};
 443
424bool 444bool
425nvmm_x86_pat_validate(uint64_t val) 445nvmm_x86_pat_validate(uint64_t val)
426{ 446{
427 uint8_t *pat = (uint8_t *)&val; 447 uint8_t *pat = (uint8_t *)&val;
428 size_t i; 448 size_t i;
429 449
430 for (i = 0; i < 8; i++) { 450 for (i = 0; i < 8; i++) {
431 if (__predict_false(pat[i] & ~__BITS(2,0))) 451 if (__predict_false(pat[i] & ~__BITS(2,0)))
432 return false; 452 return false;
433 if (__predict_false(pat[i] == 2 || pat[i] == 3)) 453 if (__predict_false(pat[i] == 2 || pat[i] == 3))
434 return false; 454 return false;
435 } 455 }
436 456

cvs diff -r1.15.4.1 -r1.15.4.2 src/sys/dev/nvmm/x86/nvmm_x86.h (expand / switch to unified diff)

--- src/sys/dev/nvmm/x86/nvmm_x86.h 2019/11/10 12:58:30 1.15.4.1
+++ src/sys/dev/nvmm/x86/nvmm_x86.h 2020/08/26 17:55:49 1.15.4.2
@@ -1,17 +1,17 @@ @@ -1,17 +1,17 @@
1/* $NetBSD: nvmm_x86.h,v 1.15.4.1 2019/11/10 12:58:30 martin Exp $ */ 1/* $NetBSD: nvmm_x86.h,v 1.15.4.2 2020/08/26 17:55:49 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018-2019 The NetBSD Foundation, Inc. 4 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard. 8 * by Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
@@ -310,19 +310,21 @@ struct nvmm_vcpu_conf_tpr { @@ -310,19 +310,21 @@ struct nvmm_vcpu_conf_tpr {
310#ifdef _KERNEL 310#ifdef _KERNEL
311#define NVMM_X86_MACH_NCONF 0 311#define NVMM_X86_MACH_NCONF 0
312#define NVMM_X86_VCPU_NCONF 2 312#define NVMM_X86_VCPU_NCONF 2
313struct nvmm_x86_cpuid_mask { 313struct nvmm_x86_cpuid_mask {
314 uint32_t eax; 314 uint32_t eax;
315 uint32_t ebx; 315 uint32_t ebx;
316 uint32_t ecx; 316 uint32_t ecx;
317 uint32_t edx; 317 uint32_t edx;
318}; 318};
319extern const struct nvmm_x64_state nvmm_x86_reset_state; 319extern const struct nvmm_x64_state nvmm_x86_reset_state;
320extern const struct nvmm_x86_cpuid_mask nvmm_cpuid_00000001; 320extern const struct nvmm_x86_cpuid_mask nvmm_cpuid_00000001;
321extern const struct nvmm_x86_cpuid_mask nvmm_cpuid_00000007; 321extern const struct nvmm_x86_cpuid_mask nvmm_cpuid_00000007;
322extern const struct nvmm_x86_cpuid_mask nvmm_cpuid_80000001; 322extern const struct nvmm_x86_cpuid_mask nvmm_cpuid_80000001;
 323extern const struct nvmm_x86_cpuid_mask nvmm_cpuid_80000007;
 324extern const struct nvmm_x86_cpuid_mask nvmm_cpuid_80000008;
323bool nvmm_x86_pat_validate(uint64_t); 325bool nvmm_x86_pat_validate(uint64_t);
324#endif 326#endif
325 327
326#endif /* ASM_NVMM */ 328#endif /* ASM_NVMM */
327 329
328#endif /* _NVMM_X86_H_ */ 330#endif /* _NVMM_X86_H_ */

cvs diff -r1.46.4.8 -r1.46.4.9 src/sys/dev/nvmm/x86/nvmm_x86_svm.c (expand / switch to unified diff)

--- src/sys/dev/nvmm/x86/nvmm_x86_svm.c 2020/08/18 09:29:52 1.46.4.8
+++ src/sys/dev/nvmm/x86/nvmm_x86_svm.c 2020/08/26 17:55:48 1.46.4.9
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: nvmm_x86_svm.c,v 1.46.4.8 2020/08/18 09:29:52 martin Exp $ */ 1/* $NetBSD: nvmm_x86_svm.c,v 1.46.4.9 2020/08/26 17:55:48 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018-2019 The NetBSD Foundation, Inc. 4 * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard. 8 * by Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.46.4.8 2020/08/18 09:29:52 martin Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.46.4.9 2020/08/26 17:55:48 martin Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/kernel.h> 37#include <sys/kernel.h>
38#include <sys/kmem.h> 38#include <sys/kmem.h>
39#include <sys/cpu.h> 39#include <sys/cpu.h>
40#include <sys/xcall.h> 40#include <sys/xcall.h>
41#include <sys/mman.h> 41#include <sys/mman.h>
42 42
43#include <uvm/uvm.h> 43#include <uvm/uvm.h>
44#include <uvm/uvm_page.h> 44#include <uvm/uvm_page.h>
45 45
46#include <x86/cputypes.h> 46#include <x86/cputypes.h>
@@ -773,35 +773,71 @@ svm_vcpu_event_commit(struct nvmm_cpu *v @@ -773,35 +773,71 @@ svm_vcpu_event_commit(struct nvmm_cpu *v
773 773
774static inline void 774static inline void
775svm_inkernel_advance(struct vmcb *vmcb) 775svm_inkernel_advance(struct vmcb *vmcb)
776{ 776{
777 /* 777 /*
778 * Maybe we should also apply single-stepping and debug exceptions. 778 * Maybe we should also apply single-stepping and debug exceptions.
779 * Matters for guest-ring3, because it can execute 'cpuid' under a 779 * Matters for guest-ring3, because it can execute 'cpuid' under a
780 * debugger. 780 * debugger.
781 */ 781 */
782 vmcb->state.rip = vmcb->ctrl.nrip; 782 vmcb->state.rip = vmcb->ctrl.nrip;
783 vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW; 783 vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
784} 784}
785 785
 786#define SVM_CPUID_MAX_BASIC 0xD
786#define SVM_CPUID_MAX_HYPERVISOR 0x40000000 787#define SVM_CPUID_MAX_HYPERVISOR 0x40000000
 788#define SVM_CPUID_MAX_EXTENDED 0x8000001F
 789static uint32_t svm_cpuid_max_basic __read_mostly;
 790static uint32_t svm_cpuid_max_extended __read_mostly;
 791
 792static void
 793svm_inkernel_exec_cpuid(struct svm_cpudata *cpudata, uint64_t eax, uint64_t ecx)
 794{
 795 u_int descs[4];
 796
 797 x86_cpuid2(eax, ecx, descs);
 798 cpudata->vmcb->state.rax = descs[0];
 799 cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
 800 cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
 801 cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
 802}
787 803
788static void 804static void
789svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx) 805svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
790{ 806{
791 struct svm_cpudata *cpudata = vcpu->cpudata; 807 struct svm_cpudata *cpudata = vcpu->cpudata;
792 uint64_t cr4; 808 uint64_t cr4;
793 809
 810 if (eax < 0x40000000) {
 811 if (__predict_false(eax > svm_cpuid_max_basic)) {
 812 eax = svm_cpuid_max_basic;
 813 svm_inkernel_exec_cpuid(cpudata, eax, ecx);
 814 }
 815 } else if (eax < 0x80000000) {
 816 if (__predict_false(eax > SVM_CPUID_MAX_HYPERVISOR)) {
 817 eax = svm_cpuid_max_basic;
 818 svm_inkernel_exec_cpuid(cpudata, eax, ecx);
 819 }
 820 } else {
 821 if (__predict_false(eax > svm_cpuid_max_extended)) {
 822 eax = svm_cpuid_max_basic;
 823 svm_inkernel_exec_cpuid(cpudata, eax, ecx);
 824 }
 825 }
 826
794 switch (eax) { 827 switch (eax) {
 828 case 0x00000000:
 829 cpudata->vmcb->state.rax = svm_cpuid_max_basic;
 830 break;
795 case 0x00000001: 831 case 0x00000001:
796 cpudata->vmcb->state.rax &= nvmm_cpuid_00000001.eax; 832 cpudata->vmcb->state.rax &= nvmm_cpuid_00000001.eax;
797 833
798 cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID; 834 cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
799 cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid, 835 cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
800 CPUID_LOCAL_APIC_ID); 836 CPUID_LOCAL_APIC_ID);
801 837
802 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx; 838 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
803 cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ; 839 cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID2_RAZ;
804 840
805 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx; 841 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;
806 842
807 /* CPUID2_OSXSAVE depends on CR4. */ 843 /* CPUID2_OSXSAVE depends on CR4. */
@@ -811,30 +847,40 @@ svm_inkernel_handle_cpuid(struct nvmm_cp @@ -811,30 +847,40 @@ svm_inkernel_handle_cpuid(struct nvmm_cp
811 } 847 }
812 break; 848 break;
813 case 0x00000002: /* Empty */ 849 case 0x00000002: /* Empty */
814 case 0x00000003: /* Empty */ 850 case 0x00000003: /* Empty */
815 case 0x00000004: /* Empty */ 851 case 0x00000004: /* Empty */
816 case 0x00000005: /* Monitor/MWait */ 852 case 0x00000005: /* Monitor/MWait */
817 case 0x00000006: /* Power Management Related Features */ 853 case 0x00000006: /* Power Management Related Features */
818 cpudata->vmcb->state.rax = 0; 854 cpudata->vmcb->state.rax = 0;
819 cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 855 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
820 cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 856 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
821 cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 857 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
822 break; 858 break;
823 case 0x00000007: /* Structured Extended Features */ 859 case 0x00000007: /* Structured Extended Features */
824 cpudata->vmcb->state.rax &= nvmm_cpuid_00000007.eax; 860 switch (ecx) {
825 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx; 861 case 0:
826 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx; 862 cpudata->vmcb->state.rax = 0;
827 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx; 863 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
 864 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
 865 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
 866 break;
 867 default:
 868 cpudata->vmcb->state.rax = 0;
 869 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
 870 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
 871 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
 872 break;
 873 }
828 break; 874 break;
829 case 0x00000008: /* Empty */ 875 case 0x00000008: /* Empty */
830 case 0x00000009: /* Empty */ 876 case 0x00000009: /* Empty */
831 case 0x0000000A: /* Empty */ 877 case 0x0000000A: /* Empty */
832 case 0x0000000B: /* Empty */ 878 case 0x0000000B: /* Empty */
833 case 0x0000000C: /* Empty */ 879 case 0x0000000C: /* Empty */
834 cpudata->vmcb->state.rax = 0; 880 cpudata->vmcb->state.rax = 0;
835 cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 881 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
836 cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 882 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
837 cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 883 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
838 break; 884 break;
839 case 0x0000000D: /* Processor Extended State Enumeration */ 885 case 0x0000000D: /* Processor Extended State Enumeration */
840 if (svm_xcr0_mask == 0) { 886 if (svm_xcr0_mask == 0) {
@@ -869,32 +915,94 @@ svm_inkernel_handle_cpuid(struct nvmm_cp @@ -869,32 +915,94 @@ svm_inkernel_handle_cpuid(struct nvmm_cp
869 } 915 }
870 break; 916 break;
871 917
872 case 0x40000000: /* Hypervisor Information */ 918 case 0x40000000: /* Hypervisor Information */
873 cpudata->vmcb->state.rax = SVM_CPUID_MAX_HYPERVISOR; 919 cpudata->vmcb->state.rax = SVM_CPUID_MAX_HYPERVISOR;
874 cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 920 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
875 cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 921 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
876 cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 922 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
877 memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4); 923 memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
878 memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4); 924 memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
879 memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4); 925 memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
880 break; 926 break;
881 927
 928 case 0x80000000:
 929 cpudata->vmcb->state.rax = svm_cpuid_max_extended;
 930 break;
882 case 0x80000001: 931 case 0x80000001:
883 cpudata->vmcb->state.rax &= nvmm_cpuid_80000001.eax; 932 cpudata->vmcb->state.rax &= nvmm_cpuid_80000001.eax;
884 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx; 933 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
885 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx; 934 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
886 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx; 935 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
887 break; 936 break;
 937 case 0x80000002: /* Extended Processor Name String */
 938 case 0x80000003: /* Extended Processor Name String */
 939 case 0x80000004: /* Extended Processor Name String */
 940 case 0x80000005: /* L1 Cache and TLB Information */
 941 case 0x80000006: /* L2 Cache and TLB and L3 Cache Information */
 942 break;
 943 case 0x80000007: /* Processor Power Management and RAS Capabilities */
 944 cpudata->vmcb->state.rax &= nvmm_cpuid_80000007.eax;
 945 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000007.ebx;
 946 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000007.ecx;
 947 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000007.edx;
 948 break;
 949 case 0x80000008: /* Processor Capacity Parameters and Ext Feat Ident */
 950 cpudata->vmcb->state.rax &= nvmm_cpuid_80000008.eax;
 951 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000008.ebx;
 952 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000008.ecx;
 953 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000008.edx;
 954 break;
 955 case 0x80000009: /* Empty */
 956 case 0x8000000A: /* SVM Features */
 957 case 0x8000000B: /* Empty */
 958 case 0x8000000C: /* Empty */
 959 case 0x8000000D: /* Empty */
 960 case 0x8000000E: /* Empty */
 961 case 0x8000000F: /* Empty */
 962 case 0x80000010: /* Empty */
 963 case 0x80000011: /* Empty */
 964 case 0x80000012: /* Empty */
 965 case 0x80000013: /* Empty */
 966 case 0x80000014: /* Empty */
 967 case 0x80000015: /* Empty */
 968 case 0x80000016: /* Empty */
 969 case 0x80000017: /* Empty */
 970 case 0x80000018: /* Empty */
 971 cpudata->vmcb->state.rax = 0;
 972 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
 973 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
 974 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
 975 break;
 976 case 0x80000019: /* TLB Characteristics for 1GB pages */
 977 case 0x8000001A: /* Instruction Optimizations */
 978 break;
 979 case 0x8000001B: /* Instruction-Based Sampling Capabilities */
 980 case 0x8000001C: /* Lightweight Profiling Capabilities */
 981 cpudata->vmcb->state.rax = 0;
 982 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
 983 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
 984 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
 985 break;
 986 case 0x8000001D: /* Cache Topology Information */
 987 case 0x8000001E: /* Processor Topology Information */
 988 break; /* TODO? */
 989 case 0x8000001F: /* Encrypted Memory Capabilities */
 990 cpudata->vmcb->state.rax = 0;
 991 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
 992 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
 993 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
 994 break;
 995
888 default: 996 default:
889 break; 997 break;
890 } 998 }
891} 999}
892 1000
893static void 1001static void
894svm_exit_insn(struct vmcb *vmcb, struct nvmm_vcpu_exit *exit, uint64_t reason) 1002svm_exit_insn(struct vmcb *vmcb, struct nvmm_vcpu_exit *exit, uint64_t reason)
895{ 1003{
896 exit->u.insn.npc = vmcb->ctrl.nrip; 1004 exit->u.insn.npc = vmcb->ctrl.nrip;
897 exit->reason = reason; 1005 exit->reason = reason;
898} 1006}
899 1007
900static void 1008static void
@@ -2401,26 +2509,33 @@ svm_init(void) @@ -2401,26 +2509,33 @@ svm_init(void)
2401 /* The guest TLB flush command. */ 2509 /* The guest TLB flush command. */
2402 if (descs[3] & CPUID_AMD_SVM_FlushByASID) { 2510 if (descs[3] & CPUID_AMD_SVM_FlushByASID) {
2403 svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_GUEST; 2511 svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_GUEST;
2404 } else { 2512 } else {
2405 svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_ALL; 2513 svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_ALL;
2406 } 2514 }
2407 2515
2408 /* Init the ASID. */ 2516 /* Init the ASID. */
2409 svm_init_asid(descs[1]); 2517 svm_init_asid(descs[1]);
2410 2518
2411 /* Init the XCR0 mask. */ 2519 /* Init the XCR0 mask. */
2412 svm_xcr0_mask = SVM_XCR0_MASK_DEFAULT & x86_xsave_features; 2520 svm_xcr0_mask = SVM_XCR0_MASK_DEFAULT & x86_xsave_features;
2413 2521
 2522 /* Init the max basic CPUID leaf. */
 2523 svm_cpuid_max_basic = uimin(cpuid_level, SVM_CPUID_MAX_BASIC);
 2524
 2525 /* Init the max extended CPUID leaf. */
 2526 x86_cpuid(0x80000000, descs);
 2527 svm_cpuid_max_extended = uimin(descs[0], SVM_CPUID_MAX_EXTENDED);
 2528
2414 memset(hsave, 0, sizeof(hsave)); 2529 memset(hsave, 0, sizeof(hsave));
2415 for (CPU_INFO_FOREACH(cii, ci)) { 2530 for (CPU_INFO_FOREACH(cii, ci)) {
2416 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO); 2531 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
2417 hsave[cpu_index(ci)].pa = VM_PAGE_TO_PHYS(pg); 2532 hsave[cpu_index(ci)].pa = VM_PAGE_TO_PHYS(pg);
2418 } 2533 }
2419 2534
2420 xc = xc_broadcast(0, svm_change_cpu, (void *)true, NULL); 2535 xc = xc_broadcast(0, svm_change_cpu, (void *)true, NULL);
2421 xc_wait(xc); 2536 xc_wait(xc);
2422} 2537}
2423 2538
2424static void 2539static void
2425svm_fini_asid(void) 2540svm_fini_asid(void)
2426{ 2541{

cvs diff -r1.36.2.10 -r1.36.2.11 src/sys/dev/nvmm/x86/nvmm_x86_vmx.c (expand / switch to unified diff)

--- src/sys/dev/nvmm/x86/nvmm_x86_vmx.c 2020/08/18 09:29:52 1.36.2.10
+++ src/sys/dev/nvmm/x86/nvmm_x86_vmx.c 2020/08/26 17:55:49 1.36.2.11
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: nvmm_x86_vmx.c,v 1.36.2.10 2020/08/18 09:29:52 martin Exp $ */ 1/* $NetBSD: nvmm_x86_vmx.c,v 1.36.2.11 2020/08/26 17:55:49 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018-2019 The NetBSD Foundation, Inc. 4 * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard. 8 * by Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.36.2.10 2020/08/18 09:29:52 martin Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.36.2.11 2020/08/26 17:55:49 martin Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/kernel.h> 37#include <sys/kernel.h>
38#include <sys/kmem.h> 38#include <sys/kmem.h>
39#include <sys/cpu.h> 39#include <sys/cpu.h>
40#include <sys/xcall.h> 40#include <sys/xcall.h>
41#include <sys/mman.h> 41#include <sys/mman.h>
42#include <sys/bitops.h> 42#include <sys/bitops.h>
43 43
44#include <uvm/uvm.h> 44#include <uvm/uvm.h>
45#include <uvm/uvm_page.h> 45#include <uvm/uvm_page.h>
46 46
@@ -1152,26 +1152,27 @@ vmx_exit_exc_nmi(struct nvmm_machine *ma @@ -1152,26 +1152,27 @@ vmx_exit_exc_nmi(struct nvmm_machine *ma
1152 } 1152 }
1153 1153
1154 exit->reason = NVMM_VCPU_EXIT_NONE; 1154 exit->reason = NVMM_VCPU_EXIT_NONE;
1155 return; 1155 return;
1156 1156
1157error: 1157error:
1158 vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI); 1158 vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI);
1159} 1159}
1160 1160
1161#define VMX_CPUID_MAX_BASIC 0x16 1161#define VMX_CPUID_MAX_BASIC 0x16
1162#define VMX_CPUID_MAX_HYPERVISOR 0x40000000 1162#define VMX_CPUID_MAX_HYPERVISOR 0x40000000
1163#define VMX_CPUID_MAX_EXTENDED 0x80000008 1163#define VMX_CPUID_MAX_EXTENDED 0x80000008
1164static uint32_t vmx_cpuid_max_basic __read_mostly; 1164static uint32_t vmx_cpuid_max_basic __read_mostly;
 1165static uint32_t vmx_cpuid_max_extended __read_mostly;
1165 1166
1166static void 1167static void
1167vmx_inkernel_exec_cpuid(struct vmx_cpudata *cpudata, uint64_t eax, uint64_t ecx) 1168vmx_inkernel_exec_cpuid(struct vmx_cpudata *cpudata, uint64_t eax, uint64_t ecx)
1168{ 1169{
1169 u_int descs[4]; 1170 u_int descs[4];
1170 1171
1171 x86_cpuid2(eax, ecx, descs); 1172 x86_cpuid2(eax, ecx, descs);
1172 cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0]; 1173 cpudata->gprs[NVMM_X64_GPR_RAX] = descs[0];
1173 cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1]; 1174 cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
1174 cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2]; 1175 cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
1175 cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3]; 1176 cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];
1176} 1177}
1177 1178
@@ -1183,26 +1184,31 @@ vmx_inkernel_handle_cpuid(struct nvmm_ma @@ -1183,26 +1184,31 @@ vmx_inkernel_handle_cpuid(struct nvmm_ma
1183 unsigned int ncpus; 1184 unsigned int ncpus;
1184 uint64_t cr4; 1185 uint64_t cr4;
1185 1186
1186 if (eax < 0x40000000) { 1187 if (eax < 0x40000000) {
1187 if (__predict_false(eax > vmx_cpuid_max_basic)) { 1188 if (__predict_false(eax > vmx_cpuid_max_basic)) {
1188 eax = vmx_cpuid_max_basic; 1189 eax = vmx_cpuid_max_basic;
1189 vmx_inkernel_exec_cpuid(cpudata, eax, ecx); 1190 vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
1190 } 1191 }
1191 } else if (eax < 0x80000000) { 1192 } else if (eax < 0x80000000) {
1192 if (__predict_false(eax > VMX_CPUID_MAX_HYPERVISOR)) { 1193 if (__predict_false(eax > VMX_CPUID_MAX_HYPERVISOR)) {
1193 eax = vmx_cpuid_max_basic; 1194 eax = vmx_cpuid_max_basic;
1194 vmx_inkernel_exec_cpuid(cpudata, eax, ecx); 1195 vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
1195 } 1196 }
 1197 } else {
 1198 if (__predict_false(eax > vmx_cpuid_max_extended)) {
 1199 eax = vmx_cpuid_max_basic;
 1200 vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
 1201 }
1196 } 1202 }
1197 1203
1198 switch (eax) { 1204 switch (eax) {
1199 case 0x00000000: 1205 case 0x00000000:
1200 cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_basic; 1206 cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_basic;
1201 break; 1207 break;
1202 case 0x00000001: 1208 case 0x00000001:
1203 cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax; 1209 cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;
1204 1210
1205 cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID; 1211 cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
1206 cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid, 1212 cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
1207 CPUID_LOCAL_APIC_ID); 1213 CPUID_LOCAL_APIC_ID);
1208 1214
@@ -1228,32 +1234,42 @@ vmx_inkernel_handle_cpuid(struct nvmm_ma @@ -1228,32 +1234,42 @@ vmx_inkernel_handle_cpuid(struct nvmm_ma
1228 cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 1234 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
1229 cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 1235 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
1230 break; 1236 break;
1231 case 0x00000004: /* Deterministic Cache Parameters */ 1237 case 0x00000004: /* Deterministic Cache Parameters */
1232 break; /* TODO? */ 1238 break; /* TODO? */
1233 case 0x00000005: /* MONITOR/MWAIT */ 1239 case 0x00000005: /* MONITOR/MWAIT */
1234 case 0x00000006: /* Thermal and Power Management */ 1240 case 0x00000006: /* Thermal and Power Management */
1235 cpudata->gprs[NVMM_X64_GPR_RAX] = 0; 1241 cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
1236 cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 1242 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
1237 cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 1243 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
1238 cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 1244 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
1239 break; 1245 break;
1240 case 0x00000007: /* Structured Extended Feature Flags Enumeration */ 1246 case 0x00000007: /* Structured Extended Feature Flags Enumeration */
1241 cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000007.eax; 1247 switch (ecx) {
1242 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx; 1248 case 0:
1243 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx; 1249 cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
1244 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx; 1250 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
1245 if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) { 1251 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
1246 cpudata->gprs[NVMM_X64_GPR_RBX] |= CPUID_SEF_INVPCID; 1252 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
 1253 if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
 1254 cpudata->gprs[NVMM_X64_GPR_RBX] |= CPUID_SEF_INVPCID;
 1255 }
 1256 break;
 1257 default:
 1258 cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
 1259 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
 1260 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
 1261 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
 1262 break;
1247 } 1263 }
1248 break; 1264 break;
1249 case 0x00000008: /* Empty */ 1265 case 0x00000008: /* Empty */
1250 case 0x00000009: /* Direct Cache Access Information */ 1266 case 0x00000009: /* Direct Cache Access Information */
1251 cpudata->gprs[NVMM_X64_GPR_RAX] = 0; 1267 cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
1252 cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 1268 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
1253 cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 1269 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
1254 cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 1270 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
1255 break; 1271 break;
1256 case 0x0000000A: /* Architectural Performance Monitoring */ 1272 case 0x0000000A: /* Architectural Performance Monitoring */
1257 cpudata->gprs[NVMM_X64_GPR_RAX] = 0; 1273 cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
1258 cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 1274 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
1259 cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 1275 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
@@ -1345,39 +1361,52 @@ vmx_inkernel_handle_cpuid(struct nvmm_ma @@ -1345,39 +1361,52 @@ vmx_inkernel_handle_cpuid(struct nvmm_ma
1345 case 0x00000016: /* Processor Frequency Information */ 1361 case 0x00000016: /* Processor Frequency Information */
1346 break; 1362 break;
1347 1363
1348 case 0x40000000: /* Hypervisor Information */ 1364 case 0x40000000: /* Hypervisor Information */
1349 cpudata->gprs[NVMM_X64_GPR_RAX] = VMX_CPUID_MAX_HYPERVISOR; 1365 cpudata->gprs[NVMM_X64_GPR_RAX] = VMX_CPUID_MAX_HYPERVISOR;
1350 cpudata->gprs[NVMM_X64_GPR_RBX] = 0; 1366 cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
1351 cpudata->gprs[NVMM_X64_GPR_RCX] = 0; 1367 cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
1352 cpudata->gprs[NVMM_X64_GPR_RDX] = 0; 1368 cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
1353 memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4); 1369 memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
1354 memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4); 1370 memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
1355 memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4); 1371 memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
1356 break; 1372 break;
1357 1373
 1374 case 0x80000000:
 1375 cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_extended;
 1376 break;
1358 case 0x80000001: 1377 case 0x80000001:
1359 cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax; 1378 cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
1360 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx; 1379 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
1361 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx; 1380 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
1362 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx; 1381 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
1363 break; 1382 break;
1364 case 0x80000002: /* Processor Brand String */ 1383 case 0x80000002: /* Processor Brand String */
1365 case 0x80000003: /* Processor Brand String */ 1384 case 0x80000003: /* Processor Brand String */
1366 case 0x80000004: /* Processor Brand String */ 1385 case 0x80000004: /* Processor Brand String */
1367 case 0x80000005: /* Reserved Zero */ 1386 case 0x80000005: /* Reserved Zero */
1368 case 0x80000006: /* Cache Information */ 1387 case 0x80000006: /* Cache Information */
 1388 break;
1369 case 0x80000007: /* TSC Information */ 1389 case 0x80000007: /* TSC Information */
 1390 cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000007.eax;
 1391 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000007.ebx;
 1392 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000007.ecx;
 1393 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000007.edx;
 1394 break;
1370 case 0x80000008: /* Address Sizes */ 1395 case 0x80000008: /* Address Sizes */
 1396 cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000008.eax;
 1397 cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000008.ebx;
 1398 cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000008.ecx;
 1399 cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000008.edx;
1371 break; 1400 break;
1372 1401
1373 default: 1402 default:
1374 break; 1403 break;
1375 } 1404 }
1376} 1405}
1377 1406
1378static void 1407static void
1379vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason) 1408vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason)
1380{ 1409{
1381 uint64_t inslen, rip; 1410 uint64_t inslen, rip;
1382 1411
1383 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH); 1412 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
@@ -3313,39 +3342,44 @@ vmx_init_l1tf(void) @@ -3313,39 +3342,44 @@ vmx_init_l1tf(void)
3313 /* Enable hardware mitigation. */ 3342 /* Enable hardware mitigation. */
3314 vmx_msrlist_entry_nmsr += 1; 3343 vmx_msrlist_entry_nmsr += 1;
3315 } 3344 }
3316} 3345}
3317 3346
3318static void 3347static void
3319vmx_init(void) 3348vmx_init(void)
3320{ 3349{
3321 CPU_INFO_ITERATOR cii; 3350 CPU_INFO_ITERATOR cii;
3322 struct cpu_info *ci; 3351 struct cpu_info *ci;
3323 uint64_t xc, msr; 3352 uint64_t xc, msr;
3324 struct vmxon *vmxon; 3353 struct vmxon *vmxon;
3325 uint32_t revision; 3354 uint32_t revision;
 3355 u_int descs[4];
3326 paddr_t pa; 3356 paddr_t pa;
3327 vaddr_t va; 3357 vaddr_t va;
3328 int error; 3358 int error;
3329 3359
3330 /* Init the ASID bitmap (VPID). */ 3360 /* Init the ASID bitmap (VPID). */
3331 vmx_init_asid(VPID_MAX); 3361 vmx_init_asid(VPID_MAX);
3332 3362
3333 /* Init the XCR0 mask. */ 3363 /* Init the XCR0 mask. */
3334 vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features; 3364 vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
3335 3365
3336 /* Init the max CPUID leaves. */ 3366 /* Init the max basic CPUID leaf. */
3337 vmx_cpuid_max_basic = uimin(cpuid_level, VMX_CPUID_MAX_BASIC); 3367 vmx_cpuid_max_basic = uimin(cpuid_level, VMX_CPUID_MAX_BASIC);
3338 3368
 3369 /* Init the max extended CPUID leaf. */
 3370 x86_cpuid(0x80000000, descs);
 3371 vmx_cpuid_max_extended = uimin(descs[0], VMX_CPUID_MAX_EXTENDED);
 3372
3339 /* Init the TLB flush op, the EPT flush op and the EPTP type. */ 3373 /* Init the TLB flush op, the EPT flush op and the EPTP type. */
3340 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 3374 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3341 if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) { 3375 if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
3342 vmx_tlb_flush_op = VMX_INVVPID_CONTEXT; 3376 vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
3343 } else { 3377 } else {
3344 vmx_tlb_flush_op = VMX_INVVPID_ALL; 3378 vmx_tlb_flush_op = VMX_INVVPID_ALL;
3345 } 3379 }
3346 if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) { 3380 if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
3347 vmx_ept_flush_op = VMX_INVEPT_CONTEXT; 3381 vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
3348 } else { 3382 } else {
3349 vmx_ept_flush_op = VMX_INVEPT_ALL; 3383 vmx_ept_flush_op = VMX_INVEPT_ALL;
3350 } 3384 }
3351 if ((msr & IA32_VMX_EPT_VPID_WB) != 0) { 3385 if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {