Fri Aug 30 13:29:17 2019 UTC ()
 Use macro.


(msaitoh)
diff -r1.29 -r1.30 src/sys/arch/x86/x86/spectre.c

cvs diff -r1.29 -r1.30 src/sys/arch/x86/x86/spectre.c (switch to unified diff)

--- src/sys/arch/x86/x86/spectre.c 2019/06/01 06:54:28 1.29
+++ src/sys/arch/x86/x86/spectre.c 2019/08/30 13:29:17 1.30
@@ -1,971 +1,971 @@ @@ -1,971 +1,971 @@
1/* $NetBSD: spectre.c,v 1.29 2019/06/01 06:54:28 maxv Exp $ */ 1/* $NetBSD: spectre.c,v 1.30 2019/08/30 13:29:17 msaitoh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018-2019 NetBSD Foundation, Inc. 4 * Copyright (c) 2018-2019 NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard. 8 * by Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Mitigations for the SpectreV2, SpectreV4 and MDS CPU flaws. 33 * Mitigations for the SpectreV2, SpectreV4 and MDS CPU flaws.
34 */ 34 */
35 35
36#include <sys/cdefs.h> 36#include <sys/cdefs.h>
37__KERNEL_RCSID(0, "$NetBSD: spectre.c,v 1.29 2019/06/01 06:54:28 maxv Exp $"); 37__KERNEL_RCSID(0, "$NetBSD: spectre.c,v 1.30 2019/08/30 13:29:17 msaitoh Exp $");
38 38
39#include "opt_spectre.h" 39#include "opt_spectre.h"
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <sys/systm.h> 42#include <sys/systm.h>
43#include <sys/cpu.h> 43#include <sys/cpu.h>
44#include <sys/sysctl.h> 44#include <sys/sysctl.h>
45#include <sys/xcall.h> 45#include <sys/xcall.h>
46 46
47#include <machine/cpufunc.h> 47#include <machine/cpufunc.h>
48#include <machine/cpuvar.h> 48#include <machine/cpuvar.h>
49#include <machine/specialreg.h> 49#include <machine/specialreg.h>
50#include <machine/frameasm.h> 50#include <machine/frameasm.h>
51 51
52#include <x86/cputypes.h> 52#include <x86/cputypes.h>
53 53
54enum v2_mitigation { 54enum v2_mitigation {
55 V2_MITIGATION_NONE, 55 V2_MITIGATION_NONE,
56 V2_MITIGATION_AMD_DIS_IND, 56 V2_MITIGATION_AMD_DIS_IND,
57 V2_MITIGATION_INTEL_IBRS, 57 V2_MITIGATION_INTEL_IBRS,
58 V2_MITIGATION_INTEL_ENHANCED_IBRS 58 V2_MITIGATION_INTEL_ENHANCED_IBRS
59}; 59};
60 60
61enum v4_mitigation { 61enum v4_mitigation {
62 V4_MITIGATION_NONE, 62 V4_MITIGATION_NONE,
63 V4_MITIGATION_INTEL_SSBD, 63 V4_MITIGATION_INTEL_SSBD,
64 V4_MITIGATION_INTEL_SSB_NO, 64 V4_MITIGATION_INTEL_SSB_NO,
65 V4_MITIGATION_AMD_SSB_NO, 65 V4_MITIGATION_AMD_SSB_NO,
66 V4_MITIGATION_AMD_NONARCH_F15H, 66 V4_MITIGATION_AMD_NONARCH_F15H,
67 V4_MITIGATION_AMD_NONARCH_F16H, 67 V4_MITIGATION_AMD_NONARCH_F16H,
68 V4_MITIGATION_AMD_NONARCH_F17H 68 V4_MITIGATION_AMD_NONARCH_F17H
69}; 69};
70 70
71static enum v2_mitigation v2_mitigation_method = V2_MITIGATION_NONE; 71static enum v2_mitigation v2_mitigation_method = V2_MITIGATION_NONE;
72static enum v4_mitigation v4_mitigation_method = V4_MITIGATION_NONE; 72static enum v4_mitigation v4_mitigation_method = V4_MITIGATION_NONE;
73 73
74static bool v2_mitigation_enabled __read_mostly = false; 74static bool v2_mitigation_enabled __read_mostly = false;
75static bool v4_mitigation_enabled __read_mostly = false; 75static bool v4_mitigation_enabled __read_mostly = false;
76 76
77static char v2_mitigation_name[64] = "(none)"; 77static char v2_mitigation_name[64] = "(none)";
78static char v4_mitigation_name[64] = "(none)"; 78static char v4_mitigation_name[64] = "(none)";
79 79
80/* --------------------------------------------------------------------- */ 80/* --------------------------------------------------------------------- */
81 81
82static void 82static void
83v2_set_name(void) 83v2_set_name(void)
84{ 84{
85 char name[64] = ""; 85 char name[64] = "";
86 size_t nmitig = 0; 86 size_t nmitig = 0;
87 87
88#if defined(SPECTRE_V2_GCC_MITIGATION) 88#if defined(SPECTRE_V2_GCC_MITIGATION)
89 strlcat(name, "[GCC retpoline]", sizeof(name)); 89 strlcat(name, "[GCC retpoline]", sizeof(name));
90 nmitig++; 90 nmitig++;
91#endif 91#endif
92 92
93 if (!v2_mitigation_enabled) { 93 if (!v2_mitigation_enabled) {
94 if (nmitig == 0) 94 if (nmitig == 0)
95 strlcat(name, "(none)", sizeof(name)); 95 strlcat(name, "(none)", sizeof(name));
96 } else { 96 } else {
97 if (nmitig) 97 if (nmitig)
98 strlcat(name, " + ", sizeof(name)); 98 strlcat(name, " + ", sizeof(name));
99 switch (v2_mitigation_method) { 99 switch (v2_mitigation_method) {
100 case V2_MITIGATION_AMD_DIS_IND: 100 case V2_MITIGATION_AMD_DIS_IND:
101 strlcat(name, "[AMD DIS_IND]", sizeof(name)); 101 strlcat(name, "[AMD DIS_IND]", sizeof(name));
102 break; 102 break;
103 case V2_MITIGATION_INTEL_IBRS: 103 case V2_MITIGATION_INTEL_IBRS:
104 strlcat(name, "[Intel IBRS]", sizeof(name)); 104 strlcat(name, "[Intel IBRS]", sizeof(name));
105 break; 105 break;
106 case V2_MITIGATION_INTEL_ENHANCED_IBRS: 106 case V2_MITIGATION_INTEL_ENHANCED_IBRS:
107 strlcat(name, "[Intel Enhanced IBRS]", sizeof(name)); 107 strlcat(name, "[Intel Enhanced IBRS]", sizeof(name));
108 break; 108 break;
109 default: 109 default:
110 panic("%s: impossible", __func__); 110 panic("%s: impossible", __func__);
111 } 111 }
112 } 112 }
113 113
114 strlcpy(v2_mitigation_name, name, 114 strlcpy(v2_mitigation_name, name,
115 sizeof(v2_mitigation_name)); 115 sizeof(v2_mitigation_name));
116} 116}
117 117
118static void 118static void
119v2_detect_method(void) 119v2_detect_method(void)
120{ 120{
121 struct cpu_info *ci = curcpu(); 121 struct cpu_info *ci = curcpu();
122 u_int descs[4]; 122 u_int descs[4];
123 uint64_t msr; 123 uint64_t msr;
124 124
125 if (cpu_vendor == CPUVENDOR_INTEL) { 125 if (cpu_vendor == CPUVENDOR_INTEL) {
126 if (cpuid_level >= 7) { 126 if (cpuid_level >= 7) {
127 x86_cpuid(7, descs); 127 x86_cpuid(7, descs);
128 128
129 if (descs[3] & CPUID_SEF_ARCH_CAP) { 129 if (descs[3] & CPUID_SEF_ARCH_CAP) {
130 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES); 130 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
131 if (msr & IA32_ARCH_IBRS_ALL) { 131 if (msr & IA32_ARCH_IBRS_ALL) {
132 v2_mitigation_method = 132 v2_mitigation_method =
133 V2_MITIGATION_INTEL_ENHANCED_IBRS; 133 V2_MITIGATION_INTEL_ENHANCED_IBRS;
134 return; 134 return;
135 } 135 }
136 } 136 }
137#ifdef __x86_64__ 137#ifdef __x86_64__
138 if (descs[3] & CPUID_SEF_IBRS) { 138 if (descs[3] & CPUID_SEF_IBRS) {
139 v2_mitigation_method = V2_MITIGATION_INTEL_IBRS; 139 v2_mitigation_method = V2_MITIGATION_INTEL_IBRS;
140 return; 140 return;
141 } 141 }
142#endif 142#endif
143 } 143 }
144 v2_mitigation_method = V2_MITIGATION_NONE; 144 v2_mitigation_method = V2_MITIGATION_NONE;
145 } else if (cpu_vendor == CPUVENDOR_AMD) { 145 } else if (cpu_vendor == CPUVENDOR_AMD) {
146 /* 146 /*
147 * The AMD Family 10h manual documents the IC_CFG.DIS_IND bit. 147 * The AMD Family 10h manual documents the IC_CFG.DIS_IND bit.
148 * This bit disables the Indirect Branch Predictor. 148 * This bit disables the Indirect Branch Predictor.
149 * 149 *
150 * Families 12h and 16h are believed to have this bit too, but 150 * Families 12h and 16h are believed to have this bit too, but
151 * their manuals don't document it. 151 * their manuals don't document it.
152 */ 152 */
153 switch (CPUID_TO_FAMILY(ci->ci_signature)) { 153 switch (CPUID_TO_FAMILY(ci->ci_signature)) {
154 case 0x10: 154 case 0x10:
155 v2_mitigation_method = V2_MITIGATION_AMD_DIS_IND; 155 v2_mitigation_method = V2_MITIGATION_AMD_DIS_IND;
156 break; 156 break;
157 default: 157 default:
158 v2_mitigation_method = V2_MITIGATION_NONE; 158 v2_mitigation_method = V2_MITIGATION_NONE;
159 break; 159 break;
160 } 160 }
161 } else { 161 } else {
162 v2_mitigation_method = V2_MITIGATION_NONE; 162 v2_mitigation_method = V2_MITIGATION_NONE;
163 } 163 }
164} 164}
165 165
166/* -------------------------------------------------------------------------- */ 166/* -------------------------------------------------------------------------- */
167 167
168static volatile unsigned long ibrs_cpu_barrier1 __cacheline_aligned; 168static volatile unsigned long ibrs_cpu_barrier1 __cacheline_aligned;
169static volatile unsigned long ibrs_cpu_barrier2 __cacheline_aligned; 169static volatile unsigned long ibrs_cpu_barrier2 __cacheline_aligned;
170 170
171#ifdef __x86_64__ 171#ifdef __x86_64__
172static void 172static void
173ibrs_disable_hotpatch(void) 173ibrs_disable_hotpatch(void)
174{ 174{
175 extern uint8_t noibrs_enter, noibrs_enter_end; 175 extern uint8_t noibrs_enter, noibrs_enter_end;
176 extern uint8_t noibrs_leave, noibrs_leave_end; 176 extern uint8_t noibrs_leave, noibrs_leave_end;
177 u_long psl, cr0; 177 u_long psl, cr0;
178 uint8_t *bytes; 178 uint8_t *bytes;
179 size_t size; 179 size_t size;
180 180
181 x86_patch_window_open(&psl, &cr0); 181 x86_patch_window_open(&psl, &cr0);
182 182
183 bytes = &noibrs_enter; 183 bytes = &noibrs_enter;
184 size = (size_t)&noibrs_enter_end - (size_t)&noibrs_enter; 184 size = (size_t)&noibrs_enter_end - (size_t)&noibrs_enter;
185 x86_hotpatch(HP_NAME_IBRS_ENTER, bytes, size); 185 x86_hotpatch(HP_NAME_IBRS_ENTER, bytes, size);
186 186
187 bytes = &noibrs_leave; 187 bytes = &noibrs_leave;
188 size = (size_t)&noibrs_leave_end - (size_t)&noibrs_leave; 188 size = (size_t)&noibrs_leave_end - (size_t)&noibrs_leave;
189 x86_hotpatch(HP_NAME_IBRS_LEAVE, bytes, size); 189 x86_hotpatch(HP_NAME_IBRS_LEAVE, bytes, size);
190 190
191 x86_patch_window_close(psl, cr0); 191 x86_patch_window_close(psl, cr0);
192} 192}
193 193
194static void 194static void
195ibrs_enable_hotpatch(void) 195ibrs_enable_hotpatch(void)
196{ 196{
197 extern uint8_t ibrs_enter, ibrs_enter_end; 197 extern uint8_t ibrs_enter, ibrs_enter_end;
198 extern uint8_t ibrs_leave, ibrs_leave_end; 198 extern uint8_t ibrs_leave, ibrs_leave_end;
199 u_long psl, cr0; 199 u_long psl, cr0;
200 uint8_t *bytes; 200 uint8_t *bytes;
201 size_t size; 201 size_t size;
202 202
203 x86_patch_window_open(&psl, &cr0); 203 x86_patch_window_open(&psl, &cr0);
204 204
205 bytes = &ibrs_enter; 205 bytes = &ibrs_enter;
206 size = (size_t)&ibrs_enter_end - (size_t)&ibrs_enter; 206 size = (size_t)&ibrs_enter_end - (size_t)&ibrs_enter;
207 x86_hotpatch(HP_NAME_IBRS_ENTER, bytes, size); 207 x86_hotpatch(HP_NAME_IBRS_ENTER, bytes, size);
208 208
209 bytes = &ibrs_leave; 209 bytes = &ibrs_leave;
210 size = (size_t)&ibrs_leave_end - (size_t)&ibrs_leave; 210 size = (size_t)&ibrs_leave_end - (size_t)&ibrs_leave;
211 x86_hotpatch(HP_NAME_IBRS_LEAVE, bytes, size); 211 x86_hotpatch(HP_NAME_IBRS_LEAVE, bytes, size);
212 212
213 x86_patch_window_close(psl, cr0); 213 x86_patch_window_close(psl, cr0);
214} 214}
215#else 215#else
216/* IBRS not supported on i386 */ 216/* IBRS not supported on i386 */
217static void 217static void
218ibrs_disable_hotpatch(void) 218ibrs_disable_hotpatch(void)
219{ 219{
220 panic("%s: impossible", __func__); 220 panic("%s: impossible", __func__);
221} 221}
222static void 222static void
223ibrs_enable_hotpatch(void) 223ibrs_enable_hotpatch(void)
224{ 224{
225 panic("%s: impossible", __func__); 225 panic("%s: impossible", __func__);
226} 226}
227#endif 227#endif
228 228
229/* -------------------------------------------------------------------------- */ 229/* -------------------------------------------------------------------------- */
230 230
231static void 231static void
232mitigation_v2_apply_cpu(struct cpu_info *ci, bool enabled) 232mitigation_v2_apply_cpu(struct cpu_info *ci, bool enabled)
233{ 233{
234 uint64_t msr; 234 uint64_t msr;
235 235
236 switch (v2_mitigation_method) { 236 switch (v2_mitigation_method) {
237 case V2_MITIGATION_NONE: 237 case V2_MITIGATION_NONE:
238 panic("impossible"); 238 panic("impossible");
239 case V2_MITIGATION_INTEL_IBRS: 239 case V2_MITIGATION_INTEL_IBRS:
240 /* cpu0 is the one that does the hotpatch job */ 240 /* cpu0 is the one that does the hotpatch job */
241 if (ci == &cpu_info_primary) { 241 if (ci == &cpu_info_primary) {
242 if (enabled) { 242 if (enabled) {
243 ibrs_enable_hotpatch(); 243 ibrs_enable_hotpatch();
244 } else { 244 } else {
245 ibrs_disable_hotpatch(); 245 ibrs_disable_hotpatch();
246 } 246 }
247 } 247 }
248 if (!enabled) { 248 if (!enabled) {
249 wrmsr(MSR_IA32_SPEC_CTRL, 0); 249 wrmsr(MSR_IA32_SPEC_CTRL, 0);
250 } 250 }
251 break; 251 break;
252 case V2_MITIGATION_INTEL_ENHANCED_IBRS: 252 case V2_MITIGATION_INTEL_ENHANCED_IBRS:
253 msr = rdmsr(MSR_IA32_SPEC_CTRL); 253 msr = rdmsr(MSR_IA32_SPEC_CTRL);
254 if (enabled) { 254 if (enabled) {
255 msr |= IA32_SPEC_CTRL_IBRS; 255 msr |= IA32_SPEC_CTRL_IBRS;
256 } else { 256 } else {
257 msr &= ~IA32_SPEC_CTRL_IBRS; 257 msr &= ~IA32_SPEC_CTRL_IBRS;
258 } 258 }
259 wrmsr(MSR_IA32_SPEC_CTRL, msr); 259 wrmsr(MSR_IA32_SPEC_CTRL, msr);
260 break; 260 break;
261 case V2_MITIGATION_AMD_DIS_IND: 261 case V2_MITIGATION_AMD_DIS_IND:
262 msr = rdmsr(MSR_IC_CFG); 262 msr = rdmsr(MSR_IC_CFG);
263 if (enabled) { 263 if (enabled) {
264 msr |= IC_CFG_DIS_IND; 264 msr |= IC_CFG_DIS_IND;
265 } else { 265 } else {
266 msr &= ~IC_CFG_DIS_IND; 266 msr &= ~IC_CFG_DIS_IND;
267 } 267 }
268 wrmsr(MSR_IC_CFG, msr); 268 wrmsr(MSR_IC_CFG, msr);
269 break; 269 break;
270 } 270 }
271} 271}
272 272
273/* 273/*
274 * Note: IBRS requires hotpatching, so we need barriers. 274 * Note: IBRS requires hotpatching, so we need barriers.
275 */ 275 */
276static void 276static void
277mitigation_v2_change_cpu(void *arg1, void *arg2) 277mitigation_v2_change_cpu(void *arg1, void *arg2)
278{ 278{
279 struct cpu_info *ci = curcpu(); 279 struct cpu_info *ci = curcpu();
280 bool enabled = (bool)arg1; 280 bool enabled = (bool)arg1;
281 u_long psl = 0; 281 u_long psl = 0;
282 282
283 /* Rendez-vous 1 (IBRS only). */ 283 /* Rendez-vous 1 (IBRS only). */
284 if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) { 284 if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) {
285 psl = x86_read_psl(); 285 psl = x86_read_psl();
286 x86_disable_intr(); 286 x86_disable_intr();
287 287
288 atomic_dec_ulong(&ibrs_cpu_barrier1); 288 atomic_dec_ulong(&ibrs_cpu_barrier1);
289 while (atomic_cas_ulong(&ibrs_cpu_barrier1, 0, 0) != 0) { 289 while (atomic_cas_ulong(&ibrs_cpu_barrier1, 0, 0) != 0) {
290 x86_pause(); 290 x86_pause();
291 } 291 }
292 } 292 }
293 293
294 mitigation_v2_apply_cpu(ci, enabled); 294 mitigation_v2_apply_cpu(ci, enabled);
295 295
296 /* Rendez-vous 2 (IBRS only). */ 296 /* Rendez-vous 2 (IBRS only). */
297 if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) { 297 if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) {
298 atomic_dec_ulong(&ibrs_cpu_barrier2); 298 atomic_dec_ulong(&ibrs_cpu_barrier2);
299 while (atomic_cas_ulong(&ibrs_cpu_barrier2, 0, 0) != 0) { 299 while (atomic_cas_ulong(&ibrs_cpu_barrier2, 0, 0) != 0) {
300 x86_pause(); 300 x86_pause();
301 } 301 }
302 302
303 /* Write back and invalidate cache, flush pipelines. */ 303 /* Write back and invalidate cache, flush pipelines. */
304 wbinvd(); 304 wbinvd();
305 x86_flush(); 305 x86_flush();
306 306
307 x86_write_psl(psl); 307 x86_write_psl(psl);
308 } 308 }
309} 309}
310 310
311static int 311static int
312mitigation_v2_change(bool enabled) 312mitigation_v2_change(bool enabled)
313{ 313{
314 uint64_t xc; 314 uint64_t xc;
315 315
316 v2_detect_method(); 316 v2_detect_method();
317 317
318 switch (v2_mitigation_method) { 318 switch (v2_mitigation_method) {
319 case V2_MITIGATION_NONE: 319 case V2_MITIGATION_NONE:
320 printf("[!] No mitigation available\n"); 320 printf("[!] No mitigation available\n");
321 return EOPNOTSUPP; 321 return EOPNOTSUPP;
322 case V2_MITIGATION_AMD_DIS_IND: 322 case V2_MITIGATION_AMD_DIS_IND:
323 case V2_MITIGATION_INTEL_IBRS: 323 case V2_MITIGATION_INTEL_IBRS:
324 case V2_MITIGATION_INTEL_ENHANCED_IBRS: 324 case V2_MITIGATION_INTEL_ENHANCED_IBRS:
325 /* Initialize the barriers */ 325 /* Initialize the barriers */
326 ibrs_cpu_barrier1 = ncpu; 326 ibrs_cpu_barrier1 = ncpu;
327 ibrs_cpu_barrier2 = ncpu; 327 ibrs_cpu_barrier2 = ncpu;
328 328
329 printf("[+] %s SpectreV2 Mitigation...", 329 printf("[+] %s SpectreV2 Mitigation...",
330 enabled ? "Enabling" : "Disabling"); 330 enabled ? "Enabling" : "Disabling");
331 xc = xc_broadcast(XC_HIGHPRI, mitigation_v2_change_cpu, 331 xc = xc_broadcast(XC_HIGHPRI, mitigation_v2_change_cpu,
332 (void *)enabled, NULL); 332 (void *)enabled, NULL);
333 xc_wait(xc); 333 xc_wait(xc);
334 printf(" done!\n"); 334 printf(" done!\n");
335 v2_mitigation_enabled = enabled; 335 v2_mitigation_enabled = enabled;
336 v2_set_name(); 336 v2_set_name();
337 return 0; 337 return 0;
338 default: 338 default:
339 panic("impossible"); 339 panic("impossible");
340 } 340 }
341} 341}
342 342
343static int 343static int
344sysctl_machdep_spectreV2_mitigated(SYSCTLFN_ARGS) 344sysctl_machdep_spectreV2_mitigated(SYSCTLFN_ARGS)
345{ 345{
346 struct sysctlnode node; 346 struct sysctlnode node;
347 int error; 347 int error;
348 bool val; 348 bool val;
349 349
350 val = *(bool *)rnode->sysctl_data; 350 val = *(bool *)rnode->sysctl_data;
351 351
352 node = *rnode; 352 node = *rnode;
353 node.sysctl_data = &val; 353 node.sysctl_data = &val;
354 354
355 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 355 error = sysctl_lookup(SYSCTLFN_CALL(&node));
356 if (error != 0 || newp == NULL) 356 if (error != 0 || newp == NULL)
357 return error; 357 return error;
358 358
359 if (val == v2_mitigation_enabled) 359 if (val == v2_mitigation_enabled)
360 return 0; 360 return 0;
361 return mitigation_v2_change(val); 361 return mitigation_v2_change(val);
362} 362}
363 363
364/* -------------------------------------------------------------------------- */ 364/* -------------------------------------------------------------------------- */
365 365
366static void 366static void
367v4_set_name(void) 367v4_set_name(void)
368{ 368{
369 char name[64] = ""; 369 char name[64] = "";
370 370
371 if (!v4_mitigation_enabled) { 371 if (!v4_mitigation_enabled) {
372 strlcat(name, "(none)", sizeof(name)); 372 strlcat(name, "(none)", sizeof(name));
373 } else { 373 } else {
374 switch (v4_mitigation_method) { 374 switch (v4_mitigation_method) {
375 case V4_MITIGATION_NONE: 375 case V4_MITIGATION_NONE:
376 panic("%s: impossible", __func__); 376 panic("%s: impossible", __func__);
377 case V4_MITIGATION_INTEL_SSBD: 377 case V4_MITIGATION_INTEL_SSBD:
378 strlcat(name, "[Intel SSBD]", sizeof(name)); 378 strlcat(name, "[Intel SSBD]", sizeof(name));
379 break; 379 break;
380 case V4_MITIGATION_INTEL_SSB_NO: 380 case V4_MITIGATION_INTEL_SSB_NO:
381 strlcat(name, "[Intel SSB_NO]", sizeof(name)); 381 strlcat(name, "[Intel SSB_NO]", sizeof(name));
382 break; 382 break;
383 case V4_MITIGATION_AMD_SSB_NO: 383 case V4_MITIGATION_AMD_SSB_NO:
384 strlcat(name, "[AMD SSB_NO]", sizeof(name)); 384 strlcat(name, "[AMD SSB_NO]", sizeof(name));
385 break; 385 break;
386 case V4_MITIGATION_AMD_NONARCH_F15H: 386 case V4_MITIGATION_AMD_NONARCH_F15H:
387 case V4_MITIGATION_AMD_NONARCH_F16H: 387 case V4_MITIGATION_AMD_NONARCH_F16H:
388 case V4_MITIGATION_AMD_NONARCH_F17H: 388 case V4_MITIGATION_AMD_NONARCH_F17H:
389 strlcat(name, "[AMD NONARCH]", sizeof(name)); 389 strlcat(name, "[AMD NONARCH]", sizeof(name));
390 break; 390 break;
391 } 391 }
392 } 392 }
393 393
394 strlcpy(v4_mitigation_name, name, 394 strlcpy(v4_mitigation_name, name,
395 sizeof(v4_mitigation_name)); 395 sizeof(v4_mitigation_name));
396} 396}
397 397
398static void 398static void
399v4_detect_method(void) 399v4_detect_method(void)
400{ 400{
401 struct cpu_info *ci = curcpu(); 401 struct cpu_info *ci = curcpu();
402 u_int descs[4]; 402 u_int descs[4];
403 uint64_t msr; 403 uint64_t msr;
404 404
405 if (cpu_vendor == CPUVENDOR_INTEL) { 405 if (cpu_vendor == CPUVENDOR_INTEL) {
406 if (cpu_info_primary.ci_feat_val[7] & CPUID_SEF_ARCH_CAP) { 406 if (cpu_info_primary.ci_feat_val[7] & CPUID_SEF_ARCH_CAP) {
407 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES); 407 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
408 if (msr & IA32_ARCH_SSB_NO) { 408 if (msr & IA32_ARCH_SSB_NO) {
409 /* Not vulnerable to SpectreV4. */ 409 /* Not vulnerable to SpectreV4. */
410 v4_mitigation_method = V4_MITIGATION_INTEL_SSB_NO; 410 v4_mitigation_method = V4_MITIGATION_INTEL_SSB_NO;
411 return; 411 return;
412 } 412 }
413 } 413 }
414 if (cpuid_level >= 7) { 414 if (cpuid_level >= 7) {
415 x86_cpuid(7, descs); 415 x86_cpuid(7, descs);
416 if (descs[3] & CPUID_SEF_SSBD) { 416 if (descs[3] & CPUID_SEF_SSBD) {
417 /* descs[3] = %edx */ 417 /* descs[3] = %edx */
418 v4_mitigation_method = V4_MITIGATION_INTEL_SSBD; 418 v4_mitigation_method = V4_MITIGATION_INTEL_SSBD;
419 return; 419 return;
420 } 420 }
421 } 421 }
422 } else if (cpu_vendor == CPUVENDOR_AMD) { 422 } else if (cpu_vendor == CPUVENDOR_AMD) {
423 switch (CPUID_TO_FAMILY(ci->ci_signature)) { 423 switch (CPUID_TO_FAMILY(ci->ci_signature)) {
424 case 0x15: 424 case 0x15:
425 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F15H; 425 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F15H;
426 return; 426 return;
427 case 0x16: 427 case 0x16:
428 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F16H; 428 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F16H;
429 return; 429 return;
430 case 0x17: 430 case 0x17:
431 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F17H; 431 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F17H;
432 return; 432 return;
433 default: 433 default:
434 if (cpu_info_primary.ci_max_ext_cpuid < 0x80000008) { 434 if (cpu_info_primary.ci_max_ext_cpuid < 0x80000008) {
435 break; 435 break;
436 } 436 }
437 x86_cpuid(0x80000008, descs); 437 x86_cpuid(0x80000008, descs);
438 if (descs[1] & __BIT(26)) { 438 if (descs[1] & CPUID_CAPEX_SSB_NO) {
439 /* Not vulnerable to SpectreV4. */ 439 /* Not vulnerable to SpectreV4. */
440 v4_mitigation_method = V4_MITIGATION_AMD_SSB_NO; 440 v4_mitigation_method = V4_MITIGATION_AMD_SSB_NO;
441 return; 441 return;
442 } 442 }
443 443
444 break; 444 break;
445 } 445 }
446 } 446 }
447 447
448 v4_mitigation_method = V4_MITIGATION_NONE; 448 v4_mitigation_method = V4_MITIGATION_NONE;
449} 449}
450 450
451static void 451static void
452mitigation_v4_apply_cpu(bool enabled) 452mitigation_v4_apply_cpu(bool enabled)
453{ 453{
454 uint64_t msr, msrval = 0, msrbit = 0; 454 uint64_t msr, msrval = 0, msrbit = 0;
455 455
456 switch (v4_mitigation_method) { 456 switch (v4_mitigation_method) {
457 case V4_MITIGATION_NONE: 457 case V4_MITIGATION_NONE:
458 case V4_MITIGATION_INTEL_SSB_NO: 458 case V4_MITIGATION_INTEL_SSB_NO:
459 case V4_MITIGATION_AMD_SSB_NO: 459 case V4_MITIGATION_AMD_SSB_NO:
460 panic("impossible"); 460 panic("impossible");
461 case V4_MITIGATION_INTEL_SSBD: 461 case V4_MITIGATION_INTEL_SSBD:
462 msrval = MSR_IA32_SPEC_CTRL; 462 msrval = MSR_IA32_SPEC_CTRL;
463 msrbit = IA32_SPEC_CTRL_SSBD; 463 msrbit = IA32_SPEC_CTRL_SSBD;
464 break; 464 break;
465 case V4_MITIGATION_AMD_NONARCH_F15H: 465 case V4_MITIGATION_AMD_NONARCH_F15H:
466 msrval = MSR_LS_CFG; 466 msrval = MSR_LS_CFG;
467 msrbit = LS_CFG_DIS_SSB_F15H; 467 msrbit = LS_CFG_DIS_SSB_F15H;
468 break; 468 break;
469 case V4_MITIGATION_AMD_NONARCH_F16H: 469 case V4_MITIGATION_AMD_NONARCH_F16H:
470 msrval = MSR_LS_CFG; 470 msrval = MSR_LS_CFG;
471 msrbit = LS_CFG_DIS_SSB_F16H; 471 msrbit = LS_CFG_DIS_SSB_F16H;
472 break; 472 break;
473 case V4_MITIGATION_AMD_NONARCH_F17H: 473 case V4_MITIGATION_AMD_NONARCH_F17H:
474 msrval = MSR_LS_CFG; 474 msrval = MSR_LS_CFG;
475 msrbit = LS_CFG_DIS_SSB_F17H; 475 msrbit = LS_CFG_DIS_SSB_F17H;
476 break; 476 break;
477 } 477 }
478 478
479 msr = rdmsr(msrval); 479 msr = rdmsr(msrval);
480 if (enabled) { 480 if (enabled) {
481 msr |= msrbit; 481 msr |= msrbit;
482 } else { 482 } else {
483 msr &= ~msrbit; 483 msr &= ~msrbit;
484 } 484 }
485 wrmsr(msrval, msr); 485 wrmsr(msrval, msr);
486} 486}
487 487
488static void 488static void
489mitigation_v4_change_cpu(void *arg1, void *arg2) 489mitigation_v4_change_cpu(void *arg1, void *arg2)
490{ 490{
491 bool enabled = (bool)arg1; 491 bool enabled = (bool)arg1;
492 492
493 mitigation_v4_apply_cpu(enabled); 493 mitigation_v4_apply_cpu(enabled);
494} 494}
495 495
496static int 496static int
497mitigation_v4_change(bool enabled) 497mitigation_v4_change(bool enabled)
498{ 498{
499 uint64_t xc; 499 uint64_t xc;
500 500
501 v4_detect_method(); 501 v4_detect_method();
502 502
503 switch (v4_mitigation_method) { 503 switch (v4_mitigation_method) {
504 case V4_MITIGATION_NONE: 504 case V4_MITIGATION_NONE:
505 printf("[!] No mitigation available\n"); 505 printf("[!] No mitigation available\n");
506 return EOPNOTSUPP; 506 return EOPNOTSUPP;
507 case V4_MITIGATION_INTEL_SSBD: 507 case V4_MITIGATION_INTEL_SSBD:
508 case V4_MITIGATION_AMD_NONARCH_F15H: 508 case V4_MITIGATION_AMD_NONARCH_F15H:
509 case V4_MITIGATION_AMD_NONARCH_F16H: 509 case V4_MITIGATION_AMD_NONARCH_F16H:
510 case V4_MITIGATION_AMD_NONARCH_F17H: 510 case V4_MITIGATION_AMD_NONARCH_F17H:
511 printf("[+] %s SpectreV4 Mitigation...", 511 printf("[+] %s SpectreV4 Mitigation...",
512 enabled ? "Enabling" : "Disabling"); 512 enabled ? "Enabling" : "Disabling");
513 xc = xc_broadcast(0, mitigation_v4_change_cpu, 513 xc = xc_broadcast(0, mitigation_v4_change_cpu,
514 (void *)enabled, NULL); 514 (void *)enabled, NULL);
515 xc_wait(xc); 515 xc_wait(xc);
516 printf(" done!\n"); 516 printf(" done!\n");
517 v4_mitigation_enabled = enabled; 517 v4_mitigation_enabled = enabled;
518 v4_set_name(); 518 v4_set_name();
519 return 0; 519 return 0;
520 case V4_MITIGATION_INTEL_SSB_NO: 520 case V4_MITIGATION_INTEL_SSB_NO:
521 case V4_MITIGATION_AMD_SSB_NO: 521 case V4_MITIGATION_AMD_SSB_NO:
522 printf("[+] The CPU is not affected by SpectreV4\n"); 522 printf("[+] The CPU is not affected by SpectreV4\n");
523 return 0; 523 return 0;
524 default: 524 default:
525 panic("impossible"); 525 panic("impossible");
526 } 526 }
527} 527}
528 528
529static int 529static int
530sysctl_machdep_spectreV4_mitigated(SYSCTLFN_ARGS) 530sysctl_machdep_spectreV4_mitigated(SYSCTLFN_ARGS)
531{ 531{
532 struct sysctlnode node; 532 struct sysctlnode node;
533 int error; 533 int error;
534 bool val; 534 bool val;
535 535
536 val = *(bool *)rnode->sysctl_data; 536 val = *(bool *)rnode->sysctl_data;
537 537
538 node = *rnode; 538 node = *rnode;
539 node.sysctl_data = &val; 539 node.sysctl_data = &val;
540 540
541 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 541 error = sysctl_lookup(SYSCTLFN_CALL(&node));
542 if (error != 0 || newp == NULL) 542 if (error != 0 || newp == NULL)
543 return error; 543 return error;
544 544
545 if (val == v4_mitigation_enabled) 545 if (val == v4_mitigation_enabled)
546 return 0; 546 return 0;
547 return mitigation_v4_change(val); 547 return mitigation_v4_change(val);
548} 548}
549 549
550/* -------------------------------------------------------------------------- */ 550/* -------------------------------------------------------------------------- */
551 551
552enum mds_mitigation { 552enum mds_mitigation {
553 MDS_MITIGATION_NONE, 553 MDS_MITIGATION_NONE,
554 MDS_MITIGATION_VERW, 554 MDS_MITIGATION_VERW,
555 MDS_MITIGATION_MDS_NO 555 MDS_MITIGATION_MDS_NO
556}; 556};
557 557
558static char mds_mitigation_name[64] = "(none)"; 558static char mds_mitigation_name[64] = "(none)";
559 559
560static enum mds_mitigation mds_mitigation_method = MDS_MITIGATION_NONE; 560static enum mds_mitigation mds_mitigation_method = MDS_MITIGATION_NONE;
561static bool mds_mitigation_enabled __read_mostly = false; 561static bool mds_mitigation_enabled __read_mostly = false;
562 562
563static volatile unsigned long mds_cpu_barrier1 __cacheline_aligned; 563static volatile unsigned long mds_cpu_barrier1 __cacheline_aligned;
564static volatile unsigned long mds_cpu_barrier2 __cacheline_aligned; 564static volatile unsigned long mds_cpu_barrier2 __cacheline_aligned;
565 565
#ifdef __x86_64__
/*
 * Hotpatch the HP_NAME_MDS_LEAVE site in the kernel-exit path with the
 * "nomds" variant, removing the MDS buffer flush.  The nomds_leave..
 * nomds_leave_end bytes are assembly stubs defined elsewhere.
 */
static void
mds_disable_hotpatch(void)
{
	extern uint8_t nomds_leave, nomds_leave_end;
	u_long psl, cr0;
	uint8_t *bytes;
	size_t size;

	/* Open the patch window (interrupts off, text made writable). */
	x86_patch_window_open(&psl, &cr0);

	bytes = &nomds_leave;
	size = (size_t)&nomds_leave_end - (size_t)&nomds_leave;
	x86_hotpatch(HP_NAME_MDS_LEAVE, bytes, size);

	/* Restore write protection and the saved PSL. */
	x86_patch_window_close(psl, cr0);
}

/*
 * Hotpatch the HP_NAME_MDS_LEAVE site with the MDS-mitigation variant
 * (the mds_leave..mds_leave_end assembly stub), which flushes the CPU
 * buffers before returning to userland.
 */
static void
mds_enable_hotpatch(void)
{
	extern uint8_t mds_leave, mds_leave_end;
	u_long psl, cr0;
	uint8_t *bytes;
	size_t size;

	x86_patch_window_open(&psl, &cr0);

	bytes = &mds_leave;
	size = (size_t)&mds_leave_end - (size_t)&mds_leave;
	x86_hotpatch(HP_NAME_MDS_LEAVE, bytes, size);

	x86_patch_window_close(psl, cr0);
}
#else
/* MDS not supported on i386 */
static void
mds_disable_hotpatch(void)
{
	panic("%s: impossible", __func__);
}
static void
mds_enable_hotpatch(void)
{
	panic("%s: impossible", __func__);
}
#endif
613 613
614static void 614static void
615mitigation_mds_apply_cpu(struct cpu_info *ci, bool enabled) 615mitigation_mds_apply_cpu(struct cpu_info *ci, bool enabled)
616{ 616{
617 switch (mds_mitigation_method) { 617 switch (mds_mitigation_method) {
618 case MDS_MITIGATION_NONE: 618 case MDS_MITIGATION_NONE:
619 case MDS_MITIGATION_MDS_NO: 619 case MDS_MITIGATION_MDS_NO:
620 panic("impossible"); 620 panic("impossible");
621 case MDS_MITIGATION_VERW: 621 case MDS_MITIGATION_VERW:
622 /* cpu0 is the one that does the hotpatch job */ 622 /* cpu0 is the one that does the hotpatch job */
623 if (ci == &cpu_info_primary) { 623 if (ci == &cpu_info_primary) {
624 if (enabled) { 624 if (enabled) {
625 mds_enable_hotpatch(); 625 mds_enable_hotpatch();
626 } else { 626 } else {
627 mds_disable_hotpatch(); 627 mds_disable_hotpatch();
628 } 628 }
629 } 629 }
630 break; 630 break;
631 } 631 }
632} 632}
633 633
/*
 * Cross-call handler, executed on every CPU to switch the MDS mitigation
 * on or off.  The two rendez-vous points ensure that cpu0 performs the
 * text hotpatch while no other CPU can be running the patched code, and
 * that no CPU leaves before the patch is complete.
 *
 * arg1: (bool) whether the mitigation is being enabled.
 * arg2: unused.
 */
static void
mitigation_mds_change_cpu(void *arg1, void *arg2)
{
	struct cpu_info *ci = curcpu();
	bool enabled = (bool)arg1;
	u_long psl = 0;

	/* Rendez-vous 1: interrupts off, wait for all CPUs to arrive. */
	psl = x86_read_psl();
	x86_disable_intr();

	atomic_dec_ulong(&mds_cpu_barrier1);
	while (atomic_cas_ulong(&mds_cpu_barrier1, 0, 0) != 0) {
		x86_pause();
	}

	/* cpu0 hotpatches here; the others just pass through. */
	mitigation_mds_apply_cpu(ci, enabled);

	/* Rendez-vous 2: wait until the patching is done everywhere. */
	atomic_dec_ulong(&mds_cpu_barrier2);
	while (atomic_cas_ulong(&mds_cpu_barrier2, 0, 0) != 0) {
		x86_pause();
	}

	/* Write back and invalidate cache, flush pipelines. */
	wbinvd();
	x86_flush();

	/* Restore the interrupt state saved above. */
	x86_write_psl(psl);
}
664 664
665static void 665static void
666mds_detect_method(void) 666mds_detect_method(void)
667{ 667{
668 u_int descs[4]; 668 u_int descs[4];
669 uint64_t msr; 669 uint64_t msr;
670 670
671 if (cpu_vendor != CPUVENDOR_INTEL) { 671 if (cpu_vendor != CPUVENDOR_INTEL) {
672 mds_mitigation_method = MDS_MITIGATION_MDS_NO; 672 mds_mitigation_method = MDS_MITIGATION_MDS_NO;
673 return; 673 return;
674 } 674 }
675 675
676 if (cpuid_level < 7) { 676 if (cpuid_level < 7) {
677 return; 677 return;
678 } 678 }
679 679
680 x86_cpuid(0x7, descs); 680 x86_cpuid(0x7, descs);
681 if (descs[3] & CPUID_SEF_ARCH_CAP) { 681 if (descs[3] & CPUID_SEF_ARCH_CAP) {
682 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES); 682 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
683 if (msr & IA32_ARCH_MDS_NO) { 683 if (msr & IA32_ARCH_MDS_NO) {
684 mds_mitigation_method = MDS_MITIGATION_MDS_NO; 684 mds_mitigation_method = MDS_MITIGATION_MDS_NO;
685 return; 685 return;
686 } 686 }
687 } 687 }
688 688
689#ifdef __x86_64__ 689#ifdef __x86_64__
690 if (descs[3] & CPUID_SEF_MD_CLEAR) { 690 if (descs[3] & CPUID_SEF_MD_CLEAR) {
691 mds_mitigation_method = MDS_MITIGATION_VERW; 691 mds_mitigation_method = MDS_MITIGATION_VERW;
692 } 692 }
693#endif 693#endif
694} 694}
695 695
696static void 696static void
697mds_set_name(void) 697mds_set_name(void)
698{ 698{
699 char name[64] = ""; 699 char name[64] = "";
700 700
701 if (!mds_mitigation_enabled) { 701 if (!mds_mitigation_enabled) {
702 strlcat(name, "(none)", sizeof(name)); 702 strlcat(name, "(none)", sizeof(name));
703 } else { 703 } else {
704 switch (mds_mitigation_method) { 704 switch (mds_mitigation_method) {
705 case MDS_MITIGATION_NONE: 705 case MDS_MITIGATION_NONE:
706 panic("%s: impossible", __func__); 706 panic("%s: impossible", __func__);
707 case MDS_MITIGATION_MDS_NO: 707 case MDS_MITIGATION_MDS_NO:
708 strlcat(name, "[MDS_NO]", sizeof(name)); 708 strlcat(name, "[MDS_NO]", sizeof(name));
709 break; 709 break;
710 case MDS_MITIGATION_VERW: 710 case MDS_MITIGATION_VERW:
711 strlcat(name, "[VERW]", sizeof(name)); 711 strlcat(name, "[VERW]", sizeof(name));
712 break; 712 break;
713 } 713 }
714 } 714 }
715 715
716 strlcpy(mds_mitigation_name, name, 716 strlcpy(mds_mitigation_name, name,
717 sizeof(mds_mitigation_name)); 717 sizeof(mds_mitigation_name));
718} 718}
719 719
/*
 * Enable or disable the MDS mitigation on all CPUs, synchronously.
 * Called from the sysctl handler.
 *
 * Returns 0 on success, EOPNOTSUPP when no mitigation is available.
 */
static int
mitigation_mds_change(bool enabled)
{
	uint64_t xc;

	/*
	 * Re-run detection each time; presumably the available method
	 * can change (e.g. after a microcode update) -- TODO confirm.
	 */
	mds_detect_method();

	switch (mds_mitigation_method) {
	case MDS_MITIGATION_NONE:
		printf("[!] No mitigation available\n");
		return EOPNOTSUPP;
	case MDS_MITIGATION_VERW:
		/*
		 * Initialize the barriers.  This must happen before the
		 * broadcast, since every CPU decrements them in
		 * mitigation_mds_change_cpu().
		 */
		mds_cpu_barrier1 = ncpu;
		mds_cpu_barrier2 = ncpu;

		printf("[+] %s MDS Mitigation...",
		    enabled ? "Enabling" : "Disabling");
		/* High-priority cross-call on every CPU; wait for all. */
		xc = xc_broadcast(XC_HIGHPRI, mitigation_mds_change_cpu,
		    (void *)enabled, NULL);
		xc_wait(xc);
		printf(" done!\n");
		mds_mitigation_enabled = enabled;
		mds_set_name();
		return 0;
	case MDS_MITIGATION_MDS_NO:
		/* Nothing to do: the CPU is not vulnerable. */
		printf("[+] The CPU is not affected by MDS\n");
		return 0;
	default:
		panic("impossible");
	}
}
752 752
753static int 753static int
754sysctl_machdep_mds_mitigated(SYSCTLFN_ARGS) 754sysctl_machdep_mds_mitigated(SYSCTLFN_ARGS)
755{ 755{
756 struct sysctlnode node; 756 struct sysctlnode node;
757 int error; 757 int error;
758 bool val; 758 bool val;
759 759
760 val = *(bool *)rnode->sysctl_data; 760 val = *(bool *)rnode->sysctl_data;
761 761
762 node = *rnode; 762 node = *rnode;
763 node.sysctl_data = &val; 763 node.sysctl_data = &val;
764 764
765 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 765 error = sysctl_lookup(SYSCTLFN_CALL(&node));
766 if (error != 0 || newp == NULL) 766 if (error != 0 || newp == NULL)
767 return error; 767 return error;
768 768
769 if (val == mds_mitigation_enabled) 769 if (val == mds_mitigation_enabled)
770 return 0; 770 return 0;
771 return mitigation_mds_change(val); 771 return mitigation_mds_change(val);
772} 772}
773 773
774/* -------------------------------------------------------------------------- */ 774/* -------------------------------------------------------------------------- */
775 775
776void speculation_barrier(struct lwp *, struct lwp *); 776void speculation_barrier(struct lwp *, struct lwp *);
777 777
778void 778void
779speculation_barrier(struct lwp *oldlwp, struct lwp *newlwp) 779speculation_barrier(struct lwp *oldlwp, struct lwp *newlwp)
780{ 780{
781 /* 781 /*
782 * Speculation barriers are applicable only to Spectre V2. 782 * Speculation barriers are applicable only to Spectre V2.
783 */ 783 */
784 if (!v2_mitigation_enabled) 784 if (!v2_mitigation_enabled)
785 return; 785 return;
786 786
787 /* 787 /*
788 * From kernel thread to kernel thread, no need for a barrier. 788 * From kernel thread to kernel thread, no need for a barrier.
789 */ 789 */
790 if ((oldlwp != NULL && (oldlwp->l_flag & LW_SYSTEM)) && 790 if ((oldlwp != NULL && (oldlwp->l_flag & LW_SYSTEM)) &&
791 (newlwp->l_flag & LW_SYSTEM)) 791 (newlwp->l_flag & LW_SYSTEM))
792 return; 792 return;
793 793
794 switch (v2_mitigation_method) { 794 switch (v2_mitigation_method) {
795 case V2_MITIGATION_INTEL_IBRS: 795 case V2_MITIGATION_INTEL_IBRS:
796 wrmsr(MSR_IA32_PRED_CMD, IA32_PRED_CMD_IBPB); 796 wrmsr(MSR_IA32_PRED_CMD, IA32_PRED_CMD_IBPB);
797 break; 797 break;
798 default: 798 default:
799 /* nothing */ 799 /* nothing */
800 break; 800 break;
801 } 801 }
802} 802}
803 803
/*
 * Per-CPU speculation-mitigation initialization, run at attach time.
 * cpu0 (cpu_info_primary) detects each mitigation method and sets the
 * global state; every CPU then applies the relevant mitigations to
 * itself.
 */
void
cpu_speculation_init(struct cpu_info *ci)
{
	/*
	 * Spectre V2.
	 *
	 * cpu0 is the one that detects the method and sets the global
	 * variable.
	 */
	if (ci == &cpu_info_primary) {
		v2_detect_method();
		v2_mitigation_enabled =
		    (v2_mitigation_method != V2_MITIGATION_NONE);
		v2_set_name();
	}
	if (v2_mitigation_method != V2_MITIGATION_NONE) {
		mitigation_v2_apply_cpu(ci, true);
	}

	/*
	 * Spectre V4.
	 *
	 * cpu0 is the one that detects the method and sets the global
	 * variable.
	 *
	 * Disabled by default, as recommended by AMD, but can be enabled
	 * dynamically. We only detect if the CPU is not vulnerable, to
	 * mark it as 'mitigated' in the sysctl.
	 */
#if 0
	/* Kept for reference: the enable-by-default variant. */
	if (ci == &cpu_info_primary) {
		v4_detect_method();
		v4_mitigation_enabled =
		    (v4_mitigation_method != V4_MITIGATION_NONE);
		v4_set_name();
	}
	if (v4_mitigation_method != V4_MITIGATION_NONE &&
	    v4_mitigation_method != V4_MITIGATION_INTEL_SSB_NO &&
	    v4_mitigation_method != V4_MITIGATION_AMD_SSB_NO) {
		mitigation_v4_apply_cpu(ci, true);
	}
#else
	if (ci == &cpu_info_primary) {
		v4_detect_method();
		/* Only *_SSB_NO CPUs are reported as mitigated. */
		if (v4_mitigation_method == V4_MITIGATION_INTEL_SSB_NO ||
		    v4_mitigation_method == V4_MITIGATION_AMD_SSB_NO) {
			v4_mitigation_enabled = true;
			v4_set_name();
		}
	}
#endif

	/*
	 * Microarchitectural Data Sampling.
	 *
	 * cpu0 is the one that detects the method and sets the global
	 * variable.
	 */
	if (ci == &cpu_info_primary) {
		mds_detect_method();
		mds_mitigation_enabled =
		    (mds_mitigation_method != MDS_MITIGATION_NONE);
		mds_set_name();
	}
	if (mds_mitigation_method != MDS_MITIGATION_NONE &&
	    mds_mitigation_method != MDS_MITIGATION_MDS_NO) {
		mitigation_mds_apply_cpu(ci, true);
	}
}
873 873
void sysctl_speculation_init(struct sysctllog **);

/*
 * Create the machdep sysctl nodes that expose the state of each
 * speculation mitigation: spectre_v1, spectre_v2, spectre_v4 and mds.
 * Writable "mitigated" nodes call back into the mitigation_*_change
 * handlers; "method" nodes export the human-readable method names.
 */
void
sysctl_speculation_init(struct sysctllog **clog)
{
	const struct sysctlnode *spec_rnode;

	/* SpectreV1 */
	spec_rnode = NULL;
	sysctl_createv(clog, 0, NULL, &spec_rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "spectre_v1", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_CREATE);
	/* Read-only immediate: V1 has no dynamic mitigation here. */
	sysctl_createv(clog, 0, &spec_rnode, &spec_rnode,
	    CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
	    CTLTYPE_BOOL, "mitigated",
	    SYSCTL_DESCR("Whether Spectre Variant 1 is mitigated"),
	    NULL, 0 /* mitigated=0 */, NULL, 0,
	    CTL_CREATE, CTL_EOL);

	/* SpectreV2 */
	spec_rnode = NULL;
	sysctl_createv(clog, 0, NULL, &spec_rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "spectre_v2", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_CREATE);
	/* Writable: toggles the hardware (CPU-based) mitigation. */
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "hwmitigated",
	    SYSCTL_DESCR("Whether Spectre Variant 2 is HW-mitigated"),
	    sysctl_machdep_spectreV2_mitigated, 0,
	    &v2_mitigation_enabled, 0,
	    CTL_CREATE, CTL_EOL);
	/* Immediate constant: set iff built with the GCC retpoline. */
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
	    CTLTYPE_BOOL, "swmitigated",
	    SYSCTL_DESCR("Whether Spectre Variant 2 is SW-mitigated"),
#if defined(SPECTRE_V2_GCC_MITIGATION)
	    NULL, 1,
#else
	    NULL, 0,
#endif
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "method",
	    SYSCTL_DESCR("Mitigation method in use"),
	    NULL, 0,
	    v2_mitigation_name, 0,
	    CTL_CREATE, CTL_EOL);

	/* SpectreV4 */
	spec_rnode = NULL;
	sysctl_createv(clog, 0, NULL, &spec_rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "spectre_v4", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_CREATE);
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "mitigated",
	    SYSCTL_DESCR("Whether Spectre Variant 4 is mitigated"),
	    sysctl_machdep_spectreV4_mitigated, 0,
	    &v4_mitigation_enabled, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "method",
	    SYSCTL_DESCR("Mitigation method in use"),
	    NULL, 0,
	    v4_mitigation_name, 0,
	    CTL_CREATE, CTL_EOL);

	/* Microarchitectural Data Sampling */
	spec_rnode = NULL;
	sysctl_createv(clog, 0, NULL, &spec_rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "mds", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_CREATE);
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "mitigated",
	    SYSCTL_DESCR("Whether MDS is mitigated"),
	    sysctl_machdep_mds_mitigated, 0,
	    &mds_mitigation_enabled, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "method",
	    SYSCTL_DESCR("Mitigation method in use"),
	    NULL, 0,
	    mds_mitigation_name, 0,
	    CTL_CREATE, CTL_EOL);
}