Sat Dec 22 08:59:45 2018 UTC ()
If the CPU is not vulnerable to SpectreV4, say it in the sysctl by default.
Apply some minor style while here.


(maxv)
diff -r1.19 -r1.20 src/sys/arch/x86/x86/spectre.c

cvs diff -r1.19 -r1.20 src/sys/arch/x86/x86/spectre.c (switch to unified diff)

--- src/sys/arch/x86/x86/spectre.c 2018/05/28 20:18:58 1.19
+++ src/sys/arch/x86/x86/spectre.c 2018/12/22 08:59:44 1.20
@@ -1,701 +1,715 @@ @@ -1,701 +1,715 @@
1/* $NetBSD: spectre.c,v 1.19 2018/05/28 20:18:58 maxv Exp $ */ 1/* $NetBSD: spectre.c,v 1.20 2018/12/22 08:59:44 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018 NetBSD Foundation, Inc. 4 * Copyright (c) 2018 NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard. 8 * by Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Mitigations for the SpectreV2 and SpectreV4 CPU flaws. 33 * Mitigations for the SpectreV2 and SpectreV4 CPU flaws.
34 */ 34 */
35 35
36#include <sys/cdefs.h> 36#include <sys/cdefs.h>
37__KERNEL_RCSID(0, "$NetBSD: spectre.c,v 1.19 2018/05/28 20:18:58 maxv Exp $"); 37__KERNEL_RCSID(0, "$NetBSD: spectre.c,v 1.20 2018/12/22 08:59:44 maxv Exp $");
38 38
39#include "opt_spectre.h" 39#include "opt_spectre.h"
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <sys/systm.h> 42#include <sys/systm.h>
43#include <sys/cpu.h> 43#include <sys/cpu.h>
44#include <sys/sysctl.h> 44#include <sys/sysctl.h>
45#include <sys/xcall.h> 45#include <sys/xcall.h>
46 46
47#include <machine/cpufunc.h> 47#include <machine/cpufunc.h>
48#include <machine/cpuvar.h> 48#include <machine/cpuvar.h>
49#include <machine/specialreg.h> 49#include <machine/specialreg.h>
50#include <machine/frameasm.h> 50#include <machine/frameasm.h>
51 51
52#include <x86/cputypes.h> 52#include <x86/cputypes.h>
53 53
54enum v2_mitigation { 54enum v2_mitigation {
55 V2_MITIGATION_NONE, 55 V2_MITIGATION_NONE,
56 V2_MITIGATION_AMD_DIS_IND, 56 V2_MITIGATION_AMD_DIS_IND,
57 V2_MITIGATION_INTEL_IBRS 57 V2_MITIGATION_INTEL_IBRS
58}; 58};
59 59
60enum v4_mitigation { 60enum v4_mitigation {
61 V4_MITIGATION_NONE, 61 V4_MITIGATION_NONE,
62 V4_MITIGATION_INTEL_SSBD, 62 V4_MITIGATION_INTEL_SSBD,
63 V4_MITIGATION_INTEL_SSB_NO, 63 V4_MITIGATION_INTEL_SSB_NO,
64 V4_MITIGATION_AMD_NONARCH_F15H, 64 V4_MITIGATION_AMD_NONARCH_F15H,
65 V4_MITIGATION_AMD_NONARCH_F16H, 65 V4_MITIGATION_AMD_NONARCH_F16H,
66 V4_MITIGATION_AMD_NONARCH_F17H 66 V4_MITIGATION_AMD_NONARCH_F17H
67}; 67};
68 68
69static enum v2_mitigation v2_mitigation_method = V2_MITIGATION_NONE; 69static enum v2_mitigation v2_mitigation_method = V2_MITIGATION_NONE;
70static enum v4_mitigation v4_mitigation_method = V4_MITIGATION_NONE; 70static enum v4_mitigation v4_mitigation_method = V4_MITIGATION_NONE;
71 71
72static bool v2_mitigation_enabled __read_mostly = false; 72static bool v2_mitigation_enabled __read_mostly = false;
73static bool v4_mitigation_enabled __read_mostly = false; 73static bool v4_mitigation_enabled __read_mostly = false;
74 74
75static char v2_mitigation_name[64] = "(none)"; 75static char v2_mitigation_name[64] = "(none)";
76static char v4_mitigation_name[64] = "(none)"; 76static char v4_mitigation_name[64] = "(none)";
77 77
78/* --------------------------------------------------------------------- */ 78/* --------------------------------------------------------------------- */
79 79
80static void 80static void
81v2_set_name(void) 81v2_set_name(void)
82{ 82{
83 char name[64] = ""; 83 char name[64] = "";
84 size_t nmitig = 0; 84 size_t nmitig = 0;
85 85
86#if defined(SPECTRE_V2_GCC_MITIGATION) 86#if defined(SPECTRE_V2_GCC_MITIGATION)
87 strlcat(name, "[GCC retpoline]", sizeof(name)); 87 strlcat(name, "[GCC retpoline]", sizeof(name));
88 nmitig++; 88 nmitig++;
89#endif 89#endif
90 90
91 if (!v2_mitigation_enabled) { 91 if (!v2_mitigation_enabled) {
92 if (nmitig == 0) 92 if (nmitig == 0)
93 strlcat(name, "(none)", sizeof(name)); 93 strlcat(name, "(none)", sizeof(name));
94 } else { 94 } else {
95 if (nmitig) 95 if (nmitig)
96 strlcat(name, " + ", sizeof(name)); 96 strlcat(name, " + ", sizeof(name));
97 switch (v2_mitigation_method) { 97 switch (v2_mitigation_method) {
98 case V2_MITIGATION_AMD_DIS_IND: 98 case V2_MITIGATION_AMD_DIS_IND:
99 strlcat(name, "[AMD DIS_IND]", sizeof(name)); 99 strlcat(name, "[AMD DIS_IND]", sizeof(name));
100 break; 100 break;
101 case V2_MITIGATION_INTEL_IBRS: 101 case V2_MITIGATION_INTEL_IBRS:
102 strlcat(name, "[Intel IBRS]", sizeof(name)); 102 strlcat(name, "[Intel IBRS]", sizeof(name));
103 break; 103 break;
104 default: 104 default:
105 panic("%s: impossible", __func__); 105 panic("%s: impossible", __func__);
106 } 106 }
107 } 107 }
108 108
109 strlcpy(v2_mitigation_name, name, 109 strlcpy(v2_mitigation_name, name,
110 sizeof(v2_mitigation_name)); 110 sizeof(v2_mitigation_name));
111} 111}
112 112
113static void 113static void
114v2_detect_method(void) 114v2_detect_method(void)
115{ 115{
116 struct cpu_info *ci = curcpu(); 116 struct cpu_info *ci = curcpu();
117 u_int descs[4]; 117 u_int descs[4];
118 118
119 if (cpu_vendor == CPUVENDOR_INTEL) { 119 if (cpu_vendor == CPUVENDOR_INTEL) {
120 if (cpuid_level >= 7) { 120 if (cpuid_level >= 7) {
121 x86_cpuid(7, descs); 121 x86_cpuid(7, descs);
122 if (descs[3] & CPUID_SEF_IBRS) { 122 if (descs[3] & CPUID_SEF_IBRS) {
123 /* descs[3] = %edx */ 123 /* descs[3] = %edx */
124#ifdef __x86_64__ 124#ifdef __x86_64__
125 v2_mitigation_method = V2_MITIGATION_INTEL_IBRS; 125 v2_mitigation_method = V2_MITIGATION_INTEL_IBRS;
126#else 126#else
127 /* IBRS not supported on i386. */ 127 /* IBRS not supported on i386. */
128 v2_mitigation_method = V2_MITIGATION_NONE; 128 v2_mitigation_method = V2_MITIGATION_NONE;
129#endif 129#endif
130 return; 130 return;
131 } 131 }
132 } 132 }
133 v2_mitigation_method = V2_MITIGATION_NONE; 133 v2_mitigation_method = V2_MITIGATION_NONE;
134 } else if (cpu_vendor == CPUVENDOR_AMD) { 134 } else if (cpu_vendor == CPUVENDOR_AMD) {
135 /* 135 /*
136 * The AMD Family 10h manual documents the IC_CFG.DIS_IND bit. 136 * The AMD Family 10h manual documents the IC_CFG.DIS_IND bit.
137 * This bit disables the Indirect Branch Predictor. 137 * This bit disables the Indirect Branch Predictor.
138 * 138 *
139 * Families 12h and 16h are believed to have this bit too, but 139 * Families 12h and 16h are believed to have this bit too, but
140 * their manuals don't document it. 140 * their manuals don't document it.
141 */ 141 */
142 switch (CPUID_TO_FAMILY(ci->ci_signature)) { 142 switch (CPUID_TO_FAMILY(ci->ci_signature)) {
143 case 0x10: 143 case 0x10:
144 case 0x12: 144 case 0x12:
145 case 0x16: 145 case 0x16:
146 v2_mitigation_method = V2_MITIGATION_AMD_DIS_IND; 146 v2_mitigation_method = V2_MITIGATION_AMD_DIS_IND;
147 break; 147 break;
148 default: 148 default:
149 v2_mitigation_method = V2_MITIGATION_NONE; 149 v2_mitigation_method = V2_MITIGATION_NONE;
150 break; 150 break;
151 } 151 }
152 } else { 152 } else {
153 v2_mitigation_method = V2_MITIGATION_NONE; 153 v2_mitigation_method = V2_MITIGATION_NONE;
154 } 154 }
155} 155}
156 156
157/* -------------------------------------------------------------------------- */ 157/* -------------------------------------------------------------------------- */
158 158
159static volatile unsigned long ibrs_cpu_barrier1 __cacheline_aligned; 159static volatile unsigned long ibrs_cpu_barrier1 __cacheline_aligned;
160static volatile unsigned long ibrs_cpu_barrier2 __cacheline_aligned; 160static volatile unsigned long ibrs_cpu_barrier2 __cacheline_aligned;
161 161
162#ifdef __x86_64__ 162#ifdef __x86_64__
163static void 163static void
164ibrs_disable_hotpatch(void) 164ibrs_disable_hotpatch(void)
165{ 165{
166 extern uint8_t noibrs_enter, noibrs_enter_end; 166 extern uint8_t noibrs_enter, noibrs_enter_end;
167 extern uint8_t noibrs_leave, noibrs_leave_end; 167 extern uint8_t noibrs_leave, noibrs_leave_end;
168 u_long psl, cr0; 168 u_long psl, cr0;
169 uint8_t *bytes; 169 uint8_t *bytes;
170 size_t size; 170 size_t size;
171 171
172 x86_patch_window_open(&psl, &cr0); 172 x86_patch_window_open(&psl, &cr0);
173 173
174 bytes = &noibrs_enter; 174 bytes = &noibrs_enter;
175 size = (size_t)&noibrs_enter_end - (size_t)&noibrs_enter; 175 size = (size_t)&noibrs_enter_end - (size_t)&noibrs_enter;
176 x86_hotpatch(HP_NAME_IBRS_ENTER, bytes, size); 176 x86_hotpatch(HP_NAME_IBRS_ENTER, bytes, size);
177 177
178 bytes = &noibrs_leave; 178 bytes = &noibrs_leave;
179 size = (size_t)&noibrs_leave_end - (size_t)&noibrs_leave; 179 size = (size_t)&noibrs_leave_end - (size_t)&noibrs_leave;
180 x86_hotpatch(HP_NAME_IBRS_LEAVE, bytes, size); 180 x86_hotpatch(HP_NAME_IBRS_LEAVE, bytes, size);
181 181
182 x86_patch_window_close(psl, cr0); 182 x86_patch_window_close(psl, cr0);
183} 183}
184 184
185static void 185static void
186ibrs_enable_hotpatch(void) 186ibrs_enable_hotpatch(void)
187{ 187{
188 extern uint8_t ibrs_enter, ibrs_enter_end; 188 extern uint8_t ibrs_enter, ibrs_enter_end;
189 extern uint8_t ibrs_leave, ibrs_leave_end; 189 extern uint8_t ibrs_leave, ibrs_leave_end;
190 u_long psl, cr0; 190 u_long psl, cr0;
191 uint8_t *bytes; 191 uint8_t *bytes;
192 size_t size; 192 size_t size;
193 193
194 x86_patch_window_open(&psl, &cr0); 194 x86_patch_window_open(&psl, &cr0);
195 195
196 bytes = &ibrs_enter; 196 bytes = &ibrs_enter;
197 size = (size_t)&ibrs_enter_end - (size_t)&ibrs_enter; 197 size = (size_t)&ibrs_enter_end - (size_t)&ibrs_enter;
198 x86_hotpatch(HP_NAME_IBRS_ENTER, bytes, size); 198 x86_hotpatch(HP_NAME_IBRS_ENTER, bytes, size);
199 199
200 bytes = &ibrs_leave; 200 bytes = &ibrs_leave;
201 size = (size_t)&ibrs_leave_end - (size_t)&ibrs_leave; 201 size = (size_t)&ibrs_leave_end - (size_t)&ibrs_leave;
202 x86_hotpatch(HP_NAME_IBRS_LEAVE, bytes, size); 202 x86_hotpatch(HP_NAME_IBRS_LEAVE, bytes, size);
203 203
204 x86_patch_window_close(psl, cr0); 204 x86_patch_window_close(psl, cr0);
205} 205}
206#else 206#else
207/* IBRS not supported on i386 */ 207/* IBRS not supported on i386 */
208static void 208static void
209ibrs_disable_hotpatch(void) 209ibrs_disable_hotpatch(void)
210{ 210{
211 panic("%s: impossible", __func__); 211 panic("%s: impossible", __func__);
212} 212}
213static void 213static void
214ibrs_enable_hotpatch(void) 214ibrs_enable_hotpatch(void)
215{ 215{
216 panic("%s: impossible", __func__); 216 panic("%s: impossible", __func__);
217} 217}
218#endif 218#endif
219 219
220/* -------------------------------------------------------------------------- */ 220/* -------------------------------------------------------------------------- */
221 221
222static void 222static void
223mitigation_v2_apply_cpu(struct cpu_info *ci, bool enabled) 223mitigation_v2_apply_cpu(struct cpu_info *ci, bool enabled)
224{ 224{
225 uint64_t msr; 225 uint64_t msr;
226 226
227 switch (v2_mitigation_method) { 227 switch (v2_mitigation_method) {
228 case V2_MITIGATION_NONE: 228 case V2_MITIGATION_NONE:
229 panic("impossible"); 229 panic("impossible");
230 case V2_MITIGATION_INTEL_IBRS: 230 case V2_MITIGATION_INTEL_IBRS:
231 /* cpu0 is the one that does the hotpatch job */ 231 /* cpu0 is the one that does the hotpatch job */
232 if (ci == &cpu_info_primary) { 232 if (ci == &cpu_info_primary) {
233 if (enabled) { 233 if (enabled) {
234 ibrs_enable_hotpatch(); 234 ibrs_enable_hotpatch();
235 } else { 235 } else {
236 ibrs_disable_hotpatch(); 236 ibrs_disable_hotpatch();
237 } 237 }
238 } 238 }
239 if (!enabled) { 239 if (!enabled) {
240 wrmsr(MSR_IA32_SPEC_CTRL, 0); 240 wrmsr(MSR_IA32_SPEC_CTRL, 0);
241 } 241 }
242 break; 242 break;
243 case V2_MITIGATION_AMD_DIS_IND: 243 case V2_MITIGATION_AMD_DIS_IND:
244 msr = rdmsr(MSR_IC_CFG); 244 msr = rdmsr(MSR_IC_CFG);
245 if (enabled) { 245 if (enabled) {
246 msr |= IC_CFG_DIS_IND; 246 msr |= IC_CFG_DIS_IND;
247 } else { 247 } else {
248 msr &= ~IC_CFG_DIS_IND; 248 msr &= ~IC_CFG_DIS_IND;
249 } 249 }
250 wrmsr(MSR_IC_CFG, msr); 250 wrmsr(MSR_IC_CFG, msr);
251 break; 251 break;
252 } 252 }
253} 253}
254 254
255/* 255/*
256 * Note: IBRS requires hotpatching, so we need barriers. 256 * Note: IBRS requires hotpatching, so we need barriers.
257 */ 257 */
258static void 258static void
259mitigation_v2_change_cpu(void *arg1, void *arg2) 259mitigation_v2_change_cpu(void *arg1, void *arg2)
260{ 260{
261 struct cpu_info *ci = curcpu(); 261 struct cpu_info *ci = curcpu();
262 bool enabled = (bool)arg1; 262 bool enabled = (bool)arg1;
263 u_long psl = 0; 263 u_long psl = 0;
264 264
265 /* Rendez-vous 1 (IBRS only). */ 265 /* Rendez-vous 1 (IBRS only). */
266 if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) { 266 if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) {
267 psl = x86_read_psl(); 267 psl = x86_read_psl();
268 x86_disable_intr(); 268 x86_disable_intr();
269 269
270 atomic_dec_ulong(&ibrs_cpu_barrier1); 270 atomic_dec_ulong(&ibrs_cpu_barrier1);
271 while (atomic_cas_ulong(&ibrs_cpu_barrier1, 0, 0) != 0) { 271 while (atomic_cas_ulong(&ibrs_cpu_barrier1, 0, 0) != 0) {
272 x86_pause(); 272 x86_pause();
273 } 273 }
274 } 274 }
275 275
276 mitigation_v2_apply_cpu(ci, enabled); 276 mitigation_v2_apply_cpu(ci, enabled);
277 277
278 /* Rendez-vous 2 (IBRS only). */ 278 /* Rendez-vous 2 (IBRS only). */
279 if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) { 279 if (v2_mitigation_method == V2_MITIGATION_INTEL_IBRS) {
280 atomic_dec_ulong(&ibrs_cpu_barrier2); 280 atomic_dec_ulong(&ibrs_cpu_barrier2);
281 while (atomic_cas_ulong(&ibrs_cpu_barrier2, 0, 0) != 0) { 281 while (atomic_cas_ulong(&ibrs_cpu_barrier2, 0, 0) != 0) {
282 x86_pause(); 282 x86_pause();
283 } 283 }
284 284
285 /* Write back and invalidate cache, flush pipelines. */ 285 /* Write back and invalidate cache, flush pipelines. */
286 wbinvd(); 286 wbinvd();
287 x86_flush(); 287 x86_flush();
288 288
289 x86_write_psl(psl); 289 x86_write_psl(psl);
290 } 290 }
291} 291}
292 292
293static int 293static int
294mitigation_v2_change(bool enabled) 294mitigation_v2_change(bool enabled)
295{ 295{
296 struct cpu_info *ci = NULL; 296 struct cpu_info *ci = NULL;
297 CPU_INFO_ITERATOR cii; 297 CPU_INFO_ITERATOR cii;
298 uint64_t xc; 298 uint64_t xc;
299 299
300 v2_detect_method(); 300 v2_detect_method();
301 301
302 mutex_enter(&cpu_lock); 302 mutex_enter(&cpu_lock);
303 303
304 /* 304 /*
305 * We expect all the CPUs to be online. 305 * We expect all the CPUs to be online.
306 */ 306 */
307 for (CPU_INFO_FOREACH(cii, ci)) { 307 for (CPU_INFO_FOREACH(cii, ci)) {
308 struct schedstate_percpu *spc = &ci->ci_schedstate; 308 struct schedstate_percpu *spc = &ci->ci_schedstate;
309 if (spc->spc_flags & SPCF_OFFLINE) { 309 if (spc->spc_flags & SPCF_OFFLINE) {
310 printf("[!] cpu%d offline, SpectreV2 not changed\n", 310 printf("[!] cpu%d offline, SpectreV2 not changed\n",
311 cpu_index(ci)); 311 cpu_index(ci));
312 mutex_exit(&cpu_lock); 312 mutex_exit(&cpu_lock);
313 return EOPNOTSUPP; 313 return EOPNOTSUPP;
314 } 314 }
315 } 315 }
316 316
317 switch (v2_mitigation_method) { 317 switch (v2_mitigation_method) {
318 case V2_MITIGATION_NONE: 318 case V2_MITIGATION_NONE:
319 printf("[!] No mitigation available\n"); 319 printf("[!] No mitigation available\n");
320 mutex_exit(&cpu_lock); 320 mutex_exit(&cpu_lock);
321 return EOPNOTSUPP; 321 return EOPNOTSUPP;
322 case V2_MITIGATION_AMD_DIS_IND: 322 case V2_MITIGATION_AMD_DIS_IND:
323 case V2_MITIGATION_INTEL_IBRS: 323 case V2_MITIGATION_INTEL_IBRS:
324 /* Initialize the barriers */ 324 /* Initialize the barriers */
325 ibrs_cpu_barrier1 = ncpu; 325 ibrs_cpu_barrier1 = ncpu;
326 ibrs_cpu_barrier2 = ncpu; 326 ibrs_cpu_barrier2 = ncpu;
327 327
328 printf("[+] %s SpectreV2 Mitigation...", 328 printf("[+] %s SpectreV2 Mitigation...",
329 enabled ? "Enabling" : "Disabling"); 329 enabled ? "Enabling" : "Disabling");
330 xc = xc_broadcast(0, mitigation_v2_change_cpu, 330 xc = xc_broadcast(0, mitigation_v2_change_cpu,
331 (void *)enabled, NULL); 331 (void *)enabled, NULL);
332 xc_wait(xc); 332 xc_wait(xc);
333 printf(" done!\n"); 333 printf(" done!\n");
334 v2_mitigation_enabled = enabled; 334 v2_mitigation_enabled = enabled;
335 mutex_exit(&cpu_lock); 335 mutex_exit(&cpu_lock);
336 v2_set_name(); 336 v2_set_name();
337 return 0; 337 return 0;
338 default: 338 default:
339 panic("impossible"); 339 panic("impossible");
340 } 340 }
341} 341}
342 342
343static int 343static int
344sysctl_machdep_spectreV2_mitigated(SYSCTLFN_ARGS) 344sysctl_machdep_spectreV2_mitigated(SYSCTLFN_ARGS)
345{ 345{
346 struct sysctlnode node; 346 struct sysctlnode node;
347 int error; 347 int error;
348 bool val; 348 bool val;
349 349
350 val = *(bool *)rnode->sysctl_data; 350 val = *(bool *)rnode->sysctl_data;
351 351
352 node = *rnode; 352 node = *rnode;
353 node.sysctl_data = &val; 353 node.sysctl_data = &val;
354 354
355 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 355 error = sysctl_lookup(SYSCTLFN_CALL(&node));
356 if (error != 0 || newp == NULL) 356 if (error != 0 || newp == NULL)
357 return error; 357 return error;
358 358
359 if (val == v2_mitigation_enabled) 359 if (val == v2_mitigation_enabled)
360 return 0; 360 return 0;
361 return mitigation_v2_change(val); 361 return mitigation_v2_change(val);
362} 362}
363 363
364/* -------------------------------------------------------------------------- */ 364/* -------------------------------------------------------------------------- */
365 365
366static void 366static void
367v4_set_name(void) 367v4_set_name(void)
368{ 368{
369 char name[64] = ""; 369 char name[64] = "";
370 370
371 if (!v4_mitigation_enabled) { 371 if (!v4_mitigation_enabled) {
372 strlcat(name, "(none)", sizeof(name)); 372 strlcat(name, "(none)", sizeof(name));
373 } else { 373 } else {
374 switch (v4_mitigation_method) { 374 switch (v4_mitigation_method) {
375 case V4_MITIGATION_NONE: 375 case V4_MITIGATION_NONE:
376 panic("%s: impossible", __func__); 376 panic("%s: impossible", __func__);
377 case V4_MITIGATION_INTEL_SSBD: 377 case V4_MITIGATION_INTEL_SSBD:
378 strlcat(name, "[Intel SSBD]", sizeof(name)); 378 strlcat(name, "[Intel SSBD]", sizeof(name));
379 break; 379 break;
380 case V4_MITIGATION_INTEL_SSB_NO: 380 case V4_MITIGATION_INTEL_SSB_NO:
381 strlcat(name, "[Intel SSB_NO]", sizeof(name)); 381 strlcat(name, "[Intel SSB_NO]", sizeof(name));
382 break; 382 break;
383 case V4_MITIGATION_AMD_NONARCH_F15H: 383 case V4_MITIGATION_AMD_NONARCH_F15H:
384 case V4_MITIGATION_AMD_NONARCH_F16H: 384 case V4_MITIGATION_AMD_NONARCH_F16H:
385 case V4_MITIGATION_AMD_NONARCH_F17H: 385 case V4_MITIGATION_AMD_NONARCH_F17H:
386 strlcat(name, "[AMD NONARCH]", sizeof(name)); 386 strlcat(name, "[AMD NONARCH]", sizeof(name));
387 break; 387 break;
388 } 388 }
389 } 389 }
390 390
391 strlcpy(v4_mitigation_name, name, 391 strlcpy(v4_mitigation_name, name,
392 sizeof(v4_mitigation_name)); 392 sizeof(v4_mitigation_name));
393} 393}
394 394
395static void 395static void
396v4_detect_method(void) 396v4_detect_method(void)
397{ 397{
398 struct cpu_info *ci = curcpu(); 398 struct cpu_info *ci = curcpu();
399 u_int descs[4]; 399 u_int descs[4];
400 uint64_t msr; 400 uint64_t msr;
401 401
402 if (cpu_vendor == CPUVENDOR_INTEL) { 402 if (cpu_vendor == CPUVENDOR_INTEL) {
403 if (cpu_info_primary.ci_feat_val[7] & CPUID_SEF_ARCH_CAP) { 403 if (cpu_info_primary.ci_feat_val[7] & CPUID_SEF_ARCH_CAP) {
404 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES); 404 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
405 if (msr & IA32_ARCH_SSB_NO) { 405 if (msr & IA32_ARCH_SSB_NO) {
406 /* 406 /*
407 * The processor indicates it is not vulnerable 407 * The processor indicates it is not vulnerable
408 * to the Speculative Store Bypass (SpectreV4) 408 * to the Speculative Store Bypass (SpectreV4)
409 * flaw. 409 * flaw.
410 */ 410 */
411 v4_mitigation_method = V4_MITIGATION_INTEL_SSB_NO; 411 v4_mitigation_method = V4_MITIGATION_INTEL_SSB_NO;
412 return; 412 return;
413 } 413 }
414 } 414 }
415 if (cpuid_level >= 7) { 415 if (cpuid_level >= 7) {
416 x86_cpuid(7, descs); 416 x86_cpuid(7, descs);
417 if (descs[3] & CPUID_SEF_SSBD) { 417 if (descs[3] & CPUID_SEF_SSBD) {
418 /* descs[3] = %edx */ 418 /* descs[3] = %edx */
419 v4_mitigation_method = V4_MITIGATION_INTEL_SSBD; 419 v4_mitigation_method = V4_MITIGATION_INTEL_SSBD;
420 return; 420 return;
421 } 421 }
422 } 422 }
423 } else if (cpu_vendor == CPUVENDOR_AMD) { 423 } else if (cpu_vendor == CPUVENDOR_AMD) {
424 switch (CPUID_TO_FAMILY(ci->ci_signature)) { 424 switch (CPUID_TO_FAMILY(ci->ci_signature)) {
425 case 0x15: 425 case 0x15:
426 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F15H; 426 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F15H;
427 return; 427 return;
428 case 0x16: 428 case 0x16:
429 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F16H; 429 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F16H;
430 return; 430 return;
431 case 0x17: 431 case 0x17:
432 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F17H; 432 v4_mitigation_method = V4_MITIGATION_AMD_NONARCH_F17H;
433 return; 433 return;
434 default: 434 default:
435 break; 435 break;
436 } 436 }
437 } 437 }
438 438
439 v4_mitigation_method = V4_MITIGATION_NONE; 439 v4_mitigation_method = V4_MITIGATION_NONE;
440} 440}
441 441
442static void 442static void
443mitigation_v4_apply_cpu(bool enabled) 443mitigation_v4_apply_cpu(bool enabled)
444{ 444{
445 uint64_t msr, msrval = 0, msrbit = 0; 445 uint64_t msr, msrval = 0, msrbit = 0;
446 446
447 switch (v4_mitigation_method) { 447 switch (v4_mitigation_method) {
448 case V4_MITIGATION_NONE: 448 case V4_MITIGATION_NONE:
449 case V4_MITIGATION_INTEL_SSB_NO: 449 case V4_MITIGATION_INTEL_SSB_NO:
450 panic("impossible"); 450 panic("impossible");
451 case V4_MITIGATION_INTEL_SSBD: 451 case V4_MITIGATION_INTEL_SSBD:
452 msrval = MSR_IA32_SPEC_CTRL; 452 msrval = MSR_IA32_SPEC_CTRL;
453 msrbit = IA32_SPEC_CTRL_SSBD; 453 msrbit = IA32_SPEC_CTRL_SSBD;
454 break; 454 break;
455 case V4_MITIGATION_AMD_NONARCH_F15H: 455 case V4_MITIGATION_AMD_NONARCH_F15H:
456 msrval = MSR_LS_CFG; 456 msrval = MSR_LS_CFG;
457 msrbit = LS_CFG_DIS_SSB_F15H; 457 msrbit = LS_CFG_DIS_SSB_F15H;
458 break; 458 break;
459 case V4_MITIGATION_AMD_NONARCH_F16H: 459 case V4_MITIGATION_AMD_NONARCH_F16H:
460 msrval = MSR_LS_CFG; 460 msrval = MSR_LS_CFG;
461 msrbit = LS_CFG_DIS_SSB_F16H; 461 msrbit = LS_CFG_DIS_SSB_F16H;
462 break; 462 break;
463 case V4_MITIGATION_AMD_NONARCH_F17H: 463 case V4_MITIGATION_AMD_NONARCH_F17H:
464 msrval = MSR_LS_CFG; 464 msrval = MSR_LS_CFG;
465 msrbit = LS_CFG_DIS_SSB_F17H; 465 msrbit = LS_CFG_DIS_SSB_F17H;
466 break; 466 break;
467 } 467 }
468 468
469 msr = rdmsr(msrval); 469 msr = rdmsr(msrval);
470 if (enabled) { 470 if (enabled) {
471 msr |= msrbit; 471 msr |= msrbit;
472 } else { 472 } else {
473 msr &= ~msrbit; 473 msr &= ~msrbit;
474 } 474 }
475 wrmsr(msrval, msr); 475 wrmsr(msrval, msr);
476} 476}
477 477
478static void 478static void
479mitigation_v4_change_cpu(void *arg1, void *arg2) 479mitigation_v4_change_cpu(void *arg1, void *arg2)
480{ 480{
481 bool enabled = (bool)arg1; 481 bool enabled = (bool)arg1;
482 482
483 mitigation_v4_apply_cpu(enabled); 483 mitigation_v4_apply_cpu(enabled);
484} 484}
485 485
486static int mitigation_v4_change(bool enabled) 486static int
 487mitigation_v4_change(bool enabled)
487{ 488{
488 struct cpu_info *ci = NULL; 489 struct cpu_info *ci = NULL;
489 CPU_INFO_ITERATOR cii; 490 CPU_INFO_ITERATOR cii;
490 uint64_t xc; 491 uint64_t xc;
491 492
492 v4_detect_method(); 493 v4_detect_method();
493 494
494 mutex_enter(&cpu_lock); 495 mutex_enter(&cpu_lock);
495 496
496 /* 497 /*
497 * We expect all the CPUs to be online. 498 * We expect all the CPUs to be online.
498 */ 499 */
499 for (CPU_INFO_FOREACH(cii, ci)) { 500 for (CPU_INFO_FOREACH(cii, ci)) {
500 struct schedstate_percpu *spc = &ci->ci_schedstate; 501 struct schedstate_percpu *spc = &ci->ci_schedstate;
501 if (spc->spc_flags & SPCF_OFFLINE) { 502 if (spc->spc_flags & SPCF_OFFLINE) {
502 printf("[!] cpu%d offline, SpectreV4 not changed\n", 503 printf("[!] cpu%d offline, SpectreV4 not changed\n",
503 cpu_index(ci)); 504 cpu_index(ci));
504 mutex_exit(&cpu_lock); 505 mutex_exit(&cpu_lock);
505 return EOPNOTSUPP; 506 return EOPNOTSUPP;
506 } 507 }
507 } 508 }
508 509
509 switch (v4_mitigation_method) { 510 switch (v4_mitigation_method) {
510 case V4_MITIGATION_NONE: 511 case V4_MITIGATION_NONE:
511 printf("[!] No mitigation available\n"); 512 printf("[!] No mitigation available\n");
512 mutex_exit(&cpu_lock); 513 mutex_exit(&cpu_lock);
513 return EOPNOTSUPP; 514 return EOPNOTSUPP;
514 case V4_MITIGATION_INTEL_SSBD: 515 case V4_MITIGATION_INTEL_SSBD:
515 case V4_MITIGATION_AMD_NONARCH_F15H: 516 case V4_MITIGATION_AMD_NONARCH_F15H:
516 case V4_MITIGATION_AMD_NONARCH_F16H: 517 case V4_MITIGATION_AMD_NONARCH_F16H:
517 case V4_MITIGATION_AMD_NONARCH_F17H: 518 case V4_MITIGATION_AMD_NONARCH_F17H:
518 printf("[+] %s SpectreV4 Mitigation...", 519 printf("[+] %s SpectreV4 Mitigation...",
519 enabled ? "Enabling" : "Disabling"); 520 enabled ? "Enabling" : "Disabling");
520 xc = xc_broadcast(0, mitigation_v4_change_cpu, 521 xc = xc_broadcast(0, mitigation_v4_change_cpu,
521 (void *)enabled, NULL); 522 (void *)enabled, NULL);
522 xc_wait(xc); 523 xc_wait(xc);
523 printf(" done!\n"); 524 printf(" done!\n");
524 v4_mitigation_enabled = enabled; 525 v4_mitigation_enabled = enabled;
525 mutex_exit(&cpu_lock); 526 mutex_exit(&cpu_lock);
526 v4_set_name(); 527 v4_set_name();
527 return 0; 528 return 0;
528 case V4_MITIGATION_INTEL_SSB_NO: 529 case V4_MITIGATION_INTEL_SSB_NO:
529 printf("[+] The CPU is not affected by SpectreV4\n"); 530 printf("[+] The CPU is not affected by SpectreV4\n");
530 mutex_exit(&cpu_lock); 531 mutex_exit(&cpu_lock);
531 return 0; 532 return 0;
532 default: 533 default:
533 panic("impossible"); 534 panic("impossible");
534 } 535 }
535} 536}
536 537
537static int 538static int
538sysctl_machdep_spectreV4_mitigated(SYSCTLFN_ARGS) 539sysctl_machdep_spectreV4_mitigated(SYSCTLFN_ARGS)
539{ 540{
540 struct sysctlnode node; 541 struct sysctlnode node;
541 int error; 542 int error;
542 bool val; 543 bool val;
543 544
544 val = *(bool *)rnode->sysctl_data; 545 val = *(bool *)rnode->sysctl_data;
545 546
546 node = *rnode; 547 node = *rnode;
547 node.sysctl_data = &val; 548 node.sysctl_data = &val;
548 549
549 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 550 error = sysctl_lookup(SYSCTLFN_CALL(&node));
550 if (error != 0 || newp == NULL) 551 if (error != 0 || newp == NULL)
551 return error; 552 return error;
552 553
553 if (val == v4_mitigation_enabled) 554 if (val == v4_mitigation_enabled)
554 return 0; 555 return 0;
555 return mitigation_v4_change(val); 556 return mitigation_v4_change(val);
556} 557}
557 558
558/* -------------------------------------------------------------------------- */ 559/* -------------------------------------------------------------------------- */
559 560
560void speculation_barrier(struct lwp *, struct lwp *); 561void speculation_barrier(struct lwp *, struct lwp *);
561 562
562void 563void
563speculation_barrier(struct lwp *oldlwp, struct lwp *newlwp) 564speculation_barrier(struct lwp *oldlwp, struct lwp *newlwp)
564{ 565{
565 /* 566 /*
566 * Speculation barriers are applicable only to Spectre V2. 567 * Speculation barriers are applicable only to Spectre V2.
567 */ 568 */
568 if (!v2_mitigation_enabled) 569 if (!v2_mitigation_enabled)
569 return; 570 return;
570 571
571 /* 572 /*
572 * From kernel thread to kernel thread, no need for a barrier. 573 * From kernel thread to kernel thread, no need for a barrier.
573 */ 574 */
574 if ((oldlwp != NULL && (oldlwp->l_flag & LW_SYSTEM)) && 575 if ((oldlwp != NULL && (oldlwp->l_flag & LW_SYSTEM)) &&
575 (newlwp->l_flag & LW_SYSTEM)) 576 (newlwp->l_flag & LW_SYSTEM))
576 return; 577 return;
577 578
578 switch (v2_mitigation_method) { 579 switch (v2_mitigation_method) {
579 case V2_MITIGATION_INTEL_IBRS: 580 case V2_MITIGATION_INTEL_IBRS:
580 wrmsr(MSR_IA32_PRED_CMD, IA32_PRED_CMD_IBPB); 581 wrmsr(MSR_IA32_PRED_CMD, IA32_PRED_CMD_IBPB);
581 break; 582 break;
582 default: 583 default:
583 /* nothing */ 584 /* nothing */
584 break; 585 break;
585 } 586 }
586} 587}
587 588
/*
 * Initialize the Spectre V2/V4 mitigation state on the given CPU.
 * The primary CPU (cpu_info_primary) is the one that performs the
 * detection and sets the global variables; the other CPUs only apply
 * the already-detected method locally.
 */
void
cpu_speculation_init(struct cpu_info *ci)
{
	/*
	 * Spectre V2.
	 *
	 * cpu0 is the one that detects the method and sets the global
	 * variable.
	 */
	if (ci == &cpu_info_primary) {
		v2_detect_method();
		v2_mitigation_enabled =
		    (v2_mitigation_method != V2_MITIGATION_NONE);
		v2_set_name();
	}
	if (v2_mitigation_method != V2_MITIGATION_NONE) {
		mitigation_v2_apply_cpu(ci, true);
	}

	/*
	 * Spectre V4.
	 *
	 * cpu0 is the one that detects the method and sets the global
	 * variable.
	 *
	 * Disabled by default, as recommended by AMD, but can be enabled
	 * dynamically. We only detect if the CPU is not vulnerable, to
	 * mark it as 'mitigated' in the sysctl.
	 */
#if 0
	if (ci == &cpu_info_primary) {
		v4_detect_method();
		v4_mitigation_enabled =
		    (v4_mitigation_method != V4_MITIGATION_NONE);
		v4_set_name();
	}
	if (v4_mitigation_method != V4_MITIGATION_NONE &&
	    v4_mitigation_method != V4_MITIGATION_INTEL_SSB_NO) {
		mitigation_v4_apply_cpu(ci, true);
	}
#else
	/*
	 * Detection only: if the CPU reports itself as not vulnerable
	 * (SSB_NO), record it as mitigated so the sysctl says so. No
	 * mitigation is actually applied in this branch.
	 */
	if (ci == &cpu_info_primary) {
		v4_detect_method();
		if (v4_mitigation_method == V4_MITIGATION_INTEL_SSB_NO) {
			v4_mitigation_enabled = true;
			v4_set_name();
		}
	}
#endif
}
625 639
626void sysctl_speculation_init(struct sysctllog **); 640void sysctl_speculation_init(struct sysctllog **);
627 641
/*
 * Create the machdep.spectre_v{1,2,4} sysctl trees reporting the
 * mitigation state of each Spectre variant.
 */
void
sysctl_speculation_init(struct sysctllog **clog)
{
	const struct sysctlnode *spec_rnode;

	/* SpectreV1: always reported unmitigated (immediate value 0). */
	spec_rnode = NULL;
	sysctl_createv(clog, 0, NULL, &spec_rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "spectre_v1", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_CREATE);
	sysctl_createv(clog, 0, &spec_rnode, &spec_rnode,
	    CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
	    CTLTYPE_BOOL, "mitigated",
	    SYSCTL_DESCR("Whether Spectre Variant 1 is mitigated"),
	    NULL, 0 /* mitigated=0 */, NULL, 0,
	    CTL_CREATE, CTL_EOL);

	/* SpectreV2 */
	spec_rnode = NULL;
	sysctl_createv(clog, 0, NULL, &spec_rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "spectre_v2", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_CREATE);
	/* Read-write: the HW mitigation can be toggled at runtime. */
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "hwmitigated",
	    SYSCTL_DESCR("Whether Spectre Variant 2 is HW-mitigated"),
	    sysctl_machdep_spectreV2_mitigated, 0,
	    &v2_mitigation_enabled, 0,
	    CTL_CREATE, CTL_EOL);
	/* SW mitigation state is fixed at compile time. */
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_IMMEDIATE,
	    CTLTYPE_BOOL, "swmitigated",
	    SYSCTL_DESCR("Whether Spectre Variant 2 is SW-mitigated"),
#if defined(SPECTRE_V2_GCC_MITIGATION)
	    NULL, 1,
#else
	    NULL, 0,
#endif
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "method",
	    SYSCTL_DESCR("Mitigation method in use"),
	    NULL, 0,
	    v2_mitigation_name, 0,
	    CTL_CREATE, CTL_EOL);

	/* SpectreV4 */
	spec_rnode = NULL;
	sysctl_createv(clog, 0, NULL, &spec_rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "spectre_v4", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_CREATE);
	/* Read-write: the V4 mitigation can be toggled at runtime. */
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "mitigated",
	    SYSCTL_DESCR("Whether Spectre Variant 4 is mitigated"),
	    sysctl_machdep_spectreV4_mitigated, 0,
	    &v4_mitigation_enabled, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &spec_rnode, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "method",
	    SYSCTL_DESCR("Mitigation method in use"),
	    NULL, 0,
	    v4_mitigation_name, 0,
	    CTL_CREATE, CTL_EOL);
}