Sun Apr 9 08:17:56 2023 UTC
amd64: Make curlwp and curcpu() flushable.

The only effect of the `volatile' qualifier on an asm block with
outputs is to force the instructions to appear in the generated code,
even if the outputs end up being unused.  Since these instructions
have no (architectural) side effects -- provided %gs is set
correctly, which must be the case here -- there's no need for the
volatile qualifier, so nix it.
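
For illustration only (not part of the commit), here is a minimal
standalone sketch of the same pattern; struct fake_cpu and
fake_curlwp() are hypothetical names.  It shows what dropping the
qualifier permits: GCC treats a non-volatile asm with outputs as a
pure computation of those outputs, so it may delete the instruction
when the result goes unused and may coalesce identical loads.

/* Hypothetical sketch -- not from cpu.h. */
#include <stddef.h>

struct fake_cpu {
	struct fake_cpu *self;
	void *curlwp;
};

static inline void *
fake_curlwp(void)
{
	void *l;

	/*
	 * No `volatile': this asm exists only to produce l.  If l is
	 * never used, the compiler may drop the movq entirely, and
	 * repeated calls may share a single load.
	 */
	__asm("movq %%gs:%1, %0"
	    : "=r" (l)
	    : "m" (*(void * const *)offsetof(struct fake_cpu, curlwp)));
	return l;
}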


(riastradh)
diff -r1.70 -r1.71 src/sys/arch/amd64/include/cpu.h


--- src/sys/arch/amd64/include/cpu.h 2021/11/02 11:26:03 1.70
+++ src/sys/arch/amd64/include/cpu.h 2023/04/09 08:17:56 1.71
@@ -1,14 +1,14 @@
-/*	$NetBSD: cpu.h,v 1.70 2021/11/02 11:26:03 ryo Exp $	*/
+/*	$NetBSD: cpu.h,v 1.71 2023/04/09 08:17:56 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * William Jolitz.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -43,39 +43,39 @@
 
 #ifdef _KERNEL
 
 #if defined(__GNUC__) && !defined(_MODULE)
 
 static struct cpu_info *x86_curcpu(void);
 static lwp_t *x86_curlwp(void);
 
 __inline __always_inline static struct cpu_info * __unused __nomsan
 x86_curcpu(void)
 {
 	struct cpu_info *ci;
 
-	__asm volatile("movq %%gs:%1, %0" :
+	__asm("movq %%gs:%1, %0" :
 	    "=r" (ci) :
 	    "m"
 	    (*(struct cpu_info * const *)offsetof(struct cpu_info, ci_self)));
 	return ci;
 }
 
 __inline static lwp_t * __unused __nomsan __attribute__ ((const))
 x86_curlwp(void)
 {
 	lwp_t *l;
 
-	__asm volatile("movq %%gs:%1, %0" :
+	__asm("movq %%gs:%1, %0" :
 	    "=r" (l) :
 	    "m"
 	    (*(struct cpu_info * const *)offsetof(struct cpu_info, ci_curlwp)));
 	return l;
 }
 
 #endif	/* __GNUC__ && !_MODULE */
 
 #ifdef XENPV
 #define	CLKF_USERMODE(frame)	(curcpu()->ci_xen_clockf_usermode)
 #define	CLKF_PC(frame)		(curcpu()->ci_xen_clockf_pc)
 #else /* XENPV */
 #define	CLKF_USERMODE(frame)	USERMODE((frame)->cf_if.if_tf.tf_cs)