Sun Feb 13 13:42:21 2022 UTC
vax: __cpu_simple_lock audit.

Fix missing "memory" asm clobber so the compiler can't reorder memory
access around __cpu_simple_lock/lock_try/unlock.


(riastradh)
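
For readers unfamiliar with GCC inline asm: without "memory" in the clobber
list, the compiler assumes the asm touches only its named operands, so it may
keep lock-protected data cached in registers or move loads and stores across
the lock operation.  A minimal sketch of the difference (illustrative only,
not part of the commit; the function names are made up), using the same bbssi
spin loop the header uses outside _HARDKERNEL:

/* Illustrative sketch: the broken and the fixed form side by side. */
static __inline void
lock_no_barrier(__cpu_simple_lock_t *alp)
{
	/*
	 * BUG: no "memory" clobber -- the compiler may hoist or sink
	 * accesses to protected data across this asm.
	 */
	__asm __volatile ("1:bbssi $0,%0,1b"
	    : /* No outputs */
	    : "m"(*alp)
	    : "cc");
}

static __inline void
lock_with_barrier(__cpu_simple_lock_t *alp)
{
	/*
	 * FIXED: "memory" makes the asm a compiler-level barrier, so
	 * cached values are flushed before it and reloaded after it.
	 */
	__asm __volatile ("1:bbssi $0,%0,1b"
	    : /* No outputs */
	    : "m"(*alp)
	    : "cc", "memory");
}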
diff -r1.33 -r1.34 src/sys/arch/vax/include/lock.h

cvs diff -r1.33 -r1.34 src/sys/arch/vax/include/lock.h

--- src/sys/arch/vax/include/lock.h 2022/02/12 17:17:53 1.33
+++ src/sys/arch/vax/include/lock.h 2022/02/13 13:42:21 1.34
@@ -1,14 +1,14 @@
-/*	$NetBSD: lock.h,v 1.33 2022/02/12 17:17:53 riastradh Exp $	*/
+/*	$NetBSD: lock.h,v 1.34 2022/02/13 13:42:21 riastradh Exp $	*/
 
 /*
  * Copyright (c) 2000 Ludd, University of Lule}, Sweden.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -74,67 +74,67 @@ static __inline int
 __cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
 {
 	int ret;
 
 #ifdef _HARDKERNEL
 	__asm __volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
 	    : "=&r"(ret)
 	    : "g"(__alp)
 	    : "r0","r1","cc","memory");
 #else
 	__asm __volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
 	    : "=&r"(ret)
 	    : "m"(*__alp)
-	    : "cc");
+	    : "cc", "memory");
 #endif
 
 	return ret;
 }
 
 static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
 static __inline void
 __cpu_simple_lock(__cpu_simple_lock_t *__alp)
 {
 #if defined(_HARDKERNEL) && defined(MULTIPROCESSOR)
 	struct cpu_info * const __ci = curcpu();
 
 	while (__cpu_simple_lock_try(__alp) == 0) {
 #define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
 		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {
 			cpu_handle_ipi();
 		}
 	}
 #else /* _HARDKERNEL && MULTIPROCESSOR */
 	__asm __volatile ("1:bbssi $0,%0,1b"
 	    : /* No outputs */
 	    : "m"(*__alp)
-	    : "cc");
+	    : "cc", "memory");
 #endif /* _HARDKERNEL && MULTIPROCESSOR */
 }
 
 static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);
 static __inline void
 __cpu_simple_unlock(__cpu_simple_lock_t *__alp)
 {
 #ifdef _HARDKERNEL
 	__asm __volatile ("movl %0,%%r1;jsb Sunlock"
 	    : /* No output */
 	    : "g"(__alp)
 	    : "r1","cc","memory");
 #else
 	__asm __volatile ("bbcci $0,%0,1f;1:"
 	    : /* No output */
 	    : "m"(*__alp)
-	    : "cc");
+	    : "cc", "memory");
 #endif
 }
 
 #if defined(MULTIPROCESSOR)
 /*
  * On the Vax, interprocessor interrupts can come in at device priority
  * level or lower.  This can cause some problems while waiting for r/w
  * spinlocks from a high'ish priority level: IPIs that come in will not
  * be processed.  This can lead to deadlock.
  *
  * This hook allows IPIs to be processed while a spinlock's interlock
  * is released.
  */
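
As a closing illustration (hypothetical code, not from the tree): with the
clobbers in place, the compiler must keep accesses to data guarded by the
lock inside the critical section.

/*
 * Hypothetical caller; `counter' and `bump' are made-up names.
 * Before this fix, the non-_HARDKERNEL asm had no "memory" clobber,
 * so the increment below could legally be hoisted above the lock or
 * sunk below the unlock by the optimizer.
 */
static __cpu_simple_lock_t lock = __SIMPLELOCK_UNLOCKED;
static int counter;

static void
bump(void)
{
	__cpu_simple_lock(&lock);
	counter++;		/* must stay between lock and unlock */
	__cpu_simple_unlock(&lock);
}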