Sun Feb 13 13:41:44 2022 UTC ()
m68k: __cpu_simple_unlock audit.

- Use `cc' clobbers in asm volatile because they touch the condition
  codes.

- Use `memory' clobbers in asm volatile so the compiler doesn't hoist
  loads and stores in the critical section at addresses _other_ than
  the lock, which would make them happen before __cpu_simple_lock or
  __cpu_simple_lock_try.

- Not sure if we have any (or if there even are any?) multicore m68k
  systems out there, but __cpu_simple_unlock needs __insn_barrier
  either way so the compiler doesn't delay loads and stores issued
  before __cpu_simple_unlock until after it.


(riastradh)
diff -r1.16 -r1.17 src/sys/arch/m68k/include/lock.h

cvs diff -r1.16 -r1.17 src/sys/arch/m68k/include/lock.h (expand / switch to context diff)
--- src/sys/arch/m68k/include/lock.h 2019/11/29 20:05:49 1.16
+++ src/sys/arch/m68k/include/lock.h 2022/02/13 13:41:44 1.17
@@ -1,4 +1,4 @@
-/*	$NetBSD: lock.h,v 1.16 2019/11/29 20:05:49 riastradh Exp $	*/
+/*	$NetBSD: lock.h,v 1.17 2022/02/13 13:41:44 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2000 The NetBSD Foundation, Inc.
@@ -75,7 +75,9 @@
 	__asm volatile(
 		"1:	tas	%0	\n"
 		"	jne	1b	\n"
-		: "=m" (*alp));
+		: "=m" (*alp)
+		: /* no inputs */
+		: "cc", "memory");
 }
 
 static __inline int
@@ -89,7 +91,9 @@
 		"	jeq	1f	\n"
 		"	moveq	#0, %1	\n"
 		"1:			\n"
-		: "=m" (*alp), "=d" (__rv));
+		: "=m" (*alp), "=d" (__rv)
+		: /* no inputs */
+		: "cc", "memory");
 
 	return (__rv);
 }
@@ -98,6 +102,7 @@
 __cpu_simple_unlock(__cpu_simple_lock_t *alp)
 {
 
+	__insn_barrier();
 	*alp = __SIMPLELOCK_UNLOCKED;
 }