Sun Feb 13 13:41:44 2022 UTC
m68k: __cpu_simple_lock audit.

- Use `cc' clobbers in asm volatile because tas modifies the condition
  codes.

- Use `memory' clobbers in asm volatile so the compiler doesn't hoist
  loads and stores in the critical section at addresses _other_ than
  the lock above __cpu_simple_lock or __cpu_simple_lock_try, where
  they would happen before the lock is held.

- Not sure if we have any (or if there even are any?) multicore m68k
  systems out there, but __cpu_simple_unlock needs __insn_barrier
  either way so the compiler doesn't delay loads and stores issued
  before __cpu_simple_unlock until after the lock is released (see
  the sketch below).
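
For reference, __insn_barrier() is a compiler barrier only; it emits
no instructions.  A minimal sketch of the idea (the real definition
lives in NetBSD's <sys/cdefs.h>):

	/*
	 * Empty asm with a "memory" clobber: free at run time, but
	 * the compiler may not cache memory contents across it or
	 * move loads and stores past it.  Issued before the store of
	 * __SIMPLELOCK_UNLOCKED, it pins the critical section's loads
	 * and stores above the release.
	 */
	#define __insn_barrier()	__asm volatile("" : : : "memory")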


(riastradh)
diff -r1.16 -r1.17 src/sys/arch/m68k/include/lock.h

cvs diff -r1.16 -r1.17 src/sys/arch/m68k/include/lock.h

--- src/sys/arch/m68k/include/lock.h 2019/11/29 20:05:49 1.16
+++ src/sys/arch/m68k/include/lock.h 2022/02/13 13:41:44 1.17
@@ -1,14 +1,14 @@
-/*	$NetBSD: lock.h,v 1.16 2019/11/29 20:05:49 riastradh Exp $	*/
+/*	$NetBSD: lock.h,v 1.17 2022/02/13 13:41:44 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2000 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -65,40 +65,45 @@ __cpu_simple_lock_clear(__cpu_simple_loc
 static __inline void
 __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
 {
 	*__ptr = __SIMPLELOCK_LOCKED;
 }
 
 static __inline void
 __cpu_simple_lock(__cpu_simple_lock_t *alp)
 {
 
 	__asm volatile(
 	"1:	tas	%0	\n"
 	"	jne	1b	\n"
-	: "=m" (*alp));
+	: "=m" (*alp)
+	: /* no inputs */
+	: "cc", "memory");
 }
 
 static __inline int
 __cpu_simple_lock_try(__cpu_simple_lock_t *alp)
 {
 	int __rv;
 
 	__asm volatile(
 	"	moveq	#1, %1	\n"
 	"	tas	%0	\n"
 	"	jeq	1f	\n"
 	"	moveq	#0, %1	\n"
 	"1:			\n"
-	: "=m" (*alp), "=d" (__rv));
+	: "=m" (*alp), "=d" (__rv)
+	: /* no inputs */
+	: "cc", "memory");
 
 	return (__rv);
 }
 
 static __inline void
 __cpu_simple_unlock(__cpu_simple_lock_t *alp)
 {
 
+	__insn_barrier();
 	*alp = __SIMPLELOCK_UNLOCKED;
 }
 
 #endif /* _M68K_LOCK_H_ */
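
For context, here is what a caller of this API looks like.  This is an
illustrative sketch, not part of the commit; the lock and counter names
are hypothetical, and <machine/lock.h> is normally pulled in via
<sys/lock.h>:

	#include <machine/lock.h>

	static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;
	static int example_count;

	void
	example_bump(void)
	{

		/* tas loop; the `memory' clobber keeps the increment below it */
		__cpu_simple_lock(&example_lock);
		example_count++;
		/* __insn_barrier() keeps the increment above the unlocking store */
		__cpu_simple_unlock(&example_lock);
	}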