Thu Jan 24 10:15:31 2013 UTC ()
Shut up clang by returning the result in a uint32_t variable.


(matt)
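For context, a minimal sketch of the pattern the fix applies (hypothetical names, not the tree's code): clang warns when a sub-word variable such as an unsigned char is used as a plain register ("=&r") output of ARM inline asm, because the instruction writes a full 32-bit register, so the result is captured in a uint32_t temporary and narrowed on return.

#include <stdint.h>

typedef unsigned char lock_t;	/* stand-in for __cpu_simple_lock_t */

static inline unsigned char
swpb_into_u32(lock_t val, volatile lock_t *ptr)
{
	uint32_t rv;		/* full-width temporary keeps clang quiet */

	/* SWPB writes a whole register; give clang an operand of that width. */
	__asm volatile("swpb %0, %1, [%2]"
	    : "=&r" (rv)
	    : "r" (val), "r" (ptr)
	    : "memory");
	return rv;		/* implicitly narrowed back to unsigned char */
}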
diff -r1.22 -r1.23 src/sys/arch/arm/include/lock.h


--- src/sys/arch/arm/include/lock.h 2012/11/25 20:40:30 1.22
+++ src/sys/arch/arm/include/lock.h 2013/01/24 10:15:30 1.23
@@ -1,14 +1,14 @@
-/*	$NetBSD: lock.h,v 1.22 2012/11/25 20:40:30 pgoyette Exp $	*/
+/*	$NetBSD: lock.h,v 1.23 2013/01/24 10:15:30 matt Exp $	*/
 
 /*-
  * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -68,27 +68,28 @@ __cpu_simple_lock_set(__cpu_simple_lock_
 #ifdef _KERNEL
 #include <arm/cpufunc.h>
 
 #define	mb_read		drain_writebuf		/* in cpufunc.h */
 #define	mb_write	drain_writebuf		/* in cpufunc.h */
 #define	mb_memory	drain_writebuf		/* in cpufunc.h */
 #endif
 
 #if defined(_KERNEL)
 static __inline unsigned char
 __swp(__cpu_simple_lock_t __val, volatile __cpu_simple_lock_t *__ptr)
 {
 #ifdef _ARM_ARCH_6
-	__cpu_simple_lock_t __rv, __tmp;
+	uint32_t __rv;
+	__cpu_simple_lock_t __tmp;
 	if (sizeof(*__ptr) == 1) {
 		__asm volatile(
 			"1:\t"
 			"ldrexb\t%[__rv], [%[__ptr]]"		"\n\t"
 			"cmp\t%[__rv],%[__val]"			"\n\t"
 			"strexbne\t%[__tmp], %[__val], [%[__ptr]]"	"\n\t"
 			"cmpne\t%[__tmp], #0"			"\n\t"
 			"bne\t1b"				"\n\t"
 #ifdef _ARM_ARCH_7
 			"dmb"
 #else
 			"mcr\tp15, 0, %[__tmp], c7, c10, 5"
 #endif
@@ -102,29 +103,30 @@ __swp(__cpu_simple_lock_t __val, volatil
 			"strexne\t%[__tmp], %[__val], [%[__ptr]]"	"\n\t"
 			"cmpne\t%[__tmp], #0"			"\n\t"
 			"bne\t1b"				"\n\t"
 #ifdef _ARM_ARCH_7
 			"nop"
 #else
 			"mcr\tp15, 0, %[__tmp], c7, c10, 5"
 #endif
 		    : [__rv] "=&r" (__rv), [__tmp] "=&r"(__tmp)
 		    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
 	}
 	return __rv;
 #else
+	uint32_t __val32;
 	__asm volatile("swpb %0, %1, [%2]"
-	    : "=&r" (__val) : "r" (__val), "r" (__ptr) : "memory");
-	return __val;
+	    : "=&r" (__val32) : "r" (__val), "r" (__ptr) : "memory");
+	return __val32;
 #endif
 }
 #else
 /*
  * On Cortex-A9 (SMP), SWP no longer guarantees atomic results.  Thus we pad
  * out SWP so that when the A9 generates an undefined exception we can replace
  * the SWP/MOV instructions with the right LDREX/STREX instructions.
  *
  * This is why we force the SWP into the template needed for LDREX/STREX
  * including the extra instructions and extra register for testing the result.
  */
 static __inline int
 __swp(int __val, volatile int *__ptr)
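As background for the Cortex-A9 comment in the hunk above: on ARMv6 and later the unconditional exchange that SWPB performed is expressed as an LDREX/STREX retry loop. The sketch below is illustrative only (the function name is made up and this is not the sequence the exception handler installs); it shows the basic shape: load-exclusive the old byte, store-exclusive the new one, and retry until the store-exclusive reports success.

static __inline unsigned char
ldrexb_swap(unsigned char newv, volatile unsigned char *ptr)
{
	unsigned int old, fail;	/* full-width outputs, per the clang fix above */

	__asm volatile(
		"1:\t"
		"ldrexb\t%[old], [%[ptr]]"		"\n\t"	/* old = *ptr (exclusive) */
		"strexb\t%[fail], %[newv], [%[ptr]]"	"\n\t"	/* try *ptr = newv */
		"cmp\t%[fail], #0"			"\n\t"	/* 0 means the store landed */
		"bne\t1b"					/* lost exclusivity: retry */
		: [old] "=&r" (old), [fail] "=&r" (fail)
		: [newv] "r" (newv), [ptr] "r" (ptr)
		: "cc", "memory");
	return old;
}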