Fri Feb 11 17:53:28 2022 UTC
ucas(9): Membar audit.

- Omit needless membar_enter before ipi_trigger_broadcast.  This was
  presumably intended to establish a happens-before relation between
  the following two CPUs:

	/* CPU doing ucas */
	ucas_critical_enter()
		ucas_critical_pausing_cpus = ncpu - 1	(A)
		ipi_trigger_broadcast()

	/* other CPU walking by whistling innocently */
	IPI handler
	ucas_critical_cpu_gate()
		load ucas_critical_pausing_cpus		(B)

  That is, this was presumably meant to ensure (A) happens-before (B).
  This relation is already guaranteed by ipi(9), so there is no need
  for any explicit memory barrier.

- Issue a store-release in ucas_critical_cpu_gate (paired with a
  load-acquire in ucas_critical_wait) so we have the following
  happens-before relation, which was otherwise not guaranteed except
  if __HAVE_ATOMIC_AS_MEMBAR:

	/* other CPU walking by whistling innocently */
	...other logic touching the target ucas word...	(A)
	IPI handler
	ucas_critical_cpu_gate()
		...
		atomic_dec_uint(&ucas_critical_pausing_cpus)

  happens-before

	/* CPU doing ucas */
	ucas_critical_enter() -> ucas_critical_wait();
	...touching the word with ufetch/ustore...	(B)

  We need to ensure that the logic at (A), where another CPU touches
  the target ucas word, happens-before we actually do the ucas at (B).

  (a) This requires the other CPU to do a store-release on
      ucas_critical_pausing_cpus in ucas_critical_cpu_gate, and

  (b) this requires the ucas CPU to do a load-acquire on
      ucas_critical_pausing_cpus in ucas_critical_wait.

  Without _both_ sides -- store-release and then load-acquire -- there
  is no such happens-before guarantee; another CPU may have a buffered
  store, for instance, that clobbers the ucas.

  For now, do the store-release with a membar_exit (issued only if
  __HAVE_ATOMIC_AS_MEMBAR is not defined) followed by atomic_dec_uint
  -- later, with the C11 API, we can dispense with the #ifdef and just
  use atomic_fetch_add_explicit(..., memory_order_release).  The
  load-acquire we can do with atomic_load_acquire.
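
  Purely as an illustration of that eventual C11 shape -- not the
  committed code, and with hypothetical names gate_side/wait_side
  standing in for ucas_critical_cpu_gate/ucas_critical_wait, using
  atomic_fetch_sub_explicit as the decrement form of the
  atomic_fetch_add_explicit mentioned above -- the pairing would look
  roughly like:

	#include <stdatomic.h>

	static atomic_uint ucas_critical_pausing_cpus;

	/* other CPU, in the IPI handler (cf. ucas_critical_cpu_gate) */
	static void
	gate_side(void)
	{
		/* store-release: everything before this decrement... */
		atomic_fetch_sub_explicit(&ucas_critical_pausing_cpus, 1,
		    memory_order_release);
	}

	/* CPU doing ucas (cf. ucas_critical_wait) */
	static void
	wait_side(void)
	{
		/* ...happens-before everything after this loop exits */
		while (atomic_load_explicit(&ucas_critical_pausing_cpus,
		    memory_order_acquire) > 0)
			continue;
		/* safe to do the ucas with ufetch/ustore now */
	}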

- Issue a load-acquire in ucas_critical_cpu_gate so we have the
  following happens-before relation which was otherwise not guaranteed:

	/* CPU doing ucas */
	...ufetch/ustore...				(A)
	ucas_critical_exit()
		ucas_critical_pausing_cpus = -1;

	/* other CPU walking by whistling innocently */
	IPI handler
	ucas_critical_cpu_gate()
		...
		while (ucas_critical_pausing_cpus != -1)
			spin;
	...other logic touching the target ucas word...	(B)

  We need to ensure that the ucas at (A) happens-before any logic on
  another CPU that might use its result at (B).

  (a) This requires that the ucas CPU do a store-release on
      ucas_critical_pausing_cpus in ucas_critical_exit, and

  (b) this requires that the other CPU do a load-acquire on
      ucas_critical_pausing_cpus in ucas_critical_cpu_gate.

  Without _both_ sides -- store-release and then load-acquire -- there
  is no such happens-before guarantee; the other CPU might witness a
  stale cached value of the target location alongside a new value of
  some other location, i.e. observe the two stores out of order.
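
  Continuing the same illustrative C11 sketch from above (again
  hypothetical -- the change below uses the kernel's own
  atomic_store_release and atomic_load_acquire), the exit/gate side of
  the pairing would read:

	/* CPU doing ucas (cf. ucas_critical_exit): reopen the gate */
	static void
	exit_side(void)
	{
		/* store-release: the ucas is ordered before this store */
		atomic_store_explicit(&ucas_critical_pausing_cpus, (unsigned)-1,
		    memory_order_release);
	}

	/* other CPU, still in the IPI handler: spin until the gate opens */
	static void
	gate_wait(void)
	{
		/* load-acquire: pairs with the release store above */
		while (atomic_load_explicit(&ucas_critical_pausing_cpus,
		    memory_order_acquire) != (unsigned)-1)
			continue;
		/* guaranteed to see the ucas'd value of the target word */
	}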

- Use atomic_load/store_* to avoid the appearance of races, e.g. for
  sanitizers.

- Document which barriers pair up with which barriers and what they're
  doing.


(riastradh)
diff -r1.14 -r1.15 src/sys/kern/subr_copy.c

cvs diff -u -r1.14 -r1.15 src/sys/kern/subr_copy.c

--- src/sys/kern/subr_copy.c 2020/05/23 23:42:43 1.14
+++ src/sys/kern/subr_copy.c 2022/02/11 17:53:28 1.15
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_copy.c,v 1.14 2020/05/23 23:42:43 ad Exp $	*/
+/*	$NetBSD: subr_copy.c,v 1.15 2022/02/11 17:53:28 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
@@ -80,7 +80,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.14 2020/05/23 23:42:43 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.15 2022/02/11 17:53:28 riastradh Exp $");
 
 #define __UFETCHSTORE_PRIVATE
 #define __UCAS_PRIVATE
@@ -400,26 +400,59 @@
 {
 	int count = SPINLOCK_BACKOFF_MIN;
 
-	KASSERT(ucas_critical_pausing_cpus > 0);
+	KASSERT(atomic_load_relaxed(&ucas_critical_pausing_cpus) > 0);
+
+	/*
+	 * Notify ucas_critical_wait that we have stopped.  Using
+	 * store-release ensures all our memory operations up to the
+	 * IPI happen before the ucas -- no buffered stores on our end
+	 * can clobber it later on, for instance.
+	 *
+	 * Matches atomic_load_acquire in ucas_critical_wait -- turns
+	 * the following atomic_dec_uint into a store-release.
+	 */
+#ifndef __HAVE_ATOMIC_AS_MEMBAR
+	membar_exit();
+#endif
 	atomic_dec_uint(&ucas_critical_pausing_cpus);
-	while (ucas_critical_pausing_cpus != (u_int)-1) {
+
+	/*
+	 * Wait for ucas_critical_exit to reopen the gate and let us
+	 * proceed.  Using a load-acquire ensures the ucas happens
+	 * before any of our memory operations when we return from the
+	 * IPI and proceed -- we won't observe any stale cached value
+	 * that the ucas overwrote, for instance.
+	 *
+	 * Matches atomic_store_release in ucas_critical_exit.
+	 */
+	while (atomic_load_acquire(&ucas_critical_pausing_cpus) != (u_int)-1) {
 		SPINLOCK_BACKOFF(count);
 	}
 }
 
 static int
 ucas_critical_init(void)
 {
+
 	ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
 	return 0;
 }
 
 static void
 ucas_critical_wait(void)
 {
 	int count = SPINLOCK_BACKOFF_MIN;
 
-	while (ucas_critical_pausing_cpus > 0) {
+	/*
+	 * Wait for all CPUs to stop at the gate.  Using a load-acquire
+	 * ensures all memory operations before they stop at the gate
+	 * happen before the ucas -- no buffered stores in other CPUs
+	 * can clobber it later on, for instance.
+	 *
+	 * Matches membar_exit/atomic_dec_uint (store-release) in
+	 * ucas_critical_cpu_gate.
+	 */
+	while (atomic_load_acquire(&ucas_critical_pausing_cpus) > 0) {
 		SPINLOCK_BACKOFF(count);
 	}
 }
@@ -444,8 +477,6 @@
 		mutex_enter(&cpu_lock);
 		ucas_critical_splcookie = splhigh();
 		ucas_critical_pausing_cpus = ncpu - 1;
-		membar_enter();
-
 		ipi_trigger_broadcast(ucas_critical_ipi, true);
 		ucas_critical_wait();
 		return;
@@ -461,8 +492,17 @@
 
 #if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
 	if (ncpu > 1) {
-		membar_exit();
-		ucas_critical_pausing_cpus = (u_int)-1;
+		/*
+		 * Open the gate and notify all CPUs in
+		 * ucas_critical_cpu_gate that they can now proceed.
+		 * Using a store-release ensures the ucas happens
+		 * before any memory operations they issue after the
+		 * IPI -- they won't observe any stale cache of the
+		 * target word, for instance.
+		 *
+		 * Matches atomic_load_acquire in ucas_critical_cpu_gate.
+		 */
+		atomic_store_release(&ucas_critical_pausing_cpus, (u_int)-1);
 		splx(ucas_critical_splcookie);
 		mutex_exit(&cpu_lock);
 		return;