| @@ -1,1463 +1,1481 @@ | | | @@ -1,1463 +1,1481 @@ |
1 | /* $NetBSD: linux_dma_resv.c,v 1.21 2021/12/19 12:36:02 riastradh Exp $ */ | | 1 | /* $NetBSD: linux_dma_resv.c,v 1.22 2022/02/15 22:51:03 riastradh Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2018 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2018 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Taylor R. Campbell. | | 8 | * by Taylor R. Campbell. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | #include <sys/cdefs.h> | | 32 | #include <sys/cdefs.h> |
33 | __KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.21 2021/12/19 12:36:02 riastradh Exp $"); | | 33 | __KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.22 2022/02/15 22:51:03 riastradh Exp $"); |
34 | | | 34 | |
35 | #include <sys/param.h> | | 35 | #include <sys/param.h> |
36 | #include <sys/poll.h> | | 36 | #include <sys/poll.h> |
37 | #include <sys/select.h> | | 37 | #include <sys/select.h> |
38 | | | 38 | |
39 | #include <linux/dma-fence.h> | | 39 | #include <linux/dma-fence.h> |
40 | #include <linux/dma-resv.h> | | 40 | #include <linux/dma-resv.h> |
41 | #include <linux/seqlock.h> | | 41 | #include <linux/seqlock.h> |
42 | #include <linux/ww_mutex.h> | | 42 | #include <linux/ww_mutex.h> |
43 | | | 43 | |
44 | DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned); | | 44 | DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned); |
45 | | | 45 | |
46 | static struct dma_resv_list * | | 46 | static struct dma_resv_list * |
47 | objlist_tryalloc(uint32_t n) | | 47 | objlist_tryalloc(uint32_t n) |
48 | { | | 48 | { |
49 | struct dma_resv_list *list; | | 49 | struct dma_resv_list *list; |
50 | | | 50 | |
51 | list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP); | | 51 | list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP); |
52 | if (list == NULL) | | 52 | if (list == NULL) |
53 | return NULL; | | 53 | return NULL; |
54 | list->shared_max = n; | | 54 | list->shared_max = n; |
55 | | | 55 | |
56 | return list; | | 56 | return list; |
57 | } | | 57 | } |
58 | | | 58 | |
| | | 59 | static struct dma_resv_list * |
| | | 60 | objlist_alloc(uint32_t n) |
| | | 61 | { |
| | | 62 | struct dma_resv_list *list; |
| | | 63 | |
| | | 64 | list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_SLEEP); |
| | | 65 | list->shared_max = n; |
| | | 66 | |
| | | 67 | return list; |
| | | 68 | } |
| | | 69 | |
59 | static void | | 70 | static void |
60 | objlist_free(struct dma_resv_list *list) | | 71 | objlist_free(struct dma_resv_list *list) |
61 | { | | 72 | { |
62 | uint32_t n = list->shared_max; | | 73 | uint32_t n = list->shared_max; |
63 | | | 74 | |
64 | kmem_free(list, offsetof(typeof(*list), shared[n])); | | 75 | kmem_free(list, offsetof(typeof(*list), shared[n])); |
65 | } | | 76 | } |
66 | | | 77 | |
67 | static void | | 78 | static void |
68 | objlist_free_cb(struct rcu_head *rcu) | | 79 | objlist_free_cb(struct rcu_head *rcu) |
69 | { | | 80 | { |
70 | struct dma_resv_list *list = container_of(rcu, | | 81 | struct dma_resv_list *list = container_of(rcu, |
71 | struct dma_resv_list, rol_rcu); | | 82 | struct dma_resv_list, rol_rcu); |
72 | | | 83 | |
73 | objlist_free(list); | | 84 | objlist_free(list); |
74 | } | | 85 | } |
75 | | | 86 | |
76 | static void | | 87 | static void |
77 | objlist_defer_free(struct dma_resv_list *list) | | 88 | objlist_defer_free(struct dma_resv_list *list) |
78 | { | | 89 | { |
79 | | | 90 | |
80 | call_rcu(&list->rol_rcu, objlist_free_cb); | | 91 | call_rcu(&list->rol_rcu, objlist_free_cb); |
81 | } | | 92 | } |
82 | | | 93 | |
83 | /* | | 94 | /* |
84 | * dma_resv_init(robj) | | 95 | * dma_resv_init(robj) |
85 | * | | 96 | * |
86 | * Initialize a reservation object. Caller must later destroy it | | 97 | * Initialize a reservation object. Caller must later destroy it |
87 | * with dma_resv_fini. | | 98 | * with dma_resv_fini. |
88 | */ | | 99 | */ |
89 | void | | 100 | void |
90 | dma_resv_init(struct dma_resv *robj) | | 101 | dma_resv_init(struct dma_resv *robj) |
91 | { | | 102 | { |
92 | | | 103 | |
93 | ww_mutex_init(&robj->lock, &reservation_ww_class); | | 104 | ww_mutex_init(&robj->lock, &reservation_ww_class); |
94 | seqcount_init(&robj->seq); | | 105 | seqcount_init(&robj->seq); |
95 | robj->fence_excl = NULL; | | 106 | robj->fence_excl = NULL; |
96 | robj->fence = NULL; | | 107 | robj->fence = NULL; |
97 | robj->robj_prealloc = NULL; | | 108 | robj->robj_prealloc = NULL; |
98 | } | | 109 | } |
99 | | | 110 | |
100 | /* | | 111 | /* |
101 | * dma_resv_fini(robj) | | 112 | * dma_resv_fini(robj) |
102 | * | | 113 | * |
103 | * Destroy a reservation object, freeing any memory that had been | | 114 | * Destroy a reservation object, freeing any memory that had been |
104 | * allocated for it. Caller must have exclusive access to it. | | 115 | * allocated for it. Caller must have exclusive access to it. |
105 | */ | | 116 | */ |
106 | void | | 117 | void |
107 | dma_resv_fini(struct dma_resv *robj) | | 118 | dma_resv_fini(struct dma_resv *robj) |
108 | { | | 119 | { |
109 | unsigned i; | | 120 | unsigned i; |
110 | | | 121 | |
111 | if (robj->robj_prealloc) { | | 122 | if (robj->robj_prealloc) { |
112 | objlist_free(robj->robj_prealloc); | | 123 | objlist_free(robj->robj_prealloc); |
113 | robj->robj_prealloc = NULL; /* paranoia */ | | 124 | robj->robj_prealloc = NULL; /* paranoia */ |
114 | } | | 125 | } |
115 | if (robj->fence) { | | 126 | if (robj->fence) { |
116 | for (i = 0; i < robj->fence->shared_count; i++) { | | 127 | for (i = 0; i < robj->fence->shared_count; i++) { |
117 | dma_fence_put(robj->fence->shared[i]); | | 128 | dma_fence_put(robj->fence->shared[i]); |
118 | robj->fence->shared[i] = NULL; /* paranoia */ | | 129 | robj->fence->shared[i] = NULL; /* paranoia */ |
119 | } | | 130 | } |
120 | objlist_free(robj->fence); | | 131 | objlist_free(robj->fence); |
121 | robj->fence = NULL; /* paranoia */ | | 132 | robj->fence = NULL; /* paranoia */ |
122 | } | | 133 | } |
123 | if (robj->fence_excl) { | | 134 | if (robj->fence_excl) { |
124 | dma_fence_put(robj->fence_excl); | | 135 | dma_fence_put(robj->fence_excl); |
125 | robj->fence_excl = NULL; /* paranoia */ | | 136 | robj->fence_excl = NULL; /* paranoia */ |
126 | } | | 137 | } |
127 | ww_mutex_destroy(&robj->lock); | | 138 | ww_mutex_destroy(&robj->lock); |
128 | } | | 139 | } |
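
For reference, a minimal sketch (not part of this file) of the intended lifecycle, assuming a hypothetical driver object `struct myobj' that embeds its reservation object:

	struct myobj {
		struct dma_resv resv;
		/* ... driver-private state ... */
	};

	static void
	myobj_init(struct myobj *obj)
	{

		dma_resv_init(&obj->resv);
	}

	static void
	myobj_destroy(struct myobj *obj)
	{

		/* Caller must have exclusive access by now. */
		dma_resv_fini(&obj->resv);
	}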
129 | | | 140 | |
130 | /* | | 141 | /* |
131 | * dma_resv_lock(robj, ctx) | | 142 | * dma_resv_lock(robj, ctx) |
132 | * | | 143 | * |
133 | * Acquire a reservation object's lock. Return 0 on success, | | 144 | * Acquire a reservation object's lock. Return 0 on success, |
134 | * -EALREADY if caller already holds it, -EDEADLK if a | | 145 | * -EALREADY if caller already holds it, -EDEADLK if a |
135 | * higher-priority owner holds it and the caller must back out and | | 146 | * higher-priority owner holds it and the caller must back out and |
136 | * retry. | | 147 | * retry. |
137 | */ | | 148 | */ |
138 | int | | 149 | int |
139 | dma_resv_lock(struct dma_resv *robj, | | 150 | dma_resv_lock(struct dma_resv *robj, |
140 | struct ww_acquire_ctx *ctx) | | 151 | struct ww_acquire_ctx *ctx) |
141 | { | | 152 | { |
142 | | | 153 | |
143 | return ww_mutex_lock(&robj->lock, ctx); | | 154 | return ww_mutex_lock(&robj->lock, ctx); |
144 | } | | 155 | } |
145 | | | 156 | |
146 | /* | | 157 | /* |
147 | * dma_resv_lock_slow(robj, ctx) | | 158 | * dma_resv_lock_slow(robj, ctx) |
148 | * | | 159 | * |
149 | * Acquire a reservation object's lock. Caller must not hold | | 160 | * Acquire a reservation object's lock. Caller must not hold |
150 | * this lock or any others -- this is to be used in slow paths | | 161 | * this lock or any others -- this is to be used in slow paths |
151 | * after dma_resv_lock or dma_resv_lock_interruptible has failed | | 162 | * after dma_resv_lock or dma_resv_lock_interruptible has failed |
152 | * and the caller has backed out all other locks. | | 163 | * and the caller has backed out all other locks. |
153 | */ | | 164 | */ |
154 | void | | 165 | void |
155 | dma_resv_lock_slow(struct dma_resv *robj, | | 166 | dma_resv_lock_slow(struct dma_resv *robj, |
156 | struct ww_acquire_ctx *ctx) | | 167 | struct ww_acquire_ctx *ctx) |
157 | { | | 168 | { |
158 | | | 169 | |
159 | ww_mutex_lock_slow(&robj->lock, ctx); | | 170 | ww_mutex_lock_slow(&robj->lock, ctx); |
160 | } | | 171 | } |
161 | | | 172 | |
162 | /* | | 173 | /* |
163 | * dma_resv_lock_interruptible(robj, ctx) | | 174 | * dma_resv_lock_interruptible(robj, ctx) |
164 | * | | 175 | * |
165 | * Acquire a reservation object's lock. Return 0 on success, | | 176 | * Acquire a reservation object's lock. Return 0 on success, |
166 | * -EALREADY if caller already holds it, -EDEADLK if a | | 177 | * -EALREADY if caller already holds it, -EDEADLK if a |
167 | * higher-priority owner holds it and the caller must back out and | | 178 | * higher-priority owner holds it and the caller must back out and |
168 | * retry, -EINTR if interrupted. | | 179 | * retry, -EINTR if interrupted. |
169 | */ | | 180 | */ |
170 | int | | 181 | int |
171 | dma_resv_lock_interruptible(struct dma_resv *robj, | | 182 | dma_resv_lock_interruptible(struct dma_resv *robj, |
172 | struct ww_acquire_ctx *ctx) | | 183 | struct ww_acquire_ctx *ctx) |
173 | { | | 184 | { |
174 | | | 185 | |
175 | return ww_mutex_lock_interruptible(&robj->lock, ctx); | | 186 | return ww_mutex_lock_interruptible(&robj->lock, ctx); |
176 | } | | 187 | } |
177 | | | 188 | |
178 | /* | | 189 | /* |
179 | * dma_resv_lock_slow_interruptible(robj, ctx) | | 190 | * dma_resv_lock_slow_interruptible(robj, ctx) |
180 | * | | 191 | * |
181 | * Acquire a reservation object's lock. Caller must not hold | | 192 | * Acquire a reservation object's lock. Caller must not hold |
182 | * this lock or any others -- this is to be used in slow paths | | 193 | * this lock or any others -- this is to be used in slow paths |
183 | * after dma_resv_lock or dma_resv_lock_interruptible has failed | | 194 | * after dma_resv_lock or dma_resv_lock_interruptible has failed |
184 | * and the caller has backed out all other locks. Return 0 on | | 195 | * and the caller has backed out all other locks. Return 0 on |
185 | * success, -EINTR if interrupted. | | 196 | * success, -EINTR if interrupted. |
186 | */ | | 197 | */ |
187 | int | | 198 | int |
188 | dma_resv_lock_slow_interruptible(struct dma_resv *robj, | | 199 | dma_resv_lock_slow_interruptible(struct dma_resv *robj, |
189 | struct ww_acquire_ctx *ctx) | | 200 | struct ww_acquire_ctx *ctx) |
190 | { | | 201 | { |
191 | | | 202 | |
192 | return ww_mutex_lock_slow_interruptible(&robj->lock, ctx); | | 203 | return ww_mutex_lock_slow_interruptible(&robj->lock, ctx); |
193 | } | | 204 | } |
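
Together with dma_resv_unlock below, these entry points implement the standard ww_mutex acquire/backoff protocol. A minimal sketch, assuming two hypothetical reservation objects `a' and `b' locked under one acquire context:

	struct dma_resv *a, *b;	/* hypothetical objects to lock */
	struct dma_resv *tmp;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(a, &ctx);
	if (ret == -EDEADLK) {
		/* No other locks held, so just sleep for it. */
		dma_resv_lock_slow(a, &ctx);
	}
	while ((ret = dma_resv_lock(b, &ctx)) == -EDEADLK) {
		/*
		 * Back out, reacquire the contended lock first with
		 * the slow path, and retry the other one.
		 */
		dma_resv_unlock(a);
		dma_resv_lock_slow(b, &ctx);
		tmp = a; a = b; b = tmp;
	}
	ww_acquire_done(&ctx);
	/* ... both locked: update fences ... */
	dma_resv_unlock(b);
	dma_resv_unlock(a);
	ww_acquire_fini(&ctx);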
194 | | | 205 | |
195 | /* | | 206 | /* |
196 | * dma_resv_trylock(robj) | | 207 | * dma_resv_trylock(robj) |
197 | * | | 208 | * |
198 | * Try to acquire a reservation object's lock without blocking. | | 209 | * Try to acquire a reservation object's lock without blocking. |
199 | * Return true on success, false on failure. | | 210 | * Return true on success, false on failure. |
200 | */ | | 211 | */ |
201 | bool | | 212 | bool |
202 | dma_resv_trylock(struct dma_resv *robj) | | 213 | dma_resv_trylock(struct dma_resv *robj) |
203 | { | | 214 | { |
204 | | | 215 | |
205 | return ww_mutex_trylock(&robj->lock); | | 216 | return ww_mutex_trylock(&robj->lock); |
206 | } | | 217 | } |
207 | | | 218 | |
208 | /* | | 219 | /* |
209 | * dma_resv_locking_ctx(robj) | | 220 | * dma_resv_locking_ctx(robj) |
210 | * | | 221 | * |
211 | * Return a pointer to the ww_acquire_ctx used by the owner of | | 222 | * Return a pointer to the ww_acquire_ctx used by the owner of |
212 | * the reservation object's lock, or NULL if it is either not | | 223 | * the reservation object's lock, or NULL if it is either not |
213 | * owned or if it is locked without context. | | 224 | * owned or if it is locked without context. |
214 | */ | | 225 | */ |
215 | struct ww_acquire_ctx * | | 226 | struct ww_acquire_ctx * |
216 | dma_resv_locking_ctx(struct dma_resv *robj) | | 227 | dma_resv_locking_ctx(struct dma_resv *robj) |
217 | { | | 228 | { |
218 | | | 229 | |
219 | return ww_mutex_locking_ctx(&robj->lock); | | 230 | return ww_mutex_locking_ctx(&robj->lock); |
220 | } | | 231 | } |
221 | | | 232 | |
222 | /* | | 233 | /* |
223 | * dma_resv_unlock(robj) | | 234 | * dma_resv_unlock(robj) |
224 | * | | 235 | * |
225 | * Release a reservation object's lock. | | 236 | * Release a reservation object's lock. |
226 | */ | | 237 | */ |
227 | void | | 238 | void |
228 | dma_resv_unlock(struct dma_resv *robj) | | 239 | dma_resv_unlock(struct dma_resv *robj) |
229 | { | | 240 | { |
230 | | | 241 | |
231 | return ww_mutex_unlock(&robj->lock); | | 242 | return ww_mutex_unlock(&robj->lock); |
232 | } | | 243 | } |
233 | | | 244 | |
234 | /* | | 245 | /* |
235 | * dma_resv_is_locked(robj) | | 246 | * dma_resv_is_locked(robj) |
236 | * | | 247 | * |
237 | * True if robj is locked. | | 248 | * True if robj is locked. |
238 | */ | | 249 | */ |
239 | bool | | 250 | bool |
240 | dma_resv_is_locked(struct dma_resv *robj) | | 251 | dma_resv_is_locked(struct dma_resv *robj) |
241 | { | | 252 | { |
242 | | | 253 | |
243 | return ww_mutex_is_locked(&robj->lock); | | 254 | return ww_mutex_is_locked(&robj->lock); |
244 | } | | 255 | } |
245 | | | 256 | |
246 | /* | | 257 | /* |
247 | * dma_resv_held(robj) | | 258 | * dma_resv_held(robj) |
248 | * | | 259 | * |
249 | * True if robj is locked. | | 260 | * True if robj is locked. |
250 | */ | | 261 | */ |
251 | bool | | 262 | bool |
252 | dma_resv_held(struct dma_resv *robj) | | 263 | dma_resv_held(struct dma_resv *robj) |
253 | { | | 264 | { |
254 | | | 265 | |
255 | return ww_mutex_is_locked(&robj->lock); | | 266 | return ww_mutex_is_locked(&robj->lock); |
256 | } | | 267 | } |
257 | | | 268 | |
258 | /* | | 269 | /* |
259 | * dma_resv_assert_held(robj) | | 270 | * dma_resv_assert_held(robj) |
260 | * | | 271 | * |
261 | * Panic if robj is not held, in DIAGNOSTIC builds. | | 272 | * Panic if robj is not held, in DIAGNOSTIC builds. |
262 | */ | | 273 | */ |
263 | void | | 274 | void |
264 | dma_resv_assert_held(struct dma_resv *robj) | | 275 | dma_resv_assert_held(struct dma_resv *robj) |
265 | { | | 276 | { |
266 | | | 277 | |
267 | KASSERT(dma_resv_held(robj)); | | 278 | KASSERT(dma_resv_held(robj)); |
268 | } | | 279 | } |
269 | | | 280 | |
270 | /* | | 281 | /* |
271 | * dma_resv_get_excl(robj) | | 282 | * dma_resv_get_excl(robj) |
272 | * | | 283 | * |
273 | * Return a pointer to the exclusive fence of the reservation | | 284 | * Return a pointer to the exclusive fence of the reservation |
274 | * object robj. | | 285 | * object robj. |
275 | * | | 286 | * |
276 | * Caller must have robj locked. | | 287 | * Caller must have robj locked. |
277 | */ | | 288 | */ |
278 | struct dma_fence * | | 289 | struct dma_fence * |
279 | dma_resv_get_excl(struct dma_resv *robj) | | 290 | dma_resv_get_excl(struct dma_resv *robj) |
280 | { | | 291 | { |
281 | | | 292 | |
282 | KASSERT(dma_resv_held(robj)); | | 293 | KASSERT(dma_resv_held(robj)); |
283 | return robj->fence_excl; | | 294 | return robj->fence_excl; |
284 | } | | 295 | } |
285 | | | 296 | |
286 | /* | | 297 | /* |
287 | * dma_resv_get_list(robj) | | 298 | * dma_resv_get_list(robj) |
288 | * | | 299 | * |
289 | * Return a pointer to the shared fence list of the reservation | | 300 | * Return a pointer to the shared fence list of the reservation |
290 | * object robj. | | 301 | * object robj. |
291 | * | | 302 | * |
292 | * Caller must have robj locked. | | 303 | * Caller must have robj locked. |
293 | */ | | 304 | */ |
294 | struct dma_resv_list * | | 305 | struct dma_resv_list * |
295 | dma_resv_get_list(struct dma_resv *robj) | | 306 | dma_resv_get_list(struct dma_resv *robj) |
296 | { | | 307 | { |
297 | | | 308 | |
298 | KASSERT(dma_resv_held(robj)); | | 309 | KASSERT(dma_resv_held(robj)); |
299 | return robj->fence; | | 310 | return robj->fence; |
300 | } | | 311 | } |
301 | | | 312 | |
302 | /* | | 313 | /* |
303 | * dma_resv_reserve_shared(robj, num_fences) | | 314 | * dma_resv_reserve_shared(robj, num_fences) |
304 | * | | 315 | * |
305 | * Reserve space in robj to add num_fences shared fences. To be | | 316 | * Reserve space in robj to add num_fences shared fences. To be |
306 | * used only once before calling dma_resv_add_shared_fence. | | 317 | * used only once before calling dma_resv_add_shared_fence. |
307 | * | | 318 | * |
308 | * Caller must have robj locked. | | 319 | * Caller must have robj locked. |
309 | * | | 320 | * |
310 | * Internally, we start with room for four entries and double if | | 321 | * Internally, we start with room for four entries and double if |
 311 | * we don't have enough; the extra headroom is not guaranteed. | | 322 | * we don't have enough; the extra headroom is not guaranteed. |
312 | */ | | 323 | */ |
313 | int | | 324 | int |
314 | dma_resv_reserve_shared(struct dma_resv *robj, unsigned int num_fences) | | 325 | dma_resv_reserve_shared(struct dma_resv *robj, unsigned int num_fences) |
315 | { | | 326 | { |
316 | struct dma_resv_list *list, *prealloc; | | 327 | struct dma_resv_list *list, *prealloc; |
317 | uint32_t n, nalloc; | | 328 | uint32_t n, nalloc; |
318 | | | 329 | |
319 | KASSERT(dma_resv_held(robj)); | | 330 | KASSERT(dma_resv_held(robj)); |
320 | | | 331 | |
321 | list = robj->fence; | | 332 | list = robj->fence; |
322 | prealloc = robj->robj_prealloc; | | 333 | prealloc = robj->robj_prealloc; |
323 | | | 334 | |
324 | /* If there's an existing list, check it for space. */ | | 335 | /* If there's an existing list, check it for space. */ |
325 | if (list) { | | 336 | if (list) { |
 326 | /* If there are too many already, give up. */ | | 337 | /* If there are too many already, give up. */ |
327 | if (list->shared_count > UINT32_MAX - num_fences) | | 338 | if (list->shared_count > UINT32_MAX - num_fences) |
328 | return -ENOMEM; | | 339 | return -ENOMEM; |
329 | | | 340 | |
330 | /* Add some more. */ | | 341 | /* Add some more. */ |
331 | n = list->shared_count + num_fences; | | 342 | n = list->shared_count + num_fences; |
332 | | | 343 | |
 333 | /* If there's already enough room, we're done. */ | | 344 | /* If there's already enough room, we're done. */ |
334 | if (n <= list->shared_max) | | 345 | if (n <= list->shared_max) |
335 | return 0; | | 346 | return 0; |
336 | } else { | | 347 | } else { |
337 | /* No list already. We need space for num_fences. */ | | 348 | /* No list already. We need space for num_fences. */ |
338 | n = num_fences; | | 349 | n = num_fences; |
339 | } | | 350 | } |
340 | | | 351 | |
341 | /* If not, maybe there's a preallocated list ready. */ | | 352 | /* If not, maybe there's a preallocated list ready. */ |
342 | if (prealloc != NULL) { | | 353 | if (prealloc != NULL) { |
343 | /* If there's enough room in it, stop here. */ | | 354 | /* If there's enough room in it, stop here. */ |
344 | if (n <= prealloc->shared_max) | | 355 | if (n <= prealloc->shared_max) |
345 | return 0; | | 356 | return 0; |
346 | | | 357 | |
347 | /* Try to double its capacity. */ | | 358 | /* Try to double its capacity. */ |
348 | nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n; | | 359 | nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n; |
349 | prealloc = objlist_tryalloc(nalloc); | | 360 | prealloc = objlist_alloc(nalloc); |
350 | if (prealloc == NULL) | | | |
351 | return -ENOMEM; | | | |
352 | | | 361 | |
353 | /* Swap the new preallocated list and free the old one. */ | | 362 | /* Swap the new preallocated list and free the old one. */ |
354 | objlist_free(robj->robj_prealloc); | | 363 | objlist_free(robj->robj_prealloc); |
355 | robj->robj_prealloc = prealloc; | | 364 | robj->robj_prealloc = prealloc; |
356 | } else { | | 365 | } else { |
357 | /* Start with some spare. */ | | 366 | /* Start with some spare. */ |
358 | nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4); | | 367 | nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4); |
359 | prealloc = objlist_tryalloc(nalloc); | | 368 | prealloc = objlist_alloc(nalloc); |
360 | if (prealloc == NULL) | | 369 | |
361 | return -ENOMEM; | | | |
362 | /* Save the new preallocated list. */ | | 370 | /* Save the new preallocated list. */ |
363 | robj->robj_prealloc = prealloc; | | 371 | robj->robj_prealloc = prealloc; |
364 | } | | 372 | } |
365 | | | 373 | |
366 | /* Success! */ | | 374 | /* Success! */ |
367 | return 0; | | 375 | return 0; |
368 | } | | 376 | } |
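
The intended calling sequence is reserve-then-add: reserve while allocation failure can still be reported, then add once failure is no longer an option. A sketch, reusing the hypothetical `struct myobj' from the lifecycle sketch above:

	static int
	myobj_attach_read_fence(struct myobj *obj, struct dma_fence *fence,
	    struct ww_acquire_ctx *ctx)
	{
		int ret;

		ret = dma_resv_lock(&obj->resv, ctx);
		if (ret)
			return ret;
		ret = dma_resv_reserve_shared(&obj->resv, 1);
		if (ret) {
			dma_resv_unlock(&obj->resv);
			return ret;
		}
		/* Cannot fail now: space was reserved above. */
		dma_resv_add_shared_fence(&obj->resv, fence);
		dma_resv_unlock(&obj->resv);
		return 0;
	}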
369 | | | 377 | |
370 | struct dma_resv_write_ticket { | | 378 | struct dma_resv_write_ticket { |
371 | }; | | 379 | }; |
372 | | | 380 | |
373 | /* | | 381 | /* |
374 | * dma_resv_write_begin(robj, ticket) | | 382 | * dma_resv_write_begin(robj, ticket) |
375 | * | | 383 | * |
376 | * Begin an atomic batch of writes to robj, and initialize opaque | | 384 | * Begin an atomic batch of writes to robj, and initialize opaque |
377 | * ticket for it. The ticket must be passed to | | 385 | * ticket for it. The ticket must be passed to |
378 | * dma_resv_write_commit to commit the writes. | | 386 | * dma_resv_write_commit to commit the writes. |
379 | * | | 387 | * |
380 | * Caller must have robj locked. | | 388 | * Caller must have robj locked. |
381 | * | | 389 | * |
382 | * Implies membar_producer, i.e. store-before-store barrier. Does | | 390 | * Implies membar_producer, i.e. store-before-store barrier. Does |
383 | * NOT serve as an acquire operation, however. | | 391 | * NOT serve as an acquire operation, however. |
384 | */ | | 392 | */ |
385 | static void | | 393 | static void |
386 | dma_resv_write_begin(struct dma_resv *robj, | | 394 | dma_resv_write_begin(struct dma_resv *robj, |
387 | struct dma_resv_write_ticket *ticket) | | 395 | struct dma_resv_write_ticket *ticket) |
388 | { | | 396 | { |
389 | | | 397 | |
390 | KASSERT(dma_resv_held(robj)); | | 398 | KASSERT(dma_resv_held(robj)); |
391 | | | 399 | |
392 | write_seqcount_begin(&robj->seq); | | 400 | write_seqcount_begin(&robj->seq); |
393 | } | | 401 | } |
394 | | | 402 | |
395 | /* | | 403 | /* |
396 | * dma_resv_write_commit(robj, ticket) | | 404 | * dma_resv_write_commit(robj, ticket) |
397 | * | | 405 | * |
398 | * Commit an atomic batch of writes to robj begun with the call to | | 406 | * Commit an atomic batch of writes to robj begun with the call to |
399 | * dma_resv_write_begin that returned ticket. | | 407 | * dma_resv_write_begin that returned ticket. |
400 | * | | 408 | * |
401 | * Caller must have robj locked. | | 409 | * Caller must have robj locked. |
402 | * | | 410 | * |
403 | * Implies membar_producer, i.e. store-before-store barrier. Does | | 411 | * Implies membar_producer, i.e. store-before-store barrier. Does |
404 | * NOT serve as a release operation, however. | | 412 | * NOT serve as a release operation, however. |
405 | */ | | 413 | */ |
406 | static void | | 414 | static void |
407 | dma_resv_write_commit(struct dma_resv *robj, | | 415 | dma_resv_write_commit(struct dma_resv *robj, |
408 | struct dma_resv_write_ticket *ticket) | | 416 | struct dma_resv_write_ticket *ticket) |
409 | { | | 417 | { |
410 | | | 418 | |
411 | KASSERT(dma_resv_held(robj)); | | 419 | KASSERT(dma_resv_held(robj)); |
412 | | | 420 | |
413 | write_seqcount_end(&robj->seq); | | 421 | write_seqcount_end(&robj->seq); |
414 | } | | 422 | } |
415 | | | 423 | |
416 | struct dma_resv_read_ticket { | | 424 | struct dma_resv_read_ticket { |
417 | unsigned version; | | 425 | unsigned version; |
418 | }; | | 426 | }; |
419 | | | 427 | |
420 | /* | | 428 | /* |
421 | * dma_resv_read_begin(robj, ticket) | | 429 | * dma_resv_read_begin(robj, ticket) |
422 | * | | 430 | * |
423 | * Begin a read section, and initialize opaque ticket for it. The | | 431 | * Begin a read section, and initialize opaque ticket for it. The |
 424 | * ticket must be passed to dma_resv_read_valid, and the | | 432 | * ticket must be passed to dma_resv_read_valid, and the |
425 | * caller must be prepared to retry reading if it fails. | | 433 | * caller must be prepared to retry reading if it fails. |
426 | */ | | 434 | */ |
427 | static void | | 435 | static void |
428 | dma_resv_read_begin(const struct dma_resv *robj, | | 436 | dma_resv_read_begin(const struct dma_resv *robj, |
429 | struct dma_resv_read_ticket *ticket) | | 437 | struct dma_resv_read_ticket *ticket) |
430 | { | | 438 | { |
431 | | | 439 | |
432 | ticket->version = read_seqcount_begin(&robj->seq); | | 440 | ticket->version = read_seqcount_begin(&robj->seq); |
433 | } | | 441 | } |
434 | | | 442 | |
435 | /* | | 443 | /* |
436 | * dma_resv_read_valid(robj, ticket) | | 444 | * dma_resv_read_valid(robj, ticket) |
437 | * | | 445 | * |
 438 | * Test whether the read section is still valid. Return true on | | 446 | * Test whether the read section is still valid. Return true on |
439 | * success, or false on failure if the read ticket has been | | 447 | * success, or false on failure if the read ticket has been |
440 | * invalidated. | | 448 | * invalidated. |
441 | */ | | 449 | */ |
442 | static bool | | 450 | static bool |
443 | dma_resv_read_valid(const struct dma_resv *robj, | | 451 | dma_resv_read_valid(const struct dma_resv *robj, |
444 | struct dma_resv_read_ticket *ticket) | | 452 | struct dma_resv_read_ticket *ticket) |
445 | { | | 453 | { |
446 | | | 454 | |
447 | return !read_seqcount_retry(&robj->seq, ticket->version); | | 455 | return !read_seqcount_retry(&robj->seq, ticket->version); |
448 | } | | 456 | } |
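
These two helpers follow the usual seqlock reader protocol: take a ticket, read a snapshot, and start over if a writer intervened. A minimal sketch of the retry loop used by the _rcu entry points below:

	struct dma_resv_read_ticket ticket;

	rcu_read_lock();
	do {
		dma_resv_read_begin(robj, &ticket);
		/* ... snapshot robj's fields with atomic loads ... */
	} while (!dma_resv_read_valid(robj, &ticket));
	rcu_read_unlock();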
449 | | | 457 | |
450 | /* | | 458 | /* |
451 | * dma_resv_get_shared_reader(robj, listp, shared_countp, ticket) | | 459 | * dma_resv_get_shared_reader(robj, listp, shared_countp, ticket) |
452 | * | | 460 | * |
453 | * Set *listp and *shared_countp to a snapshot of the pointer to | | 461 | * Set *listp and *shared_countp to a snapshot of the pointer to |
454 | * and length of the shared fence list of robj and return true, or | | 462 | * and length of the shared fence list of robj and return true, or |
455 | * set them to NULL/0 and return false if a writer intervened so | | 463 | * set them to NULL/0 and return false if a writer intervened so |
456 | * the caller must start over. | | 464 | * the caller must start over. |
457 | * | | 465 | * |
458 | * Both *listp and *shared_countp are unconditionally initialized | | 466 | * Both *listp and *shared_countp are unconditionally initialized |
459 | * on return. They may be NULL/0 even on success, if there is no | | 467 | * on return. They may be NULL/0 even on success, if there is no |
460 | * shared list at the moment. Does not take any fence references. | | 468 | * shared list at the moment. Does not take any fence references. |
461 | */ | | 469 | */ |
462 | static bool | | 470 | static bool |
463 | dma_resv_get_shared_reader(const struct dma_resv *robj, | | 471 | dma_resv_get_shared_reader(const struct dma_resv *robj, |
464 | const struct dma_resv_list **listp, unsigned *shared_countp, | | 472 | const struct dma_resv_list **listp, unsigned *shared_countp, |
465 | struct dma_resv_read_ticket *ticket) | | 473 | struct dma_resv_read_ticket *ticket) |
466 | { | | 474 | { |
467 | struct dma_resv_list *list; | | 475 | struct dma_resv_list *list; |
468 | unsigned shared_count = 0; | | 476 | unsigned shared_count = 0; |
469 | | | 477 | |
470 | /* | | 478 | /* |
471 | * Get the list and, if it is present, its length. If the list | | 479 | * Get the list and, if it is present, its length. If the list |
472 | * is present, it has a valid length. The atomic_load_consume | | 480 | * is present, it has a valid length. The atomic_load_consume |
473 | * pairs with the membar_producer in dma_resv_write_begin. | | 481 | * pairs with the membar_producer in dma_resv_write_begin. |
474 | */ | | 482 | */ |
475 | list = atomic_load_consume(&robj->fence); | | 483 | list = atomic_load_consume(&robj->fence); |
476 | shared_count = list ? atomic_load_relaxed(&list->shared_count) : 0; | | 484 | shared_count = list ? atomic_load_relaxed(&list->shared_count) : 0; |
477 | | | 485 | |
478 | /* | | 486 | /* |
479 | * We are done reading from robj and list. Validate our | | 487 | * We are done reading from robj and list. Validate our |
480 | * parking ticket. If it's invalid, do not pass go and do not | | 488 | * parking ticket. If it's invalid, do not pass go and do not |
481 | * collect $200. | | 489 | * collect $200. |
482 | */ | | 490 | */ |
483 | if (!dma_resv_read_valid(robj, ticket)) | | 491 | if (!dma_resv_read_valid(robj, ticket)) |
484 | goto fail; | | 492 | goto fail; |
485 | | | 493 | |
486 | /* Success! */ | | 494 | /* Success! */ |
487 | *listp = list; | | 495 | *listp = list; |
488 | *shared_countp = shared_count; | | 496 | *shared_countp = shared_count; |
489 | return true; | | 497 | return true; |
490 | | | 498 | |
491 | fail: *listp = NULL; | | 499 | fail: *listp = NULL; |
492 | *shared_countp = 0; | | 500 | *shared_countp = 0; |
493 | return false; | | 501 | return false; |
494 | } | | 502 | } |
495 | | | 503 | |
496 | /* | | 504 | /* |
497 | * dma_resv_get_excl_reader(robj, fencep, ticket) | | 505 | * dma_resv_get_excl_reader(robj, fencep, ticket) |
498 | * | | 506 | * |
499 | * Set *fencep to the exclusive fence of robj and return true, or | | 507 | * Set *fencep to the exclusive fence of robj and return true, or |
500 | * set it to NULL and return false if either | | 508 | * set it to NULL and return false if either |
501 | * (a) a writer intervened, or | | 509 | * (a) a writer intervened, or |
502 | * (b) the fence is scheduled to be destroyed after this RCU grace | | 510 | * (b) the fence is scheduled to be destroyed after this RCU grace |
503 | * period, | | 511 | * period, |
504 | * in either case meaning the caller must restart. | | 512 | * in either case meaning the caller must restart. |
505 | * | | 513 | * |
506 | * The value of *fencep is unconditionally initialized on return. | | 514 | * The value of *fencep is unconditionally initialized on return. |
507 | * It may be NULL, if there is no exclusive fence at the moment. | | 515 | * It may be NULL, if there is no exclusive fence at the moment. |
508 | * If nonnull, *fencep is referenced; caller must dma_fence_put. | | 516 | * If nonnull, *fencep is referenced; caller must dma_fence_put. |
509 | */ | | 517 | */ |
510 | static bool | | 518 | static bool |
511 | dma_resv_get_excl_reader(const struct dma_resv *robj, | | 519 | dma_resv_get_excl_reader(const struct dma_resv *robj, |
512 | struct dma_fence **fencep, | | 520 | struct dma_fence **fencep, |
513 | struct dma_resv_read_ticket *ticket) | | 521 | struct dma_resv_read_ticket *ticket) |
514 | { | | 522 | { |
515 | struct dma_fence *fence; | | 523 | struct dma_fence *fence; |
516 | | | 524 | |
517 | /* | | 525 | /* |
518 | * Get the candidate fence pointer. The atomic_load_consume | | 526 | * Get the candidate fence pointer. The atomic_load_consume |
 519 | * pairs with the membar_producer in dma_resv_write_begin. | | 527 | * pairs with the membar_producer in dma_resv_write_begin. |
520 | */ | | 528 | */ |
521 | fence = atomic_load_consume(&robj->fence_excl); | | 529 | fence = atomic_load_consume(&robj->fence_excl); |
522 | | | 530 | |
523 | /* | | 531 | /* |
524 | * The load of robj->fence_excl is atomic, but the caller may | | 532 | * The load of robj->fence_excl is atomic, but the caller may |
525 | * have previously loaded the shared fence list and should | | 533 | * have previously loaded the shared fence list and should |
526 | * restart if its view of the entire dma_resv object is not a | | 534 | * restart if its view of the entire dma_resv object is not a |
527 | * consistent snapshot. | | 535 | * consistent snapshot. |
528 | */ | | 536 | */ |
529 | if (!dma_resv_read_valid(robj, ticket)) | | 537 | if (!dma_resv_read_valid(robj, ticket)) |
530 | goto fail; | | 538 | goto fail; |
531 | | | 539 | |
532 | /* | | 540 | /* |
 533 | * If the fence is already scheduled to go away after this RCU | | 541 | * If the fence is already scheduled to go away after this RCU |
534 | * read section, give up. Otherwise, take a reference so it | | 542 | * read section, give up. Otherwise, take a reference so it |
535 | * won't go away until after dma_fence_put. | | 543 | * won't go away until after dma_fence_put. |
536 | */ | | 544 | */ |
537 | if (fence != NULL && | | 545 | if (fence != NULL && |
538 | (fence = dma_fence_get_rcu(fence)) == NULL) | | 546 | (fence = dma_fence_get_rcu(fence)) == NULL) |
539 | goto fail; | | 547 | goto fail; |
540 | | | 548 | |
541 | /* Success! */ | | 549 | /* Success! */ |
542 | *fencep = fence; | | 550 | *fencep = fence; |
543 | return true; | | 551 | return true; |
544 | | | 552 | |
545 | fail: *fencep = NULL; | | 553 | fail: *fencep = NULL; |
546 | return false; | | 554 | return false; |
547 | } | | 555 | } |
548 | | | 556 | |
549 | /* | | 557 | /* |
550 | * dma_resv_add_excl_fence(robj, fence) | | 558 | * dma_resv_add_excl_fence(robj, fence) |
551 | * | | 559 | * |
552 | * Empty and release all of robj's shared fences, and clear and | | 560 | * Empty and release all of robj's shared fences, and clear and |
553 | * release its exclusive fence. If fence is nonnull, acquire a | | 561 | * release its exclusive fence. If fence is nonnull, acquire a |
554 | * reference to it and save it as robj's exclusive fence. | | 562 | * reference to it and save it as robj's exclusive fence. |
555 | * | | 563 | * |
556 | * Caller must have robj locked. | | 564 | * Caller must have robj locked. |
557 | */ | | 565 | */ |
558 | void | | 566 | void |
559 | dma_resv_add_excl_fence(struct dma_resv *robj, | | 567 | dma_resv_add_excl_fence(struct dma_resv *robj, |
560 | struct dma_fence *fence) | | 568 | struct dma_fence *fence) |
561 | { | | 569 | { |
562 | struct dma_fence *old_fence = robj->fence_excl; | | 570 | struct dma_fence *old_fence = robj->fence_excl; |
563 | struct dma_resv_list *old_list = robj->fence; | | 571 | struct dma_resv_list *old_list = robj->fence; |
564 | uint32_t old_shared_count; | | 572 | uint32_t old_shared_count; |
565 | struct dma_resv_write_ticket ticket; | | 573 | struct dma_resv_write_ticket ticket; |
566 | | | 574 | |
567 | KASSERT(dma_resv_held(robj)); | | 575 | KASSERT(dma_resv_held(robj)); |
568 | | | 576 | |
569 | /* | | 577 | /* |
570 | * If we are setting rather than just removing a fence, acquire | | 578 | * If we are setting rather than just removing a fence, acquire |
571 | * a reference for ourselves. | | 579 | * a reference for ourselves. |
572 | */ | | 580 | */ |
573 | if (fence) | | 581 | if (fence) |
574 | (void)dma_fence_get(fence); | | 582 | (void)dma_fence_get(fence); |
575 | | | 583 | |
576 | /* If there are any shared fences, remember how many. */ | | 584 | /* If there are any shared fences, remember how many. */ |
577 | if (old_list) | | 585 | if (old_list) |
578 | old_shared_count = old_list->shared_count; | | 586 | old_shared_count = old_list->shared_count; |
579 | | | 587 | |
580 | /* Begin an update. Implies membar_producer for fence. */ | | 588 | /* Begin an update. Implies membar_producer for fence. */ |
581 | dma_resv_write_begin(robj, &ticket); | | 589 | dma_resv_write_begin(robj, &ticket); |
582 | | | 590 | |
583 | /* Replace the fence and zero the shared count. */ | | 591 | /* Replace the fence and zero the shared count. */ |
584 | atomic_store_relaxed(&robj->fence_excl, fence); | | 592 | atomic_store_relaxed(&robj->fence_excl, fence); |
585 | if (old_list) | | 593 | if (old_list) |
586 | old_list->shared_count = 0; | | 594 | old_list->shared_count = 0; |
587 | | | 595 | |
588 | /* Commit the update. */ | | 596 | /* Commit the update. */ |
589 | dma_resv_write_commit(robj, &ticket); | | 597 | dma_resv_write_commit(robj, &ticket); |
590 | | | 598 | |
591 | /* Release the old exclusive fence, if any. */ | | 599 | /* Release the old exclusive fence, if any. */ |
592 | if (old_fence) { | | 600 | if (old_fence) { |
593 | dma_fence_put(old_fence); | | 601 | dma_fence_put(old_fence); |
594 | old_fence = NULL; /* paranoia */ | | 602 | old_fence = NULL; /* paranoia */ |
595 | } | | 603 | } |
596 | | | 604 | |
597 | /* Release any old shared fences. */ | | 605 | /* Release any old shared fences. */ |
598 | if (old_list) { | | 606 | if (old_list) { |
599 | while (old_shared_count--) { | | 607 | while (old_shared_count--) { |
600 | dma_fence_put(old_list->shared[old_shared_count]); | | 608 | dma_fence_put(old_list->shared[old_shared_count]); |
601 | /* paranoia */ | | 609 | /* paranoia */ |
602 | old_list->shared[old_shared_count] = NULL; | | 610 | old_list->shared[old_shared_count] = NULL; |
603 | } | | 611 | } |
604 | } | | 612 | } |
605 | } | | 613 | } |
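
A typical caller, sketched with a hypothetical `obj' and `job_fence', publishes the completion fence of a queued write this way:

	/* Locking without a ww context cannot return -EDEADLK. */
	(void)dma_resv_lock(&obj->resv, NULL);
	dma_resv_add_excl_fence(&obj->resv, job_fence);
	dma_resv_unlock(&obj->resv);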
606 | | | 614 | |
607 | /* | | 615 | /* |
608 | * dma_resv_add_shared_fence(robj, fence) | | 616 | * dma_resv_add_shared_fence(robj, fence) |
609 | * | | 617 | * |
610 | * Acquire a reference to fence and add it to robj's shared list. | | 618 | * Acquire a reference to fence and add it to robj's shared list. |
611 | * If any fence was already added with the same context number, | | 619 | * If any fence was already added with the same context number, |
612 | * release it and replace it by this one. | | 620 | * release it and replace it by this one. |
613 | * | | 621 | * |
614 | * Caller must have robj locked, and must have preceded with a | | 622 | * Caller must have robj locked, and must have preceded with a |
615 | * call to dma_resv_reserve_shared for each shared fence | | 623 | * call to dma_resv_reserve_shared for each shared fence |
616 | * added. | | 624 | * added. |
617 | */ | | 625 | */ |
618 | void | | 626 | void |
619 | dma_resv_add_shared_fence(struct dma_resv *robj, | | 627 | dma_resv_add_shared_fence(struct dma_resv *robj, |
620 | struct dma_fence *fence) | | 628 | struct dma_fence *fence) |
621 | { | | 629 | { |
622 | struct dma_resv_list *list = robj->fence; | | 630 | struct dma_resv_list *list = robj->fence; |
623 | struct dma_resv_list *prealloc = robj->robj_prealloc; | | 631 | struct dma_resv_list *prealloc = robj->robj_prealloc; |
624 | struct dma_resv_write_ticket ticket; | | 632 | struct dma_resv_write_ticket ticket; |
625 | struct dma_fence *replace = NULL; | | 633 | struct dma_fence *replace = NULL; |
626 | uint32_t i; | | 634 | uint32_t i; |
627 | | | 635 | |
628 | KASSERT(dma_resv_held(robj)); | | 636 | KASSERT(dma_resv_held(robj)); |
629 | | | 637 | |
630 | /* Acquire a reference to the fence. */ | | 638 | /* Acquire a reference to the fence. */ |
631 | KASSERT(fence != NULL); | | 639 | KASSERT(fence != NULL); |
632 | (void)dma_fence_get(fence); | | 640 | (void)dma_fence_get(fence); |
633 | | | 641 | |
634 | /* Check for a preallocated replacement list. */ | | 642 | /* Check for a preallocated replacement list. */ |
635 | if (prealloc == NULL) { | | 643 | if (prealloc == NULL) { |
636 | /* | | 644 | /* |
637 | * If there is no preallocated replacement list, then | | 645 | * If there is no preallocated replacement list, then |
638 | * there must be room in the current list. | | 646 | * there must be room in the current list. |
639 | */ | | 647 | */ |
640 | KASSERT(list != NULL); | | 648 | KASSERT(list != NULL); |
641 | KASSERT(list->shared_count < list->shared_max); | | 649 | KASSERT(list->shared_count < list->shared_max); |
642 | | | 650 | |
643 | /* Begin an update. Implies membar_producer for fence. */ | | 651 | /* Begin an update. Implies membar_producer for fence. */ |
644 | dma_resv_write_begin(robj, &ticket); | | 652 | dma_resv_write_begin(robj, &ticket); |
645 | | | 653 | |
646 | /* Find a fence with the same context number. */ | | 654 | /* Find a fence with the same context number. */ |
647 | for (i = 0; i < list->shared_count; i++) { | | 655 | for (i = 0; i < list->shared_count; i++) { |
648 | if (list->shared[i]->context == fence->context) { | | 656 | if (list->shared[i]->context == fence->context) { |
649 | replace = list->shared[i]; | | 657 | replace = list->shared[i]; |
650 | atomic_store_relaxed(&list->shared[i], fence); | | 658 | atomic_store_relaxed(&list->shared[i], fence); |
651 | break; | | 659 | break; |
652 | } | | 660 | } |
653 | } | | 661 | } |
654 | | | 662 | |
655 | /* If we didn't find one, add it at the end. */ | | 663 | /* If we didn't find one, add it at the end. */ |
656 | if (i == list->shared_count) { | | 664 | if (i == list->shared_count) { |
657 | atomic_store_relaxed(&list->shared[list->shared_count], | | 665 | atomic_store_relaxed(&list->shared[list->shared_count], |
658 | fence); | | 666 | fence); |
659 | atomic_store_relaxed(&list->shared_count, | | 667 | atomic_store_relaxed(&list->shared_count, |
660 | list->shared_count + 1); | | 668 | list->shared_count + 1); |
661 | } | | 669 | } |
662 | | | 670 | |
663 | /* Commit the update. */ | | 671 | /* Commit the update. */ |
664 | dma_resv_write_commit(robj, &ticket); | | 672 | dma_resv_write_commit(robj, &ticket); |
665 | } else { | | 673 | } else { |
666 | /* | | 674 | /* |
667 | * There is a preallocated replacement list. There may | | 675 | * There is a preallocated replacement list. There may |
668 | * not be a current list. If not, treat it as a zero- | | 676 | * not be a current list. If not, treat it as a zero- |
669 | * length list. | | 677 | * length list. |
670 | */ | | 678 | */ |
671 | uint32_t shared_count = (list == NULL? 0 : list->shared_count); | | 679 | uint32_t shared_count = (list == NULL? 0 : list->shared_count); |
672 | | | 680 | |
673 | /* There had better be room in the preallocated list. */ | | 681 | /* There had better be room in the preallocated list. */ |
674 | KASSERT(shared_count < prealloc->shared_max); | | 682 | KASSERT(shared_count < prealloc->shared_max); |
675 | | | 683 | |
676 | /* | | 684 | /* |
677 | * Copy the fences over, but replace if we find one | | 685 | * Copy the fences over, but replace if we find one |
678 | * with the same context number. | | 686 | * with the same context number. |
679 | */ | | 687 | */ |
680 | for (i = 0; i < shared_count; i++) { | | 688 | for (i = 0; i < shared_count; i++) { |
681 | if (replace == NULL && | | 689 | if (replace == NULL && |
682 | list->shared[i]->context == fence->context) { | | 690 | list->shared[i]->context == fence->context) { |
683 | replace = list->shared[i]; | | 691 | replace = list->shared[i]; |
684 | prealloc->shared[i] = fence; | | 692 | prealloc->shared[i] = fence; |
685 | } else { | | 693 | } else { |
686 | prealloc->shared[i] = list->shared[i]; | | 694 | prealloc->shared[i] = list->shared[i]; |
687 | } | | 695 | } |
688 | } | | 696 | } |
689 | prealloc->shared_count = shared_count; | | 697 | prealloc->shared_count = shared_count; |
690 | | | 698 | |
691 | /* If we didn't find one, add it at the end. */ | | 699 | /* If we didn't find one, add it at the end. */ |
692 | if (replace == NULL) | | 700 | if (replace == NULL) { |
| | | 701 | KASSERT(prealloc->shared_count < prealloc->shared_max); |
693 | prealloc->shared[prealloc->shared_count++] = fence; | | 702 | prealloc->shared[prealloc->shared_count++] = fence; |
| | | 703 | } |
694 | | | 704 | |
695 | /* | | 705 | /* |
696 | * Now ready to replace the list. Begin an update. | | 706 | * Now ready to replace the list. Begin an update. |
697 | * Implies membar_producer for fence and prealloc. | | 707 | * Implies membar_producer for fence and prealloc. |
698 | */ | | 708 | */ |
699 | dma_resv_write_begin(robj, &ticket); | | 709 | dma_resv_write_begin(robj, &ticket); |
700 | | | 710 | |
701 | /* Replace the list. */ | | 711 | /* Replace the list. */ |
702 | atomic_store_relaxed(&robj->fence, prealloc); | | 712 | atomic_store_relaxed(&robj->fence, prealloc); |
703 | robj->robj_prealloc = NULL; | | 713 | robj->robj_prealloc = NULL; |
704 | | | 714 | |
705 | /* Commit the update. */ | | 715 | /* Commit the update. */ |
706 | dma_resv_write_commit(robj, &ticket); | | 716 | dma_resv_write_commit(robj, &ticket); |
707 | | | 717 | |
708 | /* | | 718 | /* |
709 | * If there is an old list, free it when convenient. | | 719 | * If there is an old list, free it when convenient. |
710 | * (We are not in a position at this point to sleep | | 720 | * (We are not in a position at this point to sleep |
711 | * waiting for activity on all CPUs.) | | 721 | * waiting for activity on all CPUs.) |
712 | */ | | 722 | */ |
713 | if (list) | | 723 | if (list) |
714 | objlist_defer_free(list); | | 724 | objlist_defer_free(list); |
715 | } | | 725 | } |
716 | | | 726 | |
717 | /* Release a fence if we replaced it. */ | | 727 | /* Release a fence if we replaced it. */ |
718 | if (replace) { | | 728 | if (replace) { |
719 | dma_fence_put(replace); | | 729 | dma_fence_put(replace); |
720 | replace = NULL; /* paranoia */ | | 730 | replace = NULL; /* paranoia */ |
721 | } | | 731 | } |
722 | } | | 732 | } |
723 | | | 733 | |
724 | /* | | 734 | /* |
725 | * dma_resv_get_excl_rcu(robj) | | 735 | * dma_resv_get_excl_rcu(robj) |
726 | * | | 736 | * |
727 | * Note: Caller need not call this from an RCU read section. | | 737 | * Note: Caller need not call this from an RCU read section. |
728 | */ | | 738 | */ |
729 | struct dma_fence * | | 739 | struct dma_fence * |
730 | dma_resv_get_excl_rcu(const struct dma_resv *robj) | | 740 | dma_resv_get_excl_rcu(const struct dma_resv *robj) |
731 | { | | 741 | { |
732 | struct dma_fence *fence; | | 742 | struct dma_fence *fence; |
733 | | | 743 | |
734 | rcu_read_lock(); | | 744 | rcu_read_lock(); |
735 | fence = dma_fence_get_rcu_safe(&robj->fence_excl); | | 745 | fence = dma_fence_get_rcu_safe(&robj->fence_excl); |
736 | rcu_read_unlock(); | | 746 | rcu_read_unlock(); |
737 | | | 747 | |
738 | return fence; | | 748 | return fence; |
739 | } | | 749 | } |
740 | | | 750 | |
741 | /* | | 751 | /* |
742 | * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp) | | 752 | * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp) |
743 | * | | 753 | * |
744 | * Get a snapshot of the exclusive and shared fences of robj. The | | 754 | * Get a snapshot of the exclusive and shared fences of robj. The |
745 | * shared fences are returned as a pointer *sharedp to an array, | | 755 | * shared fences are returned as a pointer *sharedp to an array, |
746 | * to be freed by the caller with kfree, of *nsharedp elements. | | 756 | * to be freed by the caller with kfree, of *nsharedp elements. |
747 | * If fencep is null, then add the exclusive fence, if any, at the | | 757 | * If fencep is null, then add the exclusive fence, if any, at the |
748 | * end of the array instead. | | 758 | * end of the array instead. |
749 | * | | 759 | * |
750 | * Returns zero on success, negative (Linux-style) error code on | | 760 | * Returns zero on success, negative (Linux-style) error code on |
751 | * failure. On failure, *fencep, *nsharedp, and *sharedp are | | 761 | * failure. On failure, *fencep, *nsharedp, and *sharedp are |
752 | * untouched. | | 762 | * untouched. |
753 | */ | | 763 | */ |
754 | int | | 764 | int |
755 | dma_resv_get_fences_rcu(const struct dma_resv *robj, | | 765 | dma_resv_get_fences_rcu(const struct dma_resv *robj, |
756 | struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp) | | 766 | struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp) |
757 | { | | 767 | { |
758 | const struct dma_resv_list *list = NULL; | | 768 | const struct dma_resv_list *list = NULL; |
759 | struct dma_fence *fence = NULL; | | 769 | struct dma_fence *fence = NULL; |
760 | struct dma_fence **shared = NULL; | | 770 | struct dma_fence **shared = NULL; |
761 | unsigned shared_alloc = 0, shared_count, i; | | 771 | unsigned shared_alloc = 0, shared_count, i; |
762 | struct dma_resv_read_ticket ticket; | | 772 | struct dma_resv_read_ticket ticket; |
763 | | | 773 | |
764 | top: KASSERT(fence == NULL); | | 774 | top: KASSERT(fence == NULL); |
765 | | | 775 | |
766 | /* Enter an RCU read section and get a read ticket. */ | | 776 | /* Enter an RCU read section and get a read ticket. */ |
767 | rcu_read_lock(); | | 777 | rcu_read_lock(); |
768 | dma_resv_read_begin(robj, &ticket); | | 778 | dma_resv_read_begin(robj, &ticket); |
769 | | | 779 | |
770 | /* If there is a shared list, grab it. */ | | 780 | /* If there is a shared list, grab it. */ |
771 | if (!dma_resv_get_shared_reader(robj, &list, &shared_count, &ticket)) | | 781 | if (!dma_resv_get_shared_reader(robj, &list, &shared_count, &ticket)) |
772 | goto restart; | | 782 | goto restart; |
773 | if (list != NULL) { | | 783 | if (list != NULL) { |
774 | | | 784 | |
775 | /* | | 785 | /* |
776 | * Avoid arithmetic overflow with `+ 1' below. | | 786 | * Avoid arithmetic overflow with `+ 1' below. |
777 | * Strictly speaking we don't need this if the caller | | 787 | * Strictly speaking we don't need this if the caller |
778 | * specified fencep or if there is no exclusive fence, | | 788 | * specified fencep or if there is no exclusive fence, |
779 | * but it is simpler to not have to consider those | | 789 | * but it is simpler to not have to consider those |
780 | * cases. | | 790 | * cases. |
781 | */ | | 791 | */ |
782 | KASSERT(shared_count <= list->shared_max); | | 792 | KASSERT(shared_count <= list->shared_max); |
 783 | if (list->shared_max == UINT_MAX) { | | 793 | if (list->shared_max == UINT_MAX) { |
 784 | rcu_read_unlock(); | | 794 | rcu_read_unlock(); |
 | return -ENOMEM; | | | return -ENOMEM; |
 | } | | | } |
785 | | | 795 | |
786 | /* Check whether we have a buffer. */ | | 796 | /* Check whether we have a buffer. */ |
787 | if (shared == NULL) { | | 797 | if (shared == NULL) { |
788 | /* | | 798 | /* |
789 | * We don't have a buffer yet. Try to allocate | | 799 | * We don't have a buffer yet. Try to allocate |
790 | * one without waiting. | | 800 | * one without waiting. |
791 | */ | | 801 | */ |
792 | shared_alloc = list->shared_max + 1; | | 802 | shared_alloc = list->shared_max + 1; |
793 | shared = kcalloc(shared_alloc, sizeof(shared[0]), | | 803 | shared = kcalloc(shared_alloc, sizeof(shared[0]), |
794 | GFP_NOWAIT); | | 804 | GFP_NOWAIT); |
795 | if (shared == NULL) { | | 805 | if (shared == NULL) { |
796 | /* | | 806 | /* |
797 | * Couldn't do it immediately. Back | | 807 | * Couldn't do it immediately. Back |
798 | * out of RCU and allocate one with | | 808 | * out of RCU and allocate one with |
799 | * waiting. | | 809 | * waiting. |
800 | */ | | 810 | */ |
801 | rcu_read_unlock(); | | 811 | rcu_read_unlock(); |
802 | shared = kcalloc(shared_alloc, | | 812 | shared = kcalloc(shared_alloc, |
803 | sizeof(shared[0]), GFP_KERNEL); | | 813 | sizeof(shared[0]), GFP_KERNEL); |
804 | if (shared == NULL) | | 814 | if (shared == NULL) |
805 | return -ENOMEM; | | 815 | return -ENOMEM; |
806 | goto top; | | 816 | goto top; |
807 | } | | 817 | } |
808 | } else if (shared_alloc < list->shared_max + 1) { | | 818 | } else if (shared_alloc < list->shared_max + 1) { |
809 | /* | | 819 | /* |
810 | * We have a buffer but it's too small. We're | | 820 | * We have a buffer but it's too small. We're |
811 | * already racing in this case, so just back | | 821 | * already racing in this case, so just back |
812 | * out and wait to allocate a bigger one. | | 822 | * out and wait to allocate a bigger one. |
813 | */ | | 823 | */ |
814 | shared_alloc = list->shared_max + 1; | | 824 | shared_alloc = list->shared_max + 1; |
815 | rcu_read_unlock(); | | 825 | rcu_read_unlock(); |
816 | kfree(shared); | | 826 | kfree(shared); |
817 | shared = kcalloc(shared_alloc, sizeof(shared[0]), | | 827 | shared = kcalloc(shared_alloc, sizeof(shared[0]), |
818 | GFP_KERNEL); | | 828 | GFP_KERNEL); |
819 | if (shared == NULL) | | 829 | if (shared == NULL) |
820 | return -ENOMEM; | | 830 | return -ENOMEM; |
821 | goto top; | | 831 | goto top; |
822 | } | | 832 | } |
823 | | | 833 | |
824 | /* | | 834 | /* |
825 | * We got a buffer large enough. Copy into the buffer | | 835 | * We got a buffer large enough. Copy into the buffer |
826 | * and record the number of elements. Could safely use | | 836 | * and record the number of elements. Could safely use |
827 | * memcpy here, because even if we race with a writer | | 837 | * memcpy here, because even if we race with a writer |
828 | * it'll invalidate the read ticket and we'll start | | 838 | * it'll invalidate the read ticket and we'll start |
829 | * over, but atomic_load in a loop will pacify kcsan. | | 839 | * over, but atomic_load in a loop will pacify kcsan. |
830 | */ | | 840 | */ |
831 | for (i = 0; i < shared_count; i++) | | 841 | for (i = 0; i < shared_count; i++) |
832 | shared[i] = atomic_load_relaxed(&list->shared[i]); | | 842 | shared[i] = atomic_load_relaxed(&list->shared[i]); |
833 | | | 843 | |
834 | /* If anything changed while we were copying, restart. */ | | 844 | /* If anything changed while we were copying, restart. */ |
835 | if (!dma_resv_read_valid(robj, &ticket)) | | 845 | if (!dma_resv_read_valid(robj, &ticket)) |
836 | goto restart; | | 846 | goto restart; |
837 | } | | 847 | } |
838 | | | 848 | |
839 | /* If there is an exclusive fence, grab it. */ | | 849 | /* If there is an exclusive fence, grab it. */ |
840 | KASSERT(fence == NULL); | | 850 | KASSERT(fence == NULL); |
841 | if (!dma_resv_get_excl_reader(robj, &fence, &ticket)) | | 851 | if (!dma_resv_get_excl_reader(robj, &fence, &ticket)) |
842 | goto restart; | | 852 | goto restart; |
843 | | | 853 | |
844 | /* | | 854 | /* |
845 | * Try to get a reference to all of the shared fences. | | 855 | * Try to get a reference to all of the shared fences. |
846 | */ | | 856 | */ |
847 | for (i = 0; i < shared_count; i++) { | | 857 | for (i = 0; i < shared_count; i++) { |
848 | if (dma_fence_get_rcu(atomic_load_relaxed(&shared[i])) == NULL) | | 858 | if (dma_fence_get_rcu(atomic_load_relaxed(&shared[i])) == NULL) |
849 | goto put_restart; | | 859 | goto put_restart; |
850 | } | | 860 | } |
851 | | | 861 | |
852 | /* Success! */ | | 862 | /* Success! */ |
853 | rcu_read_unlock(); | | 863 | rcu_read_unlock(); |
854 | KASSERT(shared_count <= shared_alloc); | | 864 | KASSERT(shared_count <= shared_alloc); |
855 | KASSERT(shared_alloc == 0 || shared_count < shared_alloc); | | 865 | KASSERT(shared_alloc == 0 || shared_count < shared_alloc); |
856 | KASSERT(shared_alloc <= UINT_MAX); | | 866 | KASSERT(shared_alloc <= UINT_MAX); |
857 | if (fencep) { | | 867 | if (fencep) { |
858 | *fencep = fence; | | 868 | *fencep = fence; |
859 | } else if (fence) { | | 869 | } else if (fence) { |
860 | if (shared_count) { | | 870 | if (shared_count) { |
861 | shared[shared_count++] = fence; | | 871 | shared[shared_count++] = fence; |
862 | } else { | | 872 | } else { |
863 | shared = kmalloc(sizeof(shared[0]), GFP_KERNEL); | | 873 | shared = kmalloc(sizeof(shared[0]), GFP_KERNEL); |
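| | | | /* |
| | | | * XXX Editor's note: unlike the kcalloc calls earlier in |
| | | | * this function, this kmalloc result is not checked for |
| | | | * NULL before it is dereferenced. |
| | | | */ |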
864 | shared[0] = fence; | | 874 | shared[0] = fence; |
865 | shared_count = 1; | | 875 | shared_count = 1; |
866 | } | | 876 | } |
867 | } | | 877 | } |
868 | *nsharedp = shared_count; | | 878 | *nsharedp = shared_count; |
869 | *sharedp = shared; | | 879 | *sharedp = shared; |
870 | return 0; | | 880 | return 0; |
871 | | | 881 | |
872 | put_restart: | | 882 | put_restart: |
873 | /* Back out. */ | | 883 | /* Back out. */ |
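| | | | /* "i --> 0" parses as "i-- > 0": count i back down to zero. */ |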
874 | while (i --> 0) { | | 884 | while (i --> 0) { |
875 | dma_fence_put(shared[i]); | | 885 | dma_fence_put(shared[i]); |
876 | shared[i] = NULL; /* paranoia */ | | 886 | shared[i] = NULL; /* paranoia */ |
877 | } | | 887 | } |
878 | if (fence) { | | 888 | if (fence) { |
879 | dma_fence_put(fence); | | 889 | dma_fence_put(fence); |
880 | fence = NULL; | | 890 | fence = NULL; |
881 | } | | 891 | } |
882 | | | 892 | |
883 | restart: | | 893 | restart: |
884 | KASSERT(fence == NULL); | | 894 | KASSERT(fence == NULL); |
885 | rcu_read_unlock(); | | 895 | rcu_read_unlock(); |
886 | goto top; | | 896 | goto top; |
887 | } | | 897 | } |
888 | | | 898 | |
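| | | | /* |
| | | | * Illustrative sketch (editor's addition, not part of the file): |
| | | | * a typical caller snapshots the fences with |
| | | | * dma_resv_get_fences_rcu, uses them, and drops the references. |
| | | | * The parameter types are assumed from the out-parameter |
| | | | * assignments above; robj is the caller's reservation object. |
| | | | */ |
| | | | #if 0 /* example only */ |
| | | | struct dma_fence *excl = NULL, **shared = NULL; |
| | | | unsigned shared_count = 0, i; |
| | | | int ret; |
| | | | |
| | | | ret = dma_resv_get_fences_rcu(robj, &excl, &shared_count, &shared); |
| | | | if (ret) |
| | | | return ret; |
| | | | for (i = 0; i < shared_count; i++) { |
| | | | (void)dma_fence_wait(shared[i], /*intr*/true); |
| | | | dma_fence_put(shared[i]); |
| | | | } |
| | | | kfree(shared); |
| | | | if (excl != NULL) { |
| | | | (void)dma_fence_wait(excl, /*intr*/true); |
| | | | dma_fence_put(excl); |
| | | | } |
| | | | #endif |
| | | | |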
889 | /* | | 899 | /* |
890 | * dma_resv_copy_fences(dst, src) | | 900 | * dma_resv_copy_fences(dst, src) |
891 | * | | 901 | * |
892 | * Copy the exclusive fence and all the shared fences from src to | | 902 | * Copy the exclusive fence and all the shared fences from src to |
893 | * dst. | | 903 | * dst. |
894 | * | | 904 | * |
895 | * Caller must have dst locked. | | 905 | * Caller must have dst locked. |
896 | */ | | 906 | */ |
897 | int | | 907 | int |
898 | dma_resv_copy_fences(struct dma_resv *dst_robj, | | 908 | dma_resv_copy_fences(struct dma_resv *dst_robj, |
899 | const struct dma_resv *src_robj) | | 909 | const struct dma_resv *src_robj) |
900 | { | | 910 | { |
901 | const struct dma_resv_list *src_list; | | 911 | const struct dma_resv_list *src_list; |
902 | struct dma_resv_list *dst_list = NULL; | | 912 | struct dma_resv_list *dst_list = NULL; |
903 | struct dma_resv_list *old_list; | | 913 | struct dma_resv_list *old_list; |
904 | struct dma_fence *fence = NULL; | | 914 | struct dma_fence *fence = NULL; |
905 | struct dma_fence *old_fence; | | 915 | struct dma_fence *old_fence; |
906 | uint32_t shared_count, i; | | 916 | uint32_t shared_count, i; |
907 | struct dma_resv_read_ticket read_ticket; | | 917 | struct dma_resv_read_ticket read_ticket; |
908 | struct dma_resv_write_ticket write_ticket; | | 918 | struct dma_resv_write_ticket write_ticket; |
909 | | | 919 | |
910 | KASSERT(dma_resv_held(dst_robj)); | | 920 | KASSERT(dma_resv_held(dst_robj)); |
911 | | | 921 | |
912 | top: KASSERT(fence == NULL); | | 922 | top: KASSERT(fence == NULL); |
913 | | | 923 | |
914 | /* Enter an RCU read section and get a read ticket. */ | | 924 | /* Enter an RCU read section and get a read ticket. */ |
915 | rcu_read_lock(); | | 925 | rcu_read_lock(); |
916 | dma_resv_read_begin(src_robj, &read_ticket); | | 926 | dma_resv_read_begin(src_robj, &read_ticket); |
917 | | | 927 | |
918 | /* Get the shared list. */ | | 928 | /* Get the shared list. */ |
919 | if (!dma_resv_get_shared_reader(src_robj, &src_list, &shared_count, | | 929 | if (!dma_resv_get_shared_reader(src_robj, &src_list, &shared_count, |
920 | &read_ticket)) | | 930 | &read_ticket)) |
921 | goto restart; | | 931 | goto restart; |
922 | if (src_list != NULL) { | | 932 | if (src_list) { |
923 | /* Allocate a new list. */ | | 933 | /* Allocate a new list, if necessary. */ |
924 | dst_list = objlist_tryalloc(shared_count); | | | |
925 | if (dst_list == NULL) | | 934 | if (dst_list == NULL) |
926 | return -ENOMEM; | | 935 | dst_list = objlist_tryalloc(shared_count); |
| | | 936 | if (dst_list == NULL || dst_list->shared_max < shared_count) { |
| | | 937 | rcu_read_unlock(); |
| | | 938 | if (dst_list) { |
| | | 939 | objlist_free(dst_list); |
| | | 940 | dst_list = NULL; |
| | | 941 | } |
| | | 942 | dst_list = objlist_alloc(shared_count); |
| | | 943 | dst_list->shared_count = 0; /* paranoia */ |
| | | 944 | goto top; |
| | | 945 | } |
927 | | | 946 | |
928 | /* Copy over all fences that are not yet signalled. */ | | 947 | /* Copy over all fences that are not yet signalled. */ |
929 | dst_list->shared_count = 0; | | 948 | dst_list->shared_count = 0; |
930 | for (i = 0; i < shared_count; i++) { | | 949 | for (i = 0; i < shared_count; i++) { |
931 | KASSERT(fence == NULL); | | 950 | KASSERT(fence == NULL); |
932 | fence = atomic_load_relaxed(&src_list->shared[i]); | | 951 | fence = atomic_load_relaxed(&src_list->shared[i]); |
933 | if ((fence = dma_fence_get_rcu(fence)) == NULL) | | 952 | if ((fence = dma_fence_get_rcu(fence)) == NULL) |
934 | goto restart; | | 953 | goto restart; |
935 | if (dma_fence_is_signaled(fence)) { | | 954 | if (dma_fence_is_signaled(fence)) { |
936 | dma_fence_put(fence); | | 955 | dma_fence_put(fence); |
937 | fence = NULL; | | 956 | fence = NULL; |
938 | continue; | | 957 | continue; |
939 | } | | 958 | } |
940 | dst_list->shared[dst_list->shared_count++] = fence; | | 959 | dst_list->shared[dst_list->shared_count++] = fence; |
941 | fence = NULL; | | 960 | fence = NULL; |
942 | } | | 961 | } |
943 | | | 962 | |
944 | /* If anything changed while we were copying, restart. */ | | 963 | /* If anything changed while we were copying, restart. */ |
945 | if (!dma_resv_read_valid(src_robj, &read_ticket)) | | 964 | if (!dma_resv_read_valid(src_robj, &read_ticket)) |
946 | goto restart; | | 965 | goto restart; |
947 | } | | 966 | } |
948 | | | 967 | |
949 | /* Get the exclusive fence. */ | | 968 | /* Get the exclusive fence. */ |
950 | KASSERT(fence == NULL); | | 969 | KASSERT(fence == NULL); |
951 | if (!dma_resv_get_excl_reader(src_robj, &fence, &read_ticket)) | | 970 | if (!dma_resv_get_excl_reader(src_robj, &fence, &read_ticket)) |
952 | goto restart; | | 971 | goto restart; |
953 | | | 972 | |
954 | /* All done with src; exit the RCU read section. */ | | 973 | /* All done with src; exit the RCU read section. */ |
955 | rcu_read_unlock(); | | 974 | rcu_read_unlock(); |
956 | | | 975 | |
957 | /* | | 976 | /* |
958 | * We now have a snapshot of the shared and exclusive fences of | | 977 | * We now have a snapshot of the shared and exclusive fences of |
959 | * src_robj and we have acquired references to them so they | | 978 | * src_robj and we have acquired references to them so they |
960 | * won't go away. Transfer them over to dst_robj, releasing | | 979 | * won't go away. Transfer them over to dst_robj, releasing |
961 | * references to any that were there. | | 980 | * references to any that were there. |
962 | */ | | 981 | */ |
963 | | | 982 | |
964 | /* Get the old shared and exclusive fences, if any. */ | | 983 | /* Get the old shared and exclusive fences, if any. */ |
965 | old_list = dst_robj->fence; | | 984 | old_list = dst_robj->fence; |
966 | old_fence = dst_robj->fence_excl; | | 985 | old_fence = dst_robj->fence_excl; |
967 | | | 986 | |
968 | /* | | 987 | /* |
969 | * Begin an update. Implies membar_producer for dst_list and | | 988 | * Begin an update. Implies membar_producer for dst_list and |
970 | * fence. | | 989 | * fence. |
971 | */ | | 990 | */ |
972 | dma_resv_write_begin(dst_robj, &write_ticket); | | 991 | dma_resv_write_begin(dst_robj, &write_ticket); |
973 | | | 992 | |
974 | /* Replace the fences. */ | | 993 | /* Replace the fences. */ |
975 | atomic_store_relaxed(&dst_robj->fence, dst_list); | | 994 | atomic_store_relaxed(&dst_robj->fence, dst_list); |
976 | atomic_store_relaxed(&dst_robj->fence_excl, fence); | | 995 | atomic_store_relaxed(&dst_robj->fence_excl, fence); |
977 | | | 996 | |
978 | /* Commit the update. */ | | 997 | /* Commit the update. */ |
979 | dma_resv_write_commit(dst_robj, &write_ticket); | | 998 | dma_resv_write_commit(dst_robj, &write_ticket); |
980 | | | 999 | |
981 | /* Release the old exclusive fence, if any. */ | | 1000 | /* Release the old exclusive fence, if any. */ |
982 | if (old_fence) { | | 1001 | if (old_fence) { |
983 | dma_fence_put(old_fence); | | 1002 | dma_fence_put(old_fence); |
984 | old_fence = NULL; /* paranoia */ | | 1003 | old_fence = NULL; /* paranoia */ |
985 | } | | 1004 | } |
986 | | | 1005 | |
987 | /* Release any old shared fences. */ | | 1006 | /* Release any old shared fences. */ |
988 | if (old_list) { | | 1007 | if (old_list) { |
989 | for (i = old_list->shared_count; i --> 0;) { | | 1008 | for (i = old_list->shared_count; i --> 0;) { |
990 | dma_fence_put(old_list->shared[i]); | | 1009 | dma_fence_put(old_list->shared[i]); |
991 | old_list->shared[i] = NULL; /* paranoia */ | | 1010 | old_list->shared[i] = NULL; /* paranoia */ |
992 | } | | 1011 | } |
993 | objlist_free(old_list); | | 1012 | objlist_free(old_list); |
994 | old_list = NULL; /* paranoia */ | | 1013 | old_list = NULL; /* paranoia */ |
995 | } | | 1014 | } |
996 | | | 1015 | |
997 | /* Success! */ | | 1016 | /* Success! */ |
998 | return 0; | | 1017 | return 0; |
999 | | | 1018 | |
1000 | restart: | | 1019 | restart: |
1001 | KASSERT(fence == NULL); | | 1020 | KASSERT(fence == NULL); |
1002 | rcu_read_unlock(); | | 1021 | rcu_read_unlock(); |
1003 | if (dst_list) { | | 1022 | if (dst_list) { |
1004 | for (i = dst_list->shared_count; i --> 0;) { | | 1023 | for (i = dst_list->shared_count; i --> 0;) { |
1005 | dma_fence_put(dst_list->shared[i]); | | 1024 | dma_fence_put(dst_list->shared[i]); |
1006 | dst_list->shared[i] = NULL; /* paranoia */ | | 1025 | dst_list->shared[i] = NULL; /* paranoia */ |
1007 | } | | 1026 | } |
1008 | objlist_free(dst_list); | | 1027 | /* reuse dst_list allocation for the next attempt */ |
1009 | dst_list = NULL; | | | |
1010 | } | | 1028 | } |
1011 | goto top; | | 1029 | goto top; |
1012 | } | | 1030 | } |
1013 | | | 1031 | |
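| | | | /* |
| | | | * Illustrative sketch (editor's addition): dst must be held |
| | | | * across the copy. The dma_resv_lock/dma_resv_unlock names are |
| | | | * the standard dma_resv API, assumed rather than taken from |
| | | | * this file. |
| | | | */ |
| | | | #if 0 /* example only */ |
| | | | int ret; |
| | | | |
| | | | ret = dma_resv_lock(dst_robj, NULL); |
| | | | if (ret) |
| | | | return ret; |
| | | | ret = dma_resv_copy_fences(dst_robj, src_robj); |
| | | | dma_resv_unlock(dst_robj); |
| | | | #endif |
| | | | |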
1014 | /* | | 1032 | /* |
1015 | * dma_resv_test_signaled_rcu(robj, shared) | | 1033 | * dma_resv_test_signaled_rcu(robj, shared) |
1016 | * | | 1034 | * |
1017 | * If shared is true, test whether all of the shared fences are | | 1035 | * If shared is true, test whether all of the shared fences are |
1018 | * signalled, or if there are none, test whether the exclusive | | 1036 | * signalled, or if there are none, test whether the exclusive |
1019 | * fence is signalled. If shared is false, test only whether the | | 1037 | * fence is signalled. If shared is false, test only whether the |
1020 | * exclusive fence is signalled. | | 1038 | * exclusive fence is signalled. |
1021 | * | | 1039 | * |
1022 | * XXX Why, when shared is true, does this test the exclusive | | 1040 | * XXX Why, when shared is true, does this test the exclusive |
1023 | * fence only if there are no shared fences? This makes no sense. | | 1041 | * fence only if there are no shared fences? This makes no sense. |
1024 | */ | | 1042 | */ |
1025 | bool | | 1043 | bool |
1026 | dma_resv_test_signaled_rcu(const struct dma_resv *robj, | | 1044 | dma_resv_test_signaled_rcu(const struct dma_resv *robj, |
1027 | bool shared) | | 1045 | bool shared) |
1028 | { | | 1046 | { |
1029 | struct dma_resv_read_ticket ticket; | | 1047 | struct dma_resv_read_ticket ticket; |
1030 | const struct dma_resv_list *list; | | 1048 | const struct dma_resv_list *list; |
1031 | struct dma_fence *fence = NULL; | | 1049 | struct dma_fence *fence = NULL; |
1032 | uint32_t i, shared_count; | | 1050 | uint32_t i, shared_count; |
1033 | bool signaled = true; | | 1051 | bool signaled = true; |
1034 | | | 1052 | |
1035 | top: KASSERT(fence == NULL); | | 1053 | top: KASSERT(fence == NULL); |
1036 | | | 1054 | |
1037 | /* Enter an RCU read section and get a read ticket. */ | | 1055 | /* Enter an RCU read section and get a read ticket. */ |
1038 | rcu_read_lock(); | | 1056 | rcu_read_lock(); |
1039 | dma_resv_read_begin(robj, &ticket); | | 1057 | dma_resv_read_begin(robj, &ticket); |
1040 | | | 1058 | |
1041 | /* If shared is requested and there is a shared list, test it. */ | | 1059 | /* If shared is requested and there is a shared list, test it. */ |
1042 | if (shared) { | | 1060 | if (shared) { |
1043 | if (!dma_resv_get_shared_reader(robj, &list, &shared_count, | | 1061 | if (!dma_resv_get_shared_reader(robj, &list, &shared_count, |
1044 | &ticket)) | | 1062 | &ticket)) |
1045 | goto restart; | | 1063 | goto restart; |
1046 | } else { | | 1064 | } else { |
1047 | list = NULL; | | 1065 | list = NULL; |
1048 | shared_count = 0; | | 1066 | shared_count = 0; |
1049 | } | | 1067 | } |
1050 | if (list != NULL) { | | 1068 | if (list != NULL) { |
1051 | /* | | 1069 | /* |
1052 | * For each fence, if it is going away, restart. | | 1070 | * For each fence, if it is going away, restart. |
1053 | * Otherwise, acquire a reference to it to test whether | | 1071 | * Otherwise, acquire a reference to it to test whether |
1054 | * it is signalled. Stop if we find any that is not | | 1072 | * it is signalled. Stop if we find any that is not |
1055 | * signalled. | | 1073 | * signalled. |
1056 | */ | | 1074 | */ |
1057 | for (i = 0; i < shared_count; i++) { | | 1075 | for (i = 0; i < shared_count; i++) { |
1058 | KASSERT(fence == NULL); | | 1076 | KASSERT(fence == NULL); |
1059 | fence = atomic_load_relaxed(&list->shared[i]); | | 1077 | fence = atomic_load_relaxed(&list->shared[i]); |
1060 | if ((fence = dma_fence_get_rcu(fence)) == NULL) | | 1078 | if ((fence = dma_fence_get_rcu(fence)) == NULL) |
1061 | goto restart; | | 1079 | goto restart; |
1062 | signaled &= dma_fence_is_signaled(fence); | | 1080 | signaled &= dma_fence_is_signaled(fence); |
1063 | dma_fence_put(fence); | | 1081 | dma_fence_put(fence); |
1064 | fence = NULL; | | 1082 | fence = NULL; |
1065 | if (!signaled) | | 1083 | if (!signaled) |
1066 | goto out; | | 1084 | goto out; |
1067 | } | | 1085 | } |
1068 | | | 1086 | |
1069 | /* If anything changed while we were testing, restart. */ | | 1087 | /* If anything changed while we were testing, restart. */ |
1070 | if (!dma_resv_read_valid(robj, &ticket)) | | 1088 | if (!dma_resv_read_valid(robj, &ticket)) |
1071 | goto restart; | | 1089 | goto restart; |
1072 | } | | 1090 | } |
1073 | if (shared_count) | | 1091 | if (shared_count) |
1074 | goto out; | | 1092 | goto out; |
1075 | | | 1093 | |
1076 | /* If there is an exclusive fence, test it. */ | | 1094 | /* If there is an exclusive fence, test it. */ |
1077 | KASSERT(fence == NULL); | | 1095 | KASSERT(fence == NULL); |
1078 | if (!dma_resv_get_excl_reader(robj, &fence, &ticket)) | | 1096 | if (!dma_resv_get_excl_reader(robj, &fence, &ticket)) |
1079 | goto restart; | | 1097 | goto restart; |
1080 | if (fence != NULL) { | | 1098 | if (fence != NULL) { |
1081 | /* Test whether it is signalled. If no, stop. */ | | 1099 | /* Test whether it is signalled. If no, stop. */ |
1082 | signaled &= dma_fence_is_signaled(fence); | | 1100 | signaled &= dma_fence_is_signaled(fence); |
1083 | dma_fence_put(fence); | | 1101 | dma_fence_put(fence); |
1084 | fence = NULL; | | 1102 | fence = NULL; |
1085 | if (!signaled) | | 1103 | if (!signaled) |
1086 | goto out; | | 1104 | goto out; |
1087 | } | | 1105 | } |
1088 | | | 1106 | |
1089 | out: KASSERT(fence == NULL); | | 1107 | out: KASSERT(fence == NULL); |
1090 | rcu_read_unlock(); | | 1108 | rcu_read_unlock(); |
1091 | return signaled; | | 1109 | return signaled; |
1092 | | | 1110 | |
1093 | restart: | | 1111 | restart: |
1094 | KASSERT(fence == NULL); | | 1112 | KASSERT(fence == NULL); |
1095 | rcu_read_unlock(); | | 1113 | rcu_read_unlock(); |
1096 | goto top; | | 1114 | goto top; |
1097 | } | | 1115 | } |
1098 | | | 1116 | |
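| | | | /* |
| | | | * Illustrative sketch (editor's addition): a non-blocking check |
| | | | * that all work queued against a buffer has completed. |
| | | | */ |
| | | | #if 0 /* example only */ |
| | | | if (dma_resv_test_signaled_rcu(robj, /*shared*/true)) { |
| | | | /* All fences signalled; safe to reclaim the buffer. */ |
| | | | } |
| | | | #endif |
| | | | |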
1099 | /* | | 1117 | /* |
1100 | * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout) | | 1118 | * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout) |
1101 | * | | 1119 | * |
1102 | * If shared is true, wait for all of the shared fences to be | | 1120 | * If shared is true, wait for all of the shared fences to be |
1103 | * signalled, or if there are none, wait for the exclusive fence | | 1121 | * signalled, or if there are none, wait for the exclusive fence |
1104 | * to be signalled. If shared is false, wait only for the | | 1122 | * to be signalled. If shared is false, wait only for the |
1105 | * exclusive fence to be signalled. If timeout is zero, don't | | 1123 | * exclusive fence to be signalled. If timeout is zero, don't |
1106 | * wait, only test. | | 1124 | * wait, only test. |
1107 | * | | 1125 | * |
1108 | * XXX Why, when shared is true, does this wait for the exclusive | | 1126 | * XXX Why, when shared is true, does this wait for the exclusive |
1109 | * fence only if there are no shared fences? This makes no | | 1127 | * fence only if there are no shared fences? This makes no |
1110 | * sense. | | 1128 | * sense. |
1111 | */ | | 1129 | */ |
1112 | long | | 1130 | long |
1113 | dma_resv_wait_timeout_rcu(const struct dma_resv *robj, | | 1131 | dma_resv_wait_timeout_rcu(const struct dma_resv *robj, |
1114 | bool shared, bool intr, unsigned long timeout) | | 1132 | bool shared, bool intr, unsigned long timeout) |
1115 | { | | 1133 | { |
1116 | struct dma_resv_read_ticket ticket; | | 1134 | struct dma_resv_read_ticket ticket; |
1117 | const struct dma_resv_list *list; | | 1135 | const struct dma_resv_list *list; |
1118 | struct dma_fence *fence = NULL; | | 1136 | struct dma_fence *fence = NULL; |
1119 | uint32_t i, shared_count; | | 1137 | uint32_t i, shared_count; |
1120 | long ret; | | 1138 | long ret; |
1121 | | | 1139 | |
1122 | if (timeout == 0) | | 1140 | if (timeout == 0) |
1123 | return dma_resv_test_signaled_rcu(robj, shared); | | 1141 | return dma_resv_test_signaled_rcu(robj, shared); |
1124 | | | 1142 | |
1125 | top: KASSERT(fence == NULL); | | 1143 | top: KASSERT(fence == NULL); |
1126 | | | 1144 | |
1127 | /* Enter an RCU read section and get a read ticket. */ | | 1145 | /* Enter an RCU read section and get a read ticket. */ |
1128 | rcu_read_lock(); | | 1146 | rcu_read_lock(); |
1129 | dma_resv_read_begin(robj, &ticket); | | 1147 | dma_resv_read_begin(robj, &ticket); |
1130 | | | 1148 | |
1131 | /* If shared is requested and there is a shared list, wait on it. */ | | 1149 | /* If shared is requested and there is a shared list, wait on it. */ |
1132 | if (shared) { | | 1150 | if (shared) { |
1133 | if (!dma_resv_get_shared_reader(robj, &list, &shared_count, | | 1151 | if (!dma_resv_get_shared_reader(robj, &list, &shared_count, |
1134 | &ticket)) | | 1152 | &ticket)) |
1135 | goto restart; | | 1153 | goto restart; |
1136 | } else { | | 1154 | } else { |
1137 | list = NULL; | | 1155 | list = NULL; |
1138 | shared_count = 0; | | 1156 | shared_count = 0; |
1139 | } | | 1157 | } |
1140 | if (list != NULL) { | | 1158 | if (list != NULL) { |
1141 | /* | | 1159 | /* |
1142 | * For each fence, if it is going away, restart. | | 1160 | * For each fence, if it is going away, restart. |
1143 | * Otherwise, acquire a reference to it to test whether | | 1161 | * Otherwise, acquire a reference to it to test whether |
1144 | * it is signalled. Stop and wait if we find any that | | 1162 | * it is signalled. Stop and wait if we find any that |
1145 | * is not signalled. | | 1163 | * is not signalled. |
1146 | */ | | 1164 | */ |
1147 | for (i = 0; i < shared_count; i++) { | | 1165 | for (i = 0; i < shared_count; i++) { |
1148 | KASSERT(fence == NULL); | | 1166 | KASSERT(fence == NULL); |
1149 | fence = atomic_load_relaxed(&list->shared[i]); | | 1167 | fence = atomic_load_relaxed(&list->shared[i]); |
1150 | if ((fence = dma_fence_get_rcu(fence)) == NULL) | | 1168 | if ((fence = dma_fence_get_rcu(fence)) == NULL) |
1151 | goto restart; | | 1169 | goto restart; |
1152 | if (!dma_fence_is_signaled(fence)) | | 1170 | if (!dma_fence_is_signaled(fence)) |
1153 | goto wait; | | 1171 | goto wait; |
1154 | dma_fence_put(fence); | | 1172 | dma_fence_put(fence); |
1155 | fence = NULL; | | 1173 | fence = NULL; |
1156 | } | | 1174 | } |
1157 | | | 1175 | |
1158 | /* If anything changed while we were testing, restart. */ | | 1176 | /* If anything changed while we were testing, restart. */ |
1159 | if (!dma_resv_read_valid(robj, &ticket)) | | 1177 | if (!dma_resv_read_valid(robj, &ticket)) |
1160 | goto restart; | | 1178 | goto restart; |
1161 | } | | 1179 | } |
1162 | if (shared_count) | | 1180 | if (shared_count) |
1163 | goto out; | | 1181 | goto out; |
1164 | | | 1182 | |
1165 | /* If there is an exclusive fence, test it. */ | | 1183 | /* If there is an exclusive fence, test it. */ |
1166 | KASSERT(fence == NULL); | | 1184 | KASSERT(fence == NULL); |
1167 | if (!dma_resv_get_excl_reader(robj, &fence, &ticket)) | | 1185 | if (!dma_resv_get_excl_reader(robj, &fence, &ticket)) |
1168 | goto restart; | | 1186 | goto restart; |
1169 | if (fence != NULL) { | | 1187 | if (fence != NULL) { |
1170 | /* Test whether it is signalled. If no, wait. */ | | 1188 | /* Test whether it is signalled. If no, wait. */ |
1171 | if (!dma_fence_is_signaled(fence)) | | 1189 | if (!dma_fence_is_signaled(fence)) |
1172 | goto wait; | | 1190 | goto wait; |
1173 | dma_fence_put(fence); | | 1191 | dma_fence_put(fence); |
1174 | fence = NULL; | | 1192 | fence = NULL; |
1175 | } | | 1193 | } |
1176 | | | 1194 | |
1177 | out: /* Success! Return the number of ticks left. */ | | 1195 | out: /* Success! Return the number of ticks left. */ |
1178 | rcu_read_unlock(); | | 1196 | rcu_read_unlock(); |
1179 | KASSERT(fence == NULL); | | 1197 | KASSERT(fence == NULL); |
1180 | return timeout; | | 1198 | return timeout; |
1181 | | | 1199 | |
1182 | restart: | | 1200 | restart: |
1183 | KASSERT(fence == NULL); | | 1201 | KASSERT(fence == NULL); |
1184 | rcu_read_unlock(); | | 1202 | rcu_read_unlock(); |
1185 | goto top; | | 1203 | goto top; |
1186 | | | 1204 | |
1187 | wait: | | 1205 | wait: |
1188 | /* | | 1206 | /* |
1189 | * Exit the RCU read section, wait for it, and release the | | 1207 | * Exit the RCU read section, wait for it, and release the |
1190 | * fence when we're done. If we time out or fail, bail. | | 1208 | * fence when we're done. If we time out or fail, bail. |
1191 | * Otherwise, go back to the top. | | 1209 | * Otherwise, go back to the top. |
1192 | */ | | 1210 | */ |
1193 | KASSERT(fence != NULL); | | 1211 | KASSERT(fence != NULL); |
1194 | rcu_read_unlock(); | | 1212 | rcu_read_unlock(); |
1195 | ret = dma_fence_wait_timeout(fence, intr, timeout); | | 1213 | ret = dma_fence_wait_timeout(fence, intr, timeout); |
1196 | dma_fence_put(fence); | | 1214 | dma_fence_put(fence); |
1197 | fence = NULL; | | 1215 | fence = NULL; |
1198 | if (ret <= 0) | | 1216 | if (ret <= 0) |
1199 | return ret; | | 1217 | return ret; |
1200 | KASSERT(ret <= timeout); | | 1218 | KASSERT(ret <= timeout); |
1201 | timeout = ret; | | 1219 | timeout = ret; |
1202 | goto top; | | 1220 | goto top; |
1203 | } | | 1221 | } |
1204 | | | 1222 | |
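| | | | /* |
| | | | * Illustrative sketch (editor's addition): wait up to a second, |
| | | | * interruptibly, for all fences. The timeout is in ticks, per |
| | | | * the comment above; mstohz is the usual NetBSD conversion, and |
| | | | * the -ETIMEDOUT convention here is the caller's choice. |
| | | | */ |
| | | | #if 0 /* example only */ |
| | | | long ret; |
| | | | |
| | | | ret = dma_resv_wait_timeout_rcu(robj, /*shared*/true, |
| | | | /*intr*/true, mstohz(1000)); |
| | | | if (ret < 0) |
| | | | return ret; /* failed or interrupted */ |
| | | | if (ret == 0) |
| | | | return -ETIMEDOUT; /* timed out */ |
| | | | #endif |
| | | | |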
1205 | /* | | 1223 | /* |
1206 | * dma_resv_poll_init(rpoll, lock) | | 1224 | * dma_resv_poll_init(rpoll, lock) |
1207 | * | | 1225 | * |
1208 | * Initialize reservation poll state. | | 1226 | * Initialize reservation poll state. |
1209 | */ | | 1227 | */ |
1210 | void | | 1228 | void |
1211 | dma_resv_poll_init(struct dma_resv_poll *rpoll) | | 1229 | dma_resv_poll_init(struct dma_resv_poll *rpoll) |
1212 | { | | 1230 | { |
1213 | | | 1231 | |
1214 | mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM); | | 1232 | mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM); |
1215 | selinit(&rpoll->rp_selq); | | 1233 | selinit(&rpoll->rp_selq); |
1216 | rpoll->rp_claimed = 0; | | 1234 | rpoll->rp_claimed = 0; |
1217 | } | | 1235 | } |
1218 | | | 1236 | |
1219 | /* | | 1237 | /* |
1220 | * dma_resv_poll_fini(rpoll) | | 1238 | * dma_resv_poll_fini(rpoll) |
1221 | * | | 1239 | * |
1222 | * Release any resources associated with reservation poll state. | | 1240 | * Release any resources associated with reservation poll state. |
1223 | */ | | 1241 | */ |
1224 | void | | 1242 | void |
1225 | dma_resv_poll_fini(struct dma_resv_poll *rpoll) | | 1243 | dma_resv_poll_fini(struct dma_resv_poll *rpoll) |
1226 | { | | 1244 | { |
1227 | | | 1245 | |
1228 | KASSERT(rpoll->rp_claimed == 0); | | 1246 | KASSERT(rpoll->rp_claimed == 0); |
1229 | seldestroy(&rpoll->rp_selq); | | 1247 | seldestroy(&rpoll->rp_selq); |
1230 | mutex_destroy(&rpoll->rp_lock); | | 1248 | mutex_destroy(&rpoll->rp_lock); |
1231 | } | | 1249 | } |
1232 | | | 1250 | |
1233 | /* | | 1251 | /* |
1234 | * dma_resv_poll_cb(fence, fcb) | | 1252 | * dma_resv_poll_cb(fence, fcb) |
1235 | * | | 1253 | * |
1236 | * Callback to notify a reservation poll that a fence has | | 1254 | * Callback to notify a reservation poll that a fence has |
1237 | * completed. Notify any waiters and allow the next poller to | | 1255 | * completed. Notify any waiters and allow the next poller to |
1238 | * claim the callback. | | 1256 | * claim the callback. |
1239 | * | | 1257 | * |
1240 | * If one thread is waiting for the exclusive fence only, and we | | 1258 | * If one thread is waiting for the exclusive fence only, and we |
1241 | * spuriously notify them about a shared fence, tough. | | 1259 | * spuriously notify them about a shared fence, tough. |
1242 | */ | | 1260 | */ |
1243 | static void | | 1261 | static void |
1244 | dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb) | | 1262 | dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb) |
1245 | { | | 1263 | { |
1246 | struct dma_resv_poll *rpoll = container_of(fcb, | | 1264 | struct dma_resv_poll *rpoll = container_of(fcb, |
1247 | struct dma_resv_poll, rp_fcb); | | 1265 | struct dma_resv_poll, rp_fcb); |
1248 | | | 1266 | |
1249 | mutex_enter(&rpoll->rp_lock); | | 1267 | mutex_enter(&rpoll->rp_lock); |
1250 | selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT); | | 1268 | selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT); |
1251 | rpoll->rp_claimed = 0; | | 1269 | rpoll->rp_claimed = 0; |
1252 | mutex_exit(&rpoll->rp_lock); | | 1270 | mutex_exit(&rpoll->rp_lock); |
1253 | } | | 1271 | } |
1254 | | | 1272 | |
1255 | /* | | 1273 | /* |
1256 | * dma_resv_do_poll(robj, events, rpoll) | | 1274 | * dma_resv_do_poll(robj, events, rpoll) |
1257 | * | | 1275 | * |
1258 | * Poll for reservation object events using the reservation poll | | 1276 | * Poll for reservation object events using the reservation poll |
1259 | * state in rpoll: | | 1277 | * state in rpoll: |
1260 | * | | 1278 | * |
1261 | * - POLLOUT wait for all fences, shared and exclusive | | 1279 | * - POLLOUT wait for all fences, shared and exclusive |
1262 | * - POLLIN wait for the exclusive fence | | 1280 | * - POLLIN wait for the exclusive fence |
1263 | * | | 1281 | * |
1264 | * Return the subset of events in events that are ready. If any | | 1282 | * Return the subset of events in events that are ready. If any |
1265 | * are requested but not ready, arrange to be notified with | | 1283 | * are requested but not ready, arrange to be notified with |
1266 | * selnotify when they are. | | 1284 | * selnotify when they are. |
1267 | */ | | 1285 | */ |
1268 | int | | 1286 | int |
1269 | dma_resv_do_poll(const struct dma_resv *robj, int events, | | 1287 | dma_resv_do_poll(const struct dma_resv *robj, int events, |
1270 | struct dma_resv_poll *rpoll) | | 1288 | struct dma_resv_poll *rpoll) |
1271 | { | | 1289 | { |
1272 | struct dma_resv_read_ticket ticket; | | 1290 | struct dma_resv_read_ticket ticket; |
1273 | const struct dma_resv_list *list; | | 1291 | const struct dma_resv_list *list; |
1274 | struct dma_fence *fence = NULL; | | 1292 | struct dma_fence *fence = NULL; |
1275 | uint32_t i, shared_count; | | 1293 | uint32_t i, shared_count; |
1276 | int revents; | | 1294 | int revents; |
1277 | bool recorded = false; /* curlwp is on the selq */ | | 1295 | bool recorded = false; /* curlwp is on the selq */ |
1278 | bool claimed = false; /* we claimed the callback */ | | 1296 | bool claimed = false; /* we claimed the callback */ |
1279 | bool callback = false; /* we requested a callback */ | | 1297 | bool callback = false; /* we requested a callback */ |
1280 | | | 1298 | |
1281 | /* | | 1299 | /* |
1282 | * Start with the maximal set of events that could be ready. | | 1300 | * Start with the maximal set of events that could be ready. |
1283 | * We will eliminate the events that are definitely not ready | | 1301 | * We will eliminate the events that are definitely not ready |
1284 | * as we go, at the same time adding callbacks to notify us | | 1302 | * as we go, at the same time adding callbacks to notify us |
1285 | * when they may become ready. | | 1303 | * when they may become ready. |
1286 | */ | | 1304 | */ |
1287 | revents = events & (POLLIN|POLLOUT); | | 1305 | revents = events & (POLLIN|POLLOUT); |
1288 | if (revents == 0) | | 1306 | if (revents == 0) |
1289 | return 0; | | 1307 | return 0; |
1290 | | | 1308 | |
1291 | top: KASSERT(fence == NULL); | | 1309 | top: KASSERT(fence == NULL); |
1292 | | | 1310 | |
1293 | /* Enter an RCU read section and get a read ticket. */ | | 1311 | /* Enter an RCU read section and get a read ticket. */ |
1294 | rcu_read_lock(); | | 1312 | rcu_read_lock(); |
1295 | dma_resv_read_begin(robj, &ticket); | | 1313 | dma_resv_read_begin(robj, &ticket); |
1296 | | | 1314 | |
1297 | /* If we want to wait for all fences, get the shared list. */ | | 1315 | /* If we want to wait for all fences, get the shared list. */ |
1298 | if (events & POLLOUT) { | | 1316 | if (events & POLLOUT) { |
1299 | if (!dma_resv_get_shared_reader(robj, &list, &shared_count, | | 1317 | if (!dma_resv_get_shared_reader(robj, &list, &shared_count, |
1300 | &ticket)) | | 1318 | &ticket)) |
1301 | goto restart; | | 1319 | goto restart; |
1302 | } else { | | 1320 | } else { |
1303 | list = NULL; | | 1321 | list = NULL; |
1304 | shared_count = 0; | | 1322 | shared_count = 0; |
1305 | } | | 1323 | } |
1306 | if (list != NULL) do { | | 1324 | if (list != NULL) do { |
1307 | /* | | 1325 | /* |
1308 | * For each fence, if it is going away, restart. | | 1326 | * For each fence, if it is going away, restart. |
1309 | * Otherwise, acquire a reference to it to test whether | | 1327 | * Otherwise, acquire a reference to it to test whether |
1310 | * it is signalled. Stop and request a callback if we | | 1328 | * it is signalled. Stop and request a callback if we |
1311 | * find any that is not signalled. | | 1329 | * find any that is not signalled. |
1312 | */ | | 1330 | */ |
1313 | for (i = 0; i < shared_count; i++) { | | 1331 | for (i = 0; i < shared_count; i++) { |
1314 | KASSERT(fence == NULL); | | 1332 | KASSERT(fence == NULL); |
1315 | fence = atomic_load_relaxed(&list->shared[i]); | | 1333 | fence = atomic_load_relaxed(&list->shared[i]); |
1316 | if ((fence = dma_fence_get_rcu(fence)) == NULL) | | 1334 | if ((fence = dma_fence_get_rcu(fence)) == NULL) |
1317 | goto restart; | | 1335 | goto restart; |
1318 | if (!dma_fence_is_signaled(fence)) { | | 1336 | if (!dma_fence_is_signaled(fence)) { |
1319 | dma_fence_put(fence); | | 1337 | dma_fence_put(fence); |
1320 | fence = NULL; | | 1338 | fence = NULL; |
1321 | break; | | 1339 | break; |
1322 | } | | 1340 | } |
1323 | dma_fence_put(fence); | | 1341 | dma_fence_put(fence); |
1324 | fence = NULL; | | 1342 | fence = NULL; |
1325 | } | | 1343 | } |
1326 | | | 1344 | |
1327 | /* If all shared fences have been signalled, move on. */ | | 1345 | /* If all shared fences have been signalled, move on. */ |
1328 | if (i == shared_count) | | 1346 | if (i == shared_count) |
1329 | break; | | 1347 | break; |
1330 | | | 1348 | |
1331 | /* Put ourselves on the selq if we haven't already. */ | | 1349 | /* Put ourselves on the selq if we haven't already. */ |
1332 | if (!recorded) | | 1350 | if (!recorded) |
1333 | goto record; | | 1351 | goto record; |
1334 | | | 1352 | |
1335 | /* | | 1353 | /* |
1336 | * If someone else claimed the callback, or we already | | 1354 | * If someone else claimed the callback, or we already |
1337 | * requested it, we're guaranteed to be notified, so | | 1355 | * requested it, we're guaranteed to be notified, so |
1338 | * assume the event is not ready. | | 1356 | * assume the event is not ready. |
1339 | */ | | 1357 | */ |
1340 | if (!claimed || callback) { | | 1358 | if (!claimed || callback) { |
1341 | revents &= ~POLLOUT; | | 1359 | revents &= ~POLLOUT; |
1342 | break; | | 1360 | break; |
1343 | } | | 1361 | } |
1344 | | | 1362 | |
1345 | /* | | 1363 | /* |
1346 | * Otherwise, find the first fence that is not | | 1364 | * Otherwise, find the first fence that is not |
1347 | * signalled, request the callback, and clear POLLOUT | | 1365 | * signalled, request the callback, and clear POLLOUT |
1348 | * from the possible ready events. If they are all | | 1366 | * from the possible ready events. If they are all |
1349 | * signalled, leave POLLOUT set; we will simulate the | | 1367 | * signalled, leave POLLOUT set; we will simulate the |
1350 | * callback later. | | 1368 | * callback later. |
1351 | */ | | 1369 | */ |
1352 | for (i = 0; i < shared_count; i++) { | | 1370 | for (i = 0; i < shared_count; i++) { |
1353 | KASSERT(fence == NULL); | | 1371 | KASSERT(fence == NULL); |
1354 | fence = atomic_load_relaxed(&list->shared[i]); | | 1372 | fence = atomic_load_relaxed(&list->shared[i]); |
1355 | if ((fence = dma_fence_get_rcu(fence)) == NULL) | | 1373 | if ((fence = dma_fence_get_rcu(fence)) == NULL) |
1356 | goto restart; | | 1374 | goto restart; |
1357 | if (!dma_fence_add_callback(fence, &rpoll->rp_fcb, | | 1375 | if (!dma_fence_add_callback(fence, &rpoll->rp_fcb, |
1358 | dma_resv_poll_cb)) { | | 1376 | dma_resv_poll_cb)) { |
1359 | dma_fence_put(fence); | | 1377 | dma_fence_put(fence); |
1360 | fence = NULL; | | 1378 | fence = NULL; |
1361 | revents &= ~POLLOUT; | | 1379 | revents &= ~POLLOUT; |
1362 | callback = true; | | 1380 | callback = true; |
1363 | break; | | 1381 | break; |
1364 | } | | 1382 | } |
1365 | dma_fence_put(fence); | | 1383 | dma_fence_put(fence); |
1366 | fence = NULL; | | 1384 | fence = NULL; |
1367 | } | | 1385 | } |
1368 | } while (0); | | 1386 | } while (0); |
1369 | | | 1387 | |
1370 | /* We always wait for at least the exclusive fence, so get it. */ | | 1388 | /* We always wait for at least the exclusive fence, so get it. */ |
1371 | KASSERT(fence == NULL); | | 1389 | KASSERT(fence == NULL); |
1372 | if (!dma_resv_get_excl_reader(robj, &fence, &ticket)) | | 1390 | if (!dma_resv_get_excl_reader(robj, &fence, &ticket)) |
1373 | goto restart; | | 1391 | goto restart; |
1374 | if (fence != NULL) do { | | 1392 | if (fence != NULL) do { |
1375 | /* | | 1393 | /* |
1376 | * Test whether it is signalled. If not, stop and | | 1394 | * Test whether it is signalled. If not, stop and |
1377 | * request a callback. | | 1395 | * request a callback. |
1378 | */ | | 1396 | */ |
1379 | if (dma_fence_is_signaled(fence)) | | 1397 | if (dma_fence_is_signaled(fence)) |
1380 | break; | | 1398 | break; |
1381 | | | 1399 | |
1382 | /* Put ourselves on the selq if we haven't already. */ | | 1400 | /* Put ourselves on the selq if we haven't already. */ |
1383 | if (!recorded) { | | 1401 | if (!recorded) { |
1384 | dma_fence_put(fence); | | 1402 | dma_fence_put(fence); |
1385 | fence = NULL; | | 1403 | fence = NULL; |
1386 | goto record; | | 1404 | goto record; |
1387 | } | | 1405 | } |
1388 | | | 1406 | |
1389 | /* | | 1407 | /* |
1390 | * If someone else claimed the callback, or we already | | 1408 | * If someone else claimed the callback, or we already |
1391 | * requested it, we're guaranteed to be notified, so | | 1409 | * requested it, we're guaranteed to be notified, so |
1392 | * assume the event is not ready. | | 1410 | * assume the event is not ready. |
1393 | */ | | 1411 | */ |
1394 | if (!claimed || callback) { | | 1412 | if (!claimed || callback) { |
1395 | revents = 0; | | 1413 | revents = 0; |
1396 | break; | | 1414 | break; |
1397 | } | | 1415 | } |
1398 | | | 1416 | |
1399 | /* | | 1417 | /* |
1400 | * Otherwise, try to request the callback, and clear | | 1418 | * Otherwise, try to request the callback, and clear |
1401 | * all possible ready events. If the fence has been | | 1419 | * all possible ready events. If the fence has been |
1402 | * signalled in the interim, leave the events set; we | | 1420 | * signalled in the interim, leave the events set; we |
1403 | * will simulate the callback later. | | 1421 | * will simulate the callback later. |
1404 | */ | | 1422 | */ |
1405 | if (!dma_fence_add_callback(fence, &rpoll->rp_fcb, | | 1423 | if (!dma_fence_add_callback(fence, &rpoll->rp_fcb, |
1406 | dma_resv_poll_cb)) { | | 1424 | dma_resv_poll_cb)) { |
1407 | revents = 0; | | 1425 | revents = 0; |
1408 | callback = true; | | 1426 | callback = true; |
1409 | break; | | 1427 | break; |
1410 | } | | 1428 | } |
1411 | } while (0); | | 1429 | } while (0); |
1412 | if (fence != NULL) { | | 1430 | if (fence != NULL) { |
1413 | dma_fence_put(fence); | | 1431 | dma_fence_put(fence); |
1414 | fence = NULL; | | 1432 | fence = NULL; |
1415 | } | | 1433 | } |
1416 | | | 1434 | |
1417 | /* All done reading the fences. */ | | 1435 | /* All done reading the fences. */ |
1418 | rcu_read_unlock(); | | 1436 | rcu_read_unlock(); |
1419 | | | 1437 | |
1420 | if (claimed && !callback) { | | 1438 | if (claimed && !callback) { |
1421 | /* | | 1439 | /* |
1422 | * We claimed the callback but we didn't actually | | 1440 | * We claimed the callback but we didn't actually |
1423 | * request it because a fence was signalled while we | | 1441 | * request it because a fence was signalled while we |
1424 | * were claiming it. Call it ourselves now. The | | 1442 | * were claiming it. Call it ourselves now. The |
1425 | * callback neither uses the fence nor relies on holding | | 1443 | * callback neither uses the fence nor relies on holding |
1426 | * any of the fence locks, so this is safe. | | 1444 | * any of the fence locks, so this is safe. |
1427 | */ | | 1445 | */ |
1428 | dma_resv_poll_cb(NULL, &rpoll->rp_fcb); | | 1446 | dma_resv_poll_cb(NULL, &rpoll->rp_fcb); |
1429 | } | | 1447 | } |
1430 | return revents; | | 1448 | return revents; |
1431 | | | 1449 | |
1432 | restart: | | 1450 | restart: |
1433 | KASSERT(fence == NULL); | | 1451 | KASSERT(fence == NULL); |
1434 | rcu_read_unlock(); | | 1452 | rcu_read_unlock(); |
1435 | goto top; | | 1453 | goto top; |
1436 | | | 1454 | |
1437 | record: | | 1455 | record: |
1438 | KASSERT(fence == NULL); | | 1456 | KASSERT(fence == NULL); |
1439 | rcu_read_unlock(); | | 1457 | rcu_read_unlock(); |
1440 | mutex_enter(&rpoll->rp_lock); | | 1458 | mutex_enter(&rpoll->rp_lock); |
1441 | selrecord(curlwp, &rpoll->rp_selq); | | 1459 | selrecord(curlwp, &rpoll->rp_selq); |
1442 | if (!rpoll->rp_claimed) | | 1460 | if (!rpoll->rp_claimed) |
1443 | claimed = rpoll->rp_claimed = true; | | 1461 | claimed = rpoll->rp_claimed = true; |
1444 | mutex_exit(&rpoll->rp_lock); | | 1462 | mutex_exit(&rpoll->rp_lock); |
1445 | recorded = true; | | 1463 | recorded = true; |
1446 | goto top; | | 1464 | goto top; |
1447 | } | | 1465 | } |
1448 | | | 1466 | |
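| | | | /* |
| | | | * Illustrative sketch (editor's addition): the reservation poll |
| | | | * life cycle. The softc layout and poll entry point are |
| | | | * hypothetical; only the dma_resv_* calls come from this file. |
| | | | */ |
| | | | #if 0 /* example only */ |
| | | | /* at attach time */ |
| | | | dma_resv_poll_init(&sc->sc_rpoll); |
| | | | |
| | | | /* in the driver's poll(2) entry point */ |
| | | | revents = dma_resv_do_poll(sc->sc_resv, events, &sc->sc_rpoll); |
| | | | |
| | | | /* at detach time, once no pollers remain */ |
| | | | dma_resv_poll_fini(&sc->sc_rpoll); |
| | | | #endif |
| | | | |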
1449 | /* | | 1467 | /* |
1450 | * dma_resv_kqfilter(robj, kn, rpoll) | | 1468 | * dma_resv_kqfilter(robj, kn, rpoll) |
1451 | * | | 1469 | * |
1452 | * Kqueue filter for reservation objects. Currently not | | 1470 | * Kqueue filter for reservation objects. Currently not |
1453 | * implemented because the logic to implement it is nontrivial, | | 1471 | * implemented because the logic to implement it is nontrivial, |
1454 | * and userland will presumably never use it, so it would be | | 1472 | * and userland will presumably never use it, so it would be |
1455 | * dangerous to add never-tested complex code paths to the kernel. | | 1473 | * dangerous to add never-tested complex code paths to the kernel. |
1456 | */ | | 1474 | */ |
1457 | int | | 1475 | int |
1458 | dma_resv_kqfilter(const struct dma_resv *robj, | | 1476 | dma_resv_kqfilter(const struct dma_resv *robj, |
1459 | struct knote *kn, struct dma_resv_poll *rpoll) | | 1477 | struct knote *kn, struct dma_resv_poll *rpoll) |
1460 | { | | 1478 | { |
1461 | | | 1479 | |
1462 | return EINVAL; | | 1480 | return EINVAL; |
1463 | } | | 1481 | } |