| @@ -1,1069 +1,1070 @@ | | | @@ -1,1069 +1,1070 @@ |
1 | /* $NetBSD: linux_reservation.c,v 1.11 2018/09/03 18:02:11 riastradh Exp $ */ | | 1 | /* $NetBSD: linux_reservation.c,v 1.11.8.1 2021/07/08 11:23:28 martin Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2018 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2018 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Taylor R. Campbell. | | 8 | * by Taylor R. Campbell. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | #include <sys/cdefs.h> | | 32 | #include <sys/cdefs.h> |
33 | __KERNEL_RCSID(0, "$NetBSD: linux_reservation.c,v 1.11 2018/09/03 18:02:11 riastradh Exp $"); | | 33 | __KERNEL_RCSID(0, "$NetBSD: linux_reservation.c,v 1.11.8.1 2021/07/08 11:23:28 martin Exp $"); |
34 | | | 34 | |
35 | #include <sys/param.h> | | 35 | #include <sys/param.h> |
36 | #include <sys/poll.h> | | 36 | #include <sys/poll.h> |
37 | #include <sys/select.h> | | 37 | #include <sys/select.h> |
38 | | | 38 | |
39 | #include <linux/fence.h> | | 39 | #include <linux/fence.h> |
40 | #include <linux/reservation.h> | | 40 | #include <linux/reservation.h> |
41 | #include <linux/ww_mutex.h> | | 41 | #include <linux/ww_mutex.h> |
42 | | | 42 | |
43 | DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned); | | 43 | DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned); |
44 | | | 44 | |
45 | static struct reservation_object_list * | | 45 | static struct reservation_object_list * |
46 | objlist_tryalloc(uint32_t n) | | 46 | objlist_tryalloc(uint32_t n) |
47 | { | | 47 | { |
48 | struct reservation_object_list *list; | | 48 | struct reservation_object_list *list; |
49 | | | 49 | |
50 | list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP); | | 50 | list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP); |
51 | if (list == NULL) | | 51 | if (list == NULL) |
52 | return NULL; | | 52 | return NULL; |
53 | list->shared_max = n; | | 53 | list->shared_max = n; |
54 | | | 54 | |
55 | return list; | | 55 | return list; |
56 | } | | 56 | } |
57 | | | 57 | |
58 | static void | | 58 | static void |
59 | objlist_free(struct reservation_object_list *list) | | 59 | objlist_free(struct reservation_object_list *list) |
60 | { | | 60 | { |
61 | uint32_t n = list->shared_max; | | 61 | uint32_t n = list->shared_max; |
62 | | | 62 | |
63 | kmem_free(list, offsetof(typeof(*list), shared[n])); | | 63 | kmem_free(list, offsetof(typeof(*list), shared[n])); |
64 | } | | 64 | } |
65 | | | 65 | |
66 | static void | | 66 | static void |
67 | objlist_free_cb(struct rcu_head *rcu) | | 67 | objlist_free_cb(struct rcu_head *rcu) |
68 | { | | 68 | { |
69 | struct reservation_object_list *list = container_of(rcu, | | 69 | struct reservation_object_list *list = container_of(rcu, |
70 | struct reservation_object_list, rol_rcu); | | 70 | struct reservation_object_list, rol_rcu); |
71 | | | 71 | |
72 | objlist_free(list); | | 72 | objlist_free(list); |
73 | } | | 73 | } |
74 | | | 74 | |
75 | static void | | 75 | static void |
76 | objlist_defer_free(struct reservation_object_list *list) | | 76 | objlist_defer_free(struct reservation_object_list *list) |
77 | { | | 77 | { |
78 | | | 78 | |
79 | call_rcu(&list->rol_rcu, objlist_free_cb); | | 79 | call_rcu(&list->rol_rcu, objlist_free_cb); |
80 | } | | 80 | } |
81 | | | 81 | |
82 | /* | | 82 | /* |
83 | * reservation_object_init(robj) | | 83 | * reservation_object_init(robj) |
84 | * | | 84 | * |
85 | * Initialize a reservation object. Caller must later destroy it | | 85 | * Initialize a reservation object. Caller must later destroy it |
86 | * with reservation_object_fini. | | 86 | * with reservation_object_fini. |
87 | */ | | 87 | */ |
88 | void | | 88 | void |
89 | reservation_object_init(struct reservation_object *robj) | | 89 | reservation_object_init(struct reservation_object *robj) |
90 | { | | 90 | { |
91 | | | 91 | |
92 | ww_mutex_init(&robj->lock, &reservation_ww_class); | | 92 | ww_mutex_init(&robj->lock, &reservation_ww_class); |
93 | robj->robj_version = 0; | | 93 | robj->robj_version = 0; |
94 | robj->robj_fence = NULL; | | 94 | robj->robj_fence = NULL; |
95 | robj->robj_list = NULL; | | 95 | robj->robj_list = NULL; |
96 | robj->robj_prealloc = NULL; | | 96 | robj->robj_prealloc = NULL; |
97 | } | | 97 | } |
98 | | | 98 | |
99 | /* | | 99 | /* |
100 | * reservation_object_fini(robj) | | 100 | * reservation_object_fini(robj) |
101 | * | | 101 | * |
102 | * Destroy a reservation object, freeing any memory that had been | | 102 | * Destroy a reservation object, freeing any memory that had been |
103 | * allocated for it. Caller must have exclusive access to it. | | 103 | * allocated for it. Caller must have exclusive access to it. |
104 | */ | | 104 | */ |
105 | void | | 105 | void |
106 | reservation_object_fini(struct reservation_object *robj) | | 106 | reservation_object_fini(struct reservation_object *robj) |
107 | { | | 107 | { |
108 | unsigned i; | | 108 | unsigned i; |
109 | | | 109 | |
110 | if (robj->robj_prealloc) | | 110 | if (robj->robj_prealloc) |
111 | objlist_free(robj->robj_prealloc); | | 111 | objlist_free(robj->robj_prealloc); |
112 | if (robj->robj_list) { | | 112 | if (robj->robj_list) { |
113 | for (i = 0; i < robj->robj_list->shared_count; i++) | | 113 | for (i = 0; i < robj->robj_list->shared_count; i++) |
114 | fence_put(robj->robj_list->shared[i]); | | 114 | fence_put(robj->robj_list->shared[i]); |
115 | objlist_free(robj->robj_list); | | 115 | objlist_free(robj->robj_list); |
116 | } | | 116 | } |
117 | if (robj->robj_fence) | | 117 | if (robj->robj_fence) |
118 | fence_put(robj->robj_fence); | | 118 | fence_put(robj->robj_fence); |
119 | ww_mutex_destroy(&robj->lock); | | 119 | ww_mutex_destroy(&robj->lock); |
120 | } | | 120 | } |
121 | | | 121 | |
122 | /* | | 122 | /* |
123 | * reservation_object_held(roj) | | 123 | * reservation_object_held(roj) |
124 | * | | 124 | * |
125 | * True if robj is locked. | | 125 | * True if robj is locked. |
126 | */ | | 126 | */ |
127 | bool | | 127 | bool |
128 | reservation_object_held(struct reservation_object *robj) | | 128 | reservation_object_held(struct reservation_object *robj) |
129 | { | | 129 | { |
130 | | | 130 | |
131 | return ww_mutex_is_locked(&robj->lock); | | 131 | return ww_mutex_is_locked(&robj->lock); |
132 | } | | 132 | } |
133 | | | 133 | |
134 | /* | | 134 | /* |
135 | * reservation_object_get_excl(robj) | | 135 | * reservation_object_get_excl(robj) |
136 | * | | 136 | * |
137 | * Return a pointer to the exclusive fence of the reservation | | 137 | * Return a pointer to the exclusive fence of the reservation |
138 | * object robj. | | 138 | * object robj. |
139 | * | | 139 | * |
140 | * Caller must have robj locked. | | 140 | * Caller must have robj locked. |
141 | */ | | 141 | */ |
142 | struct fence * | | 142 | struct fence * |
143 | reservation_object_get_excl(struct reservation_object *robj) | | 143 | reservation_object_get_excl(struct reservation_object *robj) |
144 | { | | 144 | { |
145 | | | 145 | |
146 | KASSERT(reservation_object_held(robj)); | | 146 | KASSERT(reservation_object_held(robj)); |
147 | return robj->robj_fence; | | 147 | return robj->robj_fence; |
148 | } | | 148 | } |
149 | | | 149 | |
150 | /* | | 150 | /* |
151 | * reservation_object_get_list(robj) | | 151 | * reservation_object_get_list(robj) |
152 | * | | 152 | * |
153 | * Return a pointer to the shared fence list of the reservation | | 153 | * Return a pointer to the shared fence list of the reservation |
154 | * object robj. | | 154 | * object robj. |
155 | * | | 155 | * |
156 | * Caller must have robj locked. | | 156 | * Caller must have robj locked. |
157 | */ | | 157 | */ |
158 | struct reservation_object_list * | | 158 | struct reservation_object_list * |
159 | reservation_object_get_list(struct reservation_object *robj) | | 159 | reservation_object_get_list(struct reservation_object *robj) |
160 | { | | 160 | { |
161 | | | 161 | |
162 | KASSERT(reservation_object_held(robj)); | | 162 | KASSERT(reservation_object_held(robj)); |
163 | return robj->robj_list; | | 163 | return robj->robj_list; |
164 | } | | 164 | } |
165 | | | 165 | |
166 | /* | | 166 | /* |
167 | * reservation_object_reserve_shared(robj) | | 167 | * reservation_object_reserve_shared(robj) |
168 | * | | 168 | * |
169 | * Reserve space in robj to add a shared fence. To be used only | | 169 | * Reserve space in robj to add a shared fence. To be used only |
170 | * once before calling reservation_object_add_shared_fence. | | 170 | * once before calling reservation_object_add_shared_fence. |
171 | * | | 171 | * |
172 | * Caller must have robj locked. | | 172 | * Caller must have robj locked. |
173 | * | | 173 | * |
174 | * Internally, we start with room for four entries and double if | | 174 | * Internally, we start with room for four entries and double if |
175 | * we don't have enough. This is not guaranteed. | | 175 | * we don't have enough. This is not guaranteed. |
176 | */ | | 176 | */ |
177 | int | | 177 | int |
178 | reservation_object_reserve_shared(struct reservation_object *robj) | | 178 | reservation_object_reserve_shared(struct reservation_object *robj) |
179 | { | | 179 | { |
180 | struct reservation_object_list *list, *prealloc; | | 180 | struct reservation_object_list *list, *prealloc; |
181 | uint32_t n, nalloc; | | 181 | uint32_t n, nalloc; |
182 | | | 182 | |
183 | KASSERT(reservation_object_held(robj)); | | 183 | KASSERT(reservation_object_held(robj)); |
184 | | | 184 | |
185 | list = robj->robj_list; | | 185 | list = robj->robj_list; |
186 | prealloc = robj->robj_prealloc; | | 186 | prealloc = robj->robj_prealloc; |
187 | | | 187 | |
188 | /* If there's an existing list, check it for space. */ | | 188 | /* If there's an existing list, check it for space. */ |
189 | if (list != NULL) { | | 189 | if (list != NULL) { |
190 | /* If there's too many already, give up. */ | | 190 | /* If there's too many already, give up. */ |
191 | if (list->shared_count == UINT32_MAX) | | 191 | if (list->shared_count == UINT32_MAX) |
192 | return -ENOMEM; | | 192 | return -ENOMEM; |
193 | | | 193 | |
194 | /* Add one more. */ | | 194 | /* Add one more. */ |
195 | n = list->shared_count + 1; | | 195 | n = list->shared_count + 1; |
196 | | | 196 | |
197 | /* If there's enough for one more, we're done. */ | | 197 | /* If there's enough for one more, we're done. */ |
198 | if (n <= list->shared_max) | | 198 | if (n <= list->shared_max) |
199 | return 0; | | 199 | return 0; |
200 | } else { | | 200 | } else { |
201 | /* No list already. We need space for 1. */ | | 201 | /* No list already. We need space for 1. */ |
202 | n = 1; | | 202 | n = 1; |
203 | } | | 203 | } |
204 | | | 204 | |
205 | /* If not, maybe there's a preallocated list ready. */ | | 205 | /* If not, maybe there's a preallocated list ready. */ |
206 | if (prealloc != NULL) { | | 206 | if (prealloc != NULL) { |
207 | /* If there's enough room in it, stop here. */ | | 207 | /* If there's enough room in it, stop here. */ |
208 | if (n <= prealloc->shared_max) | | 208 | if (n <= prealloc->shared_max) |
209 | return 0; | | 209 | return 0; |
210 | | | 210 | |
211 | /* Try to double its capacity. */ | | 211 | /* Try to double its capacity. */ |
212 | nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n; | | 212 | nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n; |
213 | prealloc = objlist_tryalloc(nalloc); | | 213 | prealloc = objlist_tryalloc(nalloc); |
214 | if (prealloc == NULL) | | 214 | if (prealloc == NULL) |
215 | return -ENOMEM; | | 215 | return -ENOMEM; |
216 | | | 216 | |
217 | /* Swap the new preallocated list and free the old one. */ | | 217 | /* Swap the new preallocated list and free the old one. */ |
218 | objlist_free(robj->robj_prealloc); | | 218 | objlist_free(robj->robj_prealloc); |
219 | robj->robj_prealloc = prealloc; | | 219 | robj->robj_prealloc = prealloc; |
220 | } else { | | 220 | } else { |
221 | /* Start with some spare. */ | | 221 | /* Start with some spare. */ |
222 | nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4); | | 222 | nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4); |
223 | prealloc = objlist_tryalloc(nalloc); | | 223 | prealloc = objlist_tryalloc(nalloc); |
224 | if (prealloc == NULL) | | 224 | if (prealloc == NULL) |
225 | return -ENOMEM; | | 225 | return -ENOMEM; |
226 | /* Save the new preallocated list. */ | | 226 | /* Save the new preallocated list. */ |
227 | robj->robj_prealloc = prealloc; | | 227 | robj->robj_prealloc = prealloc; |
228 | } | | 228 | } |
229 | | | 229 | |
230 | /* Success! */ | | 230 | /* Success! */ |
231 | return 0; | | 231 | return 0; |
232 | } | | 232 | } |
233 | | | 233 | |
234 | struct reservation_object_write_ticket { | | 234 | struct reservation_object_write_ticket { |
235 | unsigned version; | | 235 | unsigned version; |
236 | }; | | 236 | }; |
237 | | | 237 | |
238 | /* | | 238 | /* |
239 | * reservation_object_write_begin(robj, ticket) | | 239 | * reservation_object_write_begin(robj, ticket) |
240 | * | | 240 | * |
241 | * Begin an atomic batch of writes to robj, and initialize opaque | | 241 | * Begin an atomic batch of writes to robj, and initialize opaque |
242 | * ticket for it. The ticket must be passed to | | 242 | * ticket for it. The ticket must be passed to |
243 | * reservation_object_write_commit to commit the writes. | | 243 | * reservation_object_write_commit to commit the writes. |
244 | * | | 244 | * |
245 | * Caller must have robj locked. | | 245 | * Caller must have robj locked. |
246 | */ | | 246 | */ |
247 | static void | | 247 | static void |
248 | reservation_object_write_begin(struct reservation_object *robj, | | 248 | reservation_object_write_begin(struct reservation_object *robj, |
249 | struct reservation_object_write_ticket *ticket) | | 249 | struct reservation_object_write_ticket *ticket) |
250 | { | | 250 | { |
251 | | | 251 | |
252 | KASSERT(reservation_object_held(robj)); | | 252 | KASSERT(reservation_object_held(robj)); |
253 | | | 253 | |
254 | ticket->version = robj->robj_version |= 1; | | 254 | ticket->version = robj->robj_version |= 1; |
255 | membar_producer(); | | 255 | membar_producer(); |
256 | } | | 256 | } |
257 | | | 257 | |
258 | /* | | 258 | /* |
259 | * reservation_object_write_commit(robj, ticket) | | 259 | * reservation_object_write_commit(robj, ticket) |
260 | * | | 260 | * |
261 | * Commit an atomic batch of writes to robj begun with the call to | | 261 | * Commit an atomic batch of writes to robj begun with the call to |
262 | * reservation_object_write_begin that returned ticket. | | 262 | * reservation_object_write_begin that returned ticket. |
263 | * | | 263 | * |
264 | * Caller must have robj locked. | | 264 | * Caller must have robj locked. |
265 | */ | | 265 | */ |
266 | static void | | 266 | static void |
267 | reservation_object_write_commit(struct reservation_object *robj, | | 267 | reservation_object_write_commit(struct reservation_object *robj, |
268 | struct reservation_object_write_ticket *ticket) | | 268 | struct reservation_object_write_ticket *ticket) |
269 | { | | 269 | { |
270 | | | 270 | |
271 | KASSERT(reservation_object_held(robj)); | | 271 | KASSERT(reservation_object_held(robj)); |
272 | KASSERT(ticket->version == robj->robj_version); | | 272 | KASSERT(ticket->version == robj->robj_version); |
273 | KASSERT((ticket->version & 1) == 1); | | 273 | KASSERT((ticket->version & 1) == 1); |
274 | | | 274 | |
275 | membar_producer(); | | 275 | membar_producer(); |
276 | robj->robj_version = ticket->version + 1; | | 276 | robj->robj_version = ticket->version + 1; |
277 | } | | 277 | } |
278 | | | 278 | |
279 | struct reservation_object_read_ticket { | | 279 | struct reservation_object_read_ticket { |
280 | unsigned version; | | 280 | unsigned version; |
281 | }; | | 281 | }; |
282 | | | 282 | |
283 | /* | | 283 | /* |
284 | * reservation_object_read_begin(robj, ticket) | | 284 | * reservation_object_read_begin(robj, ticket) |
285 | * | | 285 | * |
286 | * Begin a read section, and initialize opaque ticket for it. The | | 286 | * Begin a read section, and initialize opaque ticket for it. The |
287 | * ticket must be passed to reservation_object_read_exit, and the | | 287 | * ticket must be passed to reservation_object_read_exit, and the |
288 | * caller must be prepared to retry reading if it fails. | | 288 | * caller must be prepared to retry reading if it fails. |
289 | */ | | 289 | */ |
290 | static void | | 290 | static void |
291 | reservation_object_read_begin(struct reservation_object *robj, | | 291 | reservation_object_read_begin(struct reservation_object *robj, |
292 | struct reservation_object_read_ticket *ticket) | | 292 | struct reservation_object_read_ticket *ticket) |
293 | { | | 293 | { |
294 | | | 294 | |
295 | while ((ticket->version = robj->robj_version) & 1) | | 295 | while ((ticket->version = robj->robj_version) & 1) |
296 | SPINLOCK_BACKOFF_HOOK; | | 296 | SPINLOCK_BACKOFF_HOOK; |
297 | membar_consumer(); | | 297 | membar_consumer(); |
298 | } | | 298 | } |
299 | | | 299 | |
300 | /* | | 300 | /* |
301 | * reservation_object_read_valid(robj, ticket) | | 301 | * reservation_object_read_valid(robj, ticket) |
302 | * | | 302 | * |
303 | * Test whether the read sections are valid. Return true on | | 303 | * Test whether the read sections are valid. Return true on |
304 | * success, or false on failure if the read ticket has been | | 304 | * success, or false on failure if the read ticket has been |
305 | * invalidated. | | 305 | * invalidated. |
306 | */ | | 306 | */ |
307 | static bool | | 307 | static bool |
308 | reservation_object_read_valid(struct reservation_object *robj, | | 308 | reservation_object_read_valid(struct reservation_object *robj, |
309 | struct reservation_object_read_ticket *ticket) | | 309 | struct reservation_object_read_ticket *ticket) |
310 | { | | 310 | { |
311 | | | 311 | |
312 | membar_consumer(); | | 312 | membar_consumer(); |
313 | return ticket->version == robj->robj_version; | | 313 | return ticket->version == robj->robj_version; |
314 | } | | 314 | } |
315 | | | 315 | |
316 | /* | | 316 | /* |
317 | * reservation_object_add_excl_fence(robj, fence) | | 317 | * reservation_object_add_excl_fence(robj, fence) |
318 | * | | 318 | * |
319 | * Empty and release all of robj's shared fences, and clear and | | 319 | * Empty and release all of robj's shared fences, and clear and |
320 | * release its exclusive fence. If fence is nonnull, acquire a | | 320 | * release its exclusive fence. If fence is nonnull, acquire a |
321 | * reference to it and save it as robj's exclusive fence. | | 321 | * reference to it and save it as robj's exclusive fence. |
322 | * | | 322 | * |
323 | * Caller must have robj locked. | | 323 | * Caller must have robj locked. |
324 | */ | | 324 | */ |
325 | void | | 325 | void |
326 | reservation_object_add_excl_fence(struct reservation_object *robj, | | 326 | reservation_object_add_excl_fence(struct reservation_object *robj, |
327 | struct fence *fence) | | 327 | struct fence *fence) |
328 | { | | 328 | { |
329 | struct fence *old_fence = robj->robj_fence; | | 329 | struct fence *old_fence = robj->robj_fence; |
330 | struct reservation_object_list *old_list = robj->robj_list; | | 330 | struct reservation_object_list *old_list = robj->robj_list; |
331 | uint32_t old_shared_count; | | 331 | uint32_t old_shared_count; |
332 | struct reservation_object_write_ticket ticket; | | 332 | struct reservation_object_write_ticket ticket; |
333 | | | 333 | |
334 | KASSERT(reservation_object_held(robj)); | | 334 | KASSERT(reservation_object_held(robj)); |
335 | | | 335 | |
336 | /* | | 336 | /* |
337 | * If we are setting rather than just removing a fence, acquire | | 337 | * If we are setting rather than just removing a fence, acquire |
338 | * a reference for ourselves. | | 338 | * a reference for ourselves. |
339 | */ | | 339 | */ |
340 | if (fence) | | 340 | if (fence) |
341 | (void)fence_get(fence); | | 341 | (void)fence_get(fence); |
342 | | | 342 | |
343 | /* If there are any shared fences, remember how many. */ | | 343 | /* If there are any shared fences, remember how many. */ |
344 | if (old_list) | | 344 | if (old_list) |
345 | old_shared_count = old_list->shared_count; | | 345 | old_shared_count = old_list->shared_count; |
346 | | | 346 | |
347 | /* Begin an update. */ | | 347 | /* Begin an update. */ |
348 | reservation_object_write_begin(robj, &ticket); | | 348 | reservation_object_write_begin(robj, &ticket); |
349 | | | 349 | |
350 | /* Replace the fence and zero the shared count. */ | | 350 | /* Replace the fence and zero the shared count. */ |
351 | robj->robj_fence = fence; | | 351 | robj->robj_fence = fence; |
352 | if (old_list) | | 352 | if (old_list) |
353 | old_list->shared_count = 0; | | 353 | old_list->shared_count = 0; |
354 | | | 354 | |
355 | /* Commit the update. */ | | 355 | /* Commit the update. */ |
356 | reservation_object_write_commit(robj, &ticket); | | 356 | reservation_object_write_commit(robj, &ticket); |
357 | | | 357 | |
358 | /* Release the old exclusive fence, if any. */ | | 358 | /* Release the old exclusive fence, if any. */ |
359 | if (old_fence) | | 359 | if (old_fence) |
360 | fence_put(old_fence); | | 360 | fence_put(old_fence); |
361 | | | 361 | |
362 | /* Release any old shared fences. */ | | 362 | /* Release any old shared fences. */ |
363 | if (old_list) { | | 363 | if (old_list) { |
364 | while (old_shared_count--) | | 364 | while (old_shared_count--) |
365 | fence_put(old_list->shared[old_shared_count]); | | 365 | fence_put(old_list->shared[old_shared_count]); |
366 | } | | 366 | } |
367 | } | | 367 | } |
368 | | | 368 | |
369 | /* | | 369 | /* |
370 | * reservation_object_add_shared_fence(robj, fence) | | 370 | * reservation_object_add_shared_fence(robj, fence) |
371 | * | | 371 | * |
372 | * Acquire a reference to fence and add it to robj's shared list. | | 372 | * Acquire a reference to fence and add it to robj's shared list. |
373 | * If any fence was already added with the same context number, | | 373 | * If any fence was already added with the same context number, |
374 | * release it and replace it by this one. | | 374 | * release it and replace it by this one. |
375 | * | | 375 | * |
376 | * Caller must have robj locked, and must have preceded with a | | 376 | * Caller must have robj locked, and must have preceded with a |
377 | * call to reservation_object_reserve_shared for each shared fence | | 377 | * call to reservation_object_reserve_shared for each shared fence |
378 | * added. | | 378 | * added. |
379 | */ | | 379 | */ |
380 | void | | 380 | void |
381 | reservation_object_add_shared_fence(struct reservation_object *robj, | | 381 | reservation_object_add_shared_fence(struct reservation_object *robj, |
382 | struct fence *fence) | | 382 | struct fence *fence) |
383 | { | | 383 | { |
384 | struct reservation_object_list *list = robj->robj_list; | | 384 | struct reservation_object_list *list = robj->robj_list; |
385 | struct reservation_object_list *prealloc = robj->robj_prealloc; | | 385 | struct reservation_object_list *prealloc = robj->robj_prealloc; |
386 | struct reservation_object_write_ticket ticket; | | 386 | struct reservation_object_write_ticket ticket; |
387 | struct fence *replace = NULL; | | 387 | struct fence *replace = NULL; |
388 | uint32_t i; | | 388 | uint32_t i; |
389 | | | 389 | |
390 | KASSERT(reservation_object_held(robj)); | | 390 | KASSERT(reservation_object_held(robj)); |
391 | | | 391 | |
392 | /* Acquire a reference to the fence. */ | | 392 | /* Acquire a reference to the fence. */ |
393 | KASSERT(fence != NULL); | | 393 | KASSERT(fence != NULL); |
394 | (void)fence_get(fence); | | 394 | (void)fence_get(fence); |
395 | | | 395 | |
396 | /* Check for a preallocated replacement list. */ | | 396 | /* Check for a preallocated replacement list. */ |
397 | if (prealloc == NULL) { | | 397 | if (prealloc == NULL) { |
398 | /* | | 398 | /* |
399 | * If there is no preallocated replacement list, then | | 399 | * If there is no preallocated replacement list, then |
400 | * there must be room in the current list. | | 400 | * there must be room in the current list. |
401 | */ | | 401 | */ |
402 | KASSERT(list != NULL); | | 402 | KASSERT(list != NULL); |
403 | KASSERT(list->shared_count < list->shared_max); | | 403 | KASSERT(list->shared_count < list->shared_max); |
404 | | | 404 | |
405 | /* Begin an update. */ | | 405 | /* Begin an update. */ |
406 | reservation_object_write_begin(robj, &ticket); | | 406 | reservation_object_write_begin(robj, &ticket); |
407 | | | 407 | |
408 | /* Find a fence with the same context number. */ | | 408 | /* Find a fence with the same context number. */ |
409 | for (i = 0; i < list->shared_count; i++) { | | 409 | for (i = 0; i < list->shared_count; i++) { |
410 | if (list->shared[i]->context == fence->context) { | | 410 | if (list->shared[i]->context == fence->context) { |
411 | replace = list->shared[i]; | | 411 | replace = list->shared[i]; |
412 | list->shared[i] = fence; | | 412 | list->shared[i] = fence; |
413 | break; | | 413 | break; |
414 | } | | 414 | } |
415 | } | | 415 | } |
416 | | | 416 | |
417 | /* If we didn't find one, add it at the end. */ | | 417 | /* If we didn't find one, add it at the end. */ |
418 | if (i == list->shared_count) | | 418 | if (i == list->shared_count) |
419 | list->shared[list->shared_count++] = fence; | | 419 | list->shared[list->shared_count++] = fence; |
420 | | | 420 | |
421 | /* Commit the update. */ | | 421 | /* Commit the update. */ |
422 | reservation_object_write_commit(robj, &ticket); | | 422 | reservation_object_write_commit(robj, &ticket); |
423 | } else { | | 423 | } else { |
424 | /* | | 424 | /* |
425 | * There is a preallocated replacement list. There may | | 425 | * There is a preallocated replacement list. There may |
426 | * not be a current list. If not, treat it as a zero- | | 426 | * not be a current list. If not, treat it as a zero- |
427 | * length list. | | 427 | * length list. |
428 | */ | | 428 | */ |
429 | uint32_t shared_count = (list == NULL? 0 : list->shared_count); | | 429 | uint32_t shared_count = (list == NULL? 0 : list->shared_count); |
430 | | | 430 | |
431 | /* There had better be room in the preallocated list. */ | | 431 | /* There had better be room in the preallocated list. */ |
432 | KASSERT(shared_count < prealloc->shared_max); | | 432 | KASSERT(shared_count < prealloc->shared_max); |
433 | | | 433 | |
434 | /* | | 434 | /* |
435 | * Copy the fences over, but replace if we find one | | 435 | * Copy the fences over, but replace if we find one |
436 | * with the same context number. | | 436 | * with the same context number. |
437 | */ | | 437 | */ |
438 | for (i = 0; i < shared_count; i++) { | | 438 | for (i = 0; i < shared_count; i++) { |
439 | if (replace == NULL && | | 439 | if (replace == NULL && |
440 | list->shared[i]->context == fence->context) { | | 440 | list->shared[i]->context == fence->context) { |
441 | replace = list->shared[i]; | | 441 | replace = list->shared[i]; |
442 | prealloc->shared[i] = fence; | | 442 | prealloc->shared[i] = fence; |
443 | } else { | | 443 | } else { |
444 | prealloc->shared[i] = list->shared[i]; | | 444 | prealloc->shared[i] = list->shared[i]; |
445 | } | | 445 | } |
446 | } | | 446 | } |
447 | prealloc->shared_count = shared_count; | | 447 | prealloc->shared_count = shared_count; |
448 | | | 448 | |
449 | /* If we didn't find one, add it at the end. */ | | 449 | /* If we didn't find one, add it at the end. */ |
450 | if (replace == NULL) | | 450 | if (replace == NULL) |
451 | prealloc->shared[prealloc->shared_count++] = fence; | | 451 | prealloc->shared[prealloc->shared_count++] = fence; |
452 | | | 452 | |
453 | /* Now ready to replace the list. Begin an update. */ | | 453 | /* Now ready to replace the list. Begin an update. */ |
454 | reservation_object_write_begin(robj, &ticket); | | 454 | reservation_object_write_begin(robj, &ticket); |
455 | | | 455 | |
456 | /* Replace the list. */ | | 456 | /* Replace the list. */ |
457 | robj->robj_list = prealloc; | | 457 | robj->robj_list = prealloc; |
458 | robj->robj_prealloc = NULL; | | 458 | robj->robj_prealloc = NULL; |
459 | | | 459 | |
460 | /* Commit the update. */ | | 460 | /* Commit the update. */ |
461 | reservation_object_write_commit(robj, &ticket); | | 461 | reservation_object_write_commit(robj, &ticket); |
462 | | | 462 | |
463 | /* | | 463 | /* |
464 | * If there is an old list, free it when convenient. | | 464 | * If there is an old list, free it when convenient. |
465 | * (We are not in a position at this point to sleep | | 465 | * (We are not in a position at this point to sleep |
466 | * waiting for activity on all CPUs.) | | 466 | * waiting for activity on all CPUs.) |
467 | */ | | 467 | */ |
468 | if (list != NULL) | | 468 | if (list != NULL) |
469 | objlist_defer_free(list); | | 469 | objlist_defer_free(list); |
470 | } | | 470 | } |
471 | | | 471 | |
472 | /* Release a fence if we replaced it. */ | | 472 | /* Release a fence if we replaced it. */ |
473 | if (replace) | | 473 | if (replace) |
474 | fence_put(replace); | | 474 | fence_put(replace); |
475 | } | | 475 | } |
476 | | | 476 | |
477 | int | | 477 | int |
478 | reservation_object_get_fences_rcu(struct reservation_object *robj, | | 478 | reservation_object_get_fences_rcu(struct reservation_object *robj, |
479 | struct fence **fencep, unsigned *nsharedp, struct fence ***sharedp) | | 479 | struct fence **fencep, unsigned *nsharedp, struct fence ***sharedp) |
480 | { | | 480 | { |
481 | struct reservation_object_list *list; | | 481 | struct reservation_object_list *list; |
482 | struct fence *fence; | | 482 | struct fence *fence; |
483 | struct fence **shared = NULL; | | 483 | struct fence **shared = NULL; |
484 | unsigned shared_alloc, shared_count, i; | | 484 | unsigned shared_alloc, shared_count, i; |
485 | struct reservation_object_read_ticket ticket; | | 485 | struct reservation_object_read_ticket ticket; |
486 | | | 486 | |
487 | top: | | 487 | top: |
488 | /* Enter an RCU read section and get a read ticket. */ | | 488 | /* Enter an RCU read section and get a read ticket. */ |
489 | rcu_read_lock(); | | 489 | rcu_read_lock(); |
490 | reservation_object_read_begin(robj, &ticket); | | 490 | reservation_object_read_begin(robj, &ticket); |
491 | | | 491 | |
492 | /* If there is a shared list, grab it. */ | | 492 | /* If there is a shared list, grab it. */ |
493 | if ((list = robj->robj_list) != NULL) { | | 493 | if ((list = robj->robj_list) != NULL) { |
494 | /* Make sure the content of the list has been published. */ | | 494 | /* Make sure the content of the list has been published. */ |
495 | membar_datadep_consumer(); | | 495 | membar_datadep_consumer(); |
496 | | | 496 | |
497 | /* Check whether we have a buffer. */ | | 497 | /* Check whether we have a buffer. */ |
498 | if (shared == NULL) { | | 498 | if (shared == NULL) { |
499 | /* | | 499 | /* |
500 | * We don't have a buffer yet. Try to allocate | | 500 | * We don't have a buffer yet. Try to allocate |
501 | * one without waiting. | | 501 | * one without waiting. |
502 | */ | | 502 | */ |
503 | shared_alloc = list->shared_max; | | 503 | shared_alloc = list->shared_max; |
504 | __insn_barrier(); | | 504 | __insn_barrier(); |
505 | shared = kcalloc(shared_alloc, sizeof(shared[0]), | | 505 | shared = kcalloc(shared_alloc, sizeof(shared[0]), |
506 | GFP_NOWAIT); | | 506 | GFP_NOWAIT); |
507 | if (shared == NULL) { | | 507 | if (shared == NULL) { |
508 | /* | | 508 | /* |
509 | * Couldn't do it immediately. Back | | 509 | * Couldn't do it immediately. Back |
510 | * out of RCU and allocate one with | | 510 | * out of RCU and allocate one with |
511 | * waiting. | | 511 | * waiting. |
512 | */ | | 512 | */ |
513 | rcu_read_unlock(); | | 513 | rcu_read_unlock(); |
514 | shared = kcalloc(shared_alloc, | | 514 | shared = kcalloc(shared_alloc, |
515 | sizeof(shared[0]), GFP_KERNEL); | | 515 | sizeof(shared[0]), GFP_KERNEL); |
516 | if (shared == NULL) | | 516 | if (shared == NULL) |
517 | return -ENOMEM; | | 517 | return -ENOMEM; |
518 | goto top; | | 518 | goto top; |
519 | } | | 519 | } |
520 | } else if (shared_alloc < list->shared_max) { | | 520 | } else if (shared_alloc < list->shared_max) { |
521 | /* | | 521 | /* |
522 | * We have a buffer but it's too small. We're | | 522 | * We have a buffer but it's too small. We're |
523 | * already racing in this case, so just back | | 523 | * already racing in this case, so just back |
524 | * out and wait to allocate a bigger one. | | 524 | * out and wait to allocate a bigger one. |
525 | */ | | 525 | */ |
526 | shared_alloc = list->shared_max; | | 526 | shared_alloc = list->shared_max; |
527 | __insn_barrier(); | | 527 | __insn_barrier(); |
528 | rcu_read_unlock(); | | 528 | rcu_read_unlock(); |
529 | kfree(shared); | | 529 | kfree(shared); |
530 | shared = kcalloc(shared_alloc, sizeof(shared[0]), | | 530 | shared = kcalloc(shared_alloc, sizeof(shared[0]), |
531 | GFP_KERNEL); | | 531 | GFP_KERNEL); |
532 | if (shared == NULL) | | 532 | if (shared == NULL) |
533 | return -ENOMEM; | | 533 | return -ENOMEM; |
534 | } | | 534 | } |
535 | | | 535 | |
536 | /* | | 536 | /* |
537 | * We got a buffer large enough. Copy into the buffer | | 537 | * We got a buffer large enough. Copy into the buffer |
538 | * and record the number of elements. | | 538 | * and record the number of elements. |
539 | */ | | 539 | */ |
540 | memcpy(shared, list->shared, shared_alloc * sizeof(shared[0])); | | 540 | memcpy(shared, list->shared, shared_alloc * sizeof(shared[0])); |
541 | shared_count = list->shared_count; | | 541 | shared_count = list->shared_count; |
542 | } else { | | 542 | } else { |
543 | /* No shared list: shared count is zero. */ | | 543 | /* No shared list: shared count is zero. */ |
544 | shared_count = 0; | | 544 | shared_count = 0; |
545 | } | | 545 | } |
546 | | | 546 | |
547 | /* If there is an exclusive fence, grab it. */ | | 547 | /* If there is an exclusive fence, grab it. */ |
548 | if ((fence = robj->robj_fence) != NULL) { | | 548 | if ((fence = robj->robj_fence) != NULL) { |
549 | /* Make sure the content of the fence has been published. */ | | 549 | /* Make sure the content of the fence has been published. */ |
550 | membar_datadep_consumer(); | | 550 | membar_datadep_consumer(); |
551 | } | | 551 | } |
552 | | | 552 | |
553 | /* | | 553 | /* |
554 | * We are done reading from robj and list. Validate our | | 554 | * We are done reading from robj and list. Validate our |
555 | * parking ticket. If it's invalid, do not pass go and do not | | 555 | * parking ticket. If it's invalid, do not pass go and do not |
556 | * collect $200. | | 556 | * collect $200. |
557 | */ | | 557 | */ |
558 | if (!reservation_object_read_valid(robj, &ticket)) | | 558 | if (!reservation_object_read_valid(robj, &ticket)) |
559 | goto restart; | | 559 | goto restart; |
560 | | | 560 | |
561 | /* | | 561 | /* |
562 | * Try to get a reference to the exclusive fence, if there is | | 562 | * Try to get a reference to the exclusive fence, if there is |
563 | * one. If we can't, start over. | | 563 | * one. If we can't, start over. |
564 | */ | | 564 | */ |
565 | if (fence) { | | 565 | if (fence) { |
566 | if (fence_get_rcu(fence) == NULL) | | 566 | if (fence_get_rcu(fence) == NULL) |
567 | goto restart; | | 567 | goto restart; |
568 | } | | 568 | } |
569 | | | 569 | |
570 | /* | | 570 | /* |
571 | * Try to get a reference to all of the shared fences. | | 571 | * Try to get a reference to all of the shared fences. |
572 | */ | | 572 | */ |
573 | for (i = 0; i < shared_count; i++) { | | 573 | for (i = 0; i < shared_count; i++) { |
574 | if (fence_get_rcu(shared[i]) == NULL) | | 574 | if (fence_get_rcu(shared[i]) == NULL) |
575 | goto put_restart; | | 575 | goto put_restart; |
576 | } | | 576 | } |
577 | | | 577 | |
578 | /* Success! */ | | 578 | /* Success! */ |
579 | rcu_read_unlock(); | | 579 | rcu_read_unlock(); |
580 | *fencep = fence; | | 580 | *fencep = fence; |
581 | *nsharedp = shared_count; | | 581 | *nsharedp = shared_count; |
582 | *sharedp = shared; | | 582 | *sharedp = shared; |
583 | return 0; | | 583 | return 0; |
584 | | | 584 | |
585 | put_restart: | | 585 | put_restart: |
586 | /* Back out. */ | | 586 | /* Back out. */ |
587 | while (i --> 0) { | | 587 | while (i --> 0) { |
588 | fence_put(shared[i]); | | 588 | fence_put(shared[i]); |
589 | shared[i] = NULL; /* paranoia */ | | 589 | shared[i] = NULL; /* paranoia */ |
590 | } | | 590 | } |
591 | if (fence) { | | 591 | if (fence) { |
592 | fence_put(fence); | | 592 | fence_put(fence); |
593 | fence = NULL; /* paranoia */ | | 593 | fence = NULL; /* paranoia */ |
594 | } | | 594 | } |
595 | | | 595 | |
596 | restart: | | 596 | restart: |
597 | rcu_read_unlock(); | | 597 | rcu_read_unlock(); |
598 | goto top; | | 598 | goto top; |
599 | } | | 599 | } |
600 | | | 600 | |
/*
 * reservation_object_test_signaled_rcu(robj, shared)
 *
 *	If shared is true, test whether all of the shared fences are
 *	signalled, or if there are none, test whether the exclusive
 *	fence is signalled.  If shared is false, test only whether the
 *	exclusive fence is signalled.
 *
 *	Lockless: uses an RCU read section plus a read ticket, and
 *	restarts from scratch whenever a writer invalidates the
 *	ticket or a fence is being destroyed under us.  Returns true
 *	if there are no fences to test at all.
 *
 *	XXX Why does this _not_ test the exclusive fence if shared is
 *	true only if there are no shared fences?  This makes no sense.
 */
bool
reservation_object_test_signaled_rcu(struct reservation_object *robj,
    bool shared)
{
	struct reservation_object_read_ticket ticket;
	struct reservation_object_list *list;
	struct fence *fence;
	uint32_t i, shared_count;
	bool signaled = true;	/* accumulates; stays true if no fences */

top:
	/* Enter an RCU read section and get a read ticket. */
	rcu_read_lock();
	reservation_object_read_begin(robj, &ticket);

	/* If shared is requested and there is a shared list, test it. */
	if (shared && (list = robj->robj_list) != NULL) {
		/* Make sure the content of the list has been published. */
		membar_datadep_consumer();

		/* Find out how long it is. */
		shared_count = list->shared_count;

		/*
		 * Make sure we saw a consistent snapshot of the list
		 * pointer and length.
		 */
		if (!reservation_object_read_valid(robj, &ticket))
			goto restart;

		/*
		 * For each fence, if it is going away, restart.
		 * Otherwise, acquire a reference to it to test whether
		 * it is signalled.  Stop if we find any that is not
		 * signalled.
		 */
		for (i = 0; i < shared_count; i++) {
			fence = fence_get_rcu(list->shared[i]);
			if (fence == NULL)
				goto restart;
			signaled &= fence_is_signaled(fence);
			fence_put(fence);
			if (!signaled)
				goto out;
		}
	}

	/* If there is an exclusive fence, test it. */
	if ((fence = robj->robj_fence) != NULL) {
		/* Make sure the content of the fence has been published. */
		membar_datadep_consumer();

		/*
		 * Make sure we saw a consistent snapshot of the fence.
		 *
		 * XXX I'm not actually sure this is necessary since
		 * pointer writes are supposed to be atomic.
		 */
		if (!reservation_object_read_valid(robj, &ticket))
			goto restart;

		/*
		 * If it is going away, restart.  Otherwise, acquire a
		 * reference to it to test whether it is signalled.
		 */
		if ((fence = fence_get_rcu(fence)) == NULL)
			goto restart;
		signaled &= fence_is_signaled(fence);
		fence_put(fence);
		if (!signaled)
			goto out;
	}

out:	rcu_read_unlock();
	return signaled;

restart:
	rcu_read_unlock();
	goto top;
}
692 | | | 692 | |
693 | /* | | 693 | /* |
694 | * reservation_object_wait_timeout_rcu(robj, shared, intr, timeout) | | 694 | * reservation_object_wait_timeout_rcu(robj, shared, intr, timeout) |
695 | * | | 695 | * |
696 | * If shared is true, wait for all of the shared fences to be | | 696 | * If shared is true, wait for all of the shared fences to be |
697 | * signalled, or if there are none, wait for the exclusive fence | | 697 | * signalled, or if there are none, wait for the exclusive fence |
698 | * to be signalled. If shared is false, wait only for the | | 698 | * to be signalled. If shared is false, wait only for the |
699 | * exclusive fence to be signalled. If timeout is zero, don't | | 699 | * exclusive fence to be signalled. If timeout is zero, don't |
700 | * wait, only test. | | 700 | * wait, only test. |
701 | * | | 701 | * |
702 | * XXX Why does this _not_ wait for the exclusive fence if shared | | 702 | * XXX Why does this _not_ wait for the exclusive fence if shared |
703 | * is true only if there are no shared fences? This makes no | | 703 | * is true only if there are no shared fences? This makes no |
704 | * sense. | | 704 | * sense. |
705 | */ | | 705 | */ |
706 | long | | 706 | long |
707 | reservation_object_wait_timeout_rcu(struct reservation_object *robj, | | 707 | reservation_object_wait_timeout_rcu(struct reservation_object *robj, |
708 | bool shared, bool intr, unsigned long timeout) | | 708 | bool shared, bool intr, unsigned long timeout) |
709 | { | | 709 | { |
710 | struct reservation_object_read_ticket ticket; | | 710 | struct reservation_object_read_ticket ticket; |
711 | struct reservation_object_list *list; | | 711 | struct reservation_object_list *list; |
712 | struct fence *fence; | | 712 | struct fence *fence; |
713 | uint32_t i, shared_count; | | 713 | uint32_t i, shared_count; |
714 | long ret; | | 714 | long ret; |
715 | | | 715 | |
716 | if (timeout == 0) | | 716 | if (timeout == 0) |
717 | return reservation_object_test_signaled_rcu(robj, shared); | | 717 | return reservation_object_test_signaled_rcu(robj, shared); |
718 | | | 718 | |
719 | top: | | 719 | top: |
720 | /* Enter an RCU read section and get a read ticket. */ | | 720 | /* Enter an RCU read section and get a read ticket. */ |
721 | rcu_read_lock(); | | 721 | rcu_read_lock(); |
722 | reservation_object_read_begin(robj, &ticket); | | 722 | reservation_object_read_begin(robj, &ticket); |
723 | | | 723 | |
724 | /* If shared is requested and there is a shared list, wait on it. */ | | 724 | /* If shared is requested and there is a shared list, wait on it. */ |
725 | if (shared && (list = robj->robj_list) != NULL) { | | 725 | if (shared && (list = robj->robj_list) != NULL) { |
726 | /* Make sure the content of the list has been published. */ | | 726 | /* Make sure the content of the list has been published. */ |
727 | membar_datadep_consumer(); | | 727 | membar_datadep_consumer(); |
728 | | | 728 | |
729 | /* Find out how long it is. */ | | 729 | /* Find out how long it is. */ |
730 | shared_count = list->shared_count; | | 730 | shared_count = list->shared_count; |
731 | | | 731 | |
732 | /* | | 732 | /* |
733 | * Make sure we saw a consistent snapshot of the list | | 733 | * Make sure we saw a consistent snapshot of the list |
734 | * pointer and length. | | 734 | * pointer and length. |
735 | */ | | 735 | */ |
736 | if (!reservation_object_read_valid(robj, &ticket)) | | 736 | if (!reservation_object_read_valid(robj, &ticket)) |
737 | goto restart; | | 737 | goto restart; |
738 | | | 738 | |
739 | /* | | 739 | /* |
740 | * For each fence, if it is going away, restart. | | 740 | * For each fence, if it is going away, restart. |
741 | * Otherwise, acquire a reference to it to test whether | | 741 | * Otherwise, acquire a reference to it to test whether |
742 | * it is signalled. Stop and wait if we find any that | | 742 | * it is signalled. Stop and wait if we find any that |
743 | * is not signalled. | | 743 | * is not signalled. |
744 | */ | | 744 | */ |
745 | for (i = 0; i < shared_count; i++) { | | 745 | for (i = 0; i < shared_count; i++) { |
746 | fence = fence_get_rcu(list->shared[i]); | | 746 | fence = fence_get_rcu(list->shared[i]); |
747 | if (fence == NULL) | | 747 | if (fence == NULL) |
748 | goto restart; | | 748 | goto restart; |
749 | if (!fence_is_signaled(fence)) | | 749 | if (!fence_is_signaled(fence)) |
750 | goto wait; | | 750 | goto wait; |
751 | fence_put(fence); | | 751 | fence_put(fence); |
752 | } | | 752 | } |
753 | } | | 753 | } |
754 | | | 754 | |
755 | /* If there is an exclusive fence, test it. */ | | 755 | /* If there is an exclusive fence, test it. */ |
756 | if ((fence = robj->robj_fence) != NULL) { | | 756 | if ((fence = robj->robj_fence) != NULL) { |
757 | /* Make sure the content of the fence has been published. */ | | 757 | /* Make sure the content of the fence has been published. */ |
758 | membar_datadep_consumer(); | | 758 | membar_datadep_consumer(); |
759 | | | 759 | |
760 | /* | | 760 | /* |
761 | * Make sure we saw a consistent snapshot of the fence. | | 761 | * Make sure we saw a consistent snapshot of the fence. |
762 | * | | 762 | * |
763 | * XXX I'm not actually sure this is necessary since | | 763 | * XXX I'm not actually sure this is necessary since |
764 | * pointer writes are supposed to be atomic. | | 764 | * pointer writes are supposed to be atomic. |
765 | */ | | 765 | */ |
766 | if (!reservation_object_read_valid(robj, &ticket)) | | 766 | if (!reservation_object_read_valid(robj, &ticket)) |
767 | goto restart; | | 767 | goto restart; |
768 | | | 768 | |
769 | /* | | 769 | /* |
770 | * If it is going away, restart. Otherwise, acquire a | | 770 | * If it is going away, restart. Otherwise, acquire a |
771 | * reference to it to test whether it is signalled. If | | 771 | * reference to it to test whether it is signalled. If |
772 | * not, wait for it. | | 772 | * not, wait for it. |
773 | */ | | 773 | */ |
774 | if ((fence = fence_get_rcu(fence)) == NULL) | | 774 | if ((fence = fence_get_rcu(fence)) == NULL) |
775 | goto restart; | | 775 | goto restart; |
776 | if (!fence_is_signaled(fence)) | | 776 | if (!fence_is_signaled(fence)) |
777 | goto wait; | | 777 | goto wait; |
778 | fence_put(fence); | | 778 | fence_put(fence); |
779 | } | | 779 | } |
780 | | | 780 | |
781 | /* Success! Return the number of ticks left. */ | | 781 | /* Success! Return the number of ticks left. */ |
782 | rcu_read_unlock(); | | 782 | rcu_read_unlock(); |
783 | return timeout; | | 783 | return timeout; |
784 | | | 784 | |
785 | restart: | | 785 | restart: |
786 | rcu_read_unlock(); | | 786 | rcu_read_unlock(); |
787 | goto top; | | 787 | goto top; |
788 | | | 788 | |
789 | wait: | | 789 | wait: |
790 | /* | | 790 | /* |
791 | * Exit the RCU read section and wait for it. If we time out | | 791 | * Exit the RCU read section and wait for it. If we time out |
792 | * or fail, bail. Otherwise, go back to the top. | | 792 | * or fail, bail. Otherwise, go back to the top. |
793 | */ | | 793 | */ |
794 | KASSERT(fence != NULL); | | 794 | KASSERT(fence != NULL); |
795 | rcu_read_unlock(); | | 795 | rcu_read_unlock(); |
796 | ret = fence_wait_timeout(fence, intr, timeout); | | 796 | ret = fence_wait_timeout(fence, intr, timeout); |
797 | if (ret <= 0) | | 797 | if (ret <= 0) |
798 | return ret; | | 798 | return ret; |
| | | 799 | fence_put(fence); |
799 | KASSERT(ret <= timeout); | | 800 | KASSERT(ret <= timeout); |
800 | timeout = ret; | | 801 | timeout = ret; |
801 | goto top; | | 802 | goto top; |
802 | } | | 803 | } |
803 | | | 804 | |
/*
 * reservation_poll_init(rpoll)
 *
 *	Initialize reservation poll state: the lock protecting it, the
 *	select queue that pollers sleep on, and the claimed flag (0
 *	means no poller currently has the fence callback claimed).
 */
void
reservation_poll_init(struct reservation_poll *rpoll)
{

	mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM);
	selinit(&rpoll->rp_selq);
	rpoll->rp_claimed = 0;
}
817 | | | 818 | |
/*
 * reservation_poll_fini(rpoll)
 *
 *	Release any resource associated with reservation poll state.
 *	Caller must guarantee no fence callback is still claimed
 *	(rp_claimed == 0), i.e. no outstanding reservation_poll_cb.
 */
void
reservation_poll_fini(struct reservation_poll *rpoll)
{

	KASSERT(rpoll->rp_claimed == 0);
	seldestroy(&rpoll->rp_selq);
	mutex_destroy(&rpoll->rp_lock);
}
831 | | | 832 | |
/*
 * reservation_poll_cb(fence, fcb)
 *
 *	Callback to notify a reservation poll that a fence has
 *	completed.  Notify any waiters and allow the next poller to
 *	claim the callback.  Both steps are done under rp_lock so the
 *	wakeup and the claim release appear atomic to pollers.
 *
 *	If one thread is waiting for the exclusive fence only, and we
 *	spuriously notify them about a shared fence, tough.
 */
static void
reservation_poll_cb(struct fence *fence, struct fence_cb *fcb)
{
	/* Recover our poll state from the embedded callback record. */
	struct reservation_poll *rpoll = container_of(fcb,
	    struct reservation_poll, rp_fcb);

	mutex_enter(&rpoll->rp_lock);
	selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT);
	rpoll->rp_claimed = 0;
	mutex_exit(&rpoll->rp_lock);
}
853 | | | 854 | |
854 | /* | | 855 | /* |
855 | * reservation_object_poll(robj, events, rpoll) | | 856 | * reservation_object_poll(robj, events, rpoll) |
856 | * | | 857 | * |
857 | * Poll for reservation object events using the reservation poll | | 858 | * Poll for reservation object events using the reservation poll |
858 | * state in rpoll: | | 859 | * state in rpoll: |
859 | * | | 860 | * |
860 | * - POLLOUT wait for all fences shared and exclusive | | 861 | * - POLLOUT wait for all fences shared and exclusive |
861 | * - POLLIN wait for the exclusive fence | | 862 | * - POLLIN wait for the exclusive fence |
862 | * | | 863 | * |
863 | * Return the subset of events in events that are ready. If any | | 864 | * Return the subset of events in events that are ready. If any |
864 | * are requested but not ready, arrange to be notified with | | 865 | * are requested but not ready, arrange to be notified with |
865 | * selnotify when they are. | | 866 | * selnotify when they are. |
866 | */ | | 867 | */ |
867 | int | | 868 | int |
868 | reservation_object_poll(struct reservation_object *robj, int events, | | 869 | reservation_object_poll(struct reservation_object *robj, int events, |
869 | struct reservation_poll *rpoll) | | 870 | struct reservation_poll *rpoll) |
870 | { | | 871 | { |
871 | struct reservation_object_read_ticket ticket; | | 872 | struct reservation_object_read_ticket ticket; |
872 | struct reservation_object_list *list; | | 873 | struct reservation_object_list *list; |
873 | struct fence *fence; | | 874 | struct fence *fence; |
874 | uint32_t i, shared_count; | | 875 | uint32_t i, shared_count; |
875 | int revents; | | 876 | int revents; |
876 | bool recorded = false; /* curlwp is on the selq */ | | 877 | bool recorded = false; /* curlwp is on the selq */ |
877 | bool claimed = false; /* we claimed the callback */ | | 878 | bool claimed = false; /* we claimed the callback */ |
878 | bool callback = false; /* we requested a callback */ | | 879 | bool callback = false; /* we requested a callback */ |
879 | | | 880 | |
880 | /* | | 881 | /* |
881 | * Start with the maximal set of events that could be ready. | | 882 | * Start with the maximal set of events that could be ready. |
882 | * We will eliminate the events that are definitely not ready | | 883 | * We will eliminate the events that are definitely not ready |
883 | * as we go at the same time as we add callbacks to notify us | | 884 | * as we go at the same time as we add callbacks to notify us |
884 | * that they may be ready. | | 885 | * that they may be ready. |
885 | */ | | 886 | */ |
886 | revents = events & (POLLIN|POLLOUT); | | 887 | revents = events & (POLLIN|POLLOUT); |
887 | if (revents == 0) | | 888 | if (revents == 0) |
888 | return 0; | | 889 | return 0; |
889 | | | 890 | |
890 | top: | | 891 | top: |
891 | /* Enter an RCU read section and get a read ticket. */ | | 892 | /* Enter an RCU read section and get a read ticket. */ |
892 | rcu_read_lock(); | | 893 | rcu_read_lock(); |
893 | reservation_object_read_begin(robj, &ticket); | | 894 | reservation_object_read_begin(robj, &ticket); |
894 | | | 895 | |
895 | /* If we want to wait for all fences, get the shared list. */ | | 896 | /* If we want to wait for all fences, get the shared list. */ |
896 | if ((events & POLLOUT) && (list = robj->robj_list) != NULL) do { | | 897 | if ((events & POLLOUT) && (list = robj->robj_list) != NULL) do { |
897 | /* Make sure the content of the list has been published. */ | | 898 | /* Make sure the content of the list has been published. */ |
898 | membar_datadep_consumer(); | | 899 | membar_datadep_consumer(); |
899 | | | 900 | |
900 | /* Find out how long it is. */ | | 901 | /* Find out how long it is. */ |
901 | shared_count = list->shared_count; | | 902 | shared_count = list->shared_count; |
902 | | | 903 | |
903 | /* | | 904 | /* |
904 | * Make sure we saw a consistent snapshot of the list | | 905 | * Make sure we saw a consistent snapshot of the list |
905 | * pointer and length. | | 906 | * pointer and length. |
906 | */ | | 907 | */ |
907 | if (!reservation_object_read_valid(robj, &ticket)) | | 908 | if (!reservation_object_read_valid(robj, &ticket)) |
908 | goto restart; | | 909 | goto restart; |
909 | | | 910 | |
910 | /* | | 911 | /* |
911 | * For each fence, if it is going away, restart. | | 912 | * For each fence, if it is going away, restart. |
912 | * Otherwise, acquire a reference to it to test whether | | 913 | * Otherwise, acquire a reference to it to test whether |
913 | * it is signalled. Stop and request a callback if we | | 914 | * it is signalled. Stop and request a callback if we |
914 | * find any that is not signalled. | | 915 | * find any that is not signalled. |
915 | */ | | 916 | */ |
916 | for (i = 0; i < shared_count; i++) { | | 917 | for (i = 0; i < shared_count; i++) { |
917 | fence = fence_get_rcu(list->shared[i]); | | 918 | fence = fence_get_rcu(list->shared[i]); |
918 | if (fence == NULL) | | 919 | if (fence == NULL) |
919 | goto restart; | | 920 | goto restart; |
920 | if (!fence_is_signaled(fence)) { | | 921 | if (!fence_is_signaled(fence)) { |
921 | fence_put(fence); | | 922 | fence_put(fence); |
922 | break; | | 923 | break; |
923 | } | | 924 | } |
924 | fence_put(fence); | | 925 | fence_put(fence); |
925 | } | | 926 | } |
926 | | | 927 | |
927 | /* If all shared fences have been signalled, move on. */ | | 928 | /* If all shared fences have been signalled, move on. */ |
928 | if (i == shared_count) | | 929 | if (i == shared_count) |
929 | break; | | 930 | break; |
930 | | | 931 | |
931 | /* Put ourselves on the selq if we haven't already. */ | | 932 | /* Put ourselves on the selq if we haven't already. */ |
932 | if (!recorded) | | 933 | if (!recorded) |
933 | goto record; | | 934 | goto record; |
934 | | | 935 | |
935 | /* | | 936 | /* |
936 | * If someone else claimed the callback, or we already | | 937 | * If someone else claimed the callback, or we already |
937 | * requested it, we're guaranteed to be notified, so | | 938 | * requested it, we're guaranteed to be notified, so |
938 | * assume the event is not ready. | | 939 | * assume the event is not ready. |
939 | */ | | 940 | */ |
940 | if (!claimed || callback) { | | 941 | if (!claimed || callback) { |
941 | revents &= ~POLLOUT; | | 942 | revents &= ~POLLOUT; |
942 | break; | | 943 | break; |
943 | } | | 944 | } |
944 | | | 945 | |
945 | /* | | 946 | /* |
946 | * Otherwise, find the first fence that is not | | 947 | * Otherwise, find the first fence that is not |
947 | * signalled, request the callback, and clear POLLOUT | | 948 | * signalled, request the callback, and clear POLLOUT |
948 | * from the possible ready events. If they are all | | 949 | * from the possible ready events. If they are all |
949 | * signalled, leave POLLOUT set; we will simulate the | | 950 | * signalled, leave POLLOUT set; we will simulate the |
950 | * callback later. | | 951 | * callback later. |
951 | */ | | 952 | */ |
952 | for (i = 0; i < shared_count; i++) { | | 953 | for (i = 0; i < shared_count; i++) { |
953 | fence = fence_get_rcu(list->shared[i]); | | 954 | fence = fence_get_rcu(list->shared[i]); |
954 | if (fence == NULL) | | 955 | if (fence == NULL) |
955 | goto restart; | | 956 | goto restart; |
956 | if (!fence_add_callback(fence, &rpoll->rp_fcb, | | 957 | if (!fence_add_callback(fence, &rpoll->rp_fcb, |
957 | reservation_poll_cb)) { | | 958 | reservation_poll_cb)) { |
958 | fence_put(fence); | | 959 | fence_put(fence); |
959 | revents &= ~POLLOUT; | | 960 | revents &= ~POLLOUT; |
960 | callback = true; | | 961 | callback = true; |
961 | break; | | 962 | break; |
962 | } | | 963 | } |
963 | fence_put(fence); | | 964 | fence_put(fence); |
964 | } | | 965 | } |
965 | } while (0); | | 966 | } while (0); |
966 | | | 967 | |
967 | /* We always wait for at least the exclusive fence, so get it. */ | | 968 | /* We always wait for at least the exclusive fence, so get it. */ |
968 | if ((fence = robj->robj_fence) != NULL) do { | | 969 | if ((fence = robj->robj_fence) != NULL) do { |
969 | /* Make sure the content of the fence has been published. */ | | 970 | /* Make sure the content of the fence has been published. */ |
970 | membar_datadep_consumer(); | | 971 | membar_datadep_consumer(); |
971 | | | 972 | |
972 | /* | | 973 | /* |
973 | * Make sure we saw a consistent snapshot of the fence. | | 974 | * Make sure we saw a consistent snapshot of the fence. |
974 | * | | 975 | * |
975 | * XXX I'm not actually sure this is necessary since | | 976 | * XXX I'm not actually sure this is necessary since |
976 | * pointer writes are supposed to be atomic. | | 977 | * pointer writes are supposed to be atomic. |
977 | */ | | 978 | */ |
978 | if (!reservation_object_read_valid(robj, &ticket)) | | 979 | if (!reservation_object_read_valid(robj, &ticket)) |
979 | goto restart; | | 980 | goto restart; |
980 | | | 981 | |
981 | /* | | 982 | /* |
982 | * If it is going away, restart. Otherwise, acquire a | | 983 | * If it is going away, restart. Otherwise, acquire a |
983 | * reference to it to test whether it is signalled. If | | 984 | * reference to it to test whether it is signalled. If |
984 | * not, stop and request a callback. | | 985 | * not, stop and request a callback. |
985 | */ | | 986 | */ |
986 | if ((fence = fence_get_rcu(fence)) == NULL) | | 987 | if ((fence = fence_get_rcu(fence)) == NULL) |
987 | goto restart; | | 988 | goto restart; |
988 | if (fence_is_signaled(fence)) { | | 989 | if (fence_is_signaled(fence)) { |
989 | fence_put(fence); | | 990 | fence_put(fence); |
990 | break; | | 991 | break; |
991 | } | | 992 | } |
992 | | | 993 | |
993 | /* Put ourselves on the selq if we haven't already. */ | | 994 | /* Put ourselves on the selq if we haven't already. */ |
994 | if (!recorded) { | | 995 | if (!recorded) { |
995 | fence_put(fence); | | 996 | fence_put(fence); |
996 | goto record; | | 997 | goto record; |
997 | } | | 998 | } |
998 | | | 999 | |
999 | /* | | 1000 | /* |
1000 | * If someone else claimed the callback, or we already | | 1001 | * If someone else claimed the callback, or we already |
1001 | * requested it, we're guaranteed to be notified, so | | 1002 | * requested it, we're guaranteed to be notified, so |
1002 | * assume the event is not ready. | | 1003 | * assume the event is not ready. |
1003 | */ | | 1004 | */ |
1004 | if (!claimed || callback) { | | 1005 | if (!claimed || callback) { |
1005 | revents = 0; | | 1006 | revents = 0; |
1006 | break; | | 1007 | break; |
1007 | } | | 1008 | } |
1008 | | | 1009 | |
1009 | /* | | 1010 | /* |
1010 | * Otherwise, try to request the callback, and clear | | 1011 | * Otherwise, try to request the callback, and clear |
1011 | * all possible ready events. If the fence has been | | 1012 | * all possible ready events. If the fence has been |
1012 | * signalled in the interim, leave the events set; we | | 1013 | * signalled in the interim, leave the events set; we |
1013 | * will simulate the callback later. | | 1014 | * will simulate the callback later. |
1014 | */ | | 1015 | */ |
1015 | if (!fence_add_callback(fence, &rpoll->rp_fcb, | | 1016 | if (!fence_add_callback(fence, &rpoll->rp_fcb, |
1016 | reservation_poll_cb)) { | | 1017 | reservation_poll_cb)) { |
1017 | fence_put(fence); | | 1018 | fence_put(fence); |
1018 | revents = 0; | | 1019 | revents = 0; |
1019 | callback = true; | | 1020 | callback = true; |
1020 | break; | | 1021 | break; |
1021 | } | | 1022 | } |
1022 | fence_put(fence); | | 1023 | fence_put(fence); |
1023 | } while (0); | | 1024 | } while (0); |
1024 | | | 1025 | |
1025 | /* All done reading the fences. */ | | 1026 | /* All done reading the fences. */ |
1026 | rcu_read_unlock(); | | 1027 | rcu_read_unlock(); |
1027 | | | 1028 | |
1028 | if (claimed && !callback) { | | 1029 | if (claimed && !callback) { |
1029 | /* | | 1030 | /* |
1030 | * We claimed the callback but we didn't actually | | 1031 | * We claimed the callback but we didn't actually |
1031 | * request it because a fence was signalled while we | | 1032 | * request it because a fence was signalled while we |
1032 | * were claiming it. Call it ourselves now. The | | 1033 | * were claiming it. Call it ourselves now. The |
1033 | * callback doesn't use the fence nor rely on holding | | 1034 | * callback doesn't use the fence nor rely on holding |
1034 | * any of the fence locks, so this is safe. | | 1035 | * any of the fence locks, so this is safe. |
1035 | */ | | 1036 | */ |
1036 | reservation_poll_cb(NULL, &rpoll->rp_fcb); | | 1037 | reservation_poll_cb(NULL, &rpoll->rp_fcb); |
1037 | } | | 1038 | } |
1038 | return revents; | | 1039 | return revents; |
1039 | | | 1040 | |
1040 | restart: | | 1041 | restart: |
1041 | rcu_read_unlock(); | | 1042 | rcu_read_unlock(); |
1042 | goto top; | | 1043 | goto top; |
1043 | | | 1044 | |
1044 | record: | | 1045 | record: |
1045 | rcu_read_unlock(); | | 1046 | rcu_read_unlock(); |
1046 | mutex_enter(&rpoll->rp_lock); | | 1047 | mutex_enter(&rpoll->rp_lock); |
1047 | selrecord(curlwp, &rpoll->rp_selq); | | 1048 | selrecord(curlwp, &rpoll->rp_selq); |
1048 | if (!rpoll->rp_claimed) | | 1049 | if (!rpoll->rp_claimed) |
1049 | claimed = rpoll->rp_claimed = true; | | 1050 | claimed = rpoll->rp_claimed = true; |
1050 | mutex_exit(&rpoll->rp_lock); | | 1051 | mutex_exit(&rpoll->rp_lock); |
1051 | recorded = true; | | 1052 | recorded = true; |
1052 | goto top; | | 1053 | goto top; |
1053 | } | | 1054 | } |
1054 | | | 1055 | |
1055 | /* | | 1056 | /* |
1056 | * reservation_object_kqfilter(robj, kn, rpoll) | | 1057 | * reservation_object_kqfilter(robj, kn, rpoll) |
1057 | * | | 1058 | * |
1058 | * Kqueue filter for reservation objects. Currently not | | 1059 | * Kqueue filter for reservation objects. Currently not |
1059 | * implemented because the logic to implement it is nontrivial, | | 1060 | * implemented because the logic to implement it is nontrivial, |
1060 | * and userland will presumably never use it, so it would be | | 1061 | * and userland will presumably never use it, so it would be |
1061 | * dangerous to add never-tested complex code paths to the kernel. | | 1062 | * dangerous to add never-tested complex code paths to the kernel. |
1062 | */ | | 1063 | */ |
int
reservation_object_kqfilter(struct reservation_object *robj, struct knote *kn,
    struct reservation_poll *rpoll)
{

	/*
	 * Deliberately unimplemented: reject the filter attach so
	 * userland gets an explicit error instead of silently broken
	 * event delivery.
	 */
	(void)robj;
	(void)kn;
	(void)rpoll;

	return EINVAL;
}