| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: subr_workqueue.c,v 1.37 2018/06/13 05:26:12 ozaki-r Exp $ */ | | 1 | /* $NetBSD: subr_workqueue.c,v 1.37.6.1 2024/04/18 15:51:35 martin Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c)2002, 2005, 2006, 2007 YAMAMOTO Takashi, | | 4 | * Copyright (c)2002, 2005, 2006, 2007 YAMAMOTO Takashi, |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
| @@ -17,69 +17,113 @@ | | | @@ -17,69 +17,113 @@ |
17 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 17 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | | 19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
20 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 20 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
21 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 21 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
22 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 22 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
24 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 24 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
26 | * SUCH DAMAGE. | | 26 | * SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | #include <sys/cdefs.h> | | 29 | #include <sys/cdefs.h> |
30 | __KERNEL_RCSID(0, "$NetBSD: subr_workqueue.c,v 1.37 2018/06/13 05:26:12 ozaki-r Exp $"); | | 30 | __KERNEL_RCSID(0, "$NetBSD: subr_workqueue.c,v 1.37.6.1 2024/04/18 15:51:35 martin Exp $"); |
31 | | | 31 | |
32 | #include <sys/param.h> | | 32 | #include <sys/param.h> |
| | | 33 | |
| | | 34 | #include <sys/condvar.h> |
33 | #include <sys/cpu.h> | | 35 | #include <sys/cpu.h> |
34 | #include <sys/systm.h> | | | |
35 | #include <sys/kthread.h> | | | |
36 | #include <sys/kmem.h> | | 36 | #include <sys/kmem.h> |
37 | #include <sys/proc.h> | | 37 | #include <sys/kthread.h> |
38 | #include <sys/workqueue.h> | | | |
39 | #include <sys/mutex.h> | | 38 | #include <sys/mutex.h> |
40 | #include <sys/condvar.h> | | 39 | #include <sys/proc.h> |
41 | #include <sys/queue.h> | | 40 | #include <sys/queue.h> |
| | | 41 | #include <sys/sdt.h> |
| | | 42 | #include <sys/systm.h> |
| | | 43 | #include <sys/workqueue.h> |
42 | | | 44 | |
43 | typedef struct work_impl { | | 45 | typedef struct work_impl { |
44 | SIMPLEQ_ENTRY(work_impl) wk_entry; | | 46 | SIMPLEQ_ENTRY(work_impl) wk_entry; |
45 | } work_impl_t; | | 47 | } work_impl_t; |
46 | | | 48 | |
47 | SIMPLEQ_HEAD(workqhead, work_impl); | | 49 | SIMPLEQ_HEAD(workqhead, work_impl); |
48 | | | 50 | |
49 | struct workqueue_queue { | | 51 | struct workqueue_queue { |
50 | kmutex_t q_mutex; | | 52 | kmutex_t q_mutex; |
51 | kcondvar_t q_cv; | | 53 | kcondvar_t q_cv; |
52 | struct workqhead q_queue_pending; | | 54 | struct workqhead q_queue_pending; |
53 | struct workqhead q_queue_running; | | 55 | uint64_t q_gen; |
54 | lwp_t *q_worker; | | 56 | lwp_t *q_worker; |
55 | work_impl_t *q_waiter; | | | |
56 | }; | | 57 | }; |
57 | | | 58 | |
58 | struct workqueue { | | 59 | struct workqueue { |
59 | void (*wq_func)(struct work *, void *); | | 60 | void (*wq_func)(struct work *, void *); |
60 | void *wq_arg; | | 61 | void *wq_arg; |
61 | int wq_flags; | | 62 | int wq_flags; |
62 | | | 63 | |
63 | char wq_name[MAXCOMLEN]; | | 64 | char wq_name[MAXCOMLEN]; |
64 | pri_t wq_prio; | | 65 | pri_t wq_prio; |
65 | void *wq_ptr; | | 66 | void *wq_ptr; |
66 | }; | | 67 | }; |
67 | | | 68 | |
68 | #define WQ_SIZE (roundup2(sizeof(struct workqueue), coherency_unit)) | | 69 | #define WQ_SIZE (roundup2(sizeof(struct workqueue), coherency_unit)) |
69 | #define WQ_QUEUE_SIZE (roundup2(sizeof(struct workqueue_queue), coherency_unit)) | | 70 | #define WQ_QUEUE_SIZE (roundup2(sizeof(struct workqueue_queue), coherency_unit)) |
70 | | | 71 | |
71 | #define POISON 0xaabbccdd | | 72 | #define POISON 0xaabbccdd |
72 | | | 73 | |
| | | 74 | SDT_PROBE_DEFINE7(sdt, kernel, workqueue, create, |
| | | 75 | "struct workqueue *"/*wq*/, |
| | | 76 | "const char *"/*name*/, |
| | | 77 | "void (*)(struct work *, void *)"/*func*/, |
| | | 78 | "void *"/*arg*/, |
| | | 79 | "pri_t"/*prio*/, |
| | | 80 | "int"/*ipl*/, |
| | | 81 | "int"/*flags*/); |
| | | 82 | SDT_PROBE_DEFINE1(sdt, kernel, workqueue, destroy, |
| | | 83 | "struct workqueue *"/*wq*/); |
| | | 84 | |
| | | 85 | SDT_PROBE_DEFINE3(sdt, kernel, workqueue, enqueue, |
| | | 86 | "struct workqueue *"/*wq*/, |
| | | 87 | "struct work *"/*wk*/, |
| | | 88 | "struct cpu_info *"/*ci*/); |
| | | 89 | SDT_PROBE_DEFINE4(sdt, kernel, workqueue, entry, |
| | | 90 | "struct workqueue *"/*wq*/, |
| | | 91 | "struct work *"/*wk*/, |
| | | 92 | "void (*)(struct work *, void *)"/*func*/, |
| | | 93 | "void *"/*arg*/); |
| | | 94 | SDT_PROBE_DEFINE4(sdt, kernel, workqueue, return, |
| | | 95 | "struct workqueue *"/*wq*/, |
| | | 96 | "struct work *"/*wk*/, |
| | | 97 | "void (*)(struct work *, void *)"/*func*/, |
| | | 98 | "void *"/*arg*/); |
| | | 99 | SDT_PROBE_DEFINE2(sdt, kernel, workqueue, wait__start, |
| | | 100 | "struct workqueue *"/*wq*/, |
| | | 101 | "struct work *"/*wk*/); |
| | | 102 | SDT_PROBE_DEFINE2(sdt, kernel, workqueue, wait__self, |
| | | 103 | "struct workqueue *"/*wq*/, |
| | | 104 | "struct work *"/*wk*/); |
| | | 105 | SDT_PROBE_DEFINE2(sdt, kernel, workqueue, wait__hit, |
| | | 106 | "struct workqueue *"/*wq*/, |
| | | 107 | "struct work *"/*wk*/); |
| | | 108 | SDT_PROBE_DEFINE2(sdt, kernel, workqueue, wait__done, |
| | | 109 | "struct workqueue *"/*wq*/, |
| | | 110 | "struct work *"/*wk*/); |
| | | 111 | |
| | | 112 | SDT_PROBE_DEFINE1(sdt, kernel, workqueue, exit__start, |
| | | 113 | "struct workqueue *"/*wq*/); |
| | | 114 | SDT_PROBE_DEFINE1(sdt, kernel, workqueue, exit__done, |
| | | 115 | "struct workqueue *"/*wq*/); |
| | | 116 | |
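
[The block above adds DTrace statically-defined tracing (SDT) probes covering the lifecycle of a workqueue and of individual work items; the matching SDT_PROBE* calls that fire them appear at the corresponding points later in the diff. On a kernel built with the KDTRACE_HOOKS option these should be reachable from dtrace(1); by the usual SDT convention a double underscore in a probe name is rendered as a dash. A hedged usage example, assuming the stock provider/module/function/name mapping:]

    # assumption: the wait__done probe surfaces as sdt:kernel:workqueue:wait-done
    dtrace -n 'sdt:kernel:workqueue:wait-done { printf("wq=%p wk=%p", (void *)arg0, (void *)arg1); }'
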
73 | static size_t | | 117 | static size_t |
74 | workqueue_size(int flags) | | 118 | workqueue_size(int flags) |
75 | { | | 119 | { |
76 | | | 120 | |
77 | return WQ_SIZE | | 121 | return WQ_SIZE |
78 | + ((flags & WQ_PERCPU) != 0 ? ncpu : 1) * WQ_QUEUE_SIZE | | 122 | + ((flags & WQ_PERCPU) != 0 ? ncpu : 1) * WQ_QUEUE_SIZE |
79 | + coherency_unit; | | 123 | + coherency_unit; |
80 | } | | 124 | } |
81 | | | 125 | |
82 | static struct workqueue_queue * | | 126 | static struct workqueue_queue * |
83 | workqueue_queue_lookup(struct workqueue *wq, struct cpu_info *ci) | | 127 | workqueue_queue_lookup(struct workqueue *wq, struct cpu_info *ci) |
84 | { | | 128 | { |
85 | u_int idx = 0; | | 129 | u_int idx = 0; |
| @@ -87,70 +131,75 @@ workqueue_queue_lookup(struct workqueue | | | @@ -87,70 +131,75 @@ workqueue_queue_lookup(struct workqueue |
87 | if (wq->wq_flags & WQ_PERCPU) { | | 131 | if (wq->wq_flags & WQ_PERCPU) { |
88 | idx = ci ? cpu_index(ci) : cpu_index(curcpu()); | | 132 | idx = ci ? cpu_index(ci) : cpu_index(curcpu()); |
89 | } | | 133 | } |
90 | | | 134 | |
91 | return (void *)((uintptr_t)(wq) + WQ_SIZE + (idx * WQ_QUEUE_SIZE)); | | 135 | return (void *)((uintptr_t)(wq) + WQ_SIZE + (idx * WQ_QUEUE_SIZE)); |
92 | } | | 136 | } |
93 | | | 137 | |
94 | static void | | 138 | static void |
95 | workqueue_runlist(struct workqueue *wq, struct workqhead *list) | | 139 | workqueue_runlist(struct workqueue *wq, struct workqhead *list) |
96 | { | | 140 | { |
97 | work_impl_t *wk; | | 141 | work_impl_t *wk; |
98 | work_impl_t *next; | | 142 | work_impl_t *next; |
99 | | | 143 | |
100 | /* | | | |
101 | * note that "list" is not a complete SIMPLEQ. | | | |
102 | */ | | | |
103 | | | | |
104 | for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) { | | 144 | for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) { |
105 | next = SIMPLEQ_NEXT(wk, wk_entry); | | 145 | next = SIMPLEQ_NEXT(wk, wk_entry); |
| | | 146 | SDT_PROBE4(sdt, kernel, workqueue, entry, |
| | | 147 | wq, wk, wq->wq_func, wq->wq_arg); |
106 | (*wq->wq_func)((void *)wk, wq->wq_arg); | | 148 | (*wq->wq_func)((void *)wk, wq->wq_arg); |
| | | 149 | SDT_PROBE4(sdt, kernel, workqueue, return, |
| | | 150 | wq, wk, wq->wq_func, wq->wq_arg); |
107 | } | | 151 | } |
108 | } | | 152 | } |
109 | | | 153 | |
110 | static void | | 154 | static void |
111 | workqueue_worker(void *cookie) | | 155 | workqueue_worker(void *cookie) |
112 | { | | 156 | { |
113 | struct workqueue *wq = cookie; | | 157 | struct workqueue *wq = cookie; |
114 | struct workqueue_queue *q; | | 158 | struct workqueue_queue *q; |
115 | | | 159 | |
116 | /* find the workqueue of this kthread */ | | 160 | /* find the workqueue of this kthread */ |
117 | q = workqueue_queue_lookup(wq, curlwp->l_cpu); | | 161 | q = workqueue_queue_lookup(wq, curlwp->l_cpu); |
118 | | | 162 | |
| | | 163 | mutex_enter(&q->q_mutex); |
119 | for (;;) { | | 164 | for (;;) { |
120 | /* | | 165 | struct workqhead tmp; |
121 | * we violate abstraction of SIMPLEQ. | | 166 | |
122 | */ | | 167 | SIMPLEQ_INIT(&tmp); |
123 | | | 168 | |
124 | mutex_enter(&q->q_mutex); | | | |
125 | while (SIMPLEQ_EMPTY(&q->q_queue_pending)) | | 169 | while (SIMPLEQ_EMPTY(&q->q_queue_pending)) |
126 | cv_wait(&q->q_cv, &q->q_mutex); | | 170 | cv_wait(&q->q_cv, &q->q_mutex); |
127 | KASSERT(SIMPLEQ_EMPTY(&q->q_queue_running)); | | 171 | SIMPLEQ_CONCAT(&tmp, &q->q_queue_pending); |
128 | q->q_queue_running.sqh_first = | | | |
129 | q->q_queue_pending.sqh_first; /* XXX */ | | | |
130 | SIMPLEQ_INIT(&q->q_queue_pending); | | 172 | SIMPLEQ_INIT(&q->q_queue_pending); |
| | | 173 | |
| | | 174 | /* |
| | | 175 | * Mark the queue as actively running a batch of work |
| | | 176 | * by setting the generation number odd. |
| | | 177 | */ |
| | | 178 | q->q_gen |= 1; |
131 | mutex_exit(&q->q_mutex); | | 179 | mutex_exit(&q->q_mutex); |
132 | | | 180 | |
133 | workqueue_runlist(wq, &q->q_queue_running); | | 181 | workqueue_runlist(wq, &tmp); |
134 | | | 182 | |
| | | 183 | /* |
| | | 184 | * Notify workqueue_wait that we have completed a batch |
| | | 185 | * of work by incrementing the generation number. |
| | | 186 | */ |
135 | mutex_enter(&q->q_mutex); | | 187 | mutex_enter(&q->q_mutex); |
136 | KASSERT(!SIMPLEQ_EMPTY(&q->q_queue_running)); | | 188 | KASSERTMSG(q->q_gen & 1, "q=%p gen=%"PRIu64, q, q->q_gen); |
137 | SIMPLEQ_INIT(&q->q_queue_running); | | 189 | q->q_gen++; |
138 | if (__predict_false(q->q_waiter != NULL)) { | | 190 | cv_broadcast(&q->q_cv); |
139 | /* Wake up workqueue_wait */ | | | |
140 | cv_signal(&q->q_cv); | | | |
141 | } | | | |
142 | mutex_exit(&q->q_mutex); | | | |
143 | } | | 191 | } |
| | | 192 | mutex_exit(&q->q_mutex); |
144 | } | | 193 | } |
145 | | | 194 | |
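
[The hunk above is the heart of the change: the worker no longer exposes a q_queue_running list, and the old q_waiter handshake is gone. Instead, the worker moves the pending work onto a private list with SIMPLEQ_CONCAT() and brackets each batch with a generation counter, q_gen, which is odd while a batch is running and even while the worker is idle, so a waiter only has to watch for the counter to change. A minimal userland sketch of the same protocol, with pthreads standing in for kmutex(9)/condvar(9); the genq_* names are illustrative only, not kernel API:]

    /*
     * Sketch of the odd/even generation protocol.  (In the kernel
     * worker, marking the generation odd happens in the same
     * critical section that grabs the pending batch; that detail
     * is elided here.)
     */
    #include <pthread.h>
    #include <stdint.h>

    struct genq {
            pthread_mutex_t gq_mutex;
            pthread_cond_t  gq_cv;
            uint64_t        gq_gen;         /* odd = batch in progress */
    };

    /* Worker side: bracket each batch with gq_gen |= 1 ... gq_gen++. */
    static void
    genq_run_batch(struct genq *q, void (*run)(void *), void *arg)
    {
            pthread_mutex_lock(&q->gq_mutex);
            q->gq_gen |= 1;                 /* batch starts: gen goes odd */
            pthread_mutex_unlock(&q->gq_mutex);

            (*run)(arg);                    /* process the batch unlocked */

            pthread_mutex_lock(&q->gq_mutex);
            q->gq_gen++;                    /* batch done: gen goes even */
            pthread_cond_broadcast(&q->gq_cv);
            pthread_mutex_unlock(&q->gq_mutex);
    }

    /* Waiter side: if a batch is in flight, sleep until gen changes. */
    static void
    genq_wait_batch(struct genq *q)
    {
            uint64_t gen;

            pthread_mutex_lock(&q->gq_mutex);
            gen = q->gq_gen;
            if (gen & 1) {
                    do
                            pthread_cond_wait(&q->gq_cv, &q->gq_mutex);
                    while (gen == q->gq_gen);
            }
            pthread_mutex_unlock(&q->gq_mutex);
    }
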
146 | static void | | 195 | static void |
147 | workqueue_init(struct workqueue *wq, const char *name, | | 196 | workqueue_init(struct workqueue *wq, const char *name, |
148 | void (*callback_func)(struct work *, void *), void *callback_arg, | | 197 | void (*callback_func)(struct work *, void *), void *callback_arg, |
149 | pri_t prio, int ipl) | | 198 | pri_t prio, int ipl) |
150 | { | | 199 | { |
151 | | | 200 | |
152 | KASSERT(sizeof(wq->wq_name) > strlen(name)); | | 201 | KASSERT(sizeof(wq->wq_name) > strlen(name)); |
153 | strncpy(wq->wq_name, name, sizeof(wq->wq_name)); | | 202 | strncpy(wq->wq_name, name, sizeof(wq->wq_name)); |
154 | | | 203 | |
155 | wq->wq_prio = prio; | | 204 | wq->wq_prio = prio; |
156 | wq->wq_func = callback_func; | | 205 | wq->wq_func = callback_func; |
| @@ -158,27 +207,27 @@ workqueue_init(struct workqueue *wq, con | | | @@ -158,27 +207,27 @@ workqueue_init(struct workqueue *wq, con |
158 | } | | 207 | } |
159 | | | 208 | |
160 | static int | | 209 | static int |
161 | workqueue_initqueue(struct workqueue *wq, struct workqueue_queue *q, | | 210 | workqueue_initqueue(struct workqueue *wq, struct workqueue_queue *q, |
162 | int ipl, struct cpu_info *ci) | | 211 | int ipl, struct cpu_info *ci) |
163 | { | | 212 | { |
164 | int error, ktf; | | 213 | int error, ktf; |
165 | | | 214 | |
166 | KASSERT(q->q_worker == NULL); | | 215 | KASSERT(q->q_worker == NULL); |
167 | | | 216 | |
168 | mutex_init(&q->q_mutex, MUTEX_DEFAULT, ipl); | | 217 | mutex_init(&q->q_mutex, MUTEX_DEFAULT, ipl); |
169 | cv_init(&q->q_cv, wq->wq_name); | | 218 | cv_init(&q->q_cv, wq->wq_name); |
170 | SIMPLEQ_INIT(&q->q_queue_pending); | | 219 | SIMPLEQ_INIT(&q->q_queue_pending); |
171 | SIMPLEQ_INIT(&q->q_queue_running); | | 220 | q->q_gen = 0; |
172 | ktf = ((wq->wq_flags & WQ_MPSAFE) != 0 ? KTHREAD_MPSAFE : 0); | | 221 | ktf = ((wq->wq_flags & WQ_MPSAFE) != 0 ? KTHREAD_MPSAFE : 0); |
173 | if (wq->wq_prio < PRI_KERNEL) | | 222 | if (wq->wq_prio < PRI_KERNEL) |
174 | ktf |= KTHREAD_TS; | | 223 | ktf |= KTHREAD_TS; |
175 | if (ci) { | | 224 | if (ci) { |
176 | error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker, | | 225 | error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker, |
177 | wq, &q->q_worker, "%s/%u", wq->wq_name, ci->ci_index); | | 226 | wq, &q->q_worker, "%s/%u", wq->wq_name, ci->ci_index); |
178 | } else { | | 227 | } else { |
179 | error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker, | | 228 | error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker, |
180 | wq, &q->q_worker, "%s", wq->wq_name); | | 229 | wq, &q->q_worker, "%s", wq->wq_name); |
181 | } | | 230 | } |
182 | if (error != 0) { | | 231 | if (error != 0) { |
183 | mutex_destroy(&q->q_mutex); | | 232 | mutex_destroy(&q->q_mutex); |
184 | cv_destroy(&q->q_cv); | | 233 | cv_destroy(&q->q_cv); |
| @@ -196,44 +245,44 @@ static void | | | @@ -196,44 +245,44 @@ static void |
196 | workqueue_exit(struct work *wk, void *arg) | | 245 | workqueue_exit(struct work *wk, void *arg) |
197 | { | | 246 | { |
198 | struct workqueue_exitargs *wqe = (void *)wk; | | 247 | struct workqueue_exitargs *wqe = (void *)wk; |
199 | struct workqueue_queue *q = wqe->wqe_q; | | 248 | struct workqueue_queue *q = wqe->wqe_q; |
200 | | | 249 | |
201 | /* | | 250 | /* |
202 | * only competition at this point is workqueue_finiqueue. | | 251 | * only competition at this point is workqueue_finiqueue. |
203 | */ | | 252 | */ |
204 | | | 253 | |
205 | KASSERT(q->q_worker == curlwp); | | 254 | KASSERT(q->q_worker == curlwp); |
206 | KASSERT(SIMPLEQ_EMPTY(&q->q_queue_pending)); | | 255 | KASSERT(SIMPLEQ_EMPTY(&q->q_queue_pending)); |
207 | mutex_enter(&q->q_mutex); | | 256 | mutex_enter(&q->q_mutex); |
208 | q->q_worker = NULL; | | 257 | q->q_worker = NULL; |
209 | cv_signal(&q->q_cv); | | 258 | cv_broadcast(&q->q_cv); |
210 | mutex_exit(&q->q_mutex); | | 259 | mutex_exit(&q->q_mutex); |
211 | kthread_exit(0); | | 260 | kthread_exit(0); |
212 | } | | 261 | } |
213 | | | 262 | |
214 | static void | | 263 | static void |
215 | workqueue_finiqueue(struct workqueue *wq, struct workqueue_queue *q) | | 264 | workqueue_finiqueue(struct workqueue *wq, struct workqueue_queue *q) |
216 | { | | 265 | { |
217 | struct workqueue_exitargs wqe; | | 266 | struct workqueue_exitargs wqe; |
218 | | | 267 | |
219 | KASSERT(wq->wq_func == workqueue_exit); | | 268 | KASSERT(wq->wq_func == workqueue_exit); |
220 | | | 269 | |
221 | wqe.wqe_q = q; | | 270 | wqe.wqe_q = q; |
222 | KASSERT(SIMPLEQ_EMPTY(&q->q_queue_pending)); | | 271 | KASSERT(SIMPLEQ_EMPTY(&q->q_queue_pending)); |
223 | KASSERT(q->q_worker != NULL); | | 272 | KASSERT(q->q_worker != NULL); |
224 | mutex_enter(&q->q_mutex); | | 273 | mutex_enter(&q->q_mutex); |
225 | SIMPLEQ_INSERT_TAIL(&q->q_queue_pending, &wqe.wqe_wk, wk_entry); | | 274 | SIMPLEQ_INSERT_TAIL(&q->q_queue_pending, &wqe.wqe_wk, wk_entry); |
226 | cv_signal(&q->q_cv); | | 275 | cv_broadcast(&q->q_cv); |
227 | while (q->q_worker != NULL) { | | 276 | while (q->q_worker != NULL) { |
228 | cv_wait(&q->q_cv, &q->q_mutex); | | 277 | cv_wait(&q->q_cv, &q->q_mutex); |
229 | } | | 278 | } |
230 | mutex_exit(&q->q_mutex); | | 279 | mutex_exit(&q->q_mutex); |
231 | mutex_destroy(&q->q_mutex); | | 280 | mutex_destroy(&q->q_mutex); |
232 | cv_destroy(&q->q_cv); | | 281 | cv_destroy(&q->q_cv); |
233 | } | | 282 | } |
234 | | | 283 | |
235 | /* --- */ | | 284 | /* --- */ |
236 | | | 285 | |
237 | int | | 286 | int |
238 | workqueue_create(struct workqueue **wqp, const char *name, | | 287 | workqueue_create(struct workqueue **wqp, const char *name, |
239 | void (*callback_func)(struct work *, void *), void *callback_arg, | | 288 | void (*callback_func)(struct work *, void *), void *callback_arg, |
| @@ -271,121 +320,153 @@ workqueue_create(struct workqueue **wqp, | | | @@ -271,121 +320,153 @@ workqueue_create(struct workqueue **wqp, |
271 | error = workqueue_initqueue(wq, q, ipl, NULL); | | 320 | error = workqueue_initqueue(wq, q, ipl, NULL); |
272 | } | | 321 | } |
273 | | | 322 | |
274 | if (error != 0) { | | 323 | if (error != 0) { |
275 | workqueue_destroy(wq); | | 324 | workqueue_destroy(wq); |
276 | } else { | | 325 | } else { |
277 | *wqp = wq; | | 326 | *wqp = wq; |
278 | } | | 327 | } |
279 | | | 328 | |
280 | return error; | | 329 | return error; |
281 | } | | 330 | } |
282 | | | 331 | |
283 | static bool | | 332 | static bool |
284 | workqueue_q_wait(struct workqueue_queue *q, work_impl_t *wk_target) | | 333 | workqueue_q_wait(struct workqueue *wq, struct workqueue_queue *q, |
| | | 334 | work_impl_t *wk_target) |
285 | { | | 335 | { |
286 | work_impl_t *wk; | | 336 | work_impl_t *wk; |
287 | bool found = false; | | 337 | bool found = false; |
| | | 338 | uint64_t gen; |
288 | | | 339 | |
289 | mutex_enter(&q->q_mutex); | | 340 | mutex_enter(&q->q_mutex); |
290 | if (q->q_worker == curlwp) | | 341 | |
| | | 342 | /* |
| | | 343 | * Avoid a deadlock scenario. We can't guarantee that |
| | | 344 | * wk_target has completed at this point, but we can't wait for |
| | | 345 | * it either, so do nothing. |
| | | 346 | * |
| | | 347 | * XXX Are there use-cases that require this semantics? |
| | | 348 | */ |
| | | 349 | if (q->q_worker == curlwp) { |
| | | 350 | SDT_PROBE2(sdt, kernel, workqueue, wait__self, wq, wk_target); |
291 | goto out; | | 351 | goto out; |
| | | 352 | } |
| | | 353 | |
| | | 354 | /* |
| | | 355 | * Wait until the target is no longer pending. If we find it |
| | | 356 | * on this queue, the caller can stop looking in other queues. |
| | | 357 | * If we don't find it in this queue, however, we can't skip |
| | | 358 | * waiting -- it may be hidden in the running queue which we |
| | | 359 | * have no access to. |
| | | 360 | */ |
292 | again: | | 361 | again: |
293 | SIMPLEQ_FOREACH(wk, &q->q_queue_pending, wk_entry) { | | 362 | SIMPLEQ_FOREACH(wk, &q->q_queue_pending, wk_entry) { |
294 | if (wk == wk_target) | | 363 | if (wk == wk_target) { |
295 | goto found; | | 364 | SDT_PROBE2(sdt, kernel, workqueue, wait__hit, wq, wk); |
| | | 365 | found = true; |
| | | 366 | cv_wait(&q->q_cv, &q->q_mutex); |
| | | 367 | goto again; |
| | | 368 | } |
296 | } | | 369 | } |
297 | SIMPLEQ_FOREACH(wk, &q->q_queue_running, wk_entry) { | | 370 | |
298 | if (wk == wk_target) | | 371 | /* |
299 | goto found; | | 372 | * The target may be in the batch of work currently running, |
300 | } | | 373 | * but we can't touch that queue. So if there's anything |
301 | found: | | 374 | * running, wait until the generation changes. |
302 | if (wk != NULL) { | | 375 | */ |
303 | found = true; | | 376 | gen = q->q_gen; |
304 | KASSERT(q->q_waiter == NULL); | | 377 | if (gen & 1) { |
305 | q->q_waiter = wk; | | 378 | do |
306 | cv_wait(&q->q_cv, &q->q_mutex); | | 379 | cv_wait(&q->q_cv, &q->q_mutex); |
307 | goto again; | | 380 | while (gen == q->q_gen); |
308 | } | | 381 | } |
309 | if (q->q_waiter != NULL) | | 382 | |
310 | q->q_waiter = NULL; | | | |
311 | out: | | 383 | out: |
312 | mutex_exit(&q->q_mutex); | | 384 | mutex_exit(&q->q_mutex); |
313 | | | 385 | |
314 | return found; | | 386 | return found; |
315 | } | | 387 | } |
316 | | | 388 | |
317 | /* | | 389 | /* |
318 | * Wait for a specified work to finish. The caller must ensure that no new | | 390 | * Wait for a specified work to finish. The caller must ensure that no new |
319 | * work will be enqueued before calling workqueue_wait. Note that if the | | 391 | * work will be enqueued before calling workqueue_wait. Note that if the |
320 | * workqueue is WQ_PERCPU, the caller can enqueue a new work to another queue | | 392 | * workqueue is WQ_PERCPU, the caller can enqueue a new work to another queue |
321 | * other than the waiting queue. | | 393 | * other than the waiting queue. |
322 | */ | | 394 | */ |
323 | void | | 395 | void |
324 | workqueue_wait(struct workqueue *wq, struct work *wk) | | 396 | workqueue_wait(struct workqueue *wq, struct work *wk) |
325 | { | | 397 | { |
326 | struct workqueue_queue *q; | | 398 | struct workqueue_queue *q; |
327 | bool found; | | 399 | bool found; |
328 | | | 400 | |
| | | 401 | ASSERT_SLEEPABLE(); |
| | | 402 | |
| | | 403 | SDT_PROBE2(sdt, kernel, workqueue, wait__start, wq, wk); |
329 | if (ISSET(wq->wq_flags, WQ_PERCPU)) { | | 404 | if (ISSET(wq->wq_flags, WQ_PERCPU)) { |
330 | struct cpu_info *ci; | | 405 | struct cpu_info *ci; |
331 | CPU_INFO_ITERATOR cii; | | 406 | CPU_INFO_ITERATOR cii; |
332 | for (CPU_INFO_FOREACH(cii, ci)) { | | 407 | for (CPU_INFO_FOREACH(cii, ci)) { |
333 | q = workqueue_queue_lookup(wq, ci); | | 408 | q = workqueue_queue_lookup(wq, ci); |
334 | found = workqueue_q_wait(q, (work_impl_t *)wk); | | 409 | found = workqueue_q_wait(wq, q, (work_impl_t *)wk); |
335 | if (found) | | 410 | if (found) |
336 | break; | | 411 | break; |
337 | } | | 412 | } |
338 | } else { | | 413 | } else { |
339 | q = workqueue_queue_lookup(wq, NULL); | | 414 | q = workqueue_queue_lookup(wq, NULL); |
340 | (void) workqueue_q_wait(q, (work_impl_t *)wk); | | 415 | (void)workqueue_q_wait(wq, q, (work_impl_t *)wk); |
341 | } | | 416 | } |
| | | 417 | SDT_PROBE2(sdt, kernel, workqueue, wait__done, wq, wk); |
342 | } | | 418 | } |
343 | | | 419 | |
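
[For context on how these functions fit together: a consumer embeds a struct work in its own data, enqueues it, and with this change can use workqueue_wait() to drain it reliably before freeing. A hedged consumer sketch against the workqueue(9) API; my_softc, my_work_func, and friends are illustrative names, not from this file:]

    #include <sys/workqueue.h>

    struct my_softc {
            struct workqueue *sc_wq;
            struct work       sc_wk;        /* embedded work item */
    };

    static void
    my_work_func(struct work *wk, void *arg)
    {
            /* runs once per enqueue, in the workqueue's kthread */
    }

    static int
    my_attach(struct my_softc *sc)
    {
            /* one MP-safe kthread; WQ_PERCPU would give one per CPU */
            return workqueue_create(&sc->sc_wq, "mywq", my_work_func, sc,
                PRI_NONE, IPL_NONE, WQ_MPSAFE);
    }

    static void
    my_kick(struct my_softc *sc)
    {
            /* sc_wk must not already be pending (cf. the DEBUG check) */
            workqueue_enqueue(sc->sc_wq, &sc->sc_wk, NULL);
    }

    static void
    my_detach(struct my_softc *sc)
    {
            /* no new enqueues may race this, per workqueue_wait's contract */
            workqueue_wait(sc->sc_wq, &sc->sc_wk);
            workqueue_destroy(sc->sc_wq);
    }
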
344 | void | | 420 | void |
345 | workqueue_destroy(struct workqueue *wq) | | 421 | workqueue_destroy(struct workqueue *wq) |
346 | { | | 422 | { |
347 | struct workqueue_queue *q; | | 423 | struct workqueue_queue *q; |
348 | struct cpu_info *ci; | | 424 | struct cpu_info *ci; |
349 | CPU_INFO_ITERATOR cii; | | 425 | CPU_INFO_ITERATOR cii; |
350 | | | 426 | |
| | | 427 | ASSERT_SLEEPABLE(); |
| | | 428 | |
| | | 429 | SDT_PROBE1(sdt, kernel, workqueue, exit__start, wq); |
351 | wq->wq_func = workqueue_exit; | | 430 | wq->wq_func = workqueue_exit; |
352 | for (CPU_INFO_FOREACH(cii, ci)) { | | 431 | for (CPU_INFO_FOREACH(cii, ci)) { |
353 | q = workqueue_queue_lookup(wq, ci); | | 432 | q = workqueue_queue_lookup(wq, ci); |
354 | if (q->q_worker != NULL) { | | 433 | if (q->q_worker != NULL) { |
355 | workqueue_finiqueue(wq, q); | | 434 | workqueue_finiqueue(wq, q); |
356 | } | | 435 | } |
357 | } | | 436 | } |
| | | 437 | SDT_PROBE1(sdt, kernel, workqueue, exit__done, wq); |
358 | kmem_free(wq->wq_ptr, workqueue_size(wq->wq_flags)); | | 438 | kmem_free(wq->wq_ptr, workqueue_size(wq->wq_flags)); |
359 | } | | 439 | } |
360 | | | 440 | |
361 | #ifdef DEBUG | | 441 | #ifdef DEBUG |
362 | static void | | 442 | static void |
363 | workqueue_check_duplication(struct workqueue_queue *q, work_impl_t *wk) | | 443 | workqueue_check_duplication(struct workqueue_queue *q, work_impl_t *wk) |
364 | { | | 444 | { |
365 | work_impl_t *_wk; | | 445 | work_impl_t *_wk; |
366 | | | 446 | |
367 | SIMPLEQ_FOREACH(_wk, &q->q_queue_pending, wk_entry) { | | 447 | SIMPLEQ_FOREACH(_wk, &q->q_queue_pending, wk_entry) { |
368 | if (_wk == wk) | | 448 | if (_wk == wk) |
369 | panic("%s: tried to enqueue a queued work", __func__); | | 449 | panic("%s: tried to enqueue a queued work", __func__); |
370 | } | | 450 | } |
371 | } | | 451 | } |
372 | #endif | | 452 | #endif |
373 | | | 453 | |
374 | void | | 454 | void |
375 | workqueue_enqueue(struct workqueue *wq, struct work *wk0, struct cpu_info *ci) | | 455 | workqueue_enqueue(struct workqueue *wq, struct work *wk0, struct cpu_info *ci) |
376 | { | | 456 | { |
377 | struct workqueue_queue *q; | | 457 | struct workqueue_queue *q; |
378 | work_impl_t *wk = (void *)wk0; | | 458 | work_impl_t *wk = (void *)wk0; |
379 | | | 459 | |
| | | 460 | SDT_PROBE3(sdt, kernel, workqueue, enqueue, wq, wk0, ci); |
| | | 461 | |
380 | KASSERT(wq->wq_flags & WQ_PERCPU || ci == NULL); | | 462 | KASSERT(wq->wq_flags & WQ_PERCPU || ci == NULL); |
381 | q = workqueue_queue_lookup(wq, ci); | | 463 | q = workqueue_queue_lookup(wq, ci); |
382 | | | 464 | |
383 | mutex_enter(&q->q_mutex); | | 465 | mutex_enter(&q->q_mutex); |
384 | KASSERT(q->q_waiter == NULL); | | | |
385 | #ifdef DEBUG | | 466 | #ifdef DEBUG |
386 | workqueue_check_duplication(q, wk); | | 467 | workqueue_check_duplication(q, wk); |
387 | #endif | | 468 | #endif |
388 | SIMPLEQ_INSERT_TAIL(&q->q_queue_pending, wk, wk_entry); | | 469 | SIMPLEQ_INSERT_TAIL(&q->q_queue_pending, wk, wk_entry); |
389 | cv_signal(&q->q_cv); | | 470 | cv_broadcast(&q->q_cv); |
390 | mutex_exit(&q->q_mutex); | | 471 | mutex_exit(&q->q_mutex); |
391 | } | | 472 | } |