| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: workqueue.h,v 1.1.2.9 2013/09/08 15:58:24 riastradh Exp $ */ | | 1 | /* $NetBSD: workqueue.h,v 1.1.2.10 2013/09/08 16:40:36 riastradh Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2013 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2013 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Taylor R. Campbell. | | 8 | * by Taylor R. Campbell. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
| @@ -23,101 +23,240 @@ | | | @@ -23,101 +23,240 @@ |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | #ifndef _LINUX_WORKQUEUE_H_ | | 32 | #ifndef _LINUX_WORKQUEUE_H_ |
33 | #define _LINUX_WORKQUEUE_H_ | | 33 | #define _LINUX_WORKQUEUE_H_ |
34 | | | 34 | |
35 | #include <sys/callout.h> | | 35 | #include <sys/callout.h> |
| | | 36 | #include <sys/condvar.h> |
| | | 37 | #include <sys/mutex.h> |
| | | 38 | #include <sys/workqueue.h> |
36 | | | 39 | |
37 | #include <asm/bug.h> | | 40 | #include <asm/bug.h> |
38 | #include <linux/kernel.h> | | 41 | #include <linux/kernel.h> |
39 | | | 42 | |
40 | /* | | 43 | /* |
41 | * XXX This implementation is a load of bollocks -- callouts are | | 44 | * XXX This implementation is a load of bollocks -- callouts and |
42 | * expedient, but wrong, if for no reason other than that we never call | | 45 | * workqueues are expedient, but wrong, if for no reason other than |
43 | * callout_destroy. | | 46 | * that there is no destroy operation. |
| | | 47 | * |
| | | 48 | * XXX The amount of code in here is absurd; it should be given a |
| | | 49 | * proper source file. |
44 | */ | | 50 | */ |
45 | | | 51 | |
/*
 * A work item.  Each work_struct owns a private NetBSD workqueue(9)
 * queue plus a small state machine, protected by w_lock, tracking
 * where the item is in its life cycle (see linux_work_fn).
 */
struct work_struct {
	void (*w_fn)(struct work_struct *);	/* user callback */
	struct workqueue *w_wq;	/* private workqueue(9) queue */
	struct work w_wk;	/* workqueue(9) work item */
	kmutex_t w_lock;	/* protects w_state */
	kcondvar_t w_cv;	/* broadcast when w_state returns to IDLE */
	enum {
		WORK_IDLE,	/* not queued, not running */
		WORK_QUEUED,	/* enqueued, callback not yet started */
		WORK_CANCELLED,	/* cancellation requested, waiters blocked */
		WORK_INFLIGHT,	/* callback currently executing */
		WORK_REQUEUED,	/* rescheduled while callback in flight */
	} w_state;
};
49 | | | 66 | |
/*
 * workqueue(9) worker callback: run the user's w_fn and drive the
 * w_state machine.  Runs in the workqueue thread; arg is the
 * work_struct itself (wk is the embedded w_wk and is ignored).
 */
static void __unused
linux_work_fn(struct work *wk __unused, void *arg)
{
	struct work_struct *const work = arg;

	mutex_spin_enter(&work->w_lock);
	switch (work->w_state) {
	case WORK_IDLE:
		panic("work ran while idle: %p", work);
		break;

	case WORK_QUEUED:
		/* Mark in flight and drop the lock while w_fn runs. */
		work->w_state = WORK_INFLIGHT;
		mutex_spin_exit(&work->w_lock);
		(*work->w_fn)(work);
		mutex_spin_enter(&work->w_lock);
		/* Decide what happened while w_fn was running unlocked. */
		switch (work->w_state) {
		case WORK_IDLE:
		case WORK_QUEUED:
			panic("work hosed while in flight: %p", work);
			break;

		case WORK_INFLIGHT:	/* normal completion */
		case WORK_CANCELLED:	/* cancel raced with completion */
			work->w_state = WORK_IDLE;
			/* Wake anyone blocked in cancel_work_sync. */
			cv_broadcast(&work->w_cv);
			break;

		case WORK_REQUEUED:
			/* schedule_work ran while in flight: go again. */
			workqueue_enqueue(work->w_wq, &work->w_wk, NULL);
			work->w_state = WORK_QUEUED;
			break;

		default:
			panic("work %p in bad state: %d", work,
			    (int)work->w_state);
			break;
		}
		break;

	case WORK_CANCELLED:
		/* Cancelled before we started: skip w_fn, go idle. */
		work->w_state = WORK_IDLE;
		cv_broadcast(&work->w_cv);
		break;

	case WORK_INFLIGHT:
		panic("work already in flight: %p", work);
		break;

	case WORK_REQUEUED:
		panic("work requeued while not in flight: %p", work);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	mutex_spin_exit(&work->w_lock);
}
53 | | | 126 | |
/*
 * Initialize a work item: record the callback, create a dedicated
 * workqueue(9) queue for it, and set up the lock/cv/state machine.
 *
 * XXX A whole workqueue (i.e. a kernel thread) per work item is
 * grossly wasteful, and there is no matching destroy operation.
 */
static inline void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{
	int error;

	work->w_fn = fn;
	error = workqueue_create(&work->w_wq, "lnxworkq", &linux_work_fn,
	    work, PRI_NONE, IPL_VM, WQ_MPSAFE);
	if (error)
		panic("workqueue creation failed: %d", error); /* XXX */

	/*
	 * NOTE(review): MUTEX_DEFAULT at IPL_VM yields a spin mutex,
	 * yet cancel_work_sync cv_wait()s while holding it -- verify
	 * against mutex(9)/condvar(9) that this combination is legal.
	 */
	mutex_init(&work->w_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&work->w_cv, "linxwork");
	work->w_state = WORK_IDLE;
}
63 | | | 142 | |
/*
 * Queue the work item to run.  No-op if it is already queued; if the
 * callback is currently running, arrange for it to run once more
 * after it returns (Linux semantics).  A pending cancel wins over a
 * new schedule request.
 */
static inline void
schedule_work(struct work_struct *work)
{

	mutex_spin_enter(&work->w_lock);
	switch (work->w_state) {
	case WORK_IDLE:
		workqueue_enqueue(work->w_wq, &work->w_wk, NULL);
		work->w_state = WORK_QUEUED;
		break;

	case WORK_CANCELLED:
		/* Cancellation in progress: drop the request. */
		break;

	case WORK_INFLIGHT:
		/* Running now: linux_work_fn will requeue afterwards. */
		work->w_state = WORK_REQUEUED;
		break;

	case WORK_QUEUED:
	case WORK_REQUEUED:
		/* Already scheduled; nothing to do. */
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	mutex_spin_exit(&work->w_lock);
}
69 | | | 171 | |
/*
 * XXX This API can't possibly be right because there is no interlock.
 */
/*
 * Cancel the work item and wait until it is idle.  Returns true if
 * the work was queued, in flight, or already being cancelled when
 * called.
 *
 * NOTE(review): this cv_wait()s while holding what appears to be a
 * spin mutex (MUTEX_DEFAULT at IPL_VM, see INIT_WORK) -- confirm
 * against condvar(9) that sleeping here is legal.
 */
static inline bool
cancel_work_sync(struct work_struct *work)
{
	bool was_pending = false;

	mutex_spin_enter(&work->w_lock);
retry:	switch (work->w_state) {
	case WORK_IDLE:
		/* Nothing to cancel. */
		break;

	case WORK_QUEUED:
	case WORK_INFLIGHT:
	case WORK_REQUEUED:
		work->w_state = WORK_CANCELLED;
		/* FALLTHROUGH */
	case WORK_CANCELLED:
		/* Wait for linux_work_fn to notice and go idle. */
		cv_wait(&work->w_cv, &work->w_lock);
		was_pending = true;
		goto retry;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
	}
	mutex_spin_exit(&work->w_lock);

	return was_pending;
}
75 | | | 202 | |
/*
 * A delayed work item: a callout that, on expiry, hands the embedded
 * work_struct to the workqueue.
 */
struct delayed_work {
	struct callout dw_callout;	/* the delay timer */
	struct work_struct work;	/* not dw_work; name must match Linux */
};
| | | 207 | |
| | | 208 | static void __unused |
| | | 209 | linux_delayed_work_fn(void *arg) |
78 | { | | 210 | { |
79 | callout_schedule(&work->ws_callout, 0); | | 211 | struct delayed_work *const dw = arg; |
| | | 212 | |
| | | 213 | schedule_work(&dw->work); |
80 | } | | 214 | } |
81 | | | 215 | |
82 | static inline void | | 216 | static inline void |
83 | schedule_delayed_work(struct delayed_work *dw, unsigned long ticks) | | 217 | INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *)) |
84 | { | | 218 | { |
85 | KASSERT(ticks < INT_MAX); | | 219 | callout_init(&dw->dw_callout, CALLOUT_MPSAFE); |
86 | callout_schedule(&dw->work.ws_callout, (int)ticks); | | 220 | callout_setfunc(&dw->dw_callout, linux_delayed_work_fn, dw); |
| | | 221 | INIT_WORK(&dw->work, fn); |
87 | } | | 222 | } |
88 | | | 223 | |
/*
 * Map a work_struct pointer back to its containing delayed_work.
 * Valid only for work embedded in a struct delayed_work.
 */
static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
94 | | | 229 | |
95 | static inline bool | | 230 | static inline void |
96 | cancel_work_sync(struct work_struct *work) | | 231 | schedule_delayed_work(struct delayed_work *dw, unsigned long ticks) |
97 | { | | 232 | { |
98 | return !callout_halt(&work->ws_callout, NULL); | | 233 | KASSERT(ticks < INT_MAX); |
| | | 234 | callout_schedule(&dw->dw_callout, (int)ticks); |
99 | } | | 235 | } |
100 | | | 236 | |
101 | static inline bool | | 237 | static inline bool |
102 | cancel_delayed_work(struct delayed_work *dw) | | 238 | cancel_delayed_work(struct delayed_work *dw) |
103 | { | | 239 | { |
104 | return cancel_work(&dw->work); | | 240 | return !callout_stop(&dw->dw_callout); |
105 | } | | 241 | } |
106 | | | 242 | |
107 | static inline bool | | 243 | static inline bool |
108 | cancel_delayed_work_sync(struct delayed_work *dw) | | 244 | cancel_delayed_work_sync(struct delayed_work *dw) |
109 | { | | 245 | { |
110 | return cancel_work_sync(&dw->work); | | 246 | const bool callout_was_pending = !callout_stop(&dw->dw_callout); |
| | | 247 | const bool work_was_pending = cancel_work_sync(&dw->work); |
| | | 248 | |
| | | 249 | return (callout_was_pending || work_was_pending); |
111 | } | | 250 | } |
112 | | | 251 | |
113 | /* | | 252 | /* |
114 | * XXX Bogus stubs for Linux work queues. | | 253 | * XXX Bogus stubs for Linux work queues. |
115 | */ | | 254 | */ |
116 | | | 255 | |
117 | struct workqueue_struct; | | 256 | struct workqueue_struct; |
118 | | | 257 | |
static inline struct workqueue_struct *
alloc_ordered_workqueue(const char *name __unused, int flags __unused)
{
	/*
	 * Bogus stub: returns a poison sentinel, usable only as a
	 * non-null handle; it must never be dereferenced.
	 */
	return (void *)(uintptr_t)0xdeadbeef;
}