Sun Sep 8 16:40:36 2013 UTC ()
Rework Linux `work' to use NetBSD workqueues, not callouts.

Callers expect to be able to allocate in the workers, which callouts
don't allow.

Delayed work uses callouts only to delay enqueueing work.

Linux `workqueues' are still stubs.


(riastradh)
diff -r1.1.2.9 -r1.1.2.10 src/sys/external/bsd/drm2/include/linux/workqueue.h

cvs diff -r1.1.2.9 -r1.1.2.10 src/sys/external/bsd/drm2/include/linux/Attic/workqueue.h

--- src/sys/external/bsd/drm2/include/linux/Attic/workqueue.h 2013/09/08 15:58:24 1.1.2.9
+++ src/sys/external/bsd/drm2/include/linux/Attic/workqueue.h 2013/09/08 16:40:36 1.1.2.10
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: workqueue.h,v 1.1.2.9 2013/09/08 15:58:24 riastradh Exp $ */ 1/* $NetBSD: workqueue.h,v 1.1.2.10 2013/09/08 16:40:36 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2013 The NetBSD Foundation, Inc. 4 * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell. 8 * by Taylor R. Campbell.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -23,101 +23,240 @@ @@ -23,101 +23,240 @@
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#ifndef _LINUX_WORKQUEUE_H_ 32#ifndef _LINUX_WORKQUEUE_H_
33#define _LINUX_WORKQUEUE_H_ 33#define _LINUX_WORKQUEUE_H_
34 34
35#include <sys/callout.h> 35#include <sys/callout.h>
 36#include <sys/condvar.h>
 37#include <sys/mutex.h>
 38#include <sys/workqueue.h>
36 39
37#include <asm/bug.h> 40#include <asm/bug.h>
38#include <linux/kernel.h> 41#include <linux/kernel.h>
39 42
40/* 43/*
41 * XXX This implementation is a load of bollocks -- callouts are 44 * XXX This implementation is a load of bollocks -- callouts and
42 * expedient, but wrong, if for no reason other than that we never call 45 * workqueues are expedient, but wrong, if for no reason other than
43 * callout_destroy. 46 * that there is no destroy operation.
 47 *
 48 * XXX The amount of code in here is absurd; it should be given a
 49 * proper source file.
44 */ 50 */
45 51
46struct work_struct { 52struct work_struct {
47 struct callout ws_callout; 53 void (*w_fn)(struct work_struct *);
 54 struct workqueue *w_wq;
 55 struct work w_wk;
 56 kmutex_t w_lock;
 57 kcondvar_t w_cv;
 58 enum {
 59 WORK_IDLE,
 60 WORK_QUEUED,
 61 WORK_CANCELLED,
 62 WORK_INFLIGHT,
 63 WORK_REQUEUED,
 64 } w_state;
48}; 65};
49 66
50struct delayed_work { 67static void __unused
51 struct work_struct work; 68linux_work_fn(struct work *wk __unused, void *arg)
52}; 69{
 70 struct work_struct *const work = arg;
 71
 72 mutex_spin_enter(&work->w_lock);
 73 switch (work->w_state) {
 74 case WORK_IDLE:
 75 panic("work ran while idle: %p", work);
 76 break;
 77
 78 case WORK_QUEUED:
 79 work->w_state = WORK_INFLIGHT;
 80 mutex_spin_exit(&work->w_lock);
 81 (*work->w_fn)(work);
 82 mutex_spin_enter(&work->w_lock);
 83 switch (work->w_state) {
 84 case WORK_IDLE:
 85 case WORK_QUEUED:
 86 panic("work hosed while in flight: %p", work);
 87 break;
 88
 89 case WORK_INFLIGHT:
 90 case WORK_CANCELLED:
 91 work->w_state = WORK_IDLE;
 92 cv_broadcast(&work->w_cv);
 93 break;
 94
 95 case WORK_REQUEUED:
 96 workqueue_enqueue(work->w_wq, &work->w_wk, NULL);
 97 work->w_state = WORK_QUEUED;
 98 break;
 99
 100 default:
 101 panic("work %p in bad state: %d", work,
 102 (int)work->w_state);
 103 break;
 104 }
 105 break;
 106
 107 case WORK_CANCELLED:
 108 work->w_state = WORK_IDLE;
 109 cv_broadcast(&work->w_cv);
 110 break;
 111
 112 case WORK_INFLIGHT:
 113 panic("work already in flight: %p", work);
 114 break;
 115
 116 case WORK_REQUEUED:
 117 panic("work requeued while not in flight: %p", work);
 118 break;
 119
 120 default:
 121 panic("work %p in bad state: %d", work, (int)work->w_state);
 122 break;
 123 }
 124 mutex_spin_exit(&work->w_lock);
 125}
53 126
54static inline void 127static inline void
55INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *)) 128INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
56{ 129{
 130 int error;
57 131
58 callout_init(&work->ws_callout, 0); 132 work->w_fn = fn;
59 133 error = workqueue_create(&work->w_wq, "lnxworkq", &linux_work_fn,
60 /* XXX This cast business is sketchy. */ 134 work, PRI_NONE, IPL_VM, WQ_MPSAFE);
61 callout_setfunc(&work->ws_callout, (void (*)(void *))fn, work); 135 if (error)
 136 panic("workqueue creation failed: %d", error); /* XXX */
 137
 138 mutex_init(&work->w_lock, MUTEX_DEFAULT, IPL_VM);
 139 cv_init(&work->w_cv, "linxwork");
 140 work->w_state = WORK_IDLE;
62} 141}
63 142
64static inline void 143static inline void
65INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *)) 144schedule_work(struct work_struct *work)
66{ 145{
67 INIT_WORK(&dw->work, fn); 146
 147 mutex_spin_enter(&work->w_lock);
 148 switch (work->w_state) {
 149 case WORK_IDLE:
 150 workqueue_enqueue(work->w_wq, &work->w_wk, NULL);
 151 work->w_state = WORK_QUEUED;
 152 break;
 153
 154 case WORK_CANCELLED:
 155 break;
 156
 157 case WORK_INFLIGHT:
 158 work->w_state = WORK_REQUEUED;
 159 break;
 160
 161 case WORK_QUEUED:
 162 case WORK_REQUEUED:
 163 break;
 164
 165 default:
 166 panic("work %p in bad state: %d", work, (int)work->w_state);
 167 break;
 168 }
 169 mutex_spin_exit(&work->w_lock);
68} 170}
69 171
70static inline struct delayed_work * 172/*
71to_delayed_work(struct work_struct *work) 173 * XXX This API can't possibly be right because there is no interlock.
 174 */
 175static inline bool
 176cancel_work_sync(struct work_struct *work)
72{ 177{
73 return container_of(work, struct delayed_work, work); 178 bool was_pending = false;
 179
 180 mutex_spin_enter(&work->w_lock);
 181retry: switch (work->w_state) {
 182 case WORK_IDLE:
 183 break;
 184
 185 case WORK_QUEUED:
 186 case WORK_INFLIGHT:
 187 case WORK_REQUEUED:
 188 work->w_state = WORK_CANCELLED;
 189 /* FALLTHROUGH */
 190 case WORK_CANCELLED:
 191 cv_wait(&work->w_cv, &work->w_lock);
 192 was_pending = true;
 193 goto retry;
 194
 195 default:
 196 panic("work %p in bad state: %d", work, (int)work->w_state);
 197 }
 198 mutex_spin_exit(&work->w_lock);
 199
 200 return was_pending;
74} 201}
75 202
76static inline void 203struct delayed_work {
77schedule_work(struct work_struct *work) 204 struct callout dw_callout;
 205 struct work_struct work; /* not dw_work; name must match Linux */
 206};
 207
 208static void __unused
 209linux_delayed_work_fn(void *arg)
78{ 210{
79 callout_schedule(&work->ws_callout, 0); 211 struct delayed_work *const dw = arg;
 212
 213 schedule_work(&dw->work);
80} 214}
81 215
82static inline void 216static inline void
83schedule_delayed_work(struct delayed_work *dw, unsigned long ticks) 217INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
84{ 218{
85 KASSERT(ticks < INT_MAX); 219 callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
86 callout_schedule(&dw->work.ws_callout, (int)ticks); 220 callout_setfunc(&dw->dw_callout, linux_delayed_work_fn, dw);
 221 INIT_WORK(&dw->work, fn);
87} 222}
88 223
89static inline bool 224static inline struct delayed_work *
90cancel_work(struct work_struct *work) 225to_delayed_work(struct work_struct *work)
91{ 226{
92 return !callout_stop(&work->ws_callout); 227 return container_of(work, struct delayed_work, work);
93} 228}
94 229
95static inline bool 230static inline void
96cancel_work_sync(struct work_struct *work) 231schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
97{ 232{
98 return !callout_halt(&work->ws_callout, NULL); 233 KASSERT(ticks < INT_MAX);
 234 callout_schedule(&dw->dw_callout, (int)ticks);
99} 235}
100 236
101static inline bool 237static inline bool
102cancel_delayed_work(struct delayed_work *dw) 238cancel_delayed_work(struct delayed_work *dw)
103{ 239{
104 return cancel_work(&dw->work); 240 return !callout_stop(&dw->dw_callout);
105} 241}
106 242
107static inline bool 243static inline bool
108cancel_delayed_work_sync(struct delayed_work *dw) 244cancel_delayed_work_sync(struct delayed_work *dw)
109{ 245{
110 return cancel_work_sync(&dw->work); 246 const bool callout_was_pending = !callout_stop(&dw->dw_callout);
 247 const bool work_was_pending = cancel_work_sync(&dw->work);
 248
 249 return (callout_was_pending || work_was_pending);
111} 250}
112 251
113/* 252/*
114 * XXX Bogus stubs for Linux work queues. 253 * XXX Bogus stubs for Linux work queues.
115 */ 254 */
116 255
117struct workqueue_struct; 256struct workqueue_struct;
118 257
119static inline struct workqueue_struct * 258static inline struct workqueue_struct *
120alloc_ordered_workqueue(const char *name __unused, int flags __unused) 259alloc_ordered_workqueue(const char *name __unused, int flags __unused)
121{ 260{
122 return (void *)(uintptr_t)0xdeadbeef; 261 return (void *)(uintptr_t)0xdeadbeef;
123} 262}