Wed Jan 13 02:20:15 2021 UTC
threadpool(9): Tidy up thread naming.

- `dispatcher', not `overseer' -- much more appropriate metaphor.
- Just omit `/-1' from unbound thread names.
- Just omit `@-1' from dynamic-priority (PRI_NONE) thread names.


(riastradh)
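With this change, the dispatcher for the per-CPU pool on, say, CPU 2 at priority 220 is
named "pooldisp/2@220", while the dispatcher for the unbound, dynamic-priority pool is
named just "pooldisp" instead of the old "pooloverseer/-1@-1".  A small userland mock-up
of the suffix logic (the kernel version is the threadnamesuffix() helper added in the
diff below; the plain int CPU index here stands in for the kernel's struct cpu_info
pointer) illustrates the resulting names:

    #include <stdio.h>
    #include <string.h>

    #define PRI_NONE    (-1)    /* stand-in for the kernel's PRI_NONE */

    /*
     * Mock-up of the new threadnamesuffix(): append "/<cpu>" only for
     * bound pools and "@<pri>" only for fixed-priority pools, so the
     * "-1" placeholders disappear from thread names.
     */
    static void
    threadnamesuffix(char *buf, size_t buflen, int cpu, int pri)
    {

        buf[0] = '\0';
        if (cpu >= 0)           /* bound pool (kernel checks ci != NULL) */
            snprintf(buf + strlen(buf), buflen - strlen(buf), "/%d", cpu);
        if (pri != PRI_NONE)
            snprintf(buf + strlen(buf), buflen - strlen(buf), "@%d", pri);
    }

    int
    main(void)
    {
        char suffix[16];

        threadnamesuffix(suffix, sizeof(suffix), 2, 220);
        printf("pooldisp%s\n", suffix);     /* pooldisp/2@220 */

        threadnamesuffix(suffix, sizeof(suffix), -1, PRI_NONE);
        printf("poolthread%s\n", suffix);   /* poolthread, not poolthread/-1@-1 */

        return 0;
    }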
diff -r1.20 -r1.21 src/sys/kern/kern_threadpool.c

cvs diff -r1.20 -r1.21 src/sys/kern/kern_threadpool.c

--- src/sys/kern/kern_threadpool.c 2021/01/13 02:19:08 1.20
+++ src/sys/kern/kern_threadpool.c 2021/01/13 02:20:15 1.21
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_threadpool.c,v 1.20 2021/01/13 02:19:08 riastradh Exp $ */ 1/* $NetBSD: kern_threadpool.c,v 1.21 2021/01/13 02:20:15 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc. 4 * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell and Jason R. Thorpe. 8 * by Taylor R. Campbell and Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -23,75 +23,75 @@ @@ -23,75 +23,75 @@
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Thread pools. 33 * Thread pools.
34 * 34 *
35 * A thread pool is a collection of worker threads idle or running 35 * A thread pool is a collection of worker threads idle or running
 36 * jobs, together with an overseer thread that does not run jobs but 36 * jobs, together with a dispatcher thread that does not run jobs but
37 * can be given jobs to assign to a worker thread. Scheduling a job in 37 * can be given jobs to assign to a worker thread. Scheduling a job in
38 * a thread pool does not allocate or even sleep at all, except perhaps 38 * a thread pool does not allocate or even sleep at all, except perhaps
39 * on an adaptive lock, unlike kthread_create. Jobs reuse threads, so 39 * on an adaptive lock, unlike kthread_create. Jobs reuse threads, so
40 * they do not incur the expense of creating and destroying kthreads 40 * they do not incur the expense of creating and destroying kthreads
41 * unless there is not much work to be done. 41 * unless there is not much work to be done.
42 * 42 *
43 * A per-CPU thread pool (threadpool_percpu) is a collection of thread 43 * A per-CPU thread pool (threadpool_percpu) is a collection of thread
44 * pools, one per CPU bound to that CPU. For each priority level in 44 * pools, one per CPU bound to that CPU. For each priority level in
45 * use, there is one shared unbound thread pool (i.e., pool of threads 45 * use, there is one shared unbound thread pool (i.e., pool of threads
46 * not bound to any CPU) and one shared per-CPU thread pool. 46 * not bound to any CPU) and one shared per-CPU thread pool.
47 * 47 *
48 * To use the unbound thread pool at priority pri, call 48 * To use the unbound thread pool at priority pri, call
49 * threadpool_get(&pool, pri). When you're done, call 49 * threadpool_get(&pool, pri). When you're done, call
50 * threadpool_put(pool, pri). 50 * threadpool_put(pool, pri).
51 * 51 *
52 * To use the per-CPU thread pools at priority pri, call 52 * To use the per-CPU thread pools at priority pri, call
53 * threadpool_percpu_get(&pool_percpu, pri), and then use the thread 53 * threadpool_percpu_get(&pool_percpu, pri), and then use the thread
54 * pool returned by threadpool_percpu_ref(pool_percpu) for the current 54 * pool returned by threadpool_percpu_ref(pool_percpu) for the current
55 * CPU, or by threadpool_percpu_ref_remote(pool_percpu, ci) for another 55 * CPU, or by threadpool_percpu_ref_remote(pool_percpu, ci) for another
56 * CPU. When you're done, call threadpool_percpu_put(pool_percpu, 56 * CPU. When you're done, call threadpool_percpu_put(pool_percpu,
57 * pri). 57 * pri).
58 * 58 *
59 * +--MACHINE-----------------------------------------------+ 59 * +--MACHINE-----------------------------------------------------+
60 * | +--CPU 0-------+ +--CPU 1-------+ +--CPU n-------+ | 60 * | +--CPU 0---------+ +--CPU 1---------+ +--CPU n---------+ |
61 * | | <overseer 0> | | <overseer 1> | ... | <overseer n> | | 61 * | | <dispatcher 0> | | <dispatcher 1> | ... | <dispatcher n> | |
62 * | | <idle 0a> | | <running 1a> | ... | <idle na> | | 62 * | | <idle 0a> | | <running 1a> | ... | <idle na> | |
63 * | | <running 0b> | | <running 1b> | ... | <idle nb> | | 63 * | | <running 0b> | | <running 1b> | ... | <idle nb> | |
64 * | | . | | . | ... | . | | 64 * | | . | | . | ... | . | |
65 * | | . | | . | ... | . | | 65 * | | . | | . | ... | . | |
66 * | | . | | . | ... | . | | 66 * | | . | | . | ... | . | |
67 * | +--------------+ +--------------+ +--------------+ | 67 * | +----------------+ +----------------+ +----------------+ |
68 * | +--unbound---------+ | 68 * | +--unbound-----------+ |
69 * | | <overseer n+1> | | 69 * | | <dispatcher n+1> | |
70 * | | <idle (n+1)a> | | 70 * | | <idle (n+1)a> | |
71 * | | <running (n+1)b> | | 71 * | | <running (n+1)b> | |
72 * | +------------------+ | 72 * | +--------------------+ |
73 * +--------------------------------------------------------+ 73 * +--------------------------------------------------------------+
74 * 74 *
75 * XXX Why one overseer per CPU? I did that originally to avoid 75 * XXX Why one dispatcher per CPU? I did that originally to avoid
76 * touching remote CPUs' memory when scheduling a job, but that still 76 * touching remote CPUs' memory when scheduling a job, but that still
77 * requires interprocessor synchronization. Perhaps we could get by 77 * requires interprocessor synchronization. Perhaps we could get by
78 * with a single overseer thread, at the expense of another pointer in 78 * with a single dispatcher thread, at the expense of another pointer
79 * struct threadpool_job to identify the CPU on which it must run 79 * in struct threadpool_job to identify the CPU on which it must run in
80 * in order for the overseer to schedule it correctly. 80 * order for the dispatcher to schedule it correctly.
81 */ 81 */
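The comment above describes the consumer-facing protocol.  A minimal usage sketch of the
unbound-pool case follows; the example_* names are hypothetical, and the job-handling
calls (threadpool_job_init, threadpool_job_done, threadpool_cancel_job,
threadpool_job_destroy) come from threadpool(9) rather than from this diff, so consult
that page for the authoritative signatures:

    #include <sys/param.h>
    #include <sys/mutex.h>
    #include <sys/threadpool.h>

    static kmutex_t                 example_lock;
    static struct threadpool        *example_pool;
    static struct threadpool_job    example_job;

    static void
    example_job_fn(struct threadpool_job *job)
    {

        /* ... do the deferred work ... */

        /* threadpool_job_done() is called under the job lock. */
        mutex_enter(&example_lock);
        threadpool_job_done(job);       /* job may now be scheduled again */
        mutex_exit(&example_lock);
    }

    static int
    example_init(void)
    {
        int error;

        /* Shared unbound pool at dynamic (PRI_NONE) priority. */
        error = threadpool_get(&example_pool, PRI_NONE);
        if (error)
            return error;
        mutex_init(&example_lock, MUTEX_DEFAULT, IPL_NONE);
        threadpool_job_init(&example_job, example_job_fn, &example_lock,
            "examplejob");
        return 0;
    }

    static void
    example_kick(void)
    {

        /* Scheduling neither allocates nor sleeps, per the comment above. */
        mutex_enter(&example_lock);
        threadpool_schedule_job(example_pool, &example_job);
        mutex_exit(&example_lock);
    }

    static void
    example_fini(void)
    {

        mutex_enter(&example_lock);
        threadpool_cancel_job(example_pool, &example_job);
        mutex_exit(&example_lock);
        threadpool_job_destroy(&example_job);
        mutex_destroy(&example_lock);
        threadpool_put(example_pool, PRI_NONE);
    }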
82 82
83#include <sys/cdefs.h> 83#include <sys/cdefs.h>
84__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.20 2021/01/13 02:19:08 riastradh Exp $"); 84__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.21 2021/01/13 02:20:15 riastradh Exp $");
85 85
86#include <sys/types.h> 86#include <sys/types.h>
87#include <sys/param.h> 87#include <sys/param.h>
88#include <sys/atomic.h> 88#include <sys/atomic.h>
89#include <sys/condvar.h> 89#include <sys/condvar.h>
90#include <sys/cpu.h> 90#include <sys/cpu.h>
91#include <sys/kernel.h> 91#include <sys/kernel.h>
92#include <sys/kmem.h> 92#include <sys/kmem.h>
93#include <sys/kthread.h> 93#include <sys/kthread.h>
94#include <sys/mutex.h> 94#include <sys/mutex.h>
95#include <sys/once.h> 95#include <sys/once.h>
96#include <sys/percpu.h> 96#include <sys/percpu.h>
97#include <sys/pool.h> 97#include <sys/pool.h>
@@ -131,47 +131,47 @@ SDT_PROBE_DEFINE2(sdt, kernel, threadpoo @@ -131,47 +131,47 @@ SDT_PROBE_DEFINE2(sdt, kernel, threadpoo
131SDT_PROBE_DEFINE3(sdt, kernel, threadpool, create__success, 131SDT_PROBE_DEFINE3(sdt, kernel, threadpool, create__success,
132 "struct cpu_info *"/*ci*/, "pri_t"/*pri*/, "struct threadpool *"/*pool*/); 132 "struct cpu_info *"/*ci*/, "pri_t"/*pri*/, "struct threadpool *"/*pool*/);
133SDT_PROBE_DEFINE3(sdt, kernel, threadpool, create__failure, 133SDT_PROBE_DEFINE3(sdt, kernel, threadpool, create__failure,
134 "struct cpu_info *"/*ci*/, "pri_t"/*pri*/, "int"/*error*/); 134 "struct cpu_info *"/*ci*/, "pri_t"/*pri*/, "int"/*error*/);
135SDT_PROBE_DEFINE1(sdt, kernel, threadpool, destroy, 135SDT_PROBE_DEFINE1(sdt, kernel, threadpool, destroy,
136 "struct threadpool *"/*pool*/); 136 "struct threadpool *"/*pool*/);
137SDT_PROBE_DEFINE2(sdt, kernel, threadpool, destroy__wait, 137SDT_PROBE_DEFINE2(sdt, kernel, threadpool, destroy__wait,
138 "struct threadpool *"/*pool*/, "uint64_t"/*refcnt*/); 138 "struct threadpool *"/*pool*/, "uint64_t"/*refcnt*/);
139 139
140SDT_PROBE_DEFINE2(sdt, kernel, threadpool, schedule__job, 140SDT_PROBE_DEFINE2(sdt, kernel, threadpool, schedule__job,
141 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/); 141 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
142SDT_PROBE_DEFINE2(sdt, kernel, threadpool, schedule__job__running, 142SDT_PROBE_DEFINE2(sdt, kernel, threadpool, schedule__job__running,
143 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/); 143 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
144SDT_PROBE_DEFINE2(sdt, kernel, threadpool, schedule__job__overseer, 144SDT_PROBE_DEFINE2(sdt, kernel, threadpool, schedule__job__dispatcher,
145 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/); 145 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
146SDT_PROBE_DEFINE3(sdt, kernel, threadpool, schedule__job__thread, 146SDT_PROBE_DEFINE3(sdt, kernel, threadpool, schedule__job__thread,
147 "struct threadpool *"/*pool*/, 147 "struct threadpool *"/*pool*/,
148 "struct threadpool_job *"/*job*/, 148 "struct threadpool_job *"/*job*/,
149 "struct lwp *"/*thread*/); 149 "struct lwp *"/*thread*/);
150 150
151SDT_PROBE_DEFINE1(sdt, kernel, threadpool, overseer__start, 151SDT_PROBE_DEFINE1(sdt, kernel, threadpool, dispatcher__start,
152 "struct threadpool *"/*pool*/); 152 "struct threadpool *"/*pool*/);
153SDT_PROBE_DEFINE1(sdt, kernel, threadpool, overseer__dying, 153SDT_PROBE_DEFINE1(sdt, kernel, threadpool, dispatcher__dying,
154 "struct threadpool *"/*pool*/); 154 "struct threadpool *"/*pool*/);
155SDT_PROBE_DEFINE1(sdt, kernel, threadpool, overseer__spawn, 155SDT_PROBE_DEFINE1(sdt, kernel, threadpool, dispatcher__spawn,
156 "struct threadpool *"/*pool*/); 156 "struct threadpool *"/*pool*/);
157SDT_PROBE_DEFINE2(sdt, kernel, threadpool, overseer__race, 157SDT_PROBE_DEFINE2(sdt, kernel, threadpool, dispatcher__race,
158 "struct threadpool *"/*pool*/, 158 "struct threadpool *"/*pool*/,
159 "struct threadpool_job *"/*job*/); 159 "struct threadpool_job *"/*job*/);
160SDT_PROBE_DEFINE3(sdt, kernel, threadpool, overseer__assign, 160SDT_PROBE_DEFINE3(sdt, kernel, threadpool, dispatcher__assign,
161 "struct threadpool *"/*pool*/, 161 "struct threadpool *"/*pool*/,
162 "struct threadpool_job *"/*job*/, 162 "struct threadpool_job *"/*job*/,
163 "struct lwp *"/*thread*/); 163 "struct lwp *"/*thread*/);
164SDT_PROBE_DEFINE1(sdt, kernel, threadpool, overseer__exit, 164SDT_PROBE_DEFINE1(sdt, kernel, threadpool, dispatcher__exit,
165 "struct threadpool *"/*pool*/); 165 "struct threadpool *"/*pool*/);
166 166
167SDT_PROBE_DEFINE1(sdt, kernel, threadpool, thread__start, 167SDT_PROBE_DEFINE1(sdt, kernel, threadpool, thread__start,
168 "struct threadpool *"/*pool*/); 168 "struct threadpool *"/*pool*/);
169SDT_PROBE_DEFINE1(sdt, kernel, threadpool, thread__dying, 169SDT_PROBE_DEFINE1(sdt, kernel, threadpool, thread__dying,
170 "struct threadpool *"/*pool*/); 170 "struct threadpool *"/*pool*/);
171SDT_PROBE_DEFINE2(sdt, kernel, threadpool, thread__job, 171SDT_PROBE_DEFINE2(sdt, kernel, threadpool, thread__job,
172 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/); 172 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
173SDT_PROBE_DEFINE1(sdt, kernel, threadpool, thread__exit, 173SDT_PROBE_DEFINE1(sdt, kernel, threadpool, thread__exit,
174 "struct threadpool *"/*pool*/); 174 "struct threadpool *"/*pool*/);
175 175
176/* Data structures */ 176/* Data structures */
177 177
@@ -179,51 +179,51 @@ TAILQ_HEAD(job_head, threadpool_job); @@ -179,51 +179,51 @@ TAILQ_HEAD(job_head, threadpool_job);
179TAILQ_HEAD(thread_head, threadpool_thread); 179TAILQ_HEAD(thread_head, threadpool_thread);
180 180
181struct threadpool_thread { 181struct threadpool_thread {
182 struct lwp *tpt_lwp; 182 struct lwp *tpt_lwp;
183 char *tpt_lwp_savedname; 183 char *tpt_lwp_savedname;
184 struct threadpool *tpt_pool; 184 struct threadpool *tpt_pool;
185 struct threadpool_job *tpt_job; 185 struct threadpool_job *tpt_job;
186 kcondvar_t tpt_cv; 186 kcondvar_t tpt_cv;
187 TAILQ_ENTRY(threadpool_thread) tpt_entry; 187 TAILQ_ENTRY(threadpool_thread) tpt_entry;
188}; 188};
189 189
190struct threadpool { 190struct threadpool {
191 kmutex_t tp_lock; 191 kmutex_t tp_lock;
192 struct threadpool_thread tp_overseer; 192 struct threadpool_thread tp_dispatcher;
193 struct job_head tp_jobs; 193 struct job_head tp_jobs;
194 struct thread_head tp_idle_threads; 194 struct thread_head tp_idle_threads;
195 uint64_t tp_refcnt; 195 uint64_t tp_refcnt;
196 int tp_flags; 196 int tp_flags;
197#define THREADPOOL_DYING 0x01 197#define THREADPOOL_DYING 0x01
198 struct cpu_info *tp_cpu; 198 struct cpu_info *tp_cpu;
199 pri_t tp_pri; 199 pri_t tp_pri;
200}; 200};
201 201
202static void threadpool_hold(struct threadpool *); 202static void threadpool_hold(struct threadpool *);
203static void threadpool_rele(struct threadpool *); 203static void threadpool_rele(struct threadpool *);
204 204
205static int threadpool_percpu_create(struct threadpool_percpu **, pri_t); 205static int threadpool_percpu_create(struct threadpool_percpu **, pri_t);
206static void threadpool_percpu_destroy(struct threadpool_percpu *); 206static void threadpool_percpu_destroy(struct threadpool_percpu *);
207static void threadpool_percpu_init(void *, void *, struct cpu_info *); 207static void threadpool_percpu_init(void *, void *, struct cpu_info *);
208static void threadpool_percpu_ok(void *, void *, struct cpu_info *); 208static void threadpool_percpu_ok(void *, void *, struct cpu_info *);
209static void threadpool_percpu_fini(void *, void *, struct cpu_info *); 209static void threadpool_percpu_fini(void *, void *, struct cpu_info *);
210 210
211static threadpool_job_fn_t threadpool_job_dead; 211static threadpool_job_fn_t threadpool_job_dead;
212 212
213static void threadpool_job_hold(struct threadpool_job *); 213static void threadpool_job_hold(struct threadpool_job *);
214static void threadpool_job_rele(struct threadpool_job *); 214static void threadpool_job_rele(struct threadpool_job *);
215 215
216static void threadpool_overseer_thread(void *) __dead; 216static void threadpool_dispatcher_thread(void *) __dead;
217static void threadpool_thread(void *) __dead; 217static void threadpool_thread(void *) __dead;
218 218
219static pool_cache_t threadpool_thread_pc __read_mostly; 219static pool_cache_t threadpool_thread_pc __read_mostly;
220 220
221static kmutex_t threadpools_lock __cacheline_aligned; 221static kmutex_t threadpools_lock __cacheline_aligned;
222 222
223 /* Default to 30 second idle timeout for pool threads. */ 223 /* Default to 30 second idle timeout for pool threads. */
224static int threadpool_idle_time_ms = 30 * 1000; 224static int threadpool_idle_time_ms = 30 * 1000;
225 225
226struct threadpool_unbound { 226struct threadpool_unbound {
227 struct threadpool tpu_pool; 227 struct threadpool tpu_pool;
228 228
229 /* protected by threadpools_lock */ 229 /* protected by threadpools_lock */
@@ -346,143 +346,156 @@ SYSCTL_SETUP(sysctl_threadpool_setup, @@ -346,143 +346,156 @@ SYSCTL_SETUP(sysctl_threadpool_setup,
346void 346void
347threadpools_init(void) 347threadpools_init(void)
348{ 348{
349 349
350 threadpool_thread_pc = 350 threadpool_thread_pc =
351 pool_cache_init(sizeof(struct threadpool_thread), 0, 0, 0, 351 pool_cache_init(sizeof(struct threadpool_thread), 0, 0, 0,
352 "thplthrd", NULL, IPL_NONE, NULL, NULL, NULL); 352 "thplthrd", NULL, IPL_NONE, NULL, NULL, NULL);
353 353
354 LIST_INIT(&unbound_threadpools); 354 LIST_INIT(&unbound_threadpools);
355 LIST_INIT(&percpu_threadpools); 355 LIST_INIT(&percpu_threadpools);
356 mutex_init(&threadpools_lock, MUTEX_DEFAULT, IPL_NONE); 356 mutex_init(&threadpools_lock, MUTEX_DEFAULT, IPL_NONE);
357} 357}
358 358
 359static void
 360threadnamesuffix(char *buf, size_t buflen, struct cpu_info *ci, int pri)
 361{
 362
 363 buf[0] = '\0';
 364 if (ci)
 365 snprintf(buf + strlen(buf), buflen - strlen(buf), "/%d",
 366 cpu_index(ci));
 367 if (pri != PRI_NONE)
 368 snprintf(buf + strlen(buf), buflen - strlen(buf), "@%d", pri);
 369}
 370
359/* Thread pool creation */ 371/* Thread pool creation */
360 372
361static bool 373static bool
362threadpool_pri_is_valid(pri_t pri) 374threadpool_pri_is_valid(pri_t pri)
363{ 375{
364 return (pri == PRI_NONE || (pri >= PRI_USER && pri < PRI_COUNT)); 376 return (pri == PRI_NONE || (pri >= PRI_USER && pri < PRI_COUNT));
365} 377}
366 378
367static int 379static int
368threadpool_create(struct threadpool *const pool, struct cpu_info *ci, 380threadpool_create(struct threadpool *const pool, struct cpu_info *ci,
369 pri_t pri) 381 pri_t pri)
370{ 382{
371 struct lwp *lwp; 383 struct lwp *lwp;
 384 char suffix[16];
372 int ktflags; 385 int ktflags;
373 int error; 386 int error;
374 387
375 KASSERT(threadpool_pri_is_valid(pri)); 388 KASSERT(threadpool_pri_is_valid(pri));
376 389
377 SDT_PROBE2(sdt, kernel, threadpool, create, ci, pri); 390 SDT_PROBE2(sdt, kernel, threadpool, create, ci, pri);
378 391
379 mutex_init(&pool->tp_lock, MUTEX_DEFAULT, IPL_VM); 392 mutex_init(&pool->tp_lock, MUTEX_DEFAULT, IPL_VM);
380 /* XXX overseer */ 393 /* XXX dispatcher */
381 TAILQ_INIT(&pool->tp_jobs); 394 TAILQ_INIT(&pool->tp_jobs);
382 TAILQ_INIT(&pool->tp_idle_threads); 395 TAILQ_INIT(&pool->tp_idle_threads);
383 pool->tp_refcnt = 1; /* overseer's reference */ 396 pool->tp_refcnt = 1; /* dispatcher's reference */
384 pool->tp_flags = 0; 397 pool->tp_flags = 0;
385 pool->tp_cpu = ci; 398 pool->tp_cpu = ci;
386 pool->tp_pri = pri; 399 pool->tp_pri = pri;
387 400
388 pool->tp_overseer.tpt_lwp = NULL; 401 pool->tp_dispatcher.tpt_lwp = NULL;
389 pool->tp_overseer.tpt_pool = pool; 402 pool->tp_dispatcher.tpt_pool = pool;
390 pool->tp_overseer.tpt_job = NULL; 403 pool->tp_dispatcher.tpt_job = NULL;
391 cv_init(&pool->tp_overseer.tpt_cv, "poolover"); 404 cv_init(&pool->tp_dispatcher.tpt_cv, "pooldisp");
392 405
393 ktflags = 0; 406 ktflags = 0;
394 ktflags |= KTHREAD_MPSAFE; 407 ktflags |= KTHREAD_MPSAFE;
395 if (pri < PRI_KERNEL) 408 if (pri < PRI_KERNEL)
396 ktflags |= KTHREAD_TS; 409 ktflags |= KTHREAD_TS;
397 error = kthread_create(pri, ktflags, ci, &threadpool_overseer_thread, 410 threadnamesuffix(suffix, sizeof(suffix), ci, pri);
398 &pool->tp_overseer, &lwp, 411 error = kthread_create(pri, ktflags, ci, &threadpool_dispatcher_thread,
399 "pooloverseer/%d@%d", (ci ? cpu_index(ci) : -1), (int)pri); 412 &pool->tp_dispatcher, &lwp, "pooldisp%s", suffix);
400 if (error) 413 if (error)
401 goto fail0; 414 goto fail0;
402 415
403 mutex_spin_enter(&pool->tp_lock); 416 mutex_spin_enter(&pool->tp_lock);
404 pool->tp_overseer.tpt_lwp = lwp; 417 pool->tp_dispatcher.tpt_lwp = lwp;
405 cv_broadcast(&pool->tp_overseer.tpt_cv); 418 cv_broadcast(&pool->tp_dispatcher.tpt_cv);
406 mutex_spin_exit(&pool->tp_lock); 419 mutex_spin_exit(&pool->tp_lock);
407 420
408 SDT_PROBE3(sdt, kernel, threadpool, create__success, ci, pri, pool); 421 SDT_PROBE3(sdt, kernel, threadpool, create__success, ci, pri, pool);
409 return 0; 422 return 0;
410 423
411fail0: KASSERT(error); 424fail0: KASSERT(error);
412 KASSERT(pool->tp_overseer.tpt_job == NULL); 425 KASSERT(pool->tp_dispatcher.tpt_job == NULL);
413 KASSERT(pool->tp_overseer.tpt_pool == pool); 426 KASSERT(pool->tp_dispatcher.tpt_pool == pool);
414 KASSERT(pool->tp_flags == 0); 427 KASSERT(pool->tp_flags == 0);
415 KASSERT(pool->tp_refcnt == 0); 428 KASSERT(pool->tp_refcnt == 0);
416 KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads)); 429 KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
417 KASSERT(TAILQ_EMPTY(&pool->tp_jobs)); 430 KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
418 KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv)); 431 KASSERT(!cv_has_waiters(&pool->tp_dispatcher.tpt_cv));
419 cv_destroy(&pool->tp_overseer.tpt_cv); 432 cv_destroy(&pool->tp_dispatcher.tpt_cv);
420 mutex_destroy(&pool->tp_lock); 433 mutex_destroy(&pool->tp_lock);
421 SDT_PROBE3(sdt, kernel, threadpool, create__failure, ci, pri, error); 434 SDT_PROBE3(sdt, kernel, threadpool, create__failure, ci, pri, error);
422 return error; 435 return error;
423} 436}
424 437
425/* Thread pool destruction */ 438/* Thread pool destruction */
426 439
427static void 440static void
428threadpool_destroy(struct threadpool *pool) 441threadpool_destroy(struct threadpool *pool)
429{ 442{
430 struct threadpool_thread *thread; 443 struct threadpool_thread *thread;
431 444
432 SDT_PROBE1(sdt, kernel, threadpool, destroy, pool); 445 SDT_PROBE1(sdt, kernel, threadpool, destroy, pool);
433 446
434 /* Mark the pool dying and wait for threads to commit suicide. */ 447 /* Mark the pool dying and wait for threads to commit suicide. */
435 mutex_spin_enter(&pool->tp_lock); 448 mutex_spin_enter(&pool->tp_lock);
436 KASSERT(TAILQ_EMPTY(&pool->tp_jobs)); 449 KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
437 pool->tp_flags |= THREADPOOL_DYING; 450 pool->tp_flags |= THREADPOOL_DYING;
438 cv_broadcast(&pool->tp_overseer.tpt_cv); 451 cv_broadcast(&pool->tp_dispatcher.tpt_cv);
439 TAILQ_FOREACH(thread, &pool->tp_idle_threads, tpt_entry) 452 TAILQ_FOREACH(thread, &pool->tp_idle_threads, tpt_entry)
440 cv_broadcast(&thread->tpt_cv); 453 cv_broadcast(&thread->tpt_cv);
441 while (0 < pool->tp_refcnt) { 454 while (0 < pool->tp_refcnt) {
442 SDT_PROBE2(sdt, kernel, threadpool, destroy__wait, 455 SDT_PROBE2(sdt, kernel, threadpool, destroy__wait,
443 pool, pool->tp_refcnt); 456 pool, pool->tp_refcnt);
444 cv_wait(&pool->tp_overseer.tpt_cv, &pool->tp_lock); 457 cv_wait(&pool->tp_dispatcher.tpt_cv, &pool->tp_lock);
445 } 458 }
446 mutex_spin_exit(&pool->tp_lock); 459 mutex_spin_exit(&pool->tp_lock);
447 460
448 KASSERT(pool->tp_overseer.tpt_job == NULL); 461 KASSERT(pool->tp_dispatcher.tpt_job == NULL);
449 KASSERT(pool->tp_overseer.tpt_pool == pool); 462 KASSERT(pool->tp_dispatcher.tpt_pool == pool);
450 KASSERT(pool->tp_flags == THREADPOOL_DYING); 463 KASSERT(pool->tp_flags == THREADPOOL_DYING);
451 KASSERT(pool->tp_refcnt == 0); 464 KASSERT(pool->tp_refcnt == 0);
452 KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads)); 465 KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
453 KASSERT(TAILQ_EMPTY(&pool->tp_jobs)); 466 KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
454 KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv)); 467 KASSERT(!cv_has_waiters(&pool->tp_dispatcher.tpt_cv));
455 cv_destroy(&pool->tp_overseer.tpt_cv); 468 cv_destroy(&pool->tp_dispatcher.tpt_cv);
456 mutex_destroy(&pool->tp_lock); 469 mutex_destroy(&pool->tp_lock);
457} 470}
458 471
459static void 472static void
460threadpool_hold(struct threadpool *pool) 473threadpool_hold(struct threadpool *pool)
461{ 474{
462 475
463 KASSERT(mutex_owned(&pool->tp_lock)); 476 KASSERT(mutex_owned(&pool->tp_lock));
464 pool->tp_refcnt++; 477 pool->tp_refcnt++;
465 KASSERT(pool->tp_refcnt != 0); 478 KASSERT(pool->tp_refcnt != 0);
466} 479}
467 480
468static void 481static void
469threadpool_rele(struct threadpool *pool) 482threadpool_rele(struct threadpool *pool)
470{ 483{
471 484
472 KASSERT(mutex_owned(&pool->tp_lock)); 485 KASSERT(mutex_owned(&pool->tp_lock));
473 KASSERT(0 < pool->tp_refcnt); 486 KASSERT(0 < pool->tp_refcnt);
474 if (--pool->tp_refcnt == 0) 487 if (--pool->tp_refcnt == 0)
475 cv_broadcast(&pool->tp_overseer.tpt_cv); 488 cv_broadcast(&pool->tp_dispatcher.tpt_cv);
476} 489}
477 490
478/* Unbound thread pools */ 491/* Unbound thread pools */
479 492
480int 493int
481threadpool_get(struct threadpool **poolp, pri_t pri) 494threadpool_get(struct threadpool **poolp, pri_t pri)
482{ 495{
483 struct threadpool_unbound *tpu, *tmp = NULL; 496 struct threadpool_unbound *tpu, *tmp = NULL;
484 int error; 497 int error;
485 498
486 ASSERT_SLEEPABLE(); 499 ASSERT_SLEEPABLE();
487 500
488 SDT_PROBE1(sdt, kernel, threadpool, get, pri); 501 SDT_PROBE1(sdt, kernel, threadpool, get, pri);
@@ -849,78 +862,78 @@ threadpool_schedule_job(struct threadpoo @@ -849,78 +862,78 @@ threadpool_schedule_job(struct threadpoo
849 * to NULL under the interlock. 862 * to NULL under the interlock.
850 */ 863 */
851 if (__predict_true(job->job_thread != NULL)) { 864 if (__predict_true(job->job_thread != NULL)) {
852 SDT_PROBE2(sdt, kernel, threadpool, schedule__job__running, 865 SDT_PROBE2(sdt, kernel, threadpool, schedule__job__running,
853 pool, job); 866 pool, job);
854 return; 867 return;
855 } 868 }
856 869
857 threadpool_job_hold(job); 870 threadpool_job_hold(job);
858 871
859 /* Otherwise, try to assign a thread to the job. */ 872 /* Otherwise, try to assign a thread to the job. */
860 mutex_spin_enter(&pool->tp_lock); 873 mutex_spin_enter(&pool->tp_lock);
861 if (__predict_false(TAILQ_EMPTY(&pool->tp_idle_threads))) { 874 if (__predict_false(TAILQ_EMPTY(&pool->tp_idle_threads))) {
862 /* Nobody's idle. Give it to the overseer. */ 875 /* Nobody's idle. Give it to the dispatcher. */
863 SDT_PROBE2(sdt, kernel, threadpool, schedule__job__overseer, 876 SDT_PROBE2(sdt, kernel, threadpool, schedule__job__dispatcher,
864 pool, job); 877 pool, job);
865 job->job_thread = &pool->tp_overseer; 878 job->job_thread = &pool->tp_dispatcher;
866 TAILQ_INSERT_TAIL(&pool->tp_jobs, job, job_entry); 879 TAILQ_INSERT_TAIL(&pool->tp_jobs, job, job_entry);
867 } else { 880 } else {
868 /* Assign it to the first idle thread. */ 881 /* Assign it to the first idle thread. */
869 job->job_thread = TAILQ_FIRST(&pool->tp_idle_threads); 882 job->job_thread = TAILQ_FIRST(&pool->tp_idle_threads);
870 SDT_PROBE3(sdt, kernel, threadpool, schedule__job__thread, 883 SDT_PROBE3(sdt, kernel, threadpool, schedule__job__thread,
871 pool, job, job->job_thread->tpt_lwp); 884 pool, job, job->job_thread->tpt_lwp);
872 TAILQ_REMOVE(&pool->tp_idle_threads, job->job_thread, 885 TAILQ_REMOVE(&pool->tp_idle_threads, job->job_thread,
873 tpt_entry); 886 tpt_entry);
874 job->job_thread->tpt_job = job; 887 job->job_thread->tpt_job = job;
875 } 888 }
876 889
877 /* Notify whomever we gave it to, overseer or idle thread. */ 890 /* Notify whomever we gave it to, dispatcher or idle thread. */
878 KASSERT(job->job_thread != NULL); 891 KASSERT(job->job_thread != NULL);
879 cv_broadcast(&job->job_thread->tpt_cv); 892 cv_broadcast(&job->job_thread->tpt_cv);
880 mutex_spin_exit(&pool->tp_lock); 893 mutex_spin_exit(&pool->tp_lock);
881} 894}
882 895
883bool 896bool
884threadpool_cancel_job_async(struct threadpool *pool, struct threadpool_job *job) 897threadpool_cancel_job_async(struct threadpool *pool, struct threadpool_job *job)
885{ 898{
886 899
887 KASSERT(mutex_owned(job->job_lock)); 900 KASSERT(mutex_owned(job->job_lock));
888 901
889 /* 902 /*
890 * XXXJRT This fails (albeit safely) when all of the following 903 * XXXJRT This fails (albeit safely) when all of the following
891 * are true: 904 * are true:
892 * 905 *
893 * => "pool" is something other than what the job was 906 * => "pool" is something other than what the job was
894 * scheduled on. This can legitimately occur if, 907 * scheduled on. This can legitimately occur if,
895 * for example, a job is percpu-scheduled on CPU0 908 * for example, a job is percpu-scheduled on CPU0
896 * and then CPU1 attempts to cancel it without taking 909 * and then CPU1 attempts to cancel it without taking
897 * a remote pool reference. (this might happen by 910 * a remote pool reference. (this might happen by
898 * "luck of the draw"). 911 * "luck of the draw").
899 * 912 *
900 * => "job" is not yet running, but is assigned to the 913 * => "job" is not yet running, but is assigned to the
901 * overseer. 914 * dispatcher.
902 * 915 *
903 * When this happens, this code makes the determination that 916 * When this happens, this code makes the determination that
904 * the job is already running. The failure mode is that the 917 * the job is already running. The failure mode is that the
905 * caller is told the job is running, and thus has to wait. 918 * caller is told the job is running, and thus has to wait.
906 * The overseer will eventually get to it and the job will 919 * The dispatcher will eventually get to it and the job will
907 * proceed as if it had been already running. 920 * proceed as if it had been already running.
908 */ 921 */
909 922
910 if (job->job_thread == NULL) { 923 if (job->job_thread == NULL) {
911 /* Nothing to do. Guaranteed not running. */ 924 /* Nothing to do. Guaranteed not running. */
912 return true; 925 return true;
913 } else if (job->job_thread == &pool->tp_overseer) { 926 } else if (job->job_thread == &pool->tp_dispatcher) {
914 /* Take it off the list to guarantee it won't run. */ 927 /* Take it off the list to guarantee it won't run. */
915 job->job_thread = NULL; 928 job->job_thread = NULL;
916 mutex_spin_enter(&pool->tp_lock); 929 mutex_spin_enter(&pool->tp_lock);
917 TAILQ_REMOVE(&pool->tp_jobs, job, job_entry); 930 TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
918 mutex_spin_exit(&pool->tp_lock); 931 mutex_spin_exit(&pool->tp_lock);
919 threadpool_job_rele(job); 932 threadpool_job_rele(job);
920 return true; 933 return true;
921 } else { 934 } else {
922 /* Too late -- already running. */ 935 /* Too late -- already running. */
923 return false; 936 return false;
924 } 937 }
925} 938}
926 939
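A calling pattern that avoids the false-positive described in the comment above is to
remember which CPU's pool a per-CPU job was scheduled on and to cancel against that same
pool via threadpool_percpu_ref_remote().  A hedged sketch (the example_pc_* names are
hypothetical, and it assumes the scheduling context does not migrate between CPUs while
it samples curcpu()):

    #include <sys/param.h>
    #include <sys/cpu.h>
    #include <sys/mutex.h>
    #include <sys/threadpool.h>

    struct example_pc_softc {
        kmutex_t                        sc_lock;
        struct threadpool_percpu        *sc_pool_percpu;
        struct threadpool_job           sc_job;
        struct cpu_info                 *sc_ci;
    };

    static void
    example_pc_schedule(struct example_pc_softc *sc)
    {
        struct threadpool *pool;

        mutex_enter(&sc->sc_lock);
        sc->sc_ci = curcpu();   /* assumed stable until the job is scheduled */
        pool = threadpool_percpu_ref(sc->sc_pool_percpu);
        threadpool_schedule_job(pool, &sc->sc_job);
        mutex_exit(&sc->sc_lock);
    }

    static void
    example_pc_cancel(struct example_pc_softc *sc)
    {
        struct threadpool *pool;

        /* Cancel against the pool the job was actually scheduled on. */
        mutex_enter(&sc->sc_lock);
        pool = threadpool_percpu_ref_remote(sc->sc_pool_percpu, sc->sc_ci);
        threadpool_cancel_job(pool, &sc->sc_job);
        mutex_exit(&sc->sc_lock);
    }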
@@ -935,83 +948,84 @@ threadpool_cancel_job(struct threadpool  @@ -935,83 +948,84 @@ threadpool_cancel_job(struct threadpool
935 * as a false-positive. 948 * as a false-positive.
936 */ 949 */
937 950
938 KASSERT(mutex_owned(job->job_lock)); 951 KASSERT(mutex_owned(job->job_lock));
939 952
940 if (threadpool_cancel_job_async(pool, job)) 953 if (threadpool_cancel_job_async(pool, job))
941 return; 954 return;
942 955
943 /* Already running. Wait for it to complete. */ 956 /* Already running. Wait for it to complete. */
944 while (job->job_thread != NULL) 957 while (job->job_thread != NULL)
945 cv_wait(&job->job_cv, job->job_lock); 958 cv_wait(&job->job_cv, job->job_lock);
946} 959}
947 960
948/* Thread pool overseer thread */ 961/* Thread pool dispatcher thread */
949 962
950static void __dead 963static void __dead
951threadpool_overseer_thread(void *arg) 964threadpool_dispatcher_thread(void *arg)
952{ 965{
953 struct threadpool_thread *const overseer = arg; 966 struct threadpool_thread *const dispatcher = arg;
954 struct threadpool *const pool = overseer->tpt_pool; 967 struct threadpool *const pool = dispatcher->tpt_pool;
955 struct lwp *lwp = NULL; 968 struct lwp *lwp = NULL;
956 int ktflags; 969 int ktflags;
 970 char suffix[16];
957 int error; 971 int error;
958 972
959 KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu())); 973 KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));
960 KASSERT((pool->tp_cpu == NULL) || (curlwp->l_pflag & LP_BOUND)); 974 KASSERT((pool->tp_cpu == NULL) || (curlwp->l_pflag & LP_BOUND));
961 975
962 /* Wait until we're initialized. */ 976 /* Wait until we're initialized. */
963 mutex_spin_enter(&pool->tp_lock); 977 mutex_spin_enter(&pool->tp_lock);
964 while (overseer->tpt_lwp == NULL) 978 while (dispatcher->tpt_lwp == NULL)
965 cv_wait(&overseer->tpt_cv, &pool->tp_lock); 979 cv_wait(&dispatcher->tpt_cv, &pool->tp_lock);
966 980
967 SDT_PROBE1(sdt, kernel, threadpool, overseer__start, pool); 981 SDT_PROBE1(sdt, kernel, threadpool, dispatcher__start, pool);
968 982
969 for (;;) { 983 for (;;) {
970 /* Wait until there's a job. */ 984 /* Wait until there's a job. */
971 while (TAILQ_EMPTY(&pool->tp_jobs)) { 985 while (TAILQ_EMPTY(&pool->tp_jobs)) {
972 if (ISSET(pool->tp_flags, THREADPOOL_DYING)) { 986 if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
973 SDT_PROBE1(sdt, kernel, threadpool, 987 SDT_PROBE1(sdt, kernel, threadpool,
974 overseer__dying, pool); 988 dispatcher__dying, pool);
975 break; 989 break;
976 } 990 }
977 cv_wait(&overseer->tpt_cv, &pool->tp_lock); 991 cv_wait(&dispatcher->tpt_cv, &pool->tp_lock);
978 } 992 }
979 if (__predict_false(TAILQ_EMPTY(&pool->tp_jobs))) 993 if (__predict_false(TAILQ_EMPTY(&pool->tp_jobs)))
980 break; 994 break;
981 995
982 /* If there are no threads, we'll have to try to start one. */ 996 /* If there are no threads, we'll have to try to start one. */
983 if (TAILQ_EMPTY(&pool->tp_idle_threads)) { 997 if (TAILQ_EMPTY(&pool->tp_idle_threads)) {
984 SDT_PROBE1(sdt, kernel, threadpool, overseer__spawn, 998 SDT_PROBE1(sdt, kernel, threadpool, dispatcher__spawn,
985 pool); 999 pool);
986 threadpool_hold(pool); 1000 threadpool_hold(pool);
987 mutex_spin_exit(&pool->tp_lock); 1001 mutex_spin_exit(&pool->tp_lock);
988 1002
989 struct threadpool_thread *const thread = 1003 struct threadpool_thread *const thread =
990 pool_cache_get(threadpool_thread_pc, PR_WAITOK); 1004 pool_cache_get(threadpool_thread_pc, PR_WAITOK);
991 thread->tpt_lwp = NULL; 1005 thread->tpt_lwp = NULL;
992 thread->tpt_pool = pool; 1006 thread->tpt_pool = pool;
993 thread->tpt_job = NULL; 1007 thread->tpt_job = NULL;
994 cv_init(&thread->tpt_cv, "poolthrd"); 1008 cv_init(&thread->tpt_cv, "pooljob");
995 1009
996 ktflags = 0; 1010 ktflags = 0;
997 ktflags |= KTHREAD_MPSAFE; 1011 ktflags |= KTHREAD_MPSAFE;
998 if (pool->tp_pri < PRI_KERNEL) 1012 if (pool->tp_pri < PRI_KERNEL)
999 ktflags |= KTHREAD_TS; 1013 ktflags |= KTHREAD_TS;
 1014 threadnamesuffix(suffix, sizeof(suffix), pool->tp_cpu,
 1015 pool->tp_pri);
1000 error = kthread_create(pool->tp_pri, ktflags, 1016 error = kthread_create(pool->tp_pri, ktflags,
1001 pool->tp_cpu, &threadpool_thread, thread, &lwp, 1017 pool->tp_cpu, &threadpool_thread, thread, &lwp,
1002 "poolthread/%d@%d", 1018 "poolthread%s", suffix);
1003 (pool->tp_cpu ? cpu_index(pool->tp_cpu) : -1), 
1004 (int)pool->tp_pri); 
1005 1019
1006 mutex_spin_enter(&pool->tp_lock); 1020 mutex_spin_enter(&pool->tp_lock);
1007 if (error) { 1021 if (error) {
1008 pool_cache_put(threadpool_thread_pc, thread); 1022 pool_cache_put(threadpool_thread_pc, thread);
1009 threadpool_rele(pool); 1023 threadpool_rele(pool);
1010 /* XXX What to do to wait for memory? */ 1024 /* XXX What to do to wait for memory? */
1011 (void)kpause("thrdplcr", false, hz, 1025 (void)kpause("thrdplcr", false, hz,
1012 &pool->tp_lock); 1026 &pool->tp_lock);
1013 continue; 1027 continue;
1014 } 1028 }
1015 /* 1029 /*
1016 * New kthread now owns the reference to the pool 1030 * New kthread now owns the reference to the pool
1017 * taken above. 1031 * taken above.
@@ -1027,66 +1041,66 @@ threadpool_overseer_thread(void *arg) @@ -1027,66 +1041,66 @@ threadpool_overseer_thread(void *arg)
1027 1041
1028 /* There are idle threads, so try giving one a job. */ 1042 /* There are idle threads, so try giving one a job. */
1029 struct threadpool_job *const job = TAILQ_FIRST(&pool->tp_jobs); 1043 struct threadpool_job *const job = TAILQ_FIRST(&pool->tp_jobs);
1030 TAILQ_REMOVE(&pool->tp_jobs, job, job_entry); 1044 TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
1031 /* 1045 /*
1032 * Take an extra reference on the job temporarily so that 1046 * Take an extra reference on the job temporarily so that
1033 * it won't disappear on us while we have both locks dropped. 1047 * it won't disappear on us while we have both locks dropped.
1034 */ 1048 */
1035 threadpool_job_hold(job); 1049 threadpool_job_hold(job);
1036 mutex_spin_exit(&pool->tp_lock); 1050 mutex_spin_exit(&pool->tp_lock);
1037 1051
1038 mutex_enter(job->job_lock); 1052 mutex_enter(job->job_lock);
1039 /* If the job was cancelled, we'll no longer be its thread. */ 1053 /* If the job was cancelled, we'll no longer be its thread. */
1040 if (__predict_true(job->job_thread == overseer)) { 1054 if (__predict_true(job->job_thread == dispatcher)) {
1041 mutex_spin_enter(&pool->tp_lock); 1055 mutex_spin_enter(&pool->tp_lock);
1042 if (__predict_false( 1056 if (__predict_false(
1043 TAILQ_EMPTY(&pool->tp_idle_threads))) { 1057 TAILQ_EMPTY(&pool->tp_idle_threads))) {
1044 /* 1058 /*
1045 * Someone else snagged the thread 1059 * Someone else snagged the thread
1046 * first. We'll have to try again. 1060 * first. We'll have to try again.
1047 */ 1061 */
1048 SDT_PROBE2(sdt, kernel, threadpool, 1062 SDT_PROBE2(sdt, kernel, threadpool,
1049 overseer__race, pool, job); 1063 dispatcher__race, pool, job);
1050 TAILQ_INSERT_HEAD(&pool->tp_jobs, job, 1064 TAILQ_INSERT_HEAD(&pool->tp_jobs, job,
1051 job_entry); 1065 job_entry);
1052 } else { 1066 } else {
1053 /* 1067 /*
1054 * Assign the job to the thread and 1068 * Assign the job to the thread and
1055 * wake the thread so it starts work. 1069 * wake the thread so it starts work.
1056 */ 1070 */
1057 struct threadpool_thread *const thread = 1071 struct threadpool_thread *const thread =
1058 TAILQ_FIRST(&pool->tp_idle_threads); 1072 TAILQ_FIRST(&pool->tp_idle_threads);
1059 1073
1060 SDT_PROBE2(sdt, kernel, threadpool, 1074 SDT_PROBE2(sdt, kernel, threadpool,
1061 overseer__assign, job, thread->tpt_lwp); 1075 dispatcher__assign, job, thread->tpt_lwp);
1062 KASSERT(thread->tpt_job == NULL); 1076 KASSERT(thread->tpt_job == NULL);
1063 TAILQ_REMOVE(&pool->tp_idle_threads, thread, 1077 TAILQ_REMOVE(&pool->tp_idle_threads, thread,
1064 tpt_entry); 1078 tpt_entry);
1065 thread->tpt_job = job; 1079 thread->tpt_job = job;
1066 job->job_thread = thread; 1080 job->job_thread = thread;
1067 cv_broadcast(&thread->tpt_cv); 1081 cv_broadcast(&thread->tpt_cv);
1068 } 1082 }
1069 mutex_spin_exit(&pool->tp_lock); 1083 mutex_spin_exit(&pool->tp_lock);
1070 } 1084 }
1071 threadpool_job_rele(job); 1085 threadpool_job_rele(job);
1072 mutex_exit(job->job_lock); 1086 mutex_exit(job->job_lock);
1073 1087
1074 mutex_spin_enter(&pool->tp_lock); 1088 mutex_spin_enter(&pool->tp_lock);
1075 } 1089 }
1076 threadpool_rele(pool); 1090 threadpool_rele(pool);
1077 mutex_spin_exit(&pool->tp_lock); 1091 mutex_spin_exit(&pool->tp_lock);
1078 1092
1079 SDT_PROBE1(sdt, kernel, threadpool, overseer__exit, pool); 1093 SDT_PROBE1(sdt, kernel, threadpool, dispatcher__exit, pool);
1080 1094
1081 kthread_exit(0); 1095 kthread_exit(0);
1082} 1096}
1083 1097
1084/* Thread pool thread */ 1098/* Thread pool thread */
1085 1099
1086static void __dead 1100static void __dead
1087threadpool_thread(void *arg) 1101threadpool_thread(void *arg)
1088{ 1102{
1089 struct threadpool_thread *const thread = arg; 1103 struct threadpool_thread *const thread = arg;
1090 struct threadpool *const pool = thread->tpt_pool; 1104 struct threadpool *const pool = thread->tpt_pool;
1091 1105
1092 KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu())); 1106 KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));