Wed Dec 26 20:30:36 2018 UTC
Make the callers of threadpool_create() and threadpool_destroy()
responsible for managing the storage of the threadpool objects themselves.


(thorpej)
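Illustrative sketch of the new contract described above (not part of the
commit; the helper names example_pool_attach()/example_pool_detach() are
hypothetical, and threadpool_create()/threadpool_destroy() are static to
kern_threadpool.c, so the real callers are the in-file ones visible in the
diff below, e.g. threadpool_get() and threadpool_percpu_create()).  The
caller now allocates and frees the struct threadpool; create/destroy only
initialize and tear it down:

    static struct threadpool *
    example_pool_attach(struct cpu_info *ci, pri_t pri)
    {
            struct threadpool *pool;
            int error;

            /* The caller owns the storage now. */
            pool = kmem_zalloc(sizeof(*pool), KM_SLEEP);

            /* threadpool_create() only initializes the object. */
            error = threadpool_create(pool, ci, pri);
            if (error) {
                    /* On failure the caller frees its own allocation. */
                    kmem_free(pool, sizeof(*pool));
                    return NULL;
            }
            return pool;
    }

    static void
    example_pool_detach(struct threadpool *pool)
    {
            /* threadpool_destroy() only tears down; it no longer frees. */
            threadpool_destroy(pool);
            kmem_free(pool, sizeof(*pool));
    }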
diff -r1.5 -r1.6 src/sys/kern/kern_threadpool.c


--- src/sys/kern/kern_threadpool.c 2018/12/26 20:08:22 1.5
+++ src/sys/kern/kern_threadpool.c 2018/12/26 20:30:36 1.6
@@ -1,14 +1,14 @@
-/* $NetBSD: kern_threadpool.c,v 1.5 2018/12/26 20:08:22 thorpej Exp $ */
+/* $NetBSD: kern_threadpool.c,v 1.6 2018/12/26 20:30:36 thorpej Exp $ */
 
 /*-
  * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Taylor R. Campbell and Jason R. Thorpe.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -71,27 +71,27 @@
  * | | <running (n+1)b> | |
  * | +------------------+ |
  * +--------------------------------------------------------+
  *
  * XXX Why one overseer per CPU?  I did that originally to avoid
  * touching remote CPUs' memory when scheduling a job, but that still
  * requires interprocessor synchronization.  Perhaps we could get by
  * with a single overseer thread, at the expense of another pointer in
  * struct threadpool_job to identify the CPU on which it must run
  * in order for the overseer to schedule it correctly.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.5 2018/12/26 20:08:22 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.6 2018/12/26 20:30:36 thorpej Exp $");
 
 #include <sys/types.h>
 #include <sys/param.h>
 #include <sys/atomic.h>
 #include <sys/condvar.h>
 #include <sys/cpu.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/kthread.h>
 #include <sys/mutex.h>
 #include <sys/once.h>
 #include <sys/percpu.h>
 #include <sys/pool.h>
@@ -146,27 +146,26 @@ static int threadpool_job_hold(struct th
 static void threadpool_job_rele(struct threadpool_job *);
 
 static void threadpool_overseer_thread(void *) __dead;
 static void threadpool_thread(void *) __dead;
 
 static pool_cache_t threadpool_thread_pc __read_mostly;
 
 static kmutex_t threadpools_lock __cacheline_aligned;
 
         /* Idle out threads after 30 seconds */
 #define THREADPOOL_IDLE_TICKS mstohz(30 * 1000)
 
 struct threadpool_unbound {
-        /* must be first; see threadpool_create() */
         struct threadpool tpu_pool;
 
         /* protected by threadpools_lock */
         LIST_ENTRY(threadpool_unbound) tpu_link;
         uint64_t tpu_refcnt;
 };
 
 static LIST_HEAD(, threadpool_unbound) unbound_threadpools;
 
 static struct threadpool_unbound *
 threadpool_lookup_unbound(pri_t pri)
 {
         struct threadpool_unbound *tpu;
@@ -252,30 +251,29 @@ threadpools_init(void)
 
         return 0;
 }
 
 /* Thread pool creation */
 
 static bool
 threadpool_pri_is_valid(pri_t pri)
 {
         return (pri == PRI_NONE || (pri >= PRI_USER && pri < PRI_COUNT));
 }
 
 static int
-threadpool_create(struct threadpool **poolp, struct cpu_info *ci, pri_t pri,
-    size_t size)
+threadpool_create(struct threadpool *const pool, struct cpu_info *ci,
+    pri_t pri)
 {
-        struct threadpool *const pool = kmem_zalloc(size, KM_SLEEP);
         struct lwp *lwp;
         int ktflags;
         int error;
 
         KASSERT(threadpool_pri_is_valid(pri));
 
         mutex_init(&pool->tp_lock, MUTEX_DEFAULT, IPL_VM);
         /* XXX overseer */
         TAILQ_INIT(&pool->tp_jobs);
         TAILQ_INIT(&pool->tp_idle_threads);
         pool->tp_refcnt = 0;
         pool->tp_flags = 0;
         pool->tp_cpu = ci;
@@ -293,74 +291,71 @@ threadpool_create(struct threadpool **po
         if (pri < PRI_KERNEL)
                 ktflags |= KTHREAD_TS;
         error = kthread_create(pri, ktflags, ci, &threadpool_overseer_thread,
             &pool->tp_overseer, &lwp,
             "pooloverseer/%d@%d", (ci ? cpu_index(ci) : -1), (int)pri);
         if (error)
                 goto fail0;
 
         mutex_spin_enter(&pool->tp_lock);
         pool->tp_overseer.tpt_lwp = lwp;
         cv_broadcast(&pool->tp_overseer.tpt_cv);
         mutex_spin_exit(&pool->tp_lock);
 
-        *poolp = pool;
         return 0;
 
 fail0:  KASSERT(error);
         KASSERT(pool->tp_overseer.tpt_job == NULL);
         KASSERT(pool->tp_overseer.tpt_pool == pool);
         KASSERT(pool->tp_flags == 0);
         KASSERT(pool->tp_refcnt == 0);
         KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
         KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
         KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv));
         cv_destroy(&pool->tp_overseer.tpt_cv);
         mutex_destroy(&pool->tp_lock);
-        kmem_free(pool, size);
         return error;
 }
 
 /* Thread pool destruction */
 
 static void
-threadpool_destroy(struct threadpool *pool, size_t size)
+threadpool_destroy(struct threadpool *pool)
 {
         struct threadpool_thread *thread;
 
         /* Mark the pool dying and wait for threads to commit suicide. */
         mutex_spin_enter(&pool->tp_lock);
         KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
         pool->tp_flags |= THREADPOOL_DYING;
         cv_broadcast(&pool->tp_overseer.tpt_cv);
         TAILQ_FOREACH(thread, &pool->tp_idle_threads, tpt_entry)
                 cv_broadcast(&thread->tpt_cv);
         while (0 < pool->tp_refcnt) {
                 TP_LOG(("%s: draining %u references...\n", __func__,
                     pool->tp_refcnt));
                 cv_wait(&pool->tp_overseer.tpt_cv, &pool->tp_lock);
         }
         mutex_spin_exit(&pool->tp_lock);
 
         KASSERT(pool->tp_overseer.tpt_job == NULL);
         KASSERT(pool->tp_overseer.tpt_pool == pool);
         KASSERT(pool->tp_flags == THREADPOOL_DYING);
         KASSERT(pool->tp_refcnt == 0);
         KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
         KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
         KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv));
         cv_destroy(&pool->tp_overseer.tpt_cv);
         mutex_destroy(&pool->tp_lock);
-        kmem_free(pool, size);
 }
 
 static int
 threadpool_hold(struct threadpool *pool)
 {
         unsigned int refcnt;
 
         do {
                 refcnt = pool->tp_refcnt;
                 if (refcnt == UINT_MAX)
                         return EBUSY;
         } while (atomic_cas_uint(&pool->tp_refcnt, refcnt, (refcnt + 1))
             != refcnt);
@@ -397,53 +392,54 @@ threadpool_get(struct threadpool **poolp
         struct threadpool_unbound *tpu, *tmp = NULL;
         int error;
 
         THREADPOOL_INIT();
 
         ASSERT_SLEEPABLE();
 
         if (! threadpool_pri_is_valid(pri))
                 return EINVAL;
 
         mutex_enter(&threadpools_lock);
         tpu = threadpool_lookup_unbound(pri);
         if (tpu == NULL) {
-                struct threadpool *new_pool;
                 mutex_exit(&threadpools_lock);
                 TP_LOG(("%s: No pool for pri=%d, creating one.\n",
                     __func__, (int)pri));
-                error = threadpool_create(&new_pool, NULL, pri, sizeof(*tpu));
-                if (error)
+                tmp = kmem_zalloc(sizeof(*tmp), KM_SLEEP);
+                error = threadpool_create(&tmp->tpu_pool, NULL, pri);
+                if (error) {
+                        kmem_free(tmp, sizeof(*tmp));
                         return error;
-                KASSERT(new_pool != NULL);
-                tmp = container_of(new_pool, struct threadpool_unbound,
-                    tpu_pool);
+                }
                 mutex_enter(&threadpools_lock);
                 tpu = threadpool_lookup_unbound(pri);
                 if (tpu == NULL) {
                         TP_LOG(("%s: Won the creation race for pri=%d.\n",
                             __func__, (int)pri));
                         tpu = tmp;
                         tmp = NULL;
                         threadpool_insert_unbound(tpu);
                 }
         }
         KASSERT(tpu != NULL);
         tpu->tpu_refcnt++;
         KASSERT(tpu->tpu_refcnt != 0);
         mutex_exit(&threadpools_lock);
 
-        if (tmp != NULL)
-                threadpool_destroy((struct threadpool *)tmp, sizeof(*tpu));
+        if (tmp != NULL) {
+                threadpool_destroy(&tmp->tpu_pool);
+                kmem_free(tmp, sizeof(*tmp));
+        }
         KASSERT(tpu != NULL);
         *poolp = &tpu->tpu_pool;
         return 0;
 }
 
 void
 threadpool_put(struct threadpool *pool, pri_t pri)
 {
         struct threadpool_unbound *tpu =
             container_of(pool, struct threadpool_unbound, tpu_pool);
 
         THREADPOOL_INIT();
 
@@ -453,28 +449,30 @@ threadpool_put(struct threadpool *pool, 
 
         mutex_enter(&threadpools_lock);
         KASSERT(tpu == threadpool_lookup_unbound(pri));
         KASSERT(0 < tpu->tpu_refcnt);
         if (--tpu->tpu_refcnt == 0) {
                 TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
                     __func__, (int)pri));
                 threadpool_remove_unbound(tpu);
         } else {
                 tpu = NULL;
         }
         mutex_exit(&threadpools_lock);
 
-        if (tpu)
-                threadpool_destroy(pool, sizeof(*tpu));
+        if (tpu) {
+                threadpool_destroy(&tpu->tpu_pool);
+                kmem_free(tpu, sizeof(*tpu));
+        }
 }
 
 /* Per-CPU thread pools */
 
 int
 threadpool_percpu_get(struct threadpool_percpu **pool_percpup, pri_t pri)
 {
         struct threadpool_percpu *pool_percpu, *tmp = NULL;
         int error;
 
         THREADPOOL_INIT();
 
         ASSERT_SLEEPABLE();
@@ -581,68 +579,73 @@ threadpool_percpu_create(struct threadpo
                 goto fail0;
         }
         pool_percpu->tpp_pri = pri;
 
         pool_percpu->tpp_percpu = percpu_alloc(sizeof(struct threadpool *));
         if (pool_percpu->tpp_percpu == NULL) {
                 error = ENOMEM;
                 goto fail1;
         }
 
         for (i = 0, CPU_INFO_FOREACH(cii, ci), i++) {
                 struct threadpool *pool;
 
-                error = threadpool_create(&pool, ci, pri, sizeof(*pool));
-                if (error)
+                pool = kmem_zalloc(sizeof(*pool), KM_SLEEP);
+                error = threadpool_create(pool, ci, pri);
+                if (error) {
+                        kmem_free(pool, sizeof(*pool));
                         goto fail2;
+                }
                 percpu_traverse_enter();
                 struct threadpool **const poolp =
                     percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
                 *poolp = pool;
                 percpu_traverse_exit();
         }
 
         /* Success! */
         *pool_percpup = (struct threadpool_percpu *)pool_percpu;
         return 0;
 
 fail2:  for (j = 0, CPU_INFO_FOREACH(cii, ci), j++) {
                 if (i <= j)
                         break;
                 percpu_traverse_enter();
                 struct threadpool **const poolp =
                     percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
                 struct threadpool *const pool = *poolp;
                 percpu_traverse_exit();
-                threadpool_destroy(pool, sizeof(*pool));
+                threadpool_destroy(pool);
+                kmem_free(pool, sizeof(*pool));
         }
         percpu_free(pool_percpu->tpp_percpu, sizeof(struct taskthread_pool *));
 fail1:  kmem_free(pool_percpu, sizeof(*pool_percpu));
 fail0:  return error;
 }
 
 static void
 threadpool_percpu_destroy(struct threadpool_percpu *pool_percpu)
 {
         struct cpu_info *ci;
         CPU_INFO_ITERATOR cii;
 
         for (CPU_INFO_FOREACH(cii, ci)) {
                 percpu_traverse_enter();
                 struct threadpool **const poolp =
                     percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
                 struct threadpool *const pool = *poolp;
                 percpu_traverse_exit();
-                threadpool_destroy(pool, sizeof(*pool));
+                threadpool_destroy(pool);
+                kmem_free(pool, sizeof(*pool));
         }
 
         percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
         kmem_free(pool_percpu, sizeof(*pool_percpu));
 }
 
 /* Thread pool jobs */
 
 void __printflike(4,5)
 threadpool_job_init(struct threadpool_job *job, threadpool_job_fn_t fn,
     kmutex_t *lock, const char *fmt, ...)
 {
         va_list ap;