Tue Dec 25 05:44:13 2018 UTC
Ho ho ho!  We can suppress that warning with __diagused!  Merry Christmas!


(thorpej)
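For anyone landing here from a build log: the warning in question is the compiler's unused-variable diagnostic, which fires in kernels built without DIAGNOSTIC because KASSERT() then expands to nothing and threadpool_init_error is never read. __diagused (from <sys/cdefs.h>) marks a variable as consumed only by diagnostic assertions, so the duplicated #ifdef DIAGNOSTIC definitions of THREADPOOL_INIT() introduced in rev. 1.2 can collapse back into one. A standalone userland sketch of the same mechanism, with invented names (MY_ASSERT, MY_DIAGUSED, init_subsystem) standing in for KASSERT() and __diagused:

/*
 * Minimal illustration, not kernel code: when stripping the assertion
 * removes the only reader of a variable, an "unused" attribute keeps
 * the compiler quiet without #ifdef'ing the whole macro.
 */
#include <stdio.h>

#ifdef DIAGNOSTIC
#define MY_ASSERT(e)    do { if (!(e)) fprintf(stderr, "assert failed: %s\n", #e); } while (0)
#define MY_DIAGUSED     /* empty: the assertion reads the variable */
#else
#define MY_ASSERT(e)    do { } while (0)
#define MY_DIAGUSED     __attribute__((__unused__))
#endif

static int
init_subsystem(void)
{
        return 0;       /* pretend initialization always succeeds */
}

int
main(void)
{
        int error MY_DIAGUSED = init_subsystem();

        MY_ASSERT(error == 0);
        printf("initialized\n");
        return 0;
}

Built with or without -DDIAGNOSTIC, no unused-variable warning appears; drop MY_DIAGUSED and the non-DIAGNOSTIC build complains. Roughly speaking, NetBSD's __diagused behaves the same way: empty when DIAGNOSTIC is defined, __unused otherwise.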
diff -r1.2 -r1.3 src/sys/kern/kern_threadpool.c

cvs diff -r1.2 -r1.3 src/sys/kern/kern_threadpool.c

--- src/sys/kern/kern_threadpool.c 2018/12/25 02:17:07 1.2
+++ src/sys/kern/kern_threadpool.c 2018/12/25 05:44:13 1.3
@@ -1,14 +1,14 @@
-/* $NetBSD: kern_threadpool.c,v 1.2 2018/12/25 02:17:07 kre Exp $ */
+/* $NetBSD: kern_threadpool.c,v 1.3 2018/12/25 05:44:13 thorpej Exp $ */
 
 /*-
  * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Taylor R. Campbell and Jason R. Thorpe.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -71,61 +71,54 @@
  *    |      |   <running (n+1)b>   |   |
  *    |      +------------------+   |
  *    +--------------------------------------------------------+
  *
  * XXX Why one overseer per CPU?  I did that originally to avoid
  * touching remote CPUs' memory when scheduling a job, but that still
  * requires interprocessor synchronization.  Perhaps we could get by
  * with a single overseer thread, at the expense of another pointer in
  * struct threadpool_job_impl to identify the CPU on which it must run
  * in order for the overseer to schedule it correctly.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.2 2018/12/25 02:17:07 kre Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.3 2018/12/25 05:44:13 thorpej Exp $");
 
 #include <sys/types.h>
 #include <sys/param.h>
 #include <sys/atomic.h>
 #include <sys/condvar.h>
 #include <sys/cpu.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/kthread.h>
 #include <sys/mutex.h>
 #include <sys/once.h>
 #include <sys/percpu.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/queue.h>
 #include <sys/systm.h>
 #include <sys/threadpool.h>
 
 static ONCE_DECL(threadpool_init_once)
 
-#ifdef DIAGNOSTIC
 #define THREADPOOL_INIT()                                       \
 do {                                                            \
-        int threadpool_init_error =                             \
+        int threadpool_init_error __diagused =                  \
             RUN_ONCE(&threadpool_init_once, threadpools_init);  \
         KASSERT(threadpool_init_error == 0);                    \
 } while (/*CONSTCOND*/0)
-#else
-#define THREADPOOL_INIT()                                       \
-do {                                                            \
-        RUN_ONCE(&threadpool_init_once, threadpools_init);      \
-} while (/*CONSTCOND*/0)
-#endif
 
 
 /* Data structures */
 
 TAILQ_HEAD(job_head, threadpool_job_impl);
 TAILQ_HEAD(thread_head, threadpool_thread);
 
 typedef struct threadpool_job_impl {
         kmutex_t *job_lock;                             /* 1 */
         struct threadpool_thread *job_thread;           /* 1 */
         TAILQ_ENTRY(threadpool_job_impl) job_entry;     /* 2 */
         volatile unsigned int job_refcnt;               /* 1 */
                                 /* implicit pad on _LP64 */
         kcondvar_t job_cv;                              /* 3 */
@@ -721,27 +714,27 @@ threadpool_job_destroy(threadpool_job_t
         (void)strlcpy(job->job_name, "deadjob", sizeof(job->job_name));
 }
 
 static int
 threadpool_job_hold(threadpool_job_impl_t *job)
 {
         unsigned int refcnt;
         do {
                 refcnt = job->job_refcnt;
                 if (refcnt == UINT_MAX)
                         return EBUSY;
         } while (atomic_cas_uint(&job->job_refcnt, refcnt, (refcnt + 1))
             != refcnt);
 
         return 0;
 }
 
 static void
 threadpool_job_rele(threadpool_job_impl_t *job)
 {
         unsigned int refcnt;
 
         do {
                 refcnt = job->job_refcnt;
                 KASSERT(0 < refcnt);
                 if (refcnt == 1) {
                         mutex_enter(job->job_lock);
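
As an aside on the last hunk shown above: threadpool_job_hold() takes its reference with a lock-free compare-and-swap loop and returns EBUSY rather than let the count wrap at UINT_MAX. A hedged, self-contained sketch of that pattern against atomic_cas_uint(9), using a made-up struct refobj in place of threadpool_job_impl:

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/errno.h>

/* Hypothetical stand-in for struct threadpool_job_impl, illustration only. */
struct refobj {
        volatile unsigned int   ro_refcnt;
};

static int
refobj_hold(struct refobj *ro)
{
        unsigned int refcnt;

        do {
                refcnt = ro->ro_refcnt;
                if (refcnt == UINT_MAX)         /* saturated: refuse another hold */
                        return EBUSY;
                /* Retry if another CPU raced us between the read and the CAS. */
        } while (atomic_cas_uint(&ro->ro_refcnt, refcnt, refcnt + 1) != refcnt);

        return 0;
}

atomic_cas_uint() returns the value it observed, so equality with the snapshot means the increment landed; anything else means another thread got there first and the loop retries with a fresh value.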