Tue Apr 14 13:26:59 2015 UTC ()
Group initialization of rnd_samples and rnd_global.


(riastradh)
diff -r1.58 -r1.59 src/sys/kern/kern_rndq.c

cvs diff -r1.58 -r1.59 src/sys/kern/Attic/kern_rndq.c (switch to unified diff)

--- src/sys/kern/Attic/kern_rndq.c 2015/04/14 13:23:25 1.58
+++ src/sys/kern/Attic/kern_rndq.c 2015/04/14 13:26:58 1.59
@@ -1,1510 +1,1512 @@ @@ -1,1510 +1,1512 @@
1/* $NetBSD: kern_rndq.c,v 1.58 2015/04/14 13:23:25 riastradh Exp $ */ 1/* $NetBSD: kern_rndq.c,v 1.59 2015/04/14 13:26:58 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997-2013 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997-2013 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon. 8 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon.
9 * This code uses ideas and algorithms from the Linux driver written by 9 * This code uses ideas and algorithms from the Linux driver written by
10 * Ted Ts'o. 10 * Ted Ts'o.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
15 * 1. Redistributions of source code must retain the above copyright 15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer. 16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright 17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the 18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution. 19 * documentation and/or other materials provided with the distribution.
20 * 20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE. 31 * POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#include <sys/cdefs.h> 34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.58 2015/04/14 13:23:25 riastradh Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.59 2015/04/14 13:26:58 riastradh Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/atomic.h> 38#include <sys/atomic.h>
39#include <sys/ioctl.h> 39#include <sys/ioctl.h>
40#include <sys/fcntl.h> 40#include <sys/fcntl.h>
41#include <sys/select.h> 41#include <sys/select.h>
42#include <sys/poll.h> 42#include <sys/poll.h>
43#include <sys/kmem.h> 43#include <sys/kmem.h>
44#include <sys/mutex.h> 44#include <sys/mutex.h>
45#include <sys/proc.h> 45#include <sys/proc.h>
46#include <sys/kernel.h> 46#include <sys/kernel.h>
47#include <sys/conf.h> 47#include <sys/conf.h>
48#include <sys/systm.h> 48#include <sys/systm.h>
49#include <sys/callout.h> 49#include <sys/callout.h>
50#include <sys/intr.h> 50#include <sys/intr.h>
51#include <sys/rnd.h> 51#include <sys/rnd.h>
52#include <sys/rndpool.h> 52#include <sys/rndpool.h>
53#include <sys/rndsink.h> 53#include <sys/rndsink.h>
54#include <sys/rndsource.h> 54#include <sys/rndsource.h>
55#include <sys/vnode.h> 55#include <sys/vnode.h>
56#include <sys/pool.h> 56#include <sys/pool.h>
57#include <sys/kauth.h> 57#include <sys/kauth.h>
58#include <sys/once.h> 58#include <sys/once.h>
59#include <sys/rngtest.h> 59#include <sys/rngtest.h>
60 60
61#include <dev/rnd_private.h> 61#include <dev/rnd_private.h>
62 62
63#ifdef COMPAT_50 63#ifdef COMPAT_50
64#include <compat/sys/rnd.h> 64#include <compat/sys/rnd.h>
65#endif 65#endif
66 66
67#if defined(__HAVE_CPU_COUNTER) 67#if defined(__HAVE_CPU_COUNTER)
68#include <machine/cpu_counter.h> 68#include <machine/cpu_counter.h>
69#endif 69#endif
70 70
71#ifdef RND_DEBUG 71#ifdef RND_DEBUG
72#define DPRINTF(l,x) if (rnd_debug & (l)) rnd_printf x 72#define DPRINTF(l,x) if (rnd_debug & (l)) rnd_printf x
73int rnd_debug = 0; 73int rnd_debug = 0;
74#else 74#else
75#define DPRINTF(l,x) 75#define DPRINTF(l,x)
76#endif 76#endif
77 77
78/* 78/*
79 * list devices attached 79 * list devices attached
80 */ 80 */
81#if 0 81#if 0
82#define RND_VERBOSE 82#define RND_VERBOSE
83#endif 83#endif
84 84
85#ifdef RND_VERBOSE 85#ifdef RND_VERBOSE
86#define rnd_printf_verbose(fmt, ...) rnd_printf(fmt, ##__VA_ARGS__) 86#define rnd_printf_verbose(fmt, ...) rnd_printf(fmt, ##__VA_ARGS__)
87#else 87#else
88#define rnd_printf_verbose(fmt, ...) ((void)0) 88#define rnd_printf_verbose(fmt, ...) ((void)0)
89#endif 89#endif
90 90
91#ifdef RND_VERBOSE 91#ifdef RND_VERBOSE
92static unsigned int deltacnt; 92static unsigned int deltacnt;
93#endif 93#endif
94 94
95/* 95/*
96 * This is a little bit of state information attached to each device that we 96 * This is a little bit of state information attached to each device that we
97 * collect entropy from. This is simply a collection buffer, and when it 97 * collect entropy from. This is simply a collection buffer, and when it
98 * is full it will be "detached" from the source and added to the entropy 98 * is full it will be "detached" from the source and added to the entropy
99 * pool after entropy is distilled as much as possible. 99 * pool after entropy is distilled as much as possible.
100 */ 100 */
101#define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */ 101#define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */
102typedef struct _rnd_sample_t { 102typedef struct _rnd_sample_t {
103 SIMPLEQ_ENTRY(_rnd_sample_t) next; 103 SIMPLEQ_ENTRY(_rnd_sample_t) next;
104 krndsource_t *source; 104 krndsource_t *source;
105 int cursor; 105 int cursor;
106 int entropy; 106 int entropy;
107 uint32_t ts[RND_SAMPLE_COUNT]; 107 uint32_t ts[RND_SAMPLE_COUNT];
108 uint32_t values[RND_SAMPLE_COUNT]; 108 uint32_t values[RND_SAMPLE_COUNT];
109} rnd_sample_t; 109} rnd_sample_t;
110 110
111SIMPLEQ_HEAD(rnd_sampleq, _rnd_sample_t); 111SIMPLEQ_HEAD(rnd_sampleq, _rnd_sample_t);
112 112
113/* 113/*
114 * The sample queue. Samples are put into the queue and processed in a 114 * The sample queue. Samples are put into the queue and processed in a
115 * softint in order to limit the latency of adding a sample. 115 * softint in order to limit the latency of adding a sample.
116 */ 116 */
117static struct { 117static struct {
118 kmutex_t lock; 118 kmutex_t lock;
119 struct rnd_sampleq q; 119 struct rnd_sampleq q;
120} rnd_samples __cacheline_aligned; 120} rnd_samples __cacheline_aligned;
121 121
122/* 122/*
123 * Memory pool for sample buffers 123 * Memory pool for sample buffers
124 */ 124 */
125static pool_cache_t rnd_mempc; 125static pool_cache_t rnd_mempc;
126 126
127/* 127/*
128 * Global entropy pool and sources. 128 * Global entropy pool and sources.
129 */ 129 */
130static struct { 130static struct {
131 kmutex_t lock; 131 kmutex_t lock;
132 rndpool_t pool; 132 rndpool_t pool;
133 LIST_HEAD(, krndsource) sources; 133 LIST_HEAD(, krndsource) sources;
134} rnd_global __cacheline_aligned; 134} rnd_global __cacheline_aligned;
135 135
136/* 136/*
137 * This source is used to easily "remove" queue entries when the source 137 * This source is used to easily "remove" queue entries when the source
138 * which actually generated the events is going away. 138 * which actually generated the events is going away.
139 */ 139 */
140static krndsource_t rnd_source_no_collect = { 140static krndsource_t rnd_source_no_collect = {
141 /* LIST_ENTRY list */ 141 /* LIST_ENTRY list */
142 .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', 142 .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't',
143 0, 0, 0, 0, 0, 0, 0 }, 143 0, 0, 0, 0, 0, 0, 0 },
144 .total = 0, 144 .total = 0,
145 .type = RND_TYPE_UNKNOWN, 145 .type = RND_TYPE_UNKNOWN,
146 .flags = (RND_FLAG_NO_COLLECT | 146 .flags = (RND_FLAG_NO_COLLECT |
147 RND_FLAG_NO_ESTIMATE), 147 RND_FLAG_NO_ESTIMATE),
148 .state = NULL, 148 .state = NULL,
149 .test_cnt = 0, 149 .test_cnt = 0,
150 .test = NULL 150 .test = NULL
151}; 151};
152 152
153krndsource_t rnd_printf_source, rnd_autoconf_source; 153krndsource_t rnd_printf_source, rnd_autoconf_source;
154 154
155void *rnd_process, *rnd_wakeup; 155void *rnd_process, *rnd_wakeup;
156 156
157static void rnd_wakeup_readers(void); 157static void rnd_wakeup_readers(void);
158static inline uint32_t rnd_counter(void); 158static inline uint32_t rnd_counter(void);
159static void rnd_intr(void *); 159static void rnd_intr(void *);
160static void rnd_wake(void *); 160static void rnd_wake(void *);
161static void rnd_process_events(void); 161static void rnd_process_events(void);
162static void rnd_add_data_ts(krndsource_t *, const void *const, 162static void rnd_add_data_ts(krndsource_t *, const void *const,
163 uint32_t, uint32_t, uint32_t); 163 uint32_t, uint32_t, uint32_t);
164static inline void rnd_schedule_process(void); 164static inline void rnd_schedule_process(void);
165 165
166int rnd_ready = 0; 166int rnd_ready = 0;
167int rnd_initial_entropy = 0; 167int rnd_initial_entropy = 0;
168 168
169static int rnd_printing = 0; 169static int rnd_printing = 0;
170 170
171#ifdef DIAGNOSTIC 171#ifdef DIAGNOSTIC
172static int rnd_tested = 0; 172static int rnd_tested = 0;
173static rngtest_t rnd_rt; 173static rngtest_t rnd_rt;
174static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)]; 174static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)];
175#endif 175#endif
176 176
177static rndsave_t *boot_rsp; 177static rndsave_t *boot_rsp;
178 178
179static inline void 179static inline void
180rnd_printf(const char *fmt, ...) 180rnd_printf(const char *fmt, ...)
181{ 181{
182 va_list ap; 182 va_list ap;
183 183
184 membar_consumer(); 184 membar_consumer();
185 if (rnd_printing) { 185 if (rnd_printing) {
186 return; 186 return;
187 } 187 }
188 rnd_printing = 1; 188 rnd_printing = 1;
189 membar_producer(); 189 membar_producer();
190 va_start(ap, fmt); 190 va_start(ap, fmt);
191 vprintf(fmt, ap); 191 vprintf(fmt, ap);
192 va_end(ap); 192 va_end(ap);
193 rnd_printing = 0; 193 rnd_printing = 0;
194} 194}
195 195
196void 196void
197rnd_init_softint(void) { 197rnd_init_softint(void) {
198 rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE, 198 rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
199 rnd_intr, NULL); 199 rnd_intr, NULL);
200 rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE, 200 rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE,
201 rnd_wake, NULL); 201 rnd_wake, NULL);
202 rnd_schedule_process(); 202 rnd_schedule_process();
203} 203}
204 204
205/* 205/*
206 * Generate a 32-bit counter. 206 * Generate a 32-bit counter.
207 */ 207 */
208static inline uint32_t 208static inline uint32_t
209rnd_counter(void) 209rnd_counter(void)
210{ 210{
211 struct bintime bt; 211 struct bintime bt;
212 uint32_t ret; 212 uint32_t ret;
213 213
214#if defined(__HAVE_CPU_COUNTER) 214#if defined(__HAVE_CPU_COUNTER)
215 if (cpu_hascounter()) 215 if (cpu_hascounter())
216 return cpu_counter32(); 216 return cpu_counter32();
217#endif 217#endif
218 if (!rnd_ready) 218 if (!rnd_ready)
219 /* Too early to call nanotime. */ 219 /* Too early to call nanotime. */
220 return 0; 220 return 0;
221 221
222 binuptime(&bt); 222 binuptime(&bt);
223 ret = bt.sec; 223 ret = bt.sec;
224 ret |= bt.sec >> 32; 224 ret |= bt.sec >> 32;
225 ret |= bt.frac; 225 ret |= bt.frac;
226 ret |= bt.frac >> 32; 226 ret |= bt.frac >> 32;
227 227
228 return ret; 228 return ret;
229} 229}
230 230
231/* 231/*
232 * We may be called from low IPL -- protect our softint. 232 * We may be called from low IPL -- protect our softint.
233 */ 233 */
234 234
235static inline void 235static inline void
236rnd_schedule_softint(void *softint) 236rnd_schedule_softint(void *softint)
237{ 237{
238 kpreempt_disable(); 238 kpreempt_disable();
239 softint_schedule(softint); 239 softint_schedule(softint);
240 kpreempt_enable(); 240 kpreempt_enable();
241} 241}
242 242
243static inline void 243static inline void
244rnd_schedule_process(void) 244rnd_schedule_process(void)
245{ 245{
246 if (__predict_true(rnd_process)) { 246 if (__predict_true(rnd_process)) {
247 rnd_schedule_softint(rnd_process); 247 rnd_schedule_softint(rnd_process);
248 return; 248 return;
249 }  249 }
250 rnd_process_events(); 250 rnd_process_events();
251} 251}
252 252
253static inline void 253static inline void
254rnd_schedule_wakeup(void) 254rnd_schedule_wakeup(void)
255{ 255{
256 if (__predict_true(rnd_wakeup)) { 256 if (__predict_true(rnd_wakeup)) {
257 rnd_schedule_softint(rnd_wakeup); 257 rnd_schedule_softint(rnd_wakeup);
258 return; 258 return;
259 } 259 }
260 rnd_wakeup_readers(); 260 rnd_wakeup_readers();
261} 261}
262 262
263/* 263/*
264 * Tell any sources with "feed me" callbacks that we are hungry. 264 * Tell any sources with "feed me" callbacks that we are hungry.
265 */ 265 */
266void 266void
267rnd_getmore(size_t byteswanted) 267rnd_getmore(size_t byteswanted)
268{ 268{
269 krndsource_t *rs; 269 krndsource_t *rs;
270 270
271 mutex_spin_enter(&rnd_global.lock); 271 mutex_spin_enter(&rnd_global.lock);
272 LIST_FOREACH(rs, &rnd_global.sources, list) { 272 LIST_FOREACH(rs, &rnd_global.sources, list) {
273 if (!ISSET(rs->flags, RND_FLAG_HASCB)) 273 if (!ISSET(rs->flags, RND_FLAG_HASCB))
274 continue; 274 continue;
275 KASSERT(rs->get != NULL); 275 KASSERT(rs->get != NULL);
276 KASSERT(rs->getarg != NULL); 276 KASSERT(rs->getarg != NULL);
277 rs->get(byteswanted, rs->getarg); 277 rs->get(byteswanted, rs->getarg);
278 rnd_printf_verbose("rnd: entropy estimate %zu bits\n", 278 rnd_printf_verbose("rnd: entropy estimate %zu bits\n",
279 rndpool_get_entropy_count(&rnd_global.pool)); 279 rndpool_get_entropy_count(&rnd_global.pool));
280 rnd_printf_verbose("rnd: asking source %s for %zu bytes\n", 280 rnd_printf_verbose("rnd: asking source %s for %zu bytes\n",
281 rs->name, byteswanted); 281 rs->name, byteswanted);
282 } 282 }
283 mutex_spin_exit(&rnd_global.lock); 283 mutex_spin_exit(&rnd_global.lock);
284} 284}
285 285
286/* 286/*
287 * Check to see if there are readers waiting on us. If so, kick them. 287 * Check to see if there are readers waiting on us. If so, kick them.
288 */ 288 */
289static void 289static void
290rnd_wakeup_readers(void) 290rnd_wakeup_readers(void)
291{ 291{
292 292
293 /* 293 /*
294 * XXX This bookkeeping shouldn't be here -- this is not where 294 * XXX This bookkeeping shouldn't be here -- this is not where
295 * the rnd_initial_entropy state change actually happens. 295 * the rnd_initial_entropy state change actually happens.
296 */ 296 */
297 mutex_spin_enter(&rnd_global.lock); 297 mutex_spin_enter(&rnd_global.lock);
298 const size_t entropy_count = rndpool_get_entropy_count(&rnd_global.pool); 298 const size_t entropy_count = rndpool_get_entropy_count(&rnd_global.pool);
299 if (entropy_count < RND_ENTROPY_THRESHOLD * 8) { 299 if (entropy_count < RND_ENTROPY_THRESHOLD * 8) {
300 mutex_spin_exit(&rnd_global.lock); 300 mutex_spin_exit(&rnd_global.lock);
301 return; 301 return;
302 } else { 302 } else {
303#ifdef RND_VERBOSE 303#ifdef RND_VERBOSE
304 if (__predict_false(!rnd_initial_entropy)) 304 if (__predict_false(!rnd_initial_entropy))
305 rnd_printf_verbose("rnd: have initial entropy (%zu)\n", 305 rnd_printf_verbose("rnd: have initial entropy (%zu)\n",
306 entropy_count); 306 entropy_count);
307#endif 307#endif
308 rnd_initial_entropy = 1; 308 rnd_initial_entropy = 1;
309 } 309 }
310 mutex_spin_exit(&rnd_global.lock); 310 mutex_spin_exit(&rnd_global.lock);
311 311
312 rndsinks_distribute(); 312 rndsinks_distribute();
313} 313}
314 314
315/* 315/*
316 * Use the timing/value of the event to estimate the entropy gathered. 316 * Use the timing/value of the event to estimate the entropy gathered.
317 * If all the differentials (first, second, and third) are non-zero, return 317 * If all the differentials (first, second, and third) are non-zero, return
318 * non-zero. If any of these are zero, return zero. 318 * non-zero. If any of these are zero, return zero.
319 */ 319 */
320static inline uint32_t 320static inline uint32_t
321rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta) 321rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta)
322{ 322{
323 int32_t delta2, delta3; 323 int32_t delta2, delta3;
324 324
325 d->insamples++; 325 d->insamples++;
326 326
327 /* 327 /*
328 * Calculate the second and third order differentials 328 * Calculate the second and third order differentials
329 */ 329 */
330 delta2 = d->dx - delta; 330 delta2 = d->dx - delta;
331 if (delta2 < 0) 331 if (delta2 < 0)
332 delta2 = -delta2; 332 delta2 = -delta2;
333 333
334 delta3 = d->d2x - delta2; 334 delta3 = d->d2x - delta2;
335 if (delta3 < 0) 335 if (delta3 < 0)
336 delta3 = -delta3; 336 delta3 = -delta3;
337 337
338 d->x = v; 338 d->x = v;
339 d->dx = delta; 339 d->dx = delta;
340 d->d2x = delta2; 340 d->d2x = delta2;
341 341
342 /* 342 /*
343 * If any delta is 0, we got no entropy. If all are non-zero, we 343 * If any delta is 0, we got no entropy. If all are non-zero, we
344 * might have something. 344 * might have something.
345 */ 345 */
346 if (delta == 0 || delta2 == 0 || delta3 == 0) 346 if (delta == 0 || delta2 == 0 || delta3 == 0)
347 return (0); 347 return (0);
348 348
349 d->outbits++; 349 d->outbits++;
350 return (1); 350 return (1);
351} 351}
352 352
353/* 353/*
354 * Delta estimator for 32-bit timeestamps. Must handle wrap. 354 * Delta estimator for 32-bit timeestamps. Must handle wrap.
355 */ 355 */
356static inline uint32_t 356static inline uint32_t
357rnd_dt_estimate(krndsource_t *rs, uint32_t t) 357rnd_dt_estimate(krndsource_t *rs, uint32_t t)
358{ 358{
359 int32_t delta; 359 int32_t delta;
360 uint32_t ret; 360 uint32_t ret;
361 rnd_delta_t *d = &rs->time_delta; 361 rnd_delta_t *d = &rs->time_delta;
362 362
363 if (t < d->x) { 363 if (t < d->x) {
364 delta = UINT32_MAX - d->x + t; 364 delta = UINT32_MAX - d->x + t;
365 } else { 365 } else {
366 delta = d->x - t; 366 delta = d->x - t;
367 } 367 }
368 368
369 if (delta < 0) { 369 if (delta < 0) {
370 delta = -delta; 370 delta = -delta;
371 } 371 }
372 372
373 ret = rnd_delta_estimate(d, t, delta); 373 ret = rnd_delta_estimate(d, t, delta);
374 374
375 KASSERT(d->x == t); 375 KASSERT(d->x == t);
376 KASSERT(d->dx == delta); 376 KASSERT(d->dx == delta);
377#ifdef RND_VERBOSE 377#ifdef RND_VERBOSE
378 if (deltacnt++ % 1151 == 0) { 378 if (deltacnt++ % 1151 == 0) {
379 rnd_printf_verbose("rnd_dt_estimate: %s x = %lld, dx = %lld, " 379 rnd_printf_verbose("rnd_dt_estimate: %s x = %lld, dx = %lld, "
380 "d2x = %lld\n", rs->name, 380 "d2x = %lld\n", rs->name,
381 (int)d->x, (int)d->dx, (int)d->d2x); 381 (int)d->x, (int)d->dx, (int)d->d2x);
382 } 382 }
383#endif 383#endif
384 return ret; 384 return ret;
385} 385}
386 386
387/* 387/*
388 * Delta estimator for 32 or bit values. "Wrap" isn't. 388 * Delta estimator for 32 or bit values. "Wrap" isn't.
389 */ 389 */
390static inline uint32_t 390static inline uint32_t
391rnd_dv_estimate(krndsource_t *rs, uint32_t v) 391rnd_dv_estimate(krndsource_t *rs, uint32_t v)
392{ 392{
393 int32_t delta; 393 int32_t delta;
394 uint32_t ret; 394 uint32_t ret;
395 rnd_delta_t *d = &rs->value_delta; 395 rnd_delta_t *d = &rs->value_delta;
396 396
397 delta = d->x - v; 397 delta = d->x - v;
398 398
399 if (delta < 0) { 399 if (delta < 0) {
400 delta = -delta; 400 delta = -delta;
401 } 401 }
402 ret = rnd_delta_estimate(d, v, (uint32_t)delta); 402 ret = rnd_delta_estimate(d, v, (uint32_t)delta);
403 403
404 KASSERT(d->x == v); 404 KASSERT(d->x == v);
405 KASSERT(d->dx == delta); 405 KASSERT(d->dx == delta);
406#ifdef RND_VERBOSE 406#ifdef RND_VERBOSE
407 if (deltacnt++ % 1151 == 0) { 407 if (deltacnt++ % 1151 == 0) {
408 rnd_printf_verbose("rnd_dv_estimate: %s x = %lld, dx = %lld, " 408 rnd_printf_verbose("rnd_dv_estimate: %s x = %lld, dx = %lld, "
409 " d2x = %lld\n", rs->name, 409 " d2x = %lld\n", rs->name,
410 (long long int)d->x, 410 (long long int)d->x,
411 (long long int)d->dx, 411 (long long int)d->dx,
412 (long long int)d->d2x); 412 (long long int)d->d2x);
413 } 413 }
414#endif 414#endif
415 return ret; 415 return ret;
416} 416}
417 417
418#if defined(__HAVE_CPU_COUNTER) 418#if defined(__HAVE_CPU_COUNTER)
419static struct { 419static struct {
420 kmutex_t lock; 420 kmutex_t lock;
421 struct callout callout; 421 struct callout callout;
422 struct callout stop_callout; 422 struct callout stop_callout;
423 krndsource_t source; 423 krndsource_t source;
424} rnd_skew __cacheline_aligned; 424} rnd_skew __cacheline_aligned;
425 425
426static void rnd_skew_intr(void *); 426static void rnd_skew_intr(void *);
427 427
428static void 428static void
429rnd_skew_enable(krndsource_t *rs, bool enabled) 429rnd_skew_enable(krndsource_t *rs, bool enabled)
430{ 430{
431 431
432 if (enabled) { 432 if (enabled) {
433 rnd_skew_intr(rs); 433 rnd_skew_intr(rs);
434 } else { 434 } else {
435 callout_stop(&rnd_skew.callout); 435 callout_stop(&rnd_skew.callout);
436 } 436 }
437} 437}
438 438
439static void 439static void
440rnd_skew_stop_intr(void *arg) 440rnd_skew_stop_intr(void *arg)
441{ 441{
442 442
443 callout_stop(&rnd_skew.callout); 443 callout_stop(&rnd_skew.callout);
444} 444}
445 445
446static void 446static void
447rnd_skew_get(size_t bytes, void *priv) 447rnd_skew_get(size_t bytes, void *priv)
448{ 448{
449 krndsource_t *skewsrcp = priv; 449 krndsource_t *skewsrcp = priv;
450 450
451 KASSERT(skewsrcp == &rnd_skew.source); 451 KASSERT(skewsrcp == &rnd_skew.source);
452 if (RND_ENABLED(skewsrcp)) { 452 if (RND_ENABLED(skewsrcp)) {
453 /* Measure for 30s */ 453 /* Measure for 30s */
454 callout_schedule(&rnd_skew.stop_callout, hz * 30); 454 callout_schedule(&rnd_skew.stop_callout, hz * 30);
455 callout_schedule(&rnd_skew.callout, 1); 455 callout_schedule(&rnd_skew.callout, 1);
456 } 456 }
457} 457}
458 458
459static void 459static void
460rnd_skew_intr(void *arg) 460rnd_skew_intr(void *arg)
461{ 461{
462 static int flipflop; 462 static int flipflop;
463 463
464 /* 464 /*
465 * Even on systems with seemingly stable clocks, the 465 * Even on systems with seemingly stable clocks, the
466 * delta-time entropy estimator seems to think we get 1 bit here 466 * delta-time entropy estimator seems to think we get 1 bit here
467 * about every 2 calls. 467 * about every 2 calls.
468 * 468 *
469 */ 469 */
470 mutex_spin_enter(&rnd_skew.lock); 470 mutex_spin_enter(&rnd_skew.lock);
471 flipflop = !flipflop; 471 flipflop = !flipflop;
472 472
473 if (RND_ENABLED(&rnd_skew.source)) { 473 if (RND_ENABLED(&rnd_skew.source)) {
474 if (flipflop) { 474 if (flipflop) {
475 rnd_add_uint32(&rnd_skew.source, rnd_counter()); 475 rnd_add_uint32(&rnd_skew.source, rnd_counter());
476 callout_schedule(&rnd_skew.callout, hz / 10); 476 callout_schedule(&rnd_skew.callout, hz / 10);
477 } else { 477 } else {
478 callout_schedule(&rnd_skew.callout, 1); 478 callout_schedule(&rnd_skew.callout, 1);
479 } 479 }
480 } 480 }
481 mutex_spin_exit(&rnd_skew.lock); 481 mutex_spin_exit(&rnd_skew.lock);
482} 482}
483#endif 483#endif
484 484
485/* 485/*
486 * initialize the global random pool for our use. 486 * initialize the global random pool for our use.
487 * rnd_init() must be called very early on in the boot process, so 487 * rnd_init() must be called very early on in the boot process, so
488 * the pool is ready for other devices to attach as sources. 488 * the pool is ready for other devices to attach as sources.
489 */ 489 */
490void 490void
491rnd_init(void) 491rnd_init(void)
492{ 492{
493 uint32_t c; 493 uint32_t c;
494 494
495 if (rnd_ready) 495 if (rnd_ready)
496 return; 496 return;
497 497
498 mutex_init(&rnd_samples.lock, MUTEX_DEFAULT, IPL_VM); 
499 rndsinks_init(); 
500 
501 /* 498 /*
502 * take a counter early, hoping that there's some variance in 499 * take a counter early, hoping that there's some variance in
503 * the following operations 500 * the following operations
504 */ 501 */
505 c = rnd_counter(); 502 c = rnd_counter();
506 503
507 LIST_INIT(&rnd_global.sources); 504 rndsinks_init();
 505
 506 /* Initialize the sample queue. */
 507 mutex_init(&rnd_samples.lock, MUTEX_DEFAULT, IPL_VM);
508 SIMPLEQ_INIT(&rnd_samples.q); 508 SIMPLEQ_INIT(&rnd_samples.q);
509 509
510 rndpool_init(&rnd_global.pool); 510 /* Initialize the global pool and sources list. */
511 mutex_init(&rnd_global.lock, MUTEX_DEFAULT, IPL_VM); 511 mutex_init(&rnd_global.lock, MUTEX_DEFAULT, IPL_VM);
 512 rndpool_init(&rnd_global.pool);
 513 LIST_INIT(&rnd_global.sources);
512 514
513 rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0, 515 rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0,
514 "rndsample", NULL, IPL_VM, 516 "rndsample", NULL, IPL_VM,
515 NULL, NULL, NULL); 517 NULL, NULL, NULL);
516 518
517 /* 519 /*
518 * Set resource limit. The rnd_process_events() function 520 * Set resource limit. The rnd_process_events() function
519 * is called every tick and process the sample queue. 521 * is called every tick and process the sample queue.
520 * Without limitation, if a lot of rnd_add_*() are called, 522 * Without limitation, if a lot of rnd_add_*() are called,
521 * all kernel memory may be eaten up. 523 * all kernel memory may be eaten up.
522 */ 524 */
523 pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0); 525 pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0);
524 526
525 /* 527 /*
526 * Mix *something*, *anything* into the pool to help it get started. 528 * Mix *something*, *anything* into the pool to help it get started.
527 * However, it's not safe for rnd_counter() to call microtime() yet, 529 * However, it's not safe for rnd_counter() to call microtime() yet,
528 * so on some platforms we might just end up with zeros anyway. 530 * so on some platforms we might just end up with zeros anyway.
529 * XXX more things to add would be nice. 531 * XXX more things to add would be nice.
530 */ 532 */
531 if (c) { 533 if (c) {
532 mutex_spin_enter(&rnd_global.lock); 534 mutex_spin_enter(&rnd_global.lock);
533 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1); 535 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
534 c = rnd_counter(); 536 c = rnd_counter();
535 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1); 537 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
536 mutex_spin_exit(&rnd_global.lock); 538 mutex_spin_exit(&rnd_global.lock);
537 } 539 }
538 540
539 /* 541 /*
540 * If we have a cycle counter, take its error with respect 542 * If we have a cycle counter, take its error with respect
541 * to the callout mechanism as a source of entropy, ala 543 * to the callout mechanism as a source of entropy, ala
542 * TrueRand. 544 * TrueRand.
543 * 545 *
544 */ 546 */
545#if defined(__HAVE_CPU_COUNTER) 547#if defined(__HAVE_CPU_COUNTER)
546 /* IPL_VM because taken while rnd_global.lock is held. */ 548 /* IPL_VM because taken while rnd_global.lock is held. */
547 mutex_init(&rnd_skew.lock, MUTEX_DEFAULT, IPL_VM); 549 mutex_init(&rnd_skew.lock, MUTEX_DEFAULT, IPL_VM);
548 callout_init(&rnd_skew.callout, CALLOUT_MPSAFE); 550 callout_init(&rnd_skew.callout, CALLOUT_MPSAFE);
549 callout_init(&rnd_skew.stop_callout, CALLOUT_MPSAFE); 551 callout_init(&rnd_skew.stop_callout, CALLOUT_MPSAFE);
550 callout_setfunc(&rnd_skew.callout, rnd_skew_intr, NULL); 552 callout_setfunc(&rnd_skew.callout, rnd_skew_intr, NULL);
551 callout_setfunc(&rnd_skew.stop_callout, rnd_skew_stop_intr, NULL); 553 callout_setfunc(&rnd_skew.stop_callout, rnd_skew_stop_intr, NULL);
552 rndsource_setcb(&rnd_skew.source, rnd_skew_get, &rnd_skew.source); 554 rndsource_setcb(&rnd_skew.source, rnd_skew_get, &rnd_skew.source);
553 rndsource_setenable(&rnd_skew.source, rnd_skew_enable); 555 rndsource_setenable(&rnd_skew.source, rnd_skew_enable);
554 rnd_attach_source(&rnd_skew.source, "callout", RND_TYPE_SKEW, 556 rnd_attach_source(&rnd_skew.source, "callout", RND_TYPE_SKEW,
555 RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE| 557 RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|
556 RND_FLAG_HASCB|RND_FLAG_HASENABLE); 558 RND_FLAG_HASCB|RND_FLAG_HASENABLE);
557 rnd_skew_intr(NULL); 559 rnd_skew_intr(NULL);
558#endif 560#endif
559 561
560 rnd_printf_verbose("rnd: initialised (%u)%s", RND_POOLBITS, 562 rnd_printf_verbose("rnd: initialised (%u)%s", RND_POOLBITS,
561 c ? " with counter\n" : "\n"); 563 c ? " with counter\n" : "\n");
562 if (boot_rsp != NULL) { 564 if (boot_rsp != NULL) {
563 mutex_spin_enter(&rnd_global.lock); 565 mutex_spin_enter(&rnd_global.lock);
564 rndpool_add_data(&rnd_global.pool, boot_rsp->data, 566 rndpool_add_data(&rnd_global.pool, boot_rsp->data,
565 sizeof(boot_rsp->data), 567 sizeof(boot_rsp->data),
566 MIN(boot_rsp->entropy, RND_POOLBITS / 2)); 568 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
567 if (rndpool_get_entropy_count(&rnd_global.pool) > 569 if (rndpool_get_entropy_count(&rnd_global.pool) >
568 RND_ENTROPY_THRESHOLD * 8) { 570 RND_ENTROPY_THRESHOLD * 8) {
569 rnd_initial_entropy = 1; 571 rnd_initial_entropy = 1;
570 } 572 }
571 mutex_spin_exit(&rnd_global.lock); 573 mutex_spin_exit(&rnd_global.lock);
572 rnd_printf("rnd: seeded with %d bits\n", 574 rnd_printf("rnd: seeded with %d bits\n",
573 MIN(boot_rsp->entropy, RND_POOLBITS / 2)); 575 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
574 memset(boot_rsp, 0, sizeof(*boot_rsp)); 576 memset(boot_rsp, 0, sizeof(*boot_rsp));
575 } 577 }
576 rnd_attach_source(&rnd_printf_source, "printf", RND_TYPE_UNKNOWN, 578 rnd_attach_source(&rnd_printf_source, "printf", RND_TYPE_UNKNOWN,
577 RND_FLAG_NO_ESTIMATE); 579 RND_FLAG_NO_ESTIMATE);
578 rnd_attach_source(&rnd_autoconf_source, "autoconf", 580 rnd_attach_source(&rnd_autoconf_source, "autoconf",
579 RND_TYPE_UNKNOWN, 581 RND_TYPE_UNKNOWN,
580 RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME); 582 RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME);
581 rnd_ready = 1; 583 rnd_ready = 1;
582} 584}
583 585
584static rnd_sample_t * 586static rnd_sample_t *
585rnd_sample_allocate(krndsource_t *source) 587rnd_sample_allocate(krndsource_t *source)
586{ 588{
587 rnd_sample_t *c; 589 rnd_sample_t *c;
588 590
589 c = pool_cache_get(rnd_mempc, PR_WAITOK); 591 c = pool_cache_get(rnd_mempc, PR_WAITOK);
590 if (c == NULL) 592 if (c == NULL)
591 return (NULL); 593 return (NULL);
592 594
593 c->source = source; 595 c->source = source;
594 c->cursor = 0; 596 c->cursor = 0;
595 c->entropy = 0; 597 c->entropy = 0;
596 598
597 return (c); 599 return (c);
598} 600}
599 601
600/* 602/*
601 * Don't wait on allocation. To be used in an interrupt context. 603 * Don't wait on allocation. To be used in an interrupt context.
602 */ 604 */
603static rnd_sample_t * 605static rnd_sample_t *
604rnd_sample_allocate_isr(krndsource_t *source) 606rnd_sample_allocate_isr(krndsource_t *source)
605{ 607{
606 rnd_sample_t *c; 608 rnd_sample_t *c;
607 609
608 c = pool_cache_get(rnd_mempc, PR_NOWAIT); 610 c = pool_cache_get(rnd_mempc, PR_NOWAIT);
609 if (c == NULL) 611 if (c == NULL)
610 return (NULL); 612 return (NULL);
611 613
612 c->source = source; 614 c->source = source;
613 c->cursor = 0; 615 c->cursor = 0;
614 c->entropy = 0; 616 c->entropy = 0;
615 617
616 return (c); 618 return (c);
617} 619}
618 620
/*
 * Return a sample buffer to the pool cache, wiping it first so stale
 * timestamps/values do not linger in recycled memory.
 *
 * NOTE(review): a plain memset() of an object about to be released can
 * in principle be optimized away as a dead store; explicit_memset(9)
 * would guarantee the wipe -- confirm before changing.
 */
static void
rnd_sample_free(rnd_sample_t *c)
{
	memset(c, 0, sizeof(*c));
	pool_cache_put(rnd_mempc, c);
}
625 627
626/* 628/*
627 * Add a source to our list of sources. 629 * Add a source to our list of sources.
628 */ 630 */
629void 631void
630rnd_attach_source(krndsource_t *rs, const char *name, uint32_t type, 632rnd_attach_source(krndsource_t *rs, const char *name, uint32_t type,
631 uint32_t flags) 633 uint32_t flags)
632{ 634{
633 uint32_t ts; 635 uint32_t ts;
634 636
635 ts = rnd_counter(); 637 ts = rnd_counter();
636 638
637 strlcpy(rs->name, name, sizeof(rs->name)); 639 strlcpy(rs->name, name, sizeof(rs->name));
638 memset(&rs->time_delta, 0, sizeof(rs->time_delta)); 640 memset(&rs->time_delta, 0, sizeof(rs->time_delta));
639 rs->time_delta.x = ts; 641 rs->time_delta.x = ts;
640 memset(&rs->value_delta, 0, sizeof(rs->value_delta)); 642 memset(&rs->value_delta, 0, sizeof(rs->value_delta));
641 rs->total = 0; 643 rs->total = 0;
642 644
643 /* 645 /*
644 * Some source setup, by type 646 * Some source setup, by type
645 */ 647 */
646 rs->test = NULL; 648 rs->test = NULL;
647 rs->test_cnt = -1; 649 rs->test_cnt = -1;
648 650
649 if (flags == 0) { 651 if (flags == 0) {
650 flags = RND_FLAG_DEFAULT; 652 flags = RND_FLAG_DEFAULT;
651 } 653 }
652 654
653 switch (type) { 655 switch (type) {
654 case RND_TYPE_NET: /* Don't collect by default */ 656 case RND_TYPE_NET: /* Don't collect by default */
655 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE); 657 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);
656 break; 658 break;
657 case RND_TYPE_RNG: /* Space for statistical testing */ 659 case RND_TYPE_RNG: /* Space for statistical testing */
658 rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP); 660 rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP);
659 rs->test_cnt = 0; 661 rs->test_cnt = 0;
660 /* FALLTHRU */ 662 /* FALLTHRU */
661 case RND_TYPE_VM: /* Process samples in bulk always */ 663 case RND_TYPE_VM: /* Process samples in bulk always */
662 flags |= RND_FLAG_FAST; 664 flags |= RND_FLAG_FAST;
663 break; 665 break;
664 default: 666 default:
665 break; 667 break;
666 } 668 }
667 669
668 rs->type = type; 670 rs->type = type;
669 rs->flags = flags; 671 rs->flags = flags;
670 672
671 rs->state = rnd_sample_allocate(rs); 673 rs->state = rnd_sample_allocate(rs);
672 674
673 mutex_spin_enter(&rnd_global.lock); 675 mutex_spin_enter(&rnd_global.lock);
674 LIST_INSERT_HEAD(&rnd_global.sources, rs, list); 676 LIST_INSERT_HEAD(&rnd_global.sources, rs, list);
675 677
676#ifdef RND_VERBOSE 678#ifdef RND_VERBOSE
677 rnd_printf_verbose("rnd: %s attached as an entropy source (", 679 rnd_printf_verbose("rnd: %s attached as an entropy source (",
678 rs->name); 680 rs->name);
679 if (!(flags & RND_FLAG_NO_COLLECT)) { 681 if (!(flags & RND_FLAG_NO_COLLECT)) {
680 rnd_printf_verbose("collecting"); 682 rnd_printf_verbose("collecting");
681 if (flags & RND_FLAG_NO_ESTIMATE) 683 if (flags & RND_FLAG_NO_ESTIMATE)
682 rnd_printf_verbose(" without estimation"); 684 rnd_printf_verbose(" without estimation");
683 } 685 }
684 else 686 else
685 rnd_printf_verbose("off"); 687 rnd_printf_verbose("off");
686 rnd_printf_verbose(")\n"); 688 rnd_printf_verbose(")\n");
687#endif 689#endif
688 690
689 /* 691 /*
690 * Again, put some more initial junk in the pool. 692 * Again, put some more initial junk in the pool.
691 * FreeBSD claim to have an analysis that show 4 bits of 693 * FreeBSD claim to have an analysis that show 4 bits of
692 * entropy per source-attach timestamp. I am skeptical, 694 * entropy per source-attach timestamp. I am skeptical,
693 * but we count 1 bit per source here. 695 * but we count 1 bit per source here.
694 */ 696 */
695 rndpool_add_data(&rnd_global.pool, &ts, sizeof(ts), 1); 697 rndpool_add_data(&rnd_global.pool, &ts, sizeof(ts), 1);
696 mutex_spin_exit(&rnd_global.lock); 698 mutex_spin_exit(&rnd_global.lock);
697} 699}
698 700
699/* 701/*
700 * Remove a source from our list of sources. 702 * Remove a source from our list of sources.
701 */ 703 */
702void 704void
703rnd_detach_source(krndsource_t *source) 705rnd_detach_source(krndsource_t *source)
704{ 706{
705 rnd_sample_t *sample; 707 rnd_sample_t *sample;
706 708
707 mutex_spin_enter(&rnd_global.lock); 709 mutex_spin_enter(&rnd_global.lock);
708 LIST_REMOVE(source, list); 710 LIST_REMOVE(source, list);
709 mutex_spin_exit(&rnd_global.lock); 711 mutex_spin_exit(&rnd_global.lock);
710 712
711 /* 713 /*
712 * If there are samples queued up "remove" them from the sample queue 714 * If there are samples queued up "remove" them from the sample queue
713 * by setting the source to the no-collect pseudosource. 715 * by setting the source to the no-collect pseudosource.
714 */ 716 */
715 mutex_spin_enter(&rnd_samples.lock); 717 mutex_spin_enter(&rnd_samples.lock);
716 sample = SIMPLEQ_FIRST(&rnd_samples.q); 718 sample = SIMPLEQ_FIRST(&rnd_samples.q);
717 while (sample != NULL) { 719 while (sample != NULL) {
718 if (sample->source == source) 720 if (sample->source == source)
719 sample->source = &rnd_source_no_collect; 721 sample->source = &rnd_source_no_collect;
720 722
721 sample = SIMPLEQ_NEXT(sample, next); 723 sample = SIMPLEQ_NEXT(sample, next);
722 } 724 }
723 mutex_spin_exit(&rnd_samples.lock); 725 mutex_spin_exit(&rnd_samples.lock);
724 726
725 if (source->state) { 727 if (source->state) {
726 rnd_sample_free(source->state); 728 rnd_sample_free(source->state);
727 source->state = NULL; 729 source->state = NULL;
728 } 730 }
729 731
730 if (source->test) { 732 if (source->test) {
731 kmem_free(source->test, sizeof(rngtest_t)); 733 kmem_free(source->test, sizeof(rngtest_t));
732 } 734 }
733 735
734 rnd_printf_verbose("rnd: %s detached as an entropy source\n", 736 rnd_printf_verbose("rnd: %s detached as an entropy source\n",
735 source->name); 737 source->name);
736} 738}
737 739
738static inline uint32_t 740static inline uint32_t
739rnd_estimate(krndsource_t *rs, uint32_t ts, uint32_t val) 741rnd_estimate(krndsource_t *rs, uint32_t ts, uint32_t val)
740{ 742{
741 uint32_t entropy = 0, dt_est, dv_est; 743 uint32_t entropy = 0, dt_est, dv_est;
742 744
743 dt_est = rnd_dt_estimate(rs, ts); 745 dt_est = rnd_dt_estimate(rs, ts);
744 dv_est = rnd_dv_estimate(rs, val); 746 dv_est = rnd_dv_estimate(rs, val);
745 747
746 if (!(rs->flags & RND_FLAG_NO_ESTIMATE)) { 748 if (!(rs->flags & RND_FLAG_NO_ESTIMATE)) {
747 if (rs->flags & RND_FLAG_ESTIMATE_TIME) { 749 if (rs->flags & RND_FLAG_ESTIMATE_TIME) {
748 entropy += dt_est; 750 entropy += dt_est;
749 } 751 }
750 752
751 if (rs->flags & RND_FLAG_ESTIMATE_VALUE) { 753 if (rs->flags & RND_FLAG_ESTIMATE_VALUE) {
752 entropy += dv_est; 754 entropy += dv_est;
753 } 755 }
754 756
755 } 757 }
756 return entropy; 758 return entropy;
757} 759}
758 760
759/* 761/*
760 * Add a 32-bit value to the entropy pool. The rs parameter should point to 762 * Add a 32-bit value to the entropy pool. The rs parameter should point to
761 * the source-specific source structure. 763 * the source-specific source structure.
762 */ 764 */
763void 765void
764_rnd_add_uint32(krndsource_t *rs, uint32_t val) 766_rnd_add_uint32(krndsource_t *rs, uint32_t val)
765{ 767{
766 uint32_t ts;  768 uint32_t ts;
767 uint32_t entropy = 0; 769 uint32_t entropy = 0;
768 770
769 if (rs->flags & RND_FLAG_NO_COLLECT) 771 if (rs->flags & RND_FLAG_NO_COLLECT)
770 return; 772 return;
771 773
772 /* 774 /*
773 * Sample the counter as soon as possible to avoid 775 * Sample the counter as soon as possible to avoid
774 * entropy overestimation. 776 * entropy overestimation.
775 */ 777 */
776 ts = rnd_counter(); 778 ts = rnd_counter();
777 779
778 /* 780 /*
779 * Calculate estimates - we may not use them, but if we do 781 * Calculate estimates - we may not use them, but if we do
780 * not calculate them, the estimators' history becomes invalid. 782 * not calculate them, the estimators' history becomes invalid.
781 */ 783 */
782 entropy = rnd_estimate(rs, ts, val); 784 entropy = rnd_estimate(rs, ts, val);
783 785
784 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); 786 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts);
785} 787}
786 788
787void 789void
788_rnd_add_uint64(krndsource_t *rs, uint64_t val) 790_rnd_add_uint64(krndsource_t *rs, uint64_t val)
789{ 791{
790 uint32_t ts;  792 uint32_t ts;
791 uint32_t entropy = 0; 793 uint32_t entropy = 0;
792 794
793 if (rs->flags & RND_FLAG_NO_COLLECT) 795 if (rs->flags & RND_FLAG_NO_COLLECT)
794 return; 796 return;
795 797
796 /* 798 /*
797 * Sample the counter as soon as possible to avoid 799 * Sample the counter as soon as possible to avoid
798 * entropy overestimation. 800 * entropy overestimation.
799 */ 801 */
800 ts = rnd_counter(); 802 ts = rnd_counter();
801 803
802 /* 804 /*
803 * Calculate estimates - we may not use them, but if we do 805 * Calculate estimates - we may not use them, but if we do
804 * not calculate them, the estimators' history becomes invalid. 806 * not calculate them, the estimators' history becomes invalid.
805 */ 807 */
806 entropy = rnd_estimate(rs, ts, (uint32_t)(val & (uint64_t)0xffffffff)); 808 entropy = rnd_estimate(rs, ts, (uint32_t)(val & (uint64_t)0xffffffff));
807 809
808 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); 810 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts);
809} 811}
810 812
811void 813void
812rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len, 814rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len,
813 uint32_t entropy) 815 uint32_t entropy)
814{ 816{
815 /* 817 /*
816 * This interface is meant for feeding data which is, 818 * This interface is meant for feeding data which is,
817 * itself, random. Don't estimate entropy based on 819 * itself, random. Don't estimate entropy based on
818 * timestamp, just directly add the data. 820 * timestamp, just directly add the data.
819 */ 821 */
820 if (__predict_false(rs == NULL)) { 822 if (__predict_false(rs == NULL)) {
821 mutex_spin_enter(&rnd_global.lock); 823 mutex_spin_enter(&rnd_global.lock);
822 rndpool_add_data(&rnd_global.pool, data, len, entropy); 824 rndpool_add_data(&rnd_global.pool, data, len, entropy);
823 mutex_spin_exit(&rnd_global.lock); 825 mutex_spin_exit(&rnd_global.lock);
824 } else { 826 } else {
825 rnd_add_data_ts(rs, data, len, entropy, rnd_counter()); 827 rnd_add_data_ts(rs, data, len, entropy, rnd_counter());
826 } 828 }
827} 829}
828 830
/*
 * Package (timestamp, value) observations into per-source sample
 * buffers and queue full buffers for batch processing.  Called from
 * thread or interrupt context; allocations never sleep.  The caller's
 * entropy claim is attached to the last buffer touched.
 */
static void
rnd_add_data_ts(krndsource_t *rs, const void *const data, uint32_t len,
    uint32_t entropy, uint32_t ts)
{
	rnd_sample_t *state = NULL;
	const uint8_t *p = data;
	uint32_t dint;
	int todo, done, filled = 0;
	int sample_count;
	struct rnd_sampleq tmp_samples = SIMPLEQ_HEAD_INITIALIZER(tmp_samples);

	/* Bail if collection is off for this source.  (rs is non-NULL on
	 * every current call path; the rs && guard is belt-and-braces.) */
	if (rs && (rs->flags & RND_FLAG_NO_COLLECT ||
	    __predict_false(!(rs->flags &
	    (RND_FLAG_COLLECT_TIME|
	     RND_FLAG_COLLECT_VALUE))))) {
		return;
	}
	/* Number of whole 32-bit words we can consume from data. */
	todo = len / sizeof(dint);
	/*
	 * Let's try to be efficient: if we are warm, and a source
	 * is adding entropy at a rate of at least 1 bit every 10 seconds,
	 * mark it as "fast" and add its samples in bulk.
	 */
	if (__predict_true(rs->flags & RND_FLAG_FAST) ||
	    (todo >= RND_SAMPLE_COUNT)) {
		sample_count = RND_SAMPLE_COUNT;
	} else {
		if (!(rs->flags & RND_FLAG_HASCB) &&
		    !cold && rnd_initial_entropy) {
			struct timeval upt;

			getmicrouptime(&upt);
			if ( (upt.tv_sec > 0 && rs->total > upt.tv_sec * 10) ||
			    (upt.tv_sec > 10 && rs->total > upt.tv_sec) ||
			    (upt.tv_sec > 100 &&
			      rs->total > upt.tv_sec / 10)) {
				rnd_printf_verbose("rnd: source %s is fast"
				    " (%d samples at once,"
				    " %d bits in %lld seconds), "
				    "processing samples in bulk.\n",
				    rs->name, todo, rs->total,
				    (long long int)upt.tv_sec);
				rs->flags |= RND_FLAG_FAST;
			}
		}
		/* Slow source: flush after just two samples for latency. */
		sample_count = 2;
	}

	/*
	 * Loop over data packaging it into sample buffers.
	 * If a sample buffer allocation fails, drop all data.
	 */
	for (done = 0; done < todo ; done++) {
		state = rs->state;
		if (state == NULL) {
			state = rnd_sample_allocate_isr(rs);
			if (__predict_false(state == NULL)) {
				break;
			}
			rs->state = state;
		}

		state->ts[state->cursor] = ts;
		/* memcpy: p may not be 4-byte aligned.  4 == sizeof(dint). */
		(void)memcpy(&dint, &p[done*4], 4);
		state->values[state->cursor] = dint;
		state->cursor++;

		/* Buffer full: stage it locally, start a fresh one lazily. */
		if (state->cursor == sample_count) {
			SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next);
			filled++;
			rs->state = NULL;
		}
	}

	/*
	 * state == NULL here only if the allocation above failed:
	 * drop everything staged so far rather than publish a
	 * partial, unaccounted batch.
	 */
	if (__predict_false(state == NULL)) {
		while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
			SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
			rnd_sample_free(state);
		}
		return;
	}

	/*
	 * Claim all the entropy on the last one we send to
	 * the pool, so we don't rely on it being evenly distributed
	 * in the supplied data.
	 *
	 * XXX The rndpool code must accept samples with more
	 * XXX claimed entropy than bits for this to work right.
	 */
	state->entropy += entropy;
	rs->total += entropy;

	/*
	 * If we didn't finish any sample buffers, we're done.
	 */
	if (!filled) {
		return;
	}

	/* Splice the staged buffers onto the shared queue under its lock. */
	mutex_spin_enter(&rnd_samples.lock);
	while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
		SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
		SIMPLEQ_INSERT_HEAD(&rnd_samples.q, state, next);
	}
	mutex_spin_exit(&rnd_samples.lock);

	/* Cause processing of queued samples */
	rnd_schedule_process();
}
939 941
/*
 * Sanity-check output from a hardware RNG before trusting it.
 * Returns nonzero if the sample fails either test (caller disables
 * the source); 0 if it passes or no test is pending.
 */
static int
rnd_hwrng_test(rnd_sample_t *sample)
{
	krndsource_t *source = sample->source;
	size_t cmplen;
	uint8_t *v1, *v2;
	size_t resid, totest;

	KASSERT(source->type == RND_TYPE_RNG);

	/*
	 * Continuous-output test: compare two halves of the
	 * sample buffer to each other.  The sample buffer (64 ints,
	 * so either 256 or 512 bytes on any modern machine) should be
	 * much larger than a typical hardware RNG output, so this seems
	 * a reasonable way to do it without retaining extra data.
	 */
	cmplen = sizeof(sample->values) / 2;
	v1 = (uint8_t *)sample->values;
	v2 = (uint8_t *)sample->values + cmplen;

	if (__predict_false(!memcmp(v1, v2, cmplen))) {
		rnd_printf("rnd: source \"%s\" failed continuous-output test.\n",
		    source->name);
		return 1;
	}

	/*
	 * FIPS 140 statistical RNG test.  We must accumulate 20,000 bits.
	 * test_cnt == -1 means the source already passed (or was never
	 * scheduled for testing) -- nothing more to do.
	 */
	if (__predict_true(source->test_cnt == -1)) {
		/* already passed the test */
		return 0;
	}
	resid = FIPS140_RNG_TEST_BYTES - source->test_cnt;
	totest = MIN(RND_SAMPLE_COUNT * 4, resid);
	/*
	 * NOTE(review): this copies from the full values[] buffer, not
	 * just the sample->cursor entries actually filled; presumably
	 * samples reaching here are always full -- confirm.
	 */
	memcpy(source->test->rt_b + source->test_cnt, sample->values, totest);
	resid -= totest;
	source->test_cnt += totest;
	if (resid == 0) {
		/* Accumulated 20,000 bits: run the FIPS batteries. */
		strlcpy(source->test->rt_name, source->name,
			sizeof(source->test->rt_name));
		if (rngtest(source->test)) {
			rnd_printf("rnd: source \"%s\" failed statistical test.",
			    source->name);
			return 1;
		}
		/* Passed: mark done and scrub the test buffer. */
		source->test_cnt = -1;
		memset(source->test, 0, sizeof(*source->test));
	}
	return 0;
}
992 994
/*
 * Process the events in the ring buffer.  Called by rnd_timeout or
 * by the add routines directly if the callout has never fired (that
 * is, if we are "cold" -- just booted).
 *
 * Drains the shared sample queue under rnd_samples.lock, then mixes
 * the drained samples into the global pool under rnd_global.lock; the
 * two locks are never held simultaneously.
 */
static void
rnd_process_events(void)
{
	rnd_sample_t *sample = NULL;
	krndsource_t *source;
	/* Last source mixed in, across calls: used to suppress wakeups
	 * when only a single low-rate source has contributed. */
	static krndsource_t *last_source;
	uint32_t entropy;
	size_t pool_entropy;
	int found = 0, wake = 0;
	struct rnd_sampleq dq_samples = SIMPLEQ_HEAD_INITIALIZER(dq_samples);
	struct rnd_sampleq df_samples = SIMPLEQ_HEAD_INITIALIZER(df_samples);

	/*
	 * Drain to the on-stack queue and drop the lock.
	 * dq_samples: to be mixed in; df_samples: to be freed only.
	 */
	mutex_spin_enter(&rnd_samples.lock);
	while ((sample = SIMPLEQ_FIRST(&rnd_samples.q))) {
		found++;	/* counted but otherwise unused here */
		SIMPLEQ_REMOVE_HEAD(&rnd_samples.q, next);
		/*
		 * We repeat this check here, since it is possible
		 * the source was disabled before we were called, but
		 * after the entry was queued.
		 */
		if (__predict_false(!(sample->source->flags &
			    (RND_FLAG_COLLECT_TIME|
				RND_FLAG_COLLECT_VALUE)))) {
			SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
		} else {
			SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next);
		}
	}
	mutex_spin_exit(&rnd_samples.lock);

	/* Don't thrash the rndpool mtx either.  Hold, add all samples. */
	mutex_spin_enter(&rnd_global.lock);

	pool_entropy = rndpool_get_entropy_count(&rnd_global.pool);

	while ((sample = SIMPLEQ_FIRST(&dq_samples))) {
		int sample_count;

		SIMPLEQ_REMOVE_HEAD(&dq_samples, next);
		source = sample->source;
		entropy = sample->entropy;
		sample_count = sample->cursor;

		/*
		 * Don't provide a side channel for timing attacks on
		 * low-rate sources: require mixing with some other
		 * source before we schedule a wakeup.
		 */
		if (!wake &&
		    (source != last_source || source->flags & RND_FLAG_FAST)) {
			wake++;
		}
		last_source = source;

		/*
		 * If the source has been disabled, ignore samples from
		 * it.
		 */
		if (source->flags & RND_FLAG_NO_COLLECT)
			goto skip;

		/*
		 * Hardware generators are great but sometimes they
		 * have...hardware issues.  Don't use any data from
		 * them unless it passes some tests.
		 */
		if (source->type == RND_TYPE_RNG) {
			if (__predict_false(rnd_hwrng_test(sample))) {
				source->flags |= RND_FLAG_NO_COLLECT;
				rnd_printf("rnd: disabling source \"%s\".",
				    source->name);
				goto skip;
			}
		}

		/* Mix in values and/or timestamps with 0 claimed entropy;
		 * the claim is credited in bulk via pool_entropy below. */
		if (source->flags & RND_FLAG_COLLECT_VALUE) {
			rndpool_add_data(&rnd_global.pool, sample->values,
			    sample_count *
			    sizeof(sample->values[1]),
			    0);
		}
		if (source->flags & RND_FLAG_COLLECT_TIME) {
			rndpool_add_data(&rnd_global.pool, sample->ts,
			    sample_count *
			    sizeof(sample->ts[1]),
			    0);
		}

		pool_entropy += entropy;
		source->total += sample->entropy;
skip:		SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
	}
	rndpool_set_entropy_count(&rnd_global.pool, pool_entropy);
	mutex_spin_exit(&rnd_global.lock);

	/*
	 * If we filled the pool past the threshold, wake anyone
	 * waiting for entropy.  Otherwise, ask all the entropy sources
	 * for more.
	 */
	if (pool_entropy > RND_ENTROPY_THRESHOLD * 8) {
		wake++;
	} else {
		rnd_getmore(howmany((RND_POOLBITS - pool_entropy), NBBY));
		rnd_printf_verbose("rnd: empty, asking for %d bytes\n",
		    (int)(howmany((RND_POOLBITS - pool_entropy), NBBY)));
	}

	/* Now we hold no locks: clean up. */
	while ((sample = SIMPLEQ_FIRST(&df_samples))) {
		SIMPLEQ_REMOVE_HEAD(&df_samples, next);
		rnd_sample_free(sample);
	}

	/*
	 * Wake up any potential readers waiting.
	 */
	if (wake) {
		rnd_schedule_wakeup();
	}
}
1124 1126
/* Softint handler: process any samples queued by the add routines. */
static void
rnd_intr(void *arg)
{
	rnd_process_events();
}
1130 1132
/* Softint handler: wake readers blocked waiting for entropy. */
static void
rnd_wake(void *arg)
{
	rnd_wakeup_readers();
}
1136 1138
1137static uint32_t 1139static uint32_t
1138rnd_extract_data(void *p, uint32_t len, uint32_t flags) 1140rnd_extract_data(void *p, uint32_t len, uint32_t flags)
1139{ 1141{
1140 static int timed_in; 1142 static int timed_in;
1141 int entropy_count; 1143 int entropy_count;
1142 uint32_t retval; 1144 uint32_t retval;
1143 1145
1144 mutex_spin_enter(&rnd_global.lock); 1146 mutex_spin_enter(&rnd_global.lock);
1145 if (__predict_false(!timed_in)) { 1147 if (__predict_false(!timed_in)) {
1146 if (boottime.tv_sec) { 1148 if (boottime.tv_sec) {
1147 rndpool_add_data(&rnd_global.pool, &boottime, 1149 rndpool_add_data(&rnd_global.pool, &boottime,
1148 sizeof(boottime), 0); 1150 sizeof(boottime), 0);
1149 } 1151 }
1150 timed_in++; 1152 timed_in++;
1151 } 1153 }
1152 if (__predict_false(!rnd_initial_entropy)) { 1154 if (__predict_false(!rnd_initial_entropy)) {
1153 uint32_t c; 1155 uint32_t c;
1154 1156
1155 rnd_printf_verbose("rnd: WARNING! initial entropy low (%u).\n", 1157 rnd_printf_verbose("rnd: WARNING! initial entropy low (%u).\n",
1156 rndpool_get_entropy_count(&rnd_global.pool)); 1158 rndpool_get_entropy_count(&rnd_global.pool));
1157 /* Try once again to put something in the pool */ 1159 /* Try once again to put something in the pool */
1158 c = rnd_counter(); 1160 c = rnd_counter();
1159 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1); 1161 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
1160 } 1162 }
1161 1163
1162#ifdef DIAGNOSTIC 1164#ifdef DIAGNOSTIC
1163 while (!rnd_tested) { 1165 while (!rnd_tested) {
1164 entropy_count = rndpool_get_entropy_count(&rnd_global.pool); 1166 entropy_count = rndpool_get_entropy_count(&rnd_global.pool);
1165 rnd_printf_verbose("rnd: starting statistical RNG test," 1167 rnd_printf_verbose("rnd: starting statistical RNG test,"
1166 " entropy = %d.\n", 1168 " entropy = %d.\n",
1167 entropy_count); 1169 entropy_count);
1168 if (rndpool_extract_data(&rnd_global.pool, rnd_rt.rt_b, 1170 if (rndpool_extract_data(&rnd_global.pool, rnd_rt.rt_b,
1169 sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY) 1171 sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY)
1170 != sizeof(rnd_rt.rt_b)) { 1172 != sizeof(rnd_rt.rt_b)) {
1171 panic("rnd: could not get bits for statistical test"); 1173 panic("rnd: could not get bits for statistical test");
1172 } 1174 }
1173 /* 1175 /*
1174 * Stash the tested bits so we can put them back in the 1176 * Stash the tested bits so we can put them back in the
1175 * pool, restoring the entropy count. DO NOT rely on 1177 * pool, restoring the entropy count. DO NOT rely on
1176 * rngtest to maintain the bits pristine -- we could end 1178 * rngtest to maintain the bits pristine -- we could end
1177 * up adding back non-random data claiming it were pure 1179 * up adding back non-random data claiming it were pure
1178 * entropy. 1180 * entropy.
1179 */ 1181 */
1180 memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b)); 1182 memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b));
1181 strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name)); 1183 strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name));
1182 if (rngtest(&rnd_rt)) { 1184 if (rngtest(&rnd_rt)) {
1183 /* 1185 /*
1184 * The probabiliity of a Type I error is 3/10000, 1186 * The probabiliity of a Type I error is 3/10000,
1185 * but note this can only happen at boot time. 1187 * but note this can only happen at boot time.
1186 * The relevant standard says to reset the module, 1188 * The relevant standard says to reset the module,
1187 * but developers objected... 1189 * but developers objected...
1188 */ 1190 */
1189 rnd_printf("rnd: WARNING, ENTROPY POOL FAILED " 1191 rnd_printf("rnd: WARNING, ENTROPY POOL FAILED "
1190 "STATISTICAL TEST!\n"); 1192 "STATISTICAL TEST!\n");
1191 continue; 1193 continue;
1192 } 1194 }
1193 memset(&rnd_rt, 0, sizeof(rnd_rt)); 1195 memset(&rnd_rt, 0, sizeof(rnd_rt));
1194 rndpool_add_data(&rnd_global.pool, rnd_testbits, 1196 rndpool_add_data(&rnd_global.pool, rnd_testbits,
1195 sizeof(rnd_testbits), entropy_count); 1197 sizeof(rnd_testbits), entropy_count);
1196 memset(rnd_testbits, 0, sizeof(rnd_testbits)); 1198 memset(rnd_testbits, 0, sizeof(rnd_testbits));
1197 rnd_printf_verbose("rnd: statistical RNG test done," 1199 rnd_printf_verbose("rnd: statistical RNG test done,"
1198 " entropy = %d.\n", 1200 " entropy = %d.\n",
1199 rndpool_get_entropy_count(&rnd_global.pool)); 1201 rndpool_get_entropy_count(&rnd_global.pool));
1200 rnd_tested++; 1202 rnd_tested++;
1201 } 1203 }
1202#endif 1204#endif
1203 entropy_count = rndpool_get_entropy_count(&rnd_global.pool); 1205 entropy_count = rndpool_get_entropy_count(&rnd_global.pool);
1204 retval = rndpool_extract_data(&rnd_global.pool, p, len, flags); 1206 retval = rndpool_extract_data(&rnd_global.pool, p, len, flags);
1205 mutex_spin_exit(&rnd_global.lock); 1207 mutex_spin_exit(&rnd_global.lock);
1206 1208
1207 if (entropy_count < (RND_ENTROPY_THRESHOLD * 2 + len) * NBBY) { 1209 if (entropy_count < (RND_ENTROPY_THRESHOLD * 2 + len) * NBBY) {
1208 rnd_printf_verbose("rnd: empty, asking for %d bytes\n", 1210 rnd_printf_verbose("rnd: empty, asking for %d bytes\n",
1209 (int)(howmany((RND_POOLBITS - entropy_count), NBBY))); 1211 (int)(howmany((RND_POOLBITS - entropy_count), NBBY)));
1210 rnd_getmore(howmany((RND_POOLBITS - entropy_count), NBBY)); 1212 rnd_getmore(howmany((RND_POOLBITS - entropy_count), NBBY));
1211 } 1213 }
1212 1214
1213 return retval; 1215 return retval;
1214} 1216}
1215 1217
1216/* 1218/*
1217 * Fill the buffer with as much entropy as we can. Return true if it 1219 * Fill the buffer with as much entropy as we can. Return true if it
1218 * has full entropy and false if not. 1220 * has full entropy and false if not.
1219 */ 1221 */
1220bool 1222bool
1221rnd_extract(void *buffer, size_t bytes) 1223rnd_extract(void *buffer, size_t bytes)
1222{ 1224{
1223 const size_t extracted = rnd_extract_data(buffer, bytes, 1225 const size_t extracted = rnd_extract_data(buffer, bytes,
1224 RND_EXTRACT_GOOD); 1226 RND_EXTRACT_GOOD);
1225 1227
1226 if (extracted < bytes) { 1228 if (extracted < bytes) {
1227 rnd_getmore(bytes - extracted); 1229 rnd_getmore(bytes - extracted);
1228 (void)rnd_extract_data((uint8_t *)buffer + extracted, 1230 (void)rnd_extract_data((uint8_t *)buffer + extracted,
1229 bytes - extracted, RND_EXTRACT_ANY); 1231 bytes - extracted, RND_EXTRACT_ANY);
1230 return false; 1232 return false;
1231 } 1233 }
1232 1234
1233 return true; 1235 return true;
1234} 1236}
1235 1237
1236/* 1238/*
1237 * If we have as much entropy as is requested, fill the buffer with it 1239 * If we have as much entropy as is requested, fill the buffer with it
1238 * and return true. Otherwise, leave the buffer alone and return 1240 * and return true. Otherwise, leave the buffer alone and return
1239 * false. 1241 * false.
1240 */ 1242 */
1241 1243
1242CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL); 1244CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL);
1243CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD)); 1245CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD));
1244CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <= 1246CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <=
1245 (0xffffffffUL / NBBY)); 1247 (0xffffffffUL / NBBY));
1246 1248
1247bool 1249bool
1248rnd_tryextract(void *buffer, size_t bytes) 1250rnd_tryextract(void *buffer, size_t bytes)
1249{ 1251{
1250 uint32_t bits_needed, bytes_requested; 1252 uint32_t bits_needed, bytes_requested;
1251 1253
1252 KASSERT(bytes <= RNDSINK_MAX_BYTES); 1254 KASSERT(bytes <= RNDSINK_MAX_BYTES);
1253 bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY); 1255 bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY);
1254 1256
1255 mutex_spin_enter(&rnd_global.lock); 1257 mutex_spin_enter(&rnd_global.lock);
1256 if (bits_needed <= rndpool_get_entropy_count(&rnd_global.pool)) { 1258 if (bits_needed <= rndpool_get_entropy_count(&rnd_global.pool)) {
1257 const uint32_t extracted __diagused = 1259 const uint32_t extracted __diagused =
1258 rndpool_extract_data(&rnd_global.pool, buffer, bytes, 1260 rndpool_extract_data(&rnd_global.pool, buffer, bytes,
1259 RND_EXTRACT_GOOD); 1261 RND_EXTRACT_GOOD);
1260 1262
1261 KASSERT(extracted == bytes); 1263 KASSERT(extracted == bytes);
1262 bytes_requested = 0; 1264 bytes_requested = 0;
1263 } else { 1265 } else {
1264 /* XXX Figure the threshold into this... */ 1266 /* XXX Figure the threshold into this... */
1265 bytes_requested = howmany((bits_needed - 1267 bytes_requested = howmany((bits_needed -
1266 rndpool_get_entropy_count(&rnd_global.pool)), NBBY); 1268 rndpool_get_entropy_count(&rnd_global.pool)), NBBY);
1267 KASSERT(0 < bytes_requested); 1269 KASSERT(0 < bytes_requested);
1268 } 1270 }
1269 mutex_spin_exit(&rnd_global.lock); 1271 mutex_spin_exit(&rnd_global.lock);
1270 1272
1271 if (0 < bytes_requested) 1273 if (0 < bytes_requested)
1272 rnd_getmore(bytes_requested); 1274 rnd_getmore(bytes_requested);
1273 1275
1274 return bytes_requested == 0; 1276 return bytes_requested == 0;
1275} 1277}
1276 1278
1277void 1279void
1278rnd_seed(void *base, size_t len) 1280rnd_seed(void *base, size_t len)
1279{ 1281{
1280 SHA1_CTX s; 1282 SHA1_CTX s;
1281 uint8_t digest[SHA1_DIGEST_LENGTH]; 1283 uint8_t digest[SHA1_DIGEST_LENGTH];
1282 1284
1283 if (len != sizeof(*boot_rsp)) { 1285 if (len != sizeof(*boot_rsp)) {
1284 rnd_printf("rnd: bad seed length %d\n", (int)len); 1286 rnd_printf("rnd: bad seed length %d\n", (int)len);
1285 return; 1287 return;
1286 } 1288 }
1287 1289
1288 boot_rsp = (rndsave_t *)base; 1290 boot_rsp = (rndsave_t *)base;
1289 SHA1Init(&s); 1291 SHA1Init(&s);
1290 SHA1Update(&s, (uint8_t *)&boot_rsp->entropy, 1292 SHA1Update(&s, (uint8_t *)&boot_rsp->entropy,
1291 sizeof(boot_rsp->entropy)); 1293 sizeof(boot_rsp->entropy));
1292 SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data)); 1294 SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data));
1293 SHA1Final(digest, &s); 1295 SHA1Final(digest, &s);
1294 1296
1295 if (memcmp(digest, boot_rsp->digest, sizeof(digest))) { 1297 if (memcmp(digest, boot_rsp->digest, sizeof(digest))) {
1296 rnd_printf("rnd: bad seed checksum\n"); 1298 rnd_printf("rnd: bad seed checksum\n");
1297 return; 1299 return;
1298 } 1300 }
1299 1301
1300 /* 1302 /*
1301 * It's not really well-defined whether bootloader-supplied 1303 * It's not really well-defined whether bootloader-supplied
1302 * modules run before or after rnd_init(). Handle both cases. 1304 * modules run before or after rnd_init(). Handle both cases.
1303 */ 1305 */
1304 if (rnd_ready) { 1306 if (rnd_ready) {
1305 rnd_printf_verbose("rnd: ready," 1307 rnd_printf_verbose("rnd: ready,"
1306 " feeding in seed data directly.\n"); 1308 " feeding in seed data directly.\n");
1307 mutex_spin_enter(&rnd_global.lock); 1309 mutex_spin_enter(&rnd_global.lock);
1308 rndpool_add_data(&rnd_global.pool, boot_rsp->data, 1310 rndpool_add_data(&rnd_global.pool, boot_rsp->data,
1309 sizeof(boot_rsp->data), 1311 sizeof(boot_rsp->data),
1310 MIN(boot_rsp->entropy, RND_POOLBITS / 2)); 1312 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
1311 memset(boot_rsp, 0, sizeof(*boot_rsp)); 1313 memset(boot_rsp, 0, sizeof(*boot_rsp));
1312 mutex_spin_exit(&rnd_global.lock); 1314 mutex_spin_exit(&rnd_global.lock);
1313 } else { 1315 } else {
1314 rnd_printf_verbose("rnd: not ready, deferring seed feed.\n"); 1316 rnd_printf_verbose("rnd: not ready, deferring seed feed.\n");
1315 } 1317 }
1316} 1318}
1317 1319
1318static void 1320static void
1319krndsource_to_rndsource(krndsource_t *kr, rndsource_t *r) 1321krndsource_to_rndsource(krndsource_t *kr, rndsource_t *r)
1320{ 1322{
1321 memset(r, 0, sizeof(*r)); 1323 memset(r, 0, sizeof(*r));
1322 strlcpy(r->name, kr->name, sizeof(r->name)); 1324 strlcpy(r->name, kr->name, sizeof(r->name));
1323 r->total = kr->total; 1325 r->total = kr->total;
1324 r->type = kr->type; 1326 r->type = kr->type;
1325 r->flags = kr->flags; 1327 r->flags = kr->flags;
1326} 1328}
1327 1329
1328static void 1330static void
1329krndsource_to_rndsource_est(krndsource_t *kr, rndsource_est_t *re) 1331krndsource_to_rndsource_est(krndsource_t *kr, rndsource_est_t *re)
1330{ 1332{
1331 memset(re, 0, sizeof(*re)); 1333 memset(re, 0, sizeof(*re));
1332 krndsource_to_rndsource(kr, &re->rt); 1334 krndsource_to_rndsource(kr, &re->rt);
1333 re->dt_samples = kr->time_delta.insamples; 1335 re->dt_samples = kr->time_delta.insamples;
1334 re->dt_total = kr->time_delta.outbits; 1336 re->dt_total = kr->time_delta.outbits;
1335 re->dv_samples = kr->value_delta.insamples; 1337 re->dv_samples = kr->value_delta.insamples;
1336 re->dv_total = kr->value_delta.outbits; 1338 re->dv_total = kr->value_delta.outbits;
1337} 1339}
1338 1340
1339static void 1341static void
1340krs_setflags(krndsource_t *kr, uint32_t flags, uint32_t mask) 1342krs_setflags(krndsource_t *kr, uint32_t flags, uint32_t mask)
1341{ 1343{
1342 uint32_t oflags = kr->flags; 1344 uint32_t oflags = kr->flags;
1343 1345
1344 kr->flags &= ~mask; 1346 kr->flags &= ~mask;
1345 kr->flags |= (flags & mask); 1347 kr->flags |= (flags & mask);
1346 1348
1347 if (oflags & RND_FLAG_HASENABLE && 1349 if (oflags & RND_FLAG_HASENABLE &&
1348 ((oflags & RND_FLAG_NO_COLLECT) != (flags & RND_FLAG_NO_COLLECT))) { 1350 ((oflags & RND_FLAG_NO_COLLECT) != (flags & RND_FLAG_NO_COLLECT))) {
1349 kr->enable(kr, !(flags & RND_FLAG_NO_COLLECT)); 1351 kr->enable(kr, !(flags & RND_FLAG_NO_COLLECT));
1350 } 1352 }
1351} 1353}
1352 1354
1353int 1355int
1354rnd_system_ioctl(struct file *fp, u_long cmd, void *addr) 1356rnd_system_ioctl(struct file *fp, u_long cmd, void *addr)
1355{ 1357{
1356 krndsource_t *kr; 1358 krndsource_t *kr;
1357 rndstat_t *rst; 1359 rndstat_t *rst;
1358 rndstat_name_t *rstnm; 1360 rndstat_name_t *rstnm;
1359 rndstat_est_t *rset; 1361 rndstat_est_t *rset;
1360 rndstat_est_name_t *rsetnm; 1362 rndstat_est_name_t *rsetnm;
1361 rndctl_t *rctl; 1363 rndctl_t *rctl;
1362 rnddata_t *rnddata; 1364 rnddata_t *rnddata;
1363 uint32_t count, start; 1365 uint32_t count, start;
1364 int ret = 0; 1366 int ret = 0;
1365 int estimate_ok = 0, estimate = 0; 1367 int estimate_ok = 0, estimate = 0;
1366 1368
1367 switch (cmd) { 1369 switch (cmd) {
1368 case RNDGETENTCNT: 1370 case RNDGETENTCNT:
1369 break; 1371 break;
1370 1372
1371 case RNDGETPOOLSTAT: 1373 case RNDGETPOOLSTAT:
1372 case RNDGETSRCNUM: 1374 case RNDGETSRCNUM:
1373 case RNDGETSRCNAME: 1375 case RNDGETSRCNAME:
1374 case RNDGETESTNUM: 1376 case RNDGETESTNUM:
1375 case RNDGETESTNAME: 1377 case RNDGETESTNAME:
1376 ret = kauth_authorize_device(curlwp->l_cred, 1378 ret = kauth_authorize_device(curlwp->l_cred,
1377 KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL); 1379 KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
1378 if (ret) 1380 if (ret)
1379 return (ret); 1381 return (ret);
1380 break; 1382 break;
1381 1383
1382 case RNDCTL: 1384 case RNDCTL:
1383 ret = kauth_authorize_device(curlwp->l_cred, 1385 ret = kauth_authorize_device(curlwp->l_cred,
1384 KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL); 1386 KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
1385 if (ret) 1387 if (ret)
1386 return (ret); 1388 return (ret);
1387 break; 1389 break;
1388 1390
1389 case RNDADDDATA: 1391 case RNDADDDATA:
1390 ret = kauth_authorize_device(curlwp->l_cred, 1392 ret = kauth_authorize_device(curlwp->l_cred,
1391 KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL); 1393 KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
1392 if (ret) 1394 if (ret)
1393 return (ret); 1395 return (ret);
1394 estimate_ok = !kauth_authorize_device(curlwp->l_cred, 1396 estimate_ok = !kauth_authorize_device(curlwp->l_cred,
1395 KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL); 1397 KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL);
1396 break; 1398 break;
1397 1399
1398 default: 1400 default:
1399#ifdef COMPAT_50 1401#ifdef COMPAT_50
1400 return compat_50_rnd_ioctl(fp, cmd, addr); 1402 return compat_50_rnd_ioctl(fp, cmd, addr);
1401#else 1403#else
1402 return ENOTTY; 1404 return ENOTTY;
1403#endif 1405#endif
1404 } 1406 }
1405 1407
1406 switch (cmd) { 1408 switch (cmd) {
1407 case RNDGETENTCNT: 1409 case RNDGETENTCNT:
1408 mutex_spin_enter(&rnd_global.lock); 1410 mutex_spin_enter(&rnd_global.lock);
1409 *(uint32_t *)addr = rndpool_get_entropy_count(&rnd_global.pool); 1411 *(uint32_t *)addr = rndpool_get_entropy_count(&rnd_global.pool);
1410 mutex_spin_exit(&rnd_global.lock); 1412 mutex_spin_exit(&rnd_global.lock);
1411 break; 1413 break;
1412 1414
1413 case RNDGETPOOLSTAT: 1415 case RNDGETPOOLSTAT:
1414 mutex_spin_enter(&rnd_global.lock); 1416 mutex_spin_enter(&rnd_global.lock);
1415 rndpool_get_stats(&rnd_global.pool, addr, 1417 rndpool_get_stats(&rnd_global.pool, addr,
1416 sizeof(rndpoolstat_t)); 1418 sizeof(rndpoolstat_t));
1417 mutex_spin_exit(&rnd_global.lock); 1419 mutex_spin_exit(&rnd_global.lock);
1418 break; 1420 break;
1419 1421
1420 case RNDGETSRCNUM: 1422 case RNDGETSRCNUM:
1421 rst = (rndstat_t *)addr; 1423 rst = (rndstat_t *)addr;
1422 1424
1423 if (rst->count == 0) 1425 if (rst->count == 0)
1424 break; 1426 break;
1425 1427
1426 if (rst->count > RND_MAXSTATCOUNT) 1428 if (rst->count > RND_MAXSTATCOUNT)
1427 return (EINVAL); 1429 return (EINVAL);
1428 1430
1429 mutex_spin_enter(&rnd_global.lock); 1431 mutex_spin_enter(&rnd_global.lock);
1430 /* 1432 /*
1431 * Find the starting source by running through the 1433 * Find the starting source by running through the
1432 * list of sources. 1434 * list of sources.
1433 */ 1435 */
1434 kr = LIST_FIRST(&rnd_global.sources); 1436 kr = LIST_FIRST(&rnd_global.sources);
1435 start = rst->start; 1437 start = rst->start;
1436 while (kr != NULL && start >= 1) { 1438 while (kr != NULL && start >= 1) {
1437 kr = LIST_NEXT(kr, list); 1439 kr = LIST_NEXT(kr, list);
1438 start--; 1440 start--;
1439 } 1441 }
1440 1442
1441 /* 1443 /*
1442 * Return up to as many structures as the user asked 1444 * Return up to as many structures as the user asked
1443 * for. If we run out of sources, a count of zero 1445 * for. If we run out of sources, a count of zero
1444 * will be returned, without an error. 1446 * will be returned, without an error.
1445 */ 1447 */
1446 for (count = 0; count < rst->count && kr != NULL; count++) { 1448 for (count = 0; count < rst->count && kr != NULL; count++) {
1447 krndsource_to_rndsource(kr, &rst->source[count]); 1449 krndsource_to_rndsource(kr, &rst->source[count]);
1448 kr = LIST_NEXT(kr, list); 1450 kr = LIST_NEXT(kr, list);
1449 } 1451 }
1450 1452
1451 rst->count = count; 1453 rst->count = count;
1452 1454
1453 mutex_spin_exit(&rnd_global.lock); 1455 mutex_spin_exit(&rnd_global.lock);
1454 break; 1456 break;
1455 1457
1456 case RNDGETESTNUM: 1458 case RNDGETESTNUM:
1457 rset = (rndstat_est_t *)addr; 1459 rset = (rndstat_est_t *)addr;
1458 1460
1459 if (rset->count == 0) 1461 if (rset->count == 0)
1460 break; 1462 break;
1461 1463
1462 if (rset->count > RND_MAXSTATCOUNT) 1464 if (rset->count > RND_MAXSTATCOUNT)
1463 return (EINVAL); 1465 return (EINVAL);
1464 1466
1465 mutex_spin_enter(&rnd_global.lock); 1467 mutex_spin_enter(&rnd_global.lock);
1466 /* 1468 /*
1467 * Find the starting source by running through the 1469 * Find the starting source by running through the
1468 * list of sources. 1470 * list of sources.
1469 */ 1471 */
1470 kr = LIST_FIRST(&rnd_global.sources); 1472 kr = LIST_FIRST(&rnd_global.sources);
1471 start = rset->start; 1473 start = rset->start;
1472 while (kr != NULL && start > 1) { 1474 while (kr != NULL && start > 1) {
1473 kr = LIST_NEXT(kr, list); 1475 kr = LIST_NEXT(kr, list);
1474 start--; 1476 start--;
1475 } 1477 }
1476 1478
1477 /* Return up to as many structures as the user asked 1479 /* Return up to as many structures as the user asked
1478 * for. If we run out of sources, a count of zero 1480 * for. If we run out of sources, a count of zero
1479 * will be returned, without an error. 1481 * will be returned, without an error.
1480 */ 1482 */
1481 for (count = 0; count < rset->count && kr != NULL; count++) { 1483 for (count = 0; count < rset->count && kr != NULL; count++) {
1482 krndsource_to_rndsource_est(kr, &rset->source[count]); 1484 krndsource_to_rndsource_est(kr, &rset->source[count]);
1483 kr = LIST_NEXT(kr, list); 1485 kr = LIST_NEXT(kr, list);
1484 } 1486 }
1485 1487
1486 rset->count = count; 1488 rset->count = count;
1487 1489
1488 mutex_spin_exit(&rnd_global.lock); 1490 mutex_spin_exit(&rnd_global.lock);
1489 break; 1491 break;
1490 1492
1491 case RNDGETSRCNAME: 1493 case RNDGETSRCNAME:
1492 /* 1494 /*
1493 * Scan through the list, trying to find the name. 1495 * Scan through the list, trying to find the name.
1494 */ 1496 */
1495 mutex_spin_enter(&rnd_global.lock); 1497 mutex_spin_enter(&rnd_global.lock);
1496 rstnm = (rndstat_name_t *)addr; 1498 rstnm = (rndstat_name_t *)addr;
1497 kr = LIST_FIRST(&rnd_global.sources); 1499 kr = LIST_FIRST(&rnd_global.sources);
1498 while (kr != NULL) { 1500 while (kr != NULL) {
1499 if (strncmp(kr->name, rstnm->name, 1501 if (strncmp(kr->name, rstnm->name,
1500 MIN(sizeof(kr->name), 1502 MIN(sizeof(kr->name),
1501 sizeof(rstnm->name))) == 0) { 1503 sizeof(rstnm->name))) == 0) {
1502 krndsource_to_rndsource(kr, &rstnm->source); 1504 krndsource_to_rndsource(kr, &rstnm->source);
1503 mutex_spin_exit(&rnd_global.lock); 1505 mutex_spin_exit(&rnd_global.lock);
1504 return (0); 1506 return (0);
1505 } 1507 }
1506 kr = LIST_NEXT(kr, list); 1508 kr = LIST_NEXT(kr, list);
1507 } 1509 }
1508 mutex_spin_exit(&rnd_global.lock); 1510 mutex_spin_exit(&rnd_global.lock);
1509 1511
1510 ret = ENOENT; /* name not found */ 1512 ret = ENOENT; /* name not found */