Tue Apr 14 14:11:51 2015 UTC ()
Centralize bookkeeping of rnd_initial_entropy.

It is not adjusted everywhere it should be, although the only places
it perhaps formally ought to be are places with questionable entropy
estimates in the first place.


(riastradh)
diff -r1.60 -r1.61 src/sys/kern/kern_rndq.c

cvs diff -r1.60 -r1.61 src/sys/kern/Attic/kern_rndq.c (switch to unified diff)

--- src/sys/kern/Attic/kern_rndq.c 2015/04/14 13:57:35 1.60
+++ src/sys/kern/Attic/kern_rndq.c 2015/04/14 14:11:51 1.61
@@ -1,1624 +1,1625 @@ @@ -1,1624 +1,1625 @@
1/* $NetBSD: kern_rndq.c,v 1.60 2015/04/14 13:57:35 riastradh Exp $ */ 1/* $NetBSD: kern_rndq.c,v 1.61 2015/04/14 14:11:51 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997-2013 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997-2013 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon. 8 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon.
9 * This code uses ideas and algorithms from the Linux driver written by 9 * This code uses ideas and algorithms from the Linux driver written by
10 * Ted Ts'o. 10 * Ted Ts'o.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
15 * 1. Redistributions of source code must retain the above copyright 15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer. 16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright 17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the 18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution. 19 * documentation and/or other materials provided with the distribution.
20 * 20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE. 31 * POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#include <sys/cdefs.h> 34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.60 2015/04/14 13:57:35 riastradh Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.61 2015/04/14 14:11:51 riastradh Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/atomic.h> 38#include <sys/atomic.h>
39#include <sys/ioctl.h> 39#include <sys/ioctl.h>
40#include <sys/fcntl.h> 40#include <sys/fcntl.h>
41#include <sys/select.h> 41#include <sys/select.h>
42#include <sys/poll.h> 42#include <sys/poll.h>
43#include <sys/kmem.h> 43#include <sys/kmem.h>
44#include <sys/mutex.h> 44#include <sys/mutex.h>
45#include <sys/proc.h> 45#include <sys/proc.h>
46#include <sys/kernel.h> 46#include <sys/kernel.h>
47#include <sys/conf.h> 47#include <sys/conf.h>
48#include <sys/systm.h> 48#include <sys/systm.h>
49#include <sys/callout.h> 49#include <sys/callout.h>
50#include <sys/intr.h> 50#include <sys/intr.h>
51#include <sys/rnd.h> 51#include <sys/rnd.h>
52#include <sys/rndpool.h> 52#include <sys/rndpool.h>
53#include <sys/rndsink.h> 53#include <sys/rndsink.h>
54#include <sys/rndsource.h> 54#include <sys/rndsource.h>
55#include <sys/vnode.h> 55#include <sys/vnode.h>
56#include <sys/pool.h> 56#include <sys/pool.h>
57#include <sys/kauth.h> 57#include <sys/kauth.h>
58#include <sys/once.h> 58#include <sys/once.h>
59#include <sys/rngtest.h> 59#include <sys/rngtest.h>
60 60
61#include <dev/rnd_private.h> 61#include <dev/rnd_private.h>
62 62
63#ifdef COMPAT_50 63#ifdef COMPAT_50
64#include <compat/sys/rnd.h> 64#include <compat/sys/rnd.h>
65#endif 65#endif
66 66
67#if defined(__HAVE_CPU_COUNTER) 67#if defined(__HAVE_CPU_COUNTER)
68#include <machine/cpu_counter.h> 68#include <machine/cpu_counter.h>
69#endif 69#endif
70 70
71#ifdef RND_DEBUG 71#ifdef RND_DEBUG
72#define DPRINTF(l,x) if (rnd_debug & (l)) rnd_printf x 72#define DPRINTF(l,x) if (rnd_debug & (l)) rnd_printf x
73int rnd_debug = 0; 73int rnd_debug = 0;
74#else 74#else
75#define DPRINTF(l,x) 75#define DPRINTF(l,x)
76#endif 76#endif
77 77
78/* 78/*
79 * list devices attached 79 * list devices attached
80 */ 80 */
81#if 0 81#if 0
82#define RND_VERBOSE 82#define RND_VERBOSE
83#endif 83#endif
84 84
85#ifdef RND_VERBOSE 85#ifdef RND_VERBOSE
86#define rnd_printf_verbose(fmt, ...) rnd_printf(fmt, ##__VA_ARGS__) 86#define rnd_printf_verbose(fmt, ...) rnd_printf(fmt, ##__VA_ARGS__)
87#else 87#else
88#define rnd_printf_verbose(fmt, ...) ((void)0) 88#define rnd_printf_verbose(fmt, ...) ((void)0)
89#endif 89#endif
90 90
91#ifdef RND_VERBOSE 91#ifdef RND_VERBOSE
92static unsigned int deltacnt; 92static unsigned int deltacnt;
93#endif 93#endif
94 94
95/* 95/*
96 * This is a little bit of state information attached to each device that we 96 * This is a little bit of state information attached to each device that we
97 * collect entropy from. This is simply a collection buffer, and when it 97 * collect entropy from. This is simply a collection buffer, and when it
98 * is full it will be "detached" from the source and added to the entropy 98 * is full it will be "detached" from the source and added to the entropy
99 * pool after entropy is distilled as much as possible. 99 * pool after entropy is distilled as much as possible.
100 */ 100 */
101#define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */ 101#define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */
102typedef struct _rnd_sample_t { 102typedef struct _rnd_sample_t {
103 SIMPLEQ_ENTRY(_rnd_sample_t) next; 103 SIMPLEQ_ENTRY(_rnd_sample_t) next;
104 krndsource_t *source; 104 krndsource_t *source;
105 int cursor; 105 int cursor;
106 int entropy; 106 int entropy;
107 uint32_t ts[RND_SAMPLE_COUNT]; 107 uint32_t ts[RND_SAMPLE_COUNT];
108 uint32_t values[RND_SAMPLE_COUNT]; 108 uint32_t values[RND_SAMPLE_COUNT];
109} rnd_sample_t; 109} rnd_sample_t;
110 110
111SIMPLEQ_HEAD(rnd_sampleq, _rnd_sample_t); 111SIMPLEQ_HEAD(rnd_sampleq, _rnd_sample_t);
112 112
113/* 113/*
114 * The sample queue. Samples are put into the queue and processed in a 114 * The sample queue. Samples are put into the queue and processed in a
115 * softint in order to limit the latency of adding a sample. 115 * softint in order to limit the latency of adding a sample.
116 */ 116 */
117static struct { 117static struct {
118 kmutex_t lock; 118 kmutex_t lock;
119 struct rnd_sampleq q; 119 struct rnd_sampleq q;
120} rnd_samples __cacheline_aligned; 120} rnd_samples __cacheline_aligned;
121 121
122/* 122/*
123 * Memory pool for sample buffers 123 * Memory pool for sample buffers
124 */ 124 */
125static pool_cache_t rnd_mempc; 125static pool_cache_t rnd_mempc;
126 126
127/* 127/*
128 * Global entropy pool and sources. 128 * Global entropy pool and sources.
129 */ 129 */
130static struct { 130static struct {
131 kmutex_t lock; 131 kmutex_t lock;
132 rndpool_t pool; 132 rndpool_t pool;
133 LIST_HEAD(, krndsource) sources; 133 LIST_HEAD(, krndsource) sources;
134} rnd_global __cacheline_aligned; 134} rnd_global __cacheline_aligned;
135 135
136/* 136/*
137 * This source is used to easily "remove" queue entries when the source 137 * This source is used to easily "remove" queue entries when the source
138 * which actually generated the events is going away. 138 * which actually generated the events is going away.
139 */ 139 */
140static krndsource_t rnd_source_no_collect = { 140static krndsource_t rnd_source_no_collect = {
141 /* LIST_ENTRY list */ 141 /* LIST_ENTRY list */
142 .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', 142 .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't',
143 0, 0, 0, 0, 0, 0, 0 }, 143 0, 0, 0, 0, 0, 0, 0 },
144 .total = 0, 144 .total = 0,
145 .type = RND_TYPE_UNKNOWN, 145 .type = RND_TYPE_UNKNOWN,
146 .flags = (RND_FLAG_NO_COLLECT | 146 .flags = (RND_FLAG_NO_COLLECT |
147 RND_FLAG_NO_ESTIMATE), 147 RND_FLAG_NO_ESTIMATE),
148 .state = NULL, 148 .state = NULL,
149 .test_cnt = 0, 149 .test_cnt = 0,
150 .test = NULL 150 .test = NULL
151}; 151};
152 152
153krndsource_t rnd_printf_source, rnd_autoconf_source; 153krndsource_t rnd_printf_source, rnd_autoconf_source;
154 154
155static void *rnd_process, *rnd_wakeup; 155static void *rnd_process, *rnd_wakeup;
156 156
157static void rnd_wakeup_readers(void); 157static void rnd_wakeup_readers(void);
158static inline uint32_t rnd_counter(void); 158static inline uint32_t rnd_counter(void);
159static void rnd_intr(void *); 159static void rnd_intr(void *);
160static void rnd_wake(void *); 160static void rnd_wake(void *);
161static void rnd_process_events(void); 161static void rnd_process_events(void);
162static void rnd_add_data_ts(krndsource_t *, const void *const, 162static void rnd_add_data_ts(krndsource_t *, const void *const,
163 uint32_t, uint32_t, uint32_t); 163 uint32_t, uint32_t, uint32_t);
164static inline void rnd_schedule_process(void); 164static inline void rnd_schedule_process(void);
165 165
166int rnd_ready = 0; 166int rnd_ready = 0;
167int rnd_initial_entropy = 0; 167int rnd_initial_entropy = 0;
168 168
169static int rnd_printing = 0; 169static int rnd_printing = 0;
170 170
171#ifdef DIAGNOSTIC 171#ifdef DIAGNOSTIC
172static int rnd_tested = 0; 172static int rnd_tested = 0;
173static rngtest_t rnd_rt; 173static rngtest_t rnd_rt;
174static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)]; 174static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)];
175#endif 175#endif
176 176
177static rndsave_t *boot_rsp; 177static rndsave_t *boot_rsp;
178 178
179static inline void 179static inline void
180rnd_printf(const char *fmt, ...) 180rnd_printf(const char *fmt, ...)
181{ 181{
182 va_list ap; 182 va_list ap;
183 183
184 membar_consumer(); 184 membar_consumer();
185 if (rnd_printing) { 185 if (rnd_printing) {
186 return; 186 return;
187 } 187 }
188 rnd_printing = 1; 188 rnd_printing = 1;
189 membar_producer(); 189 membar_producer();
190 va_start(ap, fmt); 190 va_start(ap, fmt);
191 vprintf(fmt, ap); 191 vprintf(fmt, ap);
192 va_end(ap); 192 va_end(ap);
193 rnd_printing = 0; 193 rnd_printing = 0;
194} 194}
195 195
196void 196void
197rnd_init_softint(void) { 197rnd_init_softint(void) {
198 rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE, 198 rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
199 rnd_intr, NULL); 199 rnd_intr, NULL);
200 rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE, 200 rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE,
201 rnd_wake, NULL); 201 rnd_wake, NULL);
202 rnd_schedule_process(); 202 rnd_schedule_process();
203} 203}
204 204
205/* 205/*
206 * Generate a 32-bit counter. 206 * Generate a 32-bit counter.
207 */ 207 */
208static inline uint32_t 208static inline uint32_t
209rnd_counter(void) 209rnd_counter(void)
210{ 210{
211 struct bintime bt; 211 struct bintime bt;
212 uint32_t ret; 212 uint32_t ret;
213 213
214#if defined(__HAVE_CPU_COUNTER) 214#if defined(__HAVE_CPU_COUNTER)
215 if (cpu_hascounter()) 215 if (cpu_hascounter())
216 return cpu_counter32(); 216 return cpu_counter32();
217#endif 217#endif
218 if (!rnd_ready) 218 if (!rnd_ready)
219 /* Too early to call nanotime. */ 219 /* Too early to call nanotime. */
220 return 0; 220 return 0;
221 221
222 binuptime(&bt); 222 binuptime(&bt);
223 ret = bt.sec; 223 ret = bt.sec;
224 ret |= bt.sec >> 32; 224 ret |= bt.sec >> 32;
225 ret |= bt.frac; 225 ret |= bt.frac;
226 ret |= bt.frac >> 32; 226 ret |= bt.frac >> 32;
227 227
228 return ret; 228 return ret;
229} 229}
230 230
231/* 231/*
232 * We may be called from low IPL -- protect our softint. 232 * We may be called from low IPL -- protect our softint.
233 */ 233 */
234 234
235static inline void 235static inline void
236rnd_schedule_softint(void *softint) 236rnd_schedule_softint(void *softint)
237{ 237{
238 kpreempt_disable(); 238 kpreempt_disable();
239 softint_schedule(softint); 239 softint_schedule(softint);
240 kpreempt_enable(); 240 kpreempt_enable();
241} 241}
242 242
243static inline void 243static inline void
244rnd_schedule_process(void) 244rnd_schedule_process(void)
245{ 245{
246 if (__predict_true(rnd_process)) { 246 if (__predict_true(rnd_process)) {
247 rnd_schedule_softint(rnd_process); 247 rnd_schedule_softint(rnd_process);
248 return; 248 return;
249 }  249 }
250 rnd_process_events(); 250 rnd_process_events();
251} 251}
252 252
253static inline void 253static inline void
254rnd_schedule_wakeup(void) 254rnd_schedule_wakeup(void)
255{ 255{
256 if (__predict_true(rnd_wakeup)) { 256 if (__predict_true(rnd_wakeup)) {
257 rnd_schedule_softint(rnd_wakeup); 257 rnd_schedule_softint(rnd_wakeup);
258 return; 258 return;
259 } 259 }
260 rnd_wakeup_readers(); 260 rnd_wakeup_readers();
261} 261}
262 262
263/* 263/*
264 * Tell any sources with "feed me" callbacks that we are hungry. 264 * Tell any sources with "feed me" callbacks that we are hungry.
265 */ 265 */
266void 266void
267rnd_getmore(size_t byteswanted) 267rnd_getmore(size_t byteswanted)
268{ 268{
269 krndsource_t *rs; 269 krndsource_t *rs;
270 270
271 mutex_spin_enter(&rnd_global.lock); 271 mutex_spin_enter(&rnd_global.lock);
272 LIST_FOREACH(rs, &rnd_global.sources, list) { 272 LIST_FOREACH(rs, &rnd_global.sources, list) {
273 if (!ISSET(rs->flags, RND_FLAG_HASCB)) 273 if (!ISSET(rs->flags, RND_FLAG_HASCB))
274 continue; 274 continue;
275 KASSERT(rs->get != NULL); 275 KASSERT(rs->get != NULL);
276 KASSERT(rs->getarg != NULL); 276 KASSERT(rs->getarg != NULL);
277 rs->get(byteswanted, rs->getarg); 277 rs->get(byteswanted, rs->getarg);
278 rnd_printf_verbose("rnd: entropy estimate %zu bits\n", 278 rnd_printf_verbose("rnd: entropy estimate %zu bits\n",
279 rndpool_get_entropy_count(&rnd_global.pool)); 279 rndpool_get_entropy_count(&rnd_global.pool));
280 rnd_printf_verbose("rnd: asking source %s for %zu bytes\n", 280 rnd_printf_verbose("rnd: asking source %s for %zu bytes\n",
281 rs->name, byteswanted); 281 rs->name, byteswanted);
282 } 282 }
283 mutex_spin_exit(&rnd_global.lock); 283 mutex_spin_exit(&rnd_global.lock);
284} 284}
285 285
286/* 286/*
287 * Check to see if there are readers waiting on us. If so, kick them. 287 * Check to see if there are readers waiting on us. If so, kick them.
288 */ 288 */
289static void 289static void
290rnd_wakeup_readers(void) 290rnd_wakeup_readers(void)
291{ 291{
292 292
293 /* 
294 * XXX This bookkeeping shouldn't be here -- this is not where 
295 * the rnd_initial_entropy state change actually happens. 
296 */ 
297 mutex_spin_enter(&rnd_global.lock); 
298 const size_t entropy_count = rndpool_get_entropy_count(&rnd_global.pool); 
299 if (entropy_count < RND_ENTROPY_THRESHOLD * 8) { 
300 mutex_spin_exit(&rnd_global.lock); 
301 return; 
302 } else { 
303#ifdef RND_VERBOSE 
304 if (__predict_false(!rnd_initial_entropy)) 
305 rnd_printf_verbose("rnd: have initial entropy (%zu)\n", 
306 entropy_count); 
307#endif 
308 rnd_initial_entropy = 1; 
309 } 
310 mutex_spin_exit(&rnd_global.lock); 
311 
312 rndsinks_distribute(); 293 rndsinks_distribute();
313} 294}
314 295
315/* 296/*
316 * Use the timing/value of the event to estimate the entropy gathered. 297 * Use the timing/value of the event to estimate the entropy gathered.
317 * If all the differentials (first, second, and third) are non-zero, return 298 * If all the differentials (first, second, and third) are non-zero, return
318 * non-zero. If any of these are zero, return zero. 299 * non-zero. If any of these are zero, return zero.
319 */ 300 */
320static inline uint32_t 301static inline uint32_t
321rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta) 302rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta)
322{ 303{
323 int32_t delta2, delta3; 304 int32_t delta2, delta3;
324 305
325 d->insamples++; 306 d->insamples++;
326 307
327 /* 308 /*
328 * Calculate the second and third order differentials 309 * Calculate the second and third order differentials
329 */ 310 */
330 delta2 = d->dx - delta; 311 delta2 = d->dx - delta;
331 if (delta2 < 0) 312 if (delta2 < 0)
332 delta2 = -delta2; 313 delta2 = -delta2;
333 314
334 delta3 = d->d2x - delta2; 315 delta3 = d->d2x - delta2;
335 if (delta3 < 0) 316 if (delta3 < 0)
336 delta3 = -delta3; 317 delta3 = -delta3;
337 318
338 d->x = v; 319 d->x = v;
339 d->dx = delta; 320 d->dx = delta;
340 d->d2x = delta2; 321 d->d2x = delta2;
341 322
342 /* 323 /*
343 * If any delta is 0, we got no entropy. If all are non-zero, we 324 * If any delta is 0, we got no entropy. If all are non-zero, we
344 * might have something. 325 * might have something.
345 */ 326 */
346 if (delta == 0 || delta2 == 0 || delta3 == 0) 327 if (delta == 0 || delta2 == 0 || delta3 == 0)
347 return (0); 328 return (0);
348 329
349 d->outbits++; 330 d->outbits++;
350 return (1); 331 return (1);
351} 332}
352 333
353/* 334/*
 354 * Delta estimator for 32-bit timestamps. Must handle wrap. 335 * Delta estimator for 32-bit timestamps. Must handle wrap.
355 */ 336 */
356static inline uint32_t 337static inline uint32_t
357rnd_dt_estimate(krndsource_t *rs, uint32_t t) 338rnd_dt_estimate(krndsource_t *rs, uint32_t t)
358{ 339{
359 int32_t delta; 340 int32_t delta;
360 uint32_t ret; 341 uint32_t ret;
361 rnd_delta_t *d = &rs->time_delta; 342 rnd_delta_t *d = &rs->time_delta;
362 343
363 if (t < d->x) { 344 if (t < d->x) {
364 delta = UINT32_MAX - d->x + t; 345 delta = UINT32_MAX - d->x + t;
365 } else { 346 } else {
366 delta = d->x - t; 347 delta = d->x - t;
367 } 348 }
368 349
369 if (delta < 0) { 350 if (delta < 0) {
370 delta = -delta; 351 delta = -delta;
371 } 352 }
372 353
373 ret = rnd_delta_estimate(d, t, delta); 354 ret = rnd_delta_estimate(d, t, delta);
374 355
375 KASSERT(d->x == t); 356 KASSERT(d->x == t);
376 KASSERT(d->dx == delta); 357 KASSERT(d->dx == delta);
377#ifdef RND_VERBOSE 358#ifdef RND_VERBOSE
378 if (deltacnt++ % 1151 == 0) { 359 if (deltacnt++ % 1151 == 0) {
379 rnd_printf_verbose("rnd_dt_estimate: %s x = %lld, dx = %lld, " 360 rnd_printf_verbose("rnd_dt_estimate: %s x = %lld, dx = %lld, "
380 "d2x = %lld\n", rs->name, 361 "d2x = %lld\n", rs->name,
381 (int)d->x, (int)d->dx, (int)d->d2x); 362 (int)d->x, (int)d->dx, (int)d->d2x);
382 } 363 }
383#endif 364#endif
384 return ret; 365 return ret;
385} 366}
386 367
387/* 368/*
 388 * Delta estimator for 32-bit values. "Wrap" isn't. 369 * Delta estimator for 32-bit values. "Wrap" isn't.
389 */ 370 */
390static inline uint32_t 371static inline uint32_t
391rnd_dv_estimate(krndsource_t *rs, uint32_t v) 372rnd_dv_estimate(krndsource_t *rs, uint32_t v)
392{ 373{
393 int32_t delta; 374 int32_t delta;
394 uint32_t ret; 375 uint32_t ret;
395 rnd_delta_t *d = &rs->value_delta; 376 rnd_delta_t *d = &rs->value_delta;
396 377
397 delta = d->x - v; 378 delta = d->x - v;
398 379
399 if (delta < 0) { 380 if (delta < 0) {
400 delta = -delta; 381 delta = -delta;
401 } 382 }
402 ret = rnd_delta_estimate(d, v, (uint32_t)delta); 383 ret = rnd_delta_estimate(d, v, (uint32_t)delta);
403 384
404 KASSERT(d->x == v); 385 KASSERT(d->x == v);
405 KASSERT(d->dx == delta); 386 KASSERT(d->dx == delta);
406#ifdef RND_VERBOSE 387#ifdef RND_VERBOSE
407 if (deltacnt++ % 1151 == 0) { 388 if (deltacnt++ % 1151 == 0) {
408 rnd_printf_verbose("rnd_dv_estimate: %s x = %lld, dx = %lld, " 389 rnd_printf_verbose("rnd_dv_estimate: %s x = %lld, dx = %lld, "
409 " d2x = %lld\n", rs->name, 390 " d2x = %lld\n", rs->name,
410 (long long int)d->x, 391 (long long int)d->x,
411 (long long int)d->dx, 392 (long long int)d->dx,
412 (long long int)d->d2x); 393 (long long int)d->d2x);
413 } 394 }
414#endif 395#endif
415 return ret; 396 return ret;
416} 397}
417 398
418#if defined(__HAVE_CPU_COUNTER) 399#if defined(__HAVE_CPU_COUNTER)
419static struct { 400static struct {
420 kmutex_t lock; 401 kmutex_t lock;
421 struct callout callout; 402 struct callout callout;
422 struct callout stop_callout; 403 struct callout stop_callout;
423 krndsource_t source; 404 krndsource_t source;
424} rnd_skew __cacheline_aligned; 405} rnd_skew __cacheline_aligned;
425 406
426static void rnd_skew_intr(void *); 407static void rnd_skew_intr(void *);
427 408
428static void 409static void
429rnd_skew_enable(krndsource_t *rs, bool enabled) 410rnd_skew_enable(krndsource_t *rs, bool enabled)
430{ 411{
431 412
432 if (enabled) { 413 if (enabled) {
433 rnd_skew_intr(rs); 414 rnd_skew_intr(rs);
434 } else { 415 } else {
435 callout_stop(&rnd_skew.callout); 416 callout_stop(&rnd_skew.callout);
436 } 417 }
437} 418}
438 419
439static void 420static void
440rnd_skew_stop_intr(void *arg) 421rnd_skew_stop_intr(void *arg)
441{ 422{
442 423
443 callout_stop(&rnd_skew.callout); 424 callout_stop(&rnd_skew.callout);
444} 425}
445 426
446static void 427static void
447rnd_skew_get(size_t bytes, void *priv) 428rnd_skew_get(size_t bytes, void *priv)
448{ 429{
449 krndsource_t *skewsrcp = priv; 430 krndsource_t *skewsrcp = priv;
450 431
451 KASSERT(skewsrcp == &rnd_skew.source); 432 KASSERT(skewsrcp == &rnd_skew.source);
452 if (RND_ENABLED(skewsrcp)) { 433 if (RND_ENABLED(skewsrcp)) {
453 /* Measure for 30s */ 434 /* Measure for 30s */
454 callout_schedule(&rnd_skew.stop_callout, hz * 30); 435 callout_schedule(&rnd_skew.stop_callout, hz * 30);
455 callout_schedule(&rnd_skew.callout, 1); 436 callout_schedule(&rnd_skew.callout, 1);
456 } 437 }
457} 438}
458 439
459static void 440static void
460rnd_skew_intr(void *arg) 441rnd_skew_intr(void *arg)
461{ 442{
462 static int flipflop; 443 static int flipflop;
463 444
464 /* 445 /*
465 * Even on systems with seemingly stable clocks, the 446 * Even on systems with seemingly stable clocks, the
466 * delta-time entropy estimator seems to think we get 1 bit here 447 * delta-time entropy estimator seems to think we get 1 bit here
467 * about every 2 calls. 448 * about every 2 calls.
468 * 449 *
469 */ 450 */
470 mutex_spin_enter(&rnd_skew.lock); 451 mutex_spin_enter(&rnd_skew.lock);
471 flipflop = !flipflop; 452 flipflop = !flipflop;
472 453
473 if (RND_ENABLED(&rnd_skew.source)) { 454 if (RND_ENABLED(&rnd_skew.source)) {
474 if (flipflop) { 455 if (flipflop) {
475 rnd_add_uint32(&rnd_skew.source, rnd_counter()); 456 rnd_add_uint32(&rnd_skew.source, rnd_counter());
476 callout_schedule(&rnd_skew.callout, hz / 10); 457 callout_schedule(&rnd_skew.callout, hz / 10);
477 } else { 458 } else {
478 callout_schedule(&rnd_skew.callout, 1); 459 callout_schedule(&rnd_skew.callout, 1);
479 } 460 }
480 } 461 }
481 mutex_spin_exit(&rnd_skew.lock); 462 mutex_spin_exit(&rnd_skew.lock);
482} 463}
483#endif 464#endif
484 465
485/* 466/*
 467 * Entropy was just added to the pool. If we crossed the threshold for
 468 * the first time, set rnd_initial_entropy = 1.
 469 */
 470static void
 471rnd_entropy_added(void)
 472{
 473 uint32_t pool_entropy;
 474
 475 KASSERT(mutex_owned(&rnd_global.lock));
 476
 477 if (__predict_true(rnd_initial_entropy))
 478 return;
 479 pool_entropy = rndpool_get_entropy_count(&rnd_global.pool);
 480 if (pool_entropy > RND_ENTROPY_THRESHOLD * NBBY) {
 481 rnd_printf_verbose("rnd: have initial entropy (%zu)\n",
 482 pool_entropy);
 483 rnd_initial_entropy = 1;
 484 }
 485}
 486
 487/*
486 * initialize the global random pool for our use. 488 * initialize the global random pool for our use.
487 * rnd_init() must be called very early on in the boot process, so 489 * rnd_init() must be called very early on in the boot process, so
488 * the pool is ready for other devices to attach as sources. 490 * the pool is ready for other devices to attach as sources.
489 */ 491 */
490void 492void
491rnd_init(void) 493rnd_init(void)
492{ 494{
493 uint32_t c; 495 uint32_t c;
494 496
495 if (rnd_ready) 497 if (rnd_ready)
496 return; 498 return;
497 499
498 /* 500 /*
499 * take a counter early, hoping that there's some variance in 501 * take a counter early, hoping that there's some variance in
500 * the following operations 502 * the following operations
501 */ 503 */
502 c = rnd_counter(); 504 c = rnd_counter();
503 505
504 rndsinks_init(); 506 rndsinks_init();
505 507
506 /* Initialize the sample queue. */ 508 /* Initialize the sample queue. */
507 mutex_init(&rnd_samples.lock, MUTEX_DEFAULT, IPL_VM); 509 mutex_init(&rnd_samples.lock, MUTEX_DEFAULT, IPL_VM);
508 SIMPLEQ_INIT(&rnd_samples.q); 510 SIMPLEQ_INIT(&rnd_samples.q);
509 511
510 /* Initialize the global pool and sources list. */ 512 /* Initialize the global pool and sources list. */
511 mutex_init(&rnd_global.lock, MUTEX_DEFAULT, IPL_VM); 513 mutex_init(&rnd_global.lock, MUTEX_DEFAULT, IPL_VM);
512 rndpool_init(&rnd_global.pool); 514 rndpool_init(&rnd_global.pool);
513 LIST_INIT(&rnd_global.sources); 515 LIST_INIT(&rnd_global.sources);
514 516
515 rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0, 517 rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0,
516 "rndsample", NULL, IPL_VM, 518 "rndsample", NULL, IPL_VM,
517 NULL, NULL, NULL); 519 NULL, NULL, NULL);
518 520
519 /* 521 /*
520 * Set resource limit. The rnd_process_events() function 522 * Set resource limit. The rnd_process_events() function
521 * is called every tick and process the sample queue. 523 * is called every tick and process the sample queue.
522 * Without limitation, if a lot of rnd_add_*() are called, 524 * Without limitation, if a lot of rnd_add_*() are called,
523 * all kernel memory may be eaten up. 525 * all kernel memory may be eaten up.
524 */ 526 */
525 pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0); 527 pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0);
526 528
527 /* 529 /*
528 * Mix *something*, *anything* into the pool to help it get started. 530 * Mix *something*, *anything* into the pool to help it get started.
529 * However, it's not safe for rnd_counter() to call microtime() yet, 531 * However, it's not safe for rnd_counter() to call microtime() yet,
530 * so on some platforms we might just end up with zeros anyway. 532 * so on some platforms we might just end up with zeros anyway.
531 * XXX more things to add would be nice. 533 * XXX more things to add would be nice.
532 */ 534 */
533 if (c) { 535 if (c) {
534 mutex_spin_enter(&rnd_global.lock); 536 mutex_spin_enter(&rnd_global.lock);
535 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1); 537 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
536 c = rnd_counter(); 538 c = rnd_counter();
537 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1); 539 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
538 mutex_spin_exit(&rnd_global.lock); 540 mutex_spin_exit(&rnd_global.lock);
539 } 541 }
540 542
541 /* 543 /*
542 * If we have a cycle counter, take its error with respect 544 * If we have a cycle counter, take its error with respect
543 * to the callout mechanism as a source of entropy, ala 545 * to the callout mechanism as a source of entropy, ala
544 * TrueRand. 546 * TrueRand.
545 * 547 *
546 */ 548 */
547#if defined(__HAVE_CPU_COUNTER) 549#if defined(__HAVE_CPU_COUNTER)
548 /* IPL_VM because taken while rnd_global.lock is held. */ 550 /* IPL_VM because taken while rnd_global.lock is held. */
549 mutex_init(&rnd_skew.lock, MUTEX_DEFAULT, IPL_VM); 551 mutex_init(&rnd_skew.lock, MUTEX_DEFAULT, IPL_VM);
550 callout_init(&rnd_skew.callout, CALLOUT_MPSAFE); 552 callout_init(&rnd_skew.callout, CALLOUT_MPSAFE);
551 callout_init(&rnd_skew.stop_callout, CALLOUT_MPSAFE); 553 callout_init(&rnd_skew.stop_callout, CALLOUT_MPSAFE);
552 callout_setfunc(&rnd_skew.callout, rnd_skew_intr, NULL); 554 callout_setfunc(&rnd_skew.callout, rnd_skew_intr, NULL);
553 callout_setfunc(&rnd_skew.stop_callout, rnd_skew_stop_intr, NULL); 555 callout_setfunc(&rnd_skew.stop_callout, rnd_skew_stop_intr, NULL);
554 rndsource_setcb(&rnd_skew.source, rnd_skew_get, &rnd_skew.source); 556 rndsource_setcb(&rnd_skew.source, rnd_skew_get, &rnd_skew.source);
555 rndsource_setenable(&rnd_skew.source, rnd_skew_enable); 557 rndsource_setenable(&rnd_skew.source, rnd_skew_enable);
556 rnd_attach_source(&rnd_skew.source, "callout", RND_TYPE_SKEW, 558 rnd_attach_source(&rnd_skew.source, "callout", RND_TYPE_SKEW,
557 RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE| 559 RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|
558 RND_FLAG_HASCB|RND_FLAG_HASENABLE); 560 RND_FLAG_HASCB|RND_FLAG_HASENABLE);
559 rnd_skew_intr(NULL); 561 rnd_skew_intr(NULL);
560#endif 562#endif
561 563
562 rnd_printf_verbose("rnd: initialised (%u)%s", RND_POOLBITS, 564 rnd_printf_verbose("rnd: initialised (%u)%s", RND_POOLBITS,
563 c ? " with counter\n" : "\n"); 565 c ? " with counter\n" : "\n");
564 if (boot_rsp != NULL) { 566 if (boot_rsp != NULL) {
565 mutex_spin_enter(&rnd_global.lock); 567 mutex_spin_enter(&rnd_global.lock);
566 rndpool_add_data(&rnd_global.pool, boot_rsp->data, 568 rndpool_add_data(&rnd_global.pool, boot_rsp->data,
567 sizeof(boot_rsp->data), 569 sizeof(boot_rsp->data),
568 MIN(boot_rsp->entropy, RND_POOLBITS / 2)); 570 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
569 if (rndpool_get_entropy_count(&rnd_global.pool) > 571 rnd_entropy_added();
570 RND_ENTROPY_THRESHOLD * 8) { 572 mutex_spin_exit(&rnd_global.lock);
571 rnd_initial_entropy = 1; 
572 } 
573 mutex_spin_exit(&rnd_global.lock); 
574 rnd_printf("rnd: seeded with %d bits\n", 573 rnd_printf("rnd: seeded with %d bits\n",
575 MIN(boot_rsp->entropy, RND_POOLBITS / 2)); 574 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
576 memset(boot_rsp, 0, sizeof(*boot_rsp)); 575 memset(boot_rsp, 0, sizeof(*boot_rsp));
577 } 576 }
578 rnd_attach_source(&rnd_printf_source, "printf", RND_TYPE_UNKNOWN, 577 rnd_attach_source(&rnd_printf_source, "printf", RND_TYPE_UNKNOWN,
579 RND_FLAG_NO_ESTIMATE); 578 RND_FLAG_NO_ESTIMATE);
580 rnd_attach_source(&rnd_autoconf_source, "autoconf", 579 rnd_attach_source(&rnd_autoconf_source, "autoconf",
581 RND_TYPE_UNKNOWN, 580 RND_TYPE_UNKNOWN,
582 RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME); 581 RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME);
583 rnd_ready = 1; 582 rnd_ready = 1;
584} 583}
585 584
586static rnd_sample_t * 585static rnd_sample_t *
587rnd_sample_allocate(krndsource_t *source) 586rnd_sample_allocate(krndsource_t *source)
588{ 587{
589 rnd_sample_t *c; 588 rnd_sample_t *c;
590 589
591 c = pool_cache_get(rnd_mempc, PR_WAITOK); 590 c = pool_cache_get(rnd_mempc, PR_WAITOK);
592 if (c == NULL) 591 if (c == NULL)
593 return (NULL); 592 return (NULL);
594 593
595 c->source = source; 594 c->source = source;
596 c->cursor = 0; 595 c->cursor = 0;
597 c->entropy = 0; 596 c->entropy = 0;
598 597
599 return (c); 598 return (c);
600} 599}
601 600
602/* 601/*
603 * Don't wait on allocation. To be used in an interrupt context. 602 * Don't wait on allocation. To be used in an interrupt context.
604 */ 603 */
605static rnd_sample_t * 604static rnd_sample_t *
606rnd_sample_allocate_isr(krndsource_t *source) 605rnd_sample_allocate_isr(krndsource_t *source)
607{ 606{
608 rnd_sample_t *c; 607 rnd_sample_t *c;
609 608
610 c = pool_cache_get(rnd_mempc, PR_NOWAIT); 609 c = pool_cache_get(rnd_mempc, PR_NOWAIT);
611 if (c == NULL) 610 if (c == NULL)
612 return (NULL); 611 return (NULL);
613 612
614 c->source = source; 613 c->source = source;
615 c->cursor = 0; 614 c->cursor = 0;
616 c->entropy = 0; 615 c->entropy = 0;
617 616
618 return (c); 617 return (c);
619} 618}
620 619
621static void 620static void
622rnd_sample_free(rnd_sample_t *c) 621rnd_sample_free(rnd_sample_t *c)
623{ 622{
624 memset(c, 0, sizeof(*c)); 623 memset(c, 0, sizeof(*c));
625 pool_cache_put(rnd_mempc, c); 624 pool_cache_put(rnd_mempc, c);
626} 625}
627 626
628/* 627/*
629 * Add a source to our list of sources. 628 * Add a source to our list of sources.
630 */ 629 */
631void 630void
632rnd_attach_source(krndsource_t *rs, const char *name, uint32_t type, 631rnd_attach_source(krndsource_t *rs, const char *name, uint32_t type,
633 uint32_t flags) 632 uint32_t flags)
634{ 633{
635 uint32_t ts; 634 uint32_t ts;
636 635
637 ts = rnd_counter(); 636 ts = rnd_counter();
638 637
639 strlcpy(rs->name, name, sizeof(rs->name)); 638 strlcpy(rs->name, name, sizeof(rs->name));
640 memset(&rs->time_delta, 0, sizeof(rs->time_delta)); 639 memset(&rs->time_delta, 0, sizeof(rs->time_delta));
641 rs->time_delta.x = ts; 640 rs->time_delta.x = ts;
642 memset(&rs->value_delta, 0, sizeof(rs->value_delta)); 641 memset(&rs->value_delta, 0, sizeof(rs->value_delta));
643 rs->total = 0; 642 rs->total = 0;
644 643
645 /* 644 /*
646 * Some source setup, by type 645 * Some source setup, by type
647 */ 646 */
648 rs->test = NULL; 647 rs->test = NULL;
649 rs->test_cnt = -1; 648 rs->test_cnt = -1;
650 649
651 if (flags == 0) { 650 if (flags == 0) {
652 flags = RND_FLAG_DEFAULT; 651 flags = RND_FLAG_DEFAULT;
653 } 652 }
654 653
655 switch (type) { 654 switch (type) {
656 case RND_TYPE_NET: /* Don't collect by default */ 655 case RND_TYPE_NET: /* Don't collect by default */
657 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE); 656 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);
658 break; 657 break;
659 case RND_TYPE_RNG: /* Space for statistical testing */ 658 case RND_TYPE_RNG: /* Space for statistical testing */
660 rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP); 659 rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP);
661 rs->test_cnt = 0; 660 rs->test_cnt = 0;
662 /* FALLTHRU */ 661 /* FALLTHRU */
663 case RND_TYPE_VM: /* Process samples in bulk always */ 662 case RND_TYPE_VM: /* Process samples in bulk always */
664 flags |= RND_FLAG_FAST; 663 flags |= RND_FLAG_FAST;
665 break; 664 break;
666 default: 665 default:
667 break; 666 break;
668 } 667 }
669 668
670 rs->type = type; 669 rs->type = type;
671 rs->flags = flags; 670 rs->flags = flags;
672 671
673 rs->state = rnd_sample_allocate(rs); 672 rs->state = rnd_sample_allocate(rs);
674 673
675 mutex_spin_enter(&rnd_global.lock); 674 mutex_spin_enter(&rnd_global.lock);
676 LIST_INSERT_HEAD(&rnd_global.sources, rs, list); 675 LIST_INSERT_HEAD(&rnd_global.sources, rs, list);
677 676
678#ifdef RND_VERBOSE 677#ifdef RND_VERBOSE
679 rnd_printf_verbose("rnd: %s attached as an entropy source (", 678 rnd_printf_verbose("rnd: %s attached as an entropy source (",
680 rs->name); 679 rs->name);
681 if (!(flags & RND_FLAG_NO_COLLECT)) { 680 if (!(flags & RND_FLAG_NO_COLLECT)) {
682 rnd_printf_verbose("collecting"); 681 rnd_printf_verbose("collecting");
683 if (flags & RND_FLAG_NO_ESTIMATE) 682 if (flags & RND_FLAG_NO_ESTIMATE)
684 rnd_printf_verbose(" without estimation"); 683 rnd_printf_verbose(" without estimation");
685 } 684 }
686 else 685 else
687 rnd_printf_verbose("off"); 686 rnd_printf_verbose("off");
688 rnd_printf_verbose(")\n"); 687 rnd_printf_verbose(")\n");
689#endif 688#endif
690 689
691 /* 690 /*
692 * Again, put some more initial junk in the pool. 691 * Again, put some more initial junk in the pool.
693 * FreeBSD claim to have an analysis that show 4 bits of 692 * FreeBSD claim to have an analysis that show 4 bits of
694 * entropy per source-attach timestamp. I am skeptical, 693 * entropy per source-attach timestamp. I am skeptical,
695 * but we count 1 bit per source here. 694 * but we count 1 bit per source here.
696 */ 695 */
697 rndpool_add_data(&rnd_global.pool, &ts, sizeof(ts), 1); 696 rndpool_add_data(&rnd_global.pool, &ts, sizeof(ts), 1);
698 mutex_spin_exit(&rnd_global.lock); 697 mutex_spin_exit(&rnd_global.lock);
699} 698}
700 699
701/* 700/*
702 * Remove a source from our list of sources. 701 * Remove a source from our list of sources.
703 */ 702 */
704void 703void
705rnd_detach_source(krndsource_t *source) 704rnd_detach_source(krndsource_t *source)
706{ 705{
707 rnd_sample_t *sample; 706 rnd_sample_t *sample;
708 707
709 mutex_spin_enter(&rnd_global.lock); 708 mutex_spin_enter(&rnd_global.lock);
710 LIST_REMOVE(source, list); 709 LIST_REMOVE(source, list);
711 mutex_spin_exit(&rnd_global.lock); 710 mutex_spin_exit(&rnd_global.lock);
712 711
713 /* 712 /*
714 * If there are samples queued up "remove" them from the sample queue 713 * If there are samples queued up "remove" them from the sample queue
715 * by setting the source to the no-collect pseudosource. 714 * by setting the source to the no-collect pseudosource.
716 */ 715 */
717 mutex_spin_enter(&rnd_samples.lock); 716 mutex_spin_enter(&rnd_samples.lock);
718 sample = SIMPLEQ_FIRST(&rnd_samples.q); 717 sample = SIMPLEQ_FIRST(&rnd_samples.q);
719 while (sample != NULL) { 718 while (sample != NULL) {
720 if (sample->source == source) 719 if (sample->source == source)
721 sample->source = &rnd_source_no_collect; 720 sample->source = &rnd_source_no_collect;
722 721
723 sample = SIMPLEQ_NEXT(sample, next); 722 sample = SIMPLEQ_NEXT(sample, next);
724 } 723 }
725 mutex_spin_exit(&rnd_samples.lock); 724 mutex_spin_exit(&rnd_samples.lock);
726 725
727 if (source->state) { 726 if (source->state) {
728 rnd_sample_free(source->state); 727 rnd_sample_free(source->state);
729 source->state = NULL; 728 source->state = NULL;
730 } 729 }
731 730
732 if (source->test) { 731 if (source->test) {
733 kmem_free(source->test, sizeof(rngtest_t)); 732 kmem_free(source->test, sizeof(rngtest_t));
734 } 733 }
735 734
736 rnd_printf_verbose("rnd: %s detached as an entropy source\n", 735 rnd_printf_verbose("rnd: %s detached as an entropy source\n",
737 source->name); 736 source->name);
738} 737}
739 738
740static inline uint32_t 739static inline uint32_t
741rnd_estimate(krndsource_t *rs, uint32_t ts, uint32_t val) 740rnd_estimate(krndsource_t *rs, uint32_t ts, uint32_t val)
742{ 741{
743 uint32_t entropy = 0, dt_est, dv_est; 742 uint32_t entropy = 0, dt_est, dv_est;
744 743
745 dt_est = rnd_dt_estimate(rs, ts); 744 dt_est = rnd_dt_estimate(rs, ts);
746 dv_est = rnd_dv_estimate(rs, val); 745 dv_est = rnd_dv_estimate(rs, val);
747 746
748 if (!(rs->flags & RND_FLAG_NO_ESTIMATE)) { 747 if (!(rs->flags & RND_FLAG_NO_ESTIMATE)) {
749 if (rs->flags & RND_FLAG_ESTIMATE_TIME) { 748 if (rs->flags & RND_FLAG_ESTIMATE_TIME) {
750 entropy += dt_est; 749 entropy += dt_est;
751 } 750 }
752 751
753 if (rs->flags & RND_FLAG_ESTIMATE_VALUE) { 752 if (rs->flags & RND_FLAG_ESTIMATE_VALUE) {
754 entropy += dv_est; 753 entropy += dv_est;
755 } 754 }
756 755
757 } 756 }
758 return entropy; 757 return entropy;
759} 758}
760 759
761/* 760/*
762 * Add a 32-bit value to the entropy pool. The rs parameter should point to 761 * Add a 32-bit value to the entropy pool. The rs parameter should point to
763 * the source-specific source structure. 762 * the source-specific source structure.
764 */ 763 */
765void 764void
766_rnd_add_uint32(krndsource_t *rs, uint32_t val) 765_rnd_add_uint32(krndsource_t *rs, uint32_t val)
767{ 766{
768 uint32_t ts;  767 uint32_t ts;
769 uint32_t entropy = 0; 768 uint32_t entropy = 0;
770 769
771 if (rs->flags & RND_FLAG_NO_COLLECT) 770 if (rs->flags & RND_FLAG_NO_COLLECT)
772 return; 771 return;
773 772
774 /* 773 /*
775 * Sample the counter as soon as possible to avoid 774 * Sample the counter as soon as possible to avoid
776 * entropy overestimation. 775 * entropy overestimation.
777 */ 776 */
778 ts = rnd_counter(); 777 ts = rnd_counter();
779 778
780 /* 779 /*
781 * Calculate estimates - we may not use them, but if we do 780 * Calculate estimates - we may not use them, but if we do
782 * not calculate them, the estimators' history becomes invalid. 781 * not calculate them, the estimators' history becomes invalid.
783 */ 782 */
784 entropy = rnd_estimate(rs, ts, val); 783 entropy = rnd_estimate(rs, ts, val);
785 784
786 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); 785 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts);
787} 786}
788 787
789void 788void
790_rnd_add_uint64(krndsource_t *rs, uint64_t val) 789_rnd_add_uint64(krndsource_t *rs, uint64_t val)
791{ 790{
792 uint32_t ts;  791 uint32_t ts;
793 uint32_t entropy = 0; 792 uint32_t entropy = 0;
794 793
795 if (rs->flags & RND_FLAG_NO_COLLECT) 794 if (rs->flags & RND_FLAG_NO_COLLECT)
796 return; 795 return;
797 796
798 /* 797 /*
799 * Sample the counter as soon as possible to avoid 798 * Sample the counter as soon as possible to avoid
800 * entropy overestimation. 799 * entropy overestimation.
801 */ 800 */
802 ts = rnd_counter(); 801 ts = rnd_counter();
803 802
804 /* 803 /*
805 * Calculate estimates - we may not use them, but if we do 804 * Calculate estimates - we may not use them, but if we do
806 * not calculate them, the estimators' history becomes invalid. 805 * not calculate them, the estimators' history becomes invalid.
807 */ 806 */
808 entropy = rnd_estimate(rs, ts, (uint32_t)(val & (uint64_t)0xffffffff)); 807 entropy = rnd_estimate(rs, ts, (uint32_t)(val & (uint64_t)0xffffffff));
809 808
810 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); 809 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts);
811} 810}
812 811
813void 812void
814rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len, 813rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len,
815 uint32_t entropy) 814 uint32_t entropy)
816{ 815{
817 /* 816 /*
818 * This interface is meant for feeding data which is, 817 * This interface is meant for feeding data which is,
819 * itself, random. Don't estimate entropy based on 818 * itself, random. Don't estimate entropy based on
820 * timestamp, just directly add the data. 819 * timestamp, just directly add the data.
821 */ 820 */
822 if (__predict_false(rs == NULL)) { 821 if (__predict_false(rs == NULL)) {
823 mutex_spin_enter(&rnd_global.lock); 822 mutex_spin_enter(&rnd_global.lock);
824 rndpool_add_data(&rnd_global.pool, data, len, entropy); 823 rndpool_add_data(&rnd_global.pool, data, len, entropy);
825 mutex_spin_exit(&rnd_global.lock); 824 mutex_spin_exit(&rnd_global.lock);
826 } else { 825 } else {
827 rnd_add_data_ts(rs, data, len, entropy, rnd_counter()); 826 rnd_add_data_ts(rs, data, len, entropy, rnd_counter());
828 } 827 }
829} 828}
830 829
/*
 * Package LEN bytes of DATA from source RS into sample buffers, each
 * 32-bit word tagged with timestamp TS, and queue them on
 * rnd_samples.q for later mixing by rnd_process_events().  The
 * caller's ENTROPY claim is attached to the last buffer only.  Safe
 * to call from interrupt context (uses the no-wait allocator).
 */
static void
rnd_add_data_ts(krndsource_t *rs, const void *const data, uint32_t len,
    uint32_t entropy, uint32_t ts)
{
	rnd_sample_t *state = NULL;
	const uint8_t *p = data;
	uint32_t dint;
	int todo, done, filled = 0;
	int sample_count;
	struct rnd_sampleq tmp_samples = SIMPLEQ_HEAD_INITIALIZER(tmp_samples);

	/* Drop data from sources that are disabled or collect nothing. */
	if (rs && (rs->flags & RND_FLAG_NO_COLLECT ||
		__predict_false(!(rs->flags &
			(RND_FLAG_COLLECT_TIME|
			    RND_FLAG_COLLECT_VALUE))))) {
		return;
	}
	/* Number of whole 32-bit words to package; a tail < 4 bytes is dropped. */
	todo = len / sizeof(dint);
	/*
	 * Let's try to be efficient: if we are warm, and a source
	 * is adding entropy at a rate of at least 1 bit every 10 seconds,
	 * mark it as "fast" and add its samples in bulk.
	 */
	if (__predict_true(rs->flags & RND_FLAG_FAST) ||
	    (todo >= RND_SAMPLE_COUNT)) {
		sample_count = RND_SAMPLE_COUNT;
	} else {
		if (!(rs->flags & RND_FLAG_HASCB) &&
		    !cold && rnd_initial_entropy) {
			struct timeval upt;

			getmicrouptime(&upt);
			/* Rate heuristic: promote to "fast" and batch. */
			if ( (upt.tv_sec > 0 && rs->total > upt.tv_sec * 10) ||
			    (upt.tv_sec > 10 && rs->total > upt.tv_sec) ||
			    (upt.tv_sec > 100 &&
			      rs->total > upt.tv_sec / 10)) {
				rnd_printf_verbose("rnd: source %s is fast"
				    " (%d samples at once,"
				    " %d bits in %lld seconds), "
				    "processing samples in bulk.\n",
				    rs->name, todo, rs->total,
				    (long long int)upt.tv_sec);
				rs->flags |= RND_FLAG_FAST;
			}
		}
		sample_count = 2;
	}

	/*
	 * Loop over data packaging it into sample buffers.
	 * If a sample buffer allocation fails, drop all data.
	 */
	for (done = 0; done < todo ; done++) {
		state = rs->state;
		if (state == NULL) {
			state = rnd_sample_allocate_isr(rs);
			if (__predict_false(state == NULL)) {
				break;
			}
			rs->state = state;
		}

		state->ts[state->cursor] = ts;
		/* memcpy avoids misaligned 32-bit loads from p. */
		(void)memcpy(&dint, &p[done*4], 4);
		state->values[state->cursor] = dint;
		state->cursor++;

		/* Buffer full: park it on the local queue, start a new one. */
		if (state->cursor == sample_count) {
			SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next);
			filled++;
			rs->state = NULL;
		}
	}

	/* Allocation failed mid-stream: free the partial batch, drop all. */
	if (__predict_false(state == NULL)) {
		while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
			SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
			rnd_sample_free(state);
		}
		return;
	}

	/*
	 * Claim all the entropy on the last one we send to
	 * the pool, so we don't rely on it being evenly distributed
	 * in the supplied data.
	 *
	 * XXX The rndpool code must accept samples with more
	 * XXX claimed entropy than bits for this to work right.
	 */
	state->entropy += entropy;
	rs->total += entropy;

	/*
	 * If we didn't finish any sample buffers, we're done.
	 */
	if (!filled) {
		return;
	}

	/* Publish finished buffers onto the global sample queue. */
	mutex_spin_enter(&rnd_samples.lock);
	while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
		SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
		SIMPLEQ_INSERT_HEAD(&rnd_samples.q, state, next);
	}
	mutex_spin_exit(&rnd_samples.lock);

	/* Cause processing of queued samples */
	rnd_schedule_process();
}
941 940
942static int 941static int
943rnd_hwrng_test(rnd_sample_t *sample) 942rnd_hwrng_test(rnd_sample_t *sample)
944{ 943{
945 krndsource_t *source = sample->source; 944 krndsource_t *source = sample->source;
946 size_t cmplen; 945 size_t cmplen;
947 uint8_t *v1, *v2; 946 uint8_t *v1, *v2;
948 size_t resid, totest; 947 size_t resid, totest;
949 948
950 KASSERT(source->type == RND_TYPE_RNG); 949 KASSERT(source->type == RND_TYPE_RNG);
951 950
952 /* 951 /*
953 * Continuous-output test: compare two halves of the 952 * Continuous-output test: compare two halves of the
954 * sample buffer to each other. The sample buffer (64 ints, 953 * sample buffer to each other. The sample buffer (64 ints,
955 * so either 256 or 512 bytes on any modern machine) should be 954 * so either 256 or 512 bytes on any modern machine) should be
956 * much larger than a typical hardware RNG output, so this seems 955 * much larger than a typical hardware RNG output, so this seems
957 * a reasonable way to do it without retaining extra data. 956 * a reasonable way to do it without retaining extra data.
958 */ 957 */
959 cmplen = sizeof(sample->values) / 2; 958 cmplen = sizeof(sample->values) / 2;
960 v1 = (uint8_t *)sample->values; 959 v1 = (uint8_t *)sample->values;
961 v2 = (uint8_t *)sample->values + cmplen; 960 v2 = (uint8_t *)sample->values + cmplen;
962 961
963 if (__predict_false(!memcmp(v1, v2, cmplen))) { 962 if (__predict_false(!memcmp(v1, v2, cmplen))) {
964 rnd_printf("rnd: source \"%s\" failed continuous-output test.\n", 963 rnd_printf("rnd: source \"%s\" failed continuous-output test.\n",
965 source->name); 964 source->name);
966 return 1; 965 return 1;
967 } 966 }
968 967
969 /* 968 /*
970 * FIPS 140 statistical RNG test. We must accumulate 20,000 bits. 969 * FIPS 140 statistical RNG test. We must accumulate 20,000 bits.
971 */ 970 */
972 if (__predict_true(source->test_cnt == -1)) { 971 if (__predict_true(source->test_cnt == -1)) {
973 /* already passed the test */ 972 /* already passed the test */
974 return 0; 973 return 0;
975 } 974 }
976 resid = FIPS140_RNG_TEST_BYTES - source->test_cnt; 975 resid = FIPS140_RNG_TEST_BYTES - source->test_cnt;
977 totest = MIN(RND_SAMPLE_COUNT * 4, resid); 976 totest = MIN(RND_SAMPLE_COUNT * 4, resid);
978 memcpy(source->test->rt_b + source->test_cnt, sample->values, totest); 977 memcpy(source->test->rt_b + source->test_cnt, sample->values, totest);
979 resid -= totest; 978 resid -= totest;
980 source->test_cnt += totest; 979 source->test_cnt += totest;
981 if (resid == 0) { 980 if (resid == 0) {
982 strlcpy(source->test->rt_name, source->name, 981 strlcpy(source->test->rt_name, source->name,
983 sizeof(source->test->rt_name)); 982 sizeof(source->test->rt_name));
984 if (rngtest(source->test)) { 983 if (rngtest(source->test)) {
985 rnd_printf("rnd: source \"%s\" failed statistical test.", 984 rnd_printf("rnd: source \"%s\" failed statistical test.",
986 source->name); 985 source->name);
987 return 1; 986 return 1;
988 } 987 }
989 source->test_cnt = -1; 988 source->test_cnt = -1;
990 memset(source->test, 0, sizeof(*source->test)); 989 memset(source->test, 0, sizeof(*source->test));
991 } 990 }
992 return 0; 991 return 0;
993} 992}
994 993
995/* 994/*
996 * Process the events in the ring buffer. Called by rnd_timeout or 995 * Process the events in the ring buffer. Called by rnd_timeout or
997 * by the add routines directly if the callout has never fired (that 996 * by the add routines directly if the callout has never fired (that
998 * is, if we are "cold" -- just booted). 997 * is, if we are "cold" -- just booted).
999 * 998 *
1000 */ 999 */
1001static void 1000static void
1002rnd_process_events(void) 1001rnd_process_events(void)
1003{ 1002{
1004 rnd_sample_t *sample = NULL; 1003 rnd_sample_t *sample = NULL;
1005 krndsource_t *source; 1004 krndsource_t *source;
1006 static krndsource_t *last_source; 1005 static krndsource_t *last_source;
1007 uint32_t entropy; 1006 uint32_t entropy;
1008 size_t pool_entropy; 1007 size_t pool_entropy;
1009 int found = 0, wake = 0; 1008 int found = 0, wake = 0;
1010 struct rnd_sampleq dq_samples = SIMPLEQ_HEAD_INITIALIZER(dq_samples); 1009 struct rnd_sampleq dq_samples = SIMPLEQ_HEAD_INITIALIZER(dq_samples);
1011 struct rnd_sampleq df_samples = SIMPLEQ_HEAD_INITIALIZER(df_samples); 1010 struct rnd_sampleq df_samples = SIMPLEQ_HEAD_INITIALIZER(df_samples);
1012 1011
1013 /* 1012 /*
1014 * Drain to the on-stack queue and drop the lock. 1013 * Drain to the on-stack queue and drop the lock.
1015 */ 1014 */
1016 mutex_spin_enter(&rnd_samples.lock); 1015 mutex_spin_enter(&rnd_samples.lock);
1017 while ((sample = SIMPLEQ_FIRST(&rnd_samples.q))) { 1016 while ((sample = SIMPLEQ_FIRST(&rnd_samples.q))) {
1018 found++; 1017 found++;
1019 SIMPLEQ_REMOVE_HEAD(&rnd_samples.q, next); 1018 SIMPLEQ_REMOVE_HEAD(&rnd_samples.q, next);
1020 /* 1019 /*
1021 * We repeat this check here, since it is possible 1020 * We repeat this check here, since it is possible
1022 * the source was disabled before we were called, but 1021 * the source was disabled before we were called, but
1023 * after the entry was queued. 1022 * after the entry was queued.
1024 */ 1023 */
1025 if (__predict_false(!(sample->source->flags & 1024 if (__predict_false(!(sample->source->flags &
1026 (RND_FLAG_COLLECT_TIME| 1025 (RND_FLAG_COLLECT_TIME|
1027 RND_FLAG_COLLECT_VALUE)))) { 1026 RND_FLAG_COLLECT_VALUE)))) {
1028 SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); 1027 SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
1029 } else { 1028 } else {
1030 SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next); 1029 SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next);
1031 } 1030 }
1032 } 1031 }
1033 mutex_spin_exit(&rnd_samples.lock); 1032 mutex_spin_exit(&rnd_samples.lock);
1034 1033
1035 /* Don't thrash the rndpool mtx either. Hold, add all samples. */ 1034 /* Don't thrash the rndpool mtx either. Hold, add all samples. */
1036 mutex_spin_enter(&rnd_global.lock); 1035 mutex_spin_enter(&rnd_global.lock);
1037 1036
1038 pool_entropy = rndpool_get_entropy_count(&rnd_global.pool); 1037 pool_entropy = rndpool_get_entropy_count(&rnd_global.pool);
1039 1038
1040 while ((sample = SIMPLEQ_FIRST(&dq_samples))) { 1039 while ((sample = SIMPLEQ_FIRST(&dq_samples))) {
1041 int sample_count; 1040 int sample_count;
1042 1041
1043 SIMPLEQ_REMOVE_HEAD(&dq_samples, next); 1042 SIMPLEQ_REMOVE_HEAD(&dq_samples, next);
1044 source = sample->source; 1043 source = sample->source;
1045 entropy = sample->entropy; 1044 entropy = sample->entropy;
1046 sample_count = sample->cursor; 1045 sample_count = sample->cursor;
1047 1046
1048 /* 1047 /*
1049 * Don't provide a side channel for timing attacks on 1048 * Don't provide a side channel for timing attacks on
1050 * low-rate sources: require mixing with some other 1049 * low-rate sources: require mixing with some other
1051 * source before we schedule a wakeup. 1050 * source before we schedule a wakeup.
1052 */ 1051 */
1053 if (!wake && 1052 if (!wake &&
1054 (source != last_source || source->flags & RND_FLAG_FAST)) { 1053 (source != last_source || source->flags & RND_FLAG_FAST)) {
1055 wake++; 1054 wake++;
1056 } 1055 }
1057 last_source = source; 1056 last_source = source;
1058 1057
1059 /* 1058 /*
1060 * If the source has been disabled, ignore samples from 1059 * If the source has been disabled, ignore samples from
1061 * it. 1060 * it.
1062 */ 1061 */
1063 if (source->flags & RND_FLAG_NO_COLLECT) 1062 if (source->flags & RND_FLAG_NO_COLLECT)
1064 goto skip; 1063 goto skip;
1065 1064
1066 /* 1065 /*
1067 * Hardware generators are great but sometimes they 1066 * Hardware generators are great but sometimes they
1068 * have...hardware issues. Don't use any data from 1067 * have...hardware issues. Don't use any data from
1069 * them unless it passes some tests. 1068 * them unless it passes some tests.
1070 */ 1069 */
1071 if (source->type == RND_TYPE_RNG) { 1070 if (source->type == RND_TYPE_RNG) {
1072 if (__predict_false(rnd_hwrng_test(sample))) { 1071 if (__predict_false(rnd_hwrng_test(sample))) {
1073 source->flags |= RND_FLAG_NO_COLLECT; 1072 source->flags |= RND_FLAG_NO_COLLECT;
1074 rnd_printf("rnd: disabling source \"%s\".", 1073 rnd_printf("rnd: disabling source \"%s\".",
1075 source->name); 1074 source->name);
1076 goto skip; 1075 goto skip;
1077 } 1076 }
1078 } 1077 }
1079 1078
1080 if (source->flags & RND_FLAG_COLLECT_VALUE) { 1079 if (source->flags & RND_FLAG_COLLECT_VALUE) {
1081 rndpool_add_data(&rnd_global.pool, sample->values, 1080 rndpool_add_data(&rnd_global.pool, sample->values,
1082 sample_count * 1081 sample_count *
1083 sizeof(sample->values[1]), 1082 sizeof(sample->values[1]),
1084 0); 1083 0);
1085 } 1084 }
1086 if (source->flags & RND_FLAG_COLLECT_TIME) { 1085 if (source->flags & RND_FLAG_COLLECT_TIME) {
1087 rndpool_add_data(&rnd_global.pool, sample->ts, 1086 rndpool_add_data(&rnd_global.pool, sample->ts,
1088 sample_count * 1087 sample_count *
1089 sizeof(sample->ts[1]), 1088 sizeof(sample->ts[1]),
1090 0); 1089 0);
1091 } 1090 }
1092 1091
1093 pool_entropy += entropy; 1092 pool_entropy += entropy;
1094 source->total += sample->entropy; 1093 source->total += sample->entropy;
1095skip: SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); 1094skip: SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
1096 } 1095 }
1097 rndpool_set_entropy_count(&rnd_global.pool, pool_entropy); 1096 rndpool_set_entropy_count(&rnd_global.pool, pool_entropy);
 1097 rnd_entropy_added();
1098 mutex_spin_exit(&rnd_global.lock); 1098 mutex_spin_exit(&rnd_global.lock);
1099 1099
1100 /* 1100 /*
1101 * If we filled the pool past the threshold, wake anyone 1101 * If we filled the pool past the threshold, wake anyone
1102 * waiting for entropy. Otherwise, ask all the entropy sources 1102 * waiting for entropy. Otherwise, ask all the entropy sources
1103 * for more. 1103 * for more.
1104 */ 1104 */
1105 if (pool_entropy > RND_ENTROPY_THRESHOLD * 8) { 1105 if (pool_entropy > RND_ENTROPY_THRESHOLD * 8) {
1106 wake++; 1106 wake++;
1107 } else { 1107 } else {
1108 rnd_getmore(howmany((RND_POOLBITS - pool_entropy), NBBY)); 1108 rnd_getmore(howmany((RND_POOLBITS - pool_entropy), NBBY));
1109 rnd_printf_verbose("rnd: empty, asking for %d bytes\n", 1109 rnd_printf_verbose("rnd: empty, asking for %d bytes\n",
1110 (int)(howmany((RND_POOLBITS - pool_entropy), NBBY))); 1110 (int)(howmany((RND_POOLBITS - pool_entropy), NBBY)));
1111 } 1111 }
1112 1112
1113 /* Now we hold no locks: clean up. */ 1113 /* Now we hold no locks: clean up. */
1114 while ((sample = SIMPLEQ_FIRST(&df_samples))) { 1114 while ((sample = SIMPLEQ_FIRST(&df_samples))) {
1115 SIMPLEQ_REMOVE_HEAD(&df_samples, next); 1115 SIMPLEQ_REMOVE_HEAD(&df_samples, next);
1116 rnd_sample_free(sample); 1116 rnd_sample_free(sample);
1117 } 1117 }
1118 1118
1119 /* 1119 /*
1120 * Wake up any potential readers waiting. 1120 * Wake up any potential readers waiting.
1121 */ 1121 */
1122 if (wake) { 1122 if (wake) {
1123 rnd_schedule_wakeup(); 1123 rnd_schedule_wakeup();
1124 } 1124 }
1125} 1125}
1126 1126
/* Softint handler: process any queued samples. */
static void
rnd_intr(void *unused)
{
	rnd_process_events();
}
1132 1132
/* Deferred wakeup: notify readers blocked waiting for entropy. */
static void
rnd_wake(void *unused)
{
	rnd_wakeup_readers();
}
1138 1138
1139static uint32_t 1139static uint32_t
1140rnd_extract_data(void *p, uint32_t len, uint32_t flags) 1140rnd_extract_data(void *p, uint32_t len, uint32_t flags)
1141{ 1141{
1142 static int timed_in; 1142 static int timed_in;
1143 int entropy_count; 1143 int entropy_count;
1144 uint32_t retval; 1144 uint32_t retval;
1145 1145
1146 mutex_spin_enter(&rnd_global.lock); 1146 mutex_spin_enter(&rnd_global.lock);
1147 if (__predict_false(!timed_in)) { 1147 if (__predict_false(!timed_in)) {
1148 if (boottime.tv_sec) { 1148 if (boottime.tv_sec) {
1149 rndpool_add_data(&rnd_global.pool, &boottime, 1149 rndpool_add_data(&rnd_global.pool, &boottime,
1150 sizeof(boottime), 0); 1150 sizeof(boottime), 0);
1151 } 1151 }
1152 timed_in++; 1152 timed_in++;
1153 } 1153 }
1154 if (__predict_false(!rnd_initial_entropy)) { 1154 if (__predict_false(!rnd_initial_entropy)) {
1155 uint32_t c; 1155 uint32_t c;
1156 1156
1157 rnd_printf_verbose("rnd: WARNING! initial entropy low (%u).\n", 1157 rnd_printf_verbose("rnd: WARNING! initial entropy low (%u).\n",
1158 rndpool_get_entropy_count(&rnd_global.pool)); 1158 rndpool_get_entropy_count(&rnd_global.pool));
1159 /* Try once again to put something in the pool */ 1159 /* Try once again to put something in the pool */
1160 c = rnd_counter(); 1160 c = rnd_counter();
1161 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1); 1161 rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
1162 } 1162 }
1163 1163
1164#ifdef DIAGNOSTIC 1164#ifdef DIAGNOSTIC
1165 while (!rnd_tested) { 1165 while (!rnd_tested) {
1166 entropy_count = rndpool_get_entropy_count(&rnd_global.pool); 1166 entropy_count = rndpool_get_entropy_count(&rnd_global.pool);
1167 rnd_printf_verbose("rnd: starting statistical RNG test," 1167 rnd_printf_verbose("rnd: starting statistical RNG test,"
1168 " entropy = %d.\n", 1168 " entropy = %d.\n",
1169 entropy_count); 1169 entropy_count);
1170 if (rndpool_extract_data(&rnd_global.pool, rnd_rt.rt_b, 1170 if (rndpool_extract_data(&rnd_global.pool, rnd_rt.rt_b,
1171 sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY) 1171 sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY)
1172 != sizeof(rnd_rt.rt_b)) { 1172 != sizeof(rnd_rt.rt_b)) {
1173 panic("rnd: could not get bits for statistical test"); 1173 panic("rnd: could not get bits for statistical test");
1174 } 1174 }
1175 /* 1175 /*
1176 * Stash the tested bits so we can put them back in the 1176 * Stash the tested bits so we can put them back in the
1177 * pool, restoring the entropy count. DO NOT rely on 1177 * pool, restoring the entropy count. DO NOT rely on
1178 * rngtest to maintain the bits pristine -- we could end 1178 * rngtest to maintain the bits pristine -- we could end
1179 * up adding back non-random data claiming it were pure 1179 * up adding back non-random data claiming it were pure
1180 * entropy. 1180 * entropy.
1181 */ 1181 */
1182 memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b)); 1182 memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b));
1183 strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name)); 1183 strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name));
1184 if (rngtest(&rnd_rt)) { 1184 if (rngtest(&rnd_rt)) {
1185 /* 1185 /*
1186 * The probabiliity of a Type I error is 3/10000, 1186 * The probabiliity of a Type I error is 3/10000,
1187 * but note this can only happen at boot time. 1187 * but note this can only happen at boot time.
1188 * The relevant standard says to reset the module, 1188 * The relevant standard says to reset the module,
1189 * but developers objected... 1189 * but developers objected...
1190 */ 1190 */
1191 rnd_printf("rnd: WARNING, ENTROPY POOL FAILED " 1191 rnd_printf("rnd: WARNING, ENTROPY POOL FAILED "
1192 "STATISTICAL TEST!\n"); 1192 "STATISTICAL TEST!\n");
1193 continue; 1193 continue;
1194 } 1194 }
1195 memset(&rnd_rt, 0, sizeof(rnd_rt)); 1195 memset(&rnd_rt, 0, sizeof(rnd_rt));
1196 rndpool_add_data(&rnd_global.pool, rnd_testbits, 1196 rndpool_add_data(&rnd_global.pool, rnd_testbits,
1197 sizeof(rnd_testbits), entropy_count); 1197 sizeof(rnd_testbits), entropy_count);
1198 memset(rnd_testbits, 0, sizeof(rnd_testbits)); 1198 memset(rnd_testbits, 0, sizeof(rnd_testbits));
1199 rnd_printf_verbose("rnd: statistical RNG test done," 1199 rnd_printf_verbose("rnd: statistical RNG test done,"
1200 " entropy = %d.\n", 1200 " entropy = %d.\n",
1201 rndpool_get_entropy_count(&rnd_global.pool)); 1201 rndpool_get_entropy_count(&rnd_global.pool));
1202 rnd_tested++; 1202 rnd_tested++;
1203 } 1203 }
1204#endif 1204#endif
1205 entropy_count = rndpool_get_entropy_count(&rnd_global.pool); 1205 entropy_count = rndpool_get_entropy_count(&rnd_global.pool);
1206 retval = rndpool_extract_data(&rnd_global.pool, p, len, flags); 1206 retval = rndpool_extract_data(&rnd_global.pool, p, len, flags);
1207 mutex_spin_exit(&rnd_global.lock); 1207 mutex_spin_exit(&rnd_global.lock);
1208 1208
1209 if (entropy_count < (RND_ENTROPY_THRESHOLD * 2 + len) * NBBY) { 1209 if (entropy_count < (RND_ENTROPY_THRESHOLD * 2 + len) * NBBY) {
1210 rnd_printf_verbose("rnd: empty, asking for %d bytes\n", 1210 rnd_printf_verbose("rnd: empty, asking for %d bytes\n",
1211 (int)(howmany((RND_POOLBITS - entropy_count), NBBY))); 1211 (int)(howmany((RND_POOLBITS - entropy_count), NBBY)));
1212 rnd_getmore(howmany((RND_POOLBITS - entropy_count), NBBY)); 1212 rnd_getmore(howmany((RND_POOLBITS - entropy_count), NBBY));
1213 } 1213 }
1214 1214
1215 return retval; 1215 return retval;
1216} 1216}
1217 1217
1218/* 1218/*
1219 * Fill the buffer with as much entropy as we can. Return true if it 1219 * Fill the buffer with as much entropy as we can. Return true if it
1220 * has full entropy and false if not. 1220 * has full entropy and false if not.
1221 */ 1221 */
1222bool 1222bool
1223rnd_extract(void *buffer, size_t bytes) 1223rnd_extract(void *buffer, size_t bytes)
1224{ 1224{
1225 const size_t extracted = rnd_extract_data(buffer, bytes, 1225 const size_t extracted = rnd_extract_data(buffer, bytes,
1226 RND_EXTRACT_GOOD); 1226 RND_EXTRACT_GOOD);
1227 1227
1228 if (extracted < bytes) { 1228 if (extracted < bytes) {
1229 rnd_getmore(bytes - extracted); 1229 rnd_getmore(bytes - extracted);
1230 (void)rnd_extract_data((uint8_t *)buffer + extracted, 1230 (void)rnd_extract_data((uint8_t *)buffer + extracted,
1231 bytes - extracted, RND_EXTRACT_ANY); 1231 bytes - extracted, RND_EXTRACT_ANY);
1232 return false; 1232 return false;
1233 } 1233 }
1234 1234
1235 return true; 1235 return true;
1236} 1236}
1237 1237
1238/* 1238/*
1239 * If we have as much entropy as is requested, fill the buffer with it 1239 * If we have as much entropy as is requested, fill the buffer with it
1240 * and return true. Otherwise, leave the buffer alone and return 1240 * and return true. Otherwise, leave the buffer alone and return
1241 * false. 1241 * false.
1242 */ 1242 */
1243 1243
1244CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL); 1244CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL);
1245CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD)); 1245CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD));
1246CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <= 1246CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <=
1247 (0xffffffffUL / NBBY)); 1247 (0xffffffffUL / NBBY));
1248 1248
1249bool 1249bool
1250rnd_tryextract(void *buffer, size_t bytes) 1250rnd_tryextract(void *buffer, size_t bytes)
1251{ 1251{
1252 uint32_t bits_needed, bytes_requested; 1252 uint32_t bits_needed, bytes_requested;
1253 1253
1254 KASSERT(bytes <= RNDSINK_MAX_BYTES); 1254 KASSERT(bytes <= RNDSINK_MAX_BYTES);
1255 bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY); 1255 bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY);
1256 1256
1257 mutex_spin_enter(&rnd_global.lock); 1257 mutex_spin_enter(&rnd_global.lock);
1258 if (bits_needed <= rndpool_get_entropy_count(&rnd_global.pool)) { 1258 if (bits_needed <= rndpool_get_entropy_count(&rnd_global.pool)) {
1259 const uint32_t extracted __diagused = 1259 const uint32_t extracted __diagused =
1260 rndpool_extract_data(&rnd_global.pool, buffer, bytes, 1260 rndpool_extract_data(&rnd_global.pool, buffer, bytes,
1261 RND_EXTRACT_GOOD); 1261 RND_EXTRACT_GOOD);
1262 1262
1263 KASSERT(extracted == bytes); 1263 KASSERT(extracted == bytes);
1264 bytes_requested = 0; 1264 bytes_requested = 0;
1265 } else { 1265 } else {
1266 /* XXX Figure the threshold into this... */ 1266 /* XXX Figure the threshold into this... */
1267 bytes_requested = howmany((bits_needed - 1267 bytes_requested = howmany((bits_needed -
1268 rndpool_get_entropy_count(&rnd_global.pool)), NBBY); 1268 rndpool_get_entropy_count(&rnd_global.pool)), NBBY);
1269 KASSERT(0 < bytes_requested); 1269 KASSERT(0 < bytes_requested);
1270 } 1270 }
1271 mutex_spin_exit(&rnd_global.lock); 1271 mutex_spin_exit(&rnd_global.lock);
1272 1272
1273 if (0 < bytes_requested) 1273 if (0 < bytes_requested)
1274 rnd_getmore(bytes_requested); 1274 rnd_getmore(bytes_requested);
1275 1275
1276 return bytes_requested == 0; 1276 return bytes_requested == 0;
1277} 1277}
1278 1278
1279void 1279void
1280rnd_seed(void *base, size_t len) 1280rnd_seed(void *base, size_t len)
1281{ 1281{
1282 SHA1_CTX s; 1282 SHA1_CTX s;
1283 uint8_t digest[SHA1_DIGEST_LENGTH]; 1283 uint8_t digest[SHA1_DIGEST_LENGTH];
1284 1284
1285 if (len != sizeof(*boot_rsp)) { 1285 if (len != sizeof(*boot_rsp)) {
1286 rnd_printf("rnd: bad seed length %d\n", (int)len); 1286 rnd_printf("rnd: bad seed length %d\n", (int)len);
1287 return; 1287 return;
1288 } 1288 }
1289 1289
1290 boot_rsp = (rndsave_t *)base; 1290 boot_rsp = (rndsave_t *)base;
1291 SHA1Init(&s); 1291 SHA1Init(&s);
1292 SHA1Update(&s, (uint8_t *)&boot_rsp->entropy, 1292 SHA1Update(&s, (uint8_t *)&boot_rsp->entropy,
1293 sizeof(boot_rsp->entropy)); 1293 sizeof(boot_rsp->entropy));
1294 SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data)); 1294 SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data));
1295 SHA1Final(digest, &s); 1295 SHA1Final(digest, &s);
1296 1296
1297 if (memcmp(digest, boot_rsp->digest, sizeof(digest))) { 1297 if (memcmp(digest, boot_rsp->digest, sizeof(digest))) {
1298 rnd_printf("rnd: bad seed checksum\n"); 1298 rnd_printf("rnd: bad seed checksum\n");
1299 return; 1299 return;
1300 } 1300 }
1301 1301
1302 /* 1302 /*
1303 * It's not really well-defined whether bootloader-supplied 1303 * It's not really well-defined whether bootloader-supplied
1304 * modules run before or after rnd_init(). Handle both cases. 1304 * modules run before or after rnd_init(). Handle both cases.
1305 */ 1305 */
1306 if (rnd_ready) { 1306 if (rnd_ready) {
1307 rnd_printf_verbose("rnd: ready," 1307 rnd_printf_verbose("rnd: ready,"
1308 " feeding in seed data directly.\n"); 1308 " feeding in seed data directly.\n");
1309 mutex_spin_enter(&rnd_global.lock); 1309 mutex_spin_enter(&rnd_global.lock);
1310 rndpool_add_data(&rnd_global.pool, boot_rsp->data, 1310 rndpool_add_data(&rnd_global.pool, boot_rsp->data,
1311 sizeof(boot_rsp->data), 1311 sizeof(boot_rsp->data),
1312 MIN(boot_rsp->entropy, RND_POOLBITS / 2)); 1312 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
1313 memset(boot_rsp, 0, sizeof(*boot_rsp)); 1313 memset(boot_rsp, 0, sizeof(*boot_rsp));
1314 mutex_spin_exit(&rnd_global.lock); 1314 mutex_spin_exit(&rnd_global.lock);
1315 } else { 1315 } else {
1316 rnd_printf_verbose("rnd: not ready, deferring seed feed.\n"); 1316 rnd_printf_verbose("rnd: not ready, deferring seed feed.\n");
1317 } 1317 }
1318} 1318}
1319 1319
1320static void 1320static void
1321krndsource_to_rndsource(krndsource_t *kr, rndsource_t *r) 1321krndsource_to_rndsource(krndsource_t *kr, rndsource_t *r)
1322{ 1322{
1323 memset(r, 0, sizeof(*r)); 1323 memset(r, 0, sizeof(*r));
1324 strlcpy(r->name, kr->name, sizeof(r->name)); 1324 strlcpy(r->name, kr->name, sizeof(r->name));
1325 r->total = kr->total; 1325 r->total = kr->total;
1326 r->type = kr->type; 1326 r->type = kr->type;
1327 r->flags = kr->flags; 1327 r->flags = kr->flags;
1328} 1328}
1329 1329
1330static void 1330static void
1331krndsource_to_rndsource_est(krndsource_t *kr, rndsource_est_t *re) 1331krndsource_to_rndsource_est(krndsource_t *kr, rndsource_est_t *re)
1332{ 1332{
1333 memset(re, 0, sizeof(*re)); 1333 memset(re, 0, sizeof(*re));
1334 krndsource_to_rndsource(kr, &re->rt); 1334 krndsource_to_rndsource(kr, &re->rt);
1335 re->dt_samples = kr->time_delta.insamples; 1335 re->dt_samples = kr->time_delta.insamples;
1336 re->dt_total = kr->time_delta.outbits; 1336 re->dt_total = kr->time_delta.outbits;
1337 re->dv_samples = kr->value_delta.insamples; 1337 re->dv_samples = kr->value_delta.insamples;
1338 re->dv_total = kr->value_delta.outbits; 1338 re->dv_total = kr->value_delta.outbits;
1339} 1339}
1340 1340
1341static void 1341static void
1342krs_setflags(krndsource_t *kr, uint32_t flags, uint32_t mask) 1342krs_setflags(krndsource_t *kr, uint32_t flags, uint32_t mask)
1343{ 1343{
1344 uint32_t oflags = kr->flags; 1344 uint32_t oflags = kr->flags;
1345 1345
1346 kr->flags &= ~mask; 1346 kr->flags &= ~mask;
1347 kr->flags |= (flags & mask); 1347 kr->flags |= (flags & mask);
1348 1348
1349 if (oflags & RND_FLAG_HASENABLE && 1349 if (oflags & RND_FLAG_HASENABLE &&
1350 ((oflags & RND_FLAG_NO_COLLECT) != (flags & RND_FLAG_NO_COLLECT))) { 1350 ((oflags & RND_FLAG_NO_COLLECT) != (flags & RND_FLAG_NO_COLLECT))) {
1351 kr->enable(kr, !(flags & RND_FLAG_NO_COLLECT)); 1351 kr->enable(kr, !(flags & RND_FLAG_NO_COLLECT));
1352 } 1352 }
1353} 1353}
1354 1354
1355int 1355int
1356rnd_system_ioctl(struct file *fp, u_long cmd, void *addr) 1356rnd_system_ioctl(struct file *fp, u_long cmd, void *addr)
1357{ 1357{
1358 krndsource_t *kr; 1358 krndsource_t *kr;
1359 rndstat_t *rst; 1359 rndstat_t *rst;
1360 rndstat_name_t *rstnm; 1360 rndstat_name_t *rstnm;
1361 rndstat_est_t *rset; 1361 rndstat_est_t *rset;
1362 rndstat_est_name_t *rsetnm; 1362 rndstat_est_name_t *rsetnm;
1363 rndctl_t *rctl; 1363 rndctl_t *rctl;
1364 rnddata_t *rnddata; 1364 rnddata_t *rnddata;
1365 uint32_t count, start; 1365 uint32_t count, start;
1366 int ret = 0; 1366 int ret = 0;
1367 int estimate_ok = 0, estimate = 0; 1367 int estimate_ok = 0, estimate = 0;
1368 1368
1369 switch (cmd) { 1369 switch (cmd) {
1370 case RNDGETENTCNT: 1370 case RNDGETENTCNT:
1371 break; 1371 break;
1372 1372
1373 case RNDGETPOOLSTAT: 1373 case RNDGETPOOLSTAT:
1374 case RNDGETSRCNUM: 1374 case RNDGETSRCNUM:
1375 case RNDGETSRCNAME: 1375 case RNDGETSRCNAME:
1376 case RNDGETESTNUM: 1376 case RNDGETESTNUM:
1377 case RNDGETESTNAME: 1377 case RNDGETESTNAME:
1378 ret = kauth_authorize_device(curlwp->l_cred, 1378 ret = kauth_authorize_device(curlwp->l_cred,
1379 KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL); 1379 KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
1380 if (ret) 1380 if (ret)
1381 return (ret); 1381 return (ret);
1382 break; 1382 break;
1383 1383
1384 case RNDCTL: 1384 case RNDCTL:
1385 ret = kauth_authorize_device(curlwp->l_cred, 1385 ret = kauth_authorize_device(curlwp->l_cred,
1386 KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL); 1386 KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
1387 if (ret) 1387 if (ret)
1388 return (ret); 1388 return (ret);
1389 break; 1389 break;
1390 1390
1391 case RNDADDDATA: 1391 case RNDADDDATA:
1392 ret = kauth_authorize_device(curlwp->l_cred, 1392 ret = kauth_authorize_device(curlwp->l_cred,
1393 KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL); 1393 KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
1394 if (ret) 1394 if (ret)
1395 return (ret); 1395 return (ret);
1396 estimate_ok = !kauth_authorize_device(curlwp->l_cred, 1396 estimate_ok = !kauth_authorize_device(curlwp->l_cred,
1397 KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL); 1397 KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL);
1398 break; 1398 break;
1399 1399
1400 default: 1400 default:
1401#ifdef COMPAT_50 1401#ifdef COMPAT_50
1402 return compat_50_rnd_ioctl(fp, cmd, addr); 1402 return compat_50_rnd_ioctl(fp, cmd, addr);
1403#else 1403#else
1404 return ENOTTY; 1404 return ENOTTY;
1405#endif 1405#endif
1406 } 1406 }
1407 1407
1408 switch (cmd) { 1408 switch (cmd) {
1409 case RNDGETENTCNT: 1409 case RNDGETENTCNT:
1410 mutex_spin_enter(&rnd_global.lock); 1410 mutex_spin_enter(&rnd_global.lock);
1411 *(uint32_t *)addr = rndpool_get_entropy_count(&rnd_global.pool); 1411 *(uint32_t *)addr = rndpool_get_entropy_count(&rnd_global.pool);
1412 mutex_spin_exit(&rnd_global.lock); 1412 mutex_spin_exit(&rnd_global.lock);
1413 break; 1413 break;
1414 1414
1415 case RNDGETPOOLSTAT: 1415 case RNDGETPOOLSTAT:
1416 mutex_spin_enter(&rnd_global.lock); 1416 mutex_spin_enter(&rnd_global.lock);
1417 rndpool_get_stats(&rnd_global.pool, addr, 1417 rndpool_get_stats(&rnd_global.pool, addr,
1418 sizeof(rndpoolstat_t)); 1418 sizeof(rndpoolstat_t));
1419 mutex_spin_exit(&rnd_global.lock); 1419 mutex_spin_exit(&rnd_global.lock);
1420 break; 1420 break;
1421 1421
1422 case RNDGETSRCNUM: 1422 case RNDGETSRCNUM:
1423 rst = (rndstat_t *)addr; 1423 rst = (rndstat_t *)addr;
1424 1424
1425 if (rst->count == 0) 1425 if (rst->count == 0)
1426 break; 1426 break;
1427 1427
1428 if (rst->count > RND_MAXSTATCOUNT) 1428 if (rst->count > RND_MAXSTATCOUNT)
1429 return (EINVAL); 1429 return (EINVAL);
1430 1430
1431 mutex_spin_enter(&rnd_global.lock); 1431 mutex_spin_enter(&rnd_global.lock);
1432 /* 1432 /*
1433 * Find the starting source by running through the 1433 * Find the starting source by running through the
1434 * list of sources. 1434 * list of sources.
1435 */ 1435 */
1436 kr = LIST_FIRST(&rnd_global.sources); 1436 kr = LIST_FIRST(&rnd_global.sources);
1437 start = rst->start; 1437 start = rst->start;
1438 while (kr != NULL && start >= 1) { 1438 while (kr != NULL && start >= 1) {
1439 kr = LIST_NEXT(kr, list); 1439 kr = LIST_NEXT(kr, list);
1440 start--; 1440 start--;
1441 } 1441 }
1442 1442
1443 /* 1443 /*
1444 * Return up to as many structures as the user asked 1444 * Return up to as many structures as the user asked
1445 * for. If we run out of sources, a count of zero 1445 * for. If we run out of sources, a count of zero
1446 * will be returned, without an error. 1446 * will be returned, without an error.
1447 */ 1447 */
1448 for (count = 0; count < rst->count && kr != NULL; count++) { 1448 for (count = 0; count < rst->count && kr != NULL; count++) {
1449 krndsource_to_rndsource(kr, &rst->source[count]); 1449 krndsource_to_rndsource(kr, &rst->source[count]);
1450 kr = LIST_NEXT(kr, list); 1450 kr = LIST_NEXT(kr, list);
1451 } 1451 }
1452 1452
1453 rst->count = count; 1453 rst->count = count;
1454 1454
1455 mutex_spin_exit(&rnd_global.lock); 1455 mutex_spin_exit(&rnd_global.lock);
1456 break; 1456 break;
1457 1457
1458 case RNDGETESTNUM: 1458 case RNDGETESTNUM:
1459 rset = (rndstat_est_t *)addr; 1459 rset = (rndstat_est_t *)addr;
1460 1460
1461 if (rset->count == 0) 1461 if (rset->count == 0)
1462 break; 1462 break;
1463 1463
1464 if (rset->count > RND_MAXSTATCOUNT) 1464 if (rset->count > RND_MAXSTATCOUNT)
1465 return (EINVAL); 1465 return (EINVAL);
1466 1466
1467 mutex_spin_enter(&rnd_global.lock); 1467 mutex_spin_enter(&rnd_global.lock);
1468 /* 1468 /*
1469 * Find the starting source by running through the 1469 * Find the starting source by running through the
1470 * list of sources. 1470 * list of sources.
1471 */ 1471 */
1472 kr = LIST_FIRST(&rnd_global.sources); 1472 kr = LIST_FIRST(&rnd_global.sources);
1473 start = rset->start; 1473 start = rset->start;
1474 while (kr != NULL && start > 1) { 1474 while (kr != NULL && start > 1) {
1475 kr = LIST_NEXT(kr, list); 1475 kr = LIST_NEXT(kr, list);
1476 start--; 1476 start--;
1477 } 1477 }
1478 1478
1479 /* Return up to as many structures as the user asked 1479 /* Return up to as many structures as the user asked
1480 * for. If we run out of sources, a count of zero 1480 * for. If we run out of sources, a count of zero
1481 * will be returned, without an error. 1481 * will be returned, without an error.
1482 */ 1482 */
1483 for (count = 0; count < rset->count && kr != NULL; count++) { 1483 for (count = 0; count < rset->count && kr != NULL; count++) {
1484 krndsource_to_rndsource_est(kr, &rset->source[count]); 1484 krndsource_to_rndsource_est(kr, &rset->source[count]);
1485 kr = LIST_NEXT(kr, list); 1485 kr = LIST_NEXT(kr, list);
1486 } 1486 }
1487 1487
1488 rset->count = count; 1488 rset->count = count;
1489 1489
1490 mutex_spin_exit(&rnd_global.lock); 1490 mutex_spin_exit(&rnd_global.lock);
1491 break; 1491 break;
1492 1492
1493 case RNDGETSRCNAME: 1493 case RNDGETSRCNAME:
1494 /* 1494 /*
1495 * Scan through the list, trying to find the name. 1495 * Scan through the list, trying to find the name.
1496 */ 1496 */
1497 mutex_spin_enter(&rnd_global.lock); 1497 mutex_spin_enter(&rnd_global.lock);
1498 rstnm = (rndstat_name_t *)addr; 1498 rstnm = (rndstat_name_t *)addr;
1499 kr = LIST_FIRST(&rnd_global.sources); 1499 kr = LIST_FIRST(&rnd_global.sources);
1500 while (kr != NULL) { 1500 while (kr != NULL) {
1501 if (strncmp(kr->name, rstnm->name, 1501 if (strncmp(kr->name, rstnm->name,
1502 MIN(sizeof(kr->name), 1502 MIN(sizeof(kr->name),
1503 sizeof(rstnm->name))) == 0) { 1503 sizeof(rstnm->name))) == 0) {
1504 krndsource_to_rndsource(kr, &rstnm->source); 1504 krndsource_to_rndsource(kr, &rstnm->source);
1505 mutex_spin_exit(&rnd_global.lock); 1505 mutex_spin_exit(&rnd_global.lock);
1506 return (0); 1506 return (0);
1507 } 1507 }
1508 kr = LIST_NEXT(kr, list); 1508 kr = LIST_NEXT(kr, list);
1509 } 1509 }
1510 mutex_spin_exit(&rnd_global.lock); 1510 mutex_spin_exit(&rnd_global.lock);
1511 1511
1512 ret = ENOENT; /* name not found */ 1512 ret = ENOENT; /* name not found */
1513 1513
1514 break; 1514 break;
1515 1515
1516 case RNDGETESTNAME: 1516 case RNDGETESTNAME:
1517 /* 1517 /*
1518 * Scan through the list, trying to find the name. 1518 * Scan through the list, trying to find the name.
1519 */ 1519 */
1520 mutex_spin_enter(&rnd_global.lock); 1520 mutex_spin_enter(&rnd_global.lock);
1521 rsetnm = (rndstat_est_name_t *)addr; 1521 rsetnm = (rndstat_est_name_t *)addr;
1522 kr = LIST_FIRST(&rnd_global.sources); 1522 kr = LIST_FIRST(&rnd_global.sources);
1523 while (kr != NULL) { 1523 while (kr != NULL) {
1524 if (strncmp(kr->name, rsetnm->name, 1524 if (strncmp(kr->name, rsetnm->name,
1525 MIN(sizeof(kr->name), 1525 MIN(sizeof(kr->name),
1526 sizeof(rsetnm->name))) == 0) { 1526 sizeof(rsetnm->name))) == 0) {
1527 krndsource_to_rndsource_est(kr, 1527 krndsource_to_rndsource_est(kr,
1528 &rsetnm->source); 1528 &rsetnm->source);
1529 mutex_spin_exit(&rnd_global.lock); 1529 mutex_spin_exit(&rnd_global.lock);
1530 return (0); 1530 return (0);
1531 } 1531 }
1532 kr = LIST_NEXT(kr, list); 1532 kr = LIST_NEXT(kr, list);
1533 } 1533 }
1534 mutex_spin_exit(&rnd_global.lock); 1534 mutex_spin_exit(&rnd_global.lock);
1535 1535
1536 ret = ENOENT; /* name not found */ 1536 ret = ENOENT; /* name not found */
1537 1537
1538 break; 1538 break;
1539 1539
1540 case RNDCTL: 1540 case RNDCTL:
1541 /* 1541 /*
1542 * Set flags to enable/disable entropy counting and/or 1542 * Set flags to enable/disable entropy counting and/or
1543 * collection. 1543 * collection.
1544 */ 1544 */
1545 mutex_spin_enter(&rnd_global.lock); 1545 mutex_spin_enter(&rnd_global.lock);
1546 rctl = (rndctl_t *)addr; 1546 rctl = (rndctl_t *)addr;
1547 kr = LIST_FIRST(&rnd_global.sources); 1547 kr = LIST_FIRST(&rnd_global.sources);
1548 1548
1549 /* 1549 /*
1550 * Flags set apply to all sources of this type. 1550 * Flags set apply to all sources of this type.
1551 */ 1551 */
1552 if (rctl->type != 0xff) { 1552 if (rctl->type != 0xff) {
1553 while (kr != NULL) { 1553 while (kr != NULL) {
1554 if (kr->type == rctl->type) { 1554 if (kr->type == rctl->type) {
1555 krs_setflags(kr, 1555 krs_setflags(kr,
1556 rctl->flags, rctl->mask); 1556 rctl->flags, rctl->mask);
1557 } 1557 }
1558 kr = LIST_NEXT(kr, list); 1558 kr = LIST_NEXT(kr, list);
1559 } 1559 }
1560 mutex_spin_exit(&rnd_global.lock); 1560 mutex_spin_exit(&rnd_global.lock);
1561 return (0); 1561 return (0);
1562 } 1562 }
1563 1563
1564 /* 1564 /*
1565 * scan through the list, trying to find the name 1565 * scan through the list, trying to find the name
1566 */ 1566 */
1567 while (kr != NULL) { 1567 while (kr != NULL) {
1568 if (strncmp(kr->name, rctl->name, 1568 if (strncmp(kr->name, rctl->name,
1569 MIN(sizeof(kr->name), 1569 MIN(sizeof(kr->name),
1570 sizeof(rctl->name))) == 0) { 1570 sizeof(rctl->name))) == 0) {
1571 krs_setflags(kr, rctl->flags, rctl->mask); 1571 krs_setflags(kr, rctl->flags, rctl->mask);
1572 mutex_spin_exit(&rnd_global.lock); 1572 mutex_spin_exit(&rnd_global.lock);
1573 return (0); 1573 return (0);
1574 } 1574 }
1575 kr = LIST_NEXT(kr, list); 1575 kr = LIST_NEXT(kr, list);
1576 } 1576 }
1577 1577
1578 mutex_spin_exit(&rnd_global.lock); 1578 mutex_spin_exit(&rnd_global.lock);
1579 ret = ENOENT; /* name not found */ 1579 ret = ENOENT; /* name not found */
1580 1580
1581 break; 1581 break;
1582 1582
1583 case RNDADDDATA: 1583 case RNDADDDATA:
1584 /* 1584 /*
1585 * Don't seed twice if our bootloader has 1585 * Don't seed twice if our bootloader has
1586 * seed loading support. 1586 * seed loading support.
1587 */ 1587 */
1588 if (!boot_rsp) { 1588 if (!boot_rsp) {
1589 rnddata = (rnddata_t *)addr; 1589 rnddata = (rnddata_t *)addr;
1590 1590
1591 if (rnddata->len > sizeof(rnddata->data)) 1591 if (rnddata->len > sizeof(rnddata->data))
1592 return EINVAL; 1592 return EINVAL;
1593 1593
1594 if (estimate_ok) { 1594 if (estimate_ok) {
1595 /* 1595 /*
1596 * Do not accept absurd entropy estimates, and 1596 * Do not accept absurd entropy estimates, and
1597 * do not flood the pool with entropy such that 1597 * do not flood the pool with entropy such that
1598 * new samples are discarded henceforth. 1598 * new samples are discarded henceforth.
1599 */ 1599 */
1600 estimate = MIN((rnddata->len * NBBY) / 2, 1600 estimate = MIN((rnddata->len * NBBY) / 2,
1601 MIN(rnddata->entropy, 1601 MIN(rnddata->entropy,
1602 RND_POOLBITS / 2)); 1602 RND_POOLBITS / 2));
1603 } else { 1603 } else {
1604 estimate = 0; 1604 estimate = 0;
1605 } 1605 }
1606 1606
1607 mutex_spin_enter(&rnd_global.lock); 1607 mutex_spin_enter(&rnd_global.lock);
1608 rndpool_add_data(&rnd_global.pool, rnddata->data, 1608 rndpool_add_data(&rnd_global.pool, rnddata->data,
1609 rnddata->len, estimate); 1609 rnddata->len, estimate);
 1610 rnd_entropy_added();
1610 mutex_spin_exit(&rnd_global.lock); 1611 mutex_spin_exit(&rnd_global.lock);
1611 1612
1612 rnd_wakeup_readers(); 1613 rnd_wakeup_readers();
1613 } else { 1614 } else {
1614 rnd_printf_verbose("rnd" 1615 rnd_printf_verbose("rnd"
1615 ": already seeded by boot loader\n"); 1616 ": already seeded by boot loader\n");
1616 } 1617 }
1617 break; 1618 break;
1618 1619
1619 default: 1620 default:
1620 return ENOTTY; 1621 return ENOTTY;
1621 } 1622 }
1622 1623
1623 return (ret); 1624 return (ret);
1624} 1625}