Tue Apr 14 14:18:57 2015 UTC ()
Omit needless variable.


(riastradh)
diff -r1.62 -r1.63 src/sys/kern/kern_rndq.c

cvs diff -r1.62 -r1.63 src/sys/kern/Attic/kern_rndq.c (switch to unified diff)

--- src/sys/kern/Attic/kern_rndq.c 2015/04/14 14:16:34 1.62
+++ src/sys/kern/Attic/kern_rndq.c 2015/04/14 14:18:57 1.63
@@ -1,1614 +1,1613 @@
1/* $NetBSD: kern_rndq.c,v 1.62 2015/04/14 14:16:34 riastradh Exp $ */ 1/* $NetBSD: kern_rndq.c,v 1.63 2015/04/14 14:18:57 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997-2013 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997-2013 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon. 8 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon.
9 * This code uses ideas and algorithms from the Linux driver written by 9 * This code uses ideas and algorithms from the Linux driver written by
10 * Ted Ts'o. 10 * Ted Ts'o.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
15 * 1. Redistributions of source code must retain the above copyright 15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer. 16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright 17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the 18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution. 19 * documentation and/or other materials provided with the distribution.
20 * 20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE. 31 * POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#include <sys/cdefs.h> 34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.62 2015/04/14 14:16:34 riastradh Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.63 2015/04/14 14:18:57 riastradh Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/atomic.h> 38#include <sys/atomic.h>
39#include <sys/ioctl.h> 39#include <sys/ioctl.h>
40#include <sys/fcntl.h> 40#include <sys/fcntl.h>
41#include <sys/select.h> 41#include <sys/select.h>
42#include <sys/poll.h> 42#include <sys/poll.h>
43#include <sys/kmem.h> 43#include <sys/kmem.h>
44#include <sys/mutex.h> 44#include <sys/mutex.h>
45#include <sys/proc.h> 45#include <sys/proc.h>
46#include <sys/kernel.h> 46#include <sys/kernel.h>
47#include <sys/conf.h> 47#include <sys/conf.h>
48#include <sys/systm.h> 48#include <sys/systm.h>
49#include <sys/callout.h> 49#include <sys/callout.h>
50#include <sys/intr.h> 50#include <sys/intr.h>
51#include <sys/rnd.h> 51#include <sys/rnd.h>
52#include <sys/rndpool.h> 52#include <sys/rndpool.h>
53#include <sys/rndsink.h> 53#include <sys/rndsink.h>
54#include <sys/rndsource.h> 54#include <sys/rndsource.h>
55#include <sys/vnode.h> 55#include <sys/vnode.h>
56#include <sys/pool.h> 56#include <sys/pool.h>
57#include <sys/kauth.h> 57#include <sys/kauth.h>
58#include <sys/once.h> 58#include <sys/once.h>
59#include <sys/rngtest.h> 59#include <sys/rngtest.h>
60 60
61#include <dev/rnd_private.h> 61#include <dev/rnd_private.h>
62 62
63#ifdef COMPAT_50 63#ifdef COMPAT_50
64#include <compat/sys/rnd.h> 64#include <compat/sys/rnd.h>
65#endif 65#endif
66 66
67#if defined(__HAVE_CPU_COUNTER) 67#if defined(__HAVE_CPU_COUNTER)
68#include <machine/cpu_counter.h> 68#include <machine/cpu_counter.h>
69#endif 69#endif
70 70
71#ifdef RND_DEBUG 71#ifdef RND_DEBUG
72#define DPRINTF(l,x) if (rnd_debug & (l)) rnd_printf x 72#define DPRINTF(l,x) if (rnd_debug & (l)) rnd_printf x
73int rnd_debug = 0; 73int rnd_debug = 0;
74#else 74#else
75#define DPRINTF(l,x) 75#define DPRINTF(l,x)
76#endif 76#endif
77 77
78/* 78/*
79 * list devices attached 79 * list devices attached
80 */ 80 */
81#if 0 81#if 0
82#define RND_VERBOSE 82#define RND_VERBOSE
83#endif 83#endif
84 84
85#ifdef RND_VERBOSE 85#ifdef RND_VERBOSE
86#define rnd_printf_verbose(fmt, ...) rnd_printf(fmt, ##__VA_ARGS__) 86#define rnd_printf_verbose(fmt, ...) rnd_printf(fmt, ##__VA_ARGS__)
87#else 87#else
88#define rnd_printf_verbose(fmt, ...) ((void)0) 88#define rnd_printf_verbose(fmt, ...) ((void)0)
89#endif 89#endif
90 90
91#ifdef RND_VERBOSE 91#ifdef RND_VERBOSE
92static unsigned int deltacnt; 92static unsigned int deltacnt;
93#endif 93#endif
94 94
95/* 95/*
96 * This is a little bit of state information attached to each device that we 96 * This is a little bit of state information attached to each device that we
97 * collect entropy from. This is simply a collection buffer, and when it 97 * collect entropy from. This is simply a collection buffer, and when it
98 * is full it will be "detached" from the source and added to the entropy 98 * is full it will be "detached" from the source and added to the entropy
99 * pool after entropy is distilled as much as possible. 99 * pool after entropy is distilled as much as possible.
100 */ 100 */
101#define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */ 101#define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */
102typedef struct _rnd_sample_t { 102typedef struct _rnd_sample_t {
103 SIMPLEQ_ENTRY(_rnd_sample_t) next; 103 SIMPLEQ_ENTRY(_rnd_sample_t) next;
104 krndsource_t *source; 104 krndsource_t *source;
105 int cursor; 105 int cursor;
106 int entropy; 106 int entropy;
107 uint32_t ts[RND_SAMPLE_COUNT]; 107 uint32_t ts[RND_SAMPLE_COUNT];
108 uint32_t values[RND_SAMPLE_COUNT]; 108 uint32_t values[RND_SAMPLE_COUNT];
109} rnd_sample_t; 109} rnd_sample_t;
110 110
111SIMPLEQ_HEAD(rnd_sampleq, _rnd_sample_t); 111SIMPLEQ_HEAD(rnd_sampleq, _rnd_sample_t);
112 112
113/* 113/*
114 * The sample queue. Samples are put into the queue and processed in a 114 * The sample queue. Samples are put into the queue and processed in a
115 * softint in order to limit the latency of adding a sample. 115 * softint in order to limit the latency of adding a sample.
116 */ 116 */
117static struct { 117static struct {
118 kmutex_t lock; 118 kmutex_t lock;
119 struct rnd_sampleq q; 119 struct rnd_sampleq q;
120} rnd_samples __cacheline_aligned; 120} rnd_samples __cacheline_aligned;
121 121
122/* 122/*
123 * Memory pool for sample buffers 123 * Memory pool for sample buffers
124 */ 124 */
125static pool_cache_t rnd_mempc; 125static pool_cache_t rnd_mempc;
126 126
127/* 127/*
128 * Global entropy pool and sources. 128 * Global entropy pool and sources.
129 */ 129 */
130static struct { 130static struct {
131 kmutex_t lock; 131 kmutex_t lock;
132 rndpool_t pool; 132 rndpool_t pool;
133 LIST_HEAD(, krndsource) sources; 133 LIST_HEAD(, krndsource) sources;
134} rnd_global __cacheline_aligned; 134} rnd_global __cacheline_aligned;
135 135
136/* 136/*
137 * This source is used to easily "remove" queue entries when the source 137 * This source is used to easily "remove" queue entries when the source
138 * which actually generated the events is going away. 138 * which actually generated the events is going away.
139 */ 139 */
140static krndsource_t rnd_source_no_collect = { 140static krndsource_t rnd_source_no_collect = {
141 /* LIST_ENTRY list */ 141 /* LIST_ENTRY list */
142 .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', 142 .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't',
143 0, 0, 0, 0, 0, 0, 0 }, 143 0, 0, 0, 0, 0, 0, 0 },
144 .total = 0, 144 .total = 0,
145 .type = RND_TYPE_UNKNOWN, 145 .type = RND_TYPE_UNKNOWN,
146 .flags = (RND_FLAG_NO_COLLECT | 146 .flags = (RND_FLAG_NO_COLLECT |
147 RND_FLAG_NO_ESTIMATE), 147 RND_FLAG_NO_ESTIMATE),
148 .state = NULL, 148 .state = NULL,
149 .test_cnt = 0, 149 .test_cnt = 0,
150 .test = NULL 150 .test = NULL
151}; 151};
152 152
153krndsource_t rnd_printf_source, rnd_autoconf_source; 153krndsource_t rnd_printf_source, rnd_autoconf_source;
154 154
155static void *rnd_process, *rnd_wakeup; 155static void *rnd_process, *rnd_wakeup;
156 156
157static inline uint32_t rnd_counter(void); 157static inline uint32_t rnd_counter(void);
158static void rnd_intr(void *); 158static void rnd_intr(void *);
159static void rnd_wake(void *); 159static void rnd_wake(void *);
160static void rnd_process_events(void); 160static void rnd_process_events(void);
161static void rnd_add_data_ts(krndsource_t *, const void *const, 161static void rnd_add_data_ts(krndsource_t *, const void *const,
162 uint32_t, uint32_t, uint32_t); 162 uint32_t, uint32_t, uint32_t);
163static inline void rnd_schedule_process(void); 163static inline void rnd_schedule_process(void);
164 164
165int rnd_ready = 0; 165int rnd_ready = 0;
166int rnd_initial_entropy = 0; 166int rnd_initial_entropy = 0;
167 167
168static int rnd_printing = 0; 168static int rnd_printing = 0;
169 169
170#ifdef DIAGNOSTIC 170#ifdef DIAGNOSTIC
171static int rnd_tested = 0; 171static int rnd_tested = 0;
172static rngtest_t rnd_rt; 172static rngtest_t rnd_rt;
173static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)]; 173static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)];
174#endif 174#endif
175 175
176static rndsave_t *boot_rsp; 176static rndsave_t *boot_rsp;
177 177
/*
 * printf for the entropy subsystem, guarded against recursion: if a
 * console/printf path itself feeds entropy (see rnd_printf_source),
 * printing from here could re-enter.  The rnd_printing flag makes
 * that re-entry a silent no-op.
 *
 * NOTE(review): the flag test/set is not atomic, only separated by
 * membars — concurrent callers on different CPUs may both print or
 * both skip.  This appears to be deliberate best-effort; confirm
 * before tightening.
 */
static inline void
rnd_printf(const char *fmt, ...)
{
	va_list ap;

	membar_consumer();
	if (rnd_printing) {
		return;
	}
	rnd_printing = 1;
	membar_producer();
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	rnd_printing = 0;
}
194 194
/*
 * Establish the two softints used by the entropy subsystem (sample
 * processing at SOFTINT_SERIAL, sink wakeup at SOFTINT_CLOCK), then
 * immediately schedule a processing pass to drain anything queued
 * before the softints existed.
 */
void
rnd_init_softint(void) {
	rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
	    rnd_intr, NULL);
	rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE,
	    rnd_wake, NULL);
	rnd_schedule_process();
}
203 203
204/* 204/*
205 * Generate a 32-bit counter. 205 * Generate a 32-bit counter.
206 */ 206 */
207static inline uint32_t 207static inline uint32_t
208rnd_counter(void) 208rnd_counter(void)
209{ 209{
210 struct bintime bt; 210 struct bintime bt;
211 uint32_t ret; 211 uint32_t ret;
212 212
213#if defined(__HAVE_CPU_COUNTER) 213#if defined(__HAVE_CPU_COUNTER)
214 if (cpu_hascounter()) 214 if (cpu_hascounter())
215 return cpu_counter32(); 215 return cpu_counter32();
216#endif 216#endif
217 if (!rnd_ready) 217 if (!rnd_ready)
218 /* Too early to call nanotime. */ 218 /* Too early to call nanotime. */
219 return 0; 219 return 0;
220 220
221 binuptime(&bt); 221 binuptime(&bt);
222 ret = bt.sec; 222 ret = bt.sec;
223 ret |= bt.sec >> 32; 223 ret |= bt.sec >> 32;
224 ret |= bt.frac; 224 ret |= bt.frac;
225 ret |= bt.frac >> 32; 225 ret |= bt.frac >> 32;
226 226
227 return ret; 227 return ret;
228} 228}
229 229
230/* 230/*
231 * We may be called from low IPL -- protect our softint. 231 * We may be called from low IPL -- protect our softint.
232 */ 232 */
233 233
/*
 * Schedule a softint with kernel preemption disabled across the call,
 * since we may be invoked from low IPL where preemption could move us
 * off-CPU mid-schedule.
 */
static inline void
rnd_schedule_softint(void *softint)
{
	kpreempt_disable();
	softint_schedule(softint);
	kpreempt_enable();
}
241 241
242static inline void 242static inline void
243rnd_schedule_process(void) 243rnd_schedule_process(void)
244{ 244{
245 if (__predict_true(rnd_process)) { 245 if (__predict_true(rnd_process)) {
246 rnd_schedule_softint(rnd_process); 246 rnd_schedule_softint(rnd_process);
247 return; 247 return;
248 }  248 }
249 rnd_process_events(); 249 rnd_process_events();
250} 250}
251 251
252static inline void 252static inline void
253rnd_schedule_wakeup(void) 253rnd_schedule_wakeup(void)
254{ 254{
255 if (__predict_true(rnd_wakeup)) { 255 if (__predict_true(rnd_wakeup)) {
256 rnd_schedule_softint(rnd_wakeup); 256 rnd_schedule_softint(rnd_wakeup);
257 return; 257 return;
258 } 258 }
259 rndsinks_distribute(); 259 rndsinks_distribute();
260} 260}
261 261
262/* 262/*
263 * Tell any sources with "feed me" callbacks that we are hungry. 263 * Tell any sources with "feed me" callbacks that we are hungry.
264 */ 264 */
265void 265void
266rnd_getmore(size_t byteswanted) 266rnd_getmore(size_t byteswanted)
267{ 267{
268 krndsource_t *rs; 268 krndsource_t *rs;
269 269
270 mutex_spin_enter(&rnd_global.lock); 270 mutex_spin_enter(&rnd_global.lock);
271 LIST_FOREACH(rs, &rnd_global.sources, list) { 271 LIST_FOREACH(rs, &rnd_global.sources, list) {
272 if (!ISSET(rs->flags, RND_FLAG_HASCB)) 272 if (!ISSET(rs->flags, RND_FLAG_HASCB))
273 continue; 273 continue;
274 KASSERT(rs->get != NULL); 274 KASSERT(rs->get != NULL);
275 KASSERT(rs->getarg != NULL); 275 KASSERT(rs->getarg != NULL);
276 rs->get(byteswanted, rs->getarg); 276 rs->get(byteswanted, rs->getarg);
277 rnd_printf_verbose("rnd: entropy estimate %zu bits\n", 277 rnd_printf_verbose("rnd: entropy estimate %zu bits\n",
278 rndpool_get_entropy_count(&rnd_global.pool)); 278 rndpool_get_entropy_count(&rnd_global.pool));
279 rnd_printf_verbose("rnd: asking source %s for %zu bytes\n", 279 rnd_printf_verbose("rnd: asking source %s for %zu bytes\n",
280 rs->name, byteswanted); 280 rs->name, byteswanted);
281 } 281 }
282 mutex_spin_exit(&rnd_global.lock); 282 mutex_spin_exit(&rnd_global.lock);
283} 283}
284 284
285/* 285/*
286 * Use the timing/value of the event to estimate the entropy gathered. 286 * Use the timing/value of the event to estimate the entropy gathered.
287 * If all the differentials (first, second, and third) are non-zero, return 287 * If all the differentials (first, second, and third) are non-zero, return
288 * non-zero. If any of these are zero, return zero. 288 * non-zero. If any of these are zero, return zero.
289 */ 289 */
/*
 * Update the delta-estimator state `d' with a new value `v' whose
 * first-order delta is `delta', and estimate whether the sample
 * carried entropy.  Returns 1 if the first, second, and third order
 * differentials are all non-zero, 0 otherwise.
 *
 * NOTE(review): the subtractions/negations below are int32_t and can
 * overflow for extreme inputs (e.g. negating INT32_MIN); this mirrors
 * the historical behaviour — confirm before changing.
 */
static inline uint32_t
rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta)
{
	int32_t delta2, delta3;

	d->insamples++;

	/*
	 * Calculate the second and third order differentials
	 * (absolute values of successive differences).
	 */
	delta2 = d->dx - delta;
	if (delta2 < 0)
		delta2 = -delta2;

	delta3 = d->d2x - delta2;
	if (delta3 < 0)
		delta3 = -delta3;

	/* Remember this sample's value and differentials for next time. */
	d->x = v;
	d->dx = delta;
	d->d2x = delta2;

	/*
	 * If any delta is 0, we got no entropy.  If all are non-zero, we
	 * might have something.
	 */
	if (delta == 0 || delta2 == 0 || delta3 == 0)
		return (0);

	d->outbits++;
	return (1);
}
322 322
323/* 323/*
324 * Delta estimator for 32-bit timeestamps. Must handle wrap. 324 * Delta estimator for 32-bit timeestamps. Must handle wrap.
325 */ 325 */
/*
 * Delta estimator for 32-bit timestamps.  Must handle counter wrap:
 * when the new timestamp is below the previous one, the delta is
 * taken across the wrap point.
 *
 * NOTE(review): the wrap delta omits the +1 (UINT32_MAX - x + t vs.
 * the exact modular difference), and the non-wrap branch computes
 * d->x - t (negated below rather than t - d->x).  Both yield the
 * right magnitude for the estimator; kept as-is.
 */
static inline uint32_t
rnd_dt_estimate(krndsource_t *rs, uint32_t t)
{
	int32_t delta;
	uint32_t ret;
	rnd_delta_t *d = &rs->time_delta;

	if (t < d->x) {
		/* Counter wrapped since the last sample. */
		delta = UINT32_MAX - d->x + t;
	} else {
		delta = d->x - t;
	}

	if (delta < 0) {
		delta = -delta;
	}

	ret = rnd_delta_estimate(d, t, delta);

	KASSERT(d->x == t);
	KASSERT(d->dx == delta);
#ifdef RND_VERBOSE
	/* Rate-limit diagnostics: print roughly every 1151st sample. */
	if (deltacnt++ % 1151 == 0) {
		rnd_printf_verbose("rnd_dt_estimate: %s x = %lld, dx = %lld, "
		       "d2x = %lld\n", rs->name,
		       (int)d->x, (int)d->dx, (int)d->d2x);
	}
#endif
	return ret;
}
356 356
357/* 357/*
358 * Delta estimator for 32 or bit values. "Wrap" isn't. 358 * Delta estimator for 32 or bit values. "Wrap" isn't.
359 */ 359 */
/*
 * Delta estimator for 32-bit sample values (as opposed to
 * timestamps).  Values are not expected to wrap, so the delta is a
 * plain absolute difference from the previous value.
 */
static inline uint32_t
rnd_dv_estimate(krndsource_t *rs, uint32_t v)
{
	int32_t delta;
	uint32_t ret;
	rnd_delta_t *d = &rs->value_delta;

	delta = d->x - v;

	if (delta < 0) {
		delta = -delta;
	}
	ret = rnd_delta_estimate(d, v, (uint32_t)delta);

	KASSERT(d->x == v);
	KASSERT(d->dx == delta);
#ifdef RND_VERBOSE
	/* Rate-limit diagnostics: print roughly every 1151st sample. */
	if (deltacnt++ % 1151 == 0) {
		rnd_printf_verbose("rnd_dv_estimate: %s x = %lld, dx = %lld, "
		       " d2x = %lld\n", rs->name,
		       (long long int)d->x,
		       (long long int)d->dx,
		       (long long int)d->d2x);
	}
#endif
	return ret;
}
387 387
388#if defined(__HAVE_CPU_COUNTER) 388#if defined(__HAVE_CPU_COUNTER)
389static struct { 389static struct {
390 kmutex_t lock; 390 kmutex_t lock;
391 struct callout callout; 391 struct callout callout;
392 struct callout stop_callout; 392 struct callout stop_callout;
393 krndsource_t source; 393 krndsource_t source;
394} rnd_skew __cacheline_aligned; 394} rnd_skew __cacheline_aligned;
395 395
396static void rnd_skew_intr(void *); 396static void rnd_skew_intr(void *);
397 397
398static void 398static void
399rnd_skew_enable(krndsource_t *rs, bool enabled) 399rnd_skew_enable(krndsource_t *rs, bool enabled)
400{ 400{
401 401
402 if (enabled) { 402 if (enabled) {
403 rnd_skew_intr(rs); 403 rnd_skew_intr(rs);
404 } else { 404 } else {
405 callout_stop(&rnd_skew.callout); 405 callout_stop(&rnd_skew.callout);
406 } 406 }
407} 407}
408 408
/*
 * Callout handler that ends a measurement window (see rnd_skew_get)
 * by cancelling the sampling callout.
 */
static void
rnd_skew_stop_intr(void *arg)
{

	callout_stop(&rnd_skew.callout);
}
415 415
416static void 416static void
417rnd_skew_get(size_t bytes, void *priv) 417rnd_skew_get(size_t bytes, void *priv)
418{ 418{
419 krndsource_t *skewsrcp = priv; 419 krndsource_t *skewsrcp = priv;
420 420
421 KASSERT(skewsrcp == &rnd_skew.source); 421 KASSERT(skewsrcp == &rnd_skew.source);
422 if (RND_ENABLED(skewsrcp)) { 422 if (RND_ENABLED(skewsrcp)) {
423 /* Measure for 30s */ 423 /* Measure for 30s */
424 callout_schedule(&rnd_skew.stop_callout, hz * 30); 424 callout_schedule(&rnd_skew.stop_callout, hz * 30);
425 callout_schedule(&rnd_skew.callout, 1); 425 callout_schedule(&rnd_skew.callout, 1);
426 } 426 }
427} 427}
428 428
429static void 429static void
430rnd_skew_intr(void *arg) 430rnd_skew_intr(void *arg)
431{ 431{
432 static int flipflop; 432 static int flipflop;
433 433
434 /* 434 /*
435 * Even on systems with seemingly stable clocks, the 435 * Even on systems with seemingly stable clocks, the
436 * delta-time entropy estimator seems to think we get 1 bit here 436 * delta-time entropy estimator seems to think we get 1 bit here
437 * about every 2 calls. 437 * about every 2 calls.
438 * 438 *
439 */ 439 */
440 mutex_spin_enter(&rnd_skew.lock); 440 mutex_spin_enter(&rnd_skew.lock);
441 flipflop = !flipflop; 441 flipflop = !flipflop;
442 442
443 if (RND_ENABLED(&rnd_skew.source)) { 443 if (RND_ENABLED(&rnd_skew.source)) {
444 if (flipflop) { 444 if (flipflop) {
445 rnd_add_uint32(&rnd_skew.source, rnd_counter()); 445 rnd_add_uint32(&rnd_skew.source, rnd_counter());
446 callout_schedule(&rnd_skew.callout, hz / 10); 446 callout_schedule(&rnd_skew.callout, hz / 10);
447 } else { 447 } else {
448 callout_schedule(&rnd_skew.callout, 1); 448 callout_schedule(&rnd_skew.callout, 1);
449 } 449 }
450 } 450 }
451 mutex_spin_exit(&rnd_skew.lock); 451 mutex_spin_exit(&rnd_skew.lock);
452} 452}
453#endif 453#endif
454 454
455/* 455/*
456 * Entropy was just added to the pool. If we crossed the threshold for 456 * Entropy was just added to the pool. If we crossed the threshold for
457 * the first time, set rnd_initial_entropy = 1. 457 * the first time, set rnd_initial_entropy = 1.
458 */ 458 */
459static void 459static void
460rnd_entropy_added(void) 460rnd_entropy_added(void)
461{ 461{
462 uint32_t pool_entropy; 462 uint32_t pool_entropy;
463 463
464 KASSERT(mutex_owned(&rnd_global.lock)); 464 KASSERT(mutex_owned(&rnd_global.lock));
465 465
466 if (__predict_true(rnd_initial_entropy)) 466 if (__predict_true(rnd_initial_entropy))
467 return; 467 return;
468 pool_entropy = rndpool_get_entropy_count(&rnd_global.pool); 468 pool_entropy = rndpool_get_entropy_count(&rnd_global.pool);
469 if (pool_entropy > RND_ENTROPY_THRESHOLD * NBBY) { 469 if (pool_entropy > RND_ENTROPY_THRESHOLD * NBBY) {
470 rnd_printf_verbose("rnd: have initial entropy (%zu)\n", 470 rnd_printf_verbose("rnd: have initial entropy (%zu)\n",
471 pool_entropy); 471 pool_entropy);
472 rnd_initial_entropy = 1; 472 rnd_initial_entropy = 1;
473 } 473 }
474} 474}
475 475
476/* 476/*
477 * initialize the global random pool for our use. 477 * initialize the global random pool for our use.
478 * rnd_init() must be called very early on in the boot process, so 478 * rnd_init() must be called very early on in the boot process, so
479 * the pool is ready for other devices to attach as sources. 479 * the pool is ready for other devices to attach as sources.
480 */ 480 */
/*
 * initialize the global random pool for our use.
 * rnd_init() must be called very early on in the boot process, so
 * the pool is ready for other devices to attach as sources.
 *
 * The initialization order below is deliberate: locks and queues
 * first, then the sample pool cache, then seed material, then the
 * built-in sources.
 */
void
rnd_init(void)
{
	uint32_t c;

	/* Idempotent: later calls are no-ops. */
	if (rnd_ready)
		return;

	/*
	 * take a counter early, hoping that there's some variance in
	 * the following operations
	 */
	c = rnd_counter();

	rndsinks_init();

	/* Initialize the sample queue. */
	mutex_init(&rnd_samples.lock, MUTEX_DEFAULT, IPL_VM);
	SIMPLEQ_INIT(&rnd_samples.q);

	/* Initialize the global pool and sources list. */
	mutex_init(&rnd_global.lock, MUTEX_DEFAULT, IPL_VM);
	rndpool_init(&rnd_global.pool);
	LIST_INIT(&rnd_global.sources);

	rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0,
				    "rndsample", NULL, IPL_VM,
				    NULL, NULL, NULL);

	/*
	 * Set resource limit. The rnd_process_events() function
	 * is called every tick and process the sample queue.
	 * Without limitation, if a lot of rnd_add_*() are called,
	 * all kernel memory may be eaten up.
	 */
	pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0);

	/*
	 * Mix *something*, *anything* into the pool to help it get started.
	 * However, it's not safe for rnd_counter() to call microtime() yet,
	 * so on some platforms we might just end up with zeros anyway.
	 * XXX more things to add would be nice.
	 */
	if (c) {
		mutex_spin_enter(&rnd_global.lock);
		rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
		c = rnd_counter();
		rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
		mutex_spin_exit(&rnd_global.lock);
	}

	/*
	 * If we have a cycle counter, take its error with respect
	 * to the callout mechanism as a source of entropy, ala
	 * TrueRand.
	 *
	 */
#if defined(__HAVE_CPU_COUNTER)
	/* IPL_VM because taken while rnd_global.lock is held. */
	mutex_init(&rnd_skew.lock, MUTEX_DEFAULT, IPL_VM);
	callout_init(&rnd_skew.callout, CALLOUT_MPSAFE);
	callout_init(&rnd_skew.stop_callout, CALLOUT_MPSAFE);
	callout_setfunc(&rnd_skew.callout, rnd_skew_intr, NULL);
	callout_setfunc(&rnd_skew.stop_callout, rnd_skew_stop_intr, NULL);
	rndsource_setcb(&rnd_skew.source, rnd_skew_get, &rnd_skew.source);
	rndsource_setenable(&rnd_skew.source, rnd_skew_enable);
	rnd_attach_source(&rnd_skew.source, "callout", RND_TYPE_SKEW,
	    RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|
	    RND_FLAG_HASCB|RND_FLAG_HASENABLE);
	rnd_skew_intr(NULL);
#endif

	rnd_printf_verbose("rnd: initialised (%u)%s", RND_POOLBITS,
	    c ? " with counter\n" : "\n");
	/*
	 * Mix in any seed saved by the bootloader, capping its claimed
	 * entropy at half the pool size, then scrub the seed buffer.
	 * NOTE(review): boot_rsp->entropy is printed with %d — verify
	 * its declared type matches.
	 */
	if (boot_rsp != NULL) {
		mutex_spin_enter(&rnd_global.lock);
		rndpool_add_data(&rnd_global.pool, boot_rsp->data,
		    sizeof(boot_rsp->data),
		    MIN(boot_rsp->entropy, RND_POOLBITS / 2));
		rnd_entropy_added();
		mutex_spin_exit(&rnd_global.lock);
		rnd_printf("rnd: seeded with %d bits\n",
		    MIN(boot_rsp->entropy, RND_POOLBITS / 2));
		memset(boot_rsp, 0, sizeof(*boot_rsp));
	}
	/* Built-in sources: printf timing and autoconf timing. */
	rnd_attach_source(&rnd_printf_source, "printf", RND_TYPE_UNKNOWN,
	    RND_FLAG_NO_ESTIMATE);
	rnd_attach_source(&rnd_autoconf_source, "autoconf",
	    RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME);
	rnd_ready = 1;
}
573 573
574static rnd_sample_t * 574static rnd_sample_t *
575rnd_sample_allocate(krndsource_t *source) 575rnd_sample_allocate(krndsource_t *source)
576{ 576{
577 rnd_sample_t *c; 577 rnd_sample_t *c;
578 578
579 c = pool_cache_get(rnd_mempc, PR_WAITOK); 579 c = pool_cache_get(rnd_mempc, PR_WAITOK);
580 if (c == NULL) 580 if (c == NULL)
581 return (NULL); 581 return (NULL);
582 582
583 c->source = source; 583 c->source = source;
584 c->cursor = 0; 584 c->cursor = 0;
585 c->entropy = 0; 585 c->entropy = 0;
586 586
587 return (c); 587 return (c);
588} 588}
589 589
590/* 590/*
591 * Don't wait on allocation. To be used in an interrupt context. 591 * Don't wait on allocation. To be used in an interrupt context.
592 */ 592 */
593static rnd_sample_t * 593static rnd_sample_t *
594rnd_sample_allocate_isr(krndsource_t *source) 594rnd_sample_allocate_isr(krndsource_t *source)
595{ 595{
596 rnd_sample_t *c; 596 rnd_sample_t *c;
597 597
598 c = pool_cache_get(rnd_mempc, PR_NOWAIT); 598 c = pool_cache_get(rnd_mempc, PR_NOWAIT);
599 if (c == NULL) 599 if (c == NULL)
600 return (NULL); 600 return (NULL);
601 601
602 c->source = source; 602 c->source = source;
603 c->cursor = 0; 603 c->cursor = 0;
604 c->entropy = 0; 604 c->entropy = 0;
605 605
606 return (c); 606 return (c);
607} 607}
608 608
609static void 609static void
610rnd_sample_free(rnd_sample_t *c) 610rnd_sample_free(rnd_sample_t *c)
611{ 611{
612 memset(c, 0, sizeof(*c)); 612 memset(c, 0, sizeof(*c));
613 pool_cache_put(rnd_mempc, c); 613 pool_cache_put(rnd_mempc, c);
614} 614}
615 615
616/* 616/*
617 * Add a source to our list of sources. 617 * Add a source to our list of sources.
618 */ 618 */
619void 619void
620rnd_attach_source(krndsource_t *rs, const char *name, uint32_t type, 620rnd_attach_source(krndsource_t *rs, const char *name, uint32_t type,
621 uint32_t flags) 621 uint32_t flags)
622{ 622{
623 uint32_t ts; 623 uint32_t ts;
624 624
625 ts = rnd_counter(); 625 ts = rnd_counter();
626 626
627 strlcpy(rs->name, name, sizeof(rs->name)); 627 strlcpy(rs->name, name, sizeof(rs->name));
628 memset(&rs->time_delta, 0, sizeof(rs->time_delta)); 628 memset(&rs->time_delta, 0, sizeof(rs->time_delta));
629 rs->time_delta.x = ts; 629 rs->time_delta.x = ts;
630 memset(&rs->value_delta, 0, sizeof(rs->value_delta)); 630 memset(&rs->value_delta, 0, sizeof(rs->value_delta));
631 rs->total = 0; 631 rs->total = 0;
632 632
633 /* 633 /*
634 * Some source setup, by type 634 * Some source setup, by type
635 */ 635 */
636 rs->test = NULL; 636 rs->test = NULL;
637 rs->test_cnt = -1; 637 rs->test_cnt = -1;
638 638
639 if (flags == 0) { 639 if (flags == 0) {
640 flags = RND_FLAG_DEFAULT; 640 flags = RND_FLAG_DEFAULT;
641 } 641 }
642 642
643 switch (type) { 643 switch (type) {
644 case RND_TYPE_NET: /* Don't collect by default */ 644 case RND_TYPE_NET: /* Don't collect by default */
645 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE); 645 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);
646 break; 646 break;
647 case RND_TYPE_RNG: /* Space for statistical testing */ 647 case RND_TYPE_RNG: /* Space for statistical testing */
648 rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP); 648 rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP);
649 rs->test_cnt = 0; 649 rs->test_cnt = 0;
650 /* FALLTHRU */ 650 /* FALLTHRU */
651 case RND_TYPE_VM: /* Process samples in bulk always */ 651 case RND_TYPE_VM: /* Process samples in bulk always */
652 flags |= RND_FLAG_FAST; 652 flags |= RND_FLAG_FAST;
653 break; 653 break;
654 default: 654 default:
655 break; 655 break;
656 } 656 }
657 657
658 rs->type = type; 658 rs->type = type;
659 rs->flags = flags; 659 rs->flags = flags;
660 660
661 rs->state = rnd_sample_allocate(rs); 661 rs->state = rnd_sample_allocate(rs);
662 662
663 mutex_spin_enter(&rnd_global.lock); 663 mutex_spin_enter(&rnd_global.lock);
664 LIST_INSERT_HEAD(&rnd_global.sources, rs, list); 664 LIST_INSERT_HEAD(&rnd_global.sources, rs, list);
665 665
666#ifdef RND_VERBOSE 666#ifdef RND_VERBOSE
667 rnd_printf_verbose("rnd: %s attached as an entropy source (", 667 rnd_printf_verbose("rnd: %s attached as an entropy source (",
668 rs->name); 668 rs->name);
669 if (!(flags & RND_FLAG_NO_COLLECT)) { 669 if (!(flags & RND_FLAG_NO_COLLECT)) {
670 rnd_printf_verbose("collecting"); 670 rnd_printf_verbose("collecting");
671 if (flags & RND_FLAG_NO_ESTIMATE) 671 if (flags & RND_FLAG_NO_ESTIMATE)
672 rnd_printf_verbose(" without estimation"); 672 rnd_printf_verbose(" without estimation");
673 } 673 }
674 else 674 else
675 rnd_printf_verbose("off"); 675 rnd_printf_verbose("off");
676 rnd_printf_verbose(")\n"); 676 rnd_printf_verbose(")\n");
677#endif 677#endif
678 678
679 /* 679 /*
680 * Again, put some more initial junk in the pool. 680 * Again, put some more initial junk in the pool.
681 * FreeBSD claim to have an analysis that show 4 bits of 681 * FreeBSD claim to have an analysis that show 4 bits of
682 * entropy per source-attach timestamp. I am skeptical, 682 * entropy per source-attach timestamp. I am skeptical,
683 * but we count 1 bit per source here. 683 * but we count 1 bit per source here.
684 */ 684 */
685 rndpool_add_data(&rnd_global.pool, &ts, sizeof(ts), 1); 685 rndpool_add_data(&rnd_global.pool, &ts, sizeof(ts), 1);
686 mutex_spin_exit(&rnd_global.lock); 686 mutex_spin_exit(&rnd_global.lock);
687} 687}
688 688
689/* 689/*
690 * Remove a source from our list of sources. 690 * Remove a source from our list of sources.
691 */ 691 */
692void 692void
693rnd_detach_source(krndsource_t *source) 693rnd_detach_source(krndsource_t *source)
694{ 694{
695 rnd_sample_t *sample; 695 rnd_sample_t *sample;
696 696
697 mutex_spin_enter(&rnd_global.lock); 697 mutex_spin_enter(&rnd_global.lock);
698 LIST_REMOVE(source, list); 698 LIST_REMOVE(source, list);
699 mutex_spin_exit(&rnd_global.lock); 699 mutex_spin_exit(&rnd_global.lock);
700 700
701 /* 701 /*
702 * If there are samples queued up "remove" them from the sample queue 702 * If there are samples queued up "remove" them from the sample queue
703 * by setting the source to the no-collect pseudosource. 703 * by setting the source to the no-collect pseudosource.
704 */ 704 */
705 mutex_spin_enter(&rnd_samples.lock); 705 mutex_spin_enter(&rnd_samples.lock);
706 sample = SIMPLEQ_FIRST(&rnd_samples.q); 706 sample = SIMPLEQ_FIRST(&rnd_samples.q);
707 while (sample != NULL) { 707 while (sample != NULL) {
708 if (sample->source == source) 708 if (sample->source == source)
709 sample->source = &rnd_source_no_collect; 709 sample->source = &rnd_source_no_collect;
710 710
711 sample = SIMPLEQ_NEXT(sample, next); 711 sample = SIMPLEQ_NEXT(sample, next);
712 } 712 }
713 mutex_spin_exit(&rnd_samples.lock); 713 mutex_spin_exit(&rnd_samples.lock);
714 714
715 if (source->state) { 715 if (source->state) {
716 rnd_sample_free(source->state); 716 rnd_sample_free(source->state);
717 source->state = NULL; 717 source->state = NULL;
718 } 718 }
719 719
720 if (source->test) { 720 if (source->test) {
721 kmem_free(source->test, sizeof(rngtest_t)); 721 kmem_free(source->test, sizeof(rngtest_t));
722 } 722 }
723 723
724 rnd_printf_verbose("rnd: %s detached as an entropy source\n", 724 rnd_printf_verbose("rnd: %s detached as an entropy source\n",
725 source->name); 725 source->name);
726} 726}
727 727
728static inline uint32_t 728static inline uint32_t
729rnd_estimate(krndsource_t *rs, uint32_t ts, uint32_t val) 729rnd_estimate(krndsource_t *rs, uint32_t ts, uint32_t val)
730{ 730{
731 uint32_t entropy = 0, dt_est, dv_est; 731 uint32_t entropy = 0, dt_est, dv_est;
732 732
733 dt_est = rnd_dt_estimate(rs, ts); 733 dt_est = rnd_dt_estimate(rs, ts);
734 dv_est = rnd_dv_estimate(rs, val); 734 dv_est = rnd_dv_estimate(rs, val);
735 735
736 if (!(rs->flags & RND_FLAG_NO_ESTIMATE)) { 736 if (!(rs->flags & RND_FLAG_NO_ESTIMATE)) {
737 if (rs->flags & RND_FLAG_ESTIMATE_TIME) { 737 if (rs->flags & RND_FLAG_ESTIMATE_TIME) {
738 entropy += dt_est; 738 entropy += dt_est;
739 } 739 }
740 740
741 if (rs->flags & RND_FLAG_ESTIMATE_VALUE) { 741 if (rs->flags & RND_FLAG_ESTIMATE_VALUE) {
742 entropy += dv_est; 742 entropy += dv_est;
743 } 743 }
744 744
745 } 745 }
746 return entropy; 746 return entropy;
747} 747}
748 748
749/* 749/*
750 * Add a 32-bit value to the entropy pool. The rs parameter should point to 750 * Add a 32-bit value to the entropy pool. The rs parameter should point to
751 * the source-specific source structure. 751 * the source-specific source structure.
752 */ 752 */
753void 753void
754_rnd_add_uint32(krndsource_t *rs, uint32_t val) 754_rnd_add_uint32(krndsource_t *rs, uint32_t val)
755{ 755{
756 uint32_t ts;  756 uint32_t ts;
757 uint32_t entropy = 0; 757 uint32_t entropy = 0;
758 758
759 if (rs->flags & RND_FLAG_NO_COLLECT) 759 if (rs->flags & RND_FLAG_NO_COLLECT)
760 return; 760 return;
761 761
762 /* 762 /*
763 * Sample the counter as soon as possible to avoid 763 * Sample the counter as soon as possible to avoid
764 * entropy overestimation. 764 * entropy overestimation.
765 */ 765 */
766 ts = rnd_counter(); 766 ts = rnd_counter();
767 767
768 /* 768 /*
769 * Calculate estimates - we may not use them, but if we do 769 * Calculate estimates - we may not use them, but if we do
770 * not calculate them, the estimators' history becomes invalid. 770 * not calculate them, the estimators' history becomes invalid.
771 */ 771 */
772 entropy = rnd_estimate(rs, ts, val); 772 entropy = rnd_estimate(rs, ts, val);
773 773
774 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); 774 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts);
775} 775}
776 776
777void 777void
778_rnd_add_uint64(krndsource_t *rs, uint64_t val) 778_rnd_add_uint64(krndsource_t *rs, uint64_t val)
779{ 779{
780 uint32_t ts;  780 uint32_t ts;
781 uint32_t entropy = 0; 781 uint32_t entropy = 0;
782 782
783 if (rs->flags & RND_FLAG_NO_COLLECT) 783 if (rs->flags & RND_FLAG_NO_COLLECT)
784 return; 784 return;
785 785
786 /* 786 /*
787 * Sample the counter as soon as possible to avoid 787 * Sample the counter as soon as possible to avoid
788 * entropy overestimation. 788 * entropy overestimation.
789 */ 789 */
790 ts = rnd_counter(); 790 ts = rnd_counter();
791 791
792 /* 792 /*
793 * Calculate estimates - we may not use them, but if we do 793 * Calculate estimates - we may not use them, but if we do
794 * not calculate them, the estimators' history becomes invalid. 794 * not calculate them, the estimators' history becomes invalid.
795 */ 795 */
796 entropy = rnd_estimate(rs, ts, (uint32_t)(val & (uint64_t)0xffffffff)); 796 entropy = rnd_estimate(rs, ts, (uint32_t)(val & (uint64_t)0xffffffff));
797 797
798 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); 798 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts);
799} 799}
800 800
801void 801void
802rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len, 802rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len,
803 uint32_t entropy) 803 uint32_t entropy)
804{ 804{
805 /* 805 /*
806 * This interface is meant for feeding data which is, 806 * This interface is meant for feeding data which is,
807 * itself, random. Don't estimate entropy based on 807 * itself, random. Don't estimate entropy based on
808 * timestamp, just directly add the data. 808 * timestamp, just directly add the data.
809 */ 809 */
810 if (__predict_false(rs == NULL)) { 810 if (__predict_false(rs == NULL)) {
811 mutex_spin_enter(&rnd_global.lock); 811 mutex_spin_enter(&rnd_global.lock);
812 rndpool_add_data(&rnd_global.pool, data, len, entropy); 812 rndpool_add_data(&rnd_global.pool, data, len, entropy);
813 mutex_spin_exit(&rnd_global.lock); 813 mutex_spin_exit(&rnd_global.lock);
814 } else { 814 } else {
815 rnd_add_data_ts(rs, data, len, entropy, rnd_counter()); 815 rnd_add_data_ts(rs, data, len, entropy, rnd_counter());
816 } 816 }
817} 817}
818 818
/*
 * Core sample-queuing path: package (value, timestamp) pairs from
 * source "rs" into per-source sample buffers, and move any buffers
 * that fill onto the global sample queue for rnd_process_events().
 * Callable from interrupt context: allocation is non-sleeping, and
 * on allocation failure the remaining data is simply dropped.
 * NOTE(review): len is consumed in whole 32-bit words; any trailing
 * len % 4 bytes appear to be silently discarded.
 */
static void
rnd_add_data_ts(krndsource_t *rs, const void *const data, uint32_t len,
    uint32_t entropy, uint32_t ts)
{
	rnd_sample_t *state = NULL;
	const uint8_t *p = data;
	uint32_t dint;
	int todo, done, filled = 0;
	int sample_count;
	struct rnd_sampleq tmp_samples = SIMPLEQ_HEAD_INITIALIZER(tmp_samples);

	/* Nothing to do if the source collects neither times nor values. */
	if (rs && (rs->flags & RND_FLAG_NO_COLLECT ||
	    __predict_false(!(rs->flags &
		    (RND_FLAG_COLLECT_TIME|
			RND_FLAG_COLLECT_VALUE))))) {
		return;
	}
	todo = len / sizeof(dint);
	/*
	 * Let's try to be efficient: if we are warm, and a source
	 * is adding entropy at a rate of at least 1 bit every 10 seconds,
	 * mark it as "fast" and add its samples in bulk.
	 */
	if (__predict_true(rs->flags & RND_FLAG_FAST) ||
	    (todo >= RND_SAMPLE_COUNT)) {
		sample_count = RND_SAMPLE_COUNT;
	} else {
		if (!(rs->flags & RND_FLAG_HASCB) &&
		    !cold && rnd_initial_entropy) {
			struct timeval upt;

			getmicrouptime(&upt);
			/* Promote the source to "fast" once its observed rate
			 * crosses any of these uptime-scaled thresholds. */
			if ( (upt.tv_sec > 0 && rs->total > upt.tv_sec * 10) ||
			    (upt.tv_sec > 10 && rs->total > upt.tv_sec) ||
			    (upt.tv_sec > 100 &&
			      rs->total > upt.tv_sec / 10)) {
				rnd_printf_verbose("rnd: source %s is fast"
				    " (%d samples at once,"
				    " %d bits in %lld seconds), "
				    "processing samples in bulk.\n",
				    rs->name, todo, rs->total,
				    (long long int)upt.tv_sec);
				rs->flags |= RND_FLAG_FAST;
			}
		}
		sample_count = 2;
	}

	/*
	 * Loop over data packaging it into sample buffers.
	 * If a sample buffer allocation fails, drop all data.
	 */
	for (done = 0; done < todo ; done++) {
		state = rs->state;
		if (state == NULL) {
			state = rnd_sample_allocate_isr(rs);
			if (__predict_false(state == NULL)) {
				break;
			}
			rs->state = state;
		}

		state->ts[state->cursor] = ts;
		(void)memcpy(&dint, &p[done*4], 4);
		state->values[state->cursor] = dint;
		state->cursor++;

		/* Buffer full: stage it locally and start a new one. */
		if (state->cursor == sample_count) {
			SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next);
			filled++;
			rs->state = NULL;
		}
	}

	/* Allocation failed (or todo == 0): discard staged buffers. */
	if (__predict_false(state == NULL)) {
		while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
			SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
			rnd_sample_free(state);
		}
		return;
	}

	/*
	 * Claim all the entropy on the last one we send to
	 * the pool, so we don't rely on it being evenly distributed
	 * in the supplied data.
	 *
	 * XXX The rndpool code must accept samples with more
	 * XXX claimed entropy than bits for this to work right.
	 */
	state->entropy += entropy;
	rs->total += entropy;

	/*
	 * If we didn't finish any sample buffers, we're done.
	 */
	if (!filled) {
		return;
	}

	/* Publish the staged buffers onto the shared queue in one pass. */
	mutex_spin_enter(&rnd_samples.lock);
	while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
		SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
		SIMPLEQ_INSERT_HEAD(&rnd_samples.q, state, next);
	}
	mutex_spin_exit(&rnd_samples.lock);

	/* Cause processing of queued samples */
	rnd_schedule_process();
}
929 929
930static int 930static int
931rnd_hwrng_test(rnd_sample_t *sample) 931rnd_hwrng_test(rnd_sample_t *sample)
932{ 932{
933 krndsource_t *source = sample->source; 933 krndsource_t *source = sample->source;
934 size_t cmplen; 934 size_t cmplen;
935 uint8_t *v1, *v2; 935 uint8_t *v1, *v2;
936 size_t resid, totest; 936 size_t resid, totest;
937 937
938 KASSERT(source->type == RND_TYPE_RNG); 938 KASSERT(source->type == RND_TYPE_RNG);
939 939
940 /* 940 /*
941 * Continuous-output test: compare two halves of the 941 * Continuous-output test: compare two halves of the
942 * sample buffer to each other. The sample buffer (64 ints, 942 * sample buffer to each other. The sample buffer (64 ints,
943 * so either 256 or 512 bytes on any modern machine) should be 943 * so either 256 or 512 bytes on any modern machine) should be
944 * much larger than a typical hardware RNG output, so this seems 944 * much larger than a typical hardware RNG output, so this seems
945 * a reasonable way to do it without retaining extra data. 945 * a reasonable way to do it without retaining extra data.
946 */ 946 */
947 cmplen = sizeof(sample->values) / 2; 947 cmplen = sizeof(sample->values) / 2;
948 v1 = (uint8_t *)sample->values; 948 v1 = (uint8_t *)sample->values;
949 v2 = (uint8_t *)sample->values + cmplen; 949 v2 = (uint8_t *)sample->values + cmplen;
950 950
951 if (__predict_false(!memcmp(v1, v2, cmplen))) { 951 if (__predict_false(!memcmp(v1, v2, cmplen))) {
952 rnd_printf("rnd: source \"%s\" failed continuous-output test.\n", 952 rnd_printf("rnd: source \"%s\" failed continuous-output test.\n",
953 source->name); 953 source->name);
954 return 1; 954 return 1;
955 } 955 }
956 956
957 /* 957 /*
958 * FIPS 140 statistical RNG test. We must accumulate 20,000 bits. 958 * FIPS 140 statistical RNG test. We must accumulate 20,000 bits.
959 */ 959 */
960 if (__predict_true(source->test_cnt == -1)) { 960 if (__predict_true(source->test_cnt == -1)) {
961 /* already passed the test */ 961 /* already passed the test */
962 return 0; 962 return 0;
963 } 963 }
964 resid = FIPS140_RNG_TEST_BYTES - source->test_cnt; 964 resid = FIPS140_RNG_TEST_BYTES - source->test_cnt;
965 totest = MIN(RND_SAMPLE_COUNT * 4, resid); 965 totest = MIN(RND_SAMPLE_COUNT * 4, resid);
966 memcpy(source->test->rt_b + source->test_cnt, sample->values, totest); 966 memcpy(source->test->rt_b + source->test_cnt, sample->values, totest);
967 resid -= totest; 967 resid -= totest;
968 source->test_cnt += totest; 968 source->test_cnt += totest;
969 if (resid == 0) { 969 if (resid == 0) {
970 strlcpy(source->test->rt_name, source->name, 970 strlcpy(source->test->rt_name, source->name,
971 sizeof(source->test->rt_name)); 971 sizeof(source->test->rt_name));
972 if (rngtest(source->test)) { 972 if (rngtest(source->test)) {
973 rnd_printf("rnd: source \"%s\" failed statistical test.", 973 rnd_printf("rnd: source \"%s\" failed statistical test.",
974 source->name); 974 source->name);
975 return 1; 975 return 1;
976 } 976 }
977 source->test_cnt = -1; 977 source->test_cnt = -1;
978 memset(source->test, 0, sizeof(*source->test)); 978 memset(source->test, 0, sizeof(*source->test));
979 } 979 }
980 return 0; 980 return 0;
981} 981}
982 982
983/* 983/*
984 * Process the events in the ring buffer. Called by rnd_timeout or 984 * Process the events in the ring buffer. Called by rnd_timeout or
985 * by the add routines directly if the callout has never fired (that 985 * by the add routines directly if the callout has never fired (that
986 * is, if we are "cold" -- just booted). 986 * is, if we are "cold" -- just booted).
987 * 987 *
988 */ 988 */
static void
rnd_process_events(void)
{
	rnd_sample_t *sample = NULL;
	krndsource_t *source;
	/* Source of the previously processed sample; survives calls so the
	 * anti-side-channel check below works across invocations. */
	static krndsource_t *last_source;
	uint32_t entropy;
	size_t pool_entropy;
	int wake = 0;
	/* dq_samples: to be mixed into the pool; df_samples: free only. */
	struct rnd_sampleq dq_samples = SIMPLEQ_HEAD_INITIALIZER(dq_samples);
	struct rnd_sampleq df_samples = SIMPLEQ_HEAD_INITIALIZER(df_samples);

	/*
	 * Drain to the on-stack queue and drop the lock.
	 */
	mutex_spin_enter(&rnd_samples.lock);
	while ((sample = SIMPLEQ_FIRST(&rnd_samples.q))) {
		SIMPLEQ_REMOVE_HEAD(&rnd_samples.q, next);
		/*
		 * We repeat this check here, since it is possible
		 * the source was disabled before we were called, but
		 * after the entry was queued.
		 */
		if (__predict_false(!(sample->source->flags &
			    (RND_FLAG_COLLECT_TIME|
				RND_FLAG_COLLECT_VALUE)))) {
			SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
		} else {
			SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next);
		}
	}
	mutex_spin_exit(&rnd_samples.lock);

	/* Don't thrash the rndpool mtx either. Hold, add all samples. */
	mutex_spin_enter(&rnd_global.lock);

	pool_entropy = rndpool_get_entropy_count(&rnd_global.pool);

	while ((sample = SIMPLEQ_FIRST(&dq_samples))) {
		int sample_count;

		SIMPLEQ_REMOVE_HEAD(&dq_samples, next);
		source = sample->source;
		entropy = sample->entropy;
		sample_count = sample->cursor;

		/*
		 * Don't provide a side channel for timing attacks on
		 * low-rate sources: require mixing with some other
		 * source before we schedule a wakeup.
		 */
		if (!wake &&
		    (source != last_source || source->flags & RND_FLAG_FAST)) {
			wake++;
		}
		last_source = source;

		/*
		 * If the source has been disabled, ignore samples from
		 * it.
		 */
		if (source->flags & RND_FLAG_NO_COLLECT)
			goto skip;

		/*
		 * Hardware generators are great but sometimes they
		 * have...hardware issues. Don't use any data from
		 * them unless it passes some tests.
		 */
		if (source->type == RND_TYPE_RNG) {
			if (__predict_false(rnd_hwrng_test(sample))) {
				source->flags |= RND_FLAG_NO_COLLECT;
				rnd_printf("rnd: disabling source \"%s\".",
				    source->name);
				goto skip;
			}
		}

		/* Mix in raw values and/or timestamps per the source flags;
		 * entropy is credited once, via pool_entropy below. */
		if (source->flags & RND_FLAG_COLLECT_VALUE) {
			rndpool_add_data(&rnd_global.pool, sample->values,
			    sample_count *
			    sizeof(sample->values[1]),
			    0);
		}
		if (source->flags & RND_FLAG_COLLECT_TIME) {
			rndpool_add_data(&rnd_global.pool, sample->ts,
			    sample_count *
			    sizeof(sample->ts[1]),
			    0);
		}

		pool_entropy += entropy;
		source->total += sample->entropy;
skip:	SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
	}
	rndpool_set_entropy_count(&rnd_global.pool, pool_entropy);
	rnd_entropy_added();
	mutex_spin_exit(&rnd_global.lock);

	/*
	 * If we filled the pool past the threshold, wake anyone
	 * waiting for entropy. Otherwise, ask all the entropy sources
	 * for more.
	 */
	if (pool_entropy > RND_ENTROPY_THRESHOLD * 8) {
		wake++;
	} else {
		rnd_getmore(howmany((RND_POOLBITS - pool_entropy), NBBY));
		rnd_printf_verbose("rnd: empty, asking for %d bytes\n",
		    (int)(howmany((RND_POOLBITS - pool_entropy), NBBY)));
	}

	/* Now we hold no locks: clean up. */
	while ((sample = SIMPLEQ_FIRST(&df_samples))) {
		SIMPLEQ_REMOVE_HEAD(&df_samples, next);
		rnd_sample_free(sample);
	}

	/*
	 * Wake up any potential readers waiting.
	 */
	if (wake) {
		rnd_schedule_wakeup();
	}
}
1115 1114
/* Softint handler: process whatever samples have been queued. */
static void
rnd_intr(void *arg)
{
	rnd_process_events();
}
1121 1120
/* Wakeup handler: distribute accumulated entropy to waiting rndsinks. */
static void
rnd_wake(void *arg)
{
	rndsinks_distribute();
}
1127 1126
/*
 * Extract up to len bytes from the global pool into p, honoring the
 * extraction flags.  On first use, stirs in the boot time; if initial
 * entropy is still low, mixes in a fresh counter sample.  Under
 * DIAGNOSTIC, runs a one-time FIPS 140 statistical test on pool output
 * (restoring the tested bits and entropy count afterward) before any
 * bits are handed out.  Returns the number of bytes extracted.
 */
static uint32_t
rnd_extract_data(void *p, uint32_t len, uint32_t flags)
{
	static int timed_in;	/* nonzero once boottime has been mixed in */
	int entropy_count;
	uint32_t retval;

	mutex_spin_enter(&rnd_global.lock);
	if (__predict_false(!timed_in)) {
		/* Only mix boottime if the clock has actually been set. */
		if (boottime.tv_sec) {
			rndpool_add_data(&rnd_global.pool, &boottime,
			    sizeof(boottime), 0);
		}
		timed_in++;
	}
	if (__predict_false(!rnd_initial_entropy)) {
		uint32_t c;

		rnd_printf_verbose("rnd: WARNING! initial entropy low (%u).\n",
		    rndpool_get_entropy_count(&rnd_global.pool));
		/* Try once again to put something in the pool */
		c = rnd_counter();
		rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1);
	}

#ifdef DIAGNOSTIC
	/* Loop (not just once): a failed run retries until the test passes. */
	while (!rnd_tested) {
		entropy_count = rndpool_get_entropy_count(&rnd_global.pool);
		rnd_printf_verbose("rnd: starting statistical RNG test,"
		    " entropy = %d.\n",
		    entropy_count);
		if (rndpool_extract_data(&rnd_global.pool, rnd_rt.rt_b,
			sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY)
		    != sizeof(rnd_rt.rt_b)) {
			panic("rnd: could not get bits for statistical test");
		}
		/*
		 * Stash the tested bits so we can put them back in the
		 * pool, restoring the entropy count. DO NOT rely on
		 * rngtest to maintain the bits pristine -- we could end
		 * up adding back non-random data claiming it were pure
		 * entropy.
		 */
		memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b));
		strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name));
		if (rngtest(&rnd_rt)) {
			/*
			 * The probability of a Type I error is 3/10000,
			 * but note this can only happen at boot time.
			 * The relevant standard says to reset the module,
			 * but developers objected...
			 */
			rnd_printf("rnd: WARNING, ENTROPY POOL FAILED "
			    "STATISTICAL TEST!\n");
			continue;
		}
		/* Passed: scrub scratch state and restore the tested bits. */
		memset(&rnd_rt, 0, sizeof(rnd_rt));
		rndpool_add_data(&rnd_global.pool, rnd_testbits,
		    sizeof(rnd_testbits), entropy_count);
		memset(rnd_testbits, 0, sizeof(rnd_testbits));
		rnd_printf_verbose("rnd: statistical RNG test done,"
		    " entropy = %d.\n",
		    rndpool_get_entropy_count(&rnd_global.pool));
		rnd_tested++;
	}
#endif
	entropy_count = rndpool_get_entropy_count(&rnd_global.pool);
	retval = rndpool_extract_data(&rnd_global.pool, p, len, flags);
	mutex_spin_exit(&rnd_global.lock);

	/* Pool running low relative to this request: ask sources for more. */
	if (entropy_count < (RND_ENTROPY_THRESHOLD * 2 + len) * NBBY) {
		rnd_printf_verbose("rnd: empty, asking for %d bytes\n",
		    (int)(howmany((RND_POOLBITS - entropy_count), NBBY)));
		rnd_getmore(howmany((RND_POOLBITS - entropy_count), NBBY));
	}

	return retval;
}
1206 1205
1207/* 1206/*
1208 * Fill the buffer with as much entropy as we can. Return true if it 1207 * Fill the buffer with as much entropy as we can. Return true if it
1209 * has full entropy and false if not. 1208 * has full entropy and false if not.
1210 */ 1209 */
1211bool 1210bool
1212rnd_extract(void *buffer, size_t bytes) 1211rnd_extract(void *buffer, size_t bytes)
1213{ 1212{
1214 const size_t extracted = rnd_extract_data(buffer, bytes, 1213 const size_t extracted = rnd_extract_data(buffer, bytes,
1215 RND_EXTRACT_GOOD); 1214 RND_EXTRACT_GOOD);
1216 1215
1217 if (extracted < bytes) { 1216 if (extracted < bytes) {
1218 rnd_getmore(bytes - extracted); 1217 rnd_getmore(bytes - extracted);
1219 (void)rnd_extract_data((uint8_t *)buffer + extracted, 1218 (void)rnd_extract_data((uint8_t *)buffer + extracted,
1220 bytes - extracted, RND_EXTRACT_ANY); 1219 bytes - extracted, RND_EXTRACT_ANY);
1221 return false; 1220 return false;
1222 } 1221 }
1223 1222
1224 return true; 1223 return true;
1225} 1224}
1226 1225
/*
 * If we have as much entropy as is requested, fill the buffer with it
 * and return true.  Otherwise, leave the buffer alone and return
 * false.
 */

/*
 * These guard the 32-bit arithmetic below: with bytes bounded by
 * RNDSINK_MAX_BYTES, (bytes + RND_ENTROPY_THRESHOLD) * NBBY cannot
 * overflow a uint32_t.
 */
CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL);
CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD));
CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <=
    (0xffffffffUL / NBBY));

bool
rnd_tryextract(void *buffer, size_t bytes)
{
	uint32_t bits_needed, bytes_requested;

	KASSERT(bytes <= RNDSINK_MAX_BYTES);
	/* Require a threshold's worth of slack beyond the request. */
	bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY);

	mutex_spin_enter(&rnd_global.lock);
	if (bits_needed <= rndpool_get_entropy_count(&rnd_global.pool)) {
		/* Enough entropy on hand: extraction must fully succeed. */
		const uint32_t extracted __diagused =
		    rndpool_extract_data(&rnd_global.pool, buffer, bytes,
			RND_EXTRACT_GOOD);

		KASSERT(extracted == bytes);
		bytes_requested = 0;
	} else {
		/* XXX Figure the threshold into this... */
		/* Short: compute how many bytes of input would close the gap. */
		bytes_requested = howmany((bits_needed -
			rndpool_get_entropy_count(&rnd_global.pool)), NBBY);
		KASSERT(0 < bytes_requested);
	}
	mutex_spin_exit(&rnd_global.lock);

	/* Kick off collection outside the lock to avoid lock recursion. */
	if (0 < bytes_requested)
		rnd_getmore(bytes_requested);

	/* True iff the buffer was filled above. */
	return bytes_requested == 0;
}
1267 1266
/*
 * Accept an entropy seed handed in by the bootloader (an rndsave_t
 * image).  The embedded SHA1 digest is verified before the seed is
 * trusted.  If the RNG is already initialized, the seed data is mixed
 * into the pool immediately (and then wiped); otherwise boot_rsp is
 * left pointing at it for later consumption.
 */
void
rnd_seed(void *base, size_t len)
{
	SHA1_CTX s;
	uint8_t digest[SHA1_DIGEST_LENGTH];

	/* The blob must be exactly one rndsave_t image. */
	if (len != sizeof(*boot_rsp)) {
		rnd_printf("rnd: bad seed length %d\n", (int)len);
		return;
	}

	/*
	 * NOTE(review): boot_rsp is assigned before the checksum is
	 * verified below, so a corrupt seed leaves boot_rsp non-NULL,
	 * which also suppresses later seeding via the RNDADDDATA ioctl
	 * -- confirm this is intentional.
	 */
	boot_rsp = (rndsave_t *)base;
	/* Recompute SHA1 over the entropy estimate plus the seed data. */
	SHA1Init(&s);
	SHA1Update(&s, (uint8_t *)&boot_rsp->entropy,
	    sizeof(boot_rsp->entropy));
	SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data));
	SHA1Final(digest, &s);

	if (memcmp(digest, boot_rsp->digest, sizeof(digest))) {
		rnd_printf("rnd: bad seed checksum\n");
		return;
	}

	/*
	 * It's not really well-defined whether bootloader-supplied
	 * modules run before or after rnd_init().  Handle both cases.
	 */
	if (rnd_ready) {
		rnd_printf_verbose("rnd: ready,"
		    " feeding in seed data directly.\n");
		mutex_spin_enter(&rnd_global.lock);
		/* Credit at most half the pool size, whatever is claimed. */
		rndpool_add_data(&rnd_global.pool, boot_rsp->data,
		    sizeof(boot_rsp->data),
		    MIN(boot_rsp->entropy, RND_POOLBITS / 2));
		/* Wipe the consumed seed so it cannot be reused. */
		memset(boot_rsp, 0, sizeof(*boot_rsp));
		mutex_spin_exit(&rnd_global.lock);
	} else {
		rnd_printf_verbose("rnd: not ready, deferring seed feed.\n");
	}
}
1308 1307
1309static void 1308static void
1310krndsource_to_rndsource(krndsource_t *kr, rndsource_t *r) 1309krndsource_to_rndsource(krndsource_t *kr, rndsource_t *r)
1311{ 1310{
1312 memset(r, 0, sizeof(*r)); 1311 memset(r, 0, sizeof(*r));
1313 strlcpy(r->name, kr->name, sizeof(r->name)); 1312 strlcpy(r->name, kr->name, sizeof(r->name));
1314 r->total = kr->total; 1313 r->total = kr->total;
1315 r->type = kr->type; 1314 r->type = kr->type;
1316 r->flags = kr->flags; 1315 r->flags = kr->flags;
1317} 1316}
1318 1317
1319static void 1318static void
1320krndsource_to_rndsource_est(krndsource_t *kr, rndsource_est_t *re) 1319krndsource_to_rndsource_est(krndsource_t *kr, rndsource_est_t *re)
1321{ 1320{
1322 memset(re, 0, sizeof(*re)); 1321 memset(re, 0, sizeof(*re));
1323 krndsource_to_rndsource(kr, &re->rt); 1322 krndsource_to_rndsource(kr, &re->rt);
1324 re->dt_samples = kr->time_delta.insamples; 1323 re->dt_samples = kr->time_delta.insamples;
1325 re->dt_total = kr->time_delta.outbits; 1324 re->dt_total = kr->time_delta.outbits;
1326 re->dv_samples = kr->value_delta.insamples; 1325 re->dv_samples = kr->value_delta.insamples;
1327 re->dv_total = kr->value_delta.outbits; 1326 re->dv_total = kr->value_delta.outbits;
1328} 1327}
1329 1328
1330static void 1329static void
1331krs_setflags(krndsource_t *kr, uint32_t flags, uint32_t mask) 1330krs_setflags(krndsource_t *kr, uint32_t flags, uint32_t mask)
1332{ 1331{
1333 uint32_t oflags = kr->flags; 1332 uint32_t oflags = kr->flags;
1334 1333
1335 kr->flags &= ~mask; 1334 kr->flags &= ~mask;
1336 kr->flags |= (flags & mask); 1335 kr->flags |= (flags & mask);
1337 1336
1338 if (oflags & RND_FLAG_HASENABLE && 1337 if (oflags & RND_FLAG_HASENABLE &&
1339 ((oflags & RND_FLAG_NO_COLLECT) != (flags & RND_FLAG_NO_COLLECT))) { 1338 ((oflags & RND_FLAG_NO_COLLECT) != (flags & RND_FLAG_NO_COLLECT))) {
1340 kr->enable(kr, !(flags & RND_FLAG_NO_COLLECT)); 1339 kr->enable(kr, !(flags & RND_FLAG_NO_COLLECT));
1341 } 1340 }
1342} 1341}
1343 1342
1344int 1343int
1345rnd_system_ioctl(struct file *fp, u_long cmd, void *addr) 1344rnd_system_ioctl(struct file *fp, u_long cmd, void *addr)
1346{ 1345{
1347 krndsource_t *kr; 1346 krndsource_t *kr;
1348 rndstat_t *rst; 1347 rndstat_t *rst;
1349 rndstat_name_t *rstnm; 1348 rndstat_name_t *rstnm;
1350 rndstat_est_t *rset; 1349 rndstat_est_t *rset;
1351 rndstat_est_name_t *rsetnm; 1350 rndstat_est_name_t *rsetnm;
1352 rndctl_t *rctl; 1351 rndctl_t *rctl;
1353 rnddata_t *rnddata; 1352 rnddata_t *rnddata;
1354 uint32_t count, start; 1353 uint32_t count, start;
1355 int ret = 0; 1354 int ret = 0;
1356 int estimate_ok = 0, estimate = 0; 1355 int estimate_ok = 0, estimate = 0;
1357 1356
1358 switch (cmd) { 1357 switch (cmd) {
1359 case RNDGETENTCNT: 1358 case RNDGETENTCNT:
1360 break; 1359 break;
1361 1360
1362 case RNDGETPOOLSTAT: 1361 case RNDGETPOOLSTAT:
1363 case RNDGETSRCNUM: 1362 case RNDGETSRCNUM:
1364 case RNDGETSRCNAME: 1363 case RNDGETSRCNAME:
1365 case RNDGETESTNUM: 1364 case RNDGETESTNUM:
1366 case RNDGETESTNAME: 1365 case RNDGETESTNAME:
1367 ret = kauth_authorize_device(curlwp->l_cred, 1366 ret = kauth_authorize_device(curlwp->l_cred,
1368 KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL); 1367 KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
1369 if (ret) 1368 if (ret)
1370 return (ret); 1369 return (ret);
1371 break; 1370 break;
1372 1371
1373 case RNDCTL: 1372 case RNDCTL:
1374 ret = kauth_authorize_device(curlwp->l_cred, 1373 ret = kauth_authorize_device(curlwp->l_cred,
1375 KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL); 1374 KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
1376 if (ret) 1375 if (ret)
1377 return (ret); 1376 return (ret);
1378 break; 1377 break;
1379 1378
1380 case RNDADDDATA: 1379 case RNDADDDATA:
1381 ret = kauth_authorize_device(curlwp->l_cred, 1380 ret = kauth_authorize_device(curlwp->l_cred,
1382 KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL); 1381 KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
1383 if (ret) 1382 if (ret)
1384 return (ret); 1383 return (ret);
1385 estimate_ok = !kauth_authorize_device(curlwp->l_cred, 1384 estimate_ok = !kauth_authorize_device(curlwp->l_cred,
1386 KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL); 1385 KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL);
1387 break; 1386 break;
1388 1387
1389 default: 1388 default:
1390#ifdef COMPAT_50 1389#ifdef COMPAT_50
1391 return compat_50_rnd_ioctl(fp, cmd, addr); 1390 return compat_50_rnd_ioctl(fp, cmd, addr);
1392#else 1391#else
1393 return ENOTTY; 1392 return ENOTTY;
1394#endif 1393#endif
1395 } 1394 }
1396 1395
1397 switch (cmd) { 1396 switch (cmd) {
1398 case RNDGETENTCNT: 1397 case RNDGETENTCNT:
1399 mutex_spin_enter(&rnd_global.lock); 1398 mutex_spin_enter(&rnd_global.lock);
1400 *(uint32_t *)addr = rndpool_get_entropy_count(&rnd_global.pool); 1399 *(uint32_t *)addr = rndpool_get_entropy_count(&rnd_global.pool);
1401 mutex_spin_exit(&rnd_global.lock); 1400 mutex_spin_exit(&rnd_global.lock);
1402 break; 1401 break;
1403 1402
1404 case RNDGETPOOLSTAT: 1403 case RNDGETPOOLSTAT:
1405 mutex_spin_enter(&rnd_global.lock); 1404 mutex_spin_enter(&rnd_global.lock);
1406 rndpool_get_stats(&rnd_global.pool, addr, 1405 rndpool_get_stats(&rnd_global.pool, addr,
1407 sizeof(rndpoolstat_t)); 1406 sizeof(rndpoolstat_t));
1408 mutex_spin_exit(&rnd_global.lock); 1407 mutex_spin_exit(&rnd_global.lock);
1409 break; 1408 break;
1410 1409
1411 case RNDGETSRCNUM: 1410 case RNDGETSRCNUM:
1412 rst = (rndstat_t *)addr; 1411 rst = (rndstat_t *)addr;
1413 1412
1414 if (rst->count == 0) 1413 if (rst->count == 0)
1415 break; 1414 break;
1416 1415
1417 if (rst->count > RND_MAXSTATCOUNT) 1416 if (rst->count > RND_MAXSTATCOUNT)
1418 return (EINVAL); 1417 return (EINVAL);
1419 1418
1420 mutex_spin_enter(&rnd_global.lock); 1419 mutex_spin_enter(&rnd_global.lock);
1421 /* 1420 /*
1422 * Find the starting source by running through the 1421 * Find the starting source by running through the
1423 * list of sources. 1422 * list of sources.
1424 */ 1423 */
1425 kr = LIST_FIRST(&rnd_global.sources); 1424 kr = LIST_FIRST(&rnd_global.sources);
1426 start = rst->start; 1425 start = rst->start;
1427 while (kr != NULL && start >= 1) { 1426 while (kr != NULL && start >= 1) {
1428 kr = LIST_NEXT(kr, list); 1427 kr = LIST_NEXT(kr, list);
1429 start--; 1428 start--;
1430 } 1429 }
1431 1430
1432 /* 1431 /*
1433 * Return up to as many structures as the user asked 1432 * Return up to as many structures as the user asked
1434 * for. If we run out of sources, a count of zero 1433 * for. If we run out of sources, a count of zero
1435 * will be returned, without an error. 1434 * will be returned, without an error.
1436 */ 1435 */
1437 for (count = 0; count < rst->count && kr != NULL; count++) { 1436 for (count = 0; count < rst->count && kr != NULL; count++) {
1438 krndsource_to_rndsource(kr, &rst->source[count]); 1437 krndsource_to_rndsource(kr, &rst->source[count]);
1439 kr = LIST_NEXT(kr, list); 1438 kr = LIST_NEXT(kr, list);
1440 } 1439 }
1441 1440
1442 rst->count = count; 1441 rst->count = count;
1443 1442
1444 mutex_spin_exit(&rnd_global.lock); 1443 mutex_spin_exit(&rnd_global.lock);
1445 break; 1444 break;
1446 1445
1447 case RNDGETESTNUM: 1446 case RNDGETESTNUM:
1448 rset = (rndstat_est_t *)addr; 1447 rset = (rndstat_est_t *)addr;
1449 1448
1450 if (rset->count == 0) 1449 if (rset->count == 0)
1451 break; 1450 break;
1452 1451
1453 if (rset->count > RND_MAXSTATCOUNT) 1452 if (rset->count > RND_MAXSTATCOUNT)
1454 return (EINVAL); 1453 return (EINVAL);
1455 1454
1456 mutex_spin_enter(&rnd_global.lock); 1455 mutex_spin_enter(&rnd_global.lock);
1457 /* 1456 /*
1458 * Find the starting source by running through the 1457 * Find the starting source by running through the
1459 * list of sources. 1458 * list of sources.
1460 */ 1459 */
1461 kr = LIST_FIRST(&rnd_global.sources); 1460 kr = LIST_FIRST(&rnd_global.sources);
1462 start = rset->start; 1461 start = rset->start;
1463 while (kr != NULL && start > 1) { 1462 while (kr != NULL && start > 1) {
1464 kr = LIST_NEXT(kr, list); 1463 kr = LIST_NEXT(kr, list);
1465 start--; 1464 start--;
1466 } 1465 }
1467 1466
1468 /* Return up to as many structures as the user asked 1467 /* Return up to as many structures as the user asked
1469 * for. If we run out of sources, a count of zero 1468 * for. If we run out of sources, a count of zero
1470 * will be returned, without an error. 1469 * will be returned, without an error.
1471 */ 1470 */
1472 for (count = 0; count < rset->count && kr != NULL; count++) { 1471 for (count = 0; count < rset->count && kr != NULL; count++) {
1473 krndsource_to_rndsource_est(kr, &rset->source[count]); 1472 krndsource_to_rndsource_est(kr, &rset->source[count]);
1474 kr = LIST_NEXT(kr, list); 1473 kr = LIST_NEXT(kr, list);
1475 } 1474 }
1476 1475
1477 rset->count = count; 1476 rset->count = count;
1478 1477
1479 mutex_spin_exit(&rnd_global.lock); 1478 mutex_spin_exit(&rnd_global.lock);
1480 break; 1479 break;
1481 1480
1482 case RNDGETSRCNAME: 1481 case RNDGETSRCNAME:
1483 /* 1482 /*
1484 * Scan through the list, trying to find the name. 1483 * Scan through the list, trying to find the name.
1485 */ 1484 */
1486 mutex_spin_enter(&rnd_global.lock); 1485 mutex_spin_enter(&rnd_global.lock);
1487 rstnm = (rndstat_name_t *)addr; 1486 rstnm = (rndstat_name_t *)addr;
1488 kr = LIST_FIRST(&rnd_global.sources); 1487 kr = LIST_FIRST(&rnd_global.sources);
1489 while (kr != NULL) { 1488 while (kr != NULL) {
1490 if (strncmp(kr->name, rstnm->name, 1489 if (strncmp(kr->name, rstnm->name,
1491 MIN(sizeof(kr->name), 1490 MIN(sizeof(kr->name),
1492 sizeof(rstnm->name))) == 0) { 1491 sizeof(rstnm->name))) == 0) {
1493 krndsource_to_rndsource(kr, &rstnm->source); 1492 krndsource_to_rndsource(kr, &rstnm->source);
1494 mutex_spin_exit(&rnd_global.lock); 1493 mutex_spin_exit(&rnd_global.lock);
1495 return (0); 1494 return (0);
1496 } 1495 }
1497 kr = LIST_NEXT(kr, list); 1496 kr = LIST_NEXT(kr, list);
1498 } 1497 }
1499 mutex_spin_exit(&rnd_global.lock); 1498 mutex_spin_exit(&rnd_global.lock);
1500 1499
1501 ret = ENOENT; /* name not found */ 1500 ret = ENOENT; /* name not found */
1502 1501
1503 break; 1502 break;
1504 1503
1505 case RNDGETESTNAME: 1504 case RNDGETESTNAME:
1506 /* 1505 /*
1507 * Scan through the list, trying to find the name. 1506 * Scan through the list, trying to find the name.
1508 */ 1507 */
1509 mutex_spin_enter(&rnd_global.lock); 1508 mutex_spin_enter(&rnd_global.lock);
1510 rsetnm = (rndstat_est_name_t *)addr; 1509 rsetnm = (rndstat_est_name_t *)addr;
1511 kr = LIST_FIRST(&rnd_global.sources); 1510 kr = LIST_FIRST(&rnd_global.sources);
1512 while (kr != NULL) { 1511 while (kr != NULL) {
1513 if (strncmp(kr->name, rsetnm->name, 1512 if (strncmp(kr->name, rsetnm->name,
1514 MIN(sizeof(kr->name), 1513 MIN(sizeof(kr->name),
1515 sizeof(rsetnm->name))) == 0) { 1514 sizeof(rsetnm->name))) == 0) {
1516 krndsource_to_rndsource_est(kr, 1515 krndsource_to_rndsource_est(kr,
1517 &rsetnm->source); 1516 &rsetnm->source);
1518 mutex_spin_exit(&rnd_global.lock); 1517 mutex_spin_exit(&rnd_global.lock);
1519 return (0); 1518 return (0);
1520 } 1519 }
1521 kr = LIST_NEXT(kr, list); 1520 kr = LIST_NEXT(kr, list);
1522 } 1521 }
1523 mutex_spin_exit(&rnd_global.lock); 1522 mutex_spin_exit(&rnd_global.lock);
1524 1523
1525 ret = ENOENT; /* name not found */ 1524 ret = ENOENT; /* name not found */
1526 1525
1527 break; 1526 break;
1528 1527
1529 case RNDCTL: 1528 case RNDCTL:
1530 /* 1529 /*
1531 * Set flags to enable/disable entropy counting and/or 1530 * Set flags to enable/disable entropy counting and/or
1532 * collection. 1531 * collection.
1533 */ 1532 */
1534 mutex_spin_enter(&rnd_global.lock); 1533 mutex_spin_enter(&rnd_global.lock);
1535 rctl = (rndctl_t *)addr; 1534 rctl = (rndctl_t *)addr;
1536 kr = LIST_FIRST(&rnd_global.sources); 1535 kr = LIST_FIRST(&rnd_global.sources);
1537 1536
1538 /* 1537 /*
1539 * Flags set apply to all sources of this type. 1538 * Flags set apply to all sources of this type.
1540 */ 1539 */
1541 if (rctl->type != 0xff) { 1540 if (rctl->type != 0xff) {
1542 while (kr != NULL) { 1541 while (kr != NULL) {
1543 if (kr->type == rctl->type) { 1542 if (kr->type == rctl->type) {
1544 krs_setflags(kr, 1543 krs_setflags(kr,
1545 rctl->flags, rctl->mask); 1544 rctl->flags, rctl->mask);
1546 } 1545 }
1547 kr = LIST_NEXT(kr, list); 1546 kr = LIST_NEXT(kr, list);
1548 } 1547 }
1549 mutex_spin_exit(&rnd_global.lock); 1548 mutex_spin_exit(&rnd_global.lock);
1550 return (0); 1549 return (0);
1551 } 1550 }
1552 1551
1553 /* 1552 /*
1554 * scan through the list, trying to find the name 1553 * scan through the list, trying to find the name
1555 */ 1554 */
1556 while (kr != NULL) { 1555 while (kr != NULL) {
1557 if (strncmp(kr->name, rctl->name, 1556 if (strncmp(kr->name, rctl->name,
1558 MIN(sizeof(kr->name), 1557 MIN(sizeof(kr->name),
1559 sizeof(rctl->name))) == 0) { 1558 sizeof(rctl->name))) == 0) {
1560 krs_setflags(kr, rctl->flags, rctl->mask); 1559 krs_setflags(kr, rctl->flags, rctl->mask);
1561 mutex_spin_exit(&rnd_global.lock); 1560 mutex_spin_exit(&rnd_global.lock);
1562 return (0); 1561 return (0);
1563 } 1562 }
1564 kr = LIST_NEXT(kr, list); 1563 kr = LIST_NEXT(kr, list);
1565 } 1564 }
1566 1565
1567 mutex_spin_exit(&rnd_global.lock); 1566 mutex_spin_exit(&rnd_global.lock);
1568 ret = ENOENT; /* name not found */ 1567 ret = ENOENT; /* name not found */
1569 1568
1570 break; 1569 break;
1571 1570
1572 case RNDADDDATA: 1571 case RNDADDDATA:
1573 /* 1572 /*
1574 * Don't seed twice if our bootloader has 1573 * Don't seed twice if our bootloader has
1575 * seed loading support. 1574 * seed loading support.
1576 */ 1575 */
1577 if (!boot_rsp) { 1576 if (!boot_rsp) {
1578 rnddata = (rnddata_t *)addr; 1577 rnddata = (rnddata_t *)addr;
1579 1578
1580 if (rnddata->len > sizeof(rnddata->data)) 1579 if (rnddata->len > sizeof(rnddata->data))
1581 return EINVAL; 1580 return EINVAL;
1582 1581
1583 if (estimate_ok) { 1582 if (estimate_ok) {
1584 /* 1583 /*
1585 * Do not accept absurd entropy estimates, and 1584 * Do not accept absurd entropy estimates, and
1586 * do not flood the pool with entropy such that 1585 * do not flood the pool with entropy such that
1587 * new samples are discarded henceforth. 1586 * new samples are discarded henceforth.
1588 */ 1587 */
1589 estimate = MIN((rnddata->len * NBBY) / 2, 1588 estimate = MIN((rnddata->len * NBBY) / 2,
1590 MIN(rnddata->entropy, 1589 MIN(rnddata->entropy,
1591 RND_POOLBITS / 2)); 1590 RND_POOLBITS / 2));
1592 } else { 1591 } else {
1593 estimate = 0; 1592 estimate = 0;
1594 } 1593 }
1595 1594
1596 mutex_spin_enter(&rnd_global.lock); 1595 mutex_spin_enter(&rnd_global.lock);
1597 rndpool_add_data(&rnd_global.pool, rnddata->data, 1596 rndpool_add_data(&rnd_global.pool, rnddata->data,
1598 rnddata->len, estimate); 1597 rnddata->len, estimate);
1599 rnd_entropy_added(); 1598 rnd_entropy_added();
1600 mutex_spin_exit(&rnd_global.lock); 1599 mutex_spin_exit(&rnd_global.lock);
1601 1600
1602 rndsinks_distribute(); 1601 rndsinks_distribute();
1603 } else { 1602 } else {
1604 rnd_printf_verbose("rnd" 1603 rnd_printf_verbose("rnd"
1605 ": already seeded by boot loader\n"); 1604 ": already seeded by boot loader\n");
1606 } 1605 }
1607 break; 1606 break;
1608 1607
1609 default: 1608 default:
1610 return ENOTTY; 1609 return ENOTTY;
1611 } 1610 }
1612 1611
1613 return (ret); 1612 return (ret);
1614} 1613}