Tue Apr 14 12:28:12 2015 UTC ()
Eliminate remaining cases of u_int*_t in kern_rndq.c.


(riastradh)
diff -r1.49 -r1.50 src/sys/kern/kern_rndq.c

cvs diff -r1.49 -r1.50 src/sys/kern/Attic/kern_rndq.c (switch to unified diff)

--- src/sys/kern/Attic/kern_rndq.c 2015/04/14 12:25:41 1.49
+++ src/sys/kern/Attic/kern_rndq.c 2015/04/14 12:28:12 1.50
@@ -1,1329 +1,1329 @@
1/* $NetBSD: kern_rndq.c,v 1.49 2015/04/14 12:25:41 riastradh Exp $ */ 1/* $NetBSD: kern_rndq.c,v 1.50 2015/04/14 12:28:12 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997-2013 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997-2013 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon. 8 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon.
9 * This code uses ideas and algorithms from the Linux driver written by 9 * This code uses ideas and algorithms from the Linux driver written by
10 * Ted Ts'o. 10 * Ted Ts'o.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
15 * 1. Redistributions of source code must retain the above copyright 15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer. 16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright 17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the 18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution. 19 * documentation and/or other materials provided with the distribution.
20 * 20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE. 31 * POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#include <sys/cdefs.h> 34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.49 2015/04/14 12:25:41 riastradh Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.50 2015/04/14 12:28:12 riastradh Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/atomic.h> 38#include <sys/atomic.h>
39#include <sys/ioctl.h> 39#include <sys/ioctl.h>
40#include <sys/fcntl.h> 40#include <sys/fcntl.h>
41#include <sys/select.h> 41#include <sys/select.h>
42#include <sys/poll.h> 42#include <sys/poll.h>
43#include <sys/kmem.h> 43#include <sys/kmem.h>
44#include <sys/mutex.h> 44#include <sys/mutex.h>
45#include <sys/proc.h> 45#include <sys/proc.h>
46#include <sys/kernel.h> 46#include <sys/kernel.h>
47#include <sys/conf.h> 47#include <sys/conf.h>
48#include <sys/systm.h> 48#include <sys/systm.h>
49#include <sys/callout.h> 49#include <sys/callout.h>
50#include <sys/intr.h> 50#include <sys/intr.h>
51#include <sys/rnd.h> 51#include <sys/rnd.h>
52#include <sys/rndpool.h> 52#include <sys/rndpool.h>
53#include <sys/rndsink.h> 53#include <sys/rndsink.h>
54#include <sys/rndsource.h> 54#include <sys/rndsource.h>
55#include <sys/vnode.h> 55#include <sys/vnode.h>
56#include <sys/pool.h> 56#include <sys/pool.h>
57#include <sys/kauth.h> 57#include <sys/kauth.h>
58#include <sys/once.h> 58#include <sys/once.h>
59#include <sys/rngtest.h> 59#include <sys/rngtest.h>
60 60
61#include <dev/rnd_private.h> 61#include <dev/rnd_private.h>
62 62
63#if defined(__HAVE_CPU_COUNTER) 63#if defined(__HAVE_CPU_COUNTER)
64#include <machine/cpu_counter.h> 64#include <machine/cpu_counter.h>
65#endif 65#endif
66 66
67#ifdef RND_DEBUG 67#ifdef RND_DEBUG
68#define DPRINTF(l,x) if (rnd_debug & (l)) rnd_printf x 68#define DPRINTF(l,x) if (rnd_debug & (l)) rnd_printf x
69int rnd_debug = 0; 69int rnd_debug = 0;
70#else 70#else
71#define DPRINTF(l,x) 71#define DPRINTF(l,x)
72#endif 72#endif
73 73
74/* 74/*
75 * list devices attached 75 * list devices attached
76 */ 76 */
77#if 0 77#if 0
78#define RND_VERBOSE 78#define RND_VERBOSE
79#endif 79#endif
80 80
81#ifdef RND_VERBOSE 81#ifdef RND_VERBOSE
82#define rnd_printf_verbose(fmt, ...) rnd_printf(fmt, ##__VA_ARGS__) 82#define rnd_printf_verbose(fmt, ...) rnd_printf(fmt, ##__VA_ARGS__)
83#else 83#else
84#define rnd_printf_verbose(fmt, ...) ((void)0) 84#define rnd_printf_verbose(fmt, ...) ((void)0)
85#endif 85#endif
86 86
87#ifdef RND_VERBOSE 87#ifdef RND_VERBOSE
88static unsigned int deltacnt; 88static unsigned int deltacnt;
89#endif 89#endif
90 90
91/* 91/*
92 * This is a little bit of state information attached to each device that we 92 * This is a little bit of state information attached to each device that we
93 * collect entropy from. This is simply a collection buffer, and when it 93 * collect entropy from. This is simply a collection buffer, and when it
94 * is full it will be "detached" from the source and added to the entropy 94 * is full it will be "detached" from the source and added to the entropy
95 * pool after entropy is distilled as much as possible. 95 * pool after entropy is distilled as much as possible.
96 */ 96 */
#define RND_SAMPLE_COUNT	64	/* collect N samples, then compress */
/*
 * One queued batch of entropy samples from a single source.  Samples
 * accumulate in ts[]/values[] until the buffer fills (RND_SAMPLE_COUNT
 * entries), then the whole buffer is handed to the sample queue for
 * processing in softint context.
 */
typedef struct _rnd_sample_t {
	SIMPLEQ_ENTRY(_rnd_sample_t) next;	/* linkage on rnd_samples.q */
	krndsource_t	*source;	/* source that produced these samples */
	int		cursor;		/* next free slot in ts[]/values[] */
	int		entropy;	/* entropy (bits) credited so far */
	uint32_t	ts[RND_SAMPLE_COUNT];		/* timestamps */
	uint32_t	values[RND_SAMPLE_COUNT];	/* sampled values */
} rnd_sample_t;

SIMPLEQ_HEAD(rnd_sampleq, _rnd_sample_t);
108 108
109/* 109/*
110 * The sample queue. Samples are put into the queue and processed in a 110 * The sample queue. Samples are put into the queue and processed in a
111 * softint in order to limit the latency of adding a sample. 111 * softint in order to limit the latency of adding a sample.
112 */ 112 */
113static struct { 113static struct {
114 kmutex_t lock; 114 kmutex_t lock;
115 struct rnd_sampleq q; 115 struct rnd_sampleq q;
116} rnd_samples __cacheline_aligned; 116} rnd_samples __cacheline_aligned;
117 117
118/* 118/*
119 * Memory pool for sample buffers 119 * Memory pool for sample buffers
120 */ 120 */
121static pool_cache_t rnd_mempc; 121static pool_cache_t rnd_mempc;
122 122
123/* 123/*
124 * Our random pool. This is defined here rather than using the general 124 * Our random pool. This is defined here rather than using the general
125 * purpose one defined in rndpool.c. 125 * purpose one defined in rndpool.c.
126 * 126 *
127 * Samples are collected and queued into a separate mutex-protected queue 127 * Samples are collected and queued into a separate mutex-protected queue
128 * (rnd_samples, see above), and processed in a timeout routine; therefore, 128 * (rnd_samples, see above), and processed in a timeout routine; therefore,
129 * the mutex protecting the random pool is at IPL_SOFTCLOCK() as well. 129 * the mutex protecting the random pool is at IPL_SOFTCLOCK() as well.
130 */ 130 */
131rndpool_t rnd_pool; 131rndpool_t rnd_pool;
132kmutex_t rndpool_mtx; 132kmutex_t rndpool_mtx;
133kcondvar_t rndpool_cv; 133kcondvar_t rndpool_cv;
134 134
135/* 135/*
136 * This source is used to easily "remove" queue entries when the source 136 * This source is used to easily "remove" queue entries when the source
137 * which actually generated the events is going away. 137 * which actually generated the events is going away.
138 */ 138 */
/*
 * This source is used to easily "remove" queue entries when the source
 * which actually generated the events is going away.  Its flags ensure
 * nothing queued against it is ever collected or estimated.
 */
static krndsource_t rnd_source_no_collect = {
	/* LIST_ENTRY list */
	.name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't',
		  0, 0, 0, 0, 0, 0, 0 },
	.total = 0,
	.type = RND_TYPE_UNKNOWN,
	.flags = (RND_FLAG_NO_COLLECT |
		  RND_FLAG_NO_ESTIMATE),
	.state = NULL,
	.test_cnt = 0,
	.test = NULL
};

/*
 * NOTE(review): presumably the catch-all source for samples added
 * without a registered krndsource_t -- confirm against the callers
 * of rnd_attach_source()/rnd_add_data() elsewhere in this file.
 */
static krndsource_t rnd_source_anonymous = {
	/* LIST_ENTRY list */
	.name = { 'A', 'n', 'o', 'n', 'y', 'm', 'o', 'u', 's',
		  0, 0, 0, 0, 0, 0, 0 },
	.total = 0,
	.type = RND_TYPE_UNKNOWN,
	.flags = (RND_FLAG_COLLECT_TIME|
		  RND_FLAG_COLLECT_VALUE|
		  RND_FLAG_ESTIMATE_TIME),
	.state = NULL,
	.test_cnt = 0,
	.test = NULL
};
165 165
166krndsource_t rnd_printf_source, rnd_autoconf_source; 166krndsource_t rnd_printf_source, rnd_autoconf_source;
167 167
168void *rnd_process, *rnd_wakeup; 168void *rnd_process, *rnd_wakeup;
169 169
170void rnd_wakeup_readers(void); 170void rnd_wakeup_readers(void);
171static inline uint32_t rnd_counter(void); 171static inline uint32_t rnd_counter(void);
172static void rnd_intr(void *); 172static void rnd_intr(void *);
173static void rnd_wake(void *); 173static void rnd_wake(void *);
174static void rnd_process_events(void); 174static void rnd_process_events(void);
175static void rnd_add_data_ts(krndsource_t *, const void *const, 175static void rnd_add_data_ts(krndsource_t *, const void *const,
176 uint32_t, uint32_t, uint32_t); 176 uint32_t, uint32_t, uint32_t);
177static inline void rnd_schedule_process(void); 177static inline void rnd_schedule_process(void);
178 178
179int rnd_ready = 0; 179int rnd_ready = 0;
180int rnd_initial_entropy = 0; 180int rnd_initial_entropy = 0;
181 181
182static int rnd_printing = 0; 182static int rnd_printing = 0;
183 183
184#ifdef DIAGNOSTIC 184#ifdef DIAGNOSTIC
185static int rnd_tested = 0; 185static int rnd_tested = 0;
186static rngtest_t rnd_rt; 186static rngtest_t rnd_rt;
187static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)]; 187static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)];
188#endif 188#endif
189 189
190struct rndsource_head rnd_sources; 190struct rndsource_head rnd_sources;
191 191
192rndsave_t *boot_rsp; 192rndsave_t *boot_rsp;
193 193
/*
 * Print a message to the console, guarding against recursion: printf
 * itself feeds the "printf" entropy source, so a print issued from
 * inside the entropy machinery could re-enter this path.  Re-entrant
 * calls silently drop their output.  The flag is not a full lock --
 * concurrent printers may race, at worst losing a message.
 */
static inline void
rnd_printf(const char *fmt, ...)
{
	va_list ap;

	membar_consumer();	/* observe other CPUs' rnd_printing store */
	if (rnd_printing) {
		return;		/* already printing -- drop this message */
	}
	rnd_printing = 1;
	membar_producer();	/* publish the flag before printing */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	rnd_printing = 0;
}
210 210
211void 211void
212rnd_init_softint(void) { 212rnd_init_softint(void) {
213 rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE, 213 rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
214 rnd_intr, NULL); 214 rnd_intr, NULL);
215 rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE, 215 rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE,
216 rnd_wake, NULL); 216 rnd_wake, NULL);
217 rnd_schedule_process(); 217 rnd_schedule_process();
218} 218}
219 219
220/* 220/*
221 * Generate a 32-bit counter. 221 * Generate a 32-bit counter.
222 */ 222 */
223static inline uint32_t 223static inline uint32_t
224rnd_counter(void) 224rnd_counter(void)
225{ 225{
226 struct bintime bt; 226 struct bintime bt;
227 uint32_t ret; 227 uint32_t ret;
228 228
229#if defined(__HAVE_CPU_COUNTER) 229#if defined(__HAVE_CPU_COUNTER)
230 if (cpu_hascounter()) 230 if (cpu_hascounter())
231 return cpu_counter32(); 231 return cpu_counter32();
232#endif 232#endif
233 if (!rnd_ready) 233 if (!rnd_ready)
234 /* Too early to call nanotime. */ 234 /* Too early to call nanotime. */
235 return 0; 235 return 0;
236 236
237 binuptime(&bt); 237 binuptime(&bt);
238 ret = bt.sec; 238 ret = bt.sec;
239 ret |= bt.sec >> 32; 239 ret |= bt.sec >> 32;
240 ret |= bt.frac; 240 ret |= bt.frac;
241 ret |= bt.frac >> 32; 241 ret |= bt.frac >> 32;
242 242
243 return ret; 243 return ret;
244} 244}
245 245
246/* 246/*
247 * We may be called from low IPL -- protect our softint. 247 * We may be called from low IPL -- protect our softint.
248 */ 248 */
249 249
/*
 * Schedule a softint with kernel preemption held off, so this is safe
 * to call from low IPL.
 */
static inline void
rnd_schedule_softint(void *si)
{

	kpreempt_disable();
	softint_schedule(si);
	kpreempt_enable();
}
257 257
258static inline void 258static inline void
259rnd_schedule_process(void) 259rnd_schedule_process(void)
260{ 260{
261 if (__predict_true(rnd_process)) { 261 if (__predict_true(rnd_process)) {
262 rnd_schedule_softint(rnd_process); 262 rnd_schedule_softint(rnd_process);
263 return; 263 return;
264 }  264 }
265 rnd_process_events(); 265 rnd_process_events();
266} 266}
267 267
268static inline void 268static inline void
269rnd_schedule_wakeup(void) 269rnd_schedule_wakeup(void)
270{ 270{
271 if (__predict_true(rnd_wakeup)) { 271 if (__predict_true(rnd_wakeup)) {
272 rnd_schedule_softint(rnd_wakeup); 272 rnd_schedule_softint(rnd_wakeup);
273 return; 273 return;
274 } 274 }
275 rnd_wakeup_readers(); 275 rnd_wakeup_readers();
276} 276}
277 277
278/* 278/*
279 * Tell any sources with "feed me" callbacks that we are hungry. 279 * Tell any sources with "feed me" callbacks that we are hungry.
280 */ 280 */
/*
 * Tell any sources with "feed me" callbacks that we are hungry.
 *
 * Called with rndpool_mtx held; each source's get callback runs under
 * that lock, so callbacks must not take it again.
 */
void
rnd_getmore(size_t byteswanted)
{
	krndsource_t *rs;

	KASSERT(mutex_owned(&rndpool_mtx));

	LIST_FOREACH(rs, &rnd_sources, list) {
		/* Only sources that registered a callback can be asked. */
		if (!ISSET(rs->flags, RND_FLAG_HASCB))
			continue;
		KASSERT(rs->get != NULL);
		KASSERT(rs->getarg != NULL);
		rs->get(byteswanted, rs->getarg);
		/*
		 * NOTE(review): %zu assumes rndpool_get_entropy_count()
		 * returns a size_t-sized value -- confirm against
		 * rndpool.h; otherwise the specifier is mismatched.
		 */
		rnd_printf_verbose("rnd: entropy estimate %zu bits\n",
		    rndpool_get_entropy_count(&rnd_pool));
		rnd_printf_verbose("rnd: asking source %s for %zu bytes\n",
		    rs->name, byteswanted);
	}
}
300 300
301/* 301/*
302 * Check to see if there are readers waiting on us. If so, kick them. 302 * Check to see if there are readers waiting on us. If so, kick them.
303 */ 303 */
/*
 * Check to see if there are readers waiting on us.  If so, kick them.
 * Also latches rnd_initial_entropy once the pool first crosses the
 * RND_ENTROPY_THRESHOLD mark.
 */
void
rnd_wakeup_readers(void)
{

	/*
	 * XXX This bookkeeping shouldn't be here -- this is not where
	 * the rnd_initial_entropy state change actually happens.
	 */
	mutex_spin_enter(&rndpool_mtx);
	const size_t entropy_count = rndpool_get_entropy_count(&rnd_pool);
	if (entropy_count < RND_ENTROPY_THRESHOLD * 8) {
		/* Not enough entropy yet; nothing to distribute. */
		mutex_spin_exit(&rndpool_mtx);
		return;
	} else {
#ifdef RND_VERBOSE
		if (__predict_false(!rnd_initial_entropy))
			rnd_printf_verbose("rnd: have initial entropy (%zu)\n",
			    entropy_count);
#endif
		/* Threshold reached at least once: record it permanently. */
		rnd_initial_entropy = 1;
	}
	mutex_spin_exit(&rndpool_mtx);

	/* Hand entropy to the registered sinks, outside the lock. */
	rndsinks_distribute();
}
329 329
330/* 330/*
331 * Use the timing/value of the event to estimate the entropy gathered. 331 * Use the timing/value of the event to estimate the entropy gathered.
332 * If all the differentials (first, second, and third) are non-zero, return 332 * If all the differentials (first, second, and third) are non-zero, return
333 * non-zero. If any of these are zero, return zero. 333 * non-zero. If any of these are zero, return zero.
334 */ 334 */
335static inline uint32_t 335static inline uint32_t
336rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta) 336rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta)
337{ 337{
338 int32_t delta2, delta3; 338 int32_t delta2, delta3;
339 339
340 d->insamples++; 340 d->insamples++;
341 341
342 /* 342 /*
343 * Calculate the second and third order differentials 343 * Calculate the second and third order differentials
344 */ 344 */
345 delta2 = d->dx - delta; 345 delta2 = d->dx - delta;
346 if (delta2 < 0) 346 if (delta2 < 0)
347 delta2 = -delta2; 347 delta2 = -delta2;
348 348
349 delta3 = d->d2x - delta2; 349 delta3 = d->d2x - delta2;
350 if (delta3 < 0) 350 if (delta3 < 0)
351 delta3 = -delta3; 351 delta3 = -delta3;
352 352
353 d->x = v; 353 d->x = v;
354 d->dx = delta; 354 d->dx = delta;
355 d->d2x = delta2; 355 d->d2x = delta2;
356 356
357 /* 357 /*
358 * If any delta is 0, we got no entropy. If all are non-zero, we 358 * If any delta is 0, we got no entropy. If all are non-zero, we
359 * might have something. 359 * might have something.
360 */ 360 */
361 if (delta == 0 || delta2 == 0 || delta3 == 0) 361 if (delta == 0 || delta2 == 0 || delta3 == 0)
362 return (0); 362 return (0);
363 363
364 d->outbits++; 364 d->outbits++;
365 return (1); 365 return (1);
366} 366}
367 367
368/* 368/*
369 * Delta estimator for 32-bit timeestamps. Must handle wrap. 369 * Delta estimator for 32-bit timeestamps. Must handle wrap.
370 */ 370 */
/*
 * Delta estimator for 32-bit timestamps.  Must handle wrap: when the
 * new timestamp is smaller than the previous one, the distance is
 * measured across the wrap point.  (The wrap arithmetic is off by one
 * -- the exact distance is UINT32_MAX - d->x + t + 1 -- harmless for
 * estimation purposes.)
 */
static inline uint32_t
rnd_dt_estimate(krndsource_t *rs, uint32_t t)
{
	int32_t delta;
	uint32_t ret;
	rnd_delta_t *d = &rs->time_delta;

	if (t < d->x) {
		/* Counter wrapped: distance across the wrap. */
		delta = UINT32_MAX - d->x + t;
	} else {
		/* d->x - t <= 0 here; the abs below yields t - d->x. */
		delta = d->x - t;
	}

	if (delta < 0) {
		delta = -delta;
	}

	ret = rnd_delta_estimate(d, t, delta);

	KASSERT(d->x == t);
	KASSERT(d->dx == delta);
#ifdef RND_VERBOSE
	if (deltacnt++ % 1151 == 0) {
		/*
		 * NOTE(review): %lld with (int) casts is a format
		 * mismatch; compare rnd_dv_estimate, which casts to
		 * long long int.  Only compiled under RND_VERBOSE.
		 */
		rnd_printf_verbose("rnd_dt_estimate: %s x = %lld, dx = %lld, "
		    "d2x = %lld\n", rs->name,
		    (int)d->x, (int)d->dx, (int)d->d2x);
	}
#endif
	return ret;
}
401 401
402/* 402/*
403 * Delta estimator for 32 or bit values. "Wrap" isn't. 403 * Delta estimator for 32 or bit values. "Wrap" isn't.
404 */ 404 */
405static inline uint32_t 405static inline uint32_t
406rnd_dv_estimate(krndsource_t *rs, uint32_t v) 406rnd_dv_estimate(krndsource_t *rs, uint32_t v)
407{ 407{
408 int32_t delta; 408 int32_t delta;
409 uint32_t ret; 409 uint32_t ret;
410 rnd_delta_t *d = &rs->value_delta; 410 rnd_delta_t *d = &rs->value_delta;
411 411
412 delta = d->x - v; 412 delta = d->x - v;
413 413
414 if (delta < 0) { 414 if (delta < 0) {
415 delta = -delta; 415 delta = -delta;
416 } 416 }
417 ret = rnd_delta_estimate(d, v, (uint32_t)delta); 417 ret = rnd_delta_estimate(d, v, (uint32_t)delta);
418 418
419 KASSERT(d->x == v); 419 KASSERT(d->x == v);
420 KASSERT(d->dx == delta); 420 KASSERT(d->dx == delta);
421#ifdef RND_VERBOSE 421#ifdef RND_VERBOSE
422 if (deltacnt++ % 1151 == 0) { 422 if (deltacnt++ % 1151 == 0) {
423 rnd_printf_verbose("rnd_dv_estimate: %s x = %lld, dx = %lld, " 423 rnd_printf_verbose("rnd_dv_estimate: %s x = %lld, dx = %lld, "
424 " d2x = %lld\n", rs->name, 424 " d2x = %lld\n", rs->name,
425 (long long int)d->x, 425 (long long int)d->x,
426 (long long int)d->dx, 426 (long long int)d->dx,
427 (long long int)d->d2x); 427 (long long int)d->d2x);
428 } 428 }
429#endif 429#endif
430 return ret; 430 return ret;
431} 431}
432 432
433#if defined(__HAVE_CPU_COUNTER) 433#if defined(__HAVE_CPU_COUNTER)
/*
 * State for the clock-skew entropy source: harvests drift between the
 * CPU cycle counter and the callout (tick) mechanism.  Only compiled
 * when the machine has a cycle counter (__HAVE_CPU_COUNTER).
 */
static struct {
	kmutex_t lock;			/* serializes the callout handlers */
	struct callout callout;		/* periodic sampling callout */
	struct callout stop_callout;	/* ends a measurement period */
	krndsource_t source;		/* the "callout" random source */
} rnd_skew __cacheline_aligned;

static void rnd_skew_intr(void *);
442 442
443static void 443static void
444rnd_skew_enable(krndsource_t *rs, bool enabled) 444rnd_skew_enable(krndsource_t *rs, bool enabled)
445{ 445{
446 446
447 if (enabled) { 447 if (enabled) {
448 rnd_skew_intr(rs); 448 rnd_skew_intr(rs);
449 } else { 449 } else {
450 callout_stop(&rnd_skew.callout); 450 callout_stop(&rnd_skew.callout);
451 } 451 }
452} 452}
453 453
/*
 * Callout handler that ends a measurement period by cancelling the
 * periodic sampling callout.
 */
static void
rnd_skew_stop_intr(void *arg)
{

	callout_stop(&rnd_skew.callout);
}
460 460
/*
 * "Feed me" callback for the skew source: start a fixed 30-second
 * burst of skew sampling.  The requested byte count is ignored.
 */
static void
rnd_skew_get(size_t bytes, void *priv)
{
	krndsource_t *skewsrcp = priv;

	KASSERT(skewsrcp == &rnd_skew.source);
	if (RND_ENABLED(skewsrcp)) {
		/* Measure for 30s */
		callout_schedule(&rnd_skew.stop_callout, hz * 30);
		callout_schedule(&rnd_skew.callout, 1);
	}
}
473 473
/*
 * Periodic callout: feed the cycle counter into the skew source on
 * every other invocation, alternating between a one-tick and a hz/10
 * reschedule so successive samples span different callout intervals.
 */
static void
rnd_skew_intr(void *arg)
{
	static int flipflop;

	/*
	 * Even on systems with seemingly stable clocks, the
	 * delta-time entropy estimator seems to think we get 1 bit here
	 * about every 2 calls.
	 *
	 */
	mutex_spin_enter(&rnd_skew.lock);
	flipflop = !flipflop;

	if (RND_ENABLED(&rnd_skew.source)) {
		if (flipflop) {
			/* Sample the counter and slow down for a while. */
			rnd_add_uint32(&rnd_skew.source, rnd_counter());
			callout_schedule(&rnd_skew.callout, hz / 10);
		} else {
			callout_schedule(&rnd_skew.callout, 1);
		}
	}
	mutex_spin_exit(&rnd_skew.lock);
}
498#endif 498#endif
499 499
500/* 500/*
501 * initialize the global random pool for our use. 501 * initialize the global random pool for our use.
502 * rnd_init() must be called very early on in the boot process, so 502 * rnd_init() must be called very early on in the boot process, so
503 * the pool is ready for other devices to attach as sources. 503 * the pool is ready for other devices to attach as sources.
504 */ 504 */
/*
 * initialize the global random pool for our use.
 * rnd_init() must be called very early on in the boot process, so
 * the pool is ready for other devices to attach as sources.
 */
void
rnd_init(void)
{
	uint32_t c;

	if (rnd_ready)		/* idempotent: ignore repeat calls */
		return;

	mutex_init(&rnd_samples.lock, MUTEX_DEFAULT, IPL_VM);
	rndsinks_init();

	/*
	 * take a counter early, hoping that there's some variance in
	 * the following operations
	 */
	c = rnd_counter();

	LIST_INIT(&rnd_sources);
	SIMPLEQ_INIT(&rnd_samples.q);

	rndpool_init(&rnd_pool);
	mutex_init(&rndpool_mtx, MUTEX_DEFAULT, IPL_VM);
	cv_init(&rndpool_cv, "rndread");

	rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0,
				    "rndsample", NULL, IPL_VM,
				    NULL, NULL, NULL);

	/*
	 * Set resource limit. The rnd_process_events() function
	 * is called every tick and process the sample queue.
	 * Without limitation, if a lot of rnd_add_*() are called,
	 * all kernel memory may be eaten up.
	 */
	pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0);

	/*
	 * Mix *something*, *anything* into the pool to help it get started.
	 * However, it's not safe for rnd_counter() to call microtime() yet,
	 * so on some platforms we might just end up with zeros anyway.
	 * XXX more things to add would be nice.
	 */
	if (c) {
		mutex_spin_enter(&rndpool_mtx);
		rndpool_add_data(&rnd_pool, &c, sizeof(c), 1);
		c = rnd_counter();
		rndpool_add_data(&rnd_pool, &c, sizeof(c), 1);
		mutex_spin_exit(&rndpool_mtx);
	}

	/*
	 * If we have a cycle counter, take its error with respect
	 * to the callout mechanism as a source of entropy, ala
	 * TrueRand.
	 *
	 */
#if defined(__HAVE_CPU_COUNTER)
	/* IPL_VM because taken while rndpool_mtx is held. */
	mutex_init(&rnd_skew.lock, MUTEX_DEFAULT, IPL_VM);
	callout_init(&rnd_skew.callout, CALLOUT_MPSAFE);
	callout_init(&rnd_skew.stop_callout, CALLOUT_MPSAFE);
	callout_setfunc(&rnd_skew.callout, rnd_skew_intr, NULL);
	callout_setfunc(&rnd_skew.stop_callout, rnd_skew_stop_intr, NULL);
	rndsource_setcb(&rnd_skew.source, rnd_skew_get, &rnd_skew.source);
	rndsource_setenable(&rnd_skew.source, rnd_skew_enable);
	rnd_attach_source(&rnd_skew.source, "callout", RND_TYPE_SKEW,
	    RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|
	    RND_FLAG_HASCB|RND_FLAG_HASENABLE);
	rnd_skew_intr(NULL);
#endif

	rnd_printf_verbose("rnd: initialised (%u)%s", RND_POOLBITS,
	    c ? " with counter\n" : "\n");
	if (boot_rsp != NULL) {
		/*
		 * Seed saved by a previous boot: credit at most half
		 * the pool's capacity, then erase the seed so it can
		 * never be credited twice.
		 */
		mutex_spin_enter(&rndpool_mtx);
		rndpool_add_data(&rnd_pool, boot_rsp->data,
				 sizeof(boot_rsp->data),
				 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
		if (rndpool_get_entropy_count(&rnd_pool) >
		    RND_ENTROPY_THRESHOLD * 8) {
			rnd_initial_entropy = 1;
		}
		mutex_spin_exit(&rndpool_mtx);
		rnd_printf("rnd: seeded with %d bits\n",
		    MIN(boot_rsp->entropy, RND_POOLBITS / 2));
		memset(boot_rsp, 0, sizeof(*boot_rsp));
	}
	rnd_attach_source(&rnd_source_anonymous, "Anonymous",
			  RND_TYPE_UNKNOWN,
			  RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE|
			  RND_FLAG_ESTIMATE_TIME);
	rnd_attach_source(&rnd_printf_source, "printf", RND_TYPE_UNKNOWN,
			  RND_FLAG_NO_ESTIMATE);
	rnd_attach_source(&rnd_autoconf_source, "autoconf",
			  RND_TYPE_UNKNOWN,
			  RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME);
	rnd_ready = 1;	/* from here on, rnd_counter() may use binuptime */
}
603 603
604static rnd_sample_t * 604static rnd_sample_t *
605rnd_sample_allocate(krndsource_t *source) 605rnd_sample_allocate(krndsource_t *source)
606{ 606{
607 rnd_sample_t *c; 607 rnd_sample_t *c;
608 608
609 c = pool_cache_get(rnd_mempc, PR_WAITOK); 609 c = pool_cache_get(rnd_mempc, PR_WAITOK);
610 if (c == NULL) 610 if (c == NULL)
611 return (NULL); 611 return (NULL);
612 612
613 c->source = source; 613 c->source = source;
614 c->cursor = 0; 614 c->cursor = 0;
615 c->entropy = 0; 615 c->entropy = 0;
616 616
617 return (c); 617 return (c);
618} 618}
619 619
620/* 620/*
621 * Don't wait on allocation. To be used in an interrupt context. 621 * Don't wait on allocation. To be used in an interrupt context.
622 */ 622 */
623static rnd_sample_t * 623static rnd_sample_t *
624rnd_sample_allocate_isr(krndsource_t *source) 624rnd_sample_allocate_isr(krndsource_t *source)
625{ 625{
626 rnd_sample_t *c; 626 rnd_sample_t *c;
627 627
628 c = pool_cache_get(rnd_mempc, PR_NOWAIT); 628 c = pool_cache_get(rnd_mempc, PR_NOWAIT);
629 if (c == NULL) 629 if (c == NULL)
630 return (NULL); 630 return (NULL);
631 631
632 c->source = source; 632 c->source = source;
633 c->cursor = 0; 633 c->cursor = 0;
634 c->entropy = 0; 634 c->entropy = 0;
635 635
636 return (c); 636 return (c);
637} 637}
638 638
639static void 639static void
640rnd_sample_free(rnd_sample_t *c) 640rnd_sample_free(rnd_sample_t *c)
641{ 641{
642 memset(c, 0, sizeof(*c)); 642 memset(c, 0, sizeof(*c));
643 pool_cache_put(rnd_mempc, c); 643 pool_cache_put(rnd_mempc, c);
644} 644}
645 645
646/* 646/*
647 * Add a source to our list of sources. 647 * Add a source to our list of sources.
648 */ 648 */
649void 649void
650rnd_attach_source(krndsource_t *rs, const char *name, uint32_t type, 650rnd_attach_source(krndsource_t *rs, const char *name, uint32_t type,
651 uint32_t flags) 651 uint32_t flags)
652{ 652{
653 uint32_t ts; 653 uint32_t ts;
654 654
655 ts = rnd_counter(); 655 ts = rnd_counter();
656 656
657 strlcpy(rs->name, name, sizeof(rs->name)); 657 strlcpy(rs->name, name, sizeof(rs->name));
658 memset(&rs->time_delta, 0, sizeof(rs->time_delta)); 658 memset(&rs->time_delta, 0, sizeof(rs->time_delta));
659 rs->time_delta.x = ts; 659 rs->time_delta.x = ts;
660 memset(&rs->value_delta, 0, sizeof(rs->value_delta)); 660 memset(&rs->value_delta, 0, sizeof(rs->value_delta));
661 rs->total = 0; 661 rs->total = 0;
662 662
663 /* 663 /*
664 * Some source setup, by type 664 * Some source setup, by type
665 */ 665 */
666 rs->test = NULL; 666 rs->test = NULL;
667 rs->test_cnt = -1; 667 rs->test_cnt = -1;
668 668
669 if (flags == 0) { 669 if (flags == 0) {
670 flags = RND_FLAG_DEFAULT; 670 flags = RND_FLAG_DEFAULT;
671 } 671 }
672 672
673 switch (type) { 673 switch (type) {
674 case RND_TYPE_NET: /* Don't collect by default */ 674 case RND_TYPE_NET: /* Don't collect by default */
675 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE); 675 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);
676 break; 676 break;
677 case RND_TYPE_RNG: /* Space for statistical testing */ 677 case RND_TYPE_RNG: /* Space for statistical testing */
678 rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP); 678 rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP);
679 rs->test_cnt = 0; 679 rs->test_cnt = 0;
680 /* FALLTHRU */ 680 /* FALLTHRU */
681 case RND_TYPE_VM: /* Process samples in bulk always */ 681 case RND_TYPE_VM: /* Process samples in bulk always */
682 flags |= RND_FLAG_FAST; 682 flags |= RND_FLAG_FAST;
683 break; 683 break;
684 default: 684 default:
685 break; 685 break;
686 } 686 }
687 687
688 rs->type = type; 688 rs->type = type;
689 rs->flags = flags; 689 rs->flags = flags;
690 690
691 rs->state = rnd_sample_allocate(rs); 691 rs->state = rnd_sample_allocate(rs);
692 692
693 mutex_spin_enter(&rndpool_mtx); 693 mutex_spin_enter(&rndpool_mtx);
694 LIST_INSERT_HEAD(&rnd_sources, rs, list); 694 LIST_INSERT_HEAD(&rnd_sources, rs, list);
695 695
696#ifdef RND_VERBOSE 696#ifdef RND_VERBOSE
697 rnd_printf_verbose("rnd: %s attached as an entropy source (", 697 rnd_printf_verbose("rnd: %s attached as an entropy source (",
698 rs->name); 698 rs->name);
699 if (!(flags & RND_FLAG_NO_COLLECT)) { 699 if (!(flags & RND_FLAG_NO_COLLECT)) {
700 rnd_printf_verbose("collecting"); 700 rnd_printf_verbose("collecting");
701 if (flags & RND_FLAG_NO_ESTIMATE) 701 if (flags & RND_FLAG_NO_ESTIMATE)
702 rnd_printf_verbose(" without estimation"); 702 rnd_printf_verbose(" without estimation");
703 } 703 }
704 else 704 else
705 rnd_printf_verbose("off"); 705 rnd_printf_verbose("off");
706 rnd_printf_verbose(")\n"); 706 rnd_printf_verbose(")\n");
707#endif 707#endif
708 708
709 /* 709 /*
710 * Again, put some more initial junk in the pool. 710 * Again, put some more initial junk in the pool.
711 * FreeBSD claim to have an analysis that show 4 bits of 711 * FreeBSD claim to have an analysis that show 4 bits of
712 * entropy per source-attach timestamp. I am skeptical, 712 * entropy per source-attach timestamp. I am skeptical,
713 * but we count 1 bit per source here. 713 * but we count 1 bit per source here.
714 */ 714 */
715 rndpool_add_data(&rnd_pool, &ts, sizeof(ts), 1); 715 rndpool_add_data(&rnd_pool, &ts, sizeof(ts), 1);
716 mutex_spin_exit(&rndpool_mtx); 716 mutex_spin_exit(&rndpool_mtx);
717} 717}
718 718
719/* 719/*
720 * Remove a source from our list of sources. 720 * Remove a source from our list of sources.
721 */ 721 */
722void 722void
723rnd_detach_source(krndsource_t *source) 723rnd_detach_source(krndsource_t *source)
724{ 724{
725 rnd_sample_t *sample; 725 rnd_sample_t *sample;
726 726
727 mutex_spin_enter(&rndpool_mtx); 727 mutex_spin_enter(&rndpool_mtx);
728 LIST_REMOVE(source, list); 728 LIST_REMOVE(source, list);
729 mutex_spin_exit(&rndpool_mtx); 729 mutex_spin_exit(&rndpool_mtx);
730 730
731 /* 731 /*
732 * If there are samples queued up "remove" them from the sample queue 732 * If there are samples queued up "remove" them from the sample queue
733 * by setting the source to the no-collect pseudosource. 733 * by setting the source to the no-collect pseudosource.
734 */ 734 */
735 mutex_spin_enter(&rnd_samples.lock); 735 mutex_spin_enter(&rnd_samples.lock);
736 sample = SIMPLEQ_FIRST(&rnd_samples.q); 736 sample = SIMPLEQ_FIRST(&rnd_samples.q);
737 while (sample != NULL) { 737 while (sample != NULL) {
738 if (sample->source == source) 738 if (sample->source == source)
739 sample->source = &rnd_source_no_collect; 739 sample->source = &rnd_source_no_collect;
740 740
741 sample = SIMPLEQ_NEXT(sample, next); 741 sample = SIMPLEQ_NEXT(sample, next);
742 } 742 }
743 mutex_spin_exit(&rnd_samples.lock); 743 mutex_spin_exit(&rnd_samples.lock);
744 744
745 if (source->state) { 745 if (source->state) {
746 rnd_sample_free(source->state); 746 rnd_sample_free(source->state);
747 source->state = NULL; 747 source->state = NULL;
748 } 748 }
749 749
750 if (source->test) { 750 if (source->test) {
751 kmem_free(source->test, sizeof(rngtest_t)); 751 kmem_free(source->test, sizeof(rngtest_t));
752 } 752 }
753 753
754 rnd_printf_verbose("rnd: %s detached as an entropy source\n", 754 rnd_printf_verbose("rnd: %s detached as an entropy source\n",
755 source->name); 755 source->name);
756} 756}
757 757
758static inline uint32_t 758static inline uint32_t
759rnd_estimate(krndsource_t *rs, uint32_t ts, uint32_t val) 759rnd_estimate(krndsource_t *rs, uint32_t ts, uint32_t val)
760{ 760{
761 uint32_t entropy = 0, dt_est, dv_est; 761 uint32_t entropy = 0, dt_est, dv_est;
762 762
763 dt_est = rnd_dt_estimate(rs, ts); 763 dt_est = rnd_dt_estimate(rs, ts);
764 dv_est = rnd_dv_estimate(rs, val); 764 dv_est = rnd_dv_estimate(rs, val);
765 765
766 if (!(rs->flags & RND_FLAG_NO_ESTIMATE)) { 766 if (!(rs->flags & RND_FLAG_NO_ESTIMATE)) {
767 if (rs->flags & RND_FLAG_ESTIMATE_TIME) { 767 if (rs->flags & RND_FLAG_ESTIMATE_TIME) {
768 entropy += dt_est; 768 entropy += dt_est;
769 } 769 }
770 770
771 if (rs->flags & RND_FLAG_ESTIMATE_VALUE) { 771 if (rs->flags & RND_FLAG_ESTIMATE_VALUE) {
772 entropy += dv_est; 772 entropy += dv_est;
773 } 773 }
774 774
775 } 775 }
776 return entropy; 776 return entropy;
777} 777}
778 778
779/* 779/*
780 * Add a 32-bit value to the entropy pool. The rs parameter should point to 780 * Add a 32-bit value to the entropy pool. The rs parameter should point to
781 * the source-specific source structure. 781 * the source-specific source structure.
782 */ 782 */
783void 783void
784_rnd_add_uint32(krndsource_t *rs, uint32_t val) 784_rnd_add_uint32(krndsource_t *rs, uint32_t val)
785{ 785{
786 uint32_t ts;  786 uint32_t ts;
787 uint32_t entropy = 0; 787 uint32_t entropy = 0;
788 788
789 if (rs->flags & RND_FLAG_NO_COLLECT) 789 if (rs->flags & RND_FLAG_NO_COLLECT)
790 return; 790 return;
791 791
792 /* 792 /*
793 * Sample the counter as soon as possible to avoid 793 * Sample the counter as soon as possible to avoid
794 * entropy overestimation. 794 * entropy overestimation.
795 */ 795 */
796 ts = rnd_counter(); 796 ts = rnd_counter();
797 797
798 /* 798 /*
799 * Calculate estimates - we may not use them, but if we do 799 * Calculate estimates - we may not use them, but if we do
800 * not calculate them, the estimators' history becomes invalid. 800 * not calculate them, the estimators' history becomes invalid.
801 */ 801 */
802 entropy = rnd_estimate(rs, ts, val); 802 entropy = rnd_estimate(rs, ts, val);
803 803
804 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); 804 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts);
805} 805}
806 806
807void 807void
808_rnd_add_uint64(krndsource_t *rs, uint64_t val) 808_rnd_add_uint64(krndsource_t *rs, uint64_t val)
809{ 809{
810 uint32_t ts;  810 uint32_t ts;
811 uint32_t entropy = 0; 811 uint32_t entropy = 0;
812 812
813 if (rs->flags & RND_FLAG_NO_COLLECT) 813 if (rs->flags & RND_FLAG_NO_COLLECT)
814 return; 814 return;
815 815
816 /* 816 /*
817 * Sample the counter as soon as possible to avoid 817 * Sample the counter as soon as possible to avoid
818 * entropy overestimation. 818 * entropy overestimation.
819 */ 819 */
820 ts = rnd_counter(); 820 ts = rnd_counter();
821 821
822 /* 822 /*
823 * Calculate estimates - we may not use them, but if we do 823 * Calculate estimates - we may not use them, but if we do
824 * not calculate them, the estimators' history becomes invalid. 824 * not calculate them, the estimators' history becomes invalid.
825 */ 825 */
826 entropy = rnd_estimate(rs, ts, (uint32_t)(val & (uint64_t)0xffffffff)); 826 entropy = rnd_estimate(rs, ts, (uint32_t)(val & (uint64_t)0xffffffff));
827 827
828 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); 828 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts);
829} 829}
830 830
831void 831void
832rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len, 832rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len,
833 uint32_t entropy) 833 uint32_t entropy)
834{ 834{
835 /* 835 /*
836 * This interface is meant for feeding data which is, 836 * This interface is meant for feeding data which is,
837 * itself, random. Don't estimate entropy based on 837 * itself, random. Don't estimate entropy based on
838 * timestamp, just directly add the data. 838 * timestamp, just directly add the data.
839 */ 839 */
840 if (__predict_false(rs == NULL)) { 840 if (__predict_false(rs == NULL)) {
841 mutex_spin_enter(&rndpool_mtx); 841 mutex_spin_enter(&rndpool_mtx);
842 rndpool_add_data(&rnd_pool, data, len, entropy); 842 rndpool_add_data(&rnd_pool, data, len, entropy);
843 mutex_spin_exit(&rndpool_mtx); 843 mutex_spin_exit(&rndpool_mtx);
844 } else { 844 } else {
845 rnd_add_data_ts(rs, data, len, entropy, rnd_counter()); 845 rnd_add_data_ts(rs, data, len, entropy, rnd_counter());
846 } 846 }
847} 847}
848 848
849static void 849static void
850rnd_add_data_ts(krndsource_t *rs, const void *const data, u_int32_t len, 850rnd_add_data_ts(krndsource_t *rs, const void *const data, uint32_t len,
851 u_int32_t entropy, uint32_t ts) 851 uint32_t entropy, uint32_t ts)
852{ 852{
853 rnd_sample_t *state = NULL; 853 rnd_sample_t *state = NULL;
854 const uint8_t *p = data; 854 const uint8_t *p = data;
855 uint32_t dint; 855 uint32_t dint;
856 int todo, done, filled = 0; 856 int todo, done, filled = 0;
857 int sample_count; 857 int sample_count;
858 struct rnd_sampleq tmp_samples = SIMPLEQ_HEAD_INITIALIZER(tmp_samples); 858 struct rnd_sampleq tmp_samples = SIMPLEQ_HEAD_INITIALIZER(tmp_samples);
859 859
860 if (rs && (rs->flags & RND_FLAG_NO_COLLECT || 860 if (rs && (rs->flags & RND_FLAG_NO_COLLECT ||
861 __predict_false(!(rs->flags &  861 __predict_false(!(rs->flags &
862 (RND_FLAG_COLLECT_TIME| 862 (RND_FLAG_COLLECT_TIME|
863 RND_FLAG_COLLECT_VALUE))))) { 863 RND_FLAG_COLLECT_VALUE))))) {
864 return; 864 return;
865 } 865 }
866 todo = len / sizeof(dint); 866 todo = len / sizeof(dint);
867 /* 867 /*
868 * Let's try to be efficient: if we are warm, and a source 868 * Let's try to be efficient: if we are warm, and a source
869 * is adding entropy at a rate of at least 1 bit every 10 seconds, 869 * is adding entropy at a rate of at least 1 bit every 10 seconds,
870 * mark it as "fast" and add its samples in bulk. 870 * mark it as "fast" and add its samples in bulk.
871 */ 871 */
872 if (__predict_true(rs->flags & RND_FLAG_FAST) || 872 if (__predict_true(rs->flags & RND_FLAG_FAST) ||
873 (todo >= RND_SAMPLE_COUNT)) { 873 (todo >= RND_SAMPLE_COUNT)) {
874 sample_count = RND_SAMPLE_COUNT; 874 sample_count = RND_SAMPLE_COUNT;
875 } else { 875 } else {
876 if (!(rs->flags & RND_FLAG_HASCB) && 876 if (!(rs->flags & RND_FLAG_HASCB) &&
877 !cold && rnd_initial_entropy) { 877 !cold && rnd_initial_entropy) {
878 struct timeval upt; 878 struct timeval upt;
879 879
880 getmicrouptime(&upt); 880 getmicrouptime(&upt);
881 if ( (upt.tv_sec > 0 && rs->total > upt.tv_sec * 10) || 881 if ( (upt.tv_sec > 0 && rs->total > upt.tv_sec * 10) ||
882 (upt.tv_sec > 10 && rs->total > upt.tv_sec) || 882 (upt.tv_sec > 10 && rs->total > upt.tv_sec) ||
883 (upt.tv_sec > 100 && 883 (upt.tv_sec > 100 &&
884 rs->total > upt.tv_sec / 10)) { 884 rs->total > upt.tv_sec / 10)) {
885 rnd_printf_verbose("rnd: source %s is fast" 885 rnd_printf_verbose("rnd: source %s is fast"
886 " (%d samples at once," 886 " (%d samples at once,"
887 " %d bits in %lld seconds), " 887 " %d bits in %lld seconds), "
888 "processing samples in bulk.\n", 888 "processing samples in bulk.\n",
889 rs->name, todo, rs->total, 889 rs->name, todo, rs->total,
890 (long long int)upt.tv_sec); 890 (long long int)upt.tv_sec);
891 rs->flags |= RND_FLAG_FAST; 891 rs->flags |= RND_FLAG_FAST;
892 } 892 }
893 } 893 }
894 sample_count = 2; 894 sample_count = 2;
895 } 895 }
896 896
897 /* 897 /*
898 * Loop over data packaging it into sample buffers. 898 * Loop over data packaging it into sample buffers.
899 * If a sample buffer allocation fails, drop all data. 899 * If a sample buffer allocation fails, drop all data.
900 */ 900 */
901 for (done = 0; done < todo ; done++) { 901 for (done = 0; done < todo ; done++) {
902 state = rs->state; 902 state = rs->state;
903 if (state == NULL) { 903 if (state == NULL) {
904 state = rnd_sample_allocate_isr(rs); 904 state = rnd_sample_allocate_isr(rs);
905 if (__predict_false(state == NULL)) { 905 if (__predict_false(state == NULL)) {
906 break; 906 break;
907 } 907 }
908 rs->state = state; 908 rs->state = state;
909 } 909 }
910 910
911 state->ts[state->cursor] = ts; 911 state->ts[state->cursor] = ts;
912 (void)memcpy(&dint, &p[done*4], 4); 912 (void)memcpy(&dint, &p[done*4], 4);
913 state->values[state->cursor] = dint; 913 state->values[state->cursor] = dint;
914 state->cursor++; 914 state->cursor++;
915 915
916 if (state->cursor == sample_count) { 916 if (state->cursor == sample_count) {
917 SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next); 917 SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next);
918 filled++; 918 filled++;
919 rs->state = NULL; 919 rs->state = NULL;
920 } 920 }
921 } 921 }
922 922
923 if (__predict_false(state == NULL)) { 923 if (__predict_false(state == NULL)) {
924 while ((state = SIMPLEQ_FIRST(&tmp_samples))) { 924 while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
925 SIMPLEQ_REMOVE_HEAD(&tmp_samples, next); 925 SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
926 rnd_sample_free(state); 926 rnd_sample_free(state);
927 } 927 }
928 return; 928 return;
929 } 929 }
930 930
931 /* 931 /*
932 * Claim all the entropy on the last one we send to 932 * Claim all the entropy on the last one we send to
933 * the pool, so we don't rely on it being evenly distributed 933 * the pool, so we don't rely on it being evenly distributed
934 * in the supplied data. 934 * in the supplied data.
935 * 935 *
936 * XXX The rndpool code must accept samples with more 936 * XXX The rndpool code must accept samples with more
937 * XXX claimed entropy than bits for this to work right. 937 * XXX claimed entropy than bits for this to work right.
938 */ 938 */
939 state->entropy += entropy; 939 state->entropy += entropy;
940 rs->total += entropy; 940 rs->total += entropy;
941 941
942 /* 942 /*
943 * If we didn't finish any sample buffers, we're done. 943 * If we didn't finish any sample buffers, we're done.
944 */ 944 */
945 if (!filled) { 945 if (!filled) {
946 return; 946 return;
947 } 947 }
948 948
949 mutex_spin_enter(&rnd_samples.lock); 949 mutex_spin_enter(&rnd_samples.lock);
950 while ((state = SIMPLEQ_FIRST(&tmp_samples))) { 950 while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
951 SIMPLEQ_REMOVE_HEAD(&tmp_samples, next); 951 SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
952 SIMPLEQ_INSERT_HEAD(&rnd_samples.q, state, next); 952 SIMPLEQ_INSERT_HEAD(&rnd_samples.q, state, next);
953 } 953 }
954 mutex_spin_exit(&rnd_samples.lock); 954 mutex_spin_exit(&rnd_samples.lock);
955 955
956 /* Cause processing of queued samples */ 956 /* Cause processing of queued samples */
957 rnd_schedule_process(); 957 rnd_schedule_process();
958} 958}
959 959
960static int 960static int
961rnd_hwrng_test(rnd_sample_t *sample) 961rnd_hwrng_test(rnd_sample_t *sample)
962{ 962{
963 krndsource_t *source = sample->source; 963 krndsource_t *source = sample->source;
964 size_t cmplen; 964 size_t cmplen;
965 uint8_t *v1, *v2; 965 uint8_t *v1, *v2;
966 size_t resid, totest; 966 size_t resid, totest;
967 967
968 KASSERT(source->type == RND_TYPE_RNG); 968 KASSERT(source->type == RND_TYPE_RNG);
969 969
970 /* 970 /*
971 * Continuous-output test: compare two halves of the 971 * Continuous-output test: compare two halves of the
972 * sample buffer to each other. The sample buffer (64 ints, 972 * sample buffer to each other. The sample buffer (64 ints,
973 * so either 256 or 512 bytes on any modern machine) should be 973 * so either 256 or 512 bytes on any modern machine) should be
974 * much larger than a typical hardware RNG output, so this seems 974 * much larger than a typical hardware RNG output, so this seems
975 * a reasonable way to do it without retaining extra data. 975 * a reasonable way to do it without retaining extra data.
976 */ 976 */
977 cmplen = sizeof(sample->values) / 2; 977 cmplen = sizeof(sample->values) / 2;
978 v1 = (uint8_t *)sample->values; 978 v1 = (uint8_t *)sample->values;
979 v2 = (uint8_t *)sample->values + cmplen; 979 v2 = (uint8_t *)sample->values + cmplen;
980 980
981 if (__predict_false(!memcmp(v1, v2, cmplen))) { 981 if (__predict_false(!memcmp(v1, v2, cmplen))) {
982 rnd_printf("rnd: source \"%s\" failed continuous-output test.\n", 982 rnd_printf("rnd: source \"%s\" failed continuous-output test.\n",
983 source->name); 983 source->name);
984 return 1; 984 return 1;
985 } 985 }
986 986
987 /* 987 /*
988 * FIPS 140 statistical RNG test. We must accumulate 20,000 bits. 988 * FIPS 140 statistical RNG test. We must accumulate 20,000 bits.
989 */ 989 */
990 if (__predict_true(source->test_cnt == -1)) { 990 if (__predict_true(source->test_cnt == -1)) {
991 /* already passed the test */ 991 /* already passed the test */
992 return 0; 992 return 0;
993 } 993 }
994 resid = FIPS140_RNG_TEST_BYTES - source->test_cnt; 994 resid = FIPS140_RNG_TEST_BYTES - source->test_cnt;
995 totest = MIN(RND_SAMPLE_COUNT * 4, resid); 995 totest = MIN(RND_SAMPLE_COUNT * 4, resid);
996 memcpy(source->test->rt_b + source->test_cnt, sample->values, totest); 996 memcpy(source->test->rt_b + source->test_cnt, sample->values, totest);
997 resid -= totest; 997 resid -= totest;
998 source->test_cnt += totest; 998 source->test_cnt += totest;
999 if (resid == 0) { 999 if (resid == 0) {
1000 strlcpy(source->test->rt_name, source->name, 1000 strlcpy(source->test->rt_name, source->name,
1001 sizeof(source->test->rt_name)); 1001 sizeof(source->test->rt_name));
1002 if (rngtest(source->test)) { 1002 if (rngtest(source->test)) {
1003 rnd_printf("rnd: source \"%s\" failed statistical test.", 1003 rnd_printf("rnd: source \"%s\" failed statistical test.",
1004 source->name); 1004 source->name);
1005 return 1; 1005 return 1;
1006 } 1006 }
1007 source->test_cnt = -1; 1007 source->test_cnt = -1;
1008 memset(source->test, 0, sizeof(*source->test)); 1008 memset(source->test, 0, sizeof(*source->test));
1009 } 1009 }
1010 return 0; 1010 return 0;
1011} 1011}
1012 1012
1013/* 1013/*
1014 * Process the events in the ring buffer. Called by rnd_timeout or 1014 * Process the events in the ring buffer. Called by rnd_timeout or
1015 * by the add routines directly if the callout has never fired (that 1015 * by the add routines directly if the callout has never fired (that
1016 * is, if we are "cold" -- just booted). 1016 * is, if we are "cold" -- just booted).
1017 * 1017 *
1018 */ 1018 */
1019static void 1019static void
1020rnd_process_events(void) 1020rnd_process_events(void)
1021{ 1021{
1022 rnd_sample_t *sample = NULL; 1022 rnd_sample_t *sample = NULL;
1023 krndsource_t *source, *badsource = NULL; 1023 krndsource_t *source, *badsource = NULL;
1024 static krndsource_t *last_source; 1024 static krndsource_t *last_source;
1025 u_int32_t entropy; 1025 uint32_t entropy;
1026 size_t pool_entropy; 1026 size_t pool_entropy;
1027 int found = 0, wake = 0; 1027 int found = 0, wake = 0;
1028 struct rnd_sampleq dq_samples = SIMPLEQ_HEAD_INITIALIZER(dq_samples); 1028 struct rnd_sampleq dq_samples = SIMPLEQ_HEAD_INITIALIZER(dq_samples);
1029 struct rnd_sampleq df_samples = SIMPLEQ_HEAD_INITIALIZER(df_samples); 1029 struct rnd_sampleq df_samples = SIMPLEQ_HEAD_INITIALIZER(df_samples);
1030 1030
1031 /* 1031 /*
1032 * Drain to the on-stack queue and drop the lock. 1032 * Drain to the on-stack queue and drop the lock.
1033 */ 1033 */
1034 mutex_spin_enter(&rnd_samples.lock); 1034 mutex_spin_enter(&rnd_samples.lock);
1035 while ((sample = SIMPLEQ_FIRST(&rnd_samples.q))) { 1035 while ((sample = SIMPLEQ_FIRST(&rnd_samples.q))) {
1036 found++; 1036 found++;
1037 SIMPLEQ_REMOVE_HEAD(&rnd_samples.q, next); 1037 SIMPLEQ_REMOVE_HEAD(&rnd_samples.q, next);
1038 /* 1038 /*
1039 * We repeat this check here, since it is possible 1039 * We repeat this check here, since it is possible
1040 * the source was disabled before we were called, but 1040 * the source was disabled before we were called, but
1041 * after the entry was queued. 1041 * after the entry was queued.
1042 */ 1042 */
1043 if (__predict_false(!(sample->source->flags & 1043 if (__predict_false(!(sample->source->flags &
1044 (RND_FLAG_COLLECT_TIME| 1044 (RND_FLAG_COLLECT_TIME|
1045 RND_FLAG_COLLECT_VALUE)))) { 1045 RND_FLAG_COLLECT_VALUE)))) {
1046 SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); 1046 SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
1047 } else { 1047 } else {
1048 SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next); 1048 SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next);
1049 } 1049 }
1050 } 1050 }
1051 mutex_spin_exit(&rnd_samples.lock); 1051 mutex_spin_exit(&rnd_samples.lock);
1052 1052
1053 /* Don't thrash the rndpool mtx either. Hold, add all samples. */ 1053 /* Don't thrash the rndpool mtx either. Hold, add all samples. */
1054 mutex_spin_enter(&rndpool_mtx); 1054 mutex_spin_enter(&rndpool_mtx);
1055 1055
1056 pool_entropy = rndpool_get_entropy_count(&rnd_pool); 1056 pool_entropy = rndpool_get_entropy_count(&rnd_pool);
1057 1057
1058 while ((sample = SIMPLEQ_FIRST(&dq_samples))) { 1058 while ((sample = SIMPLEQ_FIRST(&dq_samples))) {
1059 int sample_count; 1059 int sample_count;
1060 1060
1061 SIMPLEQ_REMOVE_HEAD(&dq_samples, next); 1061 SIMPLEQ_REMOVE_HEAD(&dq_samples, next);
1062 source = sample->source; 1062 source = sample->source;
1063 entropy = sample->entropy; 1063 entropy = sample->entropy;
1064 sample_count = sample->cursor; 1064 sample_count = sample->cursor;
1065 1065
1066 /* 1066 /*
1067 * Don't provide a side channel for timing attacks on 1067 * Don't provide a side channel for timing attacks on
1068 * low-rate sources: require mixing with some other 1068 * low-rate sources: require mixing with some other
1069 * source before we schedule a wakeup. 1069 * source before we schedule a wakeup.
1070 */ 1070 */
1071 if (!wake && 1071 if (!wake &&
1072 (source != last_source || source->flags & RND_FLAG_FAST)) { 1072 (source != last_source || source->flags & RND_FLAG_FAST)) {
1073 wake++; 1073 wake++;
1074 } 1074 }
1075 last_source = source; 1075 last_source = source;
1076 1076
1077 /* 1077 /*
1078 * If the source has been disabled, ignore samples from 1078 * If the source has been disabled, ignore samples from
1079 * it. 1079 * it.
1080 */ 1080 */
1081 if (source->flags & RND_FLAG_NO_COLLECT) 1081 if (source->flags & RND_FLAG_NO_COLLECT)
1082 goto skip; 1082 goto skip;
1083 1083
1084 /* 1084 /*
1085 * Hardware generators are great but sometimes they 1085 * Hardware generators are great but sometimes they
1086 * have...hardware issues. Don't use any data from 1086 * have...hardware issues. Don't use any data from
1087 * them unless it passes some tests. 1087 * them unless it passes some tests.
1088 */ 1088 */
1089 if (source->type == RND_TYPE_RNG) { 1089 if (source->type == RND_TYPE_RNG) {
1090 if (__predict_false(rnd_hwrng_test(sample))) { 1090 if (__predict_false(rnd_hwrng_test(sample))) {
1091 source->flags |= RND_FLAG_NO_COLLECT; 1091 source->flags |= RND_FLAG_NO_COLLECT;
1092 rnd_printf("rnd: disabling source \"%s\".", 1092 rnd_printf("rnd: disabling source \"%s\".",
1093 badsource->name); 1093 badsource->name);
1094 goto skip; 1094 goto skip;
1095 } 1095 }
1096 } 1096 }
1097 1097
1098 if (source->flags & RND_FLAG_COLLECT_VALUE) { 1098 if (source->flags & RND_FLAG_COLLECT_VALUE) {
1099 rndpool_add_data(&rnd_pool, sample->values, 1099 rndpool_add_data(&rnd_pool, sample->values,
1100 sample_count * 1100 sample_count *
1101 sizeof(sample->values[1]), 1101 sizeof(sample->values[1]),
1102 0); 1102 0);
1103 } 1103 }
1104 if (source->flags & RND_FLAG_COLLECT_TIME) { 1104 if (source->flags & RND_FLAG_COLLECT_TIME) {
1105 rndpool_add_data(&rnd_pool, sample->ts, 1105 rndpool_add_data(&rnd_pool, sample->ts,
1106 sample_count * 1106 sample_count *
1107 sizeof(sample->ts[1]), 1107 sizeof(sample->ts[1]),
1108 0); 1108 0);
1109 } 1109 }
1110 1110
1111 pool_entropy += entropy; 1111 pool_entropy += entropy;
1112 source->total += sample->entropy; 1112 source->total += sample->entropy;
1113skip: SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); 1113skip: SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
1114 } 1114 }
1115 rndpool_set_entropy_count(&rnd_pool, pool_entropy); 1115 rndpool_set_entropy_count(&rnd_pool, pool_entropy);
1116 if (pool_entropy > RND_ENTROPY_THRESHOLD * 8) { 1116 if (pool_entropy > RND_ENTROPY_THRESHOLD * 8) {
1117 wake++; 1117 wake++;
1118 } else { 1118 } else {
1119 rnd_getmore(howmany((RND_POOLBITS - pool_entropy), NBBY)); 1119 rnd_getmore(howmany((RND_POOLBITS - pool_entropy), NBBY));
1120 rnd_printf_verbose("rnd: empty, asking for %d bytes\n", 1120 rnd_printf_verbose("rnd: empty, asking for %d bytes\n",
1121 (int)(howmany((RND_POOLBITS - pool_entropy), NBBY))); 1121 (int)(howmany((RND_POOLBITS - pool_entropy), NBBY)));
1122 } 1122 }
1123 mutex_spin_exit(&rndpool_mtx); 1123 mutex_spin_exit(&rndpool_mtx);
1124 1124
1125 /* Now we hold no locks: clean up. */ 1125 /* Now we hold no locks: clean up. */
1126 while ((sample = SIMPLEQ_FIRST(&df_samples))) { 1126 while ((sample = SIMPLEQ_FIRST(&df_samples))) {
1127 SIMPLEQ_REMOVE_HEAD(&df_samples, next); 1127 SIMPLEQ_REMOVE_HEAD(&df_samples, next);
1128 rnd_sample_free(sample); 1128 rnd_sample_free(sample);
1129 } 1129 }
1130 1130
1131 /* 1131 /*
1132 * Wake up any potential readers waiting. 1132 * Wake up any potential readers waiting.
1133 */ 1133 */
1134 if (wake) { 1134 if (wake) {
1135 rnd_schedule_wakeup(); 1135 rnd_schedule_wakeup();
1136 } 1136 }
1137} 1137}
1138 1138
1139static void 1139static void
1140rnd_intr(void *arg) 1140rnd_intr(void *arg)
1141{ 1141{
1142 rnd_process_events(); 1142 rnd_process_events();
1143} 1143}
1144 1144
1145static void 1145static void
1146rnd_wake(void *arg) 1146rnd_wake(void *arg)
1147{ 1147{
1148 rnd_wakeup_readers(); 1148 rnd_wakeup_readers();
1149} 1149}
1150 1150
1151static uint32_t 1151static uint32_t
1152rnd_extract_data(void *p, u_int32_t len, u_int32_t flags) 1152rnd_extract_data(void *p, uint32_t len, uint32_t flags)
1153{ 1153{
1154 static int timed_in; 1154 static int timed_in;
1155 int entropy_count; 1155 int entropy_count;
1156 uint32_t retval; 1156 uint32_t retval;
1157 1157
1158 mutex_spin_enter(&rndpool_mtx); 1158 mutex_spin_enter(&rndpool_mtx);
1159 if (__predict_false(!timed_in)) { 1159 if (__predict_false(!timed_in)) {
1160 if (boottime.tv_sec) { 1160 if (boottime.tv_sec) {
1161 rndpool_add_data(&rnd_pool, &boottime, 1161 rndpool_add_data(&rnd_pool, &boottime,
1162 sizeof(boottime), 0); 1162 sizeof(boottime), 0);
1163 } 1163 }
1164 timed_in++; 1164 timed_in++;
1165 } 1165 }
1166 if (__predict_false(!rnd_initial_entropy)) { 1166 if (__predict_false(!rnd_initial_entropy)) {
1167 uint32_t c; 1167 uint32_t c;
1168 1168
1169 rnd_printf_verbose("rnd: WARNING! initial entropy low (%u).\n", 1169 rnd_printf_verbose("rnd: WARNING! initial entropy low (%u).\n",
1170 rndpool_get_entropy_count(&rnd_pool)); 1170 rndpool_get_entropy_count(&rnd_pool));
1171 /* Try once again to put something in the pool */ 1171 /* Try once again to put something in the pool */
1172 c = rnd_counter(); 1172 c = rnd_counter();
1173 rndpool_add_data(&rnd_pool, &c, sizeof(c), 1); 1173 rndpool_add_data(&rnd_pool, &c, sizeof(c), 1);
1174 } 1174 }
1175 1175
1176#ifdef DIAGNOSTIC 1176#ifdef DIAGNOSTIC
1177 while (!rnd_tested) { 1177 while (!rnd_tested) {
1178 entropy_count = rndpool_get_entropy_count(&rnd_pool); 1178 entropy_count = rndpool_get_entropy_count(&rnd_pool);
1179 rnd_printf_verbose("rnd: starting statistical RNG test," 1179 rnd_printf_verbose("rnd: starting statistical RNG test,"
1180 " entropy = %d.\n", 1180 " entropy = %d.\n",
1181 entropy_count); 1181 entropy_count);
1182 if (rndpool_extract_data(&rnd_pool, rnd_rt.rt_b, 1182 if (rndpool_extract_data(&rnd_pool, rnd_rt.rt_b,
1183 sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY) 1183 sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY)
1184 != sizeof(rnd_rt.rt_b)) { 1184 != sizeof(rnd_rt.rt_b)) {
1185 panic("rnd: could not get bits for statistical test"); 1185 panic("rnd: could not get bits for statistical test");
1186 } 1186 }
1187 /* 1187 /*
1188 * Stash the tested bits so we can put them back in the 1188 * Stash the tested bits so we can put them back in the
1189 * pool, restoring the entropy count. DO NOT rely on 1189 * pool, restoring the entropy count. DO NOT rely on
1190 * rngtest to maintain the bits pristine -- we could end 1190 * rngtest to maintain the bits pristine -- we could end
1191 * up adding back non-random data claiming it were pure 1191 * up adding back non-random data claiming it were pure
1192 * entropy. 1192 * entropy.
1193 */ 1193 */
1194 memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b)); 1194 memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b));
1195 strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name)); 1195 strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name));
1196 if (rngtest(&rnd_rt)) { 1196 if (rngtest(&rnd_rt)) {
1197 /* 1197 /*
1198 * The probabiliity of a Type I error is 3/10000, 1198 * The probabiliity of a Type I error is 3/10000,
1199 * but note this can only happen at boot time. 1199 * but note this can only happen at boot time.
1200 * The relevant standard says to reset the module, 1200 * The relevant standard says to reset the module,
1201 * but developers objected... 1201 * but developers objected...
1202 */ 1202 */
1203 rnd_printf("rnd: WARNING, ENTROPY POOL FAILED " 1203 rnd_printf("rnd: WARNING, ENTROPY POOL FAILED "
1204 "STATISTICAL TEST!\n"); 1204 "STATISTICAL TEST!\n");
1205 continue; 1205 continue;
1206 } 1206 }
1207 memset(&rnd_rt, 0, sizeof(rnd_rt)); 1207 memset(&rnd_rt, 0, sizeof(rnd_rt));
1208 rndpool_add_data(&rnd_pool, rnd_testbits, sizeof(rnd_testbits), 1208 rndpool_add_data(&rnd_pool, rnd_testbits, sizeof(rnd_testbits),
1209 entropy_count); 1209 entropy_count);
1210 memset(rnd_testbits, 0, sizeof(rnd_testbits)); 1210 memset(rnd_testbits, 0, sizeof(rnd_testbits));
1211 rnd_printf_verbose("rnd: statistical RNG test done," 1211 rnd_printf_verbose("rnd: statistical RNG test done,"
1212 " entropy = %d.\n", 1212 " entropy = %d.\n",
1213 rndpool_get_entropy_count(&rnd_pool)); 1213 rndpool_get_entropy_count(&rnd_pool));
1214 rnd_tested++; 1214 rnd_tested++;
1215 } 1215 }
1216#endif 1216#endif
1217 entropy_count = rndpool_get_entropy_count(&rnd_pool); 1217 entropy_count = rndpool_get_entropy_count(&rnd_pool);
1218 if (entropy_count < (RND_ENTROPY_THRESHOLD * 2 + len) * NBBY) { 1218 if (entropy_count < (RND_ENTROPY_THRESHOLD * 2 + len) * NBBY) {
1219 rnd_printf_verbose("rnd: empty, asking for %d bytes\n", 1219 rnd_printf_verbose("rnd: empty, asking for %d bytes\n",
1220 (int)(howmany((RND_POOLBITS - entropy_count), NBBY))); 1220 (int)(howmany((RND_POOLBITS - entropy_count), NBBY)));
1221 rnd_getmore(howmany((RND_POOLBITS - entropy_count), NBBY)); 1221 rnd_getmore(howmany((RND_POOLBITS - entropy_count), NBBY));
1222 } 1222 }
1223 retval = rndpool_extract_data(&rnd_pool, p, len, flags); 1223 retval = rndpool_extract_data(&rnd_pool, p, len, flags);
1224 mutex_spin_exit(&rndpool_mtx); 1224 mutex_spin_exit(&rndpool_mtx);
1225 1225
1226 return retval; 1226 return retval;
1227} 1227}
1228 1228
1229/* 1229/*
1230 * Fill the buffer with as much entropy as we can. Return true if it 1230 * Fill the buffer with as much entropy as we can. Return true if it
1231 * has full entropy and false if not. 1231 * has full entropy and false if not.
1232 */ 1232 */
1233bool 1233bool
1234rnd_extract(void *buffer, size_t bytes) 1234rnd_extract(void *buffer, size_t bytes)
1235{ 1235{
1236 const size_t extracted = rnd_extract_data(buffer, bytes, 1236 const size_t extracted = rnd_extract_data(buffer, bytes,
1237 RND_EXTRACT_GOOD); 1237 RND_EXTRACT_GOOD);
1238 1238
1239 if (extracted < bytes) { 1239 if (extracted < bytes) {
1240 (void)rnd_extract_data((uint8_t *)buffer + extracted, 1240 (void)rnd_extract_data((uint8_t *)buffer + extracted,
1241 bytes - extracted, RND_EXTRACT_ANY); 1241 bytes - extracted, RND_EXTRACT_ANY);
1242 mutex_spin_enter(&rndpool_mtx); 1242 mutex_spin_enter(&rndpool_mtx);
1243 rnd_getmore(bytes - extracted); 1243 rnd_getmore(bytes - extracted);
1244 mutex_spin_exit(&rndpool_mtx); 1244 mutex_spin_exit(&rndpool_mtx);
1245 return false; 1245 return false;
1246 } 1246 }
1247 1247
1248 return true; 1248 return true;
1249} 1249}
1250 1250
1251/* 1251/*
1252 * If we have as much entropy as is requested, fill the buffer with it 1252 * If we have as much entropy as is requested, fill the buffer with it
1253 * and return true. Otherwise, leave the buffer alone and return 1253 * and return true. Otherwise, leave the buffer alone and return
1254 * false. 1254 * false.
1255 */ 1255 */
1256 1256
1257CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL); 1257CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL);
1258CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD)); 1258CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD));
1259CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <= 1259CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <=
1260 (0xffffffffUL / NBBY)); 1260 (0xffffffffUL / NBBY));
1261 1261
1262bool 1262bool
1263rnd_tryextract(void *buffer, size_t bytes) 1263rnd_tryextract(void *buffer, size_t bytes)
1264{ 1264{
1265 bool ok; 1265 bool ok;
1266 1266
1267 KASSERT(bytes <= RNDSINK_MAX_BYTES); 1267 KASSERT(bytes <= RNDSINK_MAX_BYTES);
1268 1268
1269 const uint32_t bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY); 1269 const uint32_t bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY);
1270 1270
1271 mutex_spin_enter(&rndpool_mtx); 1271 mutex_spin_enter(&rndpool_mtx);
1272 if (bits_needed <= rndpool_get_entropy_count(&rnd_pool)) { 1272 if (bits_needed <= rndpool_get_entropy_count(&rnd_pool)) {
1273 const uint32_t extracted __diagused = 1273 const uint32_t extracted __diagused =
1274 rndpool_extract_data(&rnd_pool, buffer, bytes, 1274 rndpool_extract_data(&rnd_pool, buffer, bytes,
1275 RND_EXTRACT_GOOD); 1275 RND_EXTRACT_GOOD);
1276 1276
1277 KASSERT(extracted == bytes); 1277 KASSERT(extracted == bytes);
1278 1278
1279 ok = true; 1279 ok = true;
1280 } else { 1280 } else {
1281 ok = false; 1281 ok = false;
1282 rnd_getmore(howmany(bits_needed - 1282 rnd_getmore(howmany(bits_needed -
1283 rndpool_get_entropy_count(&rnd_pool), NBBY)); 1283 rndpool_get_entropy_count(&rnd_pool), NBBY));
1284 } 1284 }
1285 mutex_spin_exit(&rndpool_mtx); 1285 mutex_spin_exit(&rndpool_mtx);
1286 1286
1287 return ok; 1287 return ok;
1288} 1288}
1289 1289
1290void 1290void
1291rnd_seed(void *base, size_t len) 1291rnd_seed(void *base, size_t len)
1292{ 1292{
1293 SHA1_CTX s; 1293 SHA1_CTX s;
1294 uint8_t digest[SHA1_DIGEST_LENGTH]; 1294 uint8_t digest[SHA1_DIGEST_LENGTH];
1295 1295
1296 if (len != sizeof(*boot_rsp)) { 1296 if (len != sizeof(*boot_rsp)) {
1297 rnd_printf("rnd: bad seed length %d\n", (int)len); 1297 rnd_printf("rnd: bad seed length %d\n", (int)len);
1298 return; 1298 return;
1299 } 1299 }
1300 1300
1301 boot_rsp = (rndsave_t *)base; 1301 boot_rsp = (rndsave_t *)base;
1302 SHA1Init(&s); 1302 SHA1Init(&s);
1303 SHA1Update(&s, (uint8_t *)&boot_rsp->entropy, 1303 SHA1Update(&s, (uint8_t *)&boot_rsp->entropy,
1304 sizeof(boot_rsp->entropy)); 1304 sizeof(boot_rsp->entropy));
1305 SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data)); 1305 SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data));
1306 SHA1Final(digest, &s); 1306 SHA1Final(digest, &s);
1307 1307
1308 if (memcmp(digest, boot_rsp->digest, sizeof(digest))) { 1308 if (memcmp(digest, boot_rsp->digest, sizeof(digest))) {
1309 rnd_printf("rnd: bad seed checksum\n"); 1309 rnd_printf("rnd: bad seed checksum\n");
1310 return; 1310 return;
1311 } 1311 }
1312 1312
1313 /* 1313 /*
1314 * It's not really well-defined whether bootloader-supplied 1314 * It's not really well-defined whether bootloader-supplied
1315 * modules run before or after rnd_init(). Handle both cases. 1315 * modules run before or after rnd_init(). Handle both cases.
1316 */ 1316 */
1317 if (rnd_ready) { 1317 if (rnd_ready) {
1318 rnd_printf_verbose("rnd: ready," 1318 rnd_printf_verbose("rnd: ready,"
1319 " feeding in seed data directly.\n"); 1319 " feeding in seed data directly.\n");
1320 mutex_spin_enter(&rndpool_mtx); 1320 mutex_spin_enter(&rndpool_mtx);
1321 rndpool_add_data(&rnd_pool, boot_rsp->data, 1321 rndpool_add_data(&rnd_pool, boot_rsp->data,
1322 sizeof(boot_rsp->data), 1322 sizeof(boot_rsp->data),
1323 MIN(boot_rsp->entropy, RND_POOLBITS / 2)); 1323 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
1324 memset(boot_rsp, 0, sizeof(*boot_rsp)); 1324 memset(boot_rsp, 0, sizeof(*boot_rsp));
1325 mutex_spin_exit(&rndpool_mtx); 1325 mutex_spin_exit(&rndpool_mtx);
1326 } else { 1326 } else {
1327 rnd_printf_verbose("rnd: not ready, deferring seed feed.\n"); 1327 rnd_printf_verbose("rnd: not ready, deferring seed feed.\n");
1328 } 1328 }
1329} 1329}