Thu Jan 24 14:23:45 2013 UTC ()
Assert equality, not assignment, in rnd_hwrng_test.

Not tested, but by inspection, the only caller, rnd_process_events,
clearly guarantees the condition.


(riastradh)
diff -r1.7 -r1.8 src/sys/kern/kern_rndq.c

cvs diff -r1.7 -r1.8 src/sys/kern/Attic/kern_rndq.c (switch to unified diff)

--- src/sys/kern/Attic/kern_rndq.c 2013/01/16 06:45:24 1.7
+++ src/sys/kern/Attic/kern_rndq.c 2013/01/24 14:23:45 1.8
@@ -1,1085 +1,1085 @@ @@ -1,1085 +1,1085 @@
1/* $NetBSD: kern_rndq.c,v 1.7 2013/01/16 06:45:24 msaitoh Exp $ */ 1/* $NetBSD: kern_rndq.c,v 1.8 2013/01/24 14:23:45 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon. 8 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon.
9 * This code uses ideas and algorithms from the Linux driver written by 9 * This code uses ideas and algorithms from the Linux driver written by
10 * Ted Ts'o. 10 * Ted Ts'o.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
15 * 1. Redistributions of source code must retain the above copyright 15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer. 16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright 17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the 18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution. 19 * documentation and/or other materials provided with the distribution.
20 * 20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE. 31 * POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#include <sys/cdefs.h> 34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.7 2013/01/16 06:45:24 msaitoh Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.8 2013/01/24 14:23:45 riastradh Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/ioctl.h> 38#include <sys/ioctl.h>
39#include <sys/fcntl.h> 39#include <sys/fcntl.h>
40#include <sys/select.h> 40#include <sys/select.h>
41#include <sys/poll.h> 41#include <sys/poll.h>
42#include <sys/kmem.h> 42#include <sys/kmem.h>
43#include <sys/mutex.h> 43#include <sys/mutex.h>
44#include <sys/proc.h> 44#include <sys/proc.h>
45#include <sys/kernel.h> 45#include <sys/kernel.h>
46#include <sys/conf.h> 46#include <sys/conf.h>
47#include <sys/systm.h> 47#include <sys/systm.h>
48#include <sys/callout.h> 48#include <sys/callout.h>
49#include <sys/rnd.h> 49#include <sys/rnd.h>
50#include <sys/vnode.h> 50#include <sys/vnode.h>
51#include <sys/pool.h> 51#include <sys/pool.h>
52#include <sys/kauth.h> 52#include <sys/kauth.h>
53#include <sys/once.h> 53#include <sys/once.h>
54#include <sys/rngtest.h> 54#include <sys/rngtest.h>
55#include <sys/cpu.h> /* XXX temporary, see rnd_detach_source */ 55#include <sys/cpu.h> /* XXX temporary, see rnd_detach_source */
56 56
57#include <dev/rnd_private.h> 57#include <dev/rnd_private.h>
58 58
59#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */ 59#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */
60#include <machine/cpu_counter.h> 60#include <machine/cpu_counter.h>
61#endif 61#endif
62 62
63#ifdef RND_DEBUG 63#ifdef RND_DEBUG
64#define DPRINTF(l,x) if (rnd_debug & (l)) printf x 64#define DPRINTF(l,x) if (rnd_debug & (l)) printf x
65int rnd_debug = 0; 65int rnd_debug = 0;
66#else 66#else
67#define DPRINTF(l,x) 67#define DPRINTF(l,x)
68#endif 68#endif
69 69
70#define RND_DEBUG_WRITE 0x0001 70#define RND_DEBUG_WRITE 0x0001
71#define RND_DEBUG_READ 0x0002 71#define RND_DEBUG_READ 0x0002
72#define RND_DEBUG_IOCTL 0x0004 72#define RND_DEBUG_IOCTL 0x0004
73#define RND_DEBUG_SNOOZE 0x0008 73#define RND_DEBUG_SNOOZE 0x0008
74 74
75/* 75/*
76 * list devices attached 76 * list devices attached
77 */ 77 */
78#if 0 78#if 0
79#define RND_VERBOSE 79#define RND_VERBOSE
80#endif 80#endif
81 81
82/* 82/*
83 * The size of a temporary buffer, kmem_alloc()ed when needed, and used for 83 * The size of a temporary buffer, kmem_alloc()ed when needed, and used for
84 * reading and writing data. 84 * reading and writing data.
85 */ 85 */
86#define RND_TEMP_BUFFER_SIZE 128 86#define RND_TEMP_BUFFER_SIZE 128
87 87
88/* 88/*
89 * This is a little bit of state information attached to each device that we 89 * This is a little bit of state information attached to each device that we
90 * collect entropy from. This is simply a collection buffer, and when it 90 * collect entropy from. This is simply a collection buffer, and when it
91 * is full it will be "detached" from the source and added to the entropy 91 * is full it will be "detached" from the source and added to the entropy
92 * pool after entropy is distilled as much as possible. 92 * pool after entropy is distilled as much as possible.
93 */ 93 */
94#define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */ 94#define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */
95typedef struct _rnd_sample_t { 95typedef struct _rnd_sample_t {
96 SIMPLEQ_ENTRY(_rnd_sample_t) next; 96 SIMPLEQ_ENTRY(_rnd_sample_t) next;
97 krndsource_t *source; 97 krndsource_t *source;
98 int cursor; 98 int cursor;
99 int entropy; 99 int entropy;
100 u_int32_t ts[RND_SAMPLE_COUNT]; 100 u_int32_t ts[RND_SAMPLE_COUNT];
101 u_int32_t values[RND_SAMPLE_COUNT]; 101 u_int32_t values[RND_SAMPLE_COUNT];
102} rnd_sample_t; 102} rnd_sample_t;
103 103
104/* 104/*
105 * The event queue. Fields are altered at an interrupt level. 105 * The event queue. Fields are altered at an interrupt level.
106 * All accesses must be protected with the mutex. 106 * All accesses must be protected with the mutex.
107 */ 107 */
108volatile int rnd_timeout_pending; 108volatile int rnd_timeout_pending;
109SIMPLEQ_HEAD(, _rnd_sample_t) rnd_samples; 109SIMPLEQ_HEAD(, _rnd_sample_t) rnd_samples;
110kmutex_t rnd_mtx; 110kmutex_t rnd_mtx;
111 111
112 112
113/* 113/*
114 * Entropy sinks: usually other generators waiting to be rekeyed. 114 * Entropy sinks: usually other generators waiting to be rekeyed.
115 * 115 *
116 * A sink's callback MUST NOT re-add the sink to the list, or 116 * A sink's callback MUST NOT re-add the sink to the list, or
117 * list corruption will occur. The list is protected by the 117 * list corruption will occur. The list is protected by the
118 * rndsink_mtx, which must be released before calling any sink's 118 * rndsink_mtx, which must be released before calling any sink's
119 * callback. 119 * callback.
120 */ 120 */
121TAILQ_HEAD(, rndsink) rnd_sinks; 121TAILQ_HEAD(, rndsink) rnd_sinks;
122kmutex_t rndsink_mtx; 122kmutex_t rndsink_mtx;
123 123
124/* 124/*
125 * Memory pool for sample buffers 125 * Memory pool for sample buffers
126 */ 126 */
127static pool_cache_t rnd_mempc; 127static pool_cache_t rnd_mempc;
128 128
129/* 129/*
130 * Our random pool. This is defined here rather than using the general 130 * Our random pool. This is defined here rather than using the general
131 * purpose one defined in rndpool.c. 131 * purpose one defined in rndpool.c.
132 * 132 *
133 * Samples are collected and queued into a separate mutex-protected queue 133 * Samples are collected and queued into a separate mutex-protected queue
134 * (rnd_samples, see above), and processed in a timeout routine; therefore, 134 * (rnd_samples, see above), and processed in a timeout routine; therefore,
135 * the mutex protecting the random pool is at IPL_SOFTCLOCK() as well. 135 * the mutex protecting the random pool is at IPL_SOFTCLOCK() as well.
136 */ 136 */
137rndpool_t rnd_pool; 137rndpool_t rnd_pool;
138kmutex_t rndpool_mtx; 138kmutex_t rndpool_mtx;
139kcondvar_t rndpool_cv; 139kcondvar_t rndpool_cv;
140 140
141/* 141/*
142 * This source is used to easily "remove" queue entries when the source 142 * This source is used to easily "remove" queue entries when the source
143 * which actually generated the events is going away. 143 * which actually generated the events is going away.
144 */ 144 */
145static krndsource_t rnd_source_no_collect = { 145static krndsource_t rnd_source_no_collect = {
146 /* LIST_ENTRY list */ 146 /* LIST_ENTRY list */
147 .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', 147 .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't',
148 0, 0, 0, 0, 0, 0, 0 }, 148 0, 0, 0, 0, 0, 0, 0 },
149 .last_time = 0, .last_delta = 0, .last_delta2 = 0, .total = 0, 149 .last_time = 0, .last_delta = 0, .last_delta2 = 0, .total = 0,
150 .type = RND_TYPE_UNKNOWN, 150 .type = RND_TYPE_UNKNOWN,
151 .flags = (RND_FLAG_NO_COLLECT | 151 .flags = (RND_FLAG_NO_COLLECT |
152 RND_FLAG_NO_ESTIMATE | 152 RND_FLAG_NO_ESTIMATE |
153 RND_TYPE_UNKNOWN), 153 RND_TYPE_UNKNOWN),
154 .state = NULL, 154 .state = NULL,
155 .test_cnt = 0, 155 .test_cnt = 0,
156 .test = NULL 156 .test = NULL
157}; 157};
158 158
159struct callout rnd_callout, skew_callout; 159struct callout rnd_callout, skew_callout;
160 160
161void rnd_wakeup_readers(void); 161void rnd_wakeup_readers(void);
162static inline u_int32_t rnd_estimate_entropy(krndsource_t *, u_int32_t); 162static inline u_int32_t rnd_estimate_entropy(krndsource_t *, u_int32_t);
163static inline u_int32_t rnd_counter(void); 163static inline u_int32_t rnd_counter(void);
164static void rnd_timeout(void *); 164static void rnd_timeout(void *);
165static void rnd_process_events(void *); 165static void rnd_process_events(void *);
166u_int32_t rnd_extract_data_locked(void *, u_int32_t, u_int32_t); /* XXX */ 166u_int32_t rnd_extract_data_locked(void *, u_int32_t, u_int32_t); /* XXX */
167static void rnd_add_data_ts(krndsource_t *, const void *const, 167static void rnd_add_data_ts(krndsource_t *, const void *const,
168 uint32_t, uint32_t, uint32_t); 168 uint32_t, uint32_t, uint32_t);
169 169
170int rnd_ready = 0; 170int rnd_ready = 0;
171int rnd_initial_entropy = 0; 171int rnd_initial_entropy = 0;
172 172
173#ifdef DIAGNOSTIC 173#ifdef DIAGNOSTIC
174static int rnd_tested = 0; 174static int rnd_tested = 0;
175static rngtest_t rnd_rt; 175static rngtest_t rnd_rt;
176static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)]; 176static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)];
177#endif 177#endif
178 178
179LIST_HEAD(, krndsource) rnd_sources; 179LIST_HEAD(, krndsource) rnd_sources;
180 180
181rndsave_t *boot_rsp; 181rndsave_t *boot_rsp;
182 182
183/* 183/*
184 * Generate a 32-bit counter. This should be more machine dependent, 184 * Generate a 32-bit counter. This should be more machine dependent,
185 * using cycle counters and the like when possible. 185 * using cycle counters and the like when possible.
186 */ 186 */
187static inline u_int32_t 187static inline u_int32_t
188rnd_counter(void) 188rnd_counter(void)
189{ 189{
190 struct timeval tv; 190 struct timeval tv;
191 191
192#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */ 192#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */
193 if (cpu_hascounter()) 193 if (cpu_hascounter())
194 return (cpu_counter32()); 194 return (cpu_counter32());
195#endif 195#endif
196 if (rnd_ready) { 196 if (rnd_ready) {
197 microtime(&tv); 197 microtime(&tv);
198 return (tv.tv_sec * 1000000 + tv.tv_usec); 198 return (tv.tv_sec * 1000000 + tv.tv_usec);
199 } 199 }
200 /* when called from rnd_init, its too early to call microtime safely */ 200 /* when called from rnd_init, its too early to call microtime safely */
201 return (0); 201 return (0);
202} 202}
203 203
204/* 204/*
205 * Check to see if there are readers waiting on us. If so, kick them. 205 * Check to see if there are readers waiting on us. If so, kick them.
206 */ 206 */
207void 207void
208rnd_wakeup_readers(void) 208rnd_wakeup_readers(void)
209{ 209{
210 rndsink_t *sink, *tsink; 210 rndsink_t *sink, *tsink;
211 TAILQ_HEAD(, rndsink) sunk = TAILQ_HEAD_INITIALIZER(sunk); 211 TAILQ_HEAD(, rndsink) sunk = TAILQ_HEAD_INITIALIZER(sunk);
212 212
213 mutex_spin_enter(&rndpool_mtx); 213 mutex_spin_enter(&rndpool_mtx);
214 if (rndpool_get_entropy_count(&rnd_pool) < RND_ENTROPY_THRESHOLD * 8) { 214 if (rndpool_get_entropy_count(&rnd_pool) < RND_ENTROPY_THRESHOLD * 8) {
215 mutex_spin_exit(&rndpool_mtx); 215 mutex_spin_exit(&rndpool_mtx);
216 return; 216 return;
217 } 217 }
218 218
219 /* 219 /*
220 * First, take care of in-kernel consumers needing rekeying. 220 * First, take care of in-kernel consumers needing rekeying.
221 */ 221 */
222 mutex_spin_enter(&rndsink_mtx); 222 mutex_spin_enter(&rndsink_mtx);
223 TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) { 223 TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) {
224 if (!mutex_tryenter(&sink->mtx)) { 224 if (!mutex_tryenter(&sink->mtx)) {
225#ifdef RND_VERBOSE 225#ifdef RND_VERBOSE
226 printf("rnd_wakeup_readers: " 226 printf("rnd_wakeup_readers: "
227 "skipping busy rndsink\n"); 227 "skipping busy rndsink\n");
228#endif 228#endif
229 continue; 229 continue;
230 } 230 }
231 231
232 KASSERT(RSTATE_PENDING == sink->state); 232 KASSERT(RSTATE_PENDING == sink->state);
233  233
234 if ((sink->len + RND_ENTROPY_THRESHOLD) * 8 < 234 if ((sink->len + RND_ENTROPY_THRESHOLD) * 8 <
235 rndpool_get_entropy_count(&rnd_pool)) { 235 rndpool_get_entropy_count(&rnd_pool)) {
236 /* We have enough entropy to sink some here. */ 236 /* We have enough entropy to sink some here. */
237 if (rndpool_extract_data(&rnd_pool, sink->data, 237 if (rndpool_extract_data(&rnd_pool, sink->data,
238 sink->len, RND_EXTRACT_GOOD) 238 sink->len, RND_EXTRACT_GOOD)
239 != sink->len) { 239 != sink->len) {
240 panic("could not extract estimated " 240 panic("could not extract estimated "
241 "entropy from pool"); 241 "entropy from pool");
242 } 242 }
243 sink->state = RSTATE_HASBITS; 243 sink->state = RSTATE_HASBITS;
244 /* Move this sink to the list of pending callbacks */ 244 /* Move this sink to the list of pending callbacks */
245 TAILQ_REMOVE(&rnd_sinks, sink, tailq); 245 TAILQ_REMOVE(&rnd_sinks, sink, tailq);
246 TAILQ_INSERT_HEAD(&sunk, sink, tailq); 246 TAILQ_INSERT_HEAD(&sunk, sink, tailq);
247 } else { 247 } else {
248 mutex_exit(&sink->mtx); 248 mutex_exit(&sink->mtx);
249 } 249 }
250 } 250 }
251 mutex_spin_exit(&rndsink_mtx); 251 mutex_spin_exit(&rndsink_mtx);
252  252
253 /* 253 /*
254 * If we still have enough new bits to do something, feed userspace. 254 * If we still have enough new bits to do something, feed userspace.
255 */ 255 */
256 if (rndpool_get_entropy_count(&rnd_pool) > RND_ENTROPY_THRESHOLD * 8) { 256 if (rndpool_get_entropy_count(&rnd_pool) > RND_ENTROPY_THRESHOLD * 8) {
257#ifdef RND_VERBOSE 257#ifdef RND_VERBOSE
258 if (!rnd_initial_entropy) 258 if (!rnd_initial_entropy)
259 printf("rnd: have initial entropy (%u)\n", 259 printf("rnd: have initial entropy (%u)\n",
260 rndpool_get_entropy_count(&rnd_pool)); 260 rndpool_get_entropy_count(&rnd_pool));
261#endif 261#endif
262 rnd_initial_entropy = 1; 262 rnd_initial_entropy = 1;
263 mutex_spin_exit(&rndpool_mtx); 263 mutex_spin_exit(&rndpool_mtx);
264 } else { 264 } else {
265 mutex_spin_exit(&rndpool_mtx); 265 mutex_spin_exit(&rndpool_mtx);
266 } 266 }
267 267
268 /* 268 /*
269 * Now that we have dropped the mutex, we can run sinks' callbacks. 269 * Now that we have dropped the mutex, we can run sinks' callbacks.
270 * Since we have reused the "tailq" member of the sink structure for 270 * Since we have reused the "tailq" member of the sink structure for
271 * this temporary on-stack queue, the callback must NEVER re-add 271 * this temporary on-stack queue, the callback must NEVER re-add
272 * the sink to the main queue, or our on-stack queue will become 272 * the sink to the main queue, or our on-stack queue will become
273 * corrupt. 273 * corrupt.
274 */ 274 */
275 while ((sink = TAILQ_FIRST(&sunk))) { 275 while ((sink = TAILQ_FIRST(&sunk))) {
276#ifdef RND_VERBOSE 276#ifdef RND_VERBOSE
277 printf("supplying %d bytes to entropy sink \"%s\"" 277 printf("supplying %d bytes to entropy sink \"%s\""
278 " (cb %p, arg %p).\n", 278 " (cb %p, arg %p).\n",
279 (int)sink->len, sink->name, sink->cb, sink->arg); 279 (int)sink->len, sink->name, sink->cb, sink->arg);
280#endif 280#endif
281 sink->state = RSTATE_HASBITS; 281 sink->state = RSTATE_HASBITS;
282 sink->cb(sink->arg); 282 sink->cb(sink->arg);
283 TAILQ_REMOVE(&sunk, sink, tailq); 283 TAILQ_REMOVE(&sunk, sink, tailq);
284 mutex_spin_exit(&sink->mtx); 284 mutex_spin_exit(&sink->mtx);
285 } 285 }
286} 286}
287 287
288/* 288/*
289 * Use the timing of the event to estimate the entropy gathered. 289 * Use the timing of the event to estimate the entropy gathered.
290 * If all the differentials (first, second, and third) are non-zero, return 290 * If all the differentials (first, second, and third) are non-zero, return
291 * non-zero. If any of these are zero, return zero. 291 * non-zero. If any of these are zero, return zero.
292 */ 292 */
293static inline u_int32_t 293static inline u_int32_t
294rnd_estimate_entropy(krndsource_t *rs, u_int32_t t) 294rnd_estimate_entropy(krndsource_t *rs, u_int32_t t)
295{ 295{
296 int32_t delta, delta2, delta3; 296 int32_t delta, delta2, delta3;
297 297
298 /* 298 /*
299 * If the time counter has overflowed, calculate the real difference. 299 * If the time counter has overflowed, calculate the real difference.
300 * If it has not, it is simplier. 300 * If it has not, it is simplier.
301 */ 301 */
302 if (t < rs->last_time) 302 if (t < rs->last_time)
303 delta = UINT_MAX - rs->last_time + t; 303 delta = UINT_MAX - rs->last_time + t;
304 else 304 else
305 delta = rs->last_time - t; 305 delta = rs->last_time - t;
306 306
307 if (delta < 0) 307 if (delta < 0)
308 delta = -delta; 308 delta = -delta;
309 309
310 /* 310 /*
311 * Calculate the second and third order differentials 311 * Calculate the second and third order differentials
312 */ 312 */
313 delta2 = rs->last_delta - delta; 313 delta2 = rs->last_delta - delta;
314 if (delta2 < 0) 314 if (delta2 < 0)
315 delta2 = -delta2; 315 delta2 = -delta2;
316 316
317 delta3 = rs->last_delta2 - delta2; 317 delta3 = rs->last_delta2 - delta2;
318 if (delta3 < 0) 318 if (delta3 < 0)
319 delta3 = -delta3; 319 delta3 = -delta3;
320 320
321 rs->last_time = t; 321 rs->last_time = t;
322 rs->last_delta = delta; 322 rs->last_delta = delta;
323 rs->last_delta2 = delta2; 323 rs->last_delta2 = delta2;
324 324
325 /* 325 /*
326 * If any delta is 0, we got no entropy. If all are non-zero, we 326 * If any delta is 0, we got no entropy. If all are non-zero, we
327 * might have something. 327 * might have something.
328 */ 328 */
329 if (delta == 0 || delta2 == 0 || delta3 == 0) 329 if (delta == 0 || delta2 == 0 || delta3 == 0)
330 return (0); 330 return (0);
331 331
332 return (1); 332 return (1);
333} 333}
334 334
335#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) 335#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL)
336static void 336static void
337rnd_skew(void *arg) 337rnd_skew(void *arg)
338{ 338{
339 static krndsource_t skewsrc; 339 static krndsource_t skewsrc;
340 static int live, flipflop; 340 static int live, flipflop;
341 341
342 /* 342 /*
343 * Only one instance of this callout will ever be scheduled 343 * Only one instance of this callout will ever be scheduled
344 * at a time (it is only ever scheduled by itself). So no 344 * at a time (it is only ever scheduled by itself). So no
345 * locking is required here. 345 * locking is required here.
346 */ 346 */
347 347
348 /* 348 /*
349 * Even on systems with seemingly stable clocks, the 349 * Even on systems with seemingly stable clocks, the
350 * entropy estimator seems to think we get 1 bit here 350 * entropy estimator seems to think we get 1 bit here
351 * about every 2 calls. That seems like too much. Set 351 * about every 2 calls. That seems like too much. Set
352 * NO_ESTIMATE on this source until we can better analyze 352 * NO_ESTIMATE on this source until we can better analyze
353 * the entropy of its output. 353 * the entropy of its output.
354 */ 354 */
355 if (__predict_false(!live)) { 355 if (__predict_false(!live)) {
356 rnd_attach_source(&skewsrc, "callout", RND_TYPE_SKEW, 356 rnd_attach_source(&skewsrc, "callout", RND_TYPE_SKEW,
357 RND_FLAG_NO_ESTIMATE); 357 RND_FLAG_NO_ESTIMATE);
358 live = 1; 358 live = 1;
359 } 359 }
360 360
361 flipflop = !flipflop; 361 flipflop = !flipflop;
362 362
363 if (flipflop) { 363 if (flipflop) {
364 rnd_add_uint32(&skewsrc, rnd_counter()); 364 rnd_add_uint32(&skewsrc, rnd_counter());
365 callout_schedule(&skew_callout, hz); 365 callout_schedule(&skew_callout, hz);
366 } else { 366 } else {
367 callout_schedule(&skew_callout, 1); 367 callout_schedule(&skew_callout, 1);
368 } 368 }
369} 369}
370#endif 370#endif
371 371
372/* 372/*
373 * initialize the global random pool for our use. 373 * initialize the global random pool for our use.
374 * rnd_init() must be called very early on in the boot process, so 374 * rnd_init() must be called very early on in the boot process, so
375 * the pool is ready for other devices to attach as sources. 375 * the pool is ready for other devices to attach as sources.
376 */ 376 */
377void 377void
378rnd_init(void) 378rnd_init(void)
379{ 379{
380 u_int32_t c; 380 u_int32_t c;
381 381
382 if (rnd_ready) 382 if (rnd_ready)
383 return; 383 return;
384 384
385 mutex_init(&rnd_mtx, MUTEX_DEFAULT, IPL_VM); 385 mutex_init(&rnd_mtx, MUTEX_DEFAULT, IPL_VM);
386 mutex_init(&rndsink_mtx, MUTEX_DEFAULT, IPL_VM); 386 mutex_init(&rndsink_mtx, MUTEX_DEFAULT, IPL_VM);
387 387
388 callout_init(&rnd_callout, CALLOUT_MPSAFE); 388 callout_init(&rnd_callout, CALLOUT_MPSAFE);
389 callout_setfunc(&rnd_callout, rnd_timeout, NULL); 389 callout_setfunc(&rnd_callout, rnd_timeout, NULL);
390 390
391 /* 391 /*
392 * take a counter early, hoping that there's some variance in 392 * take a counter early, hoping that there's some variance in
393 * the following operations 393 * the following operations
394 */ 394 */
395 c = rnd_counter(); 395 c = rnd_counter();
396 396
397 LIST_INIT(&rnd_sources); 397 LIST_INIT(&rnd_sources);
398 SIMPLEQ_INIT(&rnd_samples); 398 SIMPLEQ_INIT(&rnd_samples);
399 TAILQ_INIT(&rnd_sinks); 399 TAILQ_INIT(&rnd_sinks);
400 400
401 rndpool_init(&rnd_pool); 401 rndpool_init(&rnd_pool);
402 mutex_init(&rndpool_mtx, MUTEX_DEFAULT, IPL_VM); 402 mutex_init(&rndpool_mtx, MUTEX_DEFAULT, IPL_VM);
403 cv_init(&rndpool_cv, "rndread"); 403 cv_init(&rndpool_cv, "rndread");
404 404
405 rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0, 405 rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0,
406 "rndsample", NULL, IPL_VM, 406 "rndsample", NULL, IPL_VM,
407 NULL, NULL, NULL); 407 NULL, NULL, NULL);
408 408
409 /* 409 /*
410 * Set resource limit. The rnd_process_events() function 410 * Set resource limit. The rnd_process_events() function
411 * is called every tick and process the sample queue. 411 * is called every tick and process the sample queue.
412 * Without limitation, if a lot of rnd_add_*() are called, 412 * Without limitation, if a lot of rnd_add_*() are called,
413 * all kernel memory may be eaten up. 413 * all kernel memory may be eaten up.
414 */ 414 */
415 pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0); 415 pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0);
416 416
417 /* 417 /*
418 * Mix *something*, *anything* into the pool to help it get started. 418 * Mix *something*, *anything* into the pool to help it get started.
419 * However, it's not safe for rnd_counter() to call microtime() yet, 419 * However, it's not safe for rnd_counter() to call microtime() yet,
420 * so on some platforms we might just end up with zeros anyway. 420 * so on some platforms we might just end up with zeros anyway.
421 * XXX more things to add would be nice. 421 * XXX more things to add would be nice.
422 */ 422 */
423 if (c) { 423 if (c) {
424 mutex_spin_enter(&rndpool_mtx); 424 mutex_spin_enter(&rndpool_mtx);
425 rndpool_add_data(&rnd_pool, &c, sizeof(c), 1); 425 rndpool_add_data(&rnd_pool, &c, sizeof(c), 1);
426 c = rnd_counter(); 426 c = rnd_counter();
427 rndpool_add_data(&rnd_pool, &c, sizeof(c), 1); 427 rndpool_add_data(&rnd_pool, &c, sizeof(c), 1);
428 mutex_spin_exit(&rndpool_mtx); 428 mutex_spin_exit(&rndpool_mtx);
429 } 429 }
430 430
431 rnd_ready = 1; 431 rnd_ready = 1;
432 432
433 /* 433 /*
434 * If we have a cycle counter, take its error with respect 434 * If we have a cycle counter, take its error with respect
435 * to the callout mechanism as a source of entropy, ala 435 * to the callout mechanism as a source of entropy, ala
436 * TrueRand. 436 * TrueRand.
437 * 437 *
438 * XXX This will do little when the cycle counter *is* what's 438 * XXX This will do little when the cycle counter *is* what's
439 * XXX clocking the callout mechanism. How to get this right 439 * XXX clocking the callout mechanism. How to get this right
440 * XXX without unsightly spelunking in the timecounter code? 440 * XXX without unsightly spelunking in the timecounter code?
441 */ 441 */
442#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */ 442#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */
443 callout_init(&skew_callout, CALLOUT_MPSAFE); 443 callout_init(&skew_callout, CALLOUT_MPSAFE);
444 callout_setfunc(&skew_callout, rnd_skew, NULL); 444 callout_setfunc(&skew_callout, rnd_skew, NULL);
445 rnd_skew(NULL); 445 rnd_skew(NULL);
446#endif 446#endif
447 447
448#ifdef RND_VERBOSE 448#ifdef RND_VERBOSE
449 printf("rnd: initialised (%u)%s", RND_POOLBITS, 449 printf("rnd: initialised (%u)%s", RND_POOLBITS,
450 c ? " with counter\n" : "\n"); 450 c ? " with counter\n" : "\n");
451#endif 451#endif
452 if (boot_rsp != NULL) { 452 if (boot_rsp != NULL) {
453 mutex_spin_enter(&rndpool_mtx); 453 mutex_spin_enter(&rndpool_mtx);
454 rndpool_add_data(&rnd_pool, boot_rsp->data, 454 rndpool_add_data(&rnd_pool, boot_rsp->data,
455 sizeof(boot_rsp->data), 455 sizeof(boot_rsp->data),
456 MIN(boot_rsp->entropy, 456 MIN(boot_rsp->entropy,
457 RND_POOLBITS / 2)); 457 RND_POOLBITS / 2));
458 if (rndpool_get_entropy_count(&rnd_pool) > 458 if (rndpool_get_entropy_count(&rnd_pool) >
459 RND_ENTROPY_THRESHOLD * 8) { 459 RND_ENTROPY_THRESHOLD * 8) {
460 rnd_initial_entropy = 1; 460 rnd_initial_entropy = 1;
461 } 461 }
462 mutex_spin_exit(&rndpool_mtx); 462 mutex_spin_exit(&rndpool_mtx);
463#ifdef RND_VERBOSE 463#ifdef RND_VERBOSE
464 printf("rnd: seeded with %d bits\n", 464 printf("rnd: seeded with %d bits\n",
465 MIN(boot_rsp->entropy, RND_POOLBITS / 2)); 465 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
466#endif 466#endif
467 memset(boot_rsp, 0, sizeof(*boot_rsp)); 467 memset(boot_rsp, 0, sizeof(*boot_rsp));
468 } 468 }
469} 469}
470 470
471static rnd_sample_t * 471static rnd_sample_t *
472rnd_sample_allocate(krndsource_t *source) 472rnd_sample_allocate(krndsource_t *source)
473{ 473{
474 rnd_sample_t *c; 474 rnd_sample_t *c;
475 475
476 c = pool_cache_get(rnd_mempc, PR_WAITOK); 476 c = pool_cache_get(rnd_mempc, PR_WAITOK);
477 if (c == NULL) 477 if (c == NULL)
478 return (NULL); 478 return (NULL);
479 479
480 c->source = source; 480 c->source = source;
481 c->cursor = 0; 481 c->cursor = 0;
482 c->entropy = 0; 482 c->entropy = 0;
483 483
484 return (c); 484 return (c);
485} 485}
486 486
487/* 487/*
488 * Don't wait on allocation. To be used in an interrupt context. 488 * Don't wait on allocation. To be used in an interrupt context.
489 */ 489 */
490static rnd_sample_t * 490static rnd_sample_t *
491rnd_sample_allocate_isr(krndsource_t *source) 491rnd_sample_allocate_isr(krndsource_t *source)
492{ 492{
493 rnd_sample_t *c; 493 rnd_sample_t *c;
494 494
495 c = pool_cache_get(rnd_mempc, PR_NOWAIT); 495 c = pool_cache_get(rnd_mempc, PR_NOWAIT);
496 if (c == NULL) 496 if (c == NULL)
497 return (NULL); 497 return (NULL);
498 498
499 c->source = source; 499 c->source = source;
500 c->cursor = 0; 500 c->cursor = 0;
501 c->entropy = 0; 501 c->entropy = 0;
502 502
503 return (c); 503 return (c);
504} 504}
505 505
506static void 506static void
507rnd_sample_free(rnd_sample_t *c) 507rnd_sample_free(rnd_sample_t *c)
508{ 508{
509 memset(c, 0, sizeof(*c)); 509 memset(c, 0, sizeof(*c));
510 pool_cache_put(rnd_mempc, c); 510 pool_cache_put(rnd_mempc, c);
511} 511}
512 512
513/* 513/*
514 * Add a source to our list of sources. 514 * Add a source to our list of sources.
515 */ 515 */
516void 516void
517rnd_attach_source(krndsource_t *rs, const char *name, u_int32_t type, 517rnd_attach_source(krndsource_t *rs, const char *name, u_int32_t type,
518 u_int32_t flags) 518 u_int32_t flags)
519{ 519{
520 u_int32_t ts; 520 u_int32_t ts;
521 521
522 ts = rnd_counter(); 522 ts = rnd_counter();
523 523
524 strlcpy(rs->name, name, sizeof(rs->name)); 524 strlcpy(rs->name, name, sizeof(rs->name));
525 rs->last_time = ts; 525 rs->last_time = ts;
526 rs->last_delta = 0; 526 rs->last_delta = 0;
527 rs->last_delta2 = 0; 527 rs->last_delta2 = 0;
528 rs->total = 0; 528 rs->total = 0;
529 529
530 /* 530 /*
531 * Force network devices to not collect any entropy by 531 * Force network devices to not collect any entropy by
532 * default. 532 * default.
533 */ 533 */
534 if (type == RND_TYPE_NET) 534 if (type == RND_TYPE_NET)
535 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE); 535 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);
536 536
537 /* 537 /*
538 * Hardware RNGs get extra space for statistical testing. 538 * Hardware RNGs get extra space for statistical testing.
539 */ 539 */
540 if (type == RND_TYPE_RNG) { 540 if (type == RND_TYPE_RNG) {
541 rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP); 541 rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP);
542 rs->test_cnt = 0; 542 rs->test_cnt = 0;
543 } else { 543 } else {
544 rs->test = NULL; 544 rs->test = NULL;
545 rs->test_cnt = -1; 545 rs->test_cnt = -1;
546 } 546 }
547 547
548 rs->type = type; 548 rs->type = type;
549 rs->flags = flags; 549 rs->flags = flags;
550 550
551 rs->state = rnd_sample_allocate(rs); 551 rs->state = rnd_sample_allocate(rs);
552 552
553 mutex_spin_enter(&rndpool_mtx); 553 mutex_spin_enter(&rndpool_mtx);
554 LIST_INSERT_HEAD(&rnd_sources, rs, list); 554 LIST_INSERT_HEAD(&rnd_sources, rs, list);
555 555
556#ifdef RND_VERBOSE 556#ifdef RND_VERBOSE
557 printf("rnd: %s attached as an entropy source (", rs->name); 557 printf("rnd: %s attached as an entropy source (", rs->name);
558 if (!(flags & RND_FLAG_NO_COLLECT)) { 558 if (!(flags & RND_FLAG_NO_COLLECT)) {
559 printf("collecting"); 559 printf("collecting");
560 if (flags & RND_FLAG_NO_ESTIMATE) 560 if (flags & RND_FLAG_NO_ESTIMATE)
561 printf(" without estimation"); 561 printf(" without estimation");
562 } 562 }
563 else 563 else
564 printf("off"); 564 printf("off");
565 printf(")\n"); 565 printf(")\n");
566#endif 566#endif
567 567
568 /* 568 /*
569 * Again, put some more initial junk in the pool. 569 * Again, put some more initial junk in the pool.
570 * XXX Bogus, but harder to guess than zeros. 570 * XXX Bogus, but harder to guess than zeros.
571 */ 571 */
572 rndpool_add_data(&rnd_pool, &ts, sizeof(u_int32_t), 1); 572 rndpool_add_data(&rnd_pool, &ts, sizeof(u_int32_t), 1);
573 mutex_spin_exit(&rndpool_mtx); 573 mutex_spin_exit(&rndpool_mtx);
574} 574}
575 575
576/* 576/*
577 * Remove a source from our list of sources. 577 * Remove a source from our list of sources.
578 */ 578 */
579void 579void
580rnd_detach_source(krndsource_t *source) 580rnd_detach_source(krndsource_t *source)
581{ 581{
582 rnd_sample_t *sample; 582 rnd_sample_t *sample;
583 583
584 mutex_spin_enter(&rnd_mtx); 584 mutex_spin_enter(&rnd_mtx);
585 585
586 LIST_REMOVE(source, list); 586 LIST_REMOVE(source, list);
587 587
588 /* 588 /*
589 * If there are samples queued up "remove" them from the sample queue 589 * If there are samples queued up "remove" them from the sample queue
590 * by setting the source to the no-collect pseudosource. 590 * by setting the source to the no-collect pseudosource.
591 */ 591 */
592 sample = SIMPLEQ_FIRST(&rnd_samples); 592 sample = SIMPLEQ_FIRST(&rnd_samples);
593 while (sample != NULL) { 593 while (sample != NULL) {
594 if (sample->source == source) 594 if (sample->source == source)
595 sample->source = &rnd_source_no_collect; 595 sample->source = &rnd_source_no_collect;
596 596
597 sample = SIMPLEQ_NEXT(sample, next); 597 sample = SIMPLEQ_NEXT(sample, next);
598 } 598 }
599 599
600 mutex_spin_exit(&rnd_mtx); 600 mutex_spin_exit(&rnd_mtx);
601 601
602 if (!cpu_softintr_p()) { /* XXX XXX very temporary "fix" */ 602 if (!cpu_softintr_p()) { /* XXX XXX very temporary "fix" */
603 if (source->state) { 603 if (source->state) {
604 rnd_sample_free(source->state); 604 rnd_sample_free(source->state);
605 source->state = NULL; 605 source->state = NULL;
606 } 606 }
607 607
608 if (source->test) { 608 if (source->test) {
609 kmem_free(source->test, sizeof(rngtest_t)); 609 kmem_free(source->test, sizeof(rngtest_t));
610 } 610 }
611 } 611 }
612 612
613#ifdef RND_VERBOSE 613#ifdef RND_VERBOSE
614 printf("rnd: %s detached as an entropy source\n", source->name); 614 printf("rnd: %s detached as an entropy source\n", source->name);
615#endif 615#endif
616} 616}
617 617
618/* 618/*
619 * Add a 32-bit value to the entropy pool. The rs parameter should point to 619 * Add a 32-bit value to the entropy pool. The rs parameter should point to
620 * the source-specific source structure. 620 * the source-specific source structure.
621 */ 621 */
622void 622void
623_rnd_add_uint32(krndsource_t *rs, u_int32_t val) 623_rnd_add_uint32(krndsource_t *rs, u_int32_t val)
624{ 624{
625 u_int32_t ts; 625 u_int32_t ts;
626 u_int32_t entropy = 0; 626 u_int32_t entropy = 0;
627 627
628 if (rs->flags & RND_FLAG_NO_COLLECT) 628 if (rs->flags & RND_FLAG_NO_COLLECT)
629 return; 629 return;
630 630
631 /* 631 /*
632 * Sample the counter as soon as possible to avoid 632 * Sample the counter as soon as possible to avoid
633 * entropy overestimation. 633 * entropy overestimation.
634 */ 634 */
635 ts = rnd_counter(); 635 ts = rnd_counter();
636 636
637 /* 637 /*
638 * If we are estimating entropy on this source, 638 * If we are estimating entropy on this source,
639 * calculate differentials. 639 * calculate differentials.
640 */ 640 */
641 641
642 if ((rs->flags & RND_FLAG_NO_ESTIMATE) == 0) { 642 if ((rs->flags & RND_FLAG_NO_ESTIMATE) == 0) {
643 entropy = rnd_estimate_entropy(rs, ts); 643 entropy = rnd_estimate_entropy(rs, ts);
644 } 644 }
645 645
646 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); 646 rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts);
647} 647}
648 648
649void 649void
650rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len, 650rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len,
651 uint32_t entropy) 651 uint32_t entropy)
652{ 652{
653 /* 653 /*
654 * This interface is meant for feeding data which is, 654 * This interface is meant for feeding data which is,
655 * itself, random. Don't estimate entropy based on 655 * itself, random. Don't estimate entropy based on
656 * timestamp, just directly add the data. 656 * timestamp, just directly add the data.
657 */ 657 */
658 rnd_add_data_ts(rs, data, len, entropy, rnd_counter()); 658 rnd_add_data_ts(rs, data, len, entropy, rnd_counter());
659} 659}
660 660
/*
 * Package (value, timestamp) pairs into per-source sample buffers and,
 * whenever a buffer fills, queue it on rnd_samples for processing.
 *
 * Uses rnd_sample_allocate_isr() for buffer allocation, so this path
 * avoids sleeping -- NOTE(review): presumably this makes it safe from
 * interrupt context; confirm against callers.
 *
 * All of the claimed entropy is credited to the last buffer touched,
 * so we do not rely on it being evenly distributed in the data.
 */
static void
rnd_add_data_ts(krndsource_t *rs, const void *const data, u_int32_t len,
    u_int32_t entropy, uint32_t ts)
{
	rnd_sample_t *state = NULL;
	const uint32_t *dint = data;
	int todo, done, filled = 0;
	/* Completed buffers are staged here, then moved under rnd_mtx. */
	SIMPLEQ_HEAD(, _rnd_sample_t) tmp_samples =
	    SIMPLEQ_HEAD_INITIALIZER(tmp_samples);

	if (rs->flags & RND_FLAG_NO_COLLECT) {
		return;
	}

	/*
	 * Loop over data packaging it into sample buffers.
	 * If a sample buffer allocation fails, drop all data.
	 */
	todo = len / sizeof(*dint);
	for (done = 0; done < todo ; done++) {
		state = rs->state;
		if (state == NULL) {
			state = rnd_sample_allocate_isr(rs);
			if (__predict_false(state == NULL)) {
				break;
			}
			rs->state = state;
		}

		state->ts[state->cursor] = ts;
		state->values[state->cursor] = dint[done];
		state->cursor++;

		/* Buffer full: stage it and start a fresh one lazily. */
		if (state->cursor == RND_SAMPLE_COUNT) {
			SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next);
			filled++;
			rs->state = NULL;
		}
	}

	/* Allocation failed above: free staged buffers, drop everything. */
	if (__predict_false(state == NULL)) {
		while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
			SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
			rnd_sample_free(state);
		}
		return;
	}

	/*
	 * Claim all the entropy on the last one we send to
	 * the pool, so we don't rely on it being evenly distributed
	 * in the supplied data.
	 *
	 * XXX The rndpool code must accept samples with more
	 * XXX claimed entropy than bits for this to work right.
	 */
	state->entropy += entropy;
	rs->total += entropy;

	/*
	 * If we didn't finish any sample buffers, we're done.
	 */
	if (!filled) {
		return;
	}

	mutex_spin_enter(&rnd_mtx);
	while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
		SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
		SIMPLEQ_INSERT_HEAD(&rnd_samples, state, next);
	}

	/*
	 * If we are still starting up, cause immediate processing of
	 * the queued samples.  Otherwise, if the timeout isn't
	 * pending, have it run in the near future.
	 */
	if (__predict_false(cold)) {
#ifdef RND_VERBOSE
		printf("rnd: directly processing boot-time events.\n");
#endif
		rnd_process_events(NULL);	/* Drops lock! */
		return;
	}
	if (rnd_timeout_pending == 0) {
		rnd_timeout_pending = 1;
		mutex_spin_exit(&rnd_mtx);
		callout_schedule(&rnd_callout, 1);
		return;
	}
	mutex_spin_exit(&rnd_mtx);
}
753 753
/*
 * Sanity-test output from a hardware RNG source.  Returns nonzero if
 * the sample fails either the continuous-output test or the FIPS 140
 * statistical test; the caller then detaches the source.
 *
 * source->test_cnt == -1 is the sentinel for "no statistical test
 * pending" (already passed); otherwise it counts bytes accumulated
 * toward the FIPS140_RNG_TEST_BYTES needed to run the test.
 */
static int
rnd_hwrng_test(rnd_sample_t *sample)
{
	krndsource_t *source = sample->source;
	size_t cmplen;
	uint8_t *v1, *v2;
	size_t resid, totest;

	KASSERT(source->type == RND_TYPE_RNG);

	/*
	 * Continuous-output test: compare two halves of the
	 * sample buffer to each other.  The sample buffer (64 ints,
	 * so either 256 or 512 bytes on any modern machine) should be
	 * much larger than a typical hardware RNG output, so this seems
	 * a reasonable way to do it without retaining extra data.
	 */
	cmplen = sizeof(sample->values) / 2;
	v1 = (uint8_t *)sample->values;
	v2 = (uint8_t *)sample->values + cmplen;

	if (__predict_false(!memcmp(v1, v2, cmplen))) {
		printf("rnd: source \"%s\" failed continuous-output test.\n",
		    source->name);
		return 1;
	}

	/*
	 * FIPS 140 statistical RNG test.  We must accumulate 20,000 bits.
	 */
	if (__predict_true(source->test_cnt == -1)) {
		/* already passed the test */
		return 0;
	}
	resid = FIPS140_RNG_TEST_BYTES - source->test_cnt;
	totest = MIN(RND_SAMPLE_COUNT * 4, resid);
	memcpy(source->test->rt_b + source->test_cnt, sample->values, totest);
	resid -= totest;
	source->test_cnt += totest;
	if (resid == 0) {
		strlcpy(source->test->rt_name, source->name,
		    sizeof(source->test->rt_name));
		if (rngtest(source->test)) {
			printf("rnd: source \"%s\" failed statistical test.",
			    source->name);
			return 1;
		}
		/* Passed: mark done and scrub the accumulated bits. */
		source->test_cnt = -1;
		memset(source->test, 0, sizeof(*source->test));
	}
	return 0;
}
806 806
807/* 807/*
808 * Process the events in the ring buffer. Called by rnd_timeout or 808 * Process the events in the ring buffer. Called by rnd_timeout or
809 * by the add routines directly if the callout has never fired (that 809 * by the add routines directly if the callout has never fired (that
810 * is, if we are "cold" -- just booted). 810 * is, if we are "cold" -- just booted).
811 * 811 *
812 * Call with rnd_mtx held -- WILL RELEASE IT. 812 * Call with rnd_mtx held -- WILL RELEASE IT.
813 */ 813 */
814static void 814static void
815rnd_process_events(void *arg) 815rnd_process_events(void *arg)
816{ 816{
817 rnd_sample_t *sample; 817 rnd_sample_t *sample;
818 krndsource_t *source, *badsource = NULL; 818 krndsource_t *source, *badsource = NULL;
819 u_int32_t entropy; 819 u_int32_t entropy;
820 SIMPLEQ_HEAD(, _rnd_sample_t) dq_samples = 820 SIMPLEQ_HEAD(, _rnd_sample_t) dq_samples =
821 SIMPLEQ_HEAD_INITIALIZER(dq_samples); 821 SIMPLEQ_HEAD_INITIALIZER(dq_samples);
822 SIMPLEQ_HEAD(, _rnd_sample_t) df_samples = 822 SIMPLEQ_HEAD(, _rnd_sample_t) df_samples =
823 SIMPLEQ_HEAD_INITIALIZER(df_samples); 823 SIMPLEQ_HEAD_INITIALIZER(df_samples);
824 TAILQ_HEAD(, rndsink) sunk = TAILQ_HEAD_INITIALIZER(sunk); 824 TAILQ_HEAD(, rndsink) sunk = TAILQ_HEAD_INITIALIZER(sunk);
825 825
826 /* 826 /*
827 * Sample queue is protected by rnd_mtx, drain to onstack queue 827 * Sample queue is protected by rnd_mtx, drain to onstack queue
828 * and drop lock. 828 * and drop lock.
829 */ 829 */
830 830
831 while ((sample = SIMPLEQ_FIRST(&rnd_samples))) { 831 while ((sample = SIMPLEQ_FIRST(&rnd_samples))) {
832 SIMPLEQ_REMOVE_HEAD(&rnd_samples, next); 832 SIMPLEQ_REMOVE_HEAD(&rnd_samples, next);
833 /* 833 /*
834 * We repeat this check here, since it is possible 834 * We repeat this check here, since it is possible
835 * the source was disabled before we were called, but 835 * the source was disabled before we were called, but
836 * after the entry was queued. 836 * after the entry was queued.
837 */ 837 */
838 if (__predict_false(sample->source->flags 838 if (__predict_false(sample->source->flags
839 & RND_FLAG_NO_COLLECT)) { 839 & RND_FLAG_NO_COLLECT)) {
840 SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); 840 SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
841 } else { 841 } else {
842 SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next); 842 SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next);
843 } 843 }
844 } 844 }
845 mutex_spin_exit(&rnd_mtx); 845 mutex_spin_exit(&rnd_mtx);
846 846
847 /* Don't thrash the rndpool mtx either. Hold, add all samples. */ 847 /* Don't thrash the rndpool mtx either. Hold, add all samples. */
848 mutex_spin_enter(&rndpool_mtx); 848 mutex_spin_enter(&rndpool_mtx);
849 while ((sample = SIMPLEQ_FIRST(&dq_samples))) { 849 while ((sample = SIMPLEQ_FIRST(&dq_samples))) {
850 SIMPLEQ_REMOVE_HEAD(&dq_samples, next); 850 SIMPLEQ_REMOVE_HEAD(&dq_samples, next);
851 source = sample->source; 851 source = sample->source;
852 entropy = sample->entropy; 852 entropy = sample->entropy;
853 853
854 /* 854 /*
855 * Hardware generators are great but sometimes they 855 * Hardware generators are great but sometimes they
856 * have...hardware issues. Don't use any data from 856 * have...hardware issues. Don't use any data from
857 * them unless it passes some tests. 857 * them unless it passes some tests.
858 */ 858 */
859 if (source->type == RND_TYPE_RNG) { 859 if (source->type == RND_TYPE_RNG) {
860 if (__predict_false(rnd_hwrng_test(sample))) { 860 if (__predict_false(rnd_hwrng_test(sample))) {
861 /* 861 /*
862 * Detach the bad source. See below. 862 * Detach the bad source. See below.
863 */ 863 */
864 badsource = source; 864 badsource = source;
865 printf("rnd: detaching source \"%s\".", 865 printf("rnd: detaching source \"%s\".",
866 badsource->name); 866 badsource->name);
867 break; 867 break;
868 } 868 }
869 } 869 }
870 rndpool_add_data(&rnd_pool, sample->values, 870 rndpool_add_data(&rnd_pool, sample->values,
871 RND_SAMPLE_COUNT * 4, 0); 871 RND_SAMPLE_COUNT * 4, 0);
872 872
873 rndpool_add_data(&rnd_pool, sample->ts, 873 rndpool_add_data(&rnd_pool, sample->ts,
874 RND_SAMPLE_COUNT * 4, entropy); 874 RND_SAMPLE_COUNT * 4, entropy);
875 875
876 source->total += sample->entropy; 876 source->total += sample->entropy;
877 SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); 877 SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
878 } 878 }
879 mutex_spin_exit(&rndpool_mtx); 879 mutex_spin_exit(&rndpool_mtx);
880 880
881 /* Now we hold no locks: clean up. */ 881 /* Now we hold no locks: clean up. */
882 if (__predict_false(badsource)) { 882 if (__predict_false(badsource)) {
883 /* 883 /*
884 * The detach routine frees any samples we have not 884 * The detach routine frees any samples we have not
885 * dequeued ourselves. For sanity's sake, we simply 885 * dequeued ourselves. For sanity's sake, we simply
886 * free (without using) all dequeued samples from the 886 * free (without using) all dequeued samples from the
887 * point at which we detected a problem onwards. 887 * point at which we detected a problem onwards.
888 */ 888 */
889 rnd_detach_source(badsource); 889 rnd_detach_source(badsource);
890 while ((sample = SIMPLEQ_FIRST(&dq_samples))) { 890 while ((sample = SIMPLEQ_FIRST(&dq_samples))) {
891 SIMPLEQ_REMOVE_HEAD(&dq_samples, next); 891 SIMPLEQ_REMOVE_HEAD(&dq_samples, next);
892 rnd_sample_free(sample); 892 rnd_sample_free(sample);
893 } 893 }
894 } 894 }
895 while ((sample = SIMPLEQ_FIRST(&df_samples))) { 895 while ((sample = SIMPLEQ_FIRST(&df_samples))) {
896 SIMPLEQ_REMOVE_HEAD(&df_samples, next); 896 SIMPLEQ_REMOVE_HEAD(&df_samples, next);
897 rnd_sample_free(sample); 897 rnd_sample_free(sample);
898 } 898 }
899 899
900 /* 900 /*
901 * Wake up any potential readers waiting. 901 * Wake up any potential readers waiting.
902 */ 902 */
903 rnd_wakeup_readers(); 903 rnd_wakeup_readers();
904} 904}
905 905
906/* 906/*
907 * Timeout, run to process the events in the ring buffer. 907 * Timeout, run to process the events in the ring buffer.
908 */ 908 */
909static void 909static void
910rnd_timeout(void *arg) 910rnd_timeout(void *arg)
911{ 911{
912 mutex_spin_enter(&rnd_mtx); 912 mutex_spin_enter(&rnd_mtx);
913 rnd_timeout_pending = 0; 913 rnd_timeout_pending = 0;
914 rnd_process_events(arg); 914 rnd_process_events(arg);
915} 915}
916 916
/*
 * Extract len bytes of random data from the pool into p, per flags.
 * Caller must hold rndpool_mtx.  Returns the number of bytes
 * extracted (whatever rndpool_extract_data() reports).
 *
 * On the first call, stirs boottime into the pool; if initial entropy
 * is still low, stirs in one more counter sample.  Under DIAGNOSTIC,
 * runs a one-time FIPS statistical self-test of the pool output
 * before the first extraction, putting the tested bits back
 * afterwards so no entropy is lost.
 */
u_int32_t
rnd_extract_data_locked(void *p, u_int32_t len, u_int32_t flags)
{
	static int timed_in;

	KASSERT(mutex_owned(&rndpool_mtx));
	if (__predict_false(!timed_in)) {
		if (boottime.tv_sec) {
			rndpool_add_data(&rnd_pool, &boottime,
			    sizeof(boottime), 0);
		}
		timed_in++;
	}
	if (__predict_false(!rnd_initial_entropy)) {
		u_int32_t c;

#ifdef RND_VERBOSE
		printf("rnd: WARNING! initial entropy low (%u).\n",
		    rndpool_get_entropy_count(&rnd_pool));
#endif
		/* Try once again to put something in the pool */
		c = rnd_counter();
		rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
	}

#ifdef DIAGNOSTIC
	while (!rnd_tested) {
		int entropy_count;

		entropy_count = rndpool_get_entropy_count(&rnd_pool);
#ifdef RND_VERBOSE
		printf("rnd: starting statistical RNG test, entropy = %d.\n",
		    entropy_count);
#endif
		if (rndpool_extract_data(&rnd_pool, rnd_rt.rt_b,
			sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY)
		    != sizeof(rnd_rt.rt_b)) {
			panic("rnd: could not get bits for statistical test");
		}
		/*
		 * Stash the tested bits so we can put them back in the
		 * pool, restoring the entropy count. DO NOT rely on
		 * rngtest to maintain the bits pristine -- we could end
		 * up adding back non-random data claiming it were pure
		 * entropy.
		 */
		memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b));
		strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name));
		if (rngtest(&rnd_rt)) {
			/*
			 * The probability of a Type I error is 3/10000,
			 * but note this can only happen at boot time.
			 * The relevant standard says to reset the module,
			 * but developers objected...
			 */
			printf("rnd: WARNING, ENTROPY POOL FAILED "
			    "STATISTICAL TEST!\n");
			continue;
		}
		/* Passed: scrub test state, return the bits to the pool. */
		memset(&rnd_rt, 0, sizeof(rnd_rt));
		rndpool_add_data(&rnd_pool, rnd_testbits, sizeof(rnd_testbits),
		    entropy_count);
		memset(rnd_testbits, 0, sizeof(rnd_testbits));
#ifdef RND_VERBOSE
		printf("rnd: statistical RNG test done, entropy = %d.\n",
		    rndpool_get_entropy_count(&rnd_pool));
#endif
		rnd_tested++;
	}
#endif
	return rndpool_extract_data(&rnd_pool, p, len, flags);
}
989 989
990u_int32_t 990u_int32_t
991rnd_extract_data(void *p, u_int32_t len, u_int32_t flags) 991rnd_extract_data(void *p, u_int32_t len, u_int32_t flags)
992{ 992{
993 uint32_t retval; 993 uint32_t retval;
994 994
995 mutex_spin_enter(&rndpool_mtx); 995 mutex_spin_enter(&rndpool_mtx);
996 retval = rnd_extract_data_locked(p, len, flags); 996 retval = rnd_extract_data_locked(p, len, flags);
997 mutex_spin_exit(&rndpool_mtx); 997 mutex_spin_exit(&rndpool_mtx);
998 return retval; 998 return retval;
999} 999}
1000 1000
1001void 1001void
1002rndsink_attach(rndsink_t *rs) 1002rndsink_attach(rndsink_t *rs)
1003{ 1003{
1004#ifdef RND_VERBOSE 1004#ifdef RND_VERBOSE
1005 printf("rnd: entropy sink \"%s\" wants %d bytes of data.\n", 1005 printf("rnd: entropy sink \"%s\" wants %d bytes of data.\n",
1006 rs->name, (int)rs->len); 1006 rs->name, (int)rs->len);
1007#endif 1007#endif
1008 1008
1009 KASSERT(mutex_owned(&rs->mtx)); 1009 KASSERT(mutex_owned(&rs->mtx));
1010 KASSERT(rs->state = RSTATE_PENDING); 1010 KASSERT(rs->state = RSTATE_PENDING);
1011 1011
1012 mutex_spin_enter(&rndsink_mtx); 1012 mutex_spin_enter(&rndsink_mtx);
1013 TAILQ_INSERT_TAIL(&rnd_sinks, rs, tailq); 1013 TAILQ_INSERT_TAIL(&rnd_sinks, rs, tailq);
1014 mutex_spin_exit(&rndsink_mtx); 1014 mutex_spin_exit(&rndsink_mtx);
1015 1015
1016 mutex_spin_enter(&rnd_mtx); 1016 mutex_spin_enter(&rnd_mtx);
1017 if (rnd_timeout_pending == 0) { 1017 if (rnd_timeout_pending == 0) {
1018 rnd_timeout_pending = 1; 1018 rnd_timeout_pending = 1;
1019 callout_schedule(&rnd_callout, 1); 1019 callout_schedule(&rnd_callout, 1);
1020 } 1020 }
1021 mutex_spin_exit(&rnd_mtx); 1021 mutex_spin_exit(&rnd_mtx);
1022 1022
1023} 1023}
1024 1024
1025void 1025void
1026rndsink_detach(rndsink_t *rs) 1026rndsink_detach(rndsink_t *rs)
1027{ 1027{
1028 rndsink_t *sink, *tsink; 1028 rndsink_t *sink, *tsink;
1029#ifdef RND_VERBOSE 1029#ifdef RND_VERBOSE
1030 printf("rnd: entropy sink \"%s\" no longer wants data.\n", rs->name); 1030 printf("rnd: entropy sink \"%s\" no longer wants data.\n", rs->name);
1031#endif 1031#endif
1032 KASSERT(mutex_owned(&rs->mtx)); 1032 KASSERT(mutex_owned(&rs->mtx));
1033 1033
1034 mutex_spin_enter(&rndsink_mtx); 1034 mutex_spin_enter(&rndsink_mtx);
1035 TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) { 1035 TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) {
1036 if (sink == rs) { 1036 if (sink == rs) {
1037 TAILQ_REMOVE(&rnd_sinks, rs, tailq); 1037 TAILQ_REMOVE(&rnd_sinks, rs, tailq);
1038 } 1038 }
1039 } 1039 }
1040 mutex_spin_exit(&rndsink_mtx); 1040 mutex_spin_exit(&rndsink_mtx);
1041} 1041}
1042 1042
1043void 1043void
1044rnd_seed(void *base, size_t len) 1044rnd_seed(void *base, size_t len)
1045{ 1045{
1046 SHA1_CTX s; 1046 SHA1_CTX s;
1047 uint8_t digest[SHA1_DIGEST_LENGTH]; 1047 uint8_t digest[SHA1_DIGEST_LENGTH];
1048 1048
1049 if (len != sizeof(*boot_rsp)) { 1049 if (len != sizeof(*boot_rsp)) {
1050 aprint_error("rnd: bad seed length %d\n", (int)len); 1050 aprint_error("rnd: bad seed length %d\n", (int)len);
1051 return; 1051 return;
1052 } 1052 }
1053 1053
1054 boot_rsp = (rndsave_t *)base; 1054 boot_rsp = (rndsave_t *)base;
1055 SHA1Init(&s); 1055 SHA1Init(&s);
1056 SHA1Update(&s, (uint8_t *)&boot_rsp->entropy, 1056 SHA1Update(&s, (uint8_t *)&boot_rsp->entropy,
1057 sizeof(boot_rsp->entropy)); 1057 sizeof(boot_rsp->entropy));
1058 SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data)); 1058 SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data));
1059 SHA1Final(digest, &s); 1059 SHA1Final(digest, &s);
1060 1060
1061 if (memcmp(digest, boot_rsp->digest, sizeof(digest))) { 1061 if (memcmp(digest, boot_rsp->digest, sizeof(digest))) {
1062 aprint_error("rnd: bad seed checksum\n"); 1062 aprint_error("rnd: bad seed checksum\n");
1063 return; 1063 return;
1064 } 1064 }
1065 1065
1066 /* 1066 /*
1067 * It's not really well-defined whether bootloader-supplied 1067 * It's not really well-defined whether bootloader-supplied
1068 * modules run before or after rnd_init(). Handle both cases. 1068 * modules run before or after rnd_init(). Handle both cases.
1069 */ 1069 */
1070 if (rnd_ready) { 1070 if (rnd_ready) {
1071#ifdef RND_VERBOSE 1071#ifdef RND_VERBOSE
1072 printf("rnd: ready, feeding in seed data directly.\n"); 1072 printf("rnd: ready, feeding in seed data directly.\n");
1073#endif 1073#endif
1074 mutex_spin_enter(&rndpool_mtx); 1074 mutex_spin_enter(&rndpool_mtx);
1075 rndpool_add_data(&rnd_pool, boot_rsp->data, 1075 rndpool_add_data(&rnd_pool, boot_rsp->data,
1076 sizeof(boot_rsp->data), 1076 sizeof(boot_rsp->data),
1077 MIN(boot_rsp->entropy, RND_POOLBITS / 2)); 1077 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
1078 memset(boot_rsp, 0, sizeof(*boot_rsp)); 1078 memset(boot_rsp, 0, sizeof(*boot_rsp));
1079 mutex_spin_exit(&rndpool_mtx); 1079 mutex_spin_exit(&rndpool_mtx);
1080 } else { 1080 } else {
1081#ifdef RND_VERBOSE 1081#ifdef RND_VERBOSE
1082 printf("rnd: not ready, deferring seed feed.\n"); 1082 printf("rnd: not ready, deferring seed feed.\n");
1083#endif 1083#endif
1084 } 1084 }
1085} 1085}