| @@ -1,1199 +1,1193 @@ | | | @@ -1,1199 +1,1193 @@ |
1 | /* $NetBSD: kern_rndq.c,v 1.11 2013/06/13 00:55:01 tls Exp $ */ | | 1 | /* $NetBSD: kern_rndq.c,v 1.12 2013/06/13 01:37:03 tls Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1997-2013 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 1997-2013 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon. | | 8 | * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon. |
9 | * This code uses ideas and algorithms from the Linux driver written by | | 9 | * This code uses ideas and algorithms from the Linux driver written by |
10 | * Ted Ts'o. | | 10 | * Ted Ts'o. |
11 | * | | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | | 12 | * Redistribution and use in source and binary forms, with or without |
13 | * modification, are permitted provided that the following conditions | | 13 | * modification, are permitted provided that the following conditions |
14 | * are met: | | 14 | * are met: |
15 | * 1. Redistributions of source code must retain the above copyright | | 15 | * 1. Redistributions of source code must retain the above copyright |
16 | * notice, this list of conditions and the following disclaimer. | | 16 | * notice, this list of conditions and the following disclaimer. |
17 | * 2. Redistributions in binary form must reproduce the above copyright | | 17 | * 2. Redistributions in binary form must reproduce the above copyright |
18 | * notice, this list of conditions and the following disclaimer in the | | 18 | * notice, this list of conditions and the following disclaimer in the |
19 | * documentation and/or other materials provided with the distribution. | | 19 | * documentation and/or other materials provided with the distribution. |
20 | * | | 20 | * |
21 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 21 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
23 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 23 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
24 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 24 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
25 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 25 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
26 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 26 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
27 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 27 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
28 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 28 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
29 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 29 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
30 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 30 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
31 | * POSSIBILITY OF SUCH DAMAGE. | | 31 | * POSSIBILITY OF SUCH DAMAGE. |
32 | */ | | 32 | */ |
33 | | | 33 | |
34 | #include <sys/cdefs.h> | | 34 | #include <sys/cdefs.h> |
35 | __KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.11 2013/06/13 00:55:01 tls Exp $"); | | 35 | __KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.12 2013/06/13 01:37:03 tls Exp $"); |
36 | | | 36 | |
37 | #include <sys/param.h> | | 37 | #include <sys/param.h> |
38 | #include <sys/ioctl.h> | | 38 | #include <sys/ioctl.h> |
39 | #include <sys/fcntl.h> | | 39 | #include <sys/fcntl.h> |
40 | #include <sys/select.h> | | 40 | #include <sys/select.h> |
41 | #include <sys/poll.h> | | 41 | #include <sys/poll.h> |
42 | #include <sys/kmem.h> | | 42 | #include <sys/kmem.h> |
43 | #include <sys/mutex.h> | | 43 | #include <sys/mutex.h> |
44 | #include <sys/proc.h> | | 44 | #include <sys/proc.h> |
45 | #include <sys/kernel.h> | | 45 | #include <sys/kernel.h> |
46 | #include <sys/conf.h> | | 46 | #include <sys/conf.h> |
47 | #include <sys/systm.h> | | 47 | #include <sys/systm.h> |
48 | #include <sys/callout.h> | | 48 | #include <sys/callout.h> |
49 | #include <sys/intr.h> | | 49 | #include <sys/intr.h> |
50 | #include <sys/rnd.h> | | 50 | #include <sys/rnd.h> |
51 | #include <sys/vnode.h> | | 51 | #include <sys/vnode.h> |
52 | #include <sys/pool.h> | | 52 | #include <sys/pool.h> |
53 | #include <sys/kauth.h> | | 53 | #include <sys/kauth.h> |
54 | #include <sys/once.h> | | 54 | #include <sys/once.h> |
55 | #include <sys/rngtest.h> | | 55 | #include <sys/rngtest.h> |
56 | #include <sys/cpu.h> /* XXX temporary, see rnd_detach_source */ | | 56 | #include <sys/cpu.h> /* XXX temporary, see rnd_detach_source */ |
57 | | | 57 | |
58 | #include <dev/rnd_private.h> | | 58 | #include <dev/rnd_private.h> |
59 | | | 59 | |
60 | #if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */ | | 60 | #if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */ |
61 | #include <machine/cpu_counter.h> | | 61 | #include <machine/cpu_counter.h> |
62 | #endif | | 62 | #endif |
63 | | | 63 | |
64 | #ifdef RND_DEBUG | | 64 | #ifdef RND_DEBUG |
65 | #define DPRINTF(l,x) if (rnd_debug & (l)) printf x | | 65 | #define DPRINTF(l,x) if (rnd_debug & (l)) printf x |
66 | int rnd_debug = 0; | | 66 | int rnd_debug = 0; |
67 | #else | | 67 | #else |
68 | #define DPRINTF(l,x) | | 68 | #define DPRINTF(l,x) |
69 | #endif | | 69 | #endif |
70 | | | 70 | |
71 | #define RND_DEBUG_WRITE 0x0001 | | 71 | #define RND_DEBUG_WRITE 0x0001 |
72 | #define RND_DEBUG_READ 0x0002 | | 72 | #define RND_DEBUG_READ 0x0002 |
73 | #define RND_DEBUG_IOCTL 0x0004 | | 73 | #define RND_DEBUG_IOCTL 0x0004 |
74 | #define RND_DEBUG_SNOOZE 0x0008 | | 74 | #define RND_DEBUG_SNOOZE 0x0008 |
75 | | | 75 | |
76 | /* | | 76 | /* |
77 | * list devices attached | | 77 | * list devices attached |
78 | */ | | 78 | */ |
79 | #if 0 | | 79 | #if 0 |
80 | #define RND_VERBOSE | | 80 | #define RND_VERBOSE |
81 | #endif | | 81 | #endif |
82 | | | 82 | |
83 | /* | | 83 | /* |
84 | * This is a little bit of state information attached to each device that we | | 84 | * This is a little bit of state information attached to each device that we |
85 | * collect entropy from. This is simply a collection buffer, and when it | | 85 | * collect entropy from. This is simply a collection buffer, and when it |
86 | * is full it will be "detached" from the source and added to the entropy | | 86 | * is full it will be "detached" from the source and added to the entropy |
87 | * pool after entropy is distilled as much as possible. | | 87 | * pool after entropy is distilled as much as possible. |
88 | */ | | 88 | */ |
89 | #define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */ | | 89 | #define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */ |
90 | typedef struct _rnd_sample_t { | | 90 | typedef struct _rnd_sample_t { |
91 | SIMPLEQ_ENTRY(_rnd_sample_t) next; | | 91 | SIMPLEQ_ENTRY(_rnd_sample_t) next; |
92 | krndsource_t *source; | | 92 | krndsource_t *source; |
93 | int cursor; | | 93 | int cursor; |
94 | int entropy; | | 94 | int entropy; |
95 | u_int32_t ts[RND_SAMPLE_COUNT]; | | 95 | u_int32_t ts[RND_SAMPLE_COUNT]; |
96 | u_int32_t values[RND_SAMPLE_COUNT]; | | 96 | u_int32_t values[RND_SAMPLE_COUNT]; |
97 | } rnd_sample_t; | | 97 | } rnd_sample_t; |
98 | | | 98 | |
99 | /* | | 99 | /* |
100 | * The event queue. Fields are altered at an interrupt level. | | 100 | * The event queue. Fields are altered at an interrupt level. |
101 | * All accesses must be protected with the mutex. | | 101 | * All accesses must be protected with the mutex. |
102 | */ | | 102 | */ |
103 | SIMPLEQ_HEAD(, _rnd_sample_t) rnd_samples; | | 103 | SIMPLEQ_HEAD(, _rnd_sample_t) rnd_samples; |
104 | kmutex_t rnd_mtx; | | 104 | kmutex_t rnd_mtx; |
105 | | | 105 | |
106 | /* | | 106 | /* |
107 | * This lock protects dispatch of our soft interrupts. | | | |
108 | */ | | | |
109 | kmutex_t rndsoft_mtx; | | | |
110 | | | | |
111 | /* | | | |
112 | * Entropy sinks: usually other generators waiting to be rekeyed. | | 107 | * Entropy sinks: usually other generators waiting to be rekeyed. |
113 | * | | 108 | * |
114 | * A sink's callback MUST NOT re-add the sink to the list, or | | 109 | * A sink's callback MUST NOT re-add the sink to the list, or |
115 | * list corruption will occur. The list is protected by the | | 110 | * list corruption will occur. The list is protected by the |
116 | * rndsink_mtx, which must be released before calling any sink's | | 111 | * rndsink_mtx, which must be released before calling any sink's |
117 | * callback. | | 112 | * callback. |
118 | */ | | 113 | */ |
119 | TAILQ_HEAD(, rndsink) rnd_sinks; | | 114 | TAILQ_HEAD(, rndsink) rnd_sinks; |
120 | kmutex_t rndsink_mtx; | | 115 | kmutex_t rndsink_mtx; |
121 | | | 116 | |
122 | /* | | 117 | /* |
123 | * Memory pool for sample buffers | | 118 | * Memory pool for sample buffers |
124 | */ | | 119 | */ |
125 | static pool_cache_t rnd_mempc; | | 120 | static pool_cache_t rnd_mempc; |
126 | | | 121 | |
127 | /* | | 122 | /* |
128 | * Our random pool. This is defined here rather than using the general | | 123 | * Our random pool. This is defined here rather than using the general |
129 | * purpose one defined in rndpool.c. | | 124 | * purpose one defined in rndpool.c. |
130 | * | | 125 | * |
131 | * Samples are collected and queued into a separate mutex-protected queue | | 126 | * Samples are collected and queued into a separate mutex-protected queue |
132 | * (rnd_samples, see above), and processed in a timeout routine; therefore, | | 127 | * (rnd_samples, see above), and processed in a timeout routine; therefore, |
133 | * the mutex protecting the random pool is at IPL_SOFTCLOCK() as well. | | 128 | * the mutex protecting the random pool is at IPL_SOFTCLOCK() as well. |
134 | */ | | 129 | */ |
135 | rndpool_t rnd_pool; | | 130 | rndpool_t rnd_pool; |
136 | kmutex_t rndpool_mtx; | | 131 | kmutex_t rndpool_mtx; |
137 | kcondvar_t rndpool_cv; | | 132 | kcondvar_t rndpool_cv; |
138 | | | 133 | |
139 | /* | | 134 | /* |
140 | * This source is used to easily "remove" queue entries when the source | | 135 | * This source is used to easily "remove" queue entries when the source |
141 | * which actually generated the events is going away. | | 136 | * which actually generated the events is going away. |
142 | */ | | 137 | */ |
143 | static krndsource_t rnd_source_no_collect = { | | 138 | static krndsource_t rnd_source_no_collect = { |
144 | /* LIST_ENTRY list */ | | 139 | /* LIST_ENTRY list */ |
145 | .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', | | 140 | .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', |
146 | 0, 0, 0, 0, 0, 0, 0 }, | | 141 | 0, 0, 0, 0, 0, 0, 0 }, |
147 | .last_time = 0, .last_delta = 0, .last_delta2 = 0, .total = 0, | | 142 | .last_time = 0, .last_delta = 0, .last_delta2 = 0, .total = 0, |
148 | .type = RND_TYPE_UNKNOWN, | | 143 | .type = RND_TYPE_UNKNOWN, |
149 | .flags = (RND_FLAG_NO_COLLECT | | | 144 | .flags = (RND_FLAG_NO_COLLECT | |
150 | RND_FLAG_NO_ESTIMATE | | | 145 | RND_FLAG_NO_ESTIMATE | |
151 | RND_TYPE_UNKNOWN), | | 146 | RND_TYPE_UNKNOWN), |
152 | .state = NULL, | | 147 | .state = NULL, |
153 | .test_cnt = 0, | | 148 | .test_cnt = 0, |
154 | .test = NULL | | 149 | .test = NULL |
155 | }; | | 150 | }; |
156 | void *rnd_process, *rnd_wakeup; | | 151 | void *rnd_process, *rnd_wakeup; |
157 | struct callout skew_callout; | | 152 | struct callout skew_callout; |
158 | | | 153 | |
159 | void rnd_wakeup_readers(void); | | 154 | void rnd_wakeup_readers(void); |
160 | static inline u_int32_t rnd_estimate_entropy(krndsource_t *, u_int32_t); | | 155 | static inline u_int32_t rnd_estimate_entropy(krndsource_t *, u_int32_t); |
161 | static inline u_int32_t rnd_counter(void); | | 156 | static inline u_int32_t rnd_counter(void); |
162 | static void rnd_intr(void *); | | 157 | static void rnd_intr(void *); |
163 | static void rnd_wake(void *); | | 158 | static void rnd_wake(void *); |
164 | static void rnd_process_events(void); | | 159 | static void rnd_process_events(void); |
165 | u_int32_t rnd_extract_data_locked(void *, u_int32_t, u_int32_t); /* XXX */ | | 160 | u_int32_t rnd_extract_data_locked(void *, u_int32_t, u_int32_t); /* XXX */ |
166 | static void rnd_add_data_ts(krndsource_t *, const void *const, | | 161 | static void rnd_add_data_ts(krndsource_t *, const void *const, |
167 | uint32_t, uint32_t, uint32_t); | | 162 | uint32_t, uint32_t, uint32_t); |
168 | | | 163 | |
169 | int rnd_ready = 0; | | 164 | int rnd_ready = 0; |
170 | int rnd_initial_entropy = 0; | | 165 | int rnd_initial_entropy = 0; |
171 | | | 166 | |
172 | #ifdef DIAGNOSTIC | | 167 | #ifdef DIAGNOSTIC |
173 | static int rnd_tested = 0; | | 168 | static int rnd_tested = 0; |
174 | static rngtest_t rnd_rt; | | 169 | static rngtest_t rnd_rt; |
175 | static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)]; | | 170 | static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)]; |
176 | #endif | | 171 | #endif |
177 | | | 172 | |
178 | LIST_HEAD(, krndsource) rnd_sources; | | 173 | LIST_HEAD(, krndsource) rnd_sources; |
179 | | | 174 | |
180 | rndsave_t *boot_rsp; | | 175 | rndsave_t *boot_rsp; |
181 | | | 176 | |
182 | /* | | 177 | /* |
183 | * Generate a 32-bit counter. This should be more machine dependent, | | 178 | * Generate a 32-bit counter. This should be more machine dependent, |
184 | * using cycle counters and the like when possible. | | 179 | * using cycle counters and the like when possible. |
185 | */ | | 180 | */ |
186 | static inline u_int32_t | | 181 | static inline u_int32_t |
187 | rnd_counter(void) | | 182 | rnd_counter(void) |
188 | { | | 183 | { |
189 | struct timeval tv; | | 184 | struct timeval tv; |
190 | | | 185 | |
191 | #if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */ | | 186 | #if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */ |
192 | if (cpu_hascounter()) | | 187 | if (cpu_hascounter()) |
193 | return (cpu_counter32()); | | 188 | return (cpu_counter32()); |
194 | #endif | | 189 | #endif |
195 | if (rnd_ready) { | | 190 | if (rnd_ready) { |
196 | microtime(&tv); | | 191 | microtime(&tv); |
197 | return (tv.tv_sec * 1000000 + tv.tv_usec); | | 192 | return (tv.tv_sec * 1000000 + tv.tv_usec); |
198 | } | | 193 | } |
199 | /* when called from rnd_init, it's too early to call microtime safely */ | | 194 | /* when called from rnd_init, it's too early to call microtime safely */ |
200 | return (0); | | 195 | return (0); |
201 | } | | 196 | } |
202 | | | 197 | |
203 | /* | | 198 | /* |
204 | * We may be called from low IPL -- protect our softint. | | 199 | * We may be called from low IPL -- protect our softint. |
205 | */ | | 200 | */ |
206 | | | 201 | |
207 | static inline void | | 202 | static inline void |
208 | rnd_schedule_softint(void *softint) | | 203 | rnd_schedule_softint(void *softint) |
209 | { | | 204 | { |
210 | mutex_spin_enter(&rndsoft_mtx); | | 205 | kpreempt_disable(); |
211 | softint_schedule(softint); | | 206 | softint_schedule(softint); |
212 | mutex_spin_exit(&rndsoft_mtx); | | 207 | kpreempt_enable(); |
213 | } | | 208 | } |
214 | | | 209 | |
215 | /* | | 210 | /* |
216 | * XXX repulsive: we can't initialize our softints in rnd_init | | 211 | * XXX repulsive: we can't initialize our softints in rnd_init |
217 | * XXX (too early) so we wrap the points where we'd schedule them, thus. | | 212 | * XXX (too early) so we wrap the points where we'd schedule them, thus. |
218 | */ | | 213 | */ |
219 | static inline void | | 214 | static inline void |
220 | rnd_schedule_process(void) | | 215 | rnd_schedule_process(void) |
221 | { | | 216 | { |
222 | if (__predict_true(rnd_process)) { | | 217 | if (__predict_true(rnd_process)) { |
223 | rnd_schedule_softint(rnd_process); | | 218 | rnd_schedule_softint(rnd_process); |
224 | return; | | 219 | return; |
225 | } | | 220 | } |
226 | if (!cold) { | | 221 | if (!cold) { |
227 | rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE, | | 222 | rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE, |
228 | rnd_intr, NULL); | | 223 | rnd_intr, NULL); |
229 | } | | 224 | } |
230 | rnd_process_events(); | | 225 | rnd_process_events(); |
231 | } | | 226 | } |
232 | | | 227 | |
233 | static inline void | | 228 | static inline void |
234 | rnd_schedule_wakeup(void) | | 229 | rnd_schedule_wakeup(void) |
235 | { | | 230 | { |
236 | if (__predict_true(rnd_wakeup)) { | | 231 | if (__predict_true(rnd_wakeup)) { |
237 | rnd_schedule_softint(rnd_wakeup); | | 232 | rnd_schedule_softint(rnd_wakeup); |
238 | return; | | 233 | return; |
239 | } | | 234 | } |
240 | if (!cold) { | | 235 | if (!cold) { |
241 | rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE, | | 236 | rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE, |
242 | rnd_wake, NULL); | | 237 | rnd_wake, NULL); |
243 | } | | 238 | } |
244 | rnd_wakeup_readers(); | | 239 | rnd_wakeup_readers(); |
245 | } | | 240 | } |
246 | | | 241 | |
247 | /* | | 242 | /* |
248 | * Tell any sources with "feed me" callbacks that we are hungry. | | 243 | * Tell any sources with "feed me" callbacks that we are hungry. |
249 | */ | | 244 | */ |
250 | static void | | 245 | static void |
251 | rnd_getmore(size_t byteswanted) | | 246 | rnd_getmore(size_t byteswanted) |
252 | { | | 247 | { |
253 | krndsource_t *rs; | | 248 | krndsource_t *rs; |
254 | | | 249 | |
255 | KASSERT(mutex_owned(&rndpool_mtx)); | | 250 | KASSERT(mutex_owned(&rndpool_mtx)); |
256 | | | 251 | |
257 | LIST_FOREACH(rs, &rnd_sources, list) { | | 252 | LIST_FOREACH(rs, &rnd_sources, list) { |
258 | if (rs->flags & RND_FLAG_HASCB) { | | 253 | if (rs->flags & RND_FLAG_HASCB) { |
259 | KASSERT(rs->get != NULL); | | 254 | KASSERT(rs->get != NULL); |
260 | KASSERT(rs->getarg != NULL); | | 255 | KASSERT(rs->getarg != NULL); |
261 | rs->get((size_t)byteswanted, rs->getarg); | | 256 | rs->get((size_t)byteswanted, rs->getarg); |
262 | #ifdef RND_VERBOSE | | 257 | #ifdef RND_VERBOSE |
263 | printf("rnd: asking source %s for %d bytes\n", | | 258 | printf("rnd: asking source %s for %d bytes\n", |
264 | rs->name, (int)byteswanted); | | 259 | rs->name, (int)byteswanted); |
265 | #endif | | 260 | #endif |
266 | } | | 261 | } |
267 | } | | 262 | } |
268 | } | | 263 | } |
269 | | | 264 | |
270 | /* | | 265 | /* |
271 | * Check to see if there are readers waiting on us. If so, kick them. | | 266 | * Check to see if there are readers waiting on us. If so, kick them. |
272 | */ | | 267 | */ |
273 | void | | 268 | void |
274 | rnd_wakeup_readers(void) | | 269 | rnd_wakeup_readers(void) |
275 | { | | 270 | { |
276 | rndsink_t *sink, *tsink; | | 271 | rndsink_t *sink, *tsink; |
277 | size_t entropy_count; | | 272 | size_t entropy_count; |
278 | TAILQ_HEAD(, rndsink) sunk = TAILQ_HEAD_INITIALIZER(sunk); | | 273 | TAILQ_HEAD(, rndsink) sunk = TAILQ_HEAD_INITIALIZER(sunk); |
279 | | | 274 | |
280 | mutex_spin_enter(&rndpool_mtx); | | 275 | mutex_spin_enter(&rndpool_mtx); |
281 | entropy_count = rndpool_get_entropy_count(&rnd_pool); | | 276 | entropy_count = rndpool_get_entropy_count(&rnd_pool); |
282 | if (entropy_count < RND_ENTROPY_THRESHOLD * 8) { | | 277 | if (entropy_count < RND_ENTROPY_THRESHOLD * 8) { |
283 | rnd_empty = 1; | | 278 | rnd_empty = 1; |
284 | mutex_spin_exit(&rndpool_mtx); | | 279 | mutex_spin_exit(&rndpool_mtx); |
285 | return; | | 280 | return; |
286 | } else { | | 281 | } else { |
287 | #ifdef RND_VERBOSE | | 282 | #ifdef RND_VERBOSE |
288 | if (__predict_false(!rnd_initial_entropy)) { | | 283 | if (__predict_false(!rnd_initial_entropy)) { |
289 | printf("rnd: have initial entropy (%u)\n", | | 284 | printf("rnd: have initial entropy (%u)\n", |
290 | (unsigned int)entropy_count); | | 285 | (unsigned int)entropy_count); |
291 | } | | 286 | } |
292 | #endif | | 287 | #endif |
293 | rnd_empty = 0; | | 288 | rnd_empty = 0; |
294 | rnd_initial_entropy = 1; | | 289 | rnd_initial_entropy = 1; |
295 | } | | 290 | } |
296 | | | 291 | |
297 | /* | | 292 | /* |
298 | * First, take care of consumers needing rekeying. | | 293 | * First, take care of consumers needing rekeying. |
299 | */ | | 294 | */ |
300 | mutex_spin_enter(&rndsink_mtx); | | 295 | mutex_spin_enter(&rndsink_mtx); |
301 | TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) { | | 296 | TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) { |
302 | if (!mutex_tryenter(&sink->mtx)) { | | 297 | if (!mutex_tryenter(&sink->mtx)) { |
303 | #ifdef RND_VERBOSE | | 298 | #ifdef RND_VERBOSE |
304 | printf("rnd_wakeup_readers: " | | 299 | printf("rnd_wakeup_readers: " |
305 | "skipping busy rndsink\n"); | | 300 | "skipping busy rndsink\n"); |
306 | #endif | | 301 | #endif |
307 | continue; | | 302 | continue; |
308 | } | | 303 | } |
309 | | | 304 | |
310 | KASSERT(RSTATE_PENDING == sink->state); | | 305 | KASSERT(RSTATE_PENDING == sink->state); |
311 | | | 306 | |
312 | if (sink->len * 8 < rndpool_get_entropy_count(&rnd_pool)) { | | 307 | if (sink->len * 8 < rndpool_get_entropy_count(&rnd_pool)) { |
313 | /* We have enough entropy to sink some here. */ | | 308 | /* We have enough entropy to sink some here. */ |
314 | if (rndpool_extract_data(&rnd_pool, sink->data, | | 309 | if (rndpool_extract_data(&rnd_pool, sink->data, |
315 | sink->len, RND_EXTRACT_GOOD) | | 310 | sink->len, RND_EXTRACT_GOOD) |
316 | != sink->len) { | | 311 | != sink->len) { |
317 | panic("could not extract estimated " | | 312 | panic("could not extract estimated " |
318 | "entropy from pool"); | | 313 | "entropy from pool"); |
319 | } | | 314 | } |
320 | sink->state = RSTATE_HASBITS; | | 315 | sink->state = RSTATE_HASBITS; |
321 | /* Move this sink to the list of pending callbacks */ | | 316 | /* Move this sink to the list of pending callbacks */ |
322 | TAILQ_REMOVE(&rnd_sinks, sink, tailq); | | 317 | TAILQ_REMOVE(&rnd_sinks, sink, tailq); |
323 | TAILQ_INSERT_HEAD(&sunk, sink, tailq); | | 318 | TAILQ_INSERT_HEAD(&sunk, sink, tailq); |
324 | } else { | | 319 | } else { |
325 | #ifdef RND_VERBOSE | | 320 | #ifdef RND_VERBOSE |
326 | printf("sink wants %d, we have %d, asking for more\n", | | 321 | printf("sink wants %d, we have %d, asking for more\n", |
327 | (int)sink->len * 8, | | 322 | (int)sink->len * 8, |
328 | (int)rndpool_get_entropy_count(&rnd_pool)); | | 323 | (int)rndpool_get_entropy_count(&rnd_pool)); |
329 | #endif | | 324 | #endif |
330 | mutex_spin_exit(&sink->mtx); | | 325 | mutex_spin_exit(&sink->mtx); |
331 | rnd_getmore(sink->len * 8); | | 326 | rnd_getmore(sink->len * 8); |
332 | } | | 327 | } |
333 | } | | 328 | } |
334 | mutex_spin_exit(&rndsink_mtx); | | 329 | mutex_spin_exit(&rndsink_mtx); |
335 | mutex_spin_exit(&rndpool_mtx); | | 330 | mutex_spin_exit(&rndpool_mtx); |
336 | | | 331 | |
337 | /* | | 332 | /* |
338 | * Now that we have dropped the mutexes, we can run sinks' callbacks. | | 333 | * Now that we have dropped the mutexes, we can run sinks' callbacks. |
339 | * Since we have reused the "tailq" member of the sink structure for | | 334 | * Since we have reused the "tailq" member of the sink structure for |
340 | * this temporary on-stack queue, the callback must NEVER re-add | | 335 | * this temporary on-stack queue, the callback must NEVER re-add |
341 | * the sink to the main queue, or our on-stack queue will become | | 336 | * the sink to the main queue, or our on-stack queue will become |
342 | * corrupt. | | 337 | * corrupt. |
343 | */ | | 338 | */ |
344 | while ((sink = TAILQ_FIRST(&sunk))) { | | 339 | while ((sink = TAILQ_FIRST(&sunk))) { |
345 | #ifdef RND_VERBOSE | | 340 | #ifdef RND_VERBOSE |
346 | printf("supplying %d bytes to entropy sink \"%s\"" | | 341 | printf("supplying %d bytes to entropy sink \"%s\"" |
347 | " (cb %p, arg %p).\n", | | 342 | " (cb %p, arg %p).\n", |
348 | (int)sink->len, sink->name, sink->cb, sink->arg); | | 343 | (int)sink->len, sink->name, sink->cb, sink->arg); |
349 | #endif | | 344 | #endif |
350 | sink->state = RSTATE_HASBITS; | | 345 | sink->state = RSTATE_HASBITS; |
351 | sink->cb(sink->arg); | | 346 | sink->cb(sink->arg); |
352 | TAILQ_REMOVE(&sunk, sink, tailq); | | 347 | TAILQ_REMOVE(&sunk, sink, tailq); |
353 | mutex_spin_exit(&sink->mtx); | | 348 | mutex_spin_exit(&sink->mtx); |
354 | } | | 349 | } |
355 | } | | 350 | } |
356 | | | 351 | |
357 | /* | | 352 | /* |
358 | * Use the timing of the event to estimate the entropy gathered. | | 353 | * Use the timing of the event to estimate the entropy gathered. |
359 | * If all the differentials (first, second, and third) are non-zero, return | | 354 | * If all the differentials (first, second, and third) are non-zero, return |
360 | * non-zero. If any of these are zero, return zero. | | 355 | * non-zero. If any of these are zero, return zero. |
361 | */ | | 356 | */ |
362 | static inline u_int32_t | | 357 | static inline u_int32_t |
363 | rnd_estimate_entropy(krndsource_t *rs, u_int32_t t) | | 358 | rnd_estimate_entropy(krndsource_t *rs, u_int32_t t) |
364 | { | | 359 | { |
365 | int32_t delta, delta2, delta3; | | 360 | int32_t delta, delta2, delta3; |
366 | | | 361 | |
367 | /* | | 362 | /* |
368 | * If the time counter has overflowed, calculate the real difference. | | 363 | * If the time counter has overflowed, calculate the real difference. |
369 | * If it has not, it is simpler. | | 364 | * If it has not, it is simpler. |
370 | */ | | 365 | */ |
371 | if (t < rs->last_time) | | 366 | if (t < rs->last_time) |
372 | delta = UINT_MAX - rs->last_time + t; | | 367 | delta = UINT_MAX - rs->last_time + t; |
373 | else | | 368 | else |
374 | delta = rs->last_time - t; | | 369 | delta = rs->last_time - t; |
375 | | | 370 | |
376 | if (delta < 0) | | 371 | if (delta < 0) |
377 | delta = -delta; | | 372 | delta = -delta; |
378 | | | 373 | |
379 | /* | | 374 | /* |
380 | * Calculate the second and third order differentials | | 375 | * Calculate the second and third order differentials |
381 | */ | | 376 | */ |
382 | delta2 = rs->last_delta - delta; | | 377 | delta2 = rs->last_delta - delta; |
383 | if (delta2 < 0) | | 378 | if (delta2 < 0) |
384 | delta2 = -delta2; | | 379 | delta2 = -delta2; |
385 | | | 380 | |
386 | delta3 = rs->last_delta2 - delta2; | | 381 | delta3 = rs->last_delta2 - delta2; |
387 | if (delta3 < 0) | | 382 | if (delta3 < 0) |
388 | delta3 = -delta3; | | 383 | delta3 = -delta3; |
389 | | | 384 | |
390 | rs->last_time = t; | | 385 | rs->last_time = t; |
391 | rs->last_delta = delta; | | 386 | rs->last_delta = delta; |
392 | rs->last_delta2 = delta2; | | 387 | rs->last_delta2 = delta2; |
393 | | | 388 | |
394 | /* | | 389 | /* |
395 | * If any delta is 0, we got no entropy. If all are non-zero, we | | 390 | * If any delta is 0, we got no entropy. If all are non-zero, we |
396 | * might have something. | | 391 | * might have something. |
397 | */ | | 392 | */ |
398 | if (delta == 0 || delta2 == 0 || delta3 == 0) | | 393 | if (delta == 0 || delta2 == 0 || delta3 == 0) |
399 | return (0); | | 394 | return (0); |
400 | | | 395 | |
401 | return (1); | | 396 | return (1); |
402 | } | | 397 | } |
403 | | | 398 | |
404 | #if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) | | 399 | #if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) |
405 | static void | | 400 | static void |
406 | rnd_skew(void *arg) | | 401 | rnd_skew(void *arg) |
407 | { | | 402 | { |
408 | static krndsource_t skewsrc; | | 403 | static krndsource_t skewsrc; |
409 | static int live, flipflop; | | 404 | static int live, flipflop; |
410 | | | 405 | |
411 | /* | | 406 | /* |
412 | * Only one instance of this callout will ever be scheduled | | 407 | * Only one instance of this callout will ever be scheduled |
413 | * at a time (it is only ever scheduled by itself). So no | | 408 | * at a time (it is only ever scheduled by itself). So no |
414 | * locking is required here. | | 409 | * locking is required here. |
415 | */ | | 410 | */ |
416 | | | 411 | |
417 | /* | | 412 | /* |
418 | * Even on systems with seemingly stable clocks, the | | 413 | * Even on systems with seemingly stable clocks, the |
419 | * entropy estimator seems to think we get 1 bit here | | 414 | * entropy estimator seems to think we get 1 bit here |
420 | * about every 2 calls. That seems like too much. Set | | 415 | * about every 2 calls. That seems like too much. Set |
421 | * NO_ESTIMATE on this source until we can better analyze | | 416 | * NO_ESTIMATE on this source until we can better analyze |
422 | * the entropy of its output. | | 417 | * the entropy of its output. |
423 | */ | | 418 | */ |
424 | if (__predict_false(!live)) { | | 419 | if (__predict_false(!live)) { |
425 | rnd_attach_source(&skewsrc, "callout", RND_TYPE_SKEW, | | 420 | rnd_attach_source(&skewsrc, "callout", RND_TYPE_SKEW, |
426 | RND_FLAG_NO_ESTIMATE); | | 421 | RND_FLAG_NO_ESTIMATE); |
427 | live = 1; | | 422 | live = 1; |
428 | } | | 423 | } |
429 | | | 424 | |
430 | flipflop = !flipflop; | | 425 | flipflop = !flipflop; |
431 | | | 426 | |
432 | if (flipflop) { | | 427 | if (flipflop) { |
433 | rnd_add_uint32(&skewsrc, rnd_counter()); | | 428 | rnd_add_uint32(&skewsrc, rnd_counter()); |
434 | callout_schedule(&skew_callout, hz); | | 429 | callout_schedule(&skew_callout, hz); |
435 | } else { | | 430 | } else { |
436 | callout_schedule(&skew_callout, 1); | | 431 | callout_schedule(&skew_callout, 1); |
437 | } | | 432 | } |
438 | } | | 433 | } |
439 | #endif | | 434 | #endif |
440 | | | 435 | |
441 | /* | | 436 | /* |
442 | * initialize the global random pool for our use. | | 437 | * initialize the global random pool for our use. |
443 | * rnd_init() must be called very early on in the boot process, so | | 438 | * rnd_init() must be called very early on in the boot process, so |
444 | * the pool is ready for other devices to attach as sources. | | 439 | * the pool is ready for other devices to attach as sources. |
445 | */ | | 440 | */ |
446 | void | | 441 | void |
447 | rnd_init(void) | | 442 | rnd_init(void) |
448 | { | | 443 | { |
449 | u_int32_t c; | | 444 | u_int32_t c; |
450 | | | 445 | |
451 | if (rnd_ready) | | 446 | if (rnd_ready) |
452 | return; | | 447 | return; |
453 | | | 448 | |
454 | mutex_init(&rnd_mtx, MUTEX_DEFAULT, IPL_VM); | | 449 | mutex_init(&rnd_mtx, MUTEX_DEFAULT, IPL_VM); |
455 | mutex_init(&rndsink_mtx, MUTEX_DEFAULT, IPL_VM); | | 450 | mutex_init(&rndsink_mtx, MUTEX_DEFAULT, IPL_VM); |
456 | mutex_init(&rndsoft_mtx, MUTEX_DEFAULT, IPL_VM); | | | |
457 | | | 451 | |
458 | /* | | 452 | /* |
459 | * take a counter early, hoping that there's some variance in | | 453 | * take a counter early, hoping that there's some variance in |
460 | * the following operations | | 454 | * the following operations |
461 | */ | | 455 | */ |
462 | c = rnd_counter(); | | 456 | c = rnd_counter(); |
463 | | | 457 | |
464 | LIST_INIT(&rnd_sources); | | 458 | LIST_INIT(&rnd_sources); |
465 | SIMPLEQ_INIT(&rnd_samples); | | 459 | SIMPLEQ_INIT(&rnd_samples); |
466 | TAILQ_INIT(&rnd_sinks); | | 460 | TAILQ_INIT(&rnd_sinks); |
467 | | | 461 | |
468 | rndpool_init(&rnd_pool); | | 462 | rndpool_init(&rnd_pool); |
469 | mutex_init(&rndpool_mtx, MUTEX_DEFAULT, IPL_VM); | | 463 | mutex_init(&rndpool_mtx, MUTEX_DEFAULT, IPL_VM); |
470 | cv_init(&rndpool_cv, "rndread"); | | 464 | cv_init(&rndpool_cv, "rndread"); |
471 | | | 465 | |
472 | rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0, | | 466 | rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0, |
473 | "rndsample", NULL, IPL_VM, | | 467 | "rndsample", NULL, IPL_VM, |
474 | NULL, NULL, NULL); | | 468 | NULL, NULL, NULL); |
475 | | | 469 | |
476 | /* | | 470 | /* |
477 | * Set resource limit. The rnd_process_events() function | | 471 | * Set resource limit. The rnd_process_events() function |
478 | * is called every tick and processes the sample queue. | | 472 | * is called every tick and processes the sample queue. |
479 | * Without limitation, if a lot of rnd_add_*() are called, | | 473 | * Without limitation, if a lot of rnd_add_*() are called, |
480 | * all kernel memory may be eaten up. | | 474 | * all kernel memory may be eaten up. |
481 | */ | | 475 | */ |
482 | pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0); | | 476 | pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0); |
483 | | | 477 | |
484 | /* | | 478 | /* |
485 | * Mix *something*, *anything* into the pool to help it get started. | | 479 | * Mix *something*, *anything* into the pool to help it get started. |
486 | * However, it's not safe for rnd_counter() to call microtime() yet, | | 480 | * However, it's not safe for rnd_counter() to call microtime() yet, |
487 | * so on some platforms we might just end up with zeros anyway. | | 481 | * so on some platforms we might just end up with zeros anyway. |
488 | * XXX more things to add would be nice. | | 482 | * XXX more things to add would be nice. |
489 | */ | | 483 | */ |
490 | if (c) { | | 484 | if (c) { |
491 | mutex_spin_enter(&rndpool_mtx); | | 485 | mutex_spin_enter(&rndpool_mtx); |
492 | rndpool_add_data(&rnd_pool, &c, sizeof(c), 1); | | 486 | rndpool_add_data(&rnd_pool, &c, sizeof(c), 1); |
493 | c = rnd_counter(); | | 487 | c = rnd_counter(); |
494 | rndpool_add_data(&rnd_pool, &c, sizeof(c), 1); | | 488 | rndpool_add_data(&rnd_pool, &c, sizeof(c), 1); |
495 | mutex_spin_exit(&rndpool_mtx); | | 489 | mutex_spin_exit(&rndpool_mtx); |
496 | } | | 490 | } |
497 | | | 491 | |
498 | rnd_ready = 1; | | 492 | rnd_ready = 1; |
499 | | | 493 | |
500 | /* | | 494 | /* |
501 | * If we have a cycle counter, take its error with respect | | 495 | * If we have a cycle counter, take its error with respect |
502 | * to the callout mechanism as a source of entropy, ala | | 496 | * to the callout mechanism as a source of entropy, ala |
503 | * TrueRand. | | 497 | * TrueRand. |
504 | * | | 498 | * |
505 | * XXX This will do little when the cycle counter *is* what's | | 499 | * XXX This will do little when the cycle counter *is* what's |
506 | * XXX clocking the callout mechanism. How to get this right | | 500 | * XXX clocking the callout mechanism. How to get this right |
507 | * XXX without unsightly spelunking in the timecounter code? | | 501 | * XXX without unsightly spelunking in the timecounter code? |
508 | */ | | 502 | */ |
509 | #if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */ | | 503 | #if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */ |
510 | callout_init(&skew_callout, CALLOUT_MPSAFE); | | 504 | callout_init(&skew_callout, CALLOUT_MPSAFE); |
511 | callout_setfunc(&skew_callout, rnd_skew, NULL); | | 505 | callout_setfunc(&skew_callout, rnd_skew, NULL); |
512 | rnd_skew(NULL); | | 506 | rnd_skew(NULL); |
513 | #endif | | 507 | #endif |
514 | | | 508 | |
515 | #ifdef RND_VERBOSE | | 509 | #ifdef RND_VERBOSE |
516 | printf("rnd: initialised (%u)%s", RND_POOLBITS, | | 510 | printf("rnd: initialised (%u)%s", RND_POOLBITS, |
517 | c ? " with counter\n" : "\n"); | | 511 | c ? " with counter\n" : "\n"); |
518 | #endif | | 512 | #endif |
519 | if (boot_rsp != NULL) { | | 513 | if (boot_rsp != NULL) { |
520 | mutex_spin_enter(&rndpool_mtx); | | 514 | mutex_spin_enter(&rndpool_mtx); |
521 | rndpool_add_data(&rnd_pool, boot_rsp->data, | | 515 | rndpool_add_data(&rnd_pool, boot_rsp->data, |
522 | sizeof(boot_rsp->data), | | 516 | sizeof(boot_rsp->data), |
523 | MIN(boot_rsp->entropy, | | 517 | MIN(boot_rsp->entropy, |
524 | RND_POOLBITS / 2)); | | 518 | RND_POOLBITS / 2)); |
525 | if (rndpool_get_entropy_count(&rnd_pool) > | | 519 | if (rndpool_get_entropy_count(&rnd_pool) > |
526 | RND_ENTROPY_THRESHOLD * 8) { | | 520 | RND_ENTROPY_THRESHOLD * 8) { |
527 | rnd_initial_entropy = 1; | | 521 | rnd_initial_entropy = 1; |
528 | } | | 522 | } |
529 | mutex_spin_exit(&rndpool_mtx); | | 523 | mutex_spin_exit(&rndpool_mtx); |
530 | #ifdef RND_VERBOSE | | 524 | #ifdef RND_VERBOSE |
531 | printf("rnd: seeded with %d bits\n", | | 525 | printf("rnd: seeded with %d bits\n", |
532 | MIN(boot_rsp->entropy, RND_POOLBITS / 2)); | | 526 | MIN(boot_rsp->entropy, RND_POOLBITS / 2)); |
533 | #endif | | 527 | #endif |
534 | memset(boot_rsp, 0, sizeof(*boot_rsp)); | | 528 | memset(boot_rsp, 0, sizeof(*boot_rsp)); |
535 | } | | 529 | } |
536 | } | | 530 | } |
537 | | | 531 | |
538 | static rnd_sample_t * | | 532 | static rnd_sample_t * |
539 | rnd_sample_allocate(krndsource_t *source) | | 533 | rnd_sample_allocate(krndsource_t *source) |
540 | { | | 534 | { |
541 | rnd_sample_t *c; | | 535 | rnd_sample_t *c; |
542 | | | 536 | |
543 | c = pool_cache_get(rnd_mempc, PR_WAITOK); | | 537 | c = pool_cache_get(rnd_mempc, PR_WAITOK); |
544 | if (c == NULL) | | 538 | if (c == NULL) |
545 | return (NULL); | | 539 | return (NULL); |
546 | | | 540 | |
547 | c->source = source; | | 541 | c->source = source; |
548 | c->cursor = 0; | | 542 | c->cursor = 0; |
549 | c->entropy = 0; | | 543 | c->entropy = 0; |
550 | | | 544 | |
551 | return (c); | | 545 | return (c); |
552 | } | | 546 | } |
553 | | | 547 | |
554 | /* | | 548 | /* |
555 | * Don't wait on allocation. To be used in an interrupt context. | | 549 | * Don't wait on allocation. To be used in an interrupt context. |
556 | */ | | 550 | */ |
557 | static rnd_sample_t * | | 551 | static rnd_sample_t * |
558 | rnd_sample_allocate_isr(krndsource_t *source) | | 552 | rnd_sample_allocate_isr(krndsource_t *source) |
559 | { | | 553 | { |
560 | rnd_sample_t *c; | | 554 | rnd_sample_t *c; |
561 | | | 555 | |
562 | c = pool_cache_get(rnd_mempc, PR_NOWAIT); | | 556 | c = pool_cache_get(rnd_mempc, PR_NOWAIT); |
563 | if (c == NULL) | | 557 | if (c == NULL) |
564 | return (NULL); | | 558 | return (NULL); |
565 | | | 559 | |
566 | c->source = source; | | 560 | c->source = source; |
567 | c->cursor = 0; | | 561 | c->cursor = 0; |
568 | c->entropy = 0; | | 562 | c->entropy = 0; |
569 | | | 563 | |
570 | return (c); | | 564 | return (c); |
571 | } | | 565 | } |
572 | | | 566 | |
573 | static void | | 567 | static void |
574 | rnd_sample_free(rnd_sample_t *c) | | 568 | rnd_sample_free(rnd_sample_t *c) |
575 | { | | 569 | { |
576 | memset(c, 0, sizeof(*c)); | | 570 | memset(c, 0, sizeof(*c)); |
577 | pool_cache_put(rnd_mempc, c); | | 571 | pool_cache_put(rnd_mempc, c); |
578 | } | | 572 | } |
579 | | | 573 | |
580 | /* | | 574 | /* |
581 | * Add a source to our list of sources. | | 575 | * Add a source to our list of sources. |
582 | */ | | 576 | */ |
583 | void | | 577 | void |
584 | rnd_attach_source(krndsource_t *rs, const char *name, u_int32_t type, | | 578 | rnd_attach_source(krndsource_t *rs, const char *name, u_int32_t type, |
585 | u_int32_t flags) | | 579 | u_int32_t flags) |
586 | { | | 580 | { |
587 | u_int32_t ts; | | 581 | u_int32_t ts; |
588 | | | 582 | |
589 | ts = rnd_counter(); | | 583 | ts = rnd_counter(); |
590 | | | 584 | |
591 | strlcpy(rs->name, name, sizeof(rs->name)); | | 585 | strlcpy(rs->name, name, sizeof(rs->name)); |
592 | rs->last_time = ts; | | 586 | rs->last_time = ts; |
593 | rs->last_delta = 0; | | 587 | rs->last_delta = 0; |
594 | rs->last_delta2 = 0; | | 588 | rs->last_delta2 = 0; |
595 | rs->total = 0; | | 589 | rs->total = 0; |
596 | | | 590 | |
597 | /* | | 591 | /* |
598 | * Some source setup, by type | | 592 | * Some source setup, by type |
599 | */ | | 593 | */ |
600 | rs->test = NULL; | | 594 | rs->test = NULL; |
601 | rs->test_cnt = -1; | | 595 | rs->test_cnt = -1; |
602 | | | 596 | |
603 | switch (type) { | | 597 | switch (type) { |
604 | case RND_TYPE_NET: /* Don't collect by default */ | | 598 | case RND_TYPE_NET: /* Don't collect by default */ |
605 | flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE); | | 599 | flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE); |
606 | break; | | 600 | break; |
607 | case RND_TYPE_RNG: /* Space for statistical testing */ | | 601 | case RND_TYPE_RNG: /* Space for statistical testing */ |
608 | rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP); | | 602 | rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP); |
609 | rs->test_cnt = 0; | | 603 | rs->test_cnt = 0; |
610 | /* FALLTHRU */ | | 604 | /* FALLTHRU */ |
611 | case RND_TYPE_VM: /* Process samples in bulk always */ | | 605 | case RND_TYPE_VM: /* Process samples in bulk always */ |
612 | flags |= RND_FLAG_FAST; | | 606 | flags |= RND_FLAG_FAST; |
613 | break; | | 607 | break; |
614 | default: | | 608 | default: |
615 | break; | | 609 | break; |
616 | } | | 610 | } |
617 | | | 611 | |
618 | rs->type = type; | | 612 | rs->type = type; |
619 | rs->flags = flags; | | 613 | rs->flags = flags; |
620 | | | 614 | |
621 | rs->state = rnd_sample_allocate(rs); | | 615 | rs->state = rnd_sample_allocate(rs); |
622 | | | 616 | |
623 | mutex_spin_enter(&rndpool_mtx); | | 617 | mutex_spin_enter(&rndpool_mtx); |
624 | LIST_INSERT_HEAD(&rnd_sources, rs, list); | | 618 | LIST_INSERT_HEAD(&rnd_sources, rs, list); |
625 | | | 619 | |
626 | #ifdef RND_VERBOSE | | 620 | #ifdef RND_VERBOSE |
627 | printf("rnd: %s attached as an entropy source (", rs->name); | | 621 | printf("rnd: %s attached as an entropy source (", rs->name); |
628 | if (!(flags & RND_FLAG_NO_COLLECT)) { | | 622 | if (!(flags & RND_FLAG_NO_COLLECT)) { |
629 | printf("collecting"); | | 623 | printf("collecting"); |
630 | if (flags & RND_FLAG_NO_ESTIMATE) | | 624 | if (flags & RND_FLAG_NO_ESTIMATE) |
631 | printf(" without estimation"); | | 625 | printf(" without estimation"); |
632 | } | | 626 | } |
633 | else | | 627 | else |
634 | printf("off"); | | 628 | printf("off"); |
635 | printf(")\n"); | | 629 | printf(")\n"); |
636 | #endif | | 630 | #endif |
637 | | | 631 | |
638 | /* | | 632 | /* |
639 | * Again, put some more initial junk in the pool. | | 633 | * Again, put some more initial junk in the pool. |
640 | * XXX Bogus, but harder to guess than zeros. | | 634 | * XXX Bogus, but harder to guess than zeros. |
641 | */ | | 635 | */ |
642 | rndpool_add_data(&rnd_pool, &ts, sizeof(u_int32_t), 1); | | 636 | rndpool_add_data(&rnd_pool, &ts, sizeof(u_int32_t), 1); |
643 | mutex_spin_exit(&rndpool_mtx); | | 637 | mutex_spin_exit(&rndpool_mtx); |
644 | } | | 638 | } |
645 | | | 639 | |
646 | /* | | 640 | /* |
647 | * Remove a source from our list of sources. | | 641 | * Remove a source from our list of sources. |
648 | */ | | 642 | */ |
649 | void | | 643 | void |
650 | rnd_detach_source(krndsource_t *source) | | 644 | rnd_detach_source(krndsource_t *source) |
651 | { | | 645 | { |
652 | rnd_sample_t *sample; | | 646 | rnd_sample_t *sample; |
653 | | | 647 | |
654 | mutex_spin_enter(&rnd_mtx); | | 648 | mutex_spin_enter(&rnd_mtx); |
655 | | | 649 | |
656 | LIST_REMOVE(source, list); | | 650 | LIST_REMOVE(source, list); |
657 | | | 651 | |
658 | /* | | 652 | /* |
659 | * If there are samples queued up "remove" them from the sample queue | | 653 | * If there are samples queued up "remove" them from the sample queue |
660 | * by setting the source to the no-collect pseudosource. | | 654 | * by setting the source to the no-collect pseudosource. |
661 | */ | | 655 | */ |
662 | sample = SIMPLEQ_FIRST(&rnd_samples); | | 656 | sample = SIMPLEQ_FIRST(&rnd_samples); |
663 | while (sample != NULL) { | | 657 | while (sample != NULL) { |
664 | if (sample->source == source) | | 658 | if (sample->source == source) |
665 | sample->source = &rnd_source_no_collect; | | 659 | sample->source = &rnd_source_no_collect; |
666 | | | 660 | |
667 | sample = SIMPLEQ_NEXT(sample, next); | | 661 | sample = SIMPLEQ_NEXT(sample, next); |
668 | } | | 662 | } |
669 | | | 663 | |
670 | mutex_spin_exit(&rnd_mtx); | | 664 | mutex_spin_exit(&rnd_mtx); |
671 | | | 665 | |
672 | if (!cpu_softintr_p()) { /* XXX XXX very temporary "fix" */ | | 666 | if (!cpu_softintr_p()) { /* XXX XXX very temporary "fix" */ |
673 | if (source->state) { | | 667 | if (source->state) { |
674 | rnd_sample_free(source->state); | | 668 | rnd_sample_free(source->state); |
675 | source->state = NULL; | | 669 | source->state = NULL; |
676 | } | | 670 | } |
677 | | | 671 | |
678 | if (source->test) { | | 672 | if (source->test) { |
679 | kmem_free(source->test, sizeof(rngtest_t)); | | 673 | kmem_free(source->test, sizeof(rngtest_t)); |
680 | } | | 674 | } |
681 | } | | 675 | } |
682 | | | 676 | |
683 | #ifdef RND_VERBOSE | | 677 | #ifdef RND_VERBOSE |
684 | printf("rnd: %s detached as an entropy source\n", source->name); | | 678 | printf("rnd: %s detached as an entropy source\n", source->name); |
685 | #endif | | 679 | #endif |
686 | } | | 680 | } |
687 | | | 681 | |
688 | /* | | 682 | /* |
689 | * Add a 32-bit value to the entropy pool. The rs parameter should point to | | 683 | * Add a 32-bit value to the entropy pool. The rs parameter should point to |
690 | * the source-specific source structure. | | 684 | * the source-specific source structure. |
691 | */ | | 685 | */ |
692 | void | | 686 | void |
693 | _rnd_add_uint32(krndsource_t *rs, u_int32_t val) | | 687 | _rnd_add_uint32(krndsource_t *rs, u_int32_t val) |
694 | { | | 688 | { |
695 | u_int32_t ts; | | 689 | u_int32_t ts; |
696 | u_int32_t entropy = 0; | | 690 | u_int32_t entropy = 0; |
697 | | | 691 | |
698 | if (rs->flags & RND_FLAG_NO_COLLECT) | | 692 | if (rs->flags & RND_FLAG_NO_COLLECT) |
699 | return; | | 693 | return; |
700 | | | 694 | |
701 | /* | | 695 | /* |
702 | * Sample the counter as soon as possible to avoid | | 696 | * Sample the counter as soon as possible to avoid |
703 | * entropy overestimation. | | 697 | * entropy overestimation. |
704 | */ | | 698 | */ |
705 | ts = rnd_counter(); | | 699 | ts = rnd_counter(); |
706 | | | 700 | |
707 | /* | | 701 | /* |
708 | * If we are estimating entropy on this source, | | 702 | * If we are estimating entropy on this source, |
709 | * calculate differentials. | | 703 | * calculate differentials. |
710 | */ | | 704 | */ |
711 | | | 705 | |
712 | if ((rs->flags & RND_FLAG_NO_ESTIMATE) == 0) { | | 706 | if ((rs->flags & RND_FLAG_NO_ESTIMATE) == 0) { |
713 | entropy = rnd_estimate_entropy(rs, ts); | | 707 | entropy = rnd_estimate_entropy(rs, ts); |
714 | } | | 708 | } |
715 | | | 709 | |
716 | rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); | | 710 | rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts); |
717 | } | | 711 | } |
718 | | | 712 | |
719 | void | | 713 | void |
720 | rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len, | | 714 | rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len, |
721 | uint32_t entropy) | | 715 | uint32_t entropy) |
722 | { | | 716 | { |
723 | /* | | 717 | /* |
724 | * This interface is meant for feeding data which is, | | 718 | * This interface is meant for feeding data which is, |
725 | * itself, random. Don't estimate entropy based on | | 719 | * itself, random. Don't estimate entropy based on |
726 | * timestamp, just directly add the data. | | 720 | * timestamp, just directly add the data. |
727 | */ | | 721 | */ |
728 | rnd_add_data_ts(rs, data, len, entropy, rnd_counter()); | | 722 | rnd_add_data_ts(rs, data, len, entropy, rnd_counter()); |
729 | } | | 723 | } |
730 | | | 724 | |
731 | static void | | 725 | static void |
732 | rnd_add_data_ts(krndsource_t *rs, const void *const data, u_int32_t len, | | 726 | rnd_add_data_ts(krndsource_t *rs, const void *const data, u_int32_t len, |
733 | u_int32_t entropy, uint32_t ts) | | 727 | u_int32_t entropy, uint32_t ts) |
734 | { | | 728 | { |
735 | rnd_sample_t *state = NULL; | | 729 | rnd_sample_t *state = NULL; |
736 | const uint32_t *dint = data; | | 730 | const uint32_t *dint = data; |
737 | int todo, done, filled = 0; | | 731 | int todo, done, filled = 0; |
738 | int sample_count; | | 732 | int sample_count; |
739 | SIMPLEQ_HEAD(, _rnd_sample_t) tmp_samples = | | 733 | SIMPLEQ_HEAD(, _rnd_sample_t) tmp_samples = |
740 | SIMPLEQ_HEAD_INITIALIZER(tmp_samples); | | 734 | SIMPLEQ_HEAD_INITIALIZER(tmp_samples); |
741 | | | 735 | |
742 | if (rs->flags & RND_FLAG_NO_COLLECT) { | | 736 | if (rs->flags & RND_FLAG_NO_COLLECT) { |
743 | return; | | 737 | return; |
744 | } | | 738 | } |
745 | | | 739 | |
746 | todo = len / sizeof(*dint); | | 740 | todo = len / sizeof(*dint); |
747 | /* | | 741 | /* |
748 | * Let's try to be efficient: if we are warm, and a source | | 742 | * Let's try to be efficient: if we are warm, and a source |
749 | * is adding entropy at a rate of at least 1 bit every 10 seconds, | | 743 | * is adding entropy at a rate of at least 1 bit every 10 seconds, |
750 | * mark it as "fast" and add its samples in bulk. | | 744 | * mark it as "fast" and add its samples in bulk. |
751 | */ | | 745 | */ |
752 | if (__predict_true(rs->flags & RND_FLAG_FAST)) { | | 746 | if (__predict_true(rs->flags & RND_FLAG_FAST)) { |
753 | sample_count = RND_SAMPLE_COUNT; | | 747 | sample_count = RND_SAMPLE_COUNT; |
754 | } else { | | 748 | } else { |
755 | if (!cold && rnd_initial_entropy) { | | 749 | if (!cold && rnd_initial_entropy) { |
756 | struct timeval upt; | | 750 | struct timeval upt; |
757 | | | 751 | |
758 | getmicrouptime(&upt); | | 752 | getmicrouptime(&upt); |
759 | if ((todo >= RND_SAMPLE_COUNT) || | | 753 | if ((todo >= RND_SAMPLE_COUNT) || |
760 | (rs->total > upt.tv_sec * 10) || | | 754 | (rs->total > upt.tv_sec * 10) || |
761 | (upt.tv_sec > 10 && rs->total > upt.tv_sec) || | | 755 | (upt.tv_sec > 10 && rs->total > upt.tv_sec) || |
762 | (upt.tv_sec > 100 && | | 756 | (upt.tv_sec > 100 && |
763 | rs->total > upt.tv_sec / 10)) { | | 757 | rs->total > upt.tv_sec / 10)) { |
764 | #ifdef RND_VERBOSE | | 758 | #ifdef RND_VERBOSE |
765 | printf("rnd: source %s is fast (%d samples " | | 759 | printf("rnd: source %s is fast (%d samples " |
766 | "at once, %d bits in %lld seconds), " | | 760 | "at once, %d bits in %lld seconds), " |
767 | "processing samples in bulk.\n", | | 761 | "processing samples in bulk.\n", |
768 | rs->name, todo, rs->total, | | 762 | rs->name, todo, rs->total, |
769 | (long long int)upt.tv_sec); | | 763 | (long long int)upt.tv_sec); |
770 | #endif | | 764 | #endif |
771 | rs->flags |= RND_FLAG_FAST; | | 765 | rs->flags |= RND_FLAG_FAST; |
772 | } | | 766 | } |
773 | } | | 767 | } |
774 | sample_count = 2; | | 768 | sample_count = 2; |
775 | } | | 769 | } |
776 | | | 770 | |
777 | /* | | 771 | /* |
778 | * Loop over data packaging it into sample buffers. | | 772 | * Loop over data packaging it into sample buffers. |
779 | * If a sample buffer allocation fails, drop all data. | | 773 | * If a sample buffer allocation fails, drop all data. |
780 | */ | | 774 | */ |
781 | for (done = 0; done < todo ; done++) { | | 775 | for (done = 0; done < todo ; done++) { |
782 | state = rs->state; | | 776 | state = rs->state; |
783 | if (state == NULL) { | | 777 | if (state == NULL) { |
784 | state = rnd_sample_allocate_isr(rs); | | 778 | state = rnd_sample_allocate_isr(rs); |
785 | if (__predict_false(state == NULL)) { | | 779 | if (__predict_false(state == NULL)) { |
786 | break; | | 780 | break; |
787 | } | | 781 | } |
788 | rs->state = state; | | 782 | rs->state = state; |
789 | } | | 783 | } |
790 | | | 784 | |
791 | state->ts[state->cursor] = ts; | | 785 | state->ts[state->cursor] = ts; |
792 | state->values[state->cursor] = dint[done]; | | 786 | state->values[state->cursor] = dint[done]; |
793 | state->cursor++; | | 787 | state->cursor++; |
794 | | | 788 | |
795 | if (state->cursor == sample_count) { | | 789 | if (state->cursor == sample_count) { |
796 | SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next); | | 790 | SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next); |
797 | filled++; | | 791 | filled++; |
798 | rs->state = NULL; | | 792 | rs->state = NULL; |
799 | } | | 793 | } |
800 | } | | 794 | } |
801 | | | 795 | |
802 | if (__predict_false(state == NULL)) { | | 796 | if (__predict_false(state == NULL)) { |
803 | while ((state = SIMPLEQ_FIRST(&tmp_samples))) { | | 797 | while ((state = SIMPLEQ_FIRST(&tmp_samples))) { |
804 | SIMPLEQ_REMOVE_HEAD(&tmp_samples, next); | | 798 | SIMPLEQ_REMOVE_HEAD(&tmp_samples, next); |
805 | rnd_sample_free(state); | | 799 | rnd_sample_free(state); |
806 | } | | 800 | } |
807 | return; | | 801 | return; |
808 | } | | 802 | } |
809 | | | 803 | |
810 | /* | | 804 | /* |
811 | * Claim all the entropy on the last one we send to | | 805 | * Claim all the entropy on the last one we send to |
812 | * the pool, so we don't rely on it being evenly distributed | | 806 | * the pool, so we don't rely on it being evenly distributed |
813 | * in the supplied data. | | 807 | * in the supplied data. |
814 | * | | 808 | * |
815 | * XXX The rndpool code must accept samples with more | | 809 | * XXX The rndpool code must accept samples with more |
816 | * XXX claimed entropy than bits for this to work right. | | 810 | * XXX claimed entropy than bits for this to work right. |
817 | */ | | 811 | */ |
818 | state->entropy += entropy; | | 812 | state->entropy += entropy; |
819 | rs->total += entropy; | | 813 | rs->total += entropy; |
820 | | | 814 | |
821 | /* | | 815 | /* |
822 | * If we didn't finish any sample buffers, we're done. | | 816 | * If we didn't finish any sample buffers, we're done. |
823 | */ | | 817 | */ |
824 | if (!filled) { | | 818 | if (!filled) { |
825 | return; | | 819 | return; |
826 | } | | 820 | } |
827 | | | 821 | |
828 | mutex_spin_enter(&rnd_mtx); | | 822 | mutex_spin_enter(&rnd_mtx); |
829 | while ((state = SIMPLEQ_FIRST(&tmp_samples))) { | | 823 | while ((state = SIMPLEQ_FIRST(&tmp_samples))) { |
830 | SIMPLEQ_REMOVE_HEAD(&tmp_samples, next); | | 824 | SIMPLEQ_REMOVE_HEAD(&tmp_samples, next); |
831 | SIMPLEQ_INSERT_HEAD(&rnd_samples, state, next); | | 825 | SIMPLEQ_INSERT_HEAD(&rnd_samples, state, next); |
832 | } | | 826 | } |
833 | mutex_spin_exit(&rnd_mtx); | | 827 | mutex_spin_exit(&rnd_mtx); |
834 | | | 828 | |
835 | /* Cause processing of queued samples */ | | 829 | /* Cause processing of queued samples */ |
836 | rnd_schedule_process(); | | 830 | rnd_schedule_process(); |
837 | } | | 831 | } |
838 | | | 832 | |
839 | static int | | 833 | static int |
840 | rnd_hwrng_test(rnd_sample_t *sample) | | 834 | rnd_hwrng_test(rnd_sample_t *sample) |
841 | { | | 835 | { |
842 | krndsource_t *source = sample->source; | | 836 | krndsource_t *source = sample->source; |
843 | size_t cmplen; | | 837 | size_t cmplen; |
844 | uint8_t *v1, *v2; | | 838 | uint8_t *v1, *v2; |
845 | size_t resid, totest; | | 839 | size_t resid, totest; |
846 | | | 840 | |
847 | KASSERT(source->type == RND_TYPE_RNG); | | 841 | KASSERT(source->type == RND_TYPE_RNG); |
848 | | | 842 | |
849 | /* | | 843 | /* |
850 | * Continuous-output test: compare two halves of the | | 844 | * Continuous-output test: compare two halves of the |
851 | * sample buffer to each other. The sample buffer (64 ints, | | 845 | * sample buffer to each other. The sample buffer (64 ints, |
852 | * so either 256 or 512 bytes on any modern machine) should be | | 846 | * so either 256 or 512 bytes on any modern machine) should be |
853 | * much larger than a typical hardware RNG output, so this seems | | 847 | * much larger than a typical hardware RNG output, so this seems |
854 | * a reasonable way to do it without retaining extra data. | | 848 | * a reasonable way to do it without retaining extra data. |
855 | */ | | 849 | */ |
856 | cmplen = sizeof(sample->values) / 2; | | 850 | cmplen = sizeof(sample->values) / 2; |
857 | v1 = (uint8_t *)sample->values; | | 851 | v1 = (uint8_t *)sample->values; |
858 | v2 = (uint8_t *)sample->values + cmplen; | | 852 | v2 = (uint8_t *)sample->values + cmplen; |
859 | | | 853 | |
860 | if (__predict_false(!memcmp(v1, v2, cmplen))) { | | 854 | if (__predict_false(!memcmp(v1, v2, cmplen))) { |
861 | printf("rnd: source \"%s\" failed continuous-output test.\n", | | 855 | printf("rnd: source \"%s\" failed continuous-output test.\n", |
862 | source->name); | | 856 | source->name); |
863 | return 1; | | 857 | return 1; |
864 | } | | 858 | } |
865 | | | 859 | |
866 | /* | | 860 | /* |
867 | * FIPS 140 statistical RNG test. We must accumulate 20,000 bits. | | 861 | * FIPS 140 statistical RNG test. We must accumulate 20,000 bits. |
868 | */ | | 862 | */ |
869 | if (__predict_true(source->test_cnt == -1)) { | | 863 | if (__predict_true(source->test_cnt == -1)) { |
870 | /* already passed the test */ | | 864 | /* already passed the test */ |
871 | return 0; | | 865 | return 0; |
872 | } | | 866 | } |
873 | resid = FIPS140_RNG_TEST_BYTES - source->test_cnt; | | 867 | resid = FIPS140_RNG_TEST_BYTES - source->test_cnt; |
874 | totest = MIN(RND_SAMPLE_COUNT * 4, resid); | | 868 | totest = MIN(RND_SAMPLE_COUNT * 4, resid); |
875 | memcpy(source->test->rt_b + source->test_cnt, sample->values, totest); | | 869 | memcpy(source->test->rt_b + source->test_cnt, sample->values, totest); |
876 | resid -= totest; | | 870 | resid -= totest; |
877 | source->test_cnt += totest; | | 871 | source->test_cnt += totest; |
878 | if (resid == 0) { | | 872 | if (resid == 0) { |
879 | strlcpy(source->test->rt_name, source->name, | | 873 | strlcpy(source->test->rt_name, source->name, |
880 | sizeof(source->test->rt_name)); | | 874 | sizeof(source->test->rt_name)); |
881 | if (rngtest(source->test)) { | | 875 | if (rngtest(source->test)) { |
882 | printf("rnd: source \"%s\" failed statistical test.", | | 876 | printf("rnd: source \"%s\" failed statistical test.", |
883 | source->name); | | 877 | source->name); |
884 | return 1; | | 878 | return 1; |
885 | } | | 879 | } |
886 | source->test_cnt = -1; | | 880 | source->test_cnt = -1; |
887 | memset(source->test, 0, sizeof(*source->test)); | | 881 | memset(source->test, 0, sizeof(*source->test)); |
888 | } | | 882 | } |
889 | return 0; | | 883 | return 0; |
890 | } | | 884 | } |
891 | | | 885 | |
892 | /* | | 886 | /* |
893 | * Process the events in the ring buffer. Called by rnd_timeout or | | 887 | * Process the events in the ring buffer. Called by rnd_timeout or |
894 | * by the add routines directly if the callout has never fired (that | | 888 | * by the add routines directly if the callout has never fired (that |
895 | * is, if we are "cold" -- just booted). | | 889 | * is, if we are "cold" -- just booted). |
896 | * | | 890 | * |
897 | */ | | 891 | */ |
898 | static void | | 892 | static void |
899 | rnd_process_events(void) | | 893 | rnd_process_events(void) |
900 | { | | 894 | { |
901 | rnd_sample_t *sample = NULL; | | 895 | rnd_sample_t *sample = NULL; |
902 | krndsource_t *source, *badsource = NULL; | | 896 | krndsource_t *source, *badsource = NULL; |
903 | static krndsource_t *last_source; | | 897 | static krndsource_t *last_source; |
904 | u_int32_t entropy; | | 898 | u_int32_t entropy; |
905 | size_t pool_entropy; | | 899 | size_t pool_entropy; |
906 | int found = 0, wake = 0; | | 900 | int found = 0, wake = 0; |
907 | SIMPLEQ_HEAD(, _rnd_sample_t) dq_samples = | | 901 | SIMPLEQ_HEAD(, _rnd_sample_t) dq_samples = |
908 | SIMPLEQ_HEAD_INITIALIZER(dq_samples); | | 902 | SIMPLEQ_HEAD_INITIALIZER(dq_samples); |
909 | SIMPLEQ_HEAD(, _rnd_sample_t) df_samples = | | 903 | SIMPLEQ_HEAD(, _rnd_sample_t) df_samples = |
910 | SIMPLEQ_HEAD_INITIALIZER(df_samples); | | 904 | SIMPLEQ_HEAD_INITIALIZER(df_samples); |
911 | TAILQ_HEAD(, rndsink) sunk = TAILQ_HEAD_INITIALIZER(sunk); | | 905 | TAILQ_HEAD(, rndsink) sunk = TAILQ_HEAD_INITIALIZER(sunk); |
912 | | | 906 | |
913 | /* | | 907 | /* |
914 | * Sample queue is protected by rnd_mtx, drain to onstack queue | | 908 | * Sample queue is protected by rnd_mtx, drain to onstack queue |
915 | * and drop lock. | | 909 | * and drop lock. |
916 | */ | | 910 | */ |
917 | | | 911 | |
918 | mutex_spin_enter(&rnd_mtx); | | 912 | mutex_spin_enter(&rnd_mtx); |
919 | while ((sample = SIMPLEQ_FIRST(&rnd_samples))) { | | 913 | while ((sample = SIMPLEQ_FIRST(&rnd_samples))) { |
920 | found++; | | 914 | found++; |
921 | SIMPLEQ_REMOVE_HEAD(&rnd_samples, next); | | 915 | SIMPLEQ_REMOVE_HEAD(&rnd_samples, next); |
922 | /* | | 916 | /* |
923 | * We repeat this check here, since it is possible | | 917 | * We repeat this check here, since it is possible |
924 | * the source was disabled before we were called, but | | 918 | * the source was disabled before we were called, but |
925 | * after the entry was queued. | | 919 | * after the entry was queued. |
926 | */ | | 920 | */ |
927 | if (__predict_false(sample->source->flags | | 921 | if (__predict_false(sample->source->flags |
928 | & RND_FLAG_NO_COLLECT)) { | | 922 | & RND_FLAG_NO_COLLECT)) { |
929 | SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); | | 923 | SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); |
930 | } else { | | 924 | } else { |
931 | SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next); | | 925 | SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next); |
932 | } | | 926 | } |
933 | } | | 927 | } |
934 | mutex_spin_exit(&rnd_mtx); | | 928 | mutex_spin_exit(&rnd_mtx); |
935 | | | 929 | |
936 | /* Don't thrash the rndpool mtx either. Hold, add all samples. */ | | 930 | /* Don't thrash the rndpool mtx either. Hold, add all samples. */ |
937 | mutex_spin_enter(&rndpool_mtx); | | 931 | mutex_spin_enter(&rndpool_mtx); |
938 | | | 932 | |
939 | pool_entropy = rndpool_get_entropy_count(&rnd_pool); | | 933 | pool_entropy = rndpool_get_entropy_count(&rnd_pool); |
940 | if (pool_entropy > RND_ENTROPY_THRESHOLD * 8) { | | 934 | if (pool_entropy > RND_ENTROPY_THRESHOLD * 8) { |
941 | wake++; | | 935 | wake++; |
942 | } else { | | 936 | } else { |
943 | rnd_empty = 1; | | 937 | rnd_empty = 1; |
944 | rnd_getmore((RND_POOLBITS - pool_entropy) / 8); | | 938 | rnd_getmore((RND_POOLBITS - pool_entropy) / 8); |
945 | #ifdef RND_VERBOSE | | 939 | #ifdef RND_VERBOSE |
946 | printf("rnd: empty, asking for %d bits\n", | | 940 | printf("rnd: empty, asking for %d bits\n", |
947 | (int)((RND_POOLBITS - pool_entropy) / 8)); | | 941 | (int)((RND_POOLBITS - pool_entropy) / 8)); |
948 | #endif | | 942 | #endif |
949 | } | | 943 | } |
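/*
 * Worked example (illustrative; assumes the usual RND_POOLBITS of 4096,
 * i.e. a 128-word pool of 32-bit words): with pool_entropy = 100 bits the
 * pool is flagged empty and upstream sources are asked for
 * (4096 - 100) / 8 = 499 bytes (integer division), enough to refill the
 * pool completely if the sources can deliver.
 */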
950 | | | 944 | |
951 | while ((sample = SIMPLEQ_FIRST(&dq_samples))) { | | 945 | while ((sample = SIMPLEQ_FIRST(&dq_samples))) { |
952 | SIMPLEQ_REMOVE_HEAD(&dq_samples, next); | | 946 | SIMPLEQ_REMOVE_HEAD(&dq_samples, next); |
953 | source = sample->source; | | 947 | source = sample->source; |
954 | entropy = sample->entropy; | | 948 | entropy = sample->entropy; |
955 | | | 949 | |
956 | /* | | 950 | /* |
957 | * Don't provide a side channel for timing attacks on | | 951 | * Don't provide a side channel for timing attacks on |
958 | * low-rate sources: require mixing with some other | | 952 | * low-rate sources: require mixing with some other |
959 | * source before we schedule a wakeup. | | 953 | * source before we schedule a wakeup. |
960 | */ | | 954 | */ |
961 | if (!wake && | | 955 | if (!wake && |
962 | (source != last_source || source->flags & RND_FLAG_FAST)) { | | 956 | (source != last_source || source->flags & RND_FLAG_FAST)) { |
963 | wake++; | | 957 | wake++; |
964 | } | | 958 | } |
965 | last_source = source; | | 959 | last_source = source; |
966 | | | 960 | |
967 | /* | | 961 | /* |
968 | * Hardware generators are great but sometimes they | | 962 | * Hardware generators are great but sometimes they |
969 | * have...hardware issues. Don't use any data from | | 963 | * have...hardware issues. Don't use any data from |
970 | * them unless it passes some tests. | | 964 | * them unless it passes some tests. |
971 | */ | | 965 | */ |
972 | if (source->type == RND_TYPE_RNG) { | | 966 | if (source->type == RND_TYPE_RNG) { |
973 | if (__predict_false(rnd_hwrng_test(sample))) { | | 967 | if (__predict_false(rnd_hwrng_test(sample))) { |
974 | /* | | 968 | /* |
975 | * Detach the bad source. See below. | | 969 | * Detach the bad source. See below. |
976 | */ | | 970 | */ |
977 | badsource = source; | | 971 | badsource = source; |
978 | printf("rnd: detaching source \"%s\".", | | 972 | printf("rnd: detaching source \"%s\".", |
979 | badsource->name); | | 973 | badsource->name); |
980 | break; | | 974 | break; |
981 | } | | 975 | } |
982 | } | | 976 | } |
983 | rndpool_add_data(&rnd_pool, sample->values, | | 977 | rndpool_add_data(&rnd_pool, sample->values, |
984 | RND_SAMPLE_COUNT * 4, 0); | | 978 | RND_SAMPLE_COUNT * 4, 0); |
985 | | | 979 | |
986 | rndpool_add_data(&rnd_pool, sample->ts, | | 980 | rndpool_add_data(&rnd_pool, sample->ts, |
987 | RND_SAMPLE_COUNT * 4, entropy); | | 981 | RND_SAMPLE_COUNT * 4, entropy); |
988 | | | 982 | |
989 | source->total += sample->entropy; | | 983 | source->total += sample->entropy; |
990 | SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); | | 984 | SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); |
991 | } | | 985 | } |
992 | mutex_spin_exit(&rndpool_mtx); | | 986 | mutex_spin_exit(&rndpool_mtx); |
993 | | | 987 | |
994 | /* Now we hold no locks: clean up. */ | | 988 | /* Now we hold no locks: clean up. */ |
995 | if (__predict_false(badsource)) { | | 989 | if (__predict_false(badsource)) { |
996 | /* | | 990 | /* |
997 | * The detach routine frees any samples we have not | | 991 | * The detach routine frees any samples we have not |
998 | * dequeued ourselves. For sanity's sake, we simply | | 992 | * dequeued ourselves. For sanity's sake, we simply |
999 | * free (without using) all dequeued samples from the | | 993 | * free (without using) all dequeued samples from the |
1000 | * point at which we detected a problem onwards. | | 994 | * point at which we detected a problem onwards. |
1001 | */ | | 995 | */ |
1002 | rnd_detach_source(badsource); | | 996 | rnd_detach_source(badsource); |
1003 | while ((sample = SIMPLEQ_FIRST(&dq_samples))) { | | 997 | while ((sample = SIMPLEQ_FIRST(&dq_samples))) { |
1004 | SIMPLEQ_REMOVE_HEAD(&dq_samples, next); | | 998 | SIMPLEQ_REMOVE_HEAD(&dq_samples, next); |
1005 | rnd_sample_free(sample); | | 999 | rnd_sample_free(sample); |
1006 | } | | 1000 | } |
1007 | } | | 1001 | } |
1008 | while ((sample = SIMPLEQ_FIRST(&df_samples))) { | | 1002 | while ((sample = SIMPLEQ_FIRST(&df_samples))) { |
1009 | SIMPLEQ_REMOVE_HEAD(&df_samples, next); | | 1003 | SIMPLEQ_REMOVE_HEAD(&df_samples, next); |
1010 | rnd_sample_free(sample); | | 1004 | rnd_sample_free(sample); |
1011 | } | | 1005 | } |
1012 | | | 1006 | |
1013 | | | 1007 | |
1014 | /* | | 1008 | /* |
1015 | * Wake up any potential readers waiting. | | 1009 | * Wake up any potential readers waiting. |
1016 | */ | | 1010 | */ |
1017 | if (wake) { | | 1011 | if (wake) { |
1018 | rnd_schedule_wakeup(); | | 1012 | rnd_schedule_wakeup(); |
1019 | } | | 1013 | } |
1020 | } | | 1014 | } |
1021 | | | 1015 | |
1022 | static void | | 1016 | static void |
1023 | rnd_intr(void *arg) | | 1017 | rnd_intr(void *arg) |
1024 | { | | 1018 | { |
1025 | rnd_process_events(); | | 1019 | rnd_process_events(); |
1026 | } | | 1020 | } |
1027 | | | 1021 | |
1028 | static void | | 1022 | static void |
1029 | rnd_wake(void *arg) | | 1023 | rnd_wake(void *arg) |
1030 | { | | 1024 | { |
1031 | rnd_wakeup_readers(); | | 1025 | rnd_wakeup_readers(); |
1032 | } | | 1026 | } |
1033 | | | 1027 | |
1034 | u_int32_t | | 1028 | u_int32_t |
1035 | rnd_extract_data_locked(void *p, u_int32_t len, u_int32_t flags) | | 1029 | rnd_extract_data_locked(void *p, u_int32_t len, u_int32_t flags) |
1036 | { | | 1030 | { |
1037 | static int timed_in; | | 1031 | static int timed_in; |
1038 | int entropy_count; | | 1032 | int entropy_count; |
1039 | | | 1033 | |
1040 | KASSERT(mutex_owned(&rndpool_mtx)); | | 1034 | KASSERT(mutex_owned(&rndpool_mtx)); |
1041 | if (__predict_false(!timed_in)) { | | 1035 | if (__predict_false(!timed_in)) { |
1042 | if (boottime.tv_sec) { | | 1036 | if (boottime.tv_sec) { |
1043 | rndpool_add_data(&rnd_pool, &boottime, | | 1037 | rndpool_add_data(&rnd_pool, &boottime, |
1044 | sizeof(boottime), 0); | | 1038 | sizeof(boottime), 0); |
1045 | } | | 1039 | } |
1046 | timed_in++; | | 1040 | timed_in++; |
1047 | } | | 1041 | } |
1048 | if (__predict_false(!rnd_initial_entropy)) { | | 1042 | if (__predict_false(!rnd_initial_entropy)) { |
1049 | u_int32_t c; | | 1043 | u_int32_t c; |
1050 | | | 1044 | |
1051 | #ifdef RND_VERBOSE | | 1045 | #ifdef RND_VERBOSE |
1052 | printf("rnd: WARNING! initial entropy low (%u).\n", | | 1046 | printf("rnd: WARNING! initial entropy low (%u).\n", |
1053 | rndpool_get_entropy_count(&rnd_pool)); | | 1047 | rndpool_get_entropy_count(&rnd_pool)); |
1054 | #endif | | 1048 | #endif |
1055 | /* Try once again to put something in the pool */ | | 1049 | /* Try once again to put something in the pool */ |
1056 | c = rnd_counter(); | | 1050 | c = rnd_counter(); |
1057 | rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1); | | 1051 | rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1); |
1058 | } | | 1052 | } |
1059 | | | 1053 | |
1060 | #ifdef DIAGNOSTIC | | 1054 | #ifdef DIAGNOSTIC |
1061 | while (!rnd_tested) { | | 1055 | while (!rnd_tested) { |
1062 | entropy_count = rndpool_get_entropy_count(&rnd_pool); | | 1056 | entropy_count = rndpool_get_entropy_count(&rnd_pool); |
1063 | #ifdef RND_VERBOSE | | 1057 | #ifdef RND_VERBOSE |
1064 | printf("rnd: starting statistical RNG test, entropy = %d.\n", | | 1058 | printf("rnd: starting statistical RNG test, entropy = %d.\n", |
1065 | entropy_count); | | 1059 | entropy_count); |
1066 | #endif | | 1060 | #endif |
1067 | if (rndpool_extract_data(&rnd_pool, rnd_rt.rt_b, | | 1061 | if (rndpool_extract_data(&rnd_pool, rnd_rt.rt_b, |
1068 | sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY) | | 1062 | sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY) |
1069 | != sizeof(rnd_rt.rt_b)) { | | 1063 | != sizeof(rnd_rt.rt_b)) { |
1070 | panic("rnd: could not get bits for statistical test"); | | 1064 | panic("rnd: could not get bits for statistical test"); |
1071 | } | | 1065 | } |
1072 | /* | | 1066 | /* |
1073 | * Stash the tested bits so we can put them back in the | | 1067 | * Stash the tested bits so we can put them back in the |
1074 | * pool, restoring the entropy count. DO NOT rely on | | 1068 | * pool, restoring the entropy count. DO NOT rely on |
1075 | * rngtest to maintain the bits pristine -- we could end | | 1069 | * rngtest to maintain the bits pristine -- we could end |
1076 | * up adding back non-random data claiming it was pure | | 1070 | * up adding back non-random data claiming it was pure |
1077 | * entropy. | | 1071 | * entropy. |
1078 | */ | | 1072 | */ |
1079 | memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b)); | | 1073 | memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b)); |
1080 | strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name)); | | 1074 | strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name)); |
1081 | if (rngtest(&rnd_rt)) { | | 1075 | if (rngtest(&rnd_rt)) { |
1082 | /* | | 1076 | /* |
1083 | * The probability of a Type I error is 3/10000, | | 1077 | * The probability of a Type I error is 3/10000, |
1084 | * but note this can only happen at boot time. | | 1078 | * but note this can only happen at boot time. |
1085 | * The relevant standard says to reset the module, | | 1079 | * The relevant standard says to reset the module, |
1086 | * but developers objected... | | 1080 | * but developers objected... |
1087 | */ | | 1081 | */ |
1088 | printf("rnd: WARNING, ENTROPY POOL FAILED " | | 1082 | printf("rnd: WARNING, ENTROPY POOL FAILED " |
1089 | "STATISTICAL TEST!\n"); | | 1083 | "STATISTICAL TEST!\n"); |
1090 | continue; | | 1084 | continue; |
1091 | } | | 1085 | } |
1092 | memset(&rnd_rt, 0, sizeof(rnd_rt)); | | 1086 | memset(&rnd_rt, 0, sizeof(rnd_rt)); |
1093 | rndpool_add_data(&rnd_pool, rnd_testbits, sizeof(rnd_testbits), | | 1087 | rndpool_add_data(&rnd_pool, rnd_testbits, sizeof(rnd_testbits), |
1094 | entropy_count); | | 1088 | entropy_count); |
1095 | memset(rnd_testbits, 0, sizeof(rnd_testbits)); | | 1089 | memset(rnd_testbits, 0, sizeof(rnd_testbits)); |
1096 | #ifdef RND_VERBOSE | | 1090 | #ifdef RND_VERBOSE |
1097 | printf("rnd: statistical RNG test done, entropy = %d.\n", | | 1091 | printf("rnd: statistical RNG test done, entropy = %d.\n", |
1098 | rndpool_get_entropy_count(&rnd_pool)); | | 1092 | rndpool_get_entropy_count(&rnd_pool)); |
1099 | #endif | | 1093 | #endif |
1100 | rnd_tested++; | | 1094 | rnd_tested++; |
1101 | } | | 1095 | } |
1102 | #endif | | 1096 | #endif |
1103 | entropy_count = rndpool_get_entropy_count(&rnd_pool); | | 1097 | entropy_count = rndpool_get_entropy_count(&rnd_pool); |
1104 | if (entropy_count < (RND_ENTROPY_THRESHOLD * 2 + len) * 8) { | | 1098 | if (entropy_count < (RND_ENTROPY_THRESHOLD * 2 + len) * 8) { |
1105 | rnd_getmore((RND_POOLBITS - entropy_count) / 8); | | 1099 | rnd_getmore((RND_POOLBITS - entropy_count) / 8); |
1106 | } | | 1100 | } |
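/*
 * Units note (added for clarity): entropy_count is a bit count, while
 * RND_ENTROPY_THRESHOLD and len are byte counts -- hence the "* 8" in the
 * comparison above and the "/ 8" when turning the shortfall into a byte
 * request for rnd_getmore().
 */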
1107 | return rndpool_extract_data(&rnd_pool, p, len, flags); | | 1101 | return rndpool_extract_data(&rnd_pool, p, len, flags); |
1108 | } | | 1102 | } |
1109 | | | 1103 | |
1110 | u_int32_t | | 1104 | u_int32_t |
1111 | rnd_extract_data(void *p, u_int32_t len, u_int32_t flags) | | 1105 | rnd_extract_data(void *p, u_int32_t len, u_int32_t flags) |
1112 | { | | 1106 | { |
1113 | uint32_t retval; | | 1107 | uint32_t retval; |
1114 | | | 1108 | |
1115 | mutex_spin_enter(&rndpool_mtx); | | 1109 | mutex_spin_enter(&rndpool_mtx); |
1116 | retval = rnd_extract_data_locked(p, len, flags); | | 1110 | retval = rnd_extract_data_locked(p, len, flags); |
1117 | mutex_spin_exit(&rndpool_mtx); | | 1111 | mutex_spin_exit(&rndpool_mtx); |
1118 | return retval; | | 1112 | return retval; |
1119 | } | | 1113 | } |
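/*
 * Usage sketch (not part of this file): how a kernel consumer might pull
 * bytes through the wrapper above.  RND_EXTRACT_ANY is the flag already
 * used for the statistical test earlier in this file; the buffer size and
 * the fallback policy are arbitrary.
 */
#if 0
static void
example_consumer(void)
{
	uint8_t key[32];
	uint32_t got;

	got = rnd_extract_data(key, sizeof(key), RND_EXTRACT_ANY);
	if (got < sizeof(key)) {
		/* The pool could not supply it all; the caller picks a policy. */
	}
}
#endif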
1120 | | | 1114 | |
1121 | void | | 1115 | void |
1122 | rndsink_attach(rndsink_t *rs) | | 1116 | rndsink_attach(rndsink_t *rs) |
1123 | { | | 1117 | { |
1124 | #ifdef RND_VERBOSE | | 1118 | #ifdef RND_VERBOSE |
1125 | printf("rnd: entropy sink \"%s\" wants %d bytes of data.\n", | | 1119 | printf("rnd: entropy sink \"%s\" wants %d bytes of data.\n", |
1126 | rs->name, (int)rs->len); | | 1120 | rs->name, (int)rs->len); |
1127 | #endif | | 1121 | #endif |
1128 | | | 1122 | |
1129 | KASSERT(mutex_owned(&rs->mtx)); | | 1123 | KASSERT(mutex_owned(&rs->mtx)); |
1130 | KASSERT(rs->state = RSTATE_PENDING); | | 1124 | KASSERT(rs->state = RSTATE_PENDING); |
1131 | | | 1125 | |
1132 | mutex_spin_enter(&rndsink_mtx); | | 1126 | mutex_spin_enter(&rndsink_mtx); |
1133 | TAILQ_INSERT_TAIL(&rnd_sinks, rs, tailq); | | 1127 | TAILQ_INSERT_TAIL(&rnd_sinks, rs, tailq); |
1134 | mutex_spin_exit(&rndsink_mtx); | | 1128 | mutex_spin_exit(&rndsink_mtx); |
1135 | | | 1129 | |
1136 | rnd_schedule_process(); | | 1130 | rnd_schedule_process(); |
1137 | } | | 1131 | } |
1138 | | | 1132 | |
1139 | void | | 1133 | void |
1140 | rndsink_detach(rndsink_t *rs) | | 1134 | rndsink_detach(rndsink_t *rs) |
1141 | { | | 1135 | { |
1142 | rndsink_t *sink, *tsink; | | 1136 | rndsink_t *sink, *tsink; |
1143 | #ifdef RND_VERBOSE | | 1137 | #ifdef RND_VERBOSE |
1144 | printf("rnd: entropy sink \"%s\" no longer wants data.\n", rs->name); | | 1138 | printf("rnd: entropy sink \"%s\" no longer wants data.\n", rs->name); |
1145 | #endif | | 1139 | #endif |
1146 | KASSERT(mutex_owned(&rs->mtx)); | | 1140 | KASSERT(mutex_owned(&rs->mtx)); |
1147 | | | 1141 | |
1148 | mutex_spin_enter(&rndsink_mtx); | | 1142 | mutex_spin_enter(&rndsink_mtx); |
1149 | TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) { | | 1143 | TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) { |
1150 | if (sink == rs) { | | 1144 | if (sink == rs) { |
1151 | TAILQ_REMOVE(&rnd_sinks, rs, tailq); | | 1145 | TAILQ_REMOVE(&rnd_sinks, rs, tailq); |
1152 | } | | 1146 | } |
1153 | } | | 1147 | } |
1154 | mutex_spin_exit(&rndsink_mtx); | | 1148 | mutex_spin_exit(&rndsink_mtx); |
1155 | } | | 1149 | } |
1156 | | | 1150 | |
1157 | void | | 1151 | void |
1158 | rnd_seed(void *base, size_t len) | | 1152 | rnd_seed(void *base, size_t len) |
1159 | { | | 1153 | { |
1160 | SHA1_CTX s; | | 1154 | SHA1_CTX s; |
1161 | uint8_t digest[SHA1_DIGEST_LENGTH]; | | 1155 | uint8_t digest[SHA1_DIGEST_LENGTH]; |
1162 | | | 1156 | |
1163 | if (len != sizeof(*boot_rsp)) { | | 1157 | if (len != sizeof(*boot_rsp)) { |
1164 | aprint_error("rnd: bad seed length %d\n", (int)len); | | 1158 | aprint_error("rnd: bad seed length %d\n", (int)len); |
1165 | return; | | 1159 | return; |
1166 | } | | 1160 | } |
1167 | | | 1161 | |
1168 | boot_rsp = (rndsave_t *)base; | | 1162 | boot_rsp = (rndsave_t *)base; |
1169 | SHA1Init(&s); | | 1163 | SHA1Init(&s); |
1170 | SHA1Update(&s, (uint8_t *)&boot_rsp->entropy, | | 1164 | SHA1Update(&s, (uint8_t *)&boot_rsp->entropy, |
1171 | sizeof(boot_rsp->entropy)); | | 1165 | sizeof(boot_rsp->entropy)); |
1172 | SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data)); | | 1166 | SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data)); |
1173 | SHA1Final(digest, &s); | | 1167 | SHA1Final(digest, &s); |
1174 | | | 1168 | |
1175 | if (memcmp(digest, boot_rsp->digest, sizeof(digest))) { | | 1169 | if (memcmp(digest, boot_rsp->digest, sizeof(digest))) { |
1176 | aprint_error("rnd: bad seed checksum\n"); | | 1170 | aprint_error("rnd: bad seed checksum\n"); |
1177 | return; | | 1171 | return; |
1178 | } | | 1172 | } |
1179 | | | 1173 | |
1180 | /* | | 1174 | /* |
1181 | * It's not really well-defined whether bootloader-supplied | | 1175 | * It's not really well-defined whether bootloader-supplied |
1182 | * modules run before or after rnd_init(). Handle both cases. | | 1176 | * modules run before or after rnd_init(). Handle both cases. |
1183 | */ | | 1177 | */ |
1184 | if (rnd_ready) { | | 1178 | if (rnd_ready) { |
1185 | #ifdef RND_VERBOSE | | 1179 | #ifdef RND_VERBOSE |
1186 | printf("rnd: ready, feeding in seed data directly.\n"); | | 1180 | printf("rnd: ready, feeding in seed data directly.\n"); |
1187 | #endif | | 1181 | #endif |
1188 | mutex_spin_enter(&rndpool_mtx); | | 1182 | mutex_spin_enter(&rndpool_mtx); |
1189 | rndpool_add_data(&rnd_pool, boot_rsp->data, | | 1183 | rndpool_add_data(&rnd_pool, boot_rsp->data, |
1190 | sizeof(boot_rsp->data), | | 1184 | sizeof(boot_rsp->data), |
1191 | MIN(boot_rsp->entropy, RND_POOLBITS / 2)); | | 1185 | MIN(boot_rsp->entropy, RND_POOLBITS / 2)); |
1192 | memset(boot_rsp, 0, sizeof(*boot_rsp)); | | 1186 | memset(boot_rsp, 0, sizeof(*boot_rsp)); |
1193 | mutex_spin_exit(&rndpool_mtx); | | 1187 | mutex_spin_exit(&rndpool_mtx); |
1194 | } else { | | 1188 | } else { |
1195 | #ifdef RND_VERBOSE | | 1189 | #ifdef RND_VERBOSE |
1196 | printf("rnd: not ready, deferring seed feed.\n"); | | 1190 | printf("rnd: not ready, deferring seed feed.\n"); |
1197 | #endif | | 1191 | #endif |
1198 | } | | 1192 | } |
1199 | } | | 1193 | } |
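/*
 * Deferred case (assumption -- the consuming code is not in this section):
 * when rnd_seed() runs before rnd_ready is set, boot_rsp is left pointing
 * at the verified seed, and the initialization path presumably performs
 * the same rndpool_add_data()/memset() sequence as the ready branch above
 * once the pool and rndpool_mtx exist.
 */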