Tue Apr 14 12:27:02 2015 UTC

Eliminate last two cases of u_int*_t in rndpseudo.c.

(riastradh)
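For reference, the two substantive hunks, one in rnd_write() and one in the RNDGETENTCNT case of rnd_ioctl(), swap the legacy BSD u_int*_t spellings for the C99 fixed-width names already used throughout the rest of the file:

	-	u_int8_t *bf;
	+	uint8_t *bf;

	-		*(u_int32_t *)addr = rndpool_get_entropy_count(&rnd_pool);
	+		*(uint32_t *)addr = rndpool_get_entropy_count(&rnd_pool);

The other two hunks only bump the embedded RCS identifiers from 1.29 to 1.30.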

cvs diff -r1.29 -r1.30 src/sys/dev/Attic/rndpseudo.c

--- src/sys/dev/Attic/rndpseudo.c 2015/04/14 12:25:41 1.29
+++ src/sys/dev/Attic/rndpseudo.c 2015/04/14 12:27:02 1.30
@@ -1,884 +1,884 @@
-/* $NetBSD: rndpseudo.c,v 1.29 2015/04/14 12:25:41 riastradh Exp $ */
+/* $NetBSD: rndpseudo.c,v 1.30 2015/04/14 12:27:02 riastradh Exp $ */

 /*-
  * Copyright (c) 1997-2013 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Michael Graff <explorer@flame.org>, Thor Lancelot Simon, and
  * Taylor R. Campbell.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rndpseudo.c,v 1.29 2015/04/14 12:25:41 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rndpseudo.c,v 1.30 2015/04/14 12:27:02 riastradh Exp $");

 #if defined(_KERNEL_OPT)
 #include "opt_compat_netbsd.h"
 #endif

 #include <sys/param.h>
 #include <sys/ioctl.h>
 #include <sys/fcntl.h>
 #include <sys/file.h>
 #include <sys/filedesc.h>
 #include <sys/select.h>
 #include <sys/poll.h>
 #include <sys/kmem.h>
 #include <sys/atomic.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/conf.h>
 #include <sys/systm.h>
 #include <sys/vnode.h>
 #include <sys/pool.h>
 #include <sys/kauth.h>
 #include <sys/cprng.h>
 #include <sys/cpu.h>
 #include <sys/stat.h>
 #include <sys/percpu.h>
 #include <sys/evcnt.h>

 #include <sys/rnd.h>
 #include <sys/rndpool.h>
 #include <sys/rndsource.h>
 #ifdef COMPAT_50
 #include <compat/sys/rnd.h>
 #endif

 #include <dev/rnd_private.h>

 #if defined(__HAVE_CPU_COUNTER)
 #include <machine/cpu_counter.h>
 #endif

 #ifdef RND_DEBUG
 #define DPRINTF(l,x)    if (rnd_debug & (l)) printf x
 #else
 #define DPRINTF(l,x)
 #endif

 /*
  * list devices attached
  */
 #if 0
 #define RND_VERBOSE
 #endif

 /*
  * The size of a temporary buffer for reading and writing entropy.
  */
 #define RND_TEMP_BUFFER_SIZE    512

 static pool_cache_t rnd_temp_buffer_cache;

 /*
  * Per-open state -- a lazily initialized CPRNG.
  */
 struct rnd_ctx {
         struct cprng_strong     *rc_cprng;
         bool                    rc_hard;
 };

 static pool_cache_t rnd_ctx_cache;

 /*
  * The per-CPU RNGs used for short requests
  */
 static percpu_t *percpu_urandom_cprng;

 /* Used by ioconf.c to attach the rnd pseudo-device.  */
 void    rndattach(int);

 dev_type_open(rndopen);

 const struct cdevsw rnd_cdevsw = {
         .d_open = rndopen,
         .d_close = noclose,
         .d_read = noread,
         .d_write = nowrite,
         .d_ioctl = noioctl,
         .d_stop = nostop,
         .d_tty = notty,
         .d_poll = nopoll,
         .d_mmap = nommap,
         .d_kqfilter = nokqfilter,
         .d_discard = nodiscard,
         .d_flag = D_OTHER | D_MPSAFE
 };

 static int rnd_read(struct file *, off_t *, struct uio *, kauth_cred_t, int);
 static int rnd_write(struct file *, off_t *, struct uio *, kauth_cred_t, int);
 static int rnd_ioctl(struct file *, u_long, void *);
 static int rnd_poll(struct file *, int);
 static int rnd_stat(struct file *, struct stat *);
 static int rnd_close(struct file *);
 static int rnd_kqfilter(struct file *, struct knote *);

 const struct fileops rnd_fileops = {
         .fo_read = rnd_read,
         .fo_write = rnd_write,
         .fo_ioctl = rnd_ioctl,
         .fo_fcntl = fnullop_fcntl,
         .fo_poll = rnd_poll,
         .fo_stat = rnd_stat,
         .fo_close = rnd_close,
         .fo_kqfilter = rnd_kqfilter,
         .fo_restart = fnullop_restart
 };

 static struct evcnt rndpseudo_soft = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
     NULL, "rndpseudo", "open soft");
 static struct evcnt rndpseudo_hard = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
     NULL, "rndpseudo", "open hard");
 EVCNT_ATTACH_STATIC(rndpseudo_soft);
 EVCNT_ATTACH_STATIC(rndpseudo_hard);

 /*
  * Generate a 32-bit counter.
  */
 static inline uint32_t
 rndpseudo_counter(void)
 {
         struct bintime bt;
         uint32_t ret;

 #if defined(__HAVE_CPU_COUNTER)
         if (cpu_hascounter())
                 return (cpu_counter32());
 #endif

         binuptime(&bt);
         ret = bt.sec;
         ret |= bt.sec >> 32;
         ret |= bt.frac;
         ret |= bt.frac >> 32;

         return ret;
 }

 /*
  * `Attach' the random device.  We use the timing of this event as
  * another potential source of initial entropy.
  */
 void
 rndattach(int num)
 {
         uint32_t c;

         /* Trap unwary players who don't call rnd_init() early.  */
         KASSERT(rnd_ready);

         rnd_temp_buffer_cache = pool_cache_init(RND_TEMP_BUFFER_SIZE, 0, 0, 0,
             "rndtemp", NULL, IPL_NONE, NULL, NULL, NULL);
         rnd_ctx_cache = pool_cache_init(sizeof(struct rnd_ctx), 0, 0, 0,
             "rndctx", NULL, IPL_NONE, NULL, NULL, NULL);
         percpu_urandom_cprng = percpu_alloc(sizeof(struct cprng_strong *));

         /* Mix in another counter.  */
         c = rndpseudo_counter();
         rnd_add_data(NULL, &c, sizeof(c), 1);
 }

 int
 rndopen(dev_t dev, int flags, int fmt, struct lwp *l)
 {
         bool hard;
         struct file *fp;
         int fd;
         int error;

         switch (minor(dev)) {
         case RND_DEV_URANDOM:
                 hard = false;
                 rndpseudo_soft.ev_count++;
                 break;

         case RND_DEV_RANDOM:
                 hard = true;
                 rndpseudo_hard.ev_count++;
                 break;

         default:
                 return ENXIO;
         }

         error = fd_allocfile(&fp, &fd);
         if (error)
                 return error;

         /*
          * Allocate a context, but don't create a CPRNG yet -- do that
          * lazily because it consumes entropy from the system entropy
          * pool, which (currently) has the effect of depleting it and
          * causing readers from /dev/random to block.  If this is
          * /dev/urandom and the process is about to send only short
          * reads to it, then we will be using a per-CPU CPRNG anyway.
          */
         struct rnd_ctx *const ctx = pool_cache_get(rnd_ctx_cache, PR_WAITOK);
         ctx->rc_cprng = NULL;
         ctx->rc_hard = hard;

         error = fd_clone(fp, fd, flags, &rnd_fileops, ctx);
         KASSERT(error == EMOVEFD);

         return error;
 }

 /*
  * Fetch a /dev/u?random context's CPRNG, or create and save one if
  * necessary.
  */
 static struct cprng_strong *
 rnd_ctx_cprng(struct rnd_ctx *ctx)
 {
         struct cprng_strong *cprng, *tmp = NULL;

         /* Fast path: if someone has already allocated a CPRNG, use it.  */
         cprng = ctx->rc_cprng;
         if (__predict_true(cprng != NULL)) {
                 /* Make sure the CPU hasn't prefetched cprng's guts.  */
                 membar_consumer();
                 goto out;
         }

         /* Slow path: create a CPRNG.  Allocate before taking locks.  */
         char name[64];
         struct lwp *const l = curlwp;
         (void)snprintf(name, sizeof(name), "%d %"PRIu64" %u",
             (int)l->l_proc->p_pid, l->l_ncsw, l->l_cpticks);
         const int flags = (ctx->rc_hard? (CPRNG_USE_CV | CPRNG_HARD) :
             (CPRNG_INIT_ANY | CPRNG_REKEY_ANY));
         tmp = cprng_strong_create(name, IPL_NONE, flags);

         /* Publish cprng's guts before the pointer to them.  */
         membar_producer();

         /* Attempt to publish tmp, unless someone beat us.  */
         cprng = atomic_cas_ptr(&ctx->rc_cprng, NULL, tmp);
         if (__predict_false(cprng != NULL)) {
                 /* Make sure the CPU hasn't prefetched cprng's guts.  */
                 membar_consumer();
                 goto out;
         }

         /* Published.  Commit tmp.  */
         cprng = tmp;
         tmp = NULL;

 out:    if (tmp != NULL)
                 cprng_strong_destroy(tmp);
         KASSERT(cprng != NULL);
         return cprng;
 }

 /*
  * Fetch a per-CPU CPRNG, or create and save one if necessary.
  */
 static struct cprng_strong *
 rnd_percpu_cprng(void)
 {
         struct cprng_strong **cprngp, *cprng, *tmp = NULL;

         /* Fast path: if there already is a CPRNG for this CPU, use it.  */
         cprngp = percpu_getref(percpu_urandom_cprng);
         cprng = *cprngp;
         if (__predict_true(cprng != NULL))
                 goto out;
         percpu_putref(percpu_urandom_cprng);

         /*
          * Slow path: create a CPRNG named by this CPU.
          *
          * XXX The CPU of the name may be different from the CPU to
          * which it is assigned, because we need to choose a name and
          * allocate a cprng while preemption is enabled.  This could be
          * fixed by changing the cprng_strong API (e.g., by adding a
          * cprng_strong_setname or by separating allocation from
          * initialization), but it's not clear that's worth the
          * trouble.
          */
         char name[32];
         (void)snprintf(name, sizeof(name), "urandom%u", cpu_index(curcpu()));
         tmp = cprng_strong_create(name, IPL_NONE,
             (CPRNG_INIT_ANY | CPRNG_REKEY_ANY));

         /* Try again, but we may have been preempted and lost a race.  */
         cprngp = percpu_getref(percpu_urandom_cprng);
         cprng = *cprngp;
         if (__predict_false(cprng != NULL))
                 goto out;

         /* Commit the CPRNG we just created.  */
         cprng = tmp;
         tmp = NULL;
         *cprngp = cprng;

 out:    percpu_putref(percpu_urandom_cprng);
         if (tmp != NULL)
                 cprng_strong_destroy(tmp);
         KASSERT(cprng != NULL);
         return cprng;
 }

 static int
 rnd_read(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred,
     int flags)
 {
         int error = 0;

         DPRINTF(RND_DEBUG_READ,
             ("Random: Read of %zu requested, flags 0x%08x\n",
                 uio->uio_resid, flags));

         if (uio->uio_resid == 0)
                 return 0;

         struct rnd_ctx *const ctx = fp->f_rndctx;
         uint8_t *const buf = pool_cache_get(rnd_temp_buffer_cache, PR_WAITOK);

         /*
          * Choose a CPRNG to use -- either the per-open CPRNG, if this
          * is /dev/random or a long read, or the per-CPU one otherwise.
          *
          * XXX NIST_BLOCK_KEYLEN_BYTES is a detail of the cprng(9)
          * implementation and as such should not be mentioned here.
          */
         struct cprng_strong *const cprng =
             ((ctx->rc_hard || (uio->uio_resid > NIST_BLOCK_KEYLEN_BYTES))?
                 rnd_ctx_cprng(ctx) : rnd_percpu_cprng());

         /*
          * Generate the data in RND_TEMP_BUFFER_SIZE chunks.
          */
         while (uio->uio_resid > 0) {
                 const size_t n_req = MIN(uio->uio_resid, RND_TEMP_BUFFER_SIZE);

                 CTASSERT(RND_TEMP_BUFFER_SIZE <= CPRNG_MAX_LEN);
                 const size_t n_read = cprng_strong(cprng, buf, n_req,
                     ((ctx->rc_hard && ISSET(fp->f_flag, FNONBLOCK))?
                         FNONBLOCK : 0));

                 /*
                  * Equality will hold unless this is /dev/random, in
                  * which case we get only as many bytes as are left
                  * from the CPRNG's `information-theoretic strength'
                  * since the last rekey.
                  */
                 KASSERT(n_read <= n_req);
                 KASSERT(ctx->rc_hard || (n_read == n_req));

                 error = uiomove(buf, n_read, uio);
                 if (error)
                         goto out;

                 /*
                  * For /dev/urandom:  Reads always succeed in full, no
                  * matter how many iterations that takes.  (XXX But
                  * this means the computation can't be interrupted,
                  * which seems suboptimal.)
                  *
                  * For /dev/random, nonblocking:  Reads succeed with as
                  * many bytes as a single request can return without
                  * blocking, or fail with EAGAIN if a request would
                  * block.  (There is no sense in trying multiple
                  * requests because if the first one didn't fill the
                  * buffer, the second one would almost certainly
                  * block.)
                  *
                  * For /dev/random, blocking:  Reads succeed with as
                  * many bytes as a single request -- which may block --
                  * can return if uninterrupted, or fail with EINTR if
                  * the request is interrupted.
                  */
                 KASSERT((0 < n_read) || ctx->rc_hard);
                 if (ctx->rc_hard) {
                         if (0 < n_read)
                                 error = 0;
                         else if (ISSET(fp->f_flag, FNONBLOCK))
                                 error = EAGAIN;
                         else
                                 error = EINTR;
                         goto out;
                 }
         }

 out:    pool_cache_put(rnd_temp_buffer_cache, buf);
         return error;
 }

 static int
 rnd_write(struct file *fp, off_t *offp, struct uio *uio,
     kauth_cred_t cred, int flags)
 {
-        u_int8_t *bf;
+        uint8_t *bf;
         int n, ret = 0, estimate_ok = 0, estimate = 0, added = 0;

         ret = kauth_authorize_device(cred,
             KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
         if (ret) {
                 return (ret);
         }
         estimate_ok = !kauth_authorize_device(cred,
             KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL);

         DPRINTF(RND_DEBUG_WRITE,
             ("Random: Write of %zu requested\n", uio->uio_resid));

         if (uio->uio_resid == 0)
                 return (0);
         ret = 0;
         bf = pool_cache_get(rnd_temp_buffer_cache, PR_WAITOK);
         while (uio->uio_resid > 0) {
                 /*
                  * Don't flood the pool.
                  */
                 if (added > RND_POOLWORDS * sizeof(int)) {
 #ifdef RND_VERBOSE
                         printf("rnd: added %d already, adding no more.\n",
                             added);
 #endif
                         break;
                 }
                 n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);

                 ret = uiomove((void *)bf, n, uio);
                 if (ret != 0)
                         break;

                 if (estimate_ok) {
                         /*
                          * Don't cause samples to be discarded by taking
                          * the pool's entropy estimate to the max.
                          */
                         if (added > RND_POOLWORDS / 2)
                                 estimate = 0;
                         else
                                 estimate = n * NBBY / 2;
 #ifdef RND_VERBOSE
                         printf("rnd: adding on write, %d bytes, estimate %d\n",
                             n, estimate);
 #endif
                 } else {
 #ifdef RND_VERBOSE
                         printf("rnd: kauth says no entropy.\n");
 #endif
                 }

                 /*
                  * Mix in the bytes.
                  */
                 rnd_add_data(NULL, bf, n, estimate);

                 added += n;
                 DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n));
         }
         pool_cache_put(rnd_temp_buffer_cache, bf);
         return (ret);
 }

 static void
 krndsource_to_rndsource(krndsource_t *kr, rndsource_t *r)
 {
         memset(r, 0, sizeof(*r));
         strlcpy(r->name, kr->name, sizeof(r->name));
         r->total = kr->total;
         r->type = kr->type;
         r->flags = kr->flags;
 }

 static void
 krndsource_to_rndsource_est(krndsource_t *kr, rndsource_est_t *re)
 {
         memset(re, 0, sizeof(*re));
         krndsource_to_rndsource(kr, &re->rt);
         re->dt_samples = kr->time_delta.insamples;
         re->dt_total = kr->time_delta.outbits;
         re->dv_samples = kr->value_delta.insamples;
         re->dv_total = kr->value_delta.outbits;
 }

 static void
 krs_setflags(krndsource_t *kr, uint32_t flags, uint32_t mask)
 {
         uint32_t oflags = kr->flags;

         kr->flags &= ~mask;
         kr->flags |= (flags & mask);

         if (oflags & RND_FLAG_HASENABLE &&
             ((oflags & RND_FLAG_NO_COLLECT) != (flags & RND_FLAG_NO_COLLECT))) {
                 kr->enable(kr, !(flags & RND_FLAG_NO_COLLECT));
         }
 }

 int
 rnd_ioctl(struct file *fp, u_long cmd, void *addr)
 {
         krndsource_t *kr;
         rndstat_t *rst;
         rndstat_name_t *rstnm;
         rndstat_est_t *rset;
         rndstat_est_name_t *rsetnm;
         rndctl_t *rctl;
         rnddata_t *rnddata;
         uint32_t count, start;
         int ret = 0;
         int estimate_ok = 0, estimate = 0;

         switch (cmd) {
         case FIONBIO:
         case FIOASYNC:
         case RNDGETENTCNT:
                 break;

         case RNDGETPOOLSTAT:
         case RNDGETSRCNUM:
         case RNDGETSRCNAME:
         case RNDGETESTNUM:
         case RNDGETESTNAME:
                 ret = kauth_authorize_device(curlwp->l_cred,
                     KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
                 if (ret)
                         return (ret);
                 break;

         case RNDCTL:
                 ret = kauth_authorize_device(curlwp->l_cred,
                     KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
                 if (ret)
                         return (ret);
                 break;

         case RNDADDDATA:
                 ret = kauth_authorize_device(curlwp->l_cred,
                     KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
                 if (ret)
                         return (ret);
                 estimate_ok = !kauth_authorize_device(curlwp->l_cred,
                     KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL);
                 break;

         default:
 #ifdef COMPAT_50
                 return compat_50_rnd_ioctl(fp, cmd, addr);
 #else
                 return ENOTTY;
 #endif
         }

         switch (cmd) {

         /*
          * Handled in upper layer really, but we have to return zero
          * for it to be accepted by the upper layer.
          */
         case FIONBIO:
         case FIOASYNC:
                 break;

         case RNDGETENTCNT:
                 mutex_spin_enter(&rndpool_mtx);
-                *(u_int32_t *)addr = rndpool_get_entropy_count(&rnd_pool);
+                *(uint32_t *)addr = rndpool_get_entropy_count(&rnd_pool);
                 mutex_spin_exit(&rndpool_mtx);
                 break;

         case RNDGETPOOLSTAT:
                 mutex_spin_enter(&rndpool_mtx);
                 rndpool_get_stats(&rnd_pool, addr, sizeof(rndpoolstat_t));
                 mutex_spin_exit(&rndpool_mtx);
                 break;

         case RNDGETSRCNUM:
                 rst = (rndstat_t *)addr;

                 if (rst->count == 0)
                         break;

                 if (rst->count > RND_MAXSTATCOUNT)
                         return (EINVAL);

                 mutex_spin_enter(&rndpool_mtx);
                 /*
                  * Find the starting source by running through the
                  * list of sources.
                  */
                 kr = LIST_FIRST(&rnd_sources);
                 start = rst->start;
                 while (kr != NULL && start >= 1) {
                         kr = LIST_NEXT(kr, list);
                         start--;
                 }

                 /*
                  * Return up to as many structures as the user asked
                  * for.  If we run out of sources, a count of zero
                  * will be returned, without an error.
                  */
                 for (count = 0; count < rst->count && kr != NULL; count++) {
                         krndsource_to_rndsource(kr, &rst->source[count]);
                         kr = LIST_NEXT(kr, list);
                 }

                 rst->count = count;

                 mutex_spin_exit(&rndpool_mtx);
                 break;

         case RNDGETESTNUM:
                 rset = (rndstat_est_t *)addr;

                 if (rset->count == 0)
                         break;

                 if (rset->count > RND_MAXSTATCOUNT)
                         return (EINVAL);

                 mutex_spin_enter(&rndpool_mtx);
                 /*
                  * Find the starting source by running through the
                  * list of sources.
                  */
                 kr = LIST_FIRST(&rnd_sources);
                 start = rset->start;
                 while (kr != NULL && start > 1) {
                         kr = LIST_NEXT(kr, list);
                         start--;
                 }

                 /* Return up to as many structures as the user asked
                  * for.  If we run out of sources, a count of zero
                  * will be returned, without an error.
                  */
                 for (count = 0; count < rset->count && kr != NULL; count++) {
                         krndsource_to_rndsource_est(kr, &rset->source[count]);
                         kr = LIST_NEXT(kr, list);
                 }

                 rset->count = count;

                 mutex_spin_exit(&rndpool_mtx);
                 break;

         case RNDGETSRCNAME:
                 /*
                  * Scan through the list, trying to find the name.
                  */
                 mutex_spin_enter(&rndpool_mtx);
                 rstnm = (rndstat_name_t *)addr;
                 kr = LIST_FIRST(&rnd_sources);
                 while (kr != NULL) {
                         if (strncmp(kr->name, rstnm->name,
                             MIN(sizeof(kr->name),
                                 sizeof(rstnm->name))) == 0) {
                                 krndsource_to_rndsource(kr, &rstnm->source);
                                 mutex_spin_exit(&rndpool_mtx);
                                 return (0);
                         }
                         kr = LIST_NEXT(kr, list);
                 }
                 mutex_spin_exit(&rndpool_mtx);

                 ret = ENOENT;           /* name not found */

                 break;

         case RNDGETESTNAME:
                 /*
                  * Scan through the list, trying to find the name.
                  */
                 mutex_spin_enter(&rndpool_mtx);
                 rsetnm = (rndstat_est_name_t *)addr;
                 kr = LIST_FIRST(&rnd_sources);
                 while (kr != NULL) {
                         if (strncmp(kr->name, rsetnm->name,
                             MIN(sizeof(kr->name),
                                 sizeof(rsetnm->name))) == 0) {
                                 krndsource_to_rndsource_est(kr,
                                     &rsetnm->source);
                                 mutex_spin_exit(&rndpool_mtx);
                                 return (0);
                         }
                         kr = LIST_NEXT(kr, list);
                 }
                 mutex_spin_exit(&rndpool_mtx);

                 ret = ENOENT;           /* name not found */

                 break;

         case RNDCTL:
                 /*
                  * Set flags to enable/disable entropy counting and/or
                  * collection.
                  */
                 mutex_spin_enter(&rndpool_mtx);
                 rctl = (rndctl_t *)addr;
                 kr = LIST_FIRST(&rnd_sources);

                 /*
                  * Flags set apply to all sources of this type.
                  */
                 if (rctl->type != 0xff) {
                         while (kr != NULL) {
                                 if (kr->type == rctl->type) {
                                         krs_setflags(kr,
                                             rctl->flags, rctl->mask);
                                 }
                                 kr = LIST_NEXT(kr, list);
                         }
                         mutex_spin_exit(&rndpool_mtx);
                         return (0);
                 }

                 /*
                  * scan through the list, trying to find the name
                  */
                 while (kr != NULL) {
                         if (strncmp(kr->name, rctl->name,
                             MIN(sizeof(kr->name),
                                 sizeof(rctl->name))) == 0) {
                                 krs_setflags(kr, rctl->flags, rctl->mask);
                                 mutex_spin_exit(&rndpool_mtx);
                                 return (0);
                         }
                         kr = LIST_NEXT(kr, list);
                 }

                 mutex_spin_exit(&rndpool_mtx);
                 ret = ENOENT;           /* name not found */

                 break;

         case RNDADDDATA:
                 /*
                  * Don't seed twice if our bootloader has
                  * seed loading support.
                  */
                 if (!boot_rsp) {
                         rnddata = (rnddata_t *)addr;

                         if (rnddata->len > sizeof(rnddata->data))
                                 return EINVAL;

                         if (estimate_ok) {
                                 /*
                                  * Do not accept absurd entropy estimates, and
                                  * do not flood the pool with entropy such that
                                  * new samples are discarded henceforth.
                                  */
                                 estimate = MIN((rnddata->len * NBBY) / 2,
                                     MIN(rnddata->entropy,
                                         RND_POOLBITS / 2));
                         } else {
                                 estimate = 0;
                         }

                         mutex_spin_enter(&rndpool_mtx);
                         rndpool_add_data(&rnd_pool, rnddata->data,
                             rnddata->len, estimate);
                         mutex_spin_exit(&rndpool_mtx);

                         rnd_wakeup_readers();
                 }
 #ifdef RND_VERBOSE
                 else {
                         printf("rnd: already seeded by boot loader\n");
                 }
 #endif
                 break;

         default:
                 return ENOTTY;
         }

         return (ret);
 }

 static int
 rnd_poll(struct file *fp, int events)
 {
         struct rnd_ctx *const ctx = fp->f_rndctx;
         int revents;

         /*
          * We are always writable.
          */
         revents = events & (POLLOUT | POLLWRNORM);

         /*
          * Save some work if not checking for reads.
          */
         if ((events & (POLLIN | POLLRDNORM)) == 0)
                 return revents;

         /*
          * For /dev/random, ask the CPRNG, which may require creating
          * one.  For /dev/urandom, we're always readable.
          */
         if (ctx->rc_hard)
                 revents |= cprng_strong_poll(rnd_ctx_cprng(ctx), events);
         else
                 revents |= (events & (POLLIN | POLLRDNORM));

         return revents;
 }

 static int
 rnd_stat(struct file *fp, struct stat *st)
 {
         struct rnd_ctx *const ctx = fp->f_rndctx;

         /* XXX lock, if cprng allocated?  why? */
         memset(st, 0, sizeof(*st));
         st->st_dev = makedev(cdevsw_lookup_major(&rnd_cdevsw),
             (ctx->rc_hard? RND_DEV_RANDOM : RND_DEV_URANDOM));
         /* XXX leave atimespec, mtimespec, ctimespec = 0? */

         st->st_uid = kauth_cred_geteuid(fp->f_cred);
         st->st_gid = kauth_cred_getegid(fp->f_cred);
         st->st_mode = S_IFCHR;
         return 0;
 }

 static int
 rnd_close(struct file *fp)
 {
         struct rnd_ctx *const ctx = fp->f_rndctx;

         if (ctx->rc_cprng != NULL)
                 cprng_strong_destroy(ctx->rc_cprng);
         fp->f_rndctx = NULL;
         pool_cache_put(rnd_ctx_cache, ctx);

         return 0;
 }

 static int
 rnd_kqfilter(struct file *fp, struct knote *kn)
 {
         struct rnd_ctx *const ctx = fp->f_rndctx;

         return cprng_strong_kqfilter(rnd_ctx_cprng(ctx), kn);
 }